1452 files changed, 79930 insertions, 46812 deletions
diff --git a/Documentation/ABI/testing/sysfs-class-net b/Documentation/ABI/testing/sysfs-class-net index 2f1788111cd9..e2e0fe553ad8 100644 --- a/Documentation/ABI/testing/sysfs-class-net +++ b/Documentation/ABI/testing/sysfs-class-net @@ -117,7 +117,7 @@ Description: full: full duplex Note: This attribute is only valid for interfaces that implement - the ethtool get_settings method (mostly Ethernet). + the ethtool get_link_ksettings method (mostly Ethernet). What: /sys/class/net/<iface>/flags Date: April 2005 @@ -224,7 +224,7 @@ Description: an integer representing the link speed in Mbits/sec. Note: this attribute is only valid for interfaces that implement - the ethtool get_settings method (mostly Ethernet ). + the ethtool get_link_ksettings method (mostly Ethernet). What: /sys/class/net/<iface>/tx_queue_len Date: April 2005 diff --git a/Documentation/ABI/testing/sysfs-class-net-dsa b/Documentation/ABI/testing/sysfs-class-net-dsa new file mode 100644 index 000000000000..f240221e071e --- /dev/null +++ b/Documentation/ABI/testing/sysfs-class-net-dsa @@ -0,0 +1,7 @@ +What: /sys/class/net/<iface>/tagging +Date: August 2018 +KernelVersion: 4.20 +Contact: netdev@vger.kernel.org +Description: + String indicating the type of tagging protocol used by the + DSA slave network device. diff --git a/Documentation/devicetree/bindings/mips/mscc.txt b/Documentation/devicetree/bindings/mips/mscc.txt index ae15ec333542..bc817e984628 100644 --- a/Documentation/devicetree/bindings/mips/mscc.txt +++ b/Documentation/devicetree/bindings/mips/mscc.txt @@ -41,3 +41,19 @@ Example: compatible = "mscc,ocelot-cpu-syscon", "syscon"; reg = <0x70000000 0x2c>; }; + +o HSIO regs: + +The SoC has a few registers (HSIO) handling miscellaneous functionalities: +configuration and status of PLL5, RCOMP, SyncE, SerDes configurations and +status, SerDes muxing and a thermal sensor. 
+ +Required properties: +- compatible: Should be "mscc,ocelot-hsio", "syscon", "simple-mfd" +- reg : Should contain registers location and length + +Example: + syscon@10d0000 { + compatible = "mscc,ocelot-hsio", "syscon", "simple-mfd"; + reg = <0x10d0000 0x10000>; + }; diff --git a/Documentation/devicetree/bindings/net/brcm,unimac-mdio.txt b/Documentation/devicetree/bindings/net/brcm,unimac-mdio.txt index 4648948f7c3b..e15589f47787 100644 --- a/Documentation/devicetree/bindings/net/brcm,unimac-mdio.txt +++ b/Documentation/devicetree/bindings/net/brcm,unimac-mdio.txt @@ -19,6 +19,9 @@ Optional properties: - interrupt-names: must be "mdio_done_error" when there is a shared interrupt fed to this hardware block, or must be "mdio_done" for the first interrupt and "mdio_error" for the second when there are separate interrupts +- clocks: A reference to the clock supplying the MDIO bus controller +- clock-frequency: the MDIO bus clock frequency that must be output by the + MDIO bus hardware; if absent, the default hardware values are used Child nodes of this MDIO bus controller node are standard Ethernet PHY device nodes as described in Documentation/devicetree/bindings/net/phy.txt diff --git a/Documentation/devicetree/bindings/net/dsa/lantiq-gswip.txt b/Documentation/devicetree/bindings/net/dsa/lantiq-gswip.txt new file mode 100644 index 000000000000..886cbe8ffb38 --- /dev/null +++ b/Documentation/devicetree/bindings/net/dsa/lantiq-gswip.txt @@ -0,0 +1,143 @@ +Lantiq GSWIP Ethernet switches +================================== + +Required properties for GSWIP core: + +- compatible : "lantiq,xrx200-gswip" for the embedded GSWIP in the + xRX200 SoC +- reg : memory range of the GSWIP core registers + : memory range of the GSWIP MDIO registers + : memory range of the GSWIP MII registers + +See Documentation/devicetree/bindings/net/dsa/dsa.txt for a list of +additional required and optional properties. + + +Required properties for MDIO bus: +- compatible : "lantiq,xrx200-mdio" for the MDIO bus inside the GSWIP + core of the xRX200 SoC and the PHYs connected to it. + +See Documentation/devicetree/bindings/net/mdio.txt for a list of additional +required and optional properties. + + +Required properties for GPHY firmware loading: +- compatible : "lantiq,xrx200-gphy-fw", "lantiq,gphy-fw" + "lantiq,xrx300-gphy-fw", "lantiq,gphy-fw" + "lantiq,xrx330-gphy-fw", "lantiq,gphy-fw" + for the loading of the firmware into the embedded + GPHY core of the SoC.
+- lantiq,rcu : reference to the rcu syscon + +The GPHY firmware loader has a list of GPHY entries, one for each +embedded GPHY. + +- reg : Offset of the GPHY firmware register in the RCU + register range +- resets : list of resets of the embedded GPHY +- reset-names : list of names of the resets + +Example: + +Ethernet switch on the VRX200 SoC: + +switch@e108000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "lantiq,xrx200-gswip"; + reg = < 0xe108000 0x3100 /* switch */ + 0xe10b100 0xd8 /* mdio */ + 0xe10b1d8 0x130 /* mii */ + >; + dsa,member = <0 0>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + label = "lan3"; + phy-mode = "rgmii"; + phy-handle = <&phy0>; + }; + + port@1 { + reg = <1>; + label = "lan4"; + phy-mode = "rgmii"; + phy-handle = <&phy1>; + }; + + port@2 { + reg = <2>; + label = "lan2"; + phy-mode = "internal"; + phy-handle = <&phy11>; + }; + + port@4 { + reg = <4>; + label = "lan1"; + phy-mode = "internal"; + phy-handle = <&phy13>; + }; + + port@5 { + reg = <5>; + label = "wan"; + phy-mode = "rgmii"; + phy-handle = <&phy5>; + }; + + port@6 { + reg = <0x6>; + label = "cpu"; + ethernet = <&eth0>; + }; + }; + + mdio { + #address-cells = <1>; + #size-cells = <0>; + compatible = "lantiq,xrx200-mdio"; + reg = <0>; + + phy0: ethernet-phy@0 { + reg = <0x0>; + }; + phy1: ethernet-phy@1 { + reg = <0x1>; + }; + phy5: ethernet-phy@5 { + reg = <0x5>; + }; + phy11: ethernet-phy@11 { + reg = <0x11>; + }; + phy13: ethernet-phy@13 { + reg = <0x13>; + }; + }; + + gphy-fw { + compatible = "lantiq,xrx200-gphy-fw", "lantiq,gphy-fw"; + lantiq,rcu = <&rcu0>; + #address-cells = <1>; + #size-cells = <0>; + + gphy@20 { + reg = <0x20>; + + resets = <&reset0 31 30>; + reset-names = "gphy"; + }; + + gphy@68 { + reg = <0x68>; + + resets = <&reset0 29 28>; + reset-names = "gphy"; + }; + }; +}; diff --git a/Documentation/devicetree/bindings/net/lantiq,xrx200-net.txt b/Documentation/devicetree/bindings/net/lantiq,xrx200-net.txt new file mode 100644 index 000000000000..5ff5e68bbbb6 --- /dev/null +++ b/Documentation/devicetree/bindings/net/lantiq,xrx200-net.txt @@ -0,0 +1,21 @@ +Lantiq xRX200 GSWIP PMAC Ethernet driver +================================== + +Required properties: + +- compatible : "lantiq,xrx200-net" for the PMAC of the embedded + : GSWIP in the xRX200 +- reg : memory range of the PMAC core inside of the GSWIP core +- interrupts : TX and RX DMA interrupts. Use interrupt-names "tx" for + : the TX interrupt and "rx" for the RX interrupt. + +Example: + +ethernet@e10b308 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "lantiq,xrx200-net"; + reg = <0xe10b308 0xcf8>; + interrupts = <73>, <72>; + interrupt-names = "tx", "rx"; +}; diff --git a/Documentation/devicetree/bindings/net/marvell-pp2.txt b/Documentation/devicetree/bindings/net/marvell-pp2.txt index fc019df0d863..b78397669320 100644 --- a/Documentation/devicetree/bindings/net/marvell-pp2.txt +++ b/Documentation/devicetree/bindings/net/marvell-pp2.txt @@ -31,7 +31,7 @@ required. Required properties (port): -- interrupts: interrupt for the port +- interrupts: interrupt(s) for the port - port-id: ID of the port from the MAC point of view - gop-port-id: only for marvell,armada-7k-pp2, ID of the port from the GOP (Group Of Ports) point of view. This ID is used to index the @@ -43,10 +43,12 @@ Optional properties (port): - marvell,loopback: port is loopback mode - phy: a phandle to a phy node defining the PHY address (as the reg property, a single integer).
-- interrupt-names: if more than a single interrupt for rx is given, must - be the name associated to the interrupts listed. Valid - names are: "tx-cpu0", "tx-cpu1", "tx-cpu2", "tx-cpu3", - "rx-shared", "link". +- interrupt-names: if more than a single interrupt is given, must be the + names associated with the interrupts listed. Valid names are: + "hifX", with X in [0..8], and "link". The names "tx-cpu0", + "tx-cpu1", "tx-cpu2", "tx-cpu3" and "rx-shared" are supported + for backward compatibility but shouldn't be used for new + additions. - marvell,system-controller: a phandle to the system controller. Example for marvell,armada-375-pp2: @@ -89,9 +91,14 @@ cpm_ethernet: ethernet@0 { <ICU_GRP_NSR 43 IRQ_TYPE_LEVEL_HIGH>, <ICU_GRP_NSR 47 IRQ_TYPE_LEVEL_HIGH>, <ICU_GRP_NSR 51 IRQ_TYPE_LEVEL_HIGH>, - <ICU_GRP_NSR 55 IRQ_TYPE_LEVEL_HIGH>; - interrupt-names = "tx-cpu0", "tx-cpu1", "tx-cpu2", - "tx-cpu3", "rx-shared"; + <ICU_GRP_NSR 55 IRQ_TYPE_LEVEL_HIGH>, + <ICU_GRP_NSR 59 IRQ_TYPE_LEVEL_HIGH>, + <ICU_GRP_NSR 63 IRQ_TYPE_LEVEL_HIGH>, + <ICU_GRP_NSR 67 IRQ_TYPE_LEVEL_HIGH>, + <ICU_GRP_NSR 71 IRQ_TYPE_LEVEL_HIGH>, + <ICU_GRP_NSR 129 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "hif0", "hif1", "hif2", "hif3", "hif4", + "hif5", "hif6", "hif7", "hif8", "link"; port-id = <0>; gop-port-id = <0>; }; @@ -101,9 +108,14 @@ cpm_ethernet: ethernet@0 { <ICU_GRP_NSR 44 IRQ_TYPE_LEVEL_HIGH>, <ICU_GRP_NSR 48 IRQ_TYPE_LEVEL_HIGH>, <ICU_GRP_NSR 52 IRQ_TYPE_LEVEL_HIGH>, - <ICU_GRP_NSR 56 IRQ_TYPE_LEVEL_HIGH>; - interrupt-names = "tx-cpu0", "tx-cpu1", "tx-cpu2", - "tx-cpu3", "rx-shared"; + <ICU_GRP_NSR 56 IRQ_TYPE_LEVEL_HIGH>, + <ICU_GRP_NSR 60 IRQ_TYPE_LEVEL_HIGH>, + <ICU_GRP_NSR 64 IRQ_TYPE_LEVEL_HIGH>, + <ICU_GRP_NSR 68 IRQ_TYPE_LEVEL_HIGH>, + <ICU_GRP_NSR 72 IRQ_TYPE_LEVEL_HIGH>, + <ICU_GRP_NSR 128 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "hif0", "hif1", "hif2", "hif3", "hif4", + "hif5", "hif6", "hif7", "hif8", "link"; port-id = <1>; gop-port-id = <2>; }; @@ -113,9 +125,14 @@ cpm_ethernet: ethernet@0 { <ICU_GRP_NSR 45 IRQ_TYPE_LEVEL_HIGH>, <ICU_GRP_NSR 49 IRQ_TYPE_LEVEL_HIGH>, <ICU_GRP_NSR 53 IRQ_TYPE_LEVEL_HIGH>, - <ICU_GRP_NSR 57 IRQ_TYPE_LEVEL_HIGH>; - interrupt-names = "tx-cpu0", "tx-cpu1", "tx-cpu2", - "tx-cpu3", "rx-shared"; + <ICU_GRP_NSR 57 IRQ_TYPE_LEVEL_HIGH>, + <ICU_GRP_NSR 61 IRQ_TYPE_LEVEL_HIGH>, + <ICU_GRP_NSR 65 IRQ_TYPE_LEVEL_HIGH>, + <ICU_GRP_NSR 69 IRQ_TYPE_LEVEL_HIGH>, + <ICU_GRP_NSR 73 IRQ_TYPE_LEVEL_HIGH>, + <ICU_GRP_NSR 127 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "hif0", "hif1", "hif2", "hif3", "hif4", + "hif5", "hif6", "hif7", "hif8", "link"; port-id = <2>; gop-port-id = <3>; }; diff --git a/Documentation/devicetree/bindings/net/mscc-ocelot.txt b/Documentation/devicetree/bindings/net/mscc-ocelot.txt index 0a84711abece..9e5c17d426ce 100644 --- a/Documentation/devicetree/bindings/net/mscc-ocelot.txt +++ b/Documentation/devicetree/bindings/net/mscc-ocelot.txt @@ -12,7 +12,6 @@ Required properties: - "sys" - "rew" - "qs" - - "hsio" - "qsys" - "ana" - "portX" with X from 0 to the number of last port index available on that @@ -45,7 +44,6 @@ Example: reg = <0x1010000 0x10000>, <0x1030000 0x10000>, <0x1080000 0x100>, - <0x10d0000 0x10000>, <0x11e0000 0x100>, <0x11f0000 0x100>, <0x1200000 0x100>, @@ -59,10 +57,9 @@ Example: <0x1280000 0x100>, <0x1800000 0x80000>, <0x1880000 0x10000>; - reg-names = "sys", "rew", "qs", "hsio", "port0", - "port1", "port2", "port3", "port4", "port5", - "port6", "port7", "port8", "port9", "port10", - "qsys", "ana"; + reg-names = "sys", "rew", "qs", "port0", "port1",
"port2", + "port3", "port4", "port5", "port6", "port7", + "port8", "port9", "port10", "qsys", "ana"; interrupts = <21 22>; interrupt-names = "xtr", "inj"; diff --git a/Documentation/devicetree/bindings/net/mscc-phy-vsc8531.txt b/Documentation/devicetree/bindings/net/mscc-phy-vsc8531.txt index 0eedabe22cc3..5ff37c68c941 100644 --- a/Documentation/devicetree/bindings/net/mscc-phy-vsc8531.txt +++ b/Documentation/devicetree/bindings/net/mscc-phy-vsc8531.txt @@ -1,10 +1,5 @@ * Microsemi - vsc8531 Giga bit ethernet phy -Required properties: -- compatible : Should contain phy id as "ethernet-phy-idAAAA.BBBB" - The PHY device uses the binding described in - Documentation/devicetree/bindings/net/phy.txt - Optional properties: - vsc8531,vddmac : The vddmac in mV. Allowed values is listed in the first row of Table 1 (below). @@ -27,14 +22,16 @@ Optional properties: 'vddmac'. Default value is 0%. Ref: Table:1 - Edge rate change (below). -- vsc8531,led-0-mode : LED mode. Specify how the LED[0] should behave. - Allowed values are define in - "include/dt-bindings/net/mscc-phy-vsc8531.h". - Default value is VSC8531_LINK_1000_ACTIVITY (1). -- vsc8531,led-1-mode : LED mode. Specify how the LED[1] should behave. - Allowed values are define in +- vsc8531,led-[N]-mode : LED mode. Specify how the LED[N] should behave. + N depends on the number of LEDs supported by a + PHY. + Allowed values are defined in "include/dt-bindings/net/mscc-phy-vsc8531.h". - Default value is VSC8531_LINK_100_ACTIVITY (2). + Default values are VSC8531_LINK_1000_ACTIVITY (1), + VSC8531_LINK_100_ACTIVITY (2), + VSC8531_LINK_ACTIVITY (0) and + VSC8531_DUPLEX_COLLISION (8). + Table: 1 - Edge rate change ----------------------------------------------------------------| diff --git a/Documentation/devicetree/bindings/net/renesas,ravb.txt b/Documentation/devicetree/bindings/net/renesas,ravb.txt index da249b7c406c..3530256a879c 100644 --- a/Documentation/devicetree/bindings/net/renesas,ravb.txt +++ b/Documentation/devicetree/bindings/net/renesas,ravb.txt @@ -6,6 +6,7 @@ interface contains. Required properties: - compatible: Must contain one or more of the following: - "renesas,etheravb-r8a7743" for the R8A7743 SoC. + - "renesas,etheravb-r8a7744" for the R8A7744 SoC. - "renesas,etheravb-r8a7745" for the R8A7745 SoC. - "renesas,etheravb-r8a77470" for the R8A77470 SoC. - "renesas,etheravb-r8a7790" for the R8A7790 SoC. diff --git a/Documentation/devicetree/bindings/phy/phy-ocelot-serdes.txt b/Documentation/devicetree/bindings/phy/phy-ocelot-serdes.txt new file mode 100644 index 000000000000..332219860187 --- /dev/null +++ b/Documentation/devicetree/bindings/phy/phy-ocelot-serdes.txt @@ -0,0 +1,43 @@ +Microsemi Ocelot SerDes muxing driver +------------------------------------- + +On Microsemi Ocelot, there is a handful of registers in HSIO address +space for setting up the SerDes to switch port muxing. + +A SerDes X can be "muxed" to work with switch port Y or Z for example. +One specific SerDes can also be used as a PCIe interface. + +Hence, a SerDes represents an interface, be it an Ethernet or a PCIe one. + +There are two kinds of SerDes: SERDES1G supports 10/100Mbps in +half/full-duplex and 1000Mbps in full-duplex mode while SERDES6G supports +10/100Mbps in half/full-duplex and 1000/2500Mbps in full-duplex mode. + +Also, SERDES6G number (aka "macro") 0 is the only interface supporting +QSGMII. + +This is a child of the HSIO syscon ("mscc,ocelot-hsio", see +Documentation/devicetree/bindings/mips/mscc.txt) on the Microsemi Ocelot. 
+ +Required properties: + +- compatible: should be "mscc,vsc7514-serdes" +- #phy-cells : from the generic phy bindings, must be 2. + The first number defines the input port to use for a given + SerDes macro. The second defines the macro to use. They are + defined in dt-bindings/phy/phy-ocelot-serdes.h + +Example: + + serdes: serdes { + compatible = "mscc,vsc7514-serdes"; + #phy-cells = <2>; + }; + + ethernet { + port1 { + phy-handle = <&phy_foo>; + /* Link SERDES1G_5 to port1 */ + phys = <&serdes 1 SERDES1G_5>; + }; + }; diff --git a/Documentation/devicetree/bindings/soc/fsl/cpm_qe/network.txt b/Documentation/devicetree/bindings/soc/fsl/cpm_qe/network.txt index 03c741602c6d..6d2dd8a31482 100644 --- a/Documentation/devicetree/bindings/soc/fsl/cpm_qe/network.txt +++ b/Documentation/devicetree/bindings/soc/fsl/cpm_qe/network.txt @@ -98,6 +98,12 @@ The property below is dependent on fsl,tdm-interface: usage: optional for tdm interface value type: <empty> Definition : Internal loopback connecting on TDM layer. +- fsl,hmask + usage: optional + Value type: <u16> + Definition: HDLC address recognition. Set to zero to disable + address filtering of packets: + fsl,hmask = /bits/ 16 <0x0000>; Example for tdm interface: diff --git a/Documentation/networking/00-INDEX b/Documentation/networking/00-INDEX index 02a323c43261..f4f2b5d6c8d8 100644 --- a/Documentation/networking/00-INDEX +++ b/Documentation/networking/00-INDEX @@ -94,8 +94,8 @@ gianfar.txt - Gianfar Ethernet Driver. i40e.txt - README for the Intel Ethernet Controller XL710 Driver (i40e). -i40evf.txt - - Short note on the Driver for the Intel(R) XL710 X710 Virtual Function +iavf.txt + - README for the Intel Ethernet Adaptive Virtual Function Driver (iavf). ieee802154.txt - Linux IEEE 802.15.4 implementation, API and drivers igb.txt @@ -198,8 +198,6 @@ tc-actions-env-rules.txt - rules for traffic control (tc) actions. timestamping.txt - overview of network packet timestamping variants. -tcp.txt - - short blurb on how TCP output takes place. tcp-thin.txt - kernel tuning options for low rate 'thin' TCP streams. team.txt diff --git a/Documentation/networking/af_xdp.rst b/Documentation/networking/af_xdp.rst index ff929cfab4f4..4ae4f9d8f8fe 100644 --- a/Documentation/networking/af_xdp.rst +++ b/Documentation/networking/af_xdp.rst @@ -159,8 +159,8 @@ log2(2048) LSB of the addr will be masked off, meaning that 2048, 2050 and 3000 refer to the same chunk. -UMEM Completetion Ring -~~~~~~~~~~~~~~~~~~~~~~ +UMEM Completion Ring +~~~~~~~~~~~~~~~~~~~~ The Completion Ring is used to transfer ownership of UMEM frames from kernel-space to user-space. Just like the Fill ring, UMEM indices are diff --git a/Documentation/networking/devlink-params-bnxt.txt b/Documentation/networking/devlink-params-bnxt.txt new file mode 100644 index 000000000000..481aa303d5b4 --- /dev/null +++ b/Documentation/networking/devlink-params-bnxt.txt @@ -0,0 +1,18 @@ +enable_sriov [DEVICE, GENERIC] + Configuration mode: Permanent + +ignore_ari [DEVICE, GENERIC] + Configuration mode: Permanent + +msix_vec_per_pf_max [DEVICE, GENERIC] + Configuration mode: Permanent + +msix_vec_per_pf_min [DEVICE, GENERIC] + Configuration mode: Permanent + +gre_ver_check [DEVICE, DRIVER-SPECIFIC] + Generic Routing Encapsulation (GRE) version check will + be enabled in the device. If disabled, the device skips + version checking for incoming packets.
+ Type: Boolean + Configuration mode: Permanent diff --git a/Documentation/networking/devlink-params.txt b/Documentation/networking/devlink-params.txt new file mode 100644 index 000000000000..ae444ffe73ac --- /dev/null +++ b/Documentation/networking/devlink-params.txt @@ -0,0 +1,42 @@ +Devlink configuration parameters +================================ +Following is the list of configuration parameters available via the devlink +interface. Each parameter can be generic or driver-specific, and all are +device-level parameters. + +Note that the driver-specific files should list the generic params +they support, along with the supported config modes. + +Each parameter can be set in different configuration modes: + runtime - set while the driver is running, no reset required. + driverinit - applied while the driver initializes, requires a + driver restart via the devlink reload command. + permanent - written to the device's non-volatile memory, hard + reset required. + +Following is the list of parameters: +==================================== +enable_sriov [DEVICE, GENERIC] + Enable Single Root I/O Virtualisation (SRIOV) in + the device. + Type: Boolean + +ignore_ari [DEVICE, GENERIC] + Ignore Alternative Routing-ID Interpretation (ARI) + capability. If enabled, the adapter will ignore the + ARI capability even when the platform supports it, + and will create the same number of partitions as + when the platform does not support ARI. + Type: Boolean + +msix_vec_per_pf_max [DEVICE, GENERIC] + Provides the maximum number of MSIX interrupts that + a device can create. The value is the same across + all physical functions (PFs) in the device. + Type: u32 + +msix_vec_per_pf_min [DEVICE, GENERIC] + Provides the minimum number of MSIX interrupts required + for device initialization. The value is the same across + all physical functions (PFs) in the device.
+ Type: u32 diff --git a/drivers/staging/fsl-dpaa2/ethernet/ethernet-driver.rst b/Documentation/networking/dpaa2/ethernet-driver.rst index 90ec940749e8..90ec940749e8 100644 --- a/drivers/staging/fsl-dpaa2/ethernet/ethernet-driver.rst +++ b/Documentation/networking/dpaa2/ethernet-driver.rst diff --git a/Documentation/networking/dpaa2/index.rst b/Documentation/networking/dpaa2/index.rst index 10bea113a7bc..67bd87fe6c53 100644 --- a/Documentation/networking/dpaa2/index.rst +++ b/Documentation/networking/dpaa2/index.rst @@ -7,3 +7,4 @@ DPAA2 Documentation overview dpio-driver + ethernet-driver diff --git a/Documentation/networking/filter.txt b/Documentation/networking/filter.txt index e6b4ebb2b243..2196b824e96c 100644 --- a/Documentation/networking/filter.txt +++ b/Documentation/networking/filter.txt @@ -203,11 +203,11 @@ opcodes as defined in linux/filter.h stand for: Instruction Addressing mode Description - ld 1, 2, 3, 4, 10 Load word into A + ld 1, 2, 3, 4, 12 Load word into A ldi 4 Load word into A ldh 1, 2 Load half-word into A ldb 1, 2 Load byte into A - ldx 3, 4, 5, 10 Load word into X + ldx 3, 4, 5, 12 Load word into X ldxi 4 Load word into X ldxb 5 Load byte into X @@ -216,14 +216,14 @@ opcodes as defined in linux/filter.h stand for: jmp 6 Jump to label ja 6 Jump to label - jeq 7, 8 Jump on A == k - jneq 8 Jump on A != k - jne 8 Jump on A != k - jlt 8 Jump on A < k - jle 8 Jump on A <= k - jgt 7, 8 Jump on A > k - jge 7, 8 Jump on A >= k - jset 7, 8 Jump on A & k + jeq 7, 8, 9, 10 Jump on A == <x> + jneq 9, 10 Jump on A != <x> + jne 9, 10 Jump on A != <x> + jlt 9, 10 Jump on A < <x> + jle 9, 10 Jump on A <= <x> + jgt 7, 8, 9, 10 Jump on A > <x> + jge 7, 8, 9, 10 Jump on A >= <x> + jset 7, 8, 9, 10 Jump on A & <x> add 0, 4 A + <x> sub 0, 4 A - <x> @@ -240,7 +240,7 @@ opcodes as defined in linux/filter.h stand for: tax Copy A into X txa Copy X into A - ret 4, 9 Return + ret 4, 11 Return The next table shows addressing formats from the 2nd column: @@ -254,9 +254,11 @@ The next table shows addressing formats from the 2nd column: 5 4*([k]&0xf) Lower nibble * 4 at byte offset k in the packet 6 L Jump label L 7 #k,Lt,Lf Jump to Lt if true, otherwise jump to Lf - 8 #k,Lt Jump to Lt if predicate is true - 9 a/%a Accumulator A - 10 extension BPF extension + 8 x/%x,Lt,Lf Jump to Lt if true, otherwise jump to Lf + 9 #k,Lt Jump to Lt if predicate is true + 10 x/%x,Lt Jump to Lt if predicate is true + 11 a/%a Accumulator A + 12 extension BPF extension The Linux kernel also has a couple of BPF extensions that are used along with the class of load instructions by "overloading" the k argument with @@ -1125,6 +1127,14 @@ pointer type. The types of pointers describe their base, as follows: PTR_TO_STACK Frame pointer. PTR_TO_PACKET skb->data. PTR_TO_PACKET_END skb->data + headlen; arithmetic forbidden. + PTR_TO_SOCKET Pointer to struct bpf_sock_ops, implicitly refcounted. + PTR_TO_SOCKET_OR_NULL + Either a pointer to a socket, or NULL; socket lookup + returns this type, which becomes a PTR_TO_SOCKET when + checked != NULL. PTR_TO_SOCKET is reference-counted, + so programs must release the reference through the + socket release function before the end of the program. + Arithmetic on these pointers is forbidden. However, a pointer may be offset from this base (as a result of pointer arithmetic), and this is tracked in two parts: the 'fixed offset' and 'variable offset'. The former is used when an exactly-known value (e.g. 
an immediate @@ -1171,6 +1181,13 @@ over the Ethernet header, then reads IHL and adds (IHL * 4), the resulting pointer will have a variable offset known to be 4n+2 for some n, so adding the 2 bytes (NET_IP_ALIGN) gives a 4-byte alignment and so word-sized accesses through that pointer are safe. +The 'id' field is also used on PTR_TO_SOCKET and PTR_TO_SOCKET_OR_NULL, common +to all copies of the pointer returned from a socket lookup. This has similar +behaviour to the handling for PTR_TO_MAP_VALUE_OR_NULL->PTR_TO_MAP_VALUE, but +it also handles reference tracking for the pointer. PTR_TO_SOCKET implicitly +represents a reference to the corresponding 'struct sock'. To ensure that the +reference is not leaked, it is imperative to NULL-check the reference and, in +the non-NULL case, pass the valid reference to the socket release function. Direct packet access -------------------- @@ -1444,6 +1461,55 @@ Error: 8: (7a) *(u64 *)(r0 +0) = 1 R0 invalid mem access 'imm' +Program that performs a socket lookup then sets the pointer to NULL without +checking it: + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -8), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_MOV64_IMM(BPF_REG_3, 4), + BPF_MOV64_IMM(BPF_REG_4, 0), + BPF_MOV64_IMM(BPF_REG_5, 0), + BPF_EMIT_CALL(BPF_FUNC_sk_lookup_tcp), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), +Error: + 0: (b7) r2 = 0 + 1: (63) *(u32 *)(r10 -8) = r2 + 2: (bf) r2 = r10 + 3: (07) r2 += -8 + 4: (b7) r3 = 4 + 5: (b7) r4 = 0 + 6: (b7) r5 = 0 + 7: (85) call bpf_sk_lookup_tcp#65 + 8: (b7) r0 = 0 + 9: (95) exit + Unreleased reference id=1, alloc_insn=7 + +Program that performs a socket lookup but does not NULL-check the returned +value: + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -8), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_MOV64_IMM(BPF_REG_3, 4), + BPF_MOV64_IMM(BPF_REG_4, 0), + BPF_MOV64_IMM(BPF_REG_5, 0), + BPF_EMIT_CALL(BPF_FUNC_sk_lookup_tcp), + BPF_EXIT_INSN(), +Error: + 0: (b7) r2 = 0 + 1: (63) *(u32 *)(r10 -8) = r2 + 2: (bf) r2 = r10 + 3: (07) r2 += -8 + 4: (b7) r3 = 4 + 5: (b7) r4 = 0 + 6: (b7) r5 = 0 + 7: (85) call bpf_sk_lookup_tcp#65 + 8: (95) exit + Unreleased reference id=1, alloc_insn=7 + Testing ------- diff --git a/Documentation/networking/i40evf.txt b/Documentation/networking/iavf.txt index e9b3035b95d0..cc902a2369d6 100644 --- a/Documentation/networking/i40evf.txt +++ b/Documentation/networking/iavf.txt @@ -2,7 +2,7 @@ Linux* Base Driver for Intel(R) Network Connection ================================================== Intel Ethernet Adaptive Virtual Function Linux driver. -Copyright(c) 2013-2017 Intel Corporation. +Copyright(c) 2013-2018 Intel Corporation. Contents ======== @@ -11,20 +11,21 @@ Contents - Known Issues/Troubleshooting - Support -This file describes the i40evf Linux* Base Driver. +This file describes the iavf Linux* Base Driver. This driver +was formerly called i40evf. -The i40evf driver supports the below mentioned virtual function +The iavf driver supports the below mentioned virtual function devices and can only be activated on kernels running the i40e or newer Physical Function (PF) driver compiled with CONFIG_PCI_IOV. -The i40evf driver requires CONFIG_PCI_MSI to be enabled. +The iavf driver requires CONFIG_PCI_MSI to be enabled. -The guest OS loading the i40evf driver must support MSI-X interrupts. +The guest OS loading the iavf driver must support MSI-X interrupts.
 Supported Hardware ================== Intel XL710 X710 Virtual Function -Intel Ethernet Adaptive Virtual Function Intel X722 Virtual Function +Intel Ethernet Adaptive Virtual Function Identifying Your Adapter ======================== @@ -32,7 +33,8 @@ Identifying Your Adapter For more information on how to identify your adapter, go to the Adapter & Driver ID Guide at: - http://support.intel.com/support/go/network/adapter/idguide.htm + https://www.intel.com/content/www/us/en/support/articles/000005584/network-and-i-o/ethernet-products.html + Known Issues/Troubleshooting ============================ diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index 960de8fe3f40..163b5ff1073c 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -1442,6 +1442,14 @@ max_hbh_length - INTEGER header. Default: INT_MAX (unlimited) +skip_notify_on_dev_down - BOOLEAN + Controls whether an RTM_DELROUTE message is generated for routes + removed when a device is taken down or deleted. IPv4 does not + generate this message; IPv6 does by default. Setting this sysctl + to true skips the message, making IPv4 and IPv6 on par in relying + on userspace caches to track link events and evict routes. + Default: false (generate message) + IPv6 Fragmentation: ip6frag_high_thresh - INTEGER diff --git a/Documentation/networking/netvsc.txt b/Documentation/networking/netvsc.txt index 92f5b31392fa..3bfa635bbbd5 100644 --- a/Documentation/networking/netvsc.txt +++ b/Documentation/networking/netvsc.txt @@ -45,6 +45,15 @@ Features like packets and significantly reduces CPU usage under heavy Rx load. + Large Receive Offload (LRO), or Receive Side Coalescing (RSC) + ------------------------------------------------------------- + The driver supports the LRO/RSC feature of the vSwitch. It reduces the + per-packet processing overhead by coalescing multiple TCP segments when + possible. The feature is enabled by default on VMs running on Windows + Server 2019 and later. It may be changed with the ethtool command: + ethtool -K eth0 lro on + ethtool -K eth0 lro off + SR-IOV support -------------- Hyper-V supports SR-IOV as a hardware acceleration option. If SR-IOV diff --git a/Documentation/networking/rxrpc.txt b/Documentation/networking/rxrpc.txt index b5407163d53b..605e00cdd6be 100644 --- a/Documentation/networking/rxrpc.txt +++ b/Documentation/networking/rxrpc.txt @@ -1069,6 +1069,31 @@ The kernel interface functions are as follows: This function may transmit a PING ACK. + (*) Get reply timestamp. + + bool rxrpc_kernel_get_reply_time(struct socket *sock, + struct rxrpc_call *call, + ktime_t *_ts) + + This allows the timestamp on the first DATA packet of the reply of a + client call to be queried, provided that it is still in the Rx ring. If + successful, the timestamp will be stored into *_ts and true will be + returned; false will be returned otherwise. + + (*) Get remote client epoch. + + u32 rxrpc_kernel_get_epoch(struct socket *sock, + struct rxrpc_call *call) + + This allows the epoch that's contained in packets of an incoming client + call to be queried. This value is returned. The function is always + successful if the call is still in progress. It shouldn't be called once + the call has expired. Note that calling this on a local client call only + returns the local epoch. + + This value can be used to determine if the remote client has been + restarted as it shouldn't change otherwise.
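Taken together, the two calls above let kernel-level code see both when a reply arrived and whether the peer has been restarted. Below is a minimal sketch of that usage; the helper name, the pr_info() reporting and the cached last_epoch variable are illustrative assumptions, while the two rxrpc_kernel_*() prototypes come from the interface description above. Note that per that description, the reply timestamp applies to an outgoing client call and the epoch to an incoming one; combining them in one helper is purely for illustration.

	#include <linux/printk.h>
	#include <net/af_rxrpc.h>

	/* Hypothetical helper: 'sock' and 'call' are assumed to have been
	 * set up through the kernel interface described in this document.
	 */
	static void example_inspect_call(struct socket *sock,
					 struct rxrpc_call *call,
					 u32 *last_epoch)
	{
		ktime_t ts;
		u32 epoch;

		/* Timestamp of the first DATA packet of the reply, if it
		 * is still in the Rx ring (client calls only).
		 */
		if (rxrpc_kernel_get_reply_time(sock, call, &ts))
			pr_info("reply first seen at %lld ns\n",
				ktime_to_ns(ts));

		/* For an incoming client call, the epoch should only change
		 * if the remote client has been restarted.
		 */
		epoch = rxrpc_kernel_get_epoch(sock, call);
		if (*last_epoch && epoch != *last_epoch)
			pr_info("remote client appears to have restarted\n");
		*last_epoch = epoch;
	}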
+ ======================= CONFIGURABLE PARAMETERS diff --git a/Documentation/networking/tcp.txt b/Documentation/networking/tcp.txt deleted file mode 100644 index 9c7139d57e57..000000000000 --- a/Documentation/networking/tcp.txt +++ /dev/null @@ -1,101 +0,0 @@ -TCP protocol -============ - -Last updated: 3 June 2017 - -Contents -======== - -- Congestion control -- How the new TCP output machine [nyi] works - -Congestion control -================== - -The following variables are used in the tcp_sock for congestion control: -snd_cwnd The size of the congestion window -snd_ssthresh Slow start threshold. We are in slow start if - snd_cwnd is less than this. -snd_cwnd_cnt A counter used to slow down the rate of increase - once we exceed slow start threshold. -snd_cwnd_clamp This is the maximum size that snd_cwnd can grow to. -snd_cwnd_stamp Timestamp for when congestion window last validated. -snd_cwnd_used Used as a highwater mark for how much of the - congestion window is in use. It is used to adjust - snd_cwnd down when the link is limited by the - application rather than the network. - -As of 2.6.13, Linux supports pluggable congestion control algorithms. -A congestion control mechanism can be registered through functions in -tcp_cong.c. The functions used by the congestion control mechanism are -registered via passing a tcp_congestion_ops struct to -tcp_register_congestion_control. As a minimum, the congestion control -mechanism must provide a valid name and must implement either ssthresh, -cong_avoid and undo_cwnd hooks or the "omnipotent" cong_control hook. - -Private data for a congestion control mechanism is stored in tp->ca_priv. -tcp_ca(tp) returns a pointer to this space. This is preallocated space - it -is important to check the size of your private data will fit this space, or -alternatively, space could be allocated elsewhere and a pointer to it could -be stored here. - -There are three kinds of congestion control algorithms currently: The -simplest ones are derived from TCP reno (highspeed, scalable) and just -provide an alternative congestion window calculation. More complex -ones like BIC try to look at other events to provide better -heuristics. There are also round trip time based algorithms like -Vegas and Westwood+. - -Good TCP congestion control is a complex problem because the algorithm -needs to maintain fairness and performance. Please review current -research and RFC's before developing new modules. - -The default congestion control mechanism is chosen based on the -DEFAULT_TCP_CONG Kconfig parameter. If you really want a particular default -value then you can set it using sysctl net.ipv4.tcp_congestion_control. The -module will be autoloaded if needed and you will get the expected protocol. If -you ask for an unknown congestion method, then the sysctl attempt will fail. - -If you remove a TCP congestion control module, then you will get the next -available one. Since reno cannot be built as a module, and cannot be -removed, it will always be available. - -How the new TCP output machine [nyi] works. -=========================================== - -Data is kept on a single queue. The skb->users flag tells us if the frame is -one that has been queued already. To add a frame we throw it on the end. Ack -walks down the list from the start. 
- -We keep a set of control flags - - sk->tcp_pend_event - - TCP_PEND_ACK Ack needed - TCP_ACK_NOW Needed now - TCP_WINDOW Window update check - TCP_WINZERO Zero probing - - - sk->transmit_queue The transmission frame begin - sk->transmit_new First new frame pointer - sk->transmit_end Where to add frames - - sk->tcp_last_tx_ack Last ack seen - sk->tcp_dup_ack Dup ack count for fast retransmit - - -Frames are queued for output by tcp_write. We do our best to send the frames -off immediately if possible, but otherwise queue and compute the body -checksum in the copy. - -When a write is done we try to clear any pending events and piggy back them. -If the window is full we queue full sized frames. On the first timeout in -zero window we split this. - -On a timer we walk the retransmit list to send any retransmits, update the -backoff timers etc. A change of route table stamp causes a change of header -and recompute. We add any new tcp level headers and refinish the checksum -before sending. - diff --git a/Documentation/networking/xfrm_device.txt b/Documentation/networking/xfrm_device.txt index 50c34ca65efe..267f55b5f54a 100644 --- a/Documentation/networking/xfrm_device.txt +++ b/Documentation/networking/xfrm_device.txt @@ -68,6 +68,10 @@ and an indication of whether it is for Rx or Tx. The driver should - verify the algorithm is supported for offloads - store the SA information (key, salt, target-ip, protocol, etc) - enable the HW offload of the SA + - return status value: + 0 success + -EOPNOTSUPP offload not supported, try SW IPsec + other fail the request The driver can also set an offload_handle in the SA, an opaque void pointer that can be used to convey context into the fast-path offload requests. diff --git a/MAINTAINERS b/MAINTAINERS index 6ac000cc006d..6d5161def3f3 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4527,9 +4527,13 @@ F: drivers/soc/fsl/dpio DPAA2 ETHERNET DRIVER M: Ioana Radulescu <ruxandra.radulescu@nxp.com> -L: linux-kernel@vger.kernel.org +L: netdev@vger.kernel.org S: Maintained -F: drivers/staging/fsl-dpaa2/ethernet +F: drivers/net/ethernet/freescale/dpaa2/dpaa2-eth* +F: drivers/net/ethernet/freescale/dpaa2/dpni* +F: drivers/net/ethernet/freescale/dpaa2/dpkg.h +F: drivers/net/ethernet/freescale/dpaa2/Makefile +F: drivers/net/ethernet/freescale/dpaa2/Kconfig DPAA2 ETHERNET SWITCH DRIVER M: Ioana Radulescu <ruxandra.radulescu@nxp.com> @@ -4540,9 +4544,10 @@ F: drivers/staging/fsl-dpaa2/ethsw DPAA2 PTP CLOCK DRIVER M: Yangbo Lu <yangbo.lu@nxp.com> -L: linux-kernel@vger.kernel.org +L: netdev@vger.kernel.org S: Maintained -F: drivers/staging/fsl-dpaa2/rtc +F: drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp* +F: drivers/net/ethernet/freescale/dpaa2/dprtc* DPT_I2O SCSI RAID DRIVER M: Adaptec OEM Raid Solutions <aacraid@microsemi.com> @@ -7348,7 +7353,7 @@ F: Documentation/networking/ixgb.txt F: Documentation/networking/ixgbe.txt F: Documentation/networking/ixgbevf.txt F: Documentation/networking/i40e.txt -F: Documentation/networking/i40evf.txt +F: Documentation/networking/iavf.txt F: Documentation/networking/ice.txt F: drivers/net/ethernet/intel/ F: drivers/net/ethernet/intel/*/ @@ -8183,6 +8188,15 @@ S: Maintained F: net/l3mdev F: include/net/l3mdev.h +LANTIQ / INTEL Ethernet drivers +M: Hauke Mehrtens <hauke@hauke-m.de> +L: netdev@vger.kernel.org +S: Maintained +F: net/dsa/tag_gswip.c +F: drivers/net/ethernet/lantiq_xrx200.c +F: drivers/net/dsa/lantiq_pce.h +F: drivers/net/dsa/lantiq_gswip.c + LANTIQ MIPS ARCHITECTURE M: John Crispin <john@phrozen.org> L:
linux-mips@linux-mips.org @@ -8743,7 +8757,7 @@ M: Vivien Didelot <vivien.didelot@savoirfairelinux.com> L: netdev@vger.kernel.org S: Maintained F: drivers/net/dsa/mv88e6xxx/ -F: linux/platform_data/mv88e6xxx.h +F: include/linux/platform_data/mv88e6xxx.h F: Documentation/devicetree/bindings/net/dsa/marvell.txt MARVELL ARMADA DRM SUPPORT @@ -8833,6 +8847,15 @@ S: Supported F: drivers/mmc/host/sdhci-xenon* F: Documentation/devicetree/bindings/mmc/marvell,xenon-sdhci.txt +MARVELL OCTEONTX2 RVU ADMIN FUNCTION DRIVER +M: Sunil Goutham <sgoutham@marvell.com> +M: Linu Cherian <lcherian@marvell.com> +M: Geetha sowjanya <gakula@marvell.com> +M: Jerin Jacob <jerinj@marvell.com> +L: netdev@vger.kernel.org +S: Supported +F: drivers/net/ethernet/marvell/octeontx2/af/ + MATROX FRAMEBUFFER DRIVER L: linux-fbdev@vger.kernel.org S: Orphan diff --git a/arch/mips/boot/dts/mscc/ocelot.dtsi b/arch/mips/boot/dts/mscc/ocelot.dtsi index f7eb612b46ba..8ce317c5b9ed 100644 --- a/arch/mips/boot/dts/mscc/ocelot.dtsi +++ b/arch/mips/boot/dts/mscc/ocelot.dtsi @@ -107,7 +107,6 @@ reg = <0x1010000 0x10000>, <0x1030000 0x10000>, <0x1080000 0x100>, - <0x10d0000 0x10000>, <0x11e0000 0x100>, <0x11f0000 0x100>, <0x1200000 0x100>, @@ -121,10 +120,10 @@ <0x1280000 0x100>, <0x1800000 0x80000>, <0x1880000 0x10000>; - reg-names = "sys", "rew", "qs", "hsio", "port0", - "port1", "port2", "port3", "port4", "port5", - "port6", "port7", "port8", "port9", "port10", - "qsys", "ana"; + reg-names = "sys", "rew", "qs", "port0", "port1", + "port2", "port3", "port4", "port5", "port6", + "port7", "port8", "port9", "port10", "qsys", + "ana"; interrupts = <21 22>; interrupt-names = "xtr", "inj"; @@ -231,5 +230,15 @@ pinctrl-0 = <&miim1>; status = "disabled"; }; + + hsio: syscon@10d0000 { + compatible = "mscc,ocelot-hsio", "syscon", "simple-mfd"; + reg = <0x10d0000 0x10000>; + + serdes: serdes { + compatible = "mscc,vsc7514-serdes"; + #phy-cells = <2>; + }; + }; }; }; diff --git a/arch/mips/lantiq/xway/dma.c b/arch/mips/lantiq/xway/dma.c index 664f2f7f55c1..982859f2b2a3 100644 --- a/arch/mips/lantiq/xway/dma.c +++ b/arch/mips/lantiq/xway/dma.c @@ -106,7 +106,6 @@ ltq_dma_open(struct ltq_dma_channel *ch) spin_lock_irqsave(&ltq_dma_lock, flag); ltq_dma_w32(ch->nr, LTQ_DMA_CS); ltq_dma_w32_mask(0, DMA_CHAN_ON, LTQ_DMA_CCTRL); - ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN); spin_unlock_irqrestore(&ltq_dma_lock, flag); } EXPORT_SYMBOL_GPL(ltq_dma_open); diff --git a/arch/mips/lantiq/xway/sysctrl.c b/arch/mips/lantiq/xway/sysctrl.c index e0af39b33e28..fe25c99089b7 100644 --- a/arch/mips/lantiq/xway/sysctrl.c +++ b/arch/mips/lantiq/xway/sysctrl.c @@ -505,7 +505,7 @@ void __init ltq_soc_init(void) clkdev_add_pmu("1a800000.pcie", "msi", 1, 1, PMU1_PCIE2_MSI); clkdev_add_pmu("1a800000.pcie", "pdi", 1, 1, PMU1_PCIE2_PDI); clkdev_add_pmu("1a800000.pcie", "ctl", 1, 1, PMU1_PCIE2_CTL); - clkdev_add_pmu("1e108000.eth", NULL, 0, 0, PMU_SWITCH | PMU_PPE_DP); + clkdev_add_pmu("1e10b308.eth", NULL, 0, 0, PMU_SWITCH | PMU_PPE_DP); clkdev_add_pmu("1da00000.usif", "NULL", 1, 0, PMU_USIF); clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU); } else if (of_machine_is_compatible("lantiq,ar10")) { @@ -513,11 +513,11 @@ void __init ltq_soc_init(void) ltq_ar10_fpi_hz(), ltq_ar10_pp32_hz()); clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0); clkdev_add_pmu("1e106000.usb", "otg", 1, 0, PMU_USB1); - clkdev_add_pmu("1e108000.eth", NULL, 0, 0, PMU_SWITCH | + clkdev_add_pmu("1e10b308.eth", NULL, 0, 0, PMU_SWITCH | PMU_PPE_DP | PMU_PPE_TC); clkdev_add_pmu("1da00000.usif", "NULL",
1, 0, PMU_USIF); - clkdev_add_pmu("1f203020.gphy", NULL, 1, 0, PMU_GPHY); - clkdev_add_pmu("1f203068.gphy", NULL, 1, 0, PMU_GPHY); + clkdev_add_pmu("1e108000.gswip", "gphy0", 0, 0, PMU_GPHY); + clkdev_add_pmu("1e108000.gswip", "gphy1", 0, 0, PMU_GPHY); clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU); clkdev_add_pmu("1e116000.mei", "afe", 1, 2, PMU_ANALOG_DSL_AFE); clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE); @@ -536,12 +536,12 @@ void __init ltq_soc_init(void) clkdev_add_pmu(NULL, "ahb", 1, 0, PMU_AHBM | PMU_AHBS); clkdev_add_pmu("1da00000.usif", "NULL", 1, 0, PMU_USIF); - clkdev_add_pmu("1e108000.eth", NULL, 0, 0, + clkdev_add_pmu("1e10b308.eth", NULL, 0, 0, PMU_SWITCH | PMU_PPE_DPLUS | PMU_PPE_DPLUM | PMU_PPE_EMA | PMU_PPE_TC | PMU_PPE_SLL01 | PMU_PPE_QSB | PMU_PPE_TOP); - clkdev_add_pmu("1f203020.gphy", NULL, 0, 0, PMU_GPHY); - clkdev_add_pmu("1f203068.gphy", NULL, 0, 0, PMU_GPHY); + clkdev_add_pmu("1e108000.gswip", "gphy0", 0, 0, PMU_GPHY); + clkdev_add_pmu("1e108000.gswip", "gphy1", 0, 0, PMU_GPHY); clkdev_add_pmu("1e103000.sdio", NULL, 1, 0, PMU_SDIO); clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU); clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE); diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c index 99a38115b0a8..40d6ddbf1d5e 100644 --- a/drivers/atm/fore200e.c +++ b/drivers/atm/fore200e.c @@ -106,7 +106,6 @@ static const struct atmdev_ops fore200e_ops; -static const struct fore200e_bus fore200e_bus[]; static LIST_HEAD(fore200e_boards); @@ -183,10 +182,9 @@ fore200e_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk, int size, i alignment = 0; chunk->alloc_size = size + alignment; - chunk->align_size = size; chunk->direction = direction; - chunk->alloc_addr = kzalloc(chunk->alloc_size, GFP_KERNEL | GFP_DMA); + chunk->alloc_addr = kzalloc(chunk->alloc_size, GFP_KERNEL); if (chunk->alloc_addr == NULL) return -ENOMEM; @@ -195,8 +193,12 @@ fore200e_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk, int size, i chunk->align_addr = chunk->alloc_addr + offset; - chunk->dma_addr = fore200e->bus->dma_map(fore200e, chunk->align_addr, chunk->align_size, direction); - + chunk->dma_addr = dma_map_single(fore200e->dev, chunk->align_addr, + size, direction); + if (dma_mapping_error(fore200e->dev, chunk->dma_addr)) { + kfree(chunk->alloc_addr); + return -ENOMEM; + } return 0; } @@ -206,11 +208,39 @@ fore200e_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk, int size, i static void fore200e_chunk_free(struct fore200e* fore200e, struct chunk* chunk) { - fore200e->bus->dma_unmap(fore200e, chunk->dma_addr, chunk->dma_size, chunk->direction); - + dma_unmap_single(fore200e->dev, chunk->dma_addr, chunk->dma_size, + chunk->direction); kfree(chunk->alloc_addr); } +/* + * Allocate a DMA consistent chunk of memory intended to act as a communication + * mechanism (to hold descriptors, status, queues, etc.) shared by the driver + * and the adapter. + */ +static int +fore200e_dma_chunk_alloc(struct fore200e *fore200e, struct chunk *chunk, + int size, int nbr, int alignment) +{ + /* returned chunks are page-aligned */ + chunk->alloc_size = size * nbr; + chunk->alloc_addr = dma_alloc_coherent(fore200e->dev, chunk->alloc_size, + &chunk->dma_addr, GFP_KERNEL); + if (!chunk->alloc_addr) + return -ENOMEM; + chunk->align_addr = chunk->alloc_addr; + return 0; +} + +/* + * Free a DMA consistent chunk of memory. 
+ */ +static void +fore200e_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk) +{ + dma_free_coherent(fore200e->dev, chunk->alloc_size, chunk->alloc_addr, + chunk->dma_addr); +} static void fore200e_spin(int msecs) @@ -303,10 +333,10 @@ fore200e_uninit_bs_queue(struct fore200e* fore200e) struct chunk* rbd_block = &fore200e->host_bsq[ scheme ][ magn ].rbd_block; if (status->alloc_addr) - fore200e->bus->dma_chunk_free(fore200e, status); + fore200e_dma_chunk_free(fore200e, status); if (rbd_block->alloc_addr) - fore200e->bus->dma_chunk_free(fore200e, rbd_block); + fore200e_dma_chunk_free(fore200e, rbd_block); } } } @@ -372,17 +402,17 @@ fore200e_shutdown(struct fore200e* fore200e) /* fall through */ case FORE200E_STATE_INIT_RXQ: - fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_rxq.status); - fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_rxq.rpd); + fore200e_dma_chunk_free(fore200e, &fore200e->host_rxq.status); + fore200e_dma_chunk_free(fore200e, &fore200e->host_rxq.rpd); /* fall through */ case FORE200E_STATE_INIT_TXQ: - fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_txq.status); - fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_txq.tpd); + fore200e_dma_chunk_free(fore200e, &fore200e->host_txq.status); + fore200e_dma_chunk_free(fore200e, &fore200e->host_txq.tpd); /* fall through */ case FORE200E_STATE_INIT_CMDQ: - fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_cmdq.status); + fore200e_dma_chunk_free(fore200e, &fore200e->host_cmdq.status); /* fall through */ case FORE200E_STATE_INITIALIZE: @@ -429,81 +459,6 @@ static void fore200e_pca_write(u32 val, volatile u32 __iomem *addr) writel(cpu_to_le32(val), addr); } - -static u32 -fore200e_pca_dma_map(struct fore200e* fore200e, void* virt_addr, int size, int direction) -{ - u32 dma_addr = dma_map_single(&((struct pci_dev *) fore200e->bus_dev)->dev, virt_addr, size, direction); - - DPRINTK(3, "PCI DVMA mapping: virt_addr = 0x%p, size = %d, direction = %d, --> dma_addr = 0x%08x\n", - virt_addr, size, direction, dma_addr); - - return dma_addr; -} - - -static void -fore200e_pca_dma_unmap(struct fore200e* fore200e, u32 dma_addr, int size, int direction) -{ - DPRINTK(3, "PCI DVMA unmapping: dma_addr = 0x%08x, size = %d, direction = %d\n", - dma_addr, size, direction); - - dma_unmap_single(&((struct pci_dev *) fore200e->bus_dev)->dev, dma_addr, size, direction); -} - - -static void -fore200e_pca_dma_sync_for_cpu(struct fore200e* fore200e, u32 dma_addr, int size, int direction) -{ - DPRINTK(3, "PCI DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction); - - dma_sync_single_for_cpu(&((struct pci_dev *) fore200e->bus_dev)->dev, dma_addr, size, direction); -} - -static void -fore200e_pca_dma_sync_for_device(struct fore200e* fore200e, u32 dma_addr, int size, int direction) -{ - DPRINTK(3, "PCI DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction); - - dma_sync_single_for_device(&((struct pci_dev *) fore200e->bus_dev)->dev, dma_addr, size, direction); -} - - -/* allocate a DMA consistent chunk of memory intended to act as a communication mechanism - (to hold descriptors, status, queues, etc.) 
shared by the driver and the adapter */ - -static int -fore200e_pca_dma_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk, - int size, int nbr, int alignment) -{ - /* returned chunks are page-aligned */ - chunk->alloc_size = size * nbr; - chunk->alloc_addr = dma_alloc_coherent(&((struct pci_dev *) fore200e->bus_dev)->dev, - chunk->alloc_size, - &chunk->dma_addr, - GFP_KERNEL); - - if ((chunk->alloc_addr == NULL) || (chunk->dma_addr == 0)) - return -ENOMEM; - - chunk->align_addr = chunk->alloc_addr; - - return 0; -} - - -/* free a DMA consistent chunk of memory */ - -static void -fore200e_pca_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk) -{ - dma_free_coherent(&((struct pci_dev *) fore200e->bus_dev)->dev, - chunk->alloc_size, - chunk->alloc_addr, - chunk->dma_addr); -} - - static int fore200e_pca_irq_check(struct fore200e* fore200e) { @@ -571,7 +526,7 @@ fore200e_pca_unmap(struct fore200e* fore200e) static int fore200e_pca_configure(struct fore200e *fore200e) { - struct pci_dev* pci_dev = (struct pci_dev*)fore200e->bus_dev; + struct pci_dev *pci_dev = to_pci_dev(fore200e->dev); u8 master_ctrl, latency; DPRINTK(2, "device %s being configured\n", fore200e->name); @@ -623,7 +578,10 @@ fore200e_pca_prom_read(struct fore200e* fore200e, struct prom_data* prom) opcode.opcode = OPCODE_GET_PROM; opcode.pad = 0; - prom_dma = fore200e->bus->dma_map(fore200e, prom, sizeof(struct prom_data), DMA_FROM_DEVICE); + prom_dma = dma_map_single(fore200e->dev, prom, sizeof(struct prom_data), + DMA_FROM_DEVICE); + if (dma_mapping_error(fore200e->dev, prom_dma)) + return -ENOMEM; fore200e->bus->write(prom_dma, &entry->cp_entry->cmd.prom_block.prom_haddr); @@ -635,7 +593,7 @@ fore200e_pca_prom_read(struct fore200e* fore200e, struct prom_data* prom) *entry->status = STATUS_FREE; - fore200e->bus->dma_unmap(fore200e, prom_dma, sizeof(struct prom_data), DMA_FROM_DEVICE); + dma_unmap_single(fore200e->dev, prom_dma, sizeof(struct prom_data), DMA_FROM_DEVICE); if (ok == 0) { printk(FORE200E "unable to get PROM data from device %s\n", fore200e->name); @@ -658,15 +616,31 @@ fore200e_pca_prom_read(struct fore200e* fore200e, struct prom_data* prom) static int fore200e_pca_proc_read(struct fore200e* fore200e, char *page) { - struct pci_dev* pci_dev = (struct pci_dev*)fore200e->bus_dev; + struct pci_dev *pci_dev = to_pci_dev(fore200e->dev); return sprintf(page, " PCI bus/slot/function:\t%d/%d/%d\n", pci_dev->bus->number, PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn)); } +static const struct fore200e_bus fore200e_pci_ops = { + .model_name = "PCA-200E", + .proc_name = "pca200e", + .descr_alignment = 32, + .buffer_alignment = 4, + .status_alignment = 32, + .read = fore200e_pca_read, + .write = fore200e_pca_write, + .configure = fore200e_pca_configure, + .map = fore200e_pca_map, + .reset = fore200e_pca_reset, + .prom_read = fore200e_pca_prom_read, + .unmap = fore200e_pca_unmap, + .irq_check = fore200e_pca_irq_check, + .irq_ack = fore200e_pca_irq_ack, + .proc_read = fore200e_pca_proc_read, +}; #endif /* CONFIG_PCI */ - #ifdef CONFIG_SBUS static u32 fore200e_sba_read(volatile u32 __iomem *addr) @@ -679,78 +653,6 @@ static void fore200e_sba_write(u32 val, volatile u32 __iomem *addr) sbus_writel(val, addr); } -static u32 fore200e_sba_dma_map(struct fore200e *fore200e, void* virt_addr, int size, int direction) -{ - struct platform_device *op = fore200e->bus_dev; - u32 dma_addr; - - dma_addr = dma_map_single(&op->dev, virt_addr, size, direction); - - DPRINTK(3, "SBUS DVMA mapping: virt_addr = 0x%p, size = 
%d, direction = %d --> dma_addr = 0x%08x\n", - virt_addr, size, direction, dma_addr); - - return dma_addr; -} - -static void fore200e_sba_dma_unmap(struct fore200e *fore200e, u32 dma_addr, int size, int direction) -{ - struct platform_device *op = fore200e->bus_dev; - - DPRINTK(3, "SBUS DVMA unmapping: dma_addr = 0x%08x, size = %d, direction = %d,\n", - dma_addr, size, direction); - - dma_unmap_single(&op->dev, dma_addr, size, direction); -} - -static void fore200e_sba_dma_sync_for_cpu(struct fore200e *fore200e, u32 dma_addr, int size, int direction) -{ - struct platform_device *op = fore200e->bus_dev; - - DPRINTK(3, "SBUS DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction); - - dma_sync_single_for_cpu(&op->dev, dma_addr, size, direction); -} - -static void fore200e_sba_dma_sync_for_device(struct fore200e *fore200e, u32 dma_addr, int size, int direction) -{ - struct platform_device *op = fore200e->bus_dev; - - DPRINTK(3, "SBUS DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction); - - dma_sync_single_for_device(&op->dev, dma_addr, size, direction); -} - -/* Allocate a DVMA consistent chunk of memory intended to act as a communication mechanism - * (to hold descriptors, status, queues, etc.) shared by the driver and the adapter. - */ -static int fore200e_sba_dma_chunk_alloc(struct fore200e *fore200e, struct chunk *chunk, - int size, int nbr, int alignment) -{ - struct platform_device *op = fore200e->bus_dev; - - chunk->alloc_size = chunk->align_size = size * nbr; - - /* returned chunks are page-aligned */ - chunk->alloc_addr = dma_alloc_coherent(&op->dev, chunk->alloc_size, - &chunk->dma_addr, GFP_ATOMIC); - - if ((chunk->alloc_addr == NULL) || (chunk->dma_addr == 0)) - return -ENOMEM; - - chunk->align_addr = chunk->alloc_addr; - - return 0; -} - -/* free a DVMA consistent chunk of memory */ -static void fore200e_sba_dma_chunk_free(struct fore200e *fore200e, struct chunk *chunk) -{ - struct platform_device *op = fore200e->bus_dev; - - dma_free_coherent(&op->dev, chunk->alloc_size, - chunk->alloc_addr, chunk->dma_addr); -} - static void fore200e_sba_irq_enable(struct fore200e *fore200e) { u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY; @@ -777,7 +679,7 @@ static void fore200e_sba_reset(struct fore200e *fore200e) static int __init fore200e_sba_map(struct fore200e *fore200e) { - struct platform_device *op = fore200e->bus_dev; + struct platform_device *op = to_platform_device(fore200e->dev); unsigned int bursts; /* gain access to the SBA specific registers */ @@ -807,7 +709,7 @@ static int __init fore200e_sba_map(struct fore200e *fore200e) static void fore200e_sba_unmap(struct fore200e *fore200e) { - struct platform_device *op = fore200e->bus_dev; + struct platform_device *op = to_platform_device(fore200e->dev); of_iounmap(&op->resource[0], fore200e->regs.sba.hcr, SBA200E_HCR_LENGTH); of_iounmap(&op->resource[1], fore200e->regs.sba.bsr, SBA200E_BSR_LENGTH); @@ -823,7 +725,7 @@ static int __init fore200e_sba_configure(struct fore200e *fore200e) static int __init fore200e_sba_prom_read(struct fore200e *fore200e, struct prom_data *prom) { - struct platform_device *op = fore200e->bus_dev; + struct platform_device *op = to_platform_device(fore200e->dev); const u8 *prop; int len; @@ -847,7 +749,7 @@ static int __init fore200e_sba_prom_read(struct fore200e *fore200e, struct prom_ static int fore200e_sba_proc_read(struct fore200e *fore200e, char *page) { - struct platform_device *op = fore200e->bus_dev; + 
struct platform_device *op = to_platform_device(fore200e->dev); const struct linux_prom_registers *regs; regs = of_get_property(op->dev.of_node, "reg", NULL); @@ -855,8 +757,26 @@ static int fore200e_sba_proc_read(struct fore200e *fore200e, char *page) return sprintf(page, " SBUS slot/device:\t\t%d/'%s'\n", (regs ? regs->which_io : 0), op->dev.of_node->name); } -#endif /* CONFIG_SBUS */ +static const struct fore200e_bus fore200e_sbus_ops = { + .model_name = "SBA-200E", + .proc_name = "sba200e", + .descr_alignment = 32, + .buffer_alignment = 64, + .status_alignment = 32, + .read = fore200e_sba_read, + .write = fore200e_sba_write, + .configure = fore200e_sba_configure, + .map = fore200e_sba_map, + .reset = fore200e_sba_reset, + .prom_read = fore200e_sba_prom_read, + .unmap = fore200e_sba_unmap, + .irq_enable = fore200e_sba_irq_enable, + .irq_check = fore200e_sba_irq_check, + .irq_ack = fore200e_sba_irq_ack, + .proc_read = fore200e_sba_proc_read, +}; +#endif /* CONFIG_SBUS */ static void fore200e_tx_irq(struct fore200e* fore200e) @@ -884,7 +804,7 @@ fore200e_tx_irq(struct fore200e* fore200e) kfree(entry->data); /* remove DMA mapping */ - fore200e->bus->dma_unmap(fore200e, entry->tpd->tsd[ 0 ].buffer, entry->tpd->tsd[ 0 ].length, + dma_unmap_single(fore200e->dev, entry->tpd->tsd[ 0 ].buffer, entry->tpd->tsd[ 0 ].length, DMA_TO_DEVICE); vc_map = entry->vc_map; @@ -1105,12 +1025,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle); /* Make device DMA transfer visible to CPU. */ - fore200e->bus->dma_sync_for_cpu(fore200e, buffer->data.dma_addr, rpd->rsd[ i ].length, DMA_FROM_DEVICE); + dma_sync_single_for_cpu(fore200e->dev, buffer->data.dma_addr, + rpd->rsd[i].length, DMA_FROM_DEVICE); skb_put_data(skb, buffer->data.align_addr, rpd->rsd[i].length); /* Now let the device get at it again. */ - fore200e->bus->dma_sync_for_device(fore200e, buffer->data.dma_addr, rpd->rsd[ i ].length, DMA_FROM_DEVICE); + dma_sync_single_for_device(fore200e->dev, buffer->data.dma_addr, + rpd->rsd[i].length, DMA_FROM_DEVICE); } DPRINTK(3, "rx skb: len = %d, truesize = %d\n", skb->len, skb->truesize); @@ -1611,7 +1533,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb) } if (tx_copy) { - data = kmalloc(tx_len, GFP_ATOMIC | GFP_DMA); + data = kmalloc(tx_len, GFP_ATOMIC); if (data == NULL) { if (vcc->pop) { vcc->pop(vcc, skb); @@ -1679,7 +1601,13 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb) entry->data = tx_copy ? 
data : NULL; tpd = entry->tpd; - tpd->tsd[ 0 ].buffer = fore200e->bus->dma_map(fore200e, data, tx_len, DMA_TO_DEVICE); + tpd->tsd[ 0 ].buffer = dma_map_single(fore200e->dev, data, tx_len, + DMA_TO_DEVICE); + if (dma_mapping_error(fore200e->dev, tpd->tsd[0].buffer)) { + if (tx_copy) + kfree(data); + return -ENOMEM; + } tpd->tsd[ 0 ].length = tx_len; FORE200E_NEXT_ENTRY(txq->head, QUEUE_SIZE_TX); @@ -1747,13 +1675,15 @@ fore200e_getstats(struct fore200e* fore200e) u32 stats_dma_addr; if (fore200e->stats == NULL) { - fore200e->stats = kzalloc(sizeof(struct stats), GFP_KERNEL | GFP_DMA); + fore200e->stats = kzalloc(sizeof(struct stats), GFP_KERNEL); if (fore200e->stats == NULL) return -ENOMEM; } - stats_dma_addr = fore200e->bus->dma_map(fore200e, fore200e->stats, - sizeof(struct stats), DMA_FROM_DEVICE); + stats_dma_addr = dma_map_single(fore200e->dev, fore200e->stats, + sizeof(struct stats), DMA_FROM_DEVICE); + if (dma_mapping_error(fore200e->dev, stats_dma_addr)) + return -ENOMEM; FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD); @@ -1770,7 +1700,7 @@ fore200e_getstats(struct fore200e* fore200e) *entry->status = STATUS_FREE; - fore200e->bus->dma_unmap(fore200e, stats_dma_addr, sizeof(struct stats), DMA_FROM_DEVICE); + dma_unmap_single(fore200e->dev, stats_dma_addr, sizeof(struct stats), DMA_FROM_DEVICE); if (ok == 0) { printk(FORE200E "unable to get statistics from device %s\n", fore200e->name); @@ -2049,7 +1979,7 @@ static int fore200e_irq_request(struct fore200e *fore200e) static int fore200e_get_esi(struct fore200e *fore200e) { - struct prom_data* prom = kzalloc(sizeof(struct prom_data), GFP_KERNEL | GFP_DMA); + struct prom_data* prom = kzalloc(sizeof(struct prom_data), GFP_KERNEL); int ok, i; if (!prom) @@ -2156,7 +2086,7 @@ static int fore200e_init_bs_queue(struct fore200e *fore200e) bsq = &fore200e->host_bsq[ scheme ][ magn ]; /* allocate and align the array of status words */ - if (fore200e->bus->dma_chunk_alloc(fore200e, + if (fore200e_dma_chunk_alloc(fore200e, &bsq->status, sizeof(enum status), QUEUE_SIZE_BS, @@ -2165,13 +2095,13 @@ static int fore200e_init_bs_queue(struct fore200e *fore200e) } /* allocate and align the array of receive buffer descriptors */ - if (fore200e->bus->dma_chunk_alloc(fore200e, + if (fore200e_dma_chunk_alloc(fore200e, &bsq->rbd_block, sizeof(struct rbd_block), QUEUE_SIZE_BS, fore200e->bus->descr_alignment) < 0) { - fore200e->bus->dma_chunk_free(fore200e, &bsq->status); + fore200e_dma_chunk_free(fore200e, &bsq->status); return -ENOMEM; } @@ -2212,7 +2142,7 @@ static int fore200e_init_rx_queue(struct fore200e *fore200e) DPRINTK(2, "receive queue is being initialized\n"); /* allocate and align the array of status words */ - if (fore200e->bus->dma_chunk_alloc(fore200e, + if (fore200e_dma_chunk_alloc(fore200e, &rxq->status, sizeof(enum status), QUEUE_SIZE_RX, @@ -2221,13 +2151,13 @@ static int fore200e_init_rx_queue(struct fore200e *fore200e) } /* allocate and align the array of receive PDU descriptors */ - if (fore200e->bus->dma_chunk_alloc(fore200e, + if (fore200e_dma_chunk_alloc(fore200e, &rxq->rpd, sizeof(struct rpd), QUEUE_SIZE_RX, fore200e->bus->descr_alignment) < 0) { - fore200e->bus->dma_chunk_free(fore200e, &rxq->status); + fore200e_dma_chunk_free(fore200e, &rxq->status); return -ENOMEM; } @@ -2271,7 +2201,7 @@ static int fore200e_init_tx_queue(struct fore200e *fore200e) DPRINTK(2, "transmit queue is being initialized\n"); /* allocate and align the array of status words */ - if (fore200e->bus->dma_chunk_alloc(fore200e, + if 
(fore200e_dma_chunk_alloc(fore200e, &txq->status, sizeof(enum status), QUEUE_SIZE_TX, @@ -2280,13 +2210,13 @@ static int fore200e_init_tx_queue(struct fore200e *fore200e) } /* allocate and align the array of transmit PDU descriptors */ - if (fore200e->bus->dma_chunk_alloc(fore200e, + if (fore200e_dma_chunk_alloc(fore200e, &txq->tpd, sizeof(struct tpd), QUEUE_SIZE_TX, fore200e->bus->descr_alignment) < 0) { - fore200e->bus->dma_chunk_free(fore200e, &txq->status); + fore200e_dma_chunk_free(fore200e, &txq->status); return -ENOMEM; } @@ -2333,7 +2263,7 @@ static int fore200e_init_cmd_queue(struct fore200e *fore200e) DPRINTK(2, "command queue is being initialized\n"); /* allocate and align the array of status words */ - if (fore200e->bus->dma_chunk_alloc(fore200e, + if (fore200e_dma_chunk_alloc(fore200e, &cmdq->status, sizeof(enum status), QUEUE_SIZE_CMD, @@ -2487,25 +2417,15 @@ static void fore200e_monitor_puts(struct fore200e *fore200e, char *str) static int fore200e_load_and_start_fw(struct fore200e *fore200e) { const struct firmware *firmware; - struct device *device; const struct fw_header *fw_header; const __le32 *fw_data; u32 fw_size; u32 __iomem *load_addr; char buf[48]; - int err = -ENODEV; - - if (strcmp(fore200e->bus->model_name, "PCA-200E") == 0) - device = &((struct pci_dev *) fore200e->bus_dev)->dev; -#ifdef CONFIG_SBUS - else if (strcmp(fore200e->bus->model_name, "SBA-200E") == 0) - device = &((struct platform_device *) fore200e->bus_dev)->dev; -#endif - else - return err; + int err; sprintf(buf, "%s%s", fore200e->bus->proc_name, FW_EXT); - if ((err = request_firmware(&firmware, buf, device)) < 0) { + if ((err = request_firmware(&firmware, buf, fore200e->dev)) < 0) { printk(FORE200E "problem loading firmware image %s\n", fore200e->bus->model_name); return err; } @@ -2631,7 +2551,6 @@ static const struct of_device_id fore200e_sba_match[]; static int fore200e_sba_probe(struct platform_device *op) { const struct of_device_id *match; - const struct fore200e_bus *bus; struct fore200e *fore200e; static int index = 0; int err; @@ -2639,18 +2558,17 @@ static int fore200e_sba_probe(struct platform_device *op) match = of_match_device(fore200e_sba_match, &op->dev); if (!match) return -EINVAL; - bus = match->data; fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL); if (!fore200e) return -ENOMEM; - fore200e->bus = bus; - fore200e->bus_dev = op; + fore200e->bus = &fore200e_sbus_ops; + fore200e->dev = &op->dev; fore200e->irq = op->archdata.irqs[0]; fore200e->phys_base = op->resource[0].start; - sprintf(fore200e->name, "%s-%d", bus->model_name, index); + sprintf(fore200e->name, "SBA-200E-%d", index); err = fore200e_init(fore200e, &op->dev); if (err < 0) { @@ -2678,7 +2596,6 @@ static int fore200e_sba_remove(struct platform_device *op) static const struct of_device_id fore200e_sba_match[] = { { .name = SBA200E_PROM_NAME, - .data = (void *) &fore200e_bus[1], }, {}, }; @@ -2698,7 +2615,6 @@ static struct platform_driver fore200e_sba_driver = { static int fore200e_pca_detect(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent) { - const struct fore200e_bus* bus = (struct fore200e_bus*) pci_ent->driver_data; struct fore200e* fore200e; int err = 0; static int index = 0; @@ -2719,20 +2635,19 @@ static int fore200e_pca_detect(struct pci_dev *pci_dev, goto out_disable; } - fore200e->bus = bus; - fore200e->bus_dev = pci_dev; + fore200e->bus = &fore200e_pci_ops; + fore200e->dev = &pci_dev->dev; fore200e->irq = pci_dev->irq; fore200e->phys_base = pci_resource_start(pci_dev, 0); - 
sprintf(fore200e->name, "%s-%d", bus->model_name, index - 1); + sprintf(fore200e->name, "PCA-200E-%d", index - 1); pci_set_master(pci_dev); - printk(FORE200E "device %s found at 0x%lx, IRQ %s\n", - fore200e->bus->model_name, + printk(FORE200E "device PCA-200E found at 0x%lx, IRQ %s\n", fore200e->phys_base, fore200e_irq_itoa(fore200e->irq)); - sprintf(fore200e->name, "%s-%d", bus->model_name, index); + sprintf(fore200e->name, "PCA-200E-%d", index); err = fore200e_init(fore200e, &pci_dev->dev); if (err < 0) { @@ -2767,8 +2682,7 @@ static void fore200e_pca_remove_one(struct pci_dev *pci_dev) static const struct pci_device_id fore200e_pca_tbl[] = { - { PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_PCA200E, PCI_ANY_ID, PCI_ANY_ID, - 0, 0, (unsigned long) &fore200e_bus[0] }, + { PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_PCA200E, PCI_ANY_ID, PCI_ANY_ID }, { 0, } }; @@ -3108,8 +3022,7 @@ module_init(fore200e_module_init); module_exit(fore200e_module_cleanup); -static const struct atmdev_ops fore200e_ops = -{ +static const struct atmdev_ops fore200e_ops = { .open = fore200e_open, .close = fore200e_close, .ioctl = fore200e_ioctl, @@ -3121,53 +3034,6 @@ static const struct atmdev_ops fore200e_ops = .owner = THIS_MODULE }; - -static const struct fore200e_bus fore200e_bus[] = { -#ifdef CONFIG_PCI - { "PCA-200E", "pca200e", 32, 4, 32, - fore200e_pca_read, - fore200e_pca_write, - fore200e_pca_dma_map, - fore200e_pca_dma_unmap, - fore200e_pca_dma_sync_for_cpu, - fore200e_pca_dma_sync_for_device, - fore200e_pca_dma_chunk_alloc, - fore200e_pca_dma_chunk_free, - fore200e_pca_configure, - fore200e_pca_map, - fore200e_pca_reset, - fore200e_pca_prom_read, - fore200e_pca_unmap, - NULL, - fore200e_pca_irq_check, - fore200e_pca_irq_ack, - fore200e_pca_proc_read, - }, -#endif -#ifdef CONFIG_SBUS - { "SBA-200E", "sba200e", 32, 64, 32, - fore200e_sba_read, - fore200e_sba_write, - fore200e_sba_dma_map, - fore200e_sba_dma_unmap, - fore200e_sba_dma_sync_for_cpu, - fore200e_sba_dma_sync_for_device, - fore200e_sba_dma_chunk_alloc, - fore200e_sba_dma_chunk_free, - fore200e_sba_configure, - fore200e_sba_map, - fore200e_sba_reset, - fore200e_sba_prom_read, - fore200e_sba_unmap, - fore200e_sba_irq_enable, - fore200e_sba_irq_check, - fore200e_sba_irq_ack, - fore200e_sba_proc_read, - }, -#endif - {} -}; - MODULE_LICENSE("GPL"); #ifdef CONFIG_PCI #ifdef __LITTLE_ENDIAN__ diff --git a/drivers/atm/fore200e.h b/drivers/atm/fore200e.h index c8a02c8fba15..caf0ea6a328a 100644 --- a/drivers/atm/fore200e.h +++ b/drivers/atm/fore200e.h @@ -805,12 +805,6 @@ typedef struct fore200e_bus { int status_alignment; /* status words DMA alignment requirement */ u32 (*read)(volatile u32 __iomem *); void (*write)(u32, volatile u32 __iomem *); - u32 (*dma_map)(struct fore200e*, void*, int, int); - void (*dma_unmap)(struct fore200e*, u32, int, int); - void (*dma_sync_for_cpu)(struct fore200e*, u32, int, int); - void (*dma_sync_for_device)(struct fore200e*, u32, int, int); - int (*dma_chunk_alloc)(struct fore200e*, struct chunk*, int, int, int); - void (*dma_chunk_free)(struct fore200e*, struct chunk*); int (*configure)(struct fore200e*); int (*map)(struct fore200e*); void (*reset)(struct fore200e*); @@ -844,7 +838,7 @@ typedef struct fore200e { enum fore200e_state state; /* device state */ char name[16]; /* device name */ - void* bus_dev; /* bus-specific kernel data */ + struct device *dev; int irq; /* irq number */ unsigned long phys_base; /* physical base address */ void __iomem * virt_base; /* virtual base address */ diff --git a/drivers/atm/nicstar.c 
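/*
 * The fore200e conversion above drops the driver's per-bus dma_map()/
 * dma_unmap()/dma_sync_*() wrappers in favour of the generic DMA API,
 * with fore200e->dev holding the struct device for both PCI and SBUS.
 * One behavioural gain is visible in fore200e_send() and
 * fore200e_getstats(): mapping failures are now detected. A minimal
 * sketch of the resulting pattern (illustrative only; assumes a valid
 * struct device and a kmalloc'ed buffer):
 *
 *	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(dev, addr))
 *		return -ENOMEM;        nothing was mapped, nothing to unmap
 *	... hand addr to the hardware descriptor, kick the device ...
 *	dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
 *
 * The GFP_DMA allocation flags go away for the same reason: the mapping
 * honours the device's DMA mask, so the buffers generally no longer need
 * to come from the legacy DMA zone.
 */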
b/drivers/atm/nicstar.c index cbec9adc01c7..ae4aa02e4dc6 100644 --- a/drivers/atm/nicstar.c +++ b/drivers/atm/nicstar.c @@ -2689,11 +2689,10 @@ static void ns_poll(struct timer_list *unused) PRINTK("nicstar: Entering ns_poll().\n"); for (i = 0; i < num_cards; i++) { card = cards[i]; - if (spin_is_locked(&card->int_lock)) { + if (!spin_trylock_irqsave(&card->int_lock, flags)) { /* Probably it isn't worth spinning */ continue; } - spin_lock_irqsave(&card->int_lock, flags); stat_w = 0; stat_r = readl(card->membase + STAT); diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c index 3d7a5c149af3..1ad4991753bb 100644 --- a/drivers/bluetooth/ath3k.c +++ b/drivers/bluetooth/ath3k.c @@ -203,10 +203,11 @@ static const struct usb_device_id ath3k_blist_tbl[] = { { } /* Terminating entry */ }; -static inline void ath3k_log_failed_loading(int err, int len, int size) +static inline void ath3k_log_failed_loading(int err, int len, int size, + int count) { - BT_ERR("Error in firmware loading err = %d, len = %d, size = %d", - err, len, size); + BT_ERR("Firmware loading err = %d, len = %d, size = %d, count = %d", + err, len, size, count); } #define USB_REQ_DFU_DNLOAD 1 @@ -257,7 +258,7 @@ static int ath3k_load_firmware(struct usb_device *udev, &len, 3000); if (err || (len != size)) { - ath3k_log_failed_loading(err, len, size); + ath3k_log_failed_loading(err, len, size, count); goto error; } @@ -356,7 +357,7 @@ static int ath3k_load_fwfile(struct usb_device *udev, err = usb_bulk_msg(udev, pipe, send_buf, size, &len, 3000); if (err || (len != size)) { - ath3k_log_failed_loading(err, len, size); + ath3k_log_failed_loading(err, len, size, count); kfree(send_buf); return err; } diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c index 25b0cf952b91..54713833951a 100644 --- a/drivers/bluetooth/bt3c_cs.c +++ b/drivers/bluetooth/bt3c_cs.c @@ -448,7 +448,7 @@ static int bt3c_load_firmware(struct bt3c_info *info, { char *ptr = (char *) firmware; char b[9]; - unsigned int iobase, tmp; + unsigned int iobase, tmp, tn; unsigned long size, addr, fcs; int i, err = 0; @@ -490,7 +490,9 @@ static int bt3c_load_firmware(struct bt3c_info *info, memset(b, 0, sizeof(b)); for (tmp = 0, i = 0; i < size; i++) { memcpy(b, ptr + (i * 2) + 2, 2); - tmp += simple_strtol(b, NULL, 16); + if (kstrtouint(b, 16, &tn)) + return -EINVAL; + tmp += tn; } if (((tmp + fcs) & 0xff) != 0xff) { @@ -505,7 +507,8 @@ static int bt3c_load_firmware(struct bt3c_info *info, memset(b, 0, sizeof(b)); for (i = 0; i < (size - 4) / 2; i++) { memcpy(b, ptr + (i * 4) + 12, 4); - tmp = simple_strtoul(b, NULL, 16); + if (kstrtouint(b, 16, &tmp)) + return -EINVAL; bt3c_put(iobase, tmp); } } diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c index 99cde1f9467d..e3e4d929e74f 100644 --- a/drivers/bluetooth/btbcm.c +++ b/drivers/bluetooth/btbcm.c @@ -324,6 +324,7 @@ static const struct bcm_subver_table bcm_uart_subver_table[] = { { 0x4103, "BCM4330B1" }, /* 002.001.003 */ { 0x410e, "BCM43341B0" }, /* 002.001.014 */ { 0x4406, "BCM4324B3" }, /* 002.004.006 */ + { 0x6109, "BCM4335C0" }, /* 003.001.009 */ { 0x610c, "BCM4354" }, /* 003.001.012 */ { 0x2122, "BCM4343A0" }, /* 001.001.034 */ { 0x2209, "BCM43430A1" }, /* 001.002.009 */ diff --git a/drivers/bluetooth/btrsi.c b/drivers/bluetooth/btrsi.c index 60d1419590ba..3951f7b23840 100644 --- a/drivers/bluetooth/btrsi.c +++ b/drivers/bluetooth/btrsi.c @@ -21,8 +21,9 @@ #include <net/rsi_91x.h> #include <net/genetlink.h> -#define RSI_HEADROOM_FOR_BT_HAL 16 +#define RSI_DMA_ALIGN 8 
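/*
 * Headroom arithmetic for the change below: every TX frame gets a
 * RSI_FRAME_DESC_SIZE byte descriptor prepended, and realigning
 * skb->data to RSI_DMA_ALIGN in rsi_hci_send_pkt() can consume up to
 * RSI_DMA_ALIGN - 1 further bytes of headroom, so the BT HAL now
 * reserves the sum of the two instead of a bare 16 bytes.
 */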
#define RSI_FRAME_DESC_SIZE 16 +#define RSI_HEADROOM_FOR_BT_HAL (RSI_FRAME_DESC_SIZE + RSI_DMA_ALIGN) struct rsi_hci_adapter { void *priv; @@ -70,6 +71,16 @@ static int rsi_hci_send_pkt(struct hci_dev *hdev, struct sk_buff *skb) bt_cb(new_skb)->pkt_type = hci_skb_pkt_type(skb); kfree_skb(skb); skb = new_skb; + if (!IS_ALIGNED((unsigned long)skb->data, RSI_DMA_ALIGN)) { + u8 *skb_data = skb->data; + int skb_len = skb->len; + + skb_push(skb, RSI_DMA_ALIGN); + skb_pull(skb, PTR_ALIGN(skb->data, + RSI_DMA_ALIGN) - skb->data); + memmove(skb->data, skb_data, skb_len); + skb_trim(skb, skb_len); + } } return h_adapter->proto_ops->coex_send_pkt(h_adapter->priv, skb, diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c index 7f9ea8e4c1b2..41405de27d66 100644 --- a/drivers/bluetooth/btrtl.c +++ b/drivers/bluetooth/btrtl.c @@ -138,6 +138,13 @@ static const struct id_table ic_id_table[] = { .fw_name = "rtl_bt/rtl8761a_fw.bin", .cfg_name = "rtl_bt/rtl8761a_config" }, + /* 8822C with USB interface */ + { IC_INFO(RTL_ROM_LMP_8822B, 0xc), + .config_needed = false, + .has_rom_version = true, + .fw_name = "rtl_bt/rtl8822cu_fw.bin", + .cfg_name = "rtl_bt/rtl8822cu_config" }, + /* 8822B */ { IC_INFO(RTL_ROM_LMP_8822B, 0xb), .config_needed = true, @@ -206,7 +213,7 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev, struct btrtl_device_info *btrtl_dev, unsigned char **_buf) { - const u8 extension_sig[] = { 0x51, 0x04, 0xfd, 0x77 }; + static const u8 extension_sig[] = { 0x51, 0x04, 0xfd, 0x77 }; struct rtl_epatch_header *epatch_info; unsigned char *buf; int i, len; @@ -228,6 +235,7 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev, { RTL_ROM_LMP_8822B, 8 }, { RTL_ROM_LMP_8723B, 9 }, /* 8723D */ { RTL_ROM_LMP_8821A, 10 }, /* 8821C */ + { RTL_ROM_LMP_8822B, 13 }, /* 8822C */ }; min_size = sizeof(struct rtl_epatch_header) + sizeof(extension_sig) + 3; diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index cd2e5cf14ea5..61cde1a7ec1b 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -3096,6 +3096,7 @@ static int btusb_probe(struct usb_interface *intf, hdev->set_diag = btintel_set_diag; hdev->set_bdaddr = btintel_set_bdaddr; set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks); + set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks); set_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks); } diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c index 2fee65886d50..9f1392fc7105 100644 --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c @@ -167,7 +167,8 @@ struct qca_serdev { }; static int qca_power_setup(struct hci_uart *hu, bool on); -static void qca_power_shutdown(struct hci_dev *hdev); +static void qca_power_shutdown(struct hci_uart *hu); +static int qca_power_off(struct hci_dev *hdev); static void __serial_clock_on(struct tty_struct *tty) { @@ -499,7 +500,6 @@ static int qca_open(struct hci_uart *hu) hu->priv = qca; if (hu->serdev) { - serdev_device_open(hu->serdev); qcadev = serdev_device_get_drvdata(hu->serdev); if (qcadev->btsoc_type != QCA_WCN3990) { @@ -609,11 +609,10 @@ static int qca_close(struct hci_uart *hu) if (hu->serdev) { qcadev = serdev_device_get_drvdata(hu->serdev); if (qcadev->btsoc_type == QCA_WCN3990) - qca_power_shutdown(hu->hdev); + qca_power_shutdown(hu); else gpiod_set_value_cansleep(qcadev->bt_en, 0); - serdev_device_close(hu->serdev); } kfree_skb(qca->rx_skb); @@ -1101,8 +1100,26 @@ static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type) static int 
qca_wcn3990_init(struct hci_uart *hu) { struct hci_dev *hdev = hu->hdev; + struct qca_serdev *qcadev; int ret; + /* Check the vregs status; hci down may have turned + * off the voltage regulator. + */ + qcadev = serdev_device_get_drvdata(hu->serdev); + if (!qcadev->bt_power->vregs_on) { + serdev_device_close(hu->serdev); + ret = qca_power_setup(hu, true); + if (ret) + return ret; + + ret = serdev_device_open(hu->serdev); + if (ret) { + bt_dev_err(hu->hdev, "failed to open port"); + return ret; + } + } + /* Forcefully enable wcn3990 to enter boot mode. */ host_set_baudrate(hu, 2400); ret = qca_send_power_pulse(hdev, QCA_WCN3990_POWEROFF_PULSE); @@ -1154,6 +1171,12 @@ static int qca_setup(struct hci_uart *hu) if (qcadev->btsoc_type == QCA_WCN3990) { bt_dev_info(hdev, "setting up wcn3990"); + + /* Enable the NON_PERSISTENT_SETUP quirk to ensure that setup + * is executed on every hci up. + */ + set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks); + hu->hdev->shutdown = qca_power_off; ret = qca_wcn3990_init(hu); if (ret) return ret; @@ -1232,15 +1255,26 @@ static const struct qca_vreg_data qca_soc_data = { .num_vregs = 4, }; -static void qca_power_shutdown(struct hci_dev *hdev) +static void qca_power_shutdown(struct hci_uart *hu) { - struct hci_uart *hu = hci_get_drvdata(hdev); + struct serdev_device *serdev = hu->serdev; + unsigned char cmd = QCA_WCN3990_POWEROFF_PULSE; host_set_baudrate(hu, 2400); - qca_send_power_pulse(hdev, QCA_WCN3990_POWEROFF_PULSE); + hci_uart_set_flow_control(hu, true); + serdev_device_write_buf(serdev, &cmd, sizeof(cmd)); + hci_uart_set_flow_control(hu, false); qca_power_setup(hu, false); } +static int qca_power_off(struct hci_dev *hdev) +{ + struct hci_uart *hu = hci_get_drvdata(hdev); + + qca_power_shutdown(hu); + return 0; +} + static int qca_enable_regulator(struct qca_vreg vregs, struct regulator *regulator) { @@ -1413,7 +1447,7 @@ static void qca_serdev_remove(struct serdev_device *serdev) struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev); if (qcadev->btsoc_type == QCA_WCN3990) - qca_power_shutdown(qcadev->serdev_hu.hdev); + qca_power_shutdown(&qcadev->serdev_hu); else clk_disable_unprepare(qcadev->susclk); diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c index aa2543b3c286..c445aa9ac511 100644 --- a/drivers/bluetooth/hci_serdev.c +++ b/drivers/bluetooth/hci_serdev.c @@ -57,9 +57,10 @@ static inline struct sk_buff *hci_uart_dequeue(struct hci_uart *hu) { struct sk_buff *skb = hu->tx_skb; - if (!skb) - skb = hu->proto->dequeue(hu); - else + if (!skb) { + if (test_bit(HCI_UART_PROTO_READY, &hu->flags)) + skb = hu->proto->dequeue(hu); + } else hu->tx_skb = NULL; return skb; @@ -94,7 +95,7 @@ static void hci_uart_write_work(struct work_struct *work) hci_uart_tx_complete(hu, hci_skb_pkt_type(skb)); kfree_skb(skb); } - } while(test_bit(HCI_UART_TX_WAKEUP, &hu->tx_state)); + } while (test_bit(HCI_UART_TX_WAKEUP, &hu->tx_state)); clear_bit(HCI_UART_SENDING, &hu->tx_state); } @@ -368,6 +369,7 @@ void hci_uart_unregister_device(struct hci_uart *hu) { struct hci_dev *hdev = hu->hdev; + clear_bit(HCI_UART_PROTO_READY, &hu->flags); hci_unregister_dev(hdev); hci_free_dev(hdev); diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c index 9bdb84dc225c..e96ffff61c3a 100644 --- a/drivers/infiniband/hw/nes/nes_mgt.c +++ b/drivers/infiniband/hw/nes/nes_mgt.c @@ -198,9 +198,9 @@ static struct sk_buff *nes_get_next_skb(struct nes_device *nesdev, struct nes_qp if (skb) { /* Continue processing fpdu */ - if 
(skb->next == (struct sk_buff *)&nesqp->pau_list) + skb = skb_peek_next(skb, &nesqp->pau_list); + if (!skb) goto out; - skb = skb->next; processacks = false; } else { /* Starting a new one */ @@ -553,12 +553,10 @@ static void queue_fpdus(struct sk_buff *skb, struct nes_vnic *nesvnic, struct ne if (skb_queue_len(&nesqp->pau_list) == 0) { skb_queue_head(&nesqp->pau_list, skb); } else { - tmpskb = nesqp->pau_list.next; - while (tmpskb != (struct sk_buff *)&nesqp->pau_list) { + skb_queue_walk(&nesqp->pau_list, tmpskb) { cb = (struct nes_rskb_cb *)&tmpskb->cb[0]; if (before(seqnum, cb->seqnum)) break; - tmpskb = tmpskb->next; } skb_insert(tmpskb, skb, &nesqp->pau_list); } diff --git a/drivers/isdn/gigaset/asyncdata.c b/drivers/isdn/gigaset/asyncdata.c index bc208557f783..c0cbee06bc21 100644 --- a/drivers/isdn/gigaset/asyncdata.c +++ b/drivers/isdn/gigaset/asyncdata.c @@ -65,7 +65,7 @@ static unsigned cmd_loop(unsigned numbytes, struct inbuf_t *inbuf) cs->respdata[0] = 0; break; } - /* --v-- fall through --v-- */ + /* fall through */ case '\r': /* end of message line, pass to response handler */ if (cbytes >= MAX_RESP_SIZE) { @@ -100,7 +100,7 @@ static unsigned cmd_loop(unsigned numbytes, struct inbuf_t *inbuf) goto exit; } /* quoted or not in DLE mode: treat as regular data */ - /* --v-- fall through --v-- */ + /* fall through */ default: /* append to line buffer if possible */ if (cbytes < MAX_RESP_SIZE) diff --git a/drivers/isdn/gigaset/ev-layer.c b/drivers/isdn/gigaset/ev-layer.c index 1cfcea62aed9..182826e9d07c 100644 --- a/drivers/isdn/gigaset/ev-layer.c +++ b/drivers/isdn/gigaset/ev-layer.c @@ -1036,7 +1036,7 @@ static void handle_icall(struct cardstate *cs, struct bc_state *bcs, break; default: dev_err(cs->dev, "internal error: disposition=%d\n", retval); - /* --v-- fall through --v-- */ + /* fall through */ case ICALL_IGNORE: case ICALL_REJECT: /* hang up actively @@ -1319,7 +1319,7 @@ static void do_action(int action, struct cardstate *cs, cs->commands_pending = 1; break; } - /* bad cid: fall through */ + /* fall through - bad cid */ case ACT_FAILCID: cs->cur_at_seq = SEQ_NONE; channel = cs->curchannel; diff --git a/drivers/isdn/gigaset/isocdata.c b/drivers/isdn/gigaset/isocdata.c index 97e00118ccfe..f9264ba0fe77 100644 --- a/drivers/isdn/gigaset/isocdata.c +++ b/drivers/isdn/gigaset/isocdata.c @@ -906,7 +906,7 @@ static void cmd_loop(unsigned char *src, int numbytes, struct inbuf_t *inbuf) cs->respdata[0] = 0; break; } - /* --v-- fall through --v-- */ + /* fall through */ case '\r': /* end of message line, pass to response handler */ if (cbytes >= MAX_RESP_SIZE) { diff --git a/drivers/isdn/hisax/amd7930_fn.c b/drivers/isdn/hisax/amd7930_fn.c index 77debda2221b..6c336366128c 100644 --- a/drivers/isdn/hisax/amd7930_fn.c +++ b/drivers/isdn/hisax/amd7930_fn.c @@ -625,7 +625,7 @@ Amd7930_l1hw(struct PStack *st, int pr, void *arg) break; case (HW_RESET | REQUEST): spin_lock_irqsave(&cs->lock, flags); - if ((cs->dc.amd7930.ph_state == 8)) { + if (cs->dc.amd7930.ph_state == 8) { /* b-channels off, PH-AR cleared * change to F3 */ Amd7930_ph_command(cs, 0x20, "HW_RESET REQUEST"); //LMR1 bit 5 diff --git a/drivers/isdn/hisax/w6692.c b/drivers/isdn/hisax/w6692.c index c4be1644f5bb..36eefaa3a7d9 100644 --- a/drivers/isdn/hisax/w6692.c +++ b/drivers/isdn/hisax/w6692.c @@ -72,7 +72,7 @@ W6692_new_ph(struct IsdnCardState *cs) case (W_L1CMD_RST): ph_command(cs, W_L1CMD_DRC); l1_msg(cs, HW_RESET | INDICATION, NULL); - /* fallthru */ + /* fall through */ case (W_L1IND_CD): l1_msg(cs, HW_DEACTIVATE | 
CONFIRM, NULL); break; @@ -624,7 +624,7 @@ W6692_l1hw(struct PStack *st, int pr, void *arg) break; case (HW_RESET | REQUEST): spin_lock_irqsave(&cs->lock, flags); - if ((cs->dc.w6692.ph_state == W_L1IND_DRD)) { + if (cs->dc.w6692.ph_state == W_L1IND_DRD) { ph_command(cs, W_L1CMD_ECK); spin_unlock_irqrestore(&cs->lock, flags); } else { diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c index 18c0a1281914..15d3ca37669a 100644 --- a/drivers/isdn/mISDN/socket.c +++ b/drivers/isdn/mISDN/socket.c @@ -236,8 +236,7 @@ mISDN_sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) } done: - if (skb) - kfree_skb(skb); + kfree_skb(skb); release_sock(sk); return err; } diff --git a/drivers/net/can/rx-offload.c b/drivers/net/can/rx-offload.c index d94dae216820..c7d05027a7a0 100644 --- a/drivers/net/can/rx-offload.c +++ b/drivers/net/can/rx-offload.c @@ -79,7 +79,7 @@ static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota) static inline void __skb_queue_add_sort(struct sk_buff_head *head, struct sk_buff *new, int (*compare)(struct sk_buff *a, struct sk_buff *b)) { - struct sk_buff *pos, *insert = (struct sk_buff *)head; + struct sk_buff *pos, *insert = NULL; skb_queue_reverse_walk(head, pos) { const struct can_rx_offload_cb *cb_pos, *cb_new; @@ -99,8 +99,10 @@ static inline void __skb_queue_add_sort(struct sk_buff_head *head, struct sk_buf insert = pos; break; } - - __skb_queue_after(head, insert, new); + if (!insert) + __skb_queue_head(head, new); + else + __skb_queue_after(head, insert, new); } static int can_rx_offload_compare(struct sk_buff *a, struct sk_buff *b) diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig index d3ce1e4cb4d3..71bb3aebded4 100644 --- a/drivers/net/dsa/Kconfig +++ b/drivers/net/dsa/Kconfig @@ -23,6 +23,14 @@ config NET_DSA_LOOP This enables support for a fake mock-up switch chip which exercises the DSA APIs. +config NET_DSA_LANTIQ_GSWIP + tristate "Lantiq / Intel GSWIP" + depends on HAS_IOMEM && NET_DSA + select NET_DSA_TAG_GSWIP + ---help--- + This enables support for the Lantiq / Intel GSWIP 2.1 found in + the xrx200 / VR9 SoC. + config NET_DSA_MT7530 tristate "Mediatek MT7530 Ethernet switch support" depends on NET_DSA diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile index 46c1cba91ffe..82e5d794c41f 100644 --- a/drivers/net/dsa/Makefile +++ b/drivers/net/dsa/Makefile @@ -5,6 +5,7 @@ obj-$(CONFIG_NET_DSA_LOOP) += dsa_loop.o ifdef CONFIG_NET_DSA_LOOP obj-$(CONFIG_FIXED_PHY) += dsa_loop_bdinfo.o endif +obj-$(CONFIG_NET_DSA_LANTIQ_GSWIP) += lantiq_gswip.o obj-$(CONFIG_NET_DSA_MT7530) += mt7530.o obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o diff --git a/drivers/net/dsa/b53/Kconfig b/drivers/net/dsa/b53/Kconfig index 2f988216dab9..d32469283f97 100644 --- a/drivers/net/dsa/b53/Kconfig +++ b/drivers/net/dsa/b53/Kconfig @@ -23,6 +23,7 @@ config B53_MDIO_DRIVER config B53_MMAP_DRIVER tristate "B53 MMAP connected switch driver" depends on B53 && HAS_IOMEM + default BCM63XX || BMIPS_GENERIC help Select to enable support for memory-mapped switches like the BCM63XX integrated switches. 
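# Two idioms in the hunk below are worth noting: "default ARCH_BCM_IPROC"
# preselects the SRAB driver only on the SoC family that ships the block,
# and "depends on B53_SERDES || !B53_SERDES" couples the two tristates so
# that B53_SRAB_DRIVER can only be built-in when B53_SERDES is =y or =n;
# with B53_SERDES=m the expression evaluates to m, which keeps a built-in
# switch driver from calling into a modular SerDes helper.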
@@ -30,6 +31,15 @@ config B53_SRAB_DRIVER config B53_SRAB_DRIVER tristate "B53 SRAB connected switch driver" depends on B53 && HAS_IOMEM + depends on B53_SERDES || !B53_SERDES + default ARCH_BCM_IPROC help Select to enable support for memory-mapped Switch Register Access Bridge Registers (SRAB) like it is found on the BCM53010 + +config B53_SERDES + tristate "B53 SerDes support" + depends on B53 + default ARCH_BCM_NSP + help + Select to enable support for SerDes on e.g. Northstar Plus SoCs. diff --git a/drivers/net/dsa/b53/Makefile b/drivers/net/dsa/b53/Makefile index 4256fb42a4dd..b1be13023ae4 100644 --- a/drivers/net/dsa/b53/Makefile +++ b/drivers/net/dsa/b53/Makefile @@ -5,3 +5,4 @@ obj-$(CONFIG_B53_SPI_DRIVER) += b53_spi.o obj-$(CONFIG_B53_MDIO_DRIVER) += b53_mdio.o obj-$(CONFIG_B53_MMAP_DRIVER) += b53_mmap.o obj-$(CONFIG_B53_SRAB_DRIVER) += b53_srab.o +obj-$(CONFIG_B53_SERDES) += b53_serdes.o diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index ad534b90ef21..0e4bbdcc614f 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -26,6 +26,7 @@ #include <linux/module.h> #include <linux/platform_data/b53.h> #include <linux/phy.h> +#include <linux/phylink.h> #include <linux/etherdevice.h> #include <linux/if_bridge.h> #include <net/dsa.h> @@ -502,8 +503,14 @@ int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy) { struct b53_device *dev = ds->priv; unsigned int cpu_port = ds->ports[port].cpu_dp->index; + int ret = 0; u16 pvlan; + if (dev->ops->irq_enable) + ret = dev->ops->irq_enable(dev, port); + if (ret) + return ret; + /* Clear the Rx and Tx disable bits and set to no spanning tree */ b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), 0); @@ -536,6 +543,9 @@ void b53_disable_port(struct dsa_switch *ds, int port, struct phy_device *phy) b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), &reg); reg |= PORT_CTRL_RX_DISABLE | PORT_CTRL_TX_DISABLE; b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg); + + if (dev->ops->irq_disable) + dev->ops->irq_disable(dev, port); } EXPORT_SYMBOL(b53_disable_port); @@ -755,6 +765,8 @@ static int b53_reset_switch(struct b53_device *priv) memset(priv->vlans, 0, sizeof(*priv->vlans) * priv->num_vlans); memset(priv->ports, 0, sizeof(*priv->ports) * priv->num_ports); + priv->serdes_lane = B53_INVALID_LANE; + return b53_switch_reset(priv); } @@ -938,33 +950,50 @@ static int b53_setup(struct dsa_switch *ds) return ret; } -static void b53_adjust_link(struct dsa_switch *ds, int port, - struct phy_device *phydev) +static void b53_force_link(struct b53_device *dev, int port, int link) { - struct b53_device *dev = ds->priv; - struct ethtool_eee *p = &dev->ports[port].eee; - u8 rgmii_ctrl = 0, reg = 0, off; - - if (!phy_is_pseudo_fixed_link(phydev)) - return; + u8 reg, val, off; /* Override the port settings */ if (port == dev->cpu_port) { off = B53_PORT_OVERRIDE_CTRL; - reg = PORT_OVERRIDE_EN; + val = PORT_OVERRIDE_EN; } else { off = B53_GMII_PORT_OVERRIDE_CTRL(port); - reg = GMII_PO_EN; + val = GMII_PO_EN; } - /* Set the link UP */ - if (phydev->link) + b53_read8(dev, B53_CTRL_PAGE, off, &reg); + reg |= val; + if (link) reg |= PORT_OVERRIDE_LINK; + else + reg &= ~PORT_OVERRIDE_LINK; + b53_write8(dev, B53_CTRL_PAGE, off, reg); +} - if (phydev->duplex == DUPLEX_FULL) +static void b53_force_port_config(struct b53_device *dev, int port, + int speed, int duplex, int pause) +{ + u8 reg, val, off; + + /* Override the port settings */ + if (port == dev->cpu_port) { + off = 
B53_PORT_OVERRIDE_CTRL; + val = PORT_OVERRIDE_EN; + } else { + off = B53_GMII_PORT_OVERRIDE_CTRL(port); + val = GMII_PO_EN; + } + + b53_read8(dev, B53_CTRL_PAGE, off, &reg); + reg |= val; + if (duplex == DUPLEX_FULL) reg |= PORT_OVERRIDE_FULL_DUPLEX; + else + reg &= ~PORT_OVERRIDE_FULL_DUPLEX; - switch (phydev->speed) { + switch (speed) { case 2000: reg |= PORT_OVERRIDE_SPEED_2000M; /* fallthrough */ @@ -978,21 +1007,41 @@ static void b53_adjust_link(struct dsa_switch *ds, int port, reg |= PORT_OVERRIDE_SPEED_10M; break; default: - dev_err(ds->dev, "unknown speed: %d\n", phydev->speed); + dev_err(dev->dev, "unknown speed: %d\n", speed); return; } + if (pause & MLO_PAUSE_RX) + reg |= PORT_OVERRIDE_RX_FLOW; + if (pause & MLO_PAUSE_TX) + reg |= PORT_OVERRIDE_TX_FLOW; + + b53_write8(dev, B53_CTRL_PAGE, off, reg); +} + +static void b53_adjust_link(struct dsa_switch *ds, int port, + struct phy_device *phydev) +{ + struct b53_device *dev = ds->priv; + struct ethtool_eee *p = &dev->ports[port].eee; + u8 rgmii_ctrl = 0, reg = 0, off; + int pause = 0; + + if (!phy_is_pseudo_fixed_link(phydev)) + return; + /* Enable flow control on BCM5301x's CPU port */ if (is5301x(dev) && port == dev->cpu_port) - reg |= PORT_OVERRIDE_RX_FLOW | PORT_OVERRIDE_TX_FLOW; + pause = MLO_PAUSE_TXRX_MASK; if (phydev->pause) { if (phydev->asym_pause) - reg |= PORT_OVERRIDE_TX_FLOW; - reg |= PORT_OVERRIDE_RX_FLOW; + pause |= MLO_PAUSE_TX; + pause |= MLO_PAUSE_RX; } - b53_write8(dev, B53_CTRL_PAGE, off, reg); + b53_force_port_config(dev, port, phydev->speed, phydev->duplex, pause); + b53_force_link(dev, port, phydev->link); if (is531x5(dev) && phy_interface_is_rgmii(phydev)) { if (port == 8) @@ -1052,16 +1101,9 @@ static void b53_adjust_link(struct dsa_switch *ds, int port, } } else if (is5301x(dev)) { if (port != dev->cpu_port) { - u8 po_reg = B53_GMII_PORT_OVERRIDE_CTRL(dev->cpu_port); - u8 gmii_po; - - b53_read8(dev, B53_CTRL_PAGE, po_reg, &gmii_po); - gmii_po |= GMII_PO_LINK | - GMII_PO_RX_FLOW | - GMII_PO_TX_FLOW | - GMII_PO_EN | - GMII_PO_SPEED_2000M; - b53_write8(dev, B53_CTRL_PAGE, po_reg, gmii_po); + b53_force_port_config(dev, dev->cpu_port, 2000, + DUPLEX_FULL, MLO_PAUSE_TXRX_MASK); + b53_force_link(dev, dev->cpu_port, 1); } } @@ -1069,6 +1111,148 @@ static void b53_adjust_link(struct dsa_switch *ds, int port, p->eee_enabled = b53_eee_init(ds, port, phydev); } +void b53_port_event(struct dsa_switch *ds, int port) +{ + struct b53_device *dev = ds->priv; + bool link; + u16 sts; + + b53_read16(dev, B53_STAT_PAGE, B53_LINK_STAT, &sts); + link = !!(sts & BIT(port)); + dsa_port_phylink_mac_change(ds, port, link); +} +EXPORT_SYMBOL(b53_port_event); + +void b53_phylink_validate(struct dsa_switch *ds, int port, + unsigned long *supported, + struct phylink_link_state *state) +{ + struct b53_device *dev = ds->priv; + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; + + if (dev->ops->serdes_phylink_validate) + dev->ops->serdes_phylink_validate(dev, port, mask, state); + + /* Allow all the expected bits */ + phylink_set(mask, Autoneg); + phylink_set_port_modes(mask); + phylink_set(mask, Pause); + phylink_set(mask, Asym_Pause); + + /* With the exclusion of 5325/5365, MII, Reverse MII and 802.3z, we + * support Gigabit, including Half duplex. 
+ */ + if (state->interface != PHY_INTERFACE_MODE_MII && + state->interface != PHY_INTERFACE_MODE_REVMII && + !phy_interface_mode_is_8023z(state->interface) && + !(is5325(dev) || is5365(dev))) { + phylink_set(mask, 1000baseT_Full); + phylink_set(mask, 1000baseT_Half); + } + + if (!phy_interface_mode_is_8023z(state->interface)) { + phylink_set(mask, 10baseT_Half); + phylink_set(mask, 10baseT_Full); + phylink_set(mask, 100baseT_Half); + phylink_set(mask, 100baseT_Full); + } + + bitmap_and(supported, supported, mask, + __ETHTOOL_LINK_MODE_MASK_NBITS); + bitmap_and(state->advertising, state->advertising, mask, + __ETHTOOL_LINK_MODE_MASK_NBITS); + + phylink_helper_basex_speed(state); +} +EXPORT_SYMBOL(b53_phylink_validate); + +int b53_phylink_mac_link_state(struct dsa_switch *ds, int port, + struct phylink_link_state *state) +{ + struct b53_device *dev = ds->priv; + int ret = -EOPNOTSUPP; + + if ((phy_interface_mode_is_8023z(state->interface) || + state->interface == PHY_INTERFACE_MODE_SGMII) && + dev->ops->serdes_link_state) + ret = dev->ops->serdes_link_state(dev, port, state); + + return ret; +} +EXPORT_SYMBOL(b53_phylink_mac_link_state); + +void b53_phylink_mac_config(struct dsa_switch *ds, int port, + unsigned int mode, + const struct phylink_link_state *state) +{ + struct b53_device *dev = ds->priv; + + if (mode == MLO_AN_PHY) + return; + + if (mode == MLO_AN_FIXED) { + b53_force_port_config(dev, port, state->speed, + state->duplex, state->pause); + return; + } + + if ((phy_interface_mode_is_8023z(state->interface) || + state->interface == PHY_INTERFACE_MODE_SGMII) && + dev->ops->serdes_config) + dev->ops->serdes_config(dev, port, mode, state); +} +EXPORT_SYMBOL(b53_phylink_mac_config); + +void b53_phylink_mac_an_restart(struct dsa_switch *ds, int port) +{ + struct b53_device *dev = ds->priv; + + if (dev->ops->serdes_an_restart) + dev->ops->serdes_an_restart(dev, port); +} +EXPORT_SYMBOL(b53_phylink_mac_an_restart); + +void b53_phylink_mac_link_down(struct dsa_switch *ds, int port, + unsigned int mode, + phy_interface_t interface) +{ + struct b53_device *dev = ds->priv; + + if (mode == MLO_AN_PHY) + return; + + if (mode == MLO_AN_FIXED) { + b53_force_link(dev, port, false); + return; + } + + if (phy_interface_mode_is_8023z(interface) && + dev->ops->serdes_link_set) + dev->ops->serdes_link_set(dev, port, mode, interface, false); +} +EXPORT_SYMBOL(b53_phylink_mac_link_down); + +void b53_phylink_mac_link_up(struct dsa_switch *ds, int port, + unsigned int mode, + phy_interface_t interface, + struct phy_device *phydev) +{ + struct b53_device *dev = ds->priv; + + if (mode == MLO_AN_PHY) + return; + + if (mode == MLO_AN_FIXED) { + b53_force_link(dev, port, true); + return; + } + + if (phy_interface_mode_is_8023z(interface) && + dev->ops->serdes_link_set) + dev->ops->serdes_link_set(dev, port, mode, interface, true); +} +EXPORT_SYMBOL(b53_phylink_mac_link_up); + int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering) { return 0; @@ -1710,6 +1894,12 @@ static const struct dsa_switch_ops b53_switch_ops = { .phy_read = b53_phy_read16, .phy_write = b53_phy_write16, .adjust_link = b53_adjust_link, + .phylink_validate = b53_phylink_validate, + .phylink_mac_link_state = b53_phylink_mac_link_state, + .phylink_mac_config = b53_phylink_mac_config, + .phylink_mac_an_restart = b53_phylink_mac_an_restart, + .phylink_mac_link_down = b53_phylink_mac_link_down, + .phylink_mac_link_up = b53_phylink_mac_link_up, .port_enable = b53_enable_port, .port_disable = b53_disable_port, .get_mac_eee = 
b53_get_mac_eee, diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h index df149756c282..ec796482792d 100644 --- a/drivers/net/dsa/b53/b53_priv.h +++ b/drivers/net/dsa/b53/b53_priv.h @@ -29,6 +29,7 @@ struct b53_device; struct net_device; +struct phylink_link_state; struct b53_io_ops { int (*read8)(struct b53_device *dev, u8 page, u8 reg, u8 *value); @@ -43,8 +44,25 @@ struct b53_io_ops { int (*write64)(struct b53_device *dev, u8 page, u8 reg, u64 value); int (*phy_read16)(struct b53_device *dev, int addr, int reg, u16 *value); int (*phy_write16)(struct b53_device *dev, int addr, int reg, u16 value); + int (*irq_enable)(struct b53_device *dev, int port); + void (*irq_disable)(struct b53_device *dev, int port); + u8 (*serdes_map_lane)(struct b53_device *dev, int port); + int (*serdes_link_state)(struct b53_device *dev, int port, + struct phylink_link_state *state); + void (*serdes_config)(struct b53_device *dev, int port, + unsigned int mode, + const struct phylink_link_state *state); + void (*serdes_an_restart)(struct b53_device *dev, int port); + void (*serdes_link_set)(struct b53_device *dev, int port, + unsigned int mode, phy_interface_t interface, + bool link_up); + void (*serdes_phylink_validate)(struct b53_device *dev, int port, + unsigned long *supported, + struct phylink_link_state *state); }; +#define B53_INVALID_LANE 0xff + enum { BCM5325_DEVICE_ID = 0x25, BCM5365_DEVICE_ID = 0x65, @@ -107,6 +125,7 @@ struct b53_device { /* connect specific data */ u8 current_page; struct device *dev; + u8 serdes_lane; /* Master MDIO bus we got probed from */ struct mii_bus *bus; @@ -298,6 +317,23 @@ int b53_br_join(struct dsa_switch *ds, int port, struct net_device *bridge); void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *bridge); void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state); void b53_br_fast_age(struct dsa_switch *ds, int port); +void b53_port_event(struct dsa_switch *ds, int port); +void b53_phylink_validate(struct dsa_switch *ds, int port, + unsigned long *supported, + struct phylink_link_state *state); +int b53_phylink_mac_link_state(struct dsa_switch *ds, int port, + struct phylink_link_state *state); +void b53_phylink_mac_config(struct dsa_switch *ds, int port, + unsigned int mode, + const struct phylink_link_state *state); +void b53_phylink_mac_an_restart(struct dsa_switch *ds, int port); +void b53_phylink_mac_link_down(struct dsa_switch *ds, int port, + unsigned int mode, + phy_interface_t interface); +void b53_phylink_mac_link_up(struct dsa_switch *ds, int port, + unsigned int mode, + phy_interface_t interface, + struct phy_device *phydev); int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering); int b53_vlan_prepare(struct dsa_switch *ds, int port, const struct switchdev_obj_port_vlan *vlan); diff --git a/drivers/net/dsa/b53/b53_serdes.c b/drivers/net/dsa/b53/b53_serdes.c new file mode 100644 index 000000000000..629bf14128a2 --- /dev/null +++ b/drivers/net/dsa/b53/b53_serdes.c @@ -0,0 +1,214 @@ +// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause +/* + * Northstar Plus switch SerDes/SGMII PHY main logic + * + * Copyright (C) 2018 Florian Fainelli <f.fainelli@gmail.com> + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/delay.h> +#include <linux/kernel.h> +#include <linux/phy.h> +#include <linux/phylink.h> +#include <net/dsa.h> + +#include "b53_priv.h" +#include "b53_serdes.h" +#include "b53_regs.h" + +static void b53_serdes_write_blk(struct b53_device *dev, u8 offset, u16 
block, + u16 value) +{ + b53_write16(dev, B53_SERDES_PAGE, B53_SERDES_BLKADDR, block); + b53_write16(dev, B53_SERDES_PAGE, offset, value); +} + +static u16 b53_serdes_read_blk(struct b53_device *dev, u8 offset, u16 block) +{ + u16 value; + + b53_write16(dev, B53_SERDES_PAGE, B53_SERDES_BLKADDR, block); + b53_read16(dev, B53_SERDES_PAGE, offset, &value); + + return value; +} + +static void b53_serdes_set_lane(struct b53_device *dev, u8 lane) +{ + if (dev->serdes_lane == lane) + return; + + WARN_ON(lane > 1); + + b53_serdes_write_blk(dev, B53_SERDES_LANE, + SERDES_XGXSBLK0_BLOCKADDRESS, lane); + dev->serdes_lane = lane; +} + +static void b53_serdes_write(struct b53_device *dev, u8 lane, + u8 offset, u16 block, u16 value) +{ + b53_serdes_set_lane(dev, lane); + b53_serdes_write_blk(dev, offset, block, value); +} + +static u16 b53_serdes_read(struct b53_device *dev, u8 lane, + u8 offset, u16 block) +{ + b53_serdes_set_lane(dev, lane); + return b53_serdes_read_blk(dev, offset, block); +} + +void b53_serdes_config(struct b53_device *dev, int port, unsigned int mode, + const struct phylink_link_state *state) +{ + u8 lane = b53_serdes_map_lane(dev, port); + u16 reg; + + if (lane == B53_INVALID_LANE) + return; + + reg = b53_serdes_read(dev, lane, B53_SERDES_DIGITAL_CONTROL(1), + SERDES_DIGITAL_BLK); + if (state->interface == PHY_INTERFACE_MODE_1000BASEX) + reg |= FIBER_MODE_1000X; + else + reg &= ~FIBER_MODE_1000X; + b53_serdes_write(dev, lane, B53_SERDES_DIGITAL_CONTROL(1), + SERDES_DIGITAL_BLK, reg); +} +EXPORT_SYMBOL(b53_serdes_config); + +void b53_serdes_an_restart(struct b53_device *dev, int port) +{ + u8 lane = b53_serdes_map_lane(dev, port); + u16 reg; + + if (lane == B53_INVALID_LANE) + return; + + reg = b53_serdes_read(dev, lane, B53_SERDES_MII_REG(MII_BMCR), + SERDES_MII_BLK); + reg |= BMCR_ANRESTART; + b53_serdes_write(dev, lane, B53_SERDES_MII_REG(MII_BMCR), + SERDES_MII_BLK, reg); +} +EXPORT_SYMBOL(b53_serdes_an_restart); + +int b53_serdes_link_state(struct b53_device *dev, int port, + struct phylink_link_state *state) +{ + u8 lane = b53_serdes_map_lane(dev, port); + u16 dig, bmsr; + + if (lane == B53_INVALID_LANE) + return 1; + + dig = b53_serdes_read(dev, lane, B53_SERDES_DIGITAL_STATUS, + SERDES_DIGITAL_BLK); + bmsr = b53_serdes_read(dev, lane, B53_SERDES_MII_REG(MII_BMSR), + SERDES_MII_BLK); + + switch ((dig >> SPEED_STATUS_SHIFT) & SPEED_STATUS_MASK) { + case SPEED_STATUS_10: + state->speed = SPEED_10; + break; + case SPEED_STATUS_100: + state->speed = SPEED_100; + break; + case SPEED_STATUS_1000: + state->speed = SPEED_1000; + break; + default: + case SPEED_STATUS_2500: + state->speed = SPEED_2500; + break; + } + + state->duplex = dig & DUPLEX_STATUS ? 
DUPLEX_FULL : DUPLEX_HALF; + state->an_complete = !!(bmsr & BMSR_ANEGCOMPLETE); + state->link = !!(dig & LINK_STATUS); + if (dig & PAUSE_RESOLUTION_RX_SIDE) + state->pause |= MLO_PAUSE_RX; + if (dig & PAUSE_RESOLUTION_TX_SIDE) + state->pause |= MLO_PAUSE_TX; + + return 0; +} +EXPORT_SYMBOL(b53_serdes_link_state); + +void b53_serdes_link_set(struct b53_device *dev, int port, unsigned int mode, + phy_interface_t interface, bool link_up) +{ + u8 lane = b53_serdes_map_lane(dev, port); + u16 reg; + + if (lane == B53_INVALID_LANE) + return; + + reg = b53_serdes_read(dev, lane, B53_SERDES_MII_REG(MII_BMCR), + SERDES_MII_BLK); + if (link_up) + reg &= ~BMCR_PDOWN; + else + reg |= BMCR_PDOWN; + b53_serdes_write(dev, lane, B53_SERDES_MII_REG(MII_BMCR), + SERDES_MII_BLK, reg); +} +EXPORT_SYMBOL(b53_serdes_link_set); + +void b53_serdes_phylink_validate(struct b53_device *dev, int port, + unsigned long *supported, + struct phylink_link_state *state) +{ + u8 lane = b53_serdes_map_lane(dev, port); + + if (lane == B53_INVALID_LANE) + return; + + switch (lane) { + case 0: + phylink_set(supported, 2500baseX_Full); + /* fallthrough */ + case 1: + phylink_set(supported, 1000baseX_Full); + break; + default: + break; + } +} +EXPORT_SYMBOL(b53_serdes_phylink_validate); + +int b53_serdes_init(struct b53_device *dev, int port) +{ + u8 lane = b53_serdes_map_lane(dev, port); + u16 id0, msb, lsb; + + if (lane == B53_INVALID_LANE) + return -EINVAL; + + id0 = b53_serdes_read(dev, lane, B53_SERDES_ID0, SERDES_ID0); + msb = b53_serdes_read(dev, lane, B53_SERDES_MII_REG(MII_PHYSID1), + SERDES_MII_BLK); + lsb = b53_serdes_read(dev, lane, B53_SERDES_MII_REG(MII_PHYSID2), + SERDES_MII_BLK); + if (id0 == 0 || id0 == 0xffff) { + dev_err(dev->dev, "SerDes not initialized, check settings\n"); + return -ENODEV; + } + + dev_info(dev->dev, + "SerDes lane %d, model: %d, rev %c%d (OUI: 0x%08x)\n", + lane, id0 & SERDES_ID0_MODEL_MASK, + (id0 >> SERDES_ID0_REV_LETTER_SHIFT) + 0x41, + (id0 >> SERDES_ID0_REV_NUM_SHIFT) & SERDES_ID0_REV_NUM_MASK, + (u32)msb << 16 | lsb); + + return 0; +} +EXPORT_SYMBOL(b53_serdes_init); + +MODULE_AUTHOR("Florian Fainelli <f.fainelli@gmail.com>"); +MODULE_DESCRIPTION("B53 Switch SerDes driver"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/dsa/b53/b53_serdes.h b/drivers/net/dsa/b53/b53_serdes.h new file mode 100644 index 000000000000..3bb4f91aec9e --- /dev/null +++ b/drivers/net/dsa/b53/b53_serdes.h @@ -0,0 +1,128 @@ +/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause + * + * Northstar Plus switch SerDes/SGMII PHY definitions + * + * Copyright (C) 2018 Florian Fainelli <f.fainelli@gmail.com> + */ + +#include <linux/phy.h> +#include <linux/types.h> + +/* Non-standard page used to access SerDes PHY registers on NorthStar Plus */ +#define B53_SERDES_PAGE 0x16 +#define B53_SERDES_BLKADDR 0x3e +#define B53_SERDES_LANE 0x3c + +#define B53_SERDES_ID0 0x20 +#define SERDES_ID0_MODEL_MASK 0x3f +#define SERDES_ID0_REV_NUM_SHIFT 11 +#define SERDES_ID0_REV_NUM_MASK 0x7 +#define SERDES_ID0_REV_LETTER_SHIFT 14 + +#define B53_SERDES_MII_REG(x) (0x20 + (x) * 2) +#define B53_SERDES_DIGITAL_CONTROL(x) (0x1e + (x) * 2) +#define B53_SERDES_DIGITAL_STATUS 0x28 + +/* SERDES_DIGITAL_CONTROL1 */ +#define FIBER_MODE_1000X BIT(0) +#define TBI_INTERFACE BIT(1) +#define SIGNAL_DETECT_EN BIT(2) +#define INVERT_SIGNAL_DETECT BIT(3) +#define AUTODET_EN BIT(4) +#define SGMII_MASTER_MODE BIT(5) +#define DISABLE_DLL_PWRDOWN BIT(6) +#define CRC_CHECKER_DIS BIT(7) +#define COMMA_DET_EN BIT(8) +#define ZERO_COMMA_DET_EN BIT(9) 
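/*
 * Access pattern for the registers in this header (see b53_serdes.c
 * above): the SerDes block sits behind switch page B53_SERDES_PAGE, so a
 * block base is first latched into B53_SERDES_BLKADDR and the 16-bit
 * offset is then read or written; the lane is selected via the XGXSBLK0
 * block and cached in dev->serdes_lane. Reading a lane's MII status with
 * the helpers above, for example:
 *
 *	u16 bmsr = b53_serdes_read(dev, lane,
 *				   B53_SERDES_MII_REG(MII_BMSR),
 *				   SERDES_MII_BLK);
 */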
+#define REMOTE_LOOPBACK BIT(10) +#define SEL_RX_PKTS_FOR_CNTR BIT(11) +#define MASTER_MDIO_PHY_SEL BIT(13) +#define DISABLE_SIGNAL_DETECT_FLT BIT(14) + +/* SERDES_DIGITAL_CONTROL2 */ +#define EN_PARALLEL_DET BIT(0) +#define DIS_FALSE_LINK BIT(1) +#define FLT_FORCE_LINK BIT(2) +#define EN_AUTONEG_ERR_TIMER BIT(3) +#define DIS_REMOTE_FAULT_SENSING BIT(4) +#define FORCE_XMIT_DATA BIT(5) +#define AUTONEG_FAST_TIMERS BIT(6) +#define DIS_CARRIER_EXTEND BIT(7) +#define DIS_TRRR_GENERATION BIT(8) +#define BYPASS_PCS_RX BIT(9) +#define BYPASS_PCS_TX BIT(10) +#define TEST_CNTR_EN BIT(11) +#define TX_PACKET_SEQ_TEST BIT(12) +#define TX_IDLE_JAM_SEQ_TEST BIT(13) +#define CLR_BER_CNTR BIT(14) + +/* SERDES_DIGITAL_CONTROL3 */ +#define TX_FIFO_RST BIT(0) +#define FIFO_ELAST_TX_RX_SHIFT 1 +#define FIFO_ELAST_TX_RX_5K 0 +#define FIFO_ELAST_TX_RX_10K 1 +#define FIFO_ELAST_TX_RX_13_5K 2 +#define FIFO_ELAST_TX_RX_18_5K 3 +#define BLOCK_TXEN_MODE BIT(9) +#define JAM_FALSE_CARRIER_MODE BIT(10) +#define EXT_PHY_CRS_MODE BIT(11) +#define INVERT_EXT_PHY_CRS BIT(12) +#define DISABLE_TX_CRS BIT(13) + +/* SERDES_DIGITAL_STATUS */ +#define SGMII_MODE BIT(0) +#define LINK_STATUS BIT(1) +#define DUPLEX_STATUS BIT(2) +#define SPEED_STATUS_SHIFT 3 +#define SPEED_STATUS_10 0 +#define SPEED_STATUS_100 1 +#define SPEED_STATUS_1000 2 +#define SPEED_STATUS_2500 3 +#define SPEED_STATUS_MASK SPEED_STATUS_2500 +#define PAUSE_RESOLUTION_TX_SIDE BIT(5) +#define PAUSE_RESOLUTION_RX_SIDE BIT(6) +#define LINK_STATUS_CHANGE BIT(7) +#define EARLY_END_EXT_DET BIT(8) +#define CARRIER_EXT_ERR_DET BIT(9) +#define RX_ERR_DET BIT(10) +#define TX_ERR_DET BIT(11) +#define CRC_ERR_DET BIT(12) +#define FALSE_CARRIER_ERR_DET BIT(13) +#define RXFIFO_ERR_DET BIT(14) +#define TXFIFO_ERR_DET BIT(15) + +/* Block offsets */ +#define SERDES_DIGITAL_BLK 0x8300 +#define SERDES_ID0 0x8310 +#define SERDES_MII_BLK 0xffe0 +#define SERDES_XGXSBLK0_BLOCKADDRESS 0xffd0 + +struct phylink_link_state; + +static inline u8 b53_serdes_map_lane(struct b53_device *dev, int port) +{ + if (!dev->ops->serdes_map_lane) + return B53_INVALID_LANE; + + return dev->ops->serdes_map_lane(dev, port); +} + +int b53_serdes_get_link(struct b53_device *dev, int port); +int b53_serdes_link_state(struct b53_device *dev, int port, + struct phylink_link_state *state); +void b53_serdes_config(struct b53_device *dev, int port, unsigned int mode, + const struct phylink_link_state *state); +void b53_serdes_an_restart(struct b53_device *dev, int port); +void b53_serdes_link_set(struct b53_device *dev, int port, unsigned int mode, + phy_interface_t interface, bool link_up); +void b53_serdes_phylink_validate(struct b53_device *dev, int port, + unsigned long *supported, + struct phylink_link_state *state); +#if IS_ENABLED(CONFIG_B53_SERDES) +int b53_serdes_init(struct b53_device *dev, int port); +#else +static inline int b53_serdes_init(struct b53_device *dev, int port) +{ + return -ENODEV; +} +#endif diff --git a/drivers/net/dsa/b53/b53_srab.c b/drivers/net/dsa/b53/b53_srab.c index 91de2ba99ad1..90f514252987 100644 --- a/drivers/net/dsa/b53/b53_srab.c +++ b/drivers/net/dsa/b53/b53_srab.c @@ -19,11 +19,13 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/delay.h> +#include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/platform_data/b53.h> #include <linux/of.h> #include "b53_priv.h" +#include "b53_serdes.h" /* command and status register of the SRAB */ #define B53_SRAB_CMDSTAT 0x2c @@ -47,6 +49,7 @@ /* command and status register of the SRAB */ #define 
B53_SRAB_CTRLS 0x40 +#define B53_SRAB_CTRLS_HOST_INTR BIT(1) #define B53_SRAB_CTRLS_RCAREQ BIT(3) #define B53_SRAB_CTRLS_RCAGNT BIT(4) #define B53_SRAB_CTRLS_SW_INIT_DONE BIT(6) @@ -60,8 +63,29 @@ #define B53_SRAB_P7_SLEEP_TIMER BIT(11) #define B53_SRAB_IMP0_SLEEP_TIMER BIT(12) +/* Port mux configuration registers */ +#define B53_MUX_CONFIG_P5 0x00 +#define MUX_CONFIG_SGMII 0 +#define MUX_CONFIG_MII_LITE 1 +#define MUX_CONFIG_RGMII 2 +#define MUX_CONFIG_GMII 3 +#define MUX_CONFIG_GPHY 4 +#define MUX_CONFIG_INTERNAL 5 +#define MUX_CONFIG_MASK 0x7 +#define B53_MUX_CONFIG_P4 0x04 + +struct b53_srab_port_priv { + int irq; + bool irq_enabled; + struct b53_device *dev; + unsigned int num; + phy_interface_t mode; +}; + struct b53_srab_priv { void __iomem *regs; + void __iomem *mux_config; + struct b53_srab_port_priv port_intrs[B53_N_PORTS]; }; static int b53_srab_request_grant(struct b53_device *dev) @@ -344,6 +368,81 @@ err: return ret; } +static irqreturn_t b53_srab_port_thread(int irq, void *dev_id) +{ + struct b53_srab_port_priv *port = dev_id; + struct b53_device *dev = port->dev; + + if (port->mode == PHY_INTERFACE_MODE_SGMII) + b53_port_event(dev->ds, port->num); + + return IRQ_HANDLED; +} + +static irqreturn_t b53_srab_port_isr(int irq, void *dev_id) +{ + struct b53_srab_port_priv *port = dev_id; + struct b53_device *dev = port->dev; + struct b53_srab_priv *priv = dev->priv; + + /* Acknowledge the interrupt */ + writel(BIT(port->num), priv->regs + B53_SRAB_INTR); + + return IRQ_WAKE_THREAD; +} + +#if IS_ENABLED(CONFIG_B53_SERDES) +static u8 b53_srab_serdes_map_lane(struct b53_device *dev, int port) +{ + struct b53_srab_priv *priv = dev->priv; + struct b53_srab_port_priv *p = &priv->port_intrs[port]; + + if (p->mode != PHY_INTERFACE_MODE_SGMII) + return B53_INVALID_LANE; + + switch (port) { + case 5: + return 0; + case 4: + return 1; + default: + return B53_INVALID_LANE; + } +} +#endif + +static int b53_srab_irq_enable(struct b53_device *dev, int port) +{ + struct b53_srab_priv *priv = dev->priv; + struct b53_srab_port_priv *p = &priv->port_intrs[port]; + int ret = 0; + + /* Interrupt is optional and was not specified, do not make + * this fatal + */ + if (p->irq == -ENXIO) + return ret; + + ret = request_threaded_irq(p->irq, b53_srab_port_isr, + b53_srab_port_thread, 0, + dev_name(dev->dev), p); + if (!ret) + p->irq_enabled = true; + + return ret; +} + +static void b53_srab_irq_disable(struct b53_device *dev, int port) +{ + struct b53_srab_priv *priv = dev->priv; + struct b53_srab_port_priv *p = &priv->port_intrs[port]; + + if (p->irq_enabled) { + free_irq(p->irq, p); + p->irq_enabled = false; + } +} + static const struct b53_io_ops b53_srab_ops = { .read8 = b53_srab_read8, .read16 = b53_srab_read16, @@ -355,6 +454,16 @@ static const struct b53_io_ops b53_srab_ops = { .write32 = b53_srab_write32, .write48 = b53_srab_write48, .write64 = b53_srab_write64, + .irq_enable = b53_srab_irq_enable, + .irq_disable = b53_srab_irq_disable, +#if IS_ENABLED(CONFIG_B53_SERDES) + .serdes_map_lane = b53_srab_serdes_map_lane, + .serdes_link_state = b53_serdes_link_state, + .serdes_config = b53_serdes_config, + .serdes_an_restart = b53_serdes_an_restart, + .serdes_link_set = b53_serdes_link_set, + .serdes_phylink_validate = b53_serdes_phylink_validate, +#endif }; static const struct of_device_id b53_srab_of_match[] = { @@ -379,6 +488,107 @@ static const struct of_device_id b53_srab_of_match[] = { }; MODULE_DEVICE_TABLE(of, b53_srab_of_match); +static void b53_srab_intr_set(struct b53_srab_priv *priv, bool 
set) +{ + u32 reg; + + reg = readl(priv->regs + B53_SRAB_CTRLS); + if (set) + reg |= B53_SRAB_CTRLS_HOST_INTR; + else + reg &= ~B53_SRAB_CTRLS_HOST_INTR; + writel(reg, priv->regs + B53_SRAB_CTRLS); +} + +static void b53_srab_prepare_irq(struct platform_device *pdev) +{ + struct b53_device *dev = platform_get_drvdata(pdev); + struct b53_srab_priv *priv = dev->priv; + struct b53_srab_port_priv *port; + unsigned int i; + char *name; + + /* Clear all pending interrupts */ + writel(0xffffffff, priv->regs + B53_SRAB_INTR); + + if (dev->pdata && dev->pdata->chip_id != BCM58XX_DEVICE_ID) + return; + + for (i = 0; i < B53_N_PORTS; i++) { + port = &priv->port_intrs[i]; + + /* There is no port 6 */ + if (i == 6) + continue; + + name = kasprintf(GFP_KERNEL, "link_state_p%d", i); + if (!name) + return; + + port->num = i; + port->dev = dev; + port->irq = platform_get_irq_byname(pdev, name); + kfree(name); + } + + b53_srab_intr_set(priv, true); +} + +static void b53_srab_mux_init(struct platform_device *pdev) +{ + struct b53_device *dev = platform_get_drvdata(pdev); + struct b53_srab_priv *priv = dev->priv; + struct b53_srab_port_priv *p; + struct resource *r; + unsigned int port; + u32 reg, off = 0; + int ret; + + if (dev->pdata && dev->pdata->chip_id != BCM58XX_DEVICE_ID) + return; + + r = platform_get_resource(pdev, IORESOURCE_MEM, 1); + priv->mux_config = devm_ioremap_resource(&pdev->dev, r); + if (IS_ERR(priv->mux_config)) + return; + + /* Obtain the port mux configuration so we know which lanes + * actually map to SerDes lanes + */ + for (port = 5; port > 3; port--, off += 4) { + p = &priv->port_intrs[port]; + + reg = readl(priv->mux_config + B53_MUX_CONFIG_P5 + off); + switch (reg & MUX_CONFIG_MASK) { + case MUX_CONFIG_SGMII: + p->mode = PHY_INTERFACE_MODE_SGMII; + ret = b53_serdes_init(dev, port); + if (ret) + continue; + break; + case MUX_CONFIG_MII_LITE: + p->mode = PHY_INTERFACE_MODE_MII; + break; + case MUX_CONFIG_GMII: + p->mode = PHY_INTERFACE_MODE_GMII; + break; + case MUX_CONFIG_RGMII: + p->mode = PHY_INTERFACE_MODE_RGMII; + break; + case MUX_CONFIG_INTERNAL: + p->mode = PHY_INTERFACE_MODE_INTERNAL; + break; + default: + p->mode = PHY_INTERFACE_MODE_NA; + break; + } + + if (p->mode != PHY_INTERFACE_MODE_NA) + dev_info(&pdev->dev, "Port %d mode: %s\n", + port, phy_modes(p->mode)); + } +} + static int b53_srab_probe(struct platform_device *pdev) { struct b53_platform_data *pdata = pdev->dev.platform_data; @@ -417,13 +627,18 @@ static int b53_srab_probe(struct platform_device *pdev) platform_set_drvdata(pdev, dev); + b53_srab_prepare_irq(pdev); + b53_srab_mux_init(pdev); + return b53_switch_register(dev); } static int b53_srab_remove(struct platform_device *pdev) { struct b53_device *dev = platform_get_drvdata(pdev); + struct b53_srab_priv *priv = dev->priv; + b53_srab_intr_set(priv, false); if (dev) b53_switch_remove(dev); diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index fc8b48adf38b..3017ecf82ca5 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -465,8 +465,7 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds) static void bcm_sf2_mdio_unregister(struct bcm_sf2_priv *priv) { mdiobus_unregister(priv->slave_mii_bus); - if (priv->master_mii_dn) - of_node_put(priv->master_mii_dn); + of_node_put(priv->master_mii_dn); } static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port) diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c new file mode 100644 index 000000000000..693a67f45bef --- /dev/null +++ 
b/drivers/net/dsa/lantiq_gswip.c @@ -0,0 +1,1167 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Lantiq / Intel GSWIP switch driver for VRX200 SoCs + * + * Copyright (C) 2010 Lantiq Deutschland + * Copyright (C) 2012 John Crispin <john@phrozen.org> + * Copyright (C) 2017 - 2018 Hauke Mehrtens <hauke@hauke-m.de> + */ + +#include <linux/clk.h> +#include <linux/etherdevice.h> +#include <linux/firmware.h> +#include <linux/if_bridge.h> +#include <linux/if_vlan.h> +#include <linux/iopoll.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/of_mdio.h> +#include <linux/of_net.h> +#include <linux/of_platform.h> +#include <linux/phy.h> +#include <linux/phylink.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/reset.h> +#include <net/dsa.h> +#include <dt-bindings/mips/lantiq_rcu_gphy.h> + +#include "lantiq_pce.h" + +/* GSWIP MDIO Registers */ +#define GSWIP_MDIO_GLOB 0x00 +#define GSWIP_MDIO_GLOB_ENABLE BIT(15) +#define GSWIP_MDIO_CTRL 0x08 +#define GSWIP_MDIO_CTRL_BUSY BIT(12) +#define GSWIP_MDIO_CTRL_RD BIT(11) +#define GSWIP_MDIO_CTRL_WR BIT(10) +#define GSWIP_MDIO_CTRL_PHYAD_MASK 0x1f +#define GSWIP_MDIO_CTRL_PHYAD_SHIFT 5 +#define GSWIP_MDIO_CTRL_REGAD_MASK 0x1f +#define GSWIP_MDIO_READ 0x09 +#define GSWIP_MDIO_WRITE 0x0A +#define GSWIP_MDIO_MDC_CFG0 0x0B +#define GSWIP_MDIO_MDC_CFG1 0x0C +#define GSWIP_MDIO_PHYp(p) (0x15 - (p)) +#define GSWIP_MDIO_PHY_LINK_MASK 0x6000 +#define GSWIP_MDIO_PHY_LINK_AUTO 0x0000 +#define GSWIP_MDIO_PHY_LINK_DOWN 0x4000 +#define GSWIP_MDIO_PHY_LINK_UP 0x2000 +#define GSWIP_MDIO_PHY_SPEED_MASK 0x1800 +#define GSWIP_MDIO_PHY_SPEED_AUTO 0x1800 +#define GSWIP_MDIO_PHY_SPEED_M10 0x0000 +#define GSWIP_MDIO_PHY_SPEED_M100 0x0800 +#define GSWIP_MDIO_PHY_SPEED_G1 0x1000 +#define GSWIP_MDIO_PHY_FDUP_MASK 0x0600 +#define GSWIP_MDIO_PHY_FDUP_AUTO 0x0000 +#define GSWIP_MDIO_PHY_FDUP_EN 0x0200 +#define GSWIP_MDIO_PHY_FDUP_DIS 0x0600 +#define GSWIP_MDIO_PHY_FCONTX_MASK 0x0180 +#define GSWIP_MDIO_PHY_FCONTX_AUTO 0x0000 +#define GSWIP_MDIO_PHY_FCONTX_EN 0x0100 +#define GSWIP_MDIO_PHY_FCONTX_DIS 0x0180 +#define GSWIP_MDIO_PHY_FCONRX_MASK 0x0060 +#define GSWIP_MDIO_PHY_FCONRX_AUTO 0x0000 +#define GSWIP_MDIO_PHY_FCONRX_EN 0x0020 +#define GSWIP_MDIO_PHY_FCONRX_DIS 0x0060 +#define GSWIP_MDIO_PHY_ADDR_MASK 0x001f +#define GSWIP_MDIO_PHY_MASK (GSWIP_MDIO_PHY_ADDR_MASK | \ + GSWIP_MDIO_PHY_FCONRX_MASK | \ + GSWIP_MDIO_PHY_FCONTX_MASK | \ + GSWIP_MDIO_PHY_LINK_MASK | \ + GSWIP_MDIO_PHY_SPEED_MASK | \ + GSWIP_MDIO_PHY_FDUP_MASK) + +/* GSWIP MII Registers */ +#define GSWIP_MII_CFG0 0x00 +#define GSWIP_MII_CFG1 0x02 +#define GSWIP_MII_CFG5 0x04 +#define GSWIP_MII_CFG_EN BIT(14) +#define GSWIP_MII_CFG_LDCLKDIS BIT(12) +#define GSWIP_MII_CFG_MODE_MIIP 0x0 +#define GSWIP_MII_CFG_MODE_MIIM 0x1 +#define GSWIP_MII_CFG_MODE_RMIIP 0x2 +#define GSWIP_MII_CFG_MODE_RMIIM 0x3 +#define GSWIP_MII_CFG_MODE_RGMII 0x4 +#define GSWIP_MII_CFG_MODE_MASK 0xf +#define GSWIP_MII_CFG_RATE_M2P5 0x00 +#define GSWIP_MII_CFG_RATE_M25 0x10 +#define GSWIP_MII_CFG_RATE_M125 0x20 +#define GSWIP_MII_CFG_RATE_M50 0x30 +#define GSWIP_MII_CFG_RATE_AUTO 0x40 +#define GSWIP_MII_CFG_RATE_MASK 0x70 +#define GSWIP_MII_PCDU0 0x01 +#define GSWIP_MII_PCDU1 0x03 +#define GSWIP_MII_PCDU5 0x05 +#define GSWIP_MII_PCDU_TXDLY_MASK GENMASK(2, 0) +#define GSWIP_MII_PCDU_RXDLY_MASK GENMASK(9, 7) + +/* GSWIP Core Registers */ +#define GSWIP_SWRES 0x000 +#define GSWIP_SWRES_R1 BIT(1) /* GSWIP Software reset */ +#define GSWIP_SWRES_R0 BIT(0) /* GSWIP Hardware reset */ +#define GSWIP_VERSION 
0x013 +#define GSWIP_VERSION_REV_SHIFT 0 +#define GSWIP_VERSION_REV_MASK GENMASK(7, 0) +#define GSWIP_VERSION_MOD_SHIFT 8 +#define GSWIP_VERSION_MOD_MASK GENMASK(15, 8) +#define GSWIP_VERSION_2_0 0x100 +#define GSWIP_VERSION_2_1 0x021 +#define GSWIP_VERSION_2_2 0x122 +#define GSWIP_VERSION_2_2_ETC 0x022 + +#define GSWIP_BM_RAM_VAL(x) (0x043 - (x)) +#define GSWIP_BM_RAM_ADDR 0x044 +#define GSWIP_BM_RAM_CTRL 0x045 +#define GSWIP_BM_RAM_CTRL_BAS BIT(15) +#define GSWIP_BM_RAM_CTRL_OPMOD BIT(5) +#define GSWIP_BM_RAM_CTRL_ADDR_MASK GENMASK(4, 0) +#define GSWIP_BM_QUEUE_GCTRL 0x04A +#define GSWIP_BM_QUEUE_GCTRL_GL_MOD BIT(10) +/* Buffer Management Port Configuration Register */ +#define GSWIP_BM_PCFGp(p) (0x080 + ((p) * 2)) +#define GSWIP_BM_PCFG_CNTEN BIT(0) /* RMON Counter Enable */ +#define GSWIP_BM_PCFG_IGCNT BIT(1) /* Ingress Special Tag RMON count */ +/* Buffer Management Port Control Register */ +#define GSWIP_BM_RMON_CTRLp(p) (0x81 + ((p) * 2)) +#define GSWIP_BM_CTRL_RMON_RAM1_RES BIT(0) /* Software Reset for RMON RAM 1 */ +#define GSWIP_BM_CTRL_RMON_RAM2_RES BIT(1) /* Software Reset for RMON RAM 2 */ + +/* PCE */ +#define GSWIP_PCE_TBL_KEY(x) (0x447 - (x)) +#define GSWIP_PCE_TBL_MASK 0x448 +#define GSWIP_PCE_TBL_VAL(x) (0x44D - (x)) +#define GSWIP_PCE_TBL_ADDR 0x44E +#define GSWIP_PCE_TBL_CTRL 0x44F +#define GSWIP_PCE_TBL_CTRL_BAS BIT(15) +#define GSWIP_PCE_TBL_CTRL_TYPE BIT(13) +#define GSWIP_PCE_TBL_CTRL_VLD BIT(12) +#define GSWIP_PCE_TBL_CTRL_KEYFORM BIT(11) +#define GSWIP_PCE_TBL_CTRL_GMAP_MASK GENMASK(10, 7) +#define GSWIP_PCE_TBL_CTRL_OPMOD_MASK GENMASK(6, 5) +#define GSWIP_PCE_TBL_CTRL_OPMOD_ADRD 0x00 +#define GSWIP_PCE_TBL_CTRL_OPMOD_ADWR 0x20 +#define GSWIP_PCE_TBL_CTRL_OPMOD_KSRD 0x40 +#define GSWIP_PCE_TBL_CTRL_OPMOD_KSWR 0x60 +#define GSWIP_PCE_TBL_CTRL_ADDR_MASK GENMASK(4, 0) +#define GSWIP_PCE_PMAP1 0x453 /* Monitoring port map */ +#define GSWIP_PCE_PMAP2 0x454 /* Default Multicast port map */ +#define GSWIP_PCE_PMAP3 0x455 /* Default Unknown Unicast port map */ +#define GSWIP_PCE_GCTRL_0 0x456 +#define GSWIP_PCE_GCTRL_0_MC_VALID BIT(3) +#define GSWIP_PCE_GCTRL_0_VLAN BIT(14) /* VLAN aware Switching */ +#define GSWIP_PCE_GCTRL_1 0x457 +#define GSWIP_PCE_GCTRL_1_MAC_GLOCK BIT(2) /* MAC Address table lock */ +#define GSWIP_PCE_GCTRL_1_MAC_GLOCK_MOD BIT(3) /* MAC address table lock forwarding mode */ +#define GSWIP_PCE_PCTRL_0p(p) (0x480 + ((p) * 0xA)) +#define GSWIP_PCE_PCTRL_0_INGRESS BIT(11) +#define GSWIP_PCE_PCTRL_0_PSTATE_LISTEN 0x0 +#define GSWIP_PCE_PCTRL_0_PSTATE_RX 0x1 +#define GSWIP_PCE_PCTRL_0_PSTATE_TX 0x2 +#define GSWIP_PCE_PCTRL_0_PSTATE_LEARNING 0x3 +#define GSWIP_PCE_PCTRL_0_PSTATE_FORWARDING 0x7 +#define GSWIP_PCE_PCTRL_0_PSTATE_MASK GENMASK(2, 0) + +#define GSWIP_MAC_FLEN 0x8C5 +#define GSWIP_MAC_CTRL_2p(p) (0x905 + ((p) * 0xC)) +#define GSWIP_MAC_CTRL_2_MLEN BIT(3) /* Maximum Untagged Frame Length */ + +/* Ethernet Switch Fetch DMA Port Control Register */ +#define GSWIP_FDMA_PCTRLp(p) (0xA80 + ((p) * 0x6)) +#define GSWIP_FDMA_PCTRL_EN BIT(0) /* FDMA Port Enable */ +#define GSWIP_FDMA_PCTRL_STEN BIT(1) /* Special Tag Insertion Enable */ +#define GSWIP_FDMA_PCTRL_VLANMOD_MASK GENMASK(4, 3) /* VLAN Modification Control */ +#define GSWIP_FDMA_PCTRL_VLANMOD_SHIFT 3 /* VLAN Modification Control */ +#define GSWIP_FDMA_PCTRL_VLANMOD_DIS (0x0 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT) +#define GSWIP_FDMA_PCTRL_VLANMOD_PRIO (0x1 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT) +#define GSWIP_FDMA_PCTRL_VLANMOD_ID (0x2 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT) +#define 
GSWIP_FDMA_PCTRL_VLANMOD_BOTH (0x3 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT) + +/* Ethernet Switch Store DMA Port Control Register */ +#define GSWIP_SDMA_PCTRLp(p) (0xBC0 + ((p) * 0x6)) +#define GSWIP_SDMA_PCTRL_EN BIT(0) /* SDMA Port Enable */ +#define GSWIP_SDMA_PCTRL_FCEN BIT(1) /* Flow Control Enable */ +#define GSWIP_SDMA_PCTRL_PAUFWD BIT(1) /* Pause Frame Forwarding */ + +#define XRX200_GPHY_FW_ALIGN (16 * 1024) + +struct gswip_hw_info { + int max_ports; + int cpu_port; +}; + +struct xway_gphy_match_data { + char *fe_firmware_name; + char *ge_firmware_name; +}; + +struct gswip_gphy_fw { + struct clk *clk_gate; + struct reset_control *reset; + u32 fw_addr_offset; + char *fw_name; +}; + +struct gswip_priv { + __iomem void *gswip; + __iomem void *mdio; + __iomem void *mii; + const struct gswip_hw_info *hw_info; + const struct xway_gphy_match_data *gphy_fw_name_cfg; + struct dsa_switch *ds; + struct device *dev; + struct regmap *rcu_regmap; + int num_gphy_fw; + struct gswip_gphy_fw *gphy_fw; +}; + +struct gswip_rmon_cnt_desc { + unsigned int size; + unsigned int offset; + const char *name; +}; + +#define MIB_DESC(_size, _offset, _name) {.size = _size, .offset = _offset, .name = _name} + +static const struct gswip_rmon_cnt_desc gswip_rmon_cnt[] = { + /** Receive Packet Count (only packets that are accepted and not discarded). */ + MIB_DESC(1, 0x1F, "RxGoodPkts"), + MIB_DESC(1, 0x23, "RxUnicastPkts"), + MIB_DESC(1, 0x22, "RxMulticastPkts"), + MIB_DESC(1, 0x21, "RxFCSErrorPkts"), + MIB_DESC(1, 0x1D, "RxUnderSizeGoodPkts"), + MIB_DESC(1, 0x1E, "RxUnderSizeErrorPkts"), + MIB_DESC(1, 0x1B, "RxOversizeGoodPkts"), + MIB_DESC(1, 0x1C, "RxOversizeErrorPkts"), + MIB_DESC(1, 0x20, "RxGoodPausePkts"), + MIB_DESC(1, 0x1A, "RxAlignErrorPkts"), + MIB_DESC(1, 0x12, "Rx64BytePkts"), + MIB_DESC(1, 0x13, "Rx127BytePkts"), + MIB_DESC(1, 0x14, "Rx255BytePkts"), + MIB_DESC(1, 0x15, "Rx511BytePkts"), + MIB_DESC(1, 0x16, "Rx1023BytePkts"), + /** Receive Size 1024-1522 (or more, if configured) Packet Count. */ + MIB_DESC(1, 0x17, "RxMaxBytePkts"), + MIB_DESC(1, 0x18, "RxDroppedPkts"), + MIB_DESC(1, 0x19, "RxFilteredPkts"), + MIB_DESC(2, 0x24, "RxGoodBytes"), + MIB_DESC(2, 0x26, "RxBadBytes"), + MIB_DESC(1, 0x11, "TxAcmDroppedPkts"), + MIB_DESC(1, 0x0C, "TxGoodPkts"), + MIB_DESC(1, 0x06, "TxUnicastPkts"), + MIB_DESC(1, 0x07, "TxMulticastPkts"), + MIB_DESC(1, 0x00, "Tx64BytePkts"), + MIB_DESC(1, 0x01, "Tx127BytePkts"), + MIB_DESC(1, 0x02, "Tx255BytePkts"), + MIB_DESC(1, 0x03, "Tx511BytePkts"), + MIB_DESC(1, 0x04, "Tx1023BytePkts"), + /** Transmit Size 1024-1522 (or more, if configured) Packet Count. 
*/ + MIB_DESC(1, 0x05, "TxMaxBytePkts"), + MIB_DESC(1, 0x08, "TxSingleCollCount"), + MIB_DESC(1, 0x09, "TxMultCollCount"), + MIB_DESC(1, 0x0A, "TxLateCollCount"), + MIB_DESC(1, 0x0B, "TxExcessCollCount"), + MIB_DESC(1, 0x0D, "TxPauseCount"), + MIB_DESC(1, 0x10, "TxDroppedPkts"), + MIB_DESC(2, 0x0E, "TxGoodBytes"), +}; + +static u32 gswip_switch_r(struct gswip_priv *priv, u32 offset) +{ + return __raw_readl(priv->gswip + (offset * 4)); +} + +static void gswip_switch_w(struct gswip_priv *priv, u32 val, u32 offset) +{ + __raw_writel(val, priv->gswip + (offset * 4)); +} + +static void gswip_switch_mask(struct gswip_priv *priv, u32 clear, u32 set, + u32 offset) +{ + u32 val = gswip_switch_r(priv, offset); + + val &= ~(clear); + val |= set; + gswip_switch_w(priv, val, offset); +} + +static u32 gswip_switch_r_timeout(struct gswip_priv *priv, u32 offset, + u32 cleared) +{ + u32 val; + + return readx_poll_timeout(__raw_readl, priv->gswip + (offset * 4), val, + (val & cleared) == 0, 20, 50000); +} + +static u32 gswip_mdio_r(struct gswip_priv *priv, u32 offset) +{ + return __raw_readl(priv->mdio + (offset * 4)); +} + +static void gswip_mdio_w(struct gswip_priv *priv, u32 val, u32 offset) +{ + __raw_writel(val, priv->mdio + (offset * 4)); +} + +static void gswip_mdio_mask(struct gswip_priv *priv, u32 clear, u32 set, + u32 offset) +{ + u32 val = gswip_mdio_r(priv, offset); + + val &= ~(clear); + val |= set; + gswip_mdio_w(priv, val, offset); +} + +static u32 gswip_mii_r(struct gswip_priv *priv, u32 offset) +{ + return __raw_readl(priv->mii + (offset * 4)); +} + +static void gswip_mii_w(struct gswip_priv *priv, u32 val, u32 offset) +{ + __raw_writel(val, priv->mii + (offset * 4)); +} + +static void gswip_mii_mask(struct gswip_priv *priv, u32 clear, u32 set, + u32 offset) +{ + u32 val = gswip_mii_r(priv, offset); + + val &= ~(clear); + val |= set; + gswip_mii_w(priv, val, offset); +} + +static void gswip_mii_mask_cfg(struct gswip_priv *priv, u32 clear, u32 set, + int port) +{ + switch (port) { + case 0: + gswip_mii_mask(priv, clear, set, GSWIP_MII_CFG0); + break; + case 1: + gswip_mii_mask(priv, clear, set, GSWIP_MII_CFG1); + break; + case 5: + gswip_mii_mask(priv, clear, set, GSWIP_MII_CFG5); + break; + } +} + +static void gswip_mii_mask_pcdu(struct gswip_priv *priv, u32 clear, u32 set, + int port) +{ + switch (port) { + case 0: + gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU0); + break; + case 1: + gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU1); + break; + case 5: + gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU5); + break; + } +} + +static int gswip_mdio_poll(struct gswip_priv *priv) +{ + int cnt = 100; + + while (likely(cnt--)) { + u32 ctrl = gswip_mdio_r(priv, GSWIP_MDIO_CTRL); + + if ((ctrl & GSWIP_MDIO_CTRL_BUSY) == 0) + return 0; + usleep_range(20, 40); + } + + return -ETIMEDOUT; +} + +static int gswip_mdio_wr(struct mii_bus *bus, int addr, int reg, u16 val) +{ + struct gswip_priv *priv = bus->priv; + int err; + + err = gswip_mdio_poll(priv); + if (err) { + dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n"); + return err; + } + + gswip_mdio_w(priv, val, GSWIP_MDIO_WRITE); + gswip_mdio_w(priv, GSWIP_MDIO_CTRL_BUSY | GSWIP_MDIO_CTRL_WR | + ((addr & GSWIP_MDIO_CTRL_PHYAD_MASK) << GSWIP_MDIO_CTRL_PHYAD_SHIFT) | + (reg & GSWIP_MDIO_CTRL_REGAD_MASK), + GSWIP_MDIO_CTRL); + + return 0; +} + +static int gswip_mdio_rd(struct mii_bus *bus, int addr, int reg) +{ + struct gswip_priv *priv = bus->priv; + int err; + + err = gswip_mdio_poll(priv); + if (err) { + dev_err(&bus->dev, "waiting for 
MDIO bus busy timed out\n"); + return err; + } + + gswip_mdio_w(priv, GSWIP_MDIO_CTRL_BUSY | GSWIP_MDIO_CTRL_RD | + ((addr & GSWIP_MDIO_CTRL_PHYAD_MASK) << GSWIP_MDIO_CTRL_PHYAD_SHIFT) | + (reg & GSWIP_MDIO_CTRL_REGAD_MASK), + GSWIP_MDIO_CTRL); + + err = gswip_mdio_poll(priv); + if (err) { + dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n"); + return err; + } + + return gswip_mdio_r(priv, GSWIP_MDIO_READ); +} + +static int gswip_mdio(struct gswip_priv *priv, struct device_node *mdio_np) +{ + struct dsa_switch *ds = priv->ds; + + ds->slave_mii_bus = devm_mdiobus_alloc(priv->dev); + if (!ds->slave_mii_bus) + return -ENOMEM; + + ds->slave_mii_bus->priv = priv; + ds->slave_mii_bus->read = gswip_mdio_rd; + ds->slave_mii_bus->write = gswip_mdio_wr; + ds->slave_mii_bus->name = "lantiq,xrx200-mdio"; + snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "%s-mii", + dev_name(priv->dev)); + ds->slave_mii_bus->parent = priv->dev; + ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask; + + return of_mdiobus_register(ds->slave_mii_bus, mdio_np); +} + +static int gswip_port_enable(struct dsa_switch *ds, int port, + struct phy_device *phydev) +{ + struct gswip_priv *priv = ds->priv; + + /* RMON Counter Enable for port */ + gswip_switch_w(priv, GSWIP_BM_PCFG_CNTEN, GSWIP_BM_PCFGp(port)); + + /* enable port fetch/store dma & VLAN Modification */ + gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_EN | + GSWIP_FDMA_PCTRL_VLANMOD_BOTH, + GSWIP_FDMA_PCTRLp(port)); + gswip_switch_mask(priv, 0, GSWIP_SDMA_PCTRL_EN, + GSWIP_SDMA_PCTRLp(port)); + gswip_switch_mask(priv, 0, GSWIP_PCE_PCTRL_0_INGRESS, + GSWIP_PCE_PCTRL_0p(port)); + + if (!dsa_is_cpu_port(ds, port)) { + u32 macconf = GSWIP_MDIO_PHY_LINK_AUTO | + GSWIP_MDIO_PHY_SPEED_AUTO | + GSWIP_MDIO_PHY_FDUP_AUTO | + GSWIP_MDIO_PHY_FCONTX_AUTO | + GSWIP_MDIO_PHY_FCONRX_AUTO | + (phydev->mdio.addr & GSWIP_MDIO_PHY_ADDR_MASK); + + gswip_mdio_w(priv, macconf, GSWIP_MDIO_PHYp(port)); + /* Activate MDIO auto polling */ + gswip_mdio_mask(priv, 0, BIT(port), GSWIP_MDIO_MDC_CFG0); + } + + return 0; +} + +static void gswip_port_disable(struct dsa_switch *ds, int port, + struct phy_device *phy) +{ + struct gswip_priv *priv = ds->priv; + + if (!dsa_is_cpu_port(ds, port)) { + gswip_mdio_mask(priv, GSWIP_MDIO_PHY_LINK_DOWN, + GSWIP_MDIO_PHY_LINK_MASK, + GSWIP_MDIO_PHYp(port)); + /* Deactivate MDIO auto polling */ + gswip_mdio_mask(priv, BIT(port), 0, GSWIP_MDIO_MDC_CFG0); + } + + gswip_switch_mask(priv, GSWIP_FDMA_PCTRL_EN, 0, + GSWIP_FDMA_PCTRLp(port)); + gswip_switch_mask(priv, GSWIP_SDMA_PCTRL_EN, 0, + GSWIP_SDMA_PCTRLp(port)); +} + +static int gswip_pce_load_microcode(struct gswip_priv *priv) +{ + int i; + int err; + + gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK | + GSWIP_PCE_TBL_CTRL_OPMOD_MASK, + GSWIP_PCE_TBL_CTRL_OPMOD_ADWR, GSWIP_PCE_TBL_CTRL); + gswip_switch_w(priv, 0, GSWIP_PCE_TBL_MASK); + + for (i = 0; i < ARRAY_SIZE(gswip_pce_microcode); i++) { + gswip_switch_w(priv, i, GSWIP_PCE_TBL_ADDR); + gswip_switch_w(priv, gswip_pce_microcode[i].val_0, + GSWIP_PCE_TBL_VAL(0)); + gswip_switch_w(priv, gswip_pce_microcode[i].val_1, + GSWIP_PCE_TBL_VAL(1)); + gswip_switch_w(priv, gswip_pce_microcode[i].val_2, + GSWIP_PCE_TBL_VAL(2)); + gswip_switch_w(priv, gswip_pce_microcode[i].val_3, + GSWIP_PCE_TBL_VAL(3)); + + /* start the table access: */ + gswip_switch_mask(priv, 0, GSWIP_PCE_TBL_CTRL_BAS, + GSWIP_PCE_TBL_CTRL); + err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL, + GSWIP_PCE_TBL_CTRL_BAS); + if (err) + return err; + } + + /* tell the switch that the microcode 
is loaded */ + gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_MC_VALID, + GSWIP_PCE_GCTRL_0); + + return 0; +} + +static int gswip_setup(struct dsa_switch *ds) +{ + struct gswip_priv *priv = ds->priv; + unsigned int cpu_port = priv->hw_info->cpu_port; + int i; + int err; + + gswip_switch_w(priv, GSWIP_SWRES_R0, GSWIP_SWRES); + usleep_range(5000, 10000); + gswip_switch_w(priv, 0, GSWIP_SWRES); + + /* disable port fetch/store dma on all ports */ + for (i = 0; i < priv->hw_info->max_ports; i++) + gswip_port_disable(ds, i, NULL); + + /* enable Switch */ + gswip_mdio_mask(priv, 0, GSWIP_MDIO_GLOB_ENABLE, GSWIP_MDIO_GLOB); + + err = gswip_pce_load_microcode(priv); + if (err) { + dev_err(priv->dev, "writing PCE microcode failed, %i", err); + return err; + } + + /* Default unknown Broadcast/Multicast/Unicast port maps */ + gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP1); + gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP2); + gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP3); + + /* disable PHY auto polling */ + gswip_mdio_w(priv, 0x0, GSWIP_MDIO_MDC_CFG0); + /* Configure the MDIO Clock 2.5 MHz */ + gswip_mdio_mask(priv, 0xff, 0x09, GSWIP_MDIO_MDC_CFG1); + + /* Disable the xMII link */ + gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, 0); + gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, 1); + gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, 5); + + /* enable special tag insertion on cpu port */ + gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_STEN, + GSWIP_FDMA_PCTRLp(cpu_port)); + + gswip_switch_mask(priv, 0, GSWIP_MAC_CTRL_2_MLEN, + GSWIP_MAC_CTRL_2p(cpu_port)); + gswip_switch_w(priv, VLAN_ETH_FRAME_LEN + 8, GSWIP_MAC_FLEN); + gswip_switch_mask(priv, 0, GSWIP_BM_QUEUE_GCTRL_GL_MOD, + GSWIP_BM_QUEUE_GCTRL); + + /* VLAN aware Switching */ + gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_VLAN, GSWIP_PCE_GCTRL_0); + + /* Mac Address Table Lock */ + gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_1_MAC_GLOCK | + GSWIP_PCE_GCTRL_1_MAC_GLOCK_MOD, + GSWIP_PCE_GCTRL_1); + + gswip_port_enable(ds, cpu_port, NULL); + return 0; +} + +static enum dsa_tag_protocol gswip_get_tag_protocol(struct dsa_switch *ds, + int port) +{ + return DSA_TAG_PROTO_GSWIP; +} + +static void gswip_phylink_validate(struct dsa_switch *ds, int port, + unsigned long *supported, + struct phylink_link_state *state) +{ + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; + + switch (port) { + case 0: + case 1: + if (!phy_interface_mode_is_rgmii(state->interface) && + state->interface != PHY_INTERFACE_MODE_MII && + state->interface != PHY_INTERFACE_MODE_REVMII && + state->interface != PHY_INTERFACE_MODE_RMII) + goto unsupported; + break; + case 2: + case 3: + case 4: + if (state->interface != PHY_INTERFACE_MODE_INTERNAL) + goto unsupported; + break; + case 5: + if (!phy_interface_mode_is_rgmii(state->interface) && + state->interface != PHY_INTERFACE_MODE_INTERNAL) + goto unsupported; + break; + default: + bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); + dev_err(ds->dev, "Unsupported port: %i\n", port); + return; + } + + /* Allow all the expected bits */ + phylink_set(mask, Autoneg); + phylink_set_port_modes(mask); + phylink_set(mask, Pause); + phylink_set(mask, Asym_Pause); + + /* With the exclusion of MII and Reverse MII, we support Gigabit, + * including Half duplex + */ + if (state->interface != PHY_INTERFACE_MODE_MII && + state->interface != PHY_INTERFACE_MODE_REVMII) { + phylink_set(mask, 1000baseT_Full); + phylink_set(mask, 1000baseT_Half); + } + + phylink_set(mask, 10baseT_Half); + phylink_set(mask, 10baseT_Full); + 
phylink_set(mask, 100baseT_Half); + phylink_set(mask, 100baseT_Full); + + bitmap_and(supported, supported, mask, + __ETHTOOL_LINK_MODE_MASK_NBITS); + bitmap_and(state->advertising, state->advertising, mask, + __ETHTOOL_LINK_MODE_MASK_NBITS); + return; + +unsupported: + bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); + dev_err(ds->dev, "Unsupported interface: %d\n", state->interface); + return; +} + +static void gswip_phylink_mac_config(struct dsa_switch *ds, int port, + unsigned int mode, + const struct phylink_link_state *state) +{ + struct gswip_priv *priv = ds->priv; + u32 miicfg = 0; + + miicfg |= GSWIP_MII_CFG_LDCLKDIS; + + switch (state->interface) { + case PHY_INTERFACE_MODE_MII: + case PHY_INTERFACE_MODE_INTERNAL: + miicfg |= GSWIP_MII_CFG_MODE_MIIM; + break; + case PHY_INTERFACE_MODE_REVMII: + miicfg |= GSWIP_MII_CFG_MODE_MIIP; + break; + case PHY_INTERFACE_MODE_RMII: + miicfg |= GSWIP_MII_CFG_MODE_RMIIM; + break; + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: + miicfg |= GSWIP_MII_CFG_MODE_RGMII; + break; + default: + dev_err(ds->dev, + "Unsupported interface: %d\n", state->interface); + return; + } + gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_MODE_MASK, miicfg, port); + + switch (state->interface) { + case PHY_INTERFACE_MODE_RGMII_ID: + gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_TXDLY_MASK | + GSWIP_MII_PCDU_RXDLY_MASK, 0, port); + break; + case PHY_INTERFACE_MODE_RGMII_RXID: + gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_RXDLY_MASK, 0, port); + break; + case PHY_INTERFACE_MODE_RGMII_TXID: + gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_TXDLY_MASK, 0, port); + break; + default: + break; + } +} + +static void gswip_phylink_mac_link_down(struct dsa_switch *ds, int port, + unsigned int mode, + phy_interface_t interface) +{ + struct gswip_priv *priv = ds->priv; + + gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, port); +} + +static void gswip_phylink_mac_link_up(struct dsa_switch *ds, int port, + unsigned int mode, + phy_interface_t interface, + struct phy_device *phydev) +{ + struct gswip_priv *priv = ds->priv; + + /* Enable the xMII interface only for the external PHY */ + if (interface != PHY_INTERFACE_MODE_INTERNAL) + gswip_mii_mask_cfg(priv, 0, GSWIP_MII_CFG_EN, port); +} + +static void gswip_get_strings(struct dsa_switch *ds, int port, u32 stringset, + uint8_t *data) +{ + int i; + + if (stringset != ETH_SS_STATS) + return; + + for (i = 0; i < ARRAY_SIZE(gswip_rmon_cnt); i++) + strncpy(data + i * ETH_GSTRING_LEN, gswip_rmon_cnt[i].name, + ETH_GSTRING_LEN); +} + +static u32 gswip_bcm_ram_entry_read(struct gswip_priv *priv, u32 table, + u32 index) +{ + u32 result; + int err; + + gswip_switch_w(priv, index, GSWIP_BM_RAM_ADDR); + gswip_switch_mask(priv, GSWIP_BM_RAM_CTRL_ADDR_MASK | + GSWIP_BM_RAM_CTRL_OPMOD, + table | GSWIP_BM_RAM_CTRL_BAS, + GSWIP_BM_RAM_CTRL); + + err = gswip_switch_r_timeout(priv, GSWIP_BM_RAM_CTRL, + GSWIP_BM_RAM_CTRL_BAS); + if (err) { + dev_err(priv->dev, "timeout while reading table: %u, index: %u", + table, index); + return 0; + } + + result = gswip_switch_r(priv, GSWIP_BM_RAM_VAL(0)); + result |= gswip_switch_r(priv, GSWIP_BM_RAM_VAL(1)) << 16; + + return result; +} + +static void gswip_get_ethtool_stats(struct dsa_switch *ds, int port, + uint64_t *data) +{ + struct gswip_priv *priv = ds->priv; + const struct gswip_rmon_cnt_desc *rmon_cnt; + int i; + u64 high; + + for (i = 0; i < ARRAY_SIZE(gswip_rmon_cnt); i++) { + rmon_cnt = &gswip_rmon_cnt[i]; + + data[i] = 
gswip_bcm_ram_entry_read(priv, port, + rmon_cnt->offset); + if (rmon_cnt->size == 2) { + high = gswip_bcm_ram_entry_read(priv, port, + rmon_cnt->offset + 1); + data[i] |= high << 32; + } + } +} + +static int gswip_get_sset_count(struct dsa_switch *ds, int port, int sset) +{ + if (sset != ETH_SS_STATS) + return 0; + + return ARRAY_SIZE(gswip_rmon_cnt); +} + +static const struct dsa_switch_ops gswip_switch_ops = { + .get_tag_protocol = gswip_get_tag_protocol, + .setup = gswip_setup, + .port_enable = gswip_port_enable, + .port_disable = gswip_port_disable, + .phylink_validate = gswip_phylink_validate, + .phylink_mac_config = gswip_phylink_mac_config, + .phylink_mac_link_down = gswip_phylink_mac_link_down, + .phylink_mac_link_up = gswip_phylink_mac_link_up, + .get_strings = gswip_get_strings, + .get_ethtool_stats = gswip_get_ethtool_stats, + .get_sset_count = gswip_get_sset_count, +}; + +static const struct xway_gphy_match_data xrx200a1x_gphy_data = { + .fe_firmware_name = "lantiq/xrx200_phy22f_a14.bin", + .ge_firmware_name = "lantiq/xrx200_phy11g_a14.bin", +}; + +static const struct xway_gphy_match_data xrx200a2x_gphy_data = { + .fe_firmware_name = "lantiq/xrx200_phy22f_a22.bin", + .ge_firmware_name = "lantiq/xrx200_phy11g_a22.bin", +}; + +static const struct xway_gphy_match_data xrx300_gphy_data = { + .fe_firmware_name = "lantiq/xrx300_phy22f_a21.bin", + .ge_firmware_name = "lantiq/xrx300_phy11g_a21.bin", +}; + +static const struct of_device_id xway_gphy_match[] = { + { .compatible = "lantiq,xrx200-gphy-fw", .data = NULL }, + { .compatible = "lantiq,xrx200a1x-gphy-fw", .data = &xrx200a1x_gphy_data }, + { .compatible = "lantiq,xrx200a2x-gphy-fw", .data = &xrx200a2x_gphy_data }, + { .compatible = "lantiq,xrx300-gphy-fw", .data = &xrx300_gphy_data }, + { .compatible = "lantiq,xrx330-gphy-fw", .data = &xrx300_gphy_data }, + {}, +}; + +static int gswip_gphy_fw_load(struct gswip_priv *priv, struct gswip_gphy_fw *gphy_fw) +{ + struct device *dev = priv->dev; + const struct firmware *fw; + void *fw_addr; + dma_addr_t dma_addr; + dma_addr_t dev_addr; + size_t size; + int ret; + + ret = clk_prepare_enable(gphy_fw->clk_gate); + if (ret) + return ret; + + reset_control_assert(gphy_fw->reset); + + ret = request_firmware(&fw, gphy_fw->fw_name, dev); + if (ret) { + dev_err(dev, "failed to load firmware: %s, error: %i\n", + gphy_fw->fw_name, ret); + return ret; + } + + /* GPHY cores need the firmware code in a persistent and contiguous + * memory area with a 16 kB boundary aligned start address. 
+ */ + size = fw->size + XRX200_GPHY_FW_ALIGN; + + fw_addr = dmam_alloc_coherent(dev, size, &dma_addr, GFP_KERNEL); + if (fw_addr) { + fw_addr = PTR_ALIGN(fw_addr, XRX200_GPHY_FW_ALIGN); + dev_addr = ALIGN(dma_addr, XRX200_GPHY_FW_ALIGN); + memcpy(fw_addr, fw->data, fw->size); + } else { + dev_err(dev, "failed to alloc firmware memory\n"); + release_firmware(fw); + return -ENOMEM; + } + + release_firmware(fw); + + ret = regmap_write(priv->rcu_regmap, gphy_fw->fw_addr_offset, dev_addr); + if (ret) + return ret; + + reset_control_deassert(gphy_fw->reset); + + return ret; +} + +static int gswip_gphy_fw_probe(struct gswip_priv *priv, + struct gswip_gphy_fw *gphy_fw, + struct device_node *gphy_fw_np, int i) +{ + struct device *dev = priv->dev; + u32 gphy_mode; + int ret; + char gphyname[10]; + + snprintf(gphyname, sizeof(gphyname), "gphy%d", i); + + gphy_fw->clk_gate = devm_clk_get(dev, gphyname); + if (IS_ERR(gphy_fw->clk_gate)) { + dev_err(dev, "Failed to lookup gate clock\n"); + return PTR_ERR(gphy_fw->clk_gate); + } + + ret = of_property_read_u32(gphy_fw_np, "reg", &gphy_fw->fw_addr_offset); + if (ret) + return ret; + + ret = of_property_read_u32(gphy_fw_np, "lantiq,gphy-mode", &gphy_mode); + /* Default to GE mode */ + if (ret) + gphy_mode = GPHY_MODE_GE; + + switch (gphy_mode) { + case GPHY_MODE_FE: + gphy_fw->fw_name = priv->gphy_fw_name_cfg->fe_firmware_name; + break; + case GPHY_MODE_GE: + gphy_fw->fw_name = priv->gphy_fw_name_cfg->ge_firmware_name; + break; + default: + dev_err(dev, "Unknown GPHY mode %d\n", gphy_mode); + return -EINVAL; + } + + gphy_fw->reset = of_reset_control_array_get_exclusive(gphy_fw_np); + if (IS_ERR(gphy_fw->reset)) { + if (PTR_ERR(gphy_fw->reset) != -EPROBE_DEFER) + dev_err(dev, "Failed to lookup gphy reset\n"); + return PTR_ERR(gphy_fw->reset); + } + + return gswip_gphy_fw_load(priv, gphy_fw); +} + +static void gswip_gphy_fw_remove(struct gswip_priv *priv, + struct gswip_gphy_fw *gphy_fw) +{ + int ret; + + /* check if the device was fully probed */ + if (!gphy_fw->fw_name) + return; + + ret = regmap_write(priv->rcu_regmap, gphy_fw->fw_addr_offset, 0); + if (ret) + dev_err(priv->dev, "can not reset GPHY FW pointer"); + + clk_disable_unprepare(gphy_fw->clk_gate); + + reset_control_put(gphy_fw->reset); +} + +static int gswip_gphy_fw_list(struct gswip_priv *priv, + struct device_node *gphy_fw_list_np, u32 version) +{ + struct device *dev = priv->dev; + struct device_node *gphy_fw_np; + const struct of_device_id *match; + int err; + int i = 0; + + /* The VRX200 rev 1.1 uses the GSWIP 2.0 and needs the older + * GPHY firmware. The VRX200 rev 1.2 uses the GSWIP 2.1 and also + * needs a different GPHY firmware. 
+ */ + if (of_device_is_compatible(gphy_fw_list_np, "lantiq,xrx200-gphy-fw")) { + switch (version) { + case GSWIP_VERSION_2_0: + priv->gphy_fw_name_cfg = &xrx200a1x_gphy_data; + break; + case GSWIP_VERSION_2_1: + priv->gphy_fw_name_cfg = &xrx200a2x_gphy_data; + break; + default: + dev_err(dev, "unknown GSWIP version: 0x%x", version); + return -ENOENT; + } + } + + match = of_match_node(xway_gphy_match, gphy_fw_list_np); + if (match && match->data) + priv->gphy_fw_name_cfg = match->data; + + if (!priv->gphy_fw_name_cfg) { + dev_err(dev, "GPHY compatible type not supported"); + return -ENOENT; + } + + priv->num_gphy_fw = of_get_available_child_count(gphy_fw_list_np); + if (!priv->num_gphy_fw) + return -ENOENT; + + priv->rcu_regmap = syscon_regmap_lookup_by_phandle(gphy_fw_list_np, + "lantiq,rcu"); + if (IS_ERR(priv->rcu_regmap)) + return PTR_ERR(priv->rcu_regmap); + + priv->gphy_fw = devm_kmalloc_array(dev, priv->num_gphy_fw, + sizeof(*priv->gphy_fw), + GFP_KERNEL | __GFP_ZERO); + if (!priv->gphy_fw) + return -ENOMEM; + + for_each_available_child_of_node(gphy_fw_list_np, gphy_fw_np) { + err = gswip_gphy_fw_probe(priv, &priv->gphy_fw[i], + gphy_fw_np, i); + if (err) + goto remove_gphy; + i++; + } + + return 0; + +remove_gphy: + for (i = 0; i < priv->num_gphy_fw; i++) + gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]); + return err; +} + +static int gswip_probe(struct platform_device *pdev) +{ + struct gswip_priv *priv; + struct resource *gswip_res, *mdio_res, *mii_res; + struct device_node *mdio_np, *gphy_fw_np; + struct device *dev = &pdev->dev; + int err; + int i; + u32 version; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + gswip_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + priv->gswip = devm_ioremap_resource(dev, gswip_res); + if (IS_ERR(priv->gswip)) + return PTR_ERR(priv->gswip); + + mdio_res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + priv->mdio = devm_ioremap_resource(dev, mdio_res); + if (IS_ERR(priv->mdio)) + return PTR_ERR(priv->mdio); + + mii_res = platform_get_resource(pdev, IORESOURCE_MEM, 2); + priv->mii = devm_ioremap_resource(dev, mii_res); + if (IS_ERR(priv->mii)) + return PTR_ERR(priv->mii); + + priv->hw_info = of_device_get_match_data(dev); + if (!priv->hw_info) + return -EINVAL; + + priv->ds = dsa_switch_alloc(dev, priv->hw_info->max_ports); + if (!priv->ds) + return -ENOMEM; + + priv->ds->priv = priv; + priv->ds->ops = &gswip_switch_ops; + priv->dev = dev; + version = gswip_switch_r(priv, GSWIP_VERSION); + + /* load the GPHY firmware */ + gphy_fw_np = of_find_compatible_node(pdev->dev.of_node, NULL, + "lantiq,gphy-fw"); + if (gphy_fw_np) { + err = gswip_gphy_fw_list(priv, gphy_fw_np, version); + if (err) { + dev_err(dev, "gphy fw probe failed\n"); + return err; + } + } + + /* bring up the mdio bus */ + mdio_np = of_find_compatible_node(pdev->dev.of_node, NULL, + "lantiq,xrx200-mdio"); + if (mdio_np) { + err = gswip_mdio(priv, mdio_np); + if (err) { + dev_err(dev, "mdio probe failed\n"); + goto gphy_fw; + } + } + + err = dsa_register_switch(priv->ds); + if (err) { + dev_err(dev, "dsa switch register failed: %i\n", err); + goto mdio_bus; + } + if (!dsa_is_cpu_port(priv->ds, priv->hw_info->cpu_port)) { + dev_err(dev, "wrong CPU port defined, HW only supports port: %i", + priv->hw_info->cpu_port); + err = -EINVAL; + goto mdio_bus; + } + + platform_set_drvdata(pdev, priv); + + dev_info(dev, "probed GSWIP version %lx mod %lx\n", + (version & GSWIP_VERSION_REV_MASK) >> GSWIP_VERSION_REV_SHIFT, + (version & 
GSWIP_VERSION_MOD_MASK) >> GSWIP_VERSION_MOD_SHIFT); + return 0; + +mdio_bus: + if (mdio_np) + mdiobus_unregister(priv->ds->slave_mii_bus); +gphy_fw: + for (i = 0; i < priv->num_gphy_fw; i++) + gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]); + return err; +} + +static int gswip_remove(struct platform_device *pdev) +{ + struct gswip_priv *priv = platform_get_drvdata(pdev); + int i; + + if (!priv) + return 0; + + /* disable the switch */ + gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB); + + dsa_unregister_switch(priv->ds); + + if (priv->ds->slave_mii_bus) + mdiobus_unregister(priv->ds->slave_mii_bus); + + for (i = 0; i < priv->num_gphy_fw; i++) + gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]); + + return 0; +} + +static const struct gswip_hw_info gswip_xrx200 = { + .max_ports = 7, + .cpu_port = 6, +}; + +static const struct of_device_id gswip_of_match[] = { + { .compatible = "lantiq,xrx200-gswip", .data = &gswip_xrx200 }, + {}, +}; +MODULE_DEVICE_TABLE(of, gswip_of_match); + +static struct platform_driver gswip_driver = { + .probe = gswip_probe, + .remove = gswip_remove, + .driver = { + .name = "gswip", + .of_match_table = gswip_of_match, + }, +}; + +module_platform_driver(gswip_driver); + +MODULE_AUTHOR("Hauke Mehrtens <hauke@hauke-m.de>"); +MODULE_DESCRIPTION("Lantiq / Intel GSWIP driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/dsa/lantiq_pce.h b/drivers/net/dsa/lantiq_pce.h new file mode 100644 index 000000000000..180663138e75 --- /dev/null +++ b/drivers/net/dsa/lantiq_pce.h @@ -0,0 +1,153 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PCE microcode extracted from UGW 7.1.1 switch api + * + * Copyright (c) 2012, 2014, 2015 Lantiq Deutschland GmbH + * Copyright (C) 2012 John Crispin <john@phrozen.org> + * Copyright (C) 2017 - 2018 Hauke Mehrtens <hauke@hauke-m.de> + */ + +enum { + OUT_MAC0 = 0, + OUT_MAC1, + OUT_MAC2, + OUT_MAC3, + OUT_MAC4, + OUT_MAC5, + OUT_ETHTYP, + OUT_VTAG0, + OUT_VTAG1, + OUT_ITAG0, + OUT_ITAG1, /*10 */ + OUT_ITAG2, + OUT_ITAG3, + OUT_IP0, + OUT_IP1, + OUT_IP2, + OUT_IP3, + OUT_SIP0, + OUT_SIP1, + OUT_SIP2, + OUT_SIP3, /*20*/ + OUT_SIP4, + OUT_SIP5, + OUT_SIP6, + OUT_SIP7, + OUT_DIP0, + OUT_DIP1, + OUT_DIP2, + OUT_DIP3, + OUT_DIP4, + OUT_DIP5, /*30*/ + OUT_DIP6, + OUT_DIP7, + OUT_SESID, + OUT_PROT, + OUT_APP0, + OUT_APP1, + OUT_IGMP0, + OUT_IGMP1, + OUT_IPOFF, /*39*/ + OUT_NONE = 63, +}; + +/* parser's microcode length type */ +#define INSTR 0 +#define IPV6 1 +#define LENACCU 2 + +/* parser's microcode flag type */ +enum { + FLAG_ITAG = 0, + FLAG_VLAN, + FLAG_SNAP, + FLAG_PPPOE, + FLAG_IPV6, + FLAG_IPV6FL, + FLAG_IPV4, + FLAG_IGMP, + FLAG_TU, + FLAG_HOP, + FLAG_NN1, /*10 */ + FLAG_NN2, + FLAG_END, + FLAG_NO, /*13*/ +}; + +struct gswip_pce_microcode { + u16 val_3; + u16 val_2; + u16 val_1; + u16 val_0; +}; + +#define MC_ENTRY(val, msk, ns, out, len, type, flags, ipv4_len) \ + { val, msk, ((ns) << 10 | (out) << 4 | (len) >> 1),\ + ((len) & 1) << 15 | (type) << 13 | (flags) << 9 | (ipv4_len) << 8 } +static const struct gswip_pce_microcode gswip_pce_microcode[] = { + /* value mask ns fields L type flags ipv4_len */ + MC_ENTRY(0x88c3, 0xFFFF, 1, OUT_ITAG0, 4, INSTR, FLAG_ITAG, 0), + MC_ENTRY(0x8100, 0xFFFF, 2, OUT_VTAG0, 2, INSTR, FLAG_VLAN, 0), + MC_ENTRY(0x88A8, 0xFFFF, 1, OUT_VTAG0, 2, INSTR, FLAG_VLAN, 0), + MC_ENTRY(0x8100, 0xFFFF, 1, OUT_VTAG0, 2, INSTR, FLAG_VLAN, 0), + MC_ENTRY(0x8864, 0xFFFF, 17, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0), + MC_ENTRY(0x0800, 0xFFFF, 21, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0), + MC_ENTRY(0x86DD, 0xFFFF, 22, 
OUT_ETHTYP, 1, INSTR, FLAG_NO, 0), + MC_ENTRY(0x8863, 0xFFFF, 16, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0), + MC_ENTRY(0x0000, 0xF800, 10, OUT_NONE, 0, INSTR, FLAG_NO, 0), + MC_ENTRY(0x0000, 0x0000, 40, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0), + MC_ENTRY(0x0600, 0x0600, 40, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0), + MC_ENTRY(0x0000, 0x0000, 12, OUT_NONE, 1, INSTR, FLAG_NO, 0), + MC_ENTRY(0xAAAA, 0xFFFF, 14, OUT_NONE, 1, INSTR, FLAG_NO, 0), + MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_NO, 0), + MC_ENTRY(0x0300, 0xFF00, 41, OUT_NONE, 0, INSTR, FLAG_SNAP, 0), + MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_NO, 0), + MC_ENTRY(0x0000, 0x0000, 41, OUT_DIP7, 3, INSTR, FLAG_NO, 0), + MC_ENTRY(0x0000, 0x0000, 18, OUT_DIP7, 3, INSTR, FLAG_PPPOE, 0), + MC_ENTRY(0x0021, 0xFFFF, 21, OUT_NONE, 1, INSTR, FLAG_NO, 0), + MC_ENTRY(0x0057, 0xFFFF, 22, OUT_NONE, 1, INSTR, FLAG_NO, 0), + MC_ENTRY(0x0000, 0x0000, 40, OUT_NONE, 0, INSTR, FLAG_NO, 0), + MC_ENTRY(0x4000, 0xF000, 24, OUT_IP0, 4, INSTR, FLAG_IPV4, 1), + MC_ENTRY(0x6000, 0xF000, 27, OUT_IP0, 3, INSTR, FLAG_IPV6, 0), + MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_NO, 0), + MC_ENTRY(0x0000, 0x0000, 25, OUT_IP3, 2, INSTR, FLAG_NO, 0), + MC_ENTRY(0x0000, 0x0000, 26, OUT_SIP0, 4, INSTR, FLAG_NO, 0), + MC_ENTRY(0x0000, 0x0000, 40, OUT_NONE, 0, LENACCU, FLAG_NO, 0), + MC_ENTRY(0x1100, 0xFF00, 39, OUT_PROT, 1, INSTR, FLAG_NO, 0), + MC_ENTRY(0x0600, 0xFF00, 39, OUT_PROT, 1, INSTR, FLAG_NO, 0), + MC_ENTRY(0x0000, 0xFF00, 33, OUT_IP3, 17, INSTR, FLAG_HOP, 0), + MC_ENTRY(0x2B00, 0xFF00, 33, OUT_IP3, 17, INSTR, FLAG_NN1, 0), + MC_ENTRY(0x3C00, 0xFF00, 33, OUT_IP3, 17, INSTR, FLAG_NN2, 0), + MC_ENTRY(0x0000, 0x0000, 39, OUT_PROT, 1, INSTR, FLAG_NO, 0), + MC_ENTRY(0x0000, 0x00E0, 35, OUT_NONE, 0, INSTR, FLAG_NO, 0), + MC_ENTRY(0x0000, 0x0000, 40, OUT_NONE, 0, INSTR, FLAG_NO, 0), + MC_ENTRY(0x0000, 0xFF00, 33, OUT_NONE, 0, IPV6, FLAG_HOP, 0), + MC_ENTRY(0x2B00, 0xFF00, 33, OUT_NONE, 0, IPV6, FLAG_NN1, 0), + MC_ENTRY(0x3C00, 0xFF00, 33, OUT_NONE, 0, IPV6, FLAG_NN2, 0), + MC_ENTRY(0x0000, 0x0000, 40, OUT_PROT, 1, IPV6, FLAG_NO, 0), + MC_ENTRY(0x0000, 0x0000, 40, OUT_SIP0, 16, INSTR, FLAG_NO, 0), + MC_ENTRY(0x0000, 0x0000, 41, OUT_APP0, 4, INSTR, FLAG_IGMP, 0), + MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0), + MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0), + MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0), + MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0), + MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0), + MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0), + MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0), + MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0), + MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0), + MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0), + MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0), + MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0), + MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0), + MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0), + MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0), + MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0), + MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0), + MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0), + MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0), + MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0), + MC_ENTRY(0x0000, 
0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0), + MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0), + MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0), +}; diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index 62e486652e62..a5de9bffe5be 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -658,11 +658,7 @@ static void mt7530_adjust_link(struct dsa_switch *ds, int port, if (phydev->asym_pause) rmt_adv |= LPA_PAUSE_ASYM; - if (phydev->advertising & ADVERTISED_Pause) - lcl_adv |= ADVERTISE_PAUSE_CAP; - if (phydev->advertising & ADVERTISED_Asym_Pause) - lcl_adv |= ADVERTISE_PAUSE_ASYM; - + lcl_adv = ethtool_adv_to_lcl_adv_t(phydev->advertising); flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv); if (flowctrl & FLOW_CTRL_TX) diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 8da3d39e3218..78ce820b5257 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -434,7 +434,7 @@ static int mv88e6xxx_g1_irq_setup(struct mv88e6xxx_chip *chip) err = request_threaded_irq(chip->irq, NULL, mv88e6xxx_g1_irq_thread_fn, - IRQF_ONESHOT, + IRQF_ONESHOT | IRQF_SHARED, dev_name(chip->dev), chip); if (err) mv88e6xxx_g1_irq_free_common(chip); @@ -575,6 +575,13 @@ restore_link: return err; } +static int mv88e6xxx_phy_is_internal(struct dsa_switch *ds, int port) +{ + struct mv88e6xxx_chip *chip = ds->priv; + + return port < chip->info->num_internal_phys; +} + /* We expect the switch to perform auto negotiation if there is a real * phy. However, in the case of a fixed link phy, we force the port * settings from the fixed link settings. @@ -585,7 +592,8 @@ static void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port, struct mv88e6xxx_chip *chip = ds->priv; int err; - if (!phy_is_pseudo_fixed_link(phydev)) + if (!phy_is_pseudo_fixed_link(phydev) && + mv88e6xxx_phy_is_internal(ds, port)) return; mutex_lock(&chip->reg_lock); @@ -709,13 +717,17 @@ static void mv88e6xxx_mac_config(struct dsa_switch *ds, int port, struct mv88e6xxx_chip *chip = ds->priv; int speed, duplex, link, pause, err; - if (mode == MLO_AN_PHY) + if ((mode == MLO_AN_PHY) && mv88e6xxx_phy_is_internal(ds, port)) return; if (mode == MLO_AN_FIXED) { link = LINK_FORCED_UP; speed = state->speed; duplex = state->duplex; + } else if (!mv88e6xxx_phy_is_internal(ds, port)) { + link = state->link; + speed = state->speed; + duplex = state->duplex; } else { speed = SPEED_UNFORCED; duplex = DUPLEX_UNFORCED; @@ -3160,6 +3172,8 @@ static const struct mv88e6xxx_ops mv88e6176_ops = { .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .serdes_power = mv88e6352_serdes_power, + .serdes_irq_setup = mv88e6352_serdes_irq_setup, + .serdes_irq_free = mv88e6352_serdes_irq_free, .gpio_ops = &mv88e6352_gpio_ops, .phylink_validate = mv88e6352_phylink_validate, }; @@ -3366,6 +3380,8 @@ static const struct mv88e6xxx_ops mv88e6240_ops = { .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .serdes_power = mv88e6352_serdes_power, + .serdes_irq_setup = mv88e6352_serdes_irq_setup, + .serdes_irq_free = mv88e6352_serdes_irq_free, .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6352_avb_ops, .ptp_ops = &mv88e6352_ptp_ops, @@ -3664,6 +3680,8 @@ static const struct mv88e6xxx_ops mv88e6352_ops = { .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .serdes_power = mv88e6352_serdes_power, + .serdes_irq_setup = mv88e6352_serdes_irq_setup, + .serdes_irq_free = 
mv88e6352_serdes_irq_free, .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6352_avb_ops, .ptp_ops = &mv88e6352_ptp_ops, diff --git a/drivers/net/dsa/mv88e6xxx/phy.c b/drivers/net/dsa/mv88e6xxx/phy.c index 46af8052e535..152a65d46e0b 100644 --- a/drivers/net/dsa/mv88e6xxx/phy.c +++ b/drivers/net/dsa/mv88e6xxx/phy.c @@ -110,6 +110,9 @@ int mv88e6xxx_phy_page_write(struct mv88e6xxx_chip *chip, int phy, err = mv88e6xxx_phy_page_get(chip, phy, page); if (!err) { err = mv88e6xxx_phy_write(chip, phy, MV88E6XXX_PHY_PAGE, page); + if (!err) + err = mv88e6xxx_phy_write(chip, phy, reg, val); + mv88e6xxx_phy_page_put(chip, phy); } diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c index e82983975754..bb69650ff772 100644 --- a/drivers/net/dsa/mv88e6xxx/serdes.c +++ b/drivers/net/dsa/mv88e6xxx/serdes.c @@ -185,6 +185,111 @@ int mv88e6352_serdes_get_stats(struct mv88e6xxx_chip *chip, int port, return ARRAY_SIZE(mv88e6352_serdes_hw_stats); } +static void mv88e6352_serdes_irq_link(struct mv88e6xxx_chip *chip, int port) +{ + struct dsa_switch *ds = chip->ds; + u16 status; + bool up; + + mv88e6352_serdes_read(chip, MII_BMSR, &status); + + /* Status must be read twice in order to give the current link + * status. Otherwise the change in link status since the last + * read of the register is returned. + */ + mv88e6352_serdes_read(chip, MII_BMSR, &status); + + up = status & BMSR_LSTATUS; + + dsa_port_phylink_mac_change(ds, port, up); +} + +static irqreturn_t mv88e6352_serdes_thread_fn(int irq, void *dev_id) +{ + struct mv88e6xxx_port *port = dev_id; + struct mv88e6xxx_chip *chip = port->chip; + irqreturn_t ret = IRQ_NONE; + u16 status; + int err; + + mutex_lock(&chip->reg_lock); + + err = mv88e6352_serdes_read(chip, MV88E6352_SERDES_INT_STATUS, &status); + if (err) + goto out; + + if (status & MV88E6352_SERDES_INT_LINK_CHANGE) { + ret = IRQ_HANDLED; + mv88e6352_serdes_irq_link(chip, port->port); + } +out: + mutex_unlock(&chip->reg_lock); + + return ret; +} + +static int mv88e6352_serdes_irq_enable(struct mv88e6xxx_chip *chip) +{ + return mv88e6352_serdes_write(chip, MV88E6352_SERDES_INT_ENABLE, + MV88E6352_SERDES_INT_LINK_CHANGE); +} + +static int mv88e6352_serdes_irq_disable(struct mv88e6xxx_chip *chip) +{ + return mv88e6352_serdes_write(chip, MV88E6352_SERDES_INT_ENABLE, 0); +} + +int mv88e6352_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port) +{ + int err; + + if (!mv88e6352_port_has_serdes(chip, port)) + return 0; + + chip->ports[port].serdes_irq = irq_find_mapping(chip->g2_irq.domain, + MV88E6352_SERDES_IRQ); + if (chip->ports[port].serdes_irq < 0) { + dev_err(chip->dev, "Unable to map SERDES irq: %d\n", + chip->ports[port].serdes_irq); + return chip->ports[port].serdes_irq; + } + + /* Requesting the IRQ will trigger irq callbacks. So we cannot + * hold the reg_lock. + */ + mutex_unlock(&chip->reg_lock); + err = request_threaded_irq(chip->ports[port].serdes_irq, NULL, + mv88e6352_serdes_thread_fn, + IRQF_ONESHOT, "mv88e6xxx-serdes", + &chip->ports[port]); + mutex_lock(&chip->reg_lock); + + if (err) { + dev_err(chip->dev, "Unable to request SERDES interrupt: %d\n", + err); + return err; + } + + return mv88e6352_serdes_irq_enable(chip); +} + +void mv88e6352_serdes_irq_free(struct mv88e6xxx_chip *chip, int port) +{ + if (!mv88e6352_port_has_serdes(chip, port)) + return; + + mv88e6352_serdes_irq_disable(chip); + + /* Freeing the IRQ will trigger irq callbacks. So we cannot + * hold the reg_lock. 
+ */ + mutex_unlock(&chip->reg_lock); + free_irq(chip->ports[port].serdes_irq, &chip->ports[port]); + mutex_lock(&chip->reg_lock); + + chip->ports[port].serdes_irq = 0; +} + /* Return the SERDES lane address a port is using. Only Ports 9 and 10 * have SERDES lanes. Returns -ENODEV if a port does not have a lane. */ diff --git a/drivers/net/dsa/mv88e6xxx/serdes.h b/drivers/net/dsa/mv88e6xxx/serdes.h index b1496de9c6fe..7870c5a9ef12 100644 --- a/drivers/net/dsa/mv88e6xxx/serdes.h +++ b/drivers/net/dsa/mv88e6xxx/serdes.h @@ -18,6 +18,19 @@ #define MV88E6352_ADDR_SERDES 0x0f #define MV88E6352_SERDES_PAGE_FIBER 0x01 +#define MV88E6352_SERDES_IRQ 0x0b +#define MV88E6352_SERDES_INT_ENABLE 0x12 +#define MV88E6352_SERDES_INT_SPEED_CHANGE BIT(14) +#define MV88E6352_SERDES_INT_DUPLEX_CHANGE BIT(13) +#define MV88E6352_SERDES_INT_PAGE_RX BIT(12) +#define MV88E6352_SERDES_INT_AN_COMPLETE BIT(11) +#define MV88E6352_SERDES_INT_LINK_CHANGE BIT(10) +#define MV88E6352_SERDES_INT_SYMBOL_ERROR BIT(9) +#define MV88E6352_SERDES_INT_FALSE_CARRIER BIT(8) +#define MV88E6352_SERDES_INT_FIFO_OVER_UNDER BIT(7) +#define MV88E6352_SERDES_INT_FIBRE_ENERGY BIT(4) +#define MV88E6352_SERDES_INT_STATUS 0x13 + #define MV88E6341_ADDR_SERDES 0x15 @@ -73,5 +86,8 @@ int mv88e6390_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, int lane); int mv88e6390_serdes_irq_disable(struct mv88e6xxx_chip *chip, int port, int lane); +int mv88e6352_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port); +void mv88e6352_serdes_irq_free(struct mv88e6xxx_chip *chip, int port); + #endif diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c index 2a0ddec1dd56..3dcc61821ed5 100644 --- a/drivers/net/ethernet/8390/ax88796.c +++ b/drivers/net/ethernet/8390/ax88796.c @@ -377,9 +377,7 @@ static int ax_mii_probe(struct net_device *dev) return ret; } - /* mask with MAC supported features */ - phy_dev->supported &= PHY_BASIC_FEATURES; - phy_dev->advertising = phy_dev->supported; + phy_set_max_speed(phy_dev, SPEED_100); netdev_info(dev, "PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", phy_dev->drv->name, phydev_name(phy_dev), phy_dev->irq); diff --git a/drivers/net/ethernet/8390/etherh.c b/drivers/net/ethernet/8390/etherh.c index 32e9627e3880..77191a281866 100644 --- a/drivers/net/ethernet/8390/etherh.c +++ b/drivers/net/ethernet/8390/etherh.c @@ -564,26 +564,29 @@ static void etherh_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *i sizeof(info->bus_info)); } -static int etherh_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int etherh_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { - cmd->supported = etherh_priv(dev)->supported; - ethtool_cmd_speed_set(cmd, SPEED_10); - cmd->duplex = DUPLEX_HALF; - cmd->port = dev->if_port == IF_PORT_10BASET ? PORT_TP : PORT_BNC; - cmd->autoneg = (dev->flags & IFF_AUTOMEDIA ? - AUTONEG_ENABLE : AUTONEG_DISABLE); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + etherh_priv(dev)->supported); + cmd->base.speed = SPEED_10; + cmd->base.duplex = DUPLEX_HALF; + cmd->base.port = dev->if_port == IF_PORT_10BASET ? PORT_TP : PORT_BNC; + cmd->base.autoneg = (dev->flags & IFF_AUTOMEDIA ? 
AUTONEG_ENABLE : + AUTONEG_DISABLE); return 0; } -static int etherh_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int etherh_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { - switch (cmd->autoneg) { + switch (cmd->base.autoneg) { case AUTONEG_ENABLE: dev->flags |= IFF_AUTOMEDIA; break; case AUTONEG_DISABLE: - switch (cmd->port) { + switch (cmd->base.port) { case PORT_TP: dev->if_port = IF_PORT_10BASET; break; @@ -622,12 +625,12 @@ static void etherh_set_msglevel(struct net_device *dev, u32 v) } static const struct ethtool_ops etherh_ethtool_ops = { - .get_settings = etherh_get_settings, - .set_settings = etherh_set_settings, - .get_drvinfo = etherh_get_drvinfo, - .get_ts_info = ethtool_op_get_ts_info, - .get_msglevel = etherh_get_msglevel, - .set_msglevel = etherh_set_msglevel, + .get_drvinfo = etherh_get_drvinfo, + .get_ts_info = ethtool_op_get_ts_info, + .get_msglevel = etherh_get_msglevel, + .set_msglevel = etherh_set_msglevel, + .get_link_ksettings = etherh_get_link_ksettings, + .set_link_ksettings = etherh_set_link_ksettings, }; static const struct net_device_ops etherh_netdev_ops = { diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 6fde68aa13a4..885e00d17807 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -108,6 +108,13 @@ config LANTIQ_ETOP ---help--- Support for the MII0 inside the Lantiq SoC +config LANTIQ_XRX200 + tristate "Lantiq / Intel xRX200 PMAC network driver" + depends on SOC_TYPE_XWAY + ---help--- + Support for the PMAC of the Gigabit switch (GSWIP) inside the + Lantiq / Intel VRX200 VDSL SoC + source "drivers/net/ethernet/marvell/Kconfig" source "drivers/net/ethernet/mediatek/Kconfig" source "drivers/net/ethernet/mellanox/Kconfig" diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index b45d5f626b59..7b5bf9682066 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -49,6 +49,7 @@ obj-$(CONFIG_NET_VENDOR_XSCALE) += xscale/ obj-$(CONFIG_JME) += jme.o obj-$(CONFIG_KORINA) += korina.o obj-$(CONFIG_LANTIQ_ETOP) += lantiq_etop.o +obj-$(CONFIG_LANTIQ_XRX200) += lantiq_xrx200.o obj-$(CONFIG_NET_VENDOR_MARVELL) += marvell/ obj-$(CONFIG_NET_VENDOR_MEDIATEK) += mediatek/ obj-$(CONFIG_NET_VENDOR_MELLANOX) += mellanox/ diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c index 4309be3724ad..7c9348a26cbb 100644 --- a/drivers/net/ethernet/aeroflex/greth.c +++ b/drivers/net/ethernet/aeroflex/greth.c @@ -1279,9 +1279,9 @@ static int greth_mdio_probe(struct net_device *dev) } if (greth->gbit_mac) - phy->supported &= PHY_GBIT_FEATURES; + phy_set_max_speed(phy, SPEED_1000); else - phy->supported &= PHY_BASIC_FEATURES; + phy_set_max_speed(phy, SPEED_100); phy->advertising = phy->supported; diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c index 48220b6c600d..ea34bcb868b5 100644 --- a/drivers/net/ethernet/agere/et131x.c +++ b/drivers/net/ethernet/agere/et131x.c @@ -3258,19 +3258,11 @@ static int et131x_mii_probe(struct net_device *netdev) return PTR_ERR(phydev); } - phydev->supported &= (SUPPORTED_10baseT_Half | - SUPPORTED_10baseT_Full | - SUPPORTED_100baseT_Half | - SUPPORTED_100baseT_Full | - SUPPORTED_Autoneg | - SUPPORTED_MII | - SUPPORTED_TP); + phy_set_max_speed(phydev, SPEED_100); if (adapter->pdev->device != ET131X_PCI_DEVICE_ID_FAST) - phydev->supported |= SUPPORTED_1000baseT_Half | - SUPPORTED_1000baseT_Full; + 
phy_set_max_speed(phydev, SPEED_1000); - phydev->advertising = phydev->supported; phydev->autoneg = AUTONEG_ENABLE; phy_attached_info(phydev); diff --git a/drivers/net/ethernet/alacritech/slic.h b/drivers/net/ethernet/alacritech/slic.h index d0c388cfd52f..3add305d34b4 100644 --- a/drivers/net/ethernet/alacritech/slic.h +++ b/drivers/net/ethernet/alacritech/slic.h @@ -8,7 +8,6 @@ #include <linux/spinlock_types.h> #include <linux/dma-mapping.h> #include <linux/pci.h> -#include <linux/netdevice.h> #include <linux/list.h> #include <linux/u64_stats_sync.h> diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c index 3143de45baaa..e1acafa82214 100644 --- a/drivers/net/ethernet/allwinner/sun4i-emac.c +++ b/drivers/net/ethernet/allwinner/sun4i-emac.c @@ -172,8 +172,7 @@ static int emac_mdio_probe(struct net_device *dev) } /* mask with MAC supported features */ - phydev->supported &= PHY_BASIC_FEATURES; - phydev->advertising = phydev->supported; + phy_set_max_speed(phydev, SPEED_100); db->link = 0; db->speed = 0; diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index baca8f704a45..02921d877c08 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c @@ -835,13 +835,10 @@ static int init_phy(struct net_device *dev) } /* Stop Advertising 1000BASE Capability if interface is not GMII - * Note: Checkpatch throws CHECKs for the camel case defines below, - * it's ok to ignore. */ if ((priv->phy_iface == PHY_INTERFACE_MODE_MII) || (priv->phy_iface == PHY_INTERFACE_MODE_RMII)) - phydev->advertising &= ~(SUPPORTED_1000baseT_Half | - SUPPORTED_1000baseT_Full); + phy_set_max_speed(phydev, SPEED_100); /* Broken HW is sometimes missing the pull-up resistor on the * MDIO line, which results in reads to non-existent devices returning diff --git a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h index 4532e574ebcd..9f80b73f90b1 100644 --- a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h +++ b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h @@ -32,115 +32,81 @@ #ifndef _ENA_ADMIN_H_ #define _ENA_ADMIN_H_ -enum ena_admin_aq_opcode { - ENA_ADMIN_CREATE_SQ = 1, - - ENA_ADMIN_DESTROY_SQ = 2, - - ENA_ADMIN_CREATE_CQ = 3, - - ENA_ADMIN_DESTROY_CQ = 4, - ENA_ADMIN_GET_FEATURE = 8, - - ENA_ADMIN_SET_FEATURE = 9, - - ENA_ADMIN_GET_STATS = 11, +enum ena_admin_aq_opcode { + ENA_ADMIN_CREATE_SQ = 1, + ENA_ADMIN_DESTROY_SQ = 2, + ENA_ADMIN_CREATE_CQ = 3, + ENA_ADMIN_DESTROY_CQ = 4, + ENA_ADMIN_GET_FEATURE = 8, + ENA_ADMIN_SET_FEATURE = 9, + ENA_ADMIN_GET_STATS = 11, }; enum ena_admin_aq_completion_status { - ENA_ADMIN_SUCCESS = 0, - - ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE = 1, - - ENA_ADMIN_BAD_OPCODE = 2, - - ENA_ADMIN_UNSUPPORTED_OPCODE = 3, - - ENA_ADMIN_MALFORMED_REQUEST = 4, - + ENA_ADMIN_SUCCESS = 0, + ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE = 1, + ENA_ADMIN_BAD_OPCODE = 2, + ENA_ADMIN_UNSUPPORTED_OPCODE = 3, + ENA_ADMIN_MALFORMED_REQUEST = 4, /* Additional status is provided in ACQ entry extended_status */ - ENA_ADMIN_ILLEGAL_PARAMETER = 5, - - ENA_ADMIN_UNKNOWN_ERROR = 6, + ENA_ADMIN_ILLEGAL_PARAMETER = 5, + ENA_ADMIN_UNKNOWN_ERROR = 6, + ENA_ADMIN_RESOURCE_BUSY = 7, }; enum ena_admin_aq_feature_id { - ENA_ADMIN_DEVICE_ATTRIBUTES = 1, - - ENA_ADMIN_MAX_QUEUES_NUM = 2, - - ENA_ADMIN_HW_HINTS = 3, - - ENA_ADMIN_RSS_HASH_FUNCTION = 10, - - ENA_ADMIN_STATELESS_OFFLOAD_CONFIG = 11, - - 
ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG = 12, - - ENA_ADMIN_MTU = 14, - - ENA_ADMIN_RSS_HASH_INPUT = 18, - - ENA_ADMIN_INTERRUPT_MODERATION = 20, - - ENA_ADMIN_AENQ_CONFIG = 26, - - ENA_ADMIN_LINK_CONFIG = 27, - - ENA_ADMIN_HOST_ATTR_CONFIG = 28, - - ENA_ADMIN_FEATURES_OPCODE_NUM = 32, + ENA_ADMIN_DEVICE_ATTRIBUTES = 1, + ENA_ADMIN_MAX_QUEUES_NUM = 2, + ENA_ADMIN_HW_HINTS = 3, + ENA_ADMIN_LLQ = 4, + ENA_ADMIN_RSS_HASH_FUNCTION = 10, + ENA_ADMIN_STATELESS_OFFLOAD_CONFIG = 11, + ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG = 12, + ENA_ADMIN_MTU = 14, + ENA_ADMIN_RSS_HASH_INPUT = 18, + ENA_ADMIN_INTERRUPT_MODERATION = 20, + ENA_ADMIN_AENQ_CONFIG = 26, + ENA_ADMIN_LINK_CONFIG = 27, + ENA_ADMIN_HOST_ATTR_CONFIG = 28, + ENA_ADMIN_FEATURES_OPCODE_NUM = 32, }; enum ena_admin_placement_policy_type { /* descriptors and headers are in host memory */ - ENA_ADMIN_PLACEMENT_POLICY_HOST = 1, - + ENA_ADMIN_PLACEMENT_POLICY_HOST = 1, /* descriptors and headers are in device memory (a.k.a Low Latency * Queue) */ - ENA_ADMIN_PLACEMENT_POLICY_DEV = 3, + ENA_ADMIN_PLACEMENT_POLICY_DEV = 3, }; enum ena_admin_link_types { - ENA_ADMIN_LINK_SPEED_1G = 0x1, - - ENA_ADMIN_LINK_SPEED_2_HALF_G = 0x2, - - ENA_ADMIN_LINK_SPEED_5G = 0x4, - - ENA_ADMIN_LINK_SPEED_10G = 0x8, - - ENA_ADMIN_LINK_SPEED_25G = 0x10, - - ENA_ADMIN_LINK_SPEED_40G = 0x20, - - ENA_ADMIN_LINK_SPEED_50G = 0x40, - - ENA_ADMIN_LINK_SPEED_100G = 0x80, - - ENA_ADMIN_LINK_SPEED_200G = 0x100, - - ENA_ADMIN_LINK_SPEED_400G = 0x200, + ENA_ADMIN_LINK_SPEED_1G = 0x1, + ENA_ADMIN_LINK_SPEED_2_HALF_G = 0x2, + ENA_ADMIN_LINK_SPEED_5G = 0x4, + ENA_ADMIN_LINK_SPEED_10G = 0x8, + ENA_ADMIN_LINK_SPEED_25G = 0x10, + ENA_ADMIN_LINK_SPEED_40G = 0x20, + ENA_ADMIN_LINK_SPEED_50G = 0x40, + ENA_ADMIN_LINK_SPEED_100G = 0x80, + ENA_ADMIN_LINK_SPEED_200G = 0x100, + ENA_ADMIN_LINK_SPEED_400G = 0x200, }; enum ena_admin_completion_policy_type { /* completion queue entry for each sq descriptor */ - ENA_ADMIN_COMPLETION_POLICY_DESC = 0, - + ENA_ADMIN_COMPLETION_POLICY_DESC = 0, /* completion queue entry upon request in sq descriptor */ - ENA_ADMIN_COMPLETION_POLICY_DESC_ON_DEMAND = 1, - + ENA_ADMIN_COMPLETION_POLICY_DESC_ON_DEMAND = 1, /* current queue head pointer is updated in OS memory upon sq * descriptor request */ - ENA_ADMIN_COMPLETION_POLICY_HEAD_ON_DEMAND = 2, - + ENA_ADMIN_COMPLETION_POLICY_HEAD_ON_DEMAND = 2, /* current queue head pointer is updated in OS memory for each sq * descriptor */ - ENA_ADMIN_COMPLETION_POLICY_HEAD = 3, + ENA_ADMIN_COMPLETION_POLICY_HEAD = 3, }; /* basic stats return ena_admin_basic_stats while extanded stats return a @@ -148,15 +114,13 @@ enum ena_admin_completion_policy_type { * device id */ enum ena_admin_get_stats_type { - ENA_ADMIN_GET_STATS_TYPE_BASIC = 0, - - ENA_ADMIN_GET_STATS_TYPE_EXTENDED = 1, + ENA_ADMIN_GET_STATS_TYPE_BASIC = 0, + ENA_ADMIN_GET_STATS_TYPE_EXTENDED = 1, }; enum ena_admin_get_stats_scope { - ENA_ADMIN_SPECIFIC_QUEUE = 0, - - ENA_ADMIN_ETH_TRAFFIC = 1, + ENA_ADMIN_SPECIFIC_QUEUE = 0, + ENA_ADMIN_ETH_TRAFFIC = 1, }; struct ena_admin_aq_common_desc { @@ -227,7 +191,9 @@ struct ena_admin_acq_common_desc { u16 extended_status; - /* serves as a hint what AQ entries can be revoked */ + /* indicates to the driver which AQ entry has been consumed by the + * device and could be reused + */ u16 sq_head_indx; }; @@ -296,9 +262,8 @@ struct ena_admin_aq_create_sq_cmd { }; enum ena_admin_sq_direction { - ENA_ADMIN_SQ_DIRECTION_TX = 1, - - ENA_ADMIN_SQ_DIRECTION_RX = 2, + ENA_ADMIN_SQ_DIRECTION_TX = 1, + ENA_ADMIN_SQ_DIRECTION_RX = 2, }; 
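Note that the ENA_ADMIN_LINK_SPEED_* values above are one-hot bit flags rather than consecutive enumerators, so a device can advertise several speeds in a single word. A minimal sketch of decoding such a bitmask into a speed in Mbits/sec; the helper name is hypothetical and not part of this patch, and it assumes only the enum shown above:

static u32 ena_example_link_speed_mbps(u32 speed_flags)
{
	/* Report the highest advertised speed; each flag is a single bit. */
	if (speed_flags & ENA_ADMIN_LINK_SPEED_400G)
		return 400000;
	if (speed_flags & ENA_ADMIN_LINK_SPEED_200G)
		return 200000;
	if (speed_flags & ENA_ADMIN_LINK_SPEED_100G)
		return 100000;
	if (speed_flags & ENA_ADMIN_LINK_SPEED_50G)
		return 50000;
	if (speed_flags & ENA_ADMIN_LINK_SPEED_40G)
		return 40000;
	if (speed_flags & ENA_ADMIN_LINK_SPEED_25G)
		return 25000;
	if (speed_flags & ENA_ADMIN_LINK_SPEED_10G)
		return 10000;
	if (speed_flags & ENA_ADMIN_LINK_SPEED_5G)
		return 5000;
	if (speed_flags & ENA_ADMIN_LINK_SPEED_2_HALF_G)
		return 2500;
	if (speed_flags & ENA_ADMIN_LINK_SPEED_1G)
		return 1000;
	return 0; /* link speed unknown */
}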
struct ena_admin_acq_create_sq_resp_desc {
@@ -483,8 +448,85 @@ struct ena_admin_device_attr_feature_desc {
 	u32 max_mtu;
 };
 
+enum ena_admin_llq_header_location {
+	/* header is in descriptor list */
+	ENA_ADMIN_INLINE_HEADER = 1,
+	/* header in a separate ring, implies 16B descriptor list entry */
+	ENA_ADMIN_HEADER_RING = 2,
+};
+
+enum ena_admin_llq_ring_entry_size {
+	ENA_ADMIN_LIST_ENTRY_SIZE_128B = 1,
+	ENA_ADMIN_LIST_ENTRY_SIZE_192B = 2,
+	ENA_ADMIN_LIST_ENTRY_SIZE_256B = 4,
+};
+
+enum ena_admin_llq_num_descs_before_header {
+	ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_0 = 0,
+	ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1 = 1,
+	ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2 = 2,
+	ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4 = 4,
+	ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8 = 8,
+};
+
+/* packet descriptor list entry always starts with one or more descriptors,
+ * followed by a header. The rest of the descriptors are located in the
+ * beginning of the subsequent entry. Stride refers to how the rest of the
+ * descriptors are placed. This field is relevant only for inline header
+ * mode
+ */
+enum ena_admin_llq_stride_ctrl {
+	ENA_ADMIN_SINGLE_DESC_PER_ENTRY = 1,
+	ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY = 2,
+};
+
+struct ena_admin_feature_llq_desc {
+	u32 max_llq_num;
+
+	u32 max_llq_depth;
+
+	/* specify the header locations the device supports. bitfield of
+	 * enum ena_admin_llq_header_location.
+	 */
+	u16 header_location_ctrl_supported;
+
+	/* the header location the driver selected to use. */
+	u16 header_location_ctrl_enabled;
+
+	/* if inline header is specified - this is the size of descriptor
+	 * list entry. If header in a separate ring is specified - this is
+	 * the size of header ring entry. bitfield of enum
+	 * ena_admin_llq_ring_entry_size. specify the entry sizes the device
+	 * supports
+	 */
+	u16 entry_size_ctrl_supported;
+
+	/* the entry size the driver selected to use. */
+	u16 entry_size_ctrl_enabled;
+
+	/* valid only if inline header is specified. First entry associated
+	 * with the packet includes descriptors and header. The rest of the
+	 * entries are occupied by descriptors. This parameter defines the max
+	 * number of descriptors preceding the header in the first entry.
+	 * The field is a bitfield of enum
+	 * ena_admin_llq_num_descs_before_header and specifies the values the
+	 * device supports
+	 */
+	u16 desc_num_before_header_supported;
+
+	/* the desired value the driver selected to use */
+	u16 desc_num_before_header_enabled;
+
+	/* valid only if inline was chosen.
bitfield of enum + * ena_admin_llq_stride_ctrl + */ + u16 descriptors_stride_ctrl_supported; + + /* the stride control the driver selected to use */ + u16 descriptors_stride_ctrl_enabled; +}; + struct ena_admin_queue_feature_desc { - /* including LLQs */ u32 max_sq_num; u32 max_sq_depth; @@ -493,9 +535,9 @@ struct ena_admin_queue_feature_desc { u32 max_cq_depth; - u32 max_llq_num; + u32 max_legacy_llq_num; - u32 max_llq_depth; + u32 max_legacy_llq_depth; u32 max_header_size; @@ -583,9 +625,8 @@ struct ena_admin_feature_offload_desc { }; enum ena_admin_hash_functions { - ENA_ADMIN_TOEPLITZ = 1, - - ENA_ADMIN_CRC32 = 2, + ENA_ADMIN_TOEPLITZ = 1, + ENA_ADMIN_CRC32 = 2, }; struct ena_admin_feature_rss_flow_hash_control { @@ -611,50 +652,35 @@ struct ena_admin_feature_rss_flow_hash_function { /* RSS flow hash protocols */ enum ena_admin_flow_hash_proto { - ENA_ADMIN_RSS_TCP4 = 0, - - ENA_ADMIN_RSS_UDP4 = 1, - - ENA_ADMIN_RSS_TCP6 = 2, - - ENA_ADMIN_RSS_UDP6 = 3, - - ENA_ADMIN_RSS_IP4 = 4, - - ENA_ADMIN_RSS_IP6 = 5, - - ENA_ADMIN_RSS_IP4_FRAG = 6, - - ENA_ADMIN_RSS_NOT_IP = 7, - + ENA_ADMIN_RSS_TCP4 = 0, + ENA_ADMIN_RSS_UDP4 = 1, + ENA_ADMIN_RSS_TCP6 = 2, + ENA_ADMIN_RSS_UDP6 = 3, + ENA_ADMIN_RSS_IP4 = 4, + ENA_ADMIN_RSS_IP6 = 5, + ENA_ADMIN_RSS_IP4_FRAG = 6, + ENA_ADMIN_RSS_NOT_IP = 7, /* TCPv6 with extension header */ - ENA_ADMIN_RSS_TCP6_EX = 8, - + ENA_ADMIN_RSS_TCP6_EX = 8, /* IPv6 with extension header */ - ENA_ADMIN_RSS_IP6_EX = 9, - - ENA_ADMIN_RSS_PROTO_NUM = 16, + ENA_ADMIN_RSS_IP6_EX = 9, + ENA_ADMIN_RSS_PROTO_NUM = 16, }; /* RSS flow hash fields */ enum ena_admin_flow_hash_fields { /* Ethernet Dest Addr */ - ENA_ADMIN_RSS_L2_DA = BIT(0), - + ENA_ADMIN_RSS_L2_DA = BIT(0), /* Ethernet Src Addr */ - ENA_ADMIN_RSS_L2_SA = BIT(1), - + ENA_ADMIN_RSS_L2_SA = BIT(1), /* ipv4/6 Dest Addr */ - ENA_ADMIN_RSS_L3_DA = BIT(2), - + ENA_ADMIN_RSS_L3_DA = BIT(2), /* ipv4/6 Src Addr */ - ENA_ADMIN_RSS_L3_SA = BIT(3), - + ENA_ADMIN_RSS_L3_SA = BIT(3), /* tcp/udp Dest Port */ - ENA_ADMIN_RSS_L4_DP = BIT(4), - + ENA_ADMIN_RSS_L4_DP = BIT(4), /* tcp/udp Src Port */ - ENA_ADMIN_RSS_L4_SP = BIT(5), + ENA_ADMIN_RSS_L4_SP = BIT(5), }; struct ena_admin_proto_input { @@ -693,15 +719,13 @@ struct ena_admin_feature_rss_flow_hash_input { }; enum ena_admin_os_type { - ENA_ADMIN_OS_LINUX = 1, - - ENA_ADMIN_OS_WIN = 2, - - ENA_ADMIN_OS_DPDK = 3, - - ENA_ADMIN_OS_FREEBSD = 4, - - ENA_ADMIN_OS_IPXE = 5, + ENA_ADMIN_OS_LINUX = 1, + ENA_ADMIN_OS_WIN = 2, + ENA_ADMIN_OS_DPDK = 3, + ENA_ADMIN_OS_FREEBSD = 4, + ENA_ADMIN_OS_IPXE = 5, + ENA_ADMIN_OS_ESXI = 6, + ENA_ADMIN_OS_GROUPS_NUM = 6, }; struct ena_admin_host_info { @@ -723,11 +747,27 @@ struct ena_admin_host_info { /* 7:0 : major * 15:8 : minor * 23:16 : sub_minor + * 31:24 : module_type */ u32 driver_version; /* features bitmap */ - u32 supported_network_features[4]; + u32 supported_network_features[2]; + + /* ENA spec version of driver */ + u16 ena_spec_version; + + /* ENA device's Bus, Device and Function + * 2:0 : function + * 7:3 : device + * 15:8 : bus + */ + u16 bdf; + + /* Number of CPUs */ + u16 num_cpus; + + u16 reserved; }; struct ena_admin_rss_ind_table_entry { @@ -800,6 +840,8 @@ struct ena_admin_get_feat_resp { struct ena_admin_device_attr_feature_desc dev_attr; + struct ena_admin_feature_llq_desc llq; + struct ena_admin_queue_feature_desc max_queue; struct ena_admin_feature_aenq_desc aenq; @@ -847,6 +889,9 @@ struct ena_admin_set_feat_cmd { /* rss indirection table */ struct ena_admin_feature_rss_ind_table ind_table; + + /* LLQ configuration */ + struct 
ena_admin_feature_llq_desc llq; } u; }; @@ -875,25 +920,18 @@ struct ena_admin_aenq_common_desc { /* asynchronous event notification groups */ enum ena_admin_aenq_group { - ENA_ADMIN_LINK_CHANGE = 0, - - ENA_ADMIN_FATAL_ERROR = 1, - - ENA_ADMIN_WARNING = 2, - - ENA_ADMIN_NOTIFICATION = 3, - - ENA_ADMIN_KEEP_ALIVE = 4, - - ENA_ADMIN_AENQ_GROUPS_NUM = 5, + ENA_ADMIN_LINK_CHANGE = 0, + ENA_ADMIN_FATAL_ERROR = 1, + ENA_ADMIN_WARNING = 2, + ENA_ADMIN_NOTIFICATION = 3, + ENA_ADMIN_KEEP_ALIVE = 4, + ENA_ADMIN_AENQ_GROUPS_NUM = 5, }; enum ena_admin_aenq_notification_syndrom { - ENA_ADMIN_SUSPEND = 0, - - ENA_ADMIN_RESUME = 1, - - ENA_ADMIN_UPDATE_HINTS = 2, + ENA_ADMIN_SUSPEND = 0, + ENA_ADMIN_RESUME = 1, + ENA_ADMIN_UPDATE_HINTS = 2, }; struct ena_admin_aenq_entry { @@ -928,27 +966,27 @@ struct ena_admin_ena_mmio_req_read_less_resp { }; /* aq_common_desc */ -#define ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0) -#define ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK BIT(0) -#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT 1 -#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK BIT(1) -#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT 2 -#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK BIT(2) +#define ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0) +#define ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK BIT(0) +#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT 1 +#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK BIT(1) +#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT 2 +#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK BIT(2) /* sq */ -#define ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT 5 -#define ENA_ADMIN_SQ_SQ_DIRECTION_MASK GENMASK(7, 5) +#define ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT 5 +#define ENA_ADMIN_SQ_SQ_DIRECTION_MASK GENMASK(7, 5) /* acq_common_desc */ -#define ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0) -#define ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK BIT(0) +#define ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0) +#define ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK BIT(0) /* aq_create_sq_cmd */ -#define ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT 5 -#define ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK GENMASK(7, 5) -#define ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK GENMASK(3, 0) -#define ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT 4 -#define ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK GENMASK(6, 4) +#define ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT 5 +#define ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK GENMASK(7, 5) +#define ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK GENMASK(3, 0) +#define ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT 4 +#define ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK GENMASK(6, 4) #define ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK BIT(0) /* aq_create_cq_cmd */ @@ -957,12 +995,12 @@ struct ena_admin_ena_mmio_req_read_less_resp { #define ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK GENMASK(4, 0) /* get_set_feature_common_desc */ -#define ENA_ADMIN_GET_SET_FEATURE_COMMON_DESC_SELECT_MASK GENMASK(1, 0) +#define ENA_ADMIN_GET_SET_FEATURE_COMMON_DESC_SELECT_MASK GENMASK(1, 0) /* get_feature_link_desc */ -#define ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK BIT(0) -#define ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_SHIFT 1 -#define ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_MASK BIT(1) +#define ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK BIT(0) +#define ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_SHIFT 1 +#define ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_MASK BIT(1) /* feature_offload_desc */ #define 
ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK BIT(0) @@ -974,19 +1012,19 @@ struct ena_admin_ena_mmio_req_read_less_resp { #define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK BIT(3) #define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_SHIFT 4 #define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK BIT(4) -#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_SHIFT 5 -#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK BIT(5) -#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_SHIFT 6 -#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK BIT(6) -#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_SHIFT 7 -#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK BIT(7) +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_SHIFT 5 +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK BIT(5) +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_SHIFT 6 +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK BIT(6) +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_SHIFT 7 +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK BIT(7) #define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK BIT(0) #define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_SHIFT 1 #define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK BIT(1) #define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_SHIFT 2 #define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK BIT(2) -#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_SHIFT 3 -#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK BIT(3) +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_SHIFT 3 +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK BIT(3) /* feature_rss_flow_hash_function */ #define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_FUNCS_MASK GENMASK(7, 0) @@ -994,25 +1032,32 @@ struct ena_admin_ena_mmio_req_read_less_resp { /* feature_rss_flow_hash_input */ #define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_SHIFT 1 -#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK BIT(1) +#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK BIT(1) #define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_SHIFT 2 -#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK BIT(2) +#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK BIT(2) #define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_SHIFT 1 #define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_MASK BIT(1) #define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_SHIFT 2 #define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_MASK BIT(2) /* host_info */ -#define ENA_ADMIN_HOST_INFO_MAJOR_MASK GENMASK(7, 0) -#define ENA_ADMIN_HOST_INFO_MINOR_SHIFT 8 -#define ENA_ADMIN_HOST_INFO_MINOR_MASK GENMASK(15, 8) -#define ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT 16 -#define ENA_ADMIN_HOST_INFO_SUB_MINOR_MASK GENMASK(23, 16) +#define ENA_ADMIN_HOST_INFO_MAJOR_MASK GENMASK(7, 0) +#define ENA_ADMIN_HOST_INFO_MINOR_SHIFT 8 +#define ENA_ADMIN_HOST_INFO_MINOR_MASK GENMASK(15, 8) +#define ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT 16 +#define ENA_ADMIN_HOST_INFO_SUB_MINOR_MASK GENMASK(23, 16) +#define ENA_ADMIN_HOST_INFO_MODULE_TYPE_SHIFT 24 +#define ENA_ADMIN_HOST_INFO_MODULE_TYPE_MASK GENMASK(31, 24) +#define ENA_ADMIN_HOST_INFO_FUNCTION_MASK GENMASK(2, 0) +#define ENA_ADMIN_HOST_INFO_DEVICE_SHIFT 3 +#define ENA_ADMIN_HOST_INFO_DEVICE_MASK GENMASK(7, 3) +#define ENA_ADMIN_HOST_INFO_BUS_SHIFT 8 +#define ENA_ADMIN_HOST_INFO_BUS_MASK GENMASK(15, 8) /* aenq_common_desc */ -#define ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK BIT(0) +#define ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK BIT(0) /* aenq_link_change_desc */ 
-#define ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK BIT(0) +#define ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK BIT(0) #endif /*_ENA_ADMIN_H_ */ diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c index 7635c38e77dd..420cede41ca4 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_com.c @@ -41,9 +41,6 @@ #define ENA_ASYNC_QUEUE_DEPTH 16 #define ENA_ADMIN_QUEUE_DEPTH 32 -#define MIN_ENA_VER (((ENA_COMMON_SPEC_VERSION_MAJOR) << \ - ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) \ - | (ENA_COMMON_SPEC_VERSION_MINOR)) #define ENA_CTRL_MAJOR 0 #define ENA_CTRL_MINOR 0 @@ -61,6 +58,8 @@ #define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF +#define ENA_COM_BOUNCE_BUFFER_CNTRL_CNT 4 + #define ENA_REGS_ADMIN_INTR_MASK 1 #define ENA_POLL_MS 5 @@ -236,7 +235,7 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queu tail_masked = admin_queue->sq.tail & queue_size_mask; /* In case of queue FULL */ - cnt = atomic_read(&admin_queue->outstanding_cmds); + cnt = (u16)atomic_read(&admin_queue->outstanding_cmds); if (cnt >= admin_queue->q_depth) { pr_debug("admin queue is full.\n"); admin_queue->stats.out_of_space++; @@ -305,7 +304,7 @@ static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue struct ena_admin_acq_entry *comp, size_t comp_size_in_bytes) { - unsigned long flags; + unsigned long flags = 0; struct ena_comp_ctx *comp_ctx; spin_lock_irqsave(&admin_queue->q_lock, flags); @@ -333,7 +332,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev, memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr)); - io_sq->dma_addr_bits = ena_dev->dma_addr_bits; + io_sq->dma_addr_bits = (u8)ena_dev->dma_addr_bits; io_sq->desc_entry_size = (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ? 
sizeof(struct ena_eth_io_tx_desc) : @@ -355,21 +354,48 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev, &io_sq->desc_addr.phys_addr, GFP_KERNEL); } - } else { + + if (!io_sq->desc_addr.virt_addr) { + pr_err("memory allocation failed"); + return -ENOMEM; + } + } + + if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { + /* Allocate bounce buffers */ + io_sq->bounce_buf_ctrl.buffer_size = + ena_dev->llq_info.desc_list_entry_size; + io_sq->bounce_buf_ctrl.buffers_num = + ENA_COM_BOUNCE_BUFFER_CNTRL_CNT; + io_sq->bounce_buf_ctrl.next_to_use = 0; + + size = io_sq->bounce_buf_ctrl.buffer_size * + io_sq->bounce_buf_ctrl.buffers_num; + dev_node = dev_to_node(ena_dev->dmadev); set_dev_node(ena_dev->dmadev, ctx->numa_node); - io_sq->desc_addr.virt_addr = + io_sq->bounce_buf_ctrl.base_buffer = devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL); set_dev_node(ena_dev->dmadev, dev_node); - if (!io_sq->desc_addr.virt_addr) { - io_sq->desc_addr.virt_addr = + if (!io_sq->bounce_buf_ctrl.base_buffer) + io_sq->bounce_buf_ctrl.base_buffer = devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL); + + if (!io_sq->bounce_buf_ctrl.base_buffer) { + pr_err("bounce buffer memory allocation failed"); + return -ENOMEM; } - } - if (!io_sq->desc_addr.virt_addr) { - pr_err("memory allocation failed"); - return -ENOMEM; + memcpy(&io_sq->llq_info, &ena_dev->llq_info, + sizeof(io_sq->llq_info)); + + /* Initiate the first bounce buffer */ + io_sq->llq_buf_ctrl.curr_bounce_buf = + ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl); + memset(io_sq->llq_buf_ctrl.curr_bounce_buf, + 0x0, io_sq->llq_info.desc_list_entry_size); + io_sq->llq_buf_ctrl.descs_left_in_line = + io_sq->llq_info.descs_num_before_header; } io_sq->tail = 0; @@ -460,7 +486,7 @@ static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_qu /* Go over all the completions */ while ((READ_ONCE(cqe->acq_common_descriptor.flags) & - ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) { + ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) { /* Do not read the rest of the completion entry before the * phase bit was validated */ @@ -511,7 +537,8 @@ static int ena_com_comp_status_to_errno(u8 comp_status) static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx, struct ena_com_admin_queue *admin_queue) { - unsigned long flags, timeout; + unsigned long flags = 0; + unsigned long timeout; int ret; timeout = jiffies + usecs_to_jiffies(admin_queue->completion_timeout); @@ -557,10 +584,160 @@ err: return ret; } +/** + * Set the LLQ configurations of the firmware + * + * The driver provides only the enabled feature values to the device, + * which in turn, checks if they are supported. 
+ */ +static int ena_com_set_llq(struct ena_com_dev *ena_dev) +{ + struct ena_com_admin_queue *admin_queue; + struct ena_admin_set_feat_cmd cmd; + struct ena_admin_set_feat_resp resp; + struct ena_com_llq_info *llq_info = &ena_dev->llq_info; + int ret; + + memset(&cmd, 0x0, sizeof(cmd)); + admin_queue = &ena_dev->admin_queue; + + cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; + cmd.feat_common.feature_id = ENA_ADMIN_LLQ; + + cmd.u.llq.header_location_ctrl_enabled = llq_info->header_location_ctrl; + cmd.u.llq.entry_size_ctrl_enabled = llq_info->desc_list_entry_size_ctrl; + cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header; + cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl; + + ret = ena_com_execute_admin_command(admin_queue, + (struct ena_admin_aq_entry *)&cmd, + sizeof(cmd), + (struct ena_admin_acq_entry *)&resp, + sizeof(resp)); + + if (unlikely(ret)) + pr_err("Failed to set LLQ configurations: %d\n", ret); + + return ret; +} + +static int ena_com_config_llq_info(struct ena_com_dev *ena_dev, + struct ena_admin_feature_llq_desc *llq_features, + struct ena_llq_configurations *llq_default_cfg) +{ + struct ena_com_llq_info *llq_info = &ena_dev->llq_info; + u16 supported_feat; + int rc; + + memset(llq_info, 0, sizeof(*llq_info)); + + supported_feat = llq_features->header_location_ctrl_supported; + + if (likely(supported_feat & llq_default_cfg->llq_header_location)) { + llq_info->header_location_ctrl = + llq_default_cfg->llq_header_location; + } else { + pr_err("Invalid header location control, supported: 0x%x\n", + supported_feat); + return -EINVAL; + } + + if (likely(llq_info->header_location_ctrl == ENA_ADMIN_INLINE_HEADER)) { + supported_feat = llq_features->descriptors_stride_ctrl_supported; + if (likely(supported_feat & llq_default_cfg->llq_stride_ctrl)) { + llq_info->desc_stride_ctrl = llq_default_cfg->llq_stride_ctrl; + } else { + if (supported_feat & ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) { + llq_info->desc_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY; + } else if (supported_feat & ENA_ADMIN_SINGLE_DESC_PER_ENTRY) { + llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY; + } else { + pr_err("Invalid desc_stride_ctrl, supported: 0x%x\n", + supported_feat); + return -EINVAL; + } + + pr_err("Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n", + llq_default_cfg->llq_stride_ctrl, supported_feat, + llq_info->desc_stride_ctrl); + } + } else { + llq_info->desc_stride_ctrl = 0; + } + + supported_feat = llq_features->entry_size_ctrl_supported; + if (likely(supported_feat & llq_default_cfg->llq_ring_entry_size)) { + llq_info->desc_list_entry_size_ctrl = llq_default_cfg->llq_ring_entry_size; + llq_info->desc_list_entry_size = llq_default_cfg->llq_ring_entry_size_value; + } else { + if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_128B) { + llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_128B; + llq_info->desc_list_entry_size = 128; + } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_192B) { + llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_192B; + llq_info->desc_list_entry_size = 192; + } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_256B) { + llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_256B; + llq_info->desc_list_entry_size = 256; + } else { + pr_err("Invalid entry_size_ctrl, supported: 0x%x\n", + supported_feat); + return -EINVAL; + } + + pr_err("Default llq ring entry size is not supported, performing fallback, 
default: 0x%x, supported: 0x%x, used: 0x%x\n",
+		       llq_default_cfg->llq_ring_entry_size, supported_feat,
+		       llq_info->desc_list_entry_size);
+	}
+	if (unlikely(llq_info->desc_list_entry_size & 0x7)) {
+		/* The desc list entry size should be a whole multiple of 8.
+		 * This requirement comes from __iowrite64_copy()
+		 */
+		pr_err("illegal entry size %d\n",
+		       llq_info->desc_list_entry_size);
+		return -EINVAL;
+	}
+
+	if (llq_info->desc_stride_ctrl == ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY)
+		llq_info->descs_per_entry = llq_info->desc_list_entry_size /
+			sizeof(struct ena_eth_io_tx_desc);
+	else
+		llq_info->descs_per_entry = 1;
+
+	supported_feat = llq_features->desc_num_before_header_supported;
+	if (likely(supported_feat & llq_default_cfg->llq_num_decs_before_header)) {
+		llq_info->descs_num_before_header = llq_default_cfg->llq_num_decs_before_header;
+	} else {
+		if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2) {
+			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
+		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1) {
+			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1;
+		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4) {
+			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4;
+		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8) {
+			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8;
+		} else {
+			pr_err("Invalid descs_num_before_header, supported: 0x%x\n",
+			       supported_feat);
+			return -EINVAL;
+		}
+
+		pr_err("Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
+		       llq_default_cfg->llq_num_decs_before_header,
+		       supported_feat, llq_info->descs_num_before_header);
+	}
+
+	rc = ena_com_set_llq(ena_dev);
+	if (rc)
+		pr_err("Cannot set LLQ configuration: %d\n", rc);
+
+	return 0;
+}
+
 static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
 							struct ena_com_admin_queue *admin_queue)
 {
-	unsigned long flags;
+	unsigned long flags = 0;
spin_lock_irqsave(&admin_queue->q_lock, flags); while (atomic_read(&admin_queue->outstanding_cmds) != 0) { @@ -1292,7 +1471,7 @@ bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev) void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state) { struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; - unsigned long flags; + unsigned long flags = 0; spin_lock_irqsave(&admin_queue->q_lock, flags); ena_dev->admin_queue.running_state = state; @@ -1326,7 +1505,7 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag) } if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) { - pr_warn("Trying to set unsupported aenq events. supported flag: %x asked flag: %x\n", + pr_warn("Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n", get_resp.u.aenq.supported_groups, groups_flag); return -EOPNOTSUPP; } @@ -1400,11 +1579,6 @@ int ena_com_validate_version(struct ena_com_dev *ena_dev) ENA_REGS_VERSION_MAJOR_VERSION_SHIFT, ver & ENA_REGS_VERSION_MINOR_VERSION_MASK); - if (ver < MIN_ENA_VER) { - pr_err("ENA version is lower than the minimal version the driver supports\n"); - return -1; - } - pr_info("ena controller version: %d.%d.%d implementation version %d\n", (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >> ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT, @@ -1479,7 +1653,7 @@ int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev) sizeof(*mmio_read->read_resp), &mmio_read->read_resp_dma_addr, GFP_KERNEL); if (unlikely(!mmio_read->read_resp)) - return -ENOMEM; + goto err; ena_com_mmio_reg_read_request_write_dev_addr(ena_dev); @@ -1488,6 +1662,10 @@ int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev) mmio_read->readless_supported = true; return 0; + +err: + + return -ENOMEM; } void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported) @@ -1523,8 +1701,7 @@ void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev) } int ena_com_admin_init(struct ena_com_dev *ena_dev, - struct ena_aenq_handlers *aenq_handlers, - bool init_spinlock) + struct ena_aenq_handlers *aenq_handlers) { struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high; @@ -1550,8 +1727,7 @@ int ena_com_admin_init(struct ena_com_dev *ena_dev, atomic_set(&admin_queue->outstanding_cmds, 0); - if (init_spinlock) - spin_lock_init(&admin_queue->q_lock); + spin_lock_init(&admin_queue->q_lock); ret = ena_com_init_comp_ctxt(admin_queue); if (ret) @@ -1748,6 +1924,15 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev, else return rc; + rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ); + if (!rc) + memcpy(&get_feat_ctx->llq, &get_resp.u.llq, + sizeof(get_resp.u.llq)); + else if (rc == -EOPNOTSUPP) + memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq)); + else + return rc; + return 0; } @@ -1779,6 +1964,7 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data) struct ena_admin_aenq_entry *aenq_e; struct ena_admin_aenq_common_desc *aenq_common; struct ena_com_aenq *aenq = &dev->aenq; + unsigned long long timestamp; ena_aenq_handler handler_cb; u16 masked_head, processed = 0; u8 phase; @@ -1796,10 +1982,11 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data) */ dma_rmb(); + timestamp = + (unsigned long long)aenq_common->timestamp_low | + ((unsigned long long)aenq_common->timestamp_high << 32); pr_debug("AENQ! 
Group[%x] Syndrom[%x] timestamp: [%llus]\n", - aenq_common->group, aenq_common->syndrom, - (u64)aenq_common->timestamp_low + - ((u64)aenq_common->timestamp_high << 32)); + aenq_common->group, aenq_common->syndrom, timestamp); /* Handle specific event*/ handler_cb = ena_com_get_specific_aenq_cb(dev, @@ -2441,6 +2628,10 @@ int ena_com_allocate_host_info(struct ena_com_dev *ena_dev) if (unlikely(!host_attr->host_info)) return -ENOMEM; + host_attr->host_info->ena_spec_version = ((ENA_COMMON_SPEC_VERSION_MAJOR << + ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) | + (ENA_COMMON_SPEC_VERSION_MINOR)); + return 0; } @@ -2712,3 +2903,34 @@ void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev, intr_moder_tbl[level].pkts_per_interval; entry->bytes_per_interval = intr_moder_tbl[level].bytes_per_interval; } + +int ena_com_config_dev_mode(struct ena_com_dev *ena_dev, + struct ena_admin_feature_llq_desc *llq_features, + struct ena_llq_configurations *llq_default_cfg) +{ + int rc; + int size; + + if (!llq_features->max_llq_num) { + ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; + return 0; + } + + rc = ena_com_config_llq_info(ena_dev, llq_features, llq_default_cfg); + if (rc) + return rc; + + /* Validate the descriptor is not too big */ + size = ena_dev->tx_max_header_size; + size += ena_dev->llq_info.descs_num_before_header * + sizeof(struct ena_eth_io_tx_desc); + + if (unlikely(ena_dev->llq_info.desc_list_entry_size < size)) { + pr_err("the size of the LLQ entry is smaller than needed\n"); + return -EINVAL; + } + + ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV; + + return 0; +} diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h index 7b784f8a06a6..ae8b4857fce3 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.h +++ b/drivers/net/ethernet/amazon/ena/ena_com.h @@ -37,6 +37,7 @@ #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/gfp.h> +#include <linux/io.h> #include <linux/sched.h> #include <linux/sizes.h> #include <linux/spinlock.h> @@ -108,6 +109,14 @@ enum ena_intr_moder_level { ENA_INTR_MAX_NUM_OF_LEVELS, }; +struct ena_llq_configurations { + enum ena_admin_llq_header_location llq_header_location; + enum ena_admin_llq_ring_entry_size llq_ring_entry_size; + enum ena_admin_llq_stride_ctrl llq_stride_ctrl; + enum ena_admin_llq_num_descs_before_header llq_num_decs_before_header; + u16 llq_ring_entry_size_value; +}; + struct ena_intr_moder_entry { unsigned int intr_moder_interval; unsigned int pkts_per_interval; @@ -142,6 +151,15 @@ struct ena_com_tx_meta { u16 l4_hdr_len; /* In words */ }; +struct ena_com_llq_info { + u16 header_location_ctrl; + u16 desc_stride_ctrl; + u16 desc_list_entry_size_ctrl; + u16 desc_list_entry_size; + u16 descs_num_before_header; + u16 descs_per_entry; +}; + struct ena_com_io_cq { struct ena_com_io_desc_addr cdesc_addr; @@ -179,6 +197,20 @@ struct ena_com_io_cq { } ____cacheline_aligned; +struct ena_com_io_bounce_buffer_control { + u8 *base_buffer; + u16 next_to_use; + u16 buffer_size; + u16 buffers_num; /* Must be a power of 2 */ +}; + +/* This struct is to keep tracking the current location of the next llq entry */ +struct ena_com_llq_pkt_ctrl { + u8 *curr_bounce_buf; + u16 idx; + u16 descs_left_in_line; +}; + struct ena_com_io_sq { struct ena_com_io_desc_addr desc_addr; @@ -190,6 +222,9 @@ struct ena_com_io_sq { u32 msix_vector; struct ena_com_tx_meta cached_tx_meta; + struct ena_com_llq_info llq_info; + struct ena_com_llq_pkt_ctrl llq_buf_ctrl; + struct 
ena_com_io_bounce_buffer_control bounce_buf_ctrl;
 
 	u16 q_depth;
 	u16 qid;
@@ -197,6 +232,7 @@ struct ena_com_io_sq {
 	u16 idx;
 	u16 tail;
 	u16 next_to_comp;
+	u16 llq_last_copy_tail;
 	u32 tx_max_header_size;
 	u8 phase;
 	u8 desc_entry_size;
@@ -334,6 +370,8 @@ struct ena_com_dev {
 	u16 intr_delay_resolution;
 	u32 intr_moder_tx_interval;
 	struct ena_intr_moder_entry *intr_moder_tbl;
+
+	struct ena_com_llq_info llq_info;
 };
 
 struct ena_com_dev_get_features_ctx {
@@ -342,6 +380,7 @@ struct ena_com_dev_get_features_ctx {
 	struct ena_admin_feature_aenq_desc aenq;
 	struct ena_admin_feature_offload_desc offload;
 	struct ena_admin_ena_hw_hints hw_hints;
+	struct ena_admin_feature_llq_desc llq;
 };
 
 struct ena_com_create_io_ctx {
@@ -397,8 +436,6 @@ void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev);
 /* ena_com_admin_init - Init the admin and the async queues
  * @ena_dev: ENA communication layer struct
  * @aenq_handlers: Those handlers to be called upon event.
- * @init_spinlock: Indicate if this method should init the admin spinlock or
- *		   the spinlock was init before (for example, in a case of FLR).
  *
  * Initialize the admin submission and completion queues.
  * Initialize the asynchronous events notification queues.
@@ -406,8 +443,7 @@ void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev);
  * @return - 0 on success, negative value on failure.
  */
 int ena_com_admin_init(struct ena_com_dev *ena_dev,
-		       struct ena_aenq_handlers *aenq_handlers,
-		       bool init_spinlock);
+		       struct ena_aenq_handlers *aenq_handlers);
 
 /* ena_com_admin_destroy - Destroy the admin and the async events queues.
  * @ena_dev: ENA communication layer struct
@@ -935,6 +971,16 @@ void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
 				       enum ena_intr_moder_level level,
 				       struct ena_intr_moder_entry *entry);
 
+/* ena_com_config_dev_mode - Configure the placement policy of the device.
+ * @ena_dev: ENA communication layer struct
+ * @llq_features: LLQ feature descriptor, retrieved via
+ *	ena_com_get_dev_attr_feat.
+ * @ena_llq_config: The default driver LLQ parameters configurations + */ +int ena_com_config_dev_mode(struct ena_com_dev *ena_dev, + struct ena_admin_feature_llq_desc *llq_features, + struct ena_llq_configurations *llq_default_config); + static inline bool ena_com_get_adaptive_moderation_enabled(struct ena_com_dev *ena_dev) { return ena_dev->adaptive_coalescing; @@ -1044,4 +1090,21 @@ static inline void ena_com_update_intr_reg(struct ena_eth_io_intr_reg *intr_reg, intr_reg->intr_control |= ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK; } +static inline u8 *ena_com_get_next_bounce_buffer(struct ena_com_io_bounce_buffer_control *bounce_buf_ctrl) +{ + u16 size, buffers_num; + u8 *buf; + + size = bounce_buf_ctrl->buffer_size; + buffers_num = bounce_buf_ctrl->buffers_num; + + buf = bounce_buf_ctrl->base_buffer + + (bounce_buf_ctrl->next_to_use++ & (buffers_num - 1)) * size; + + prefetchw(bounce_buf_ctrl->base_buffer + + (bounce_buf_ctrl->next_to_use & (buffers_num - 1)) * size); + + return buf; +} + #endif /* !(ENA_COM) */ diff --git a/drivers/net/ethernet/amazon/ena/ena_common_defs.h b/drivers/net/ethernet/amazon/ena/ena_common_defs.h index bb8d73676eab..23beb7e7ed7b 100644 --- a/drivers/net/ethernet/amazon/ena/ena_common_defs.h +++ b/drivers/net/ethernet/amazon/ena/ena_common_defs.h @@ -32,8 +32,8 @@ #ifndef _ENA_COMMON_H_ #define _ENA_COMMON_H_ -#define ENA_COMMON_SPEC_VERSION_MAJOR 0 /* */ -#define ENA_COMMON_SPEC_VERSION_MINOR 10 /* */ +#define ENA_COMMON_SPEC_VERSION_MAJOR 2 +#define ENA_COMMON_SPEC_VERSION_MINOR 0 /* ENA operates with 48-bit memory addresses. ena_mem_addr_t */ struct ena_common_mem_addr { diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c index 2b3ff0c20155..f6c2d3855be8 100644 --- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c @@ -59,16 +59,7 @@ static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc( return cdesc; } -static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq) -{ - io_cq->head++; - - /* Switch phase bit in case of wrap around */ - if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0)) - io_cq->phase ^= 1; -} - -static inline void *get_sq_desc(struct ena_com_io_sq *io_sq) +static inline void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq) { u16 tail_masked; u32 offset; @@ -80,45 +71,159 @@ static inline void *get_sq_desc(struct ena_com_io_sq *io_sq) return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset); } -static inline void ena_com_copy_curr_sq_desc_to_dev(struct ena_com_io_sq *io_sq) +static inline int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq, + u8 *bounce_buffer) { - u16 tail_masked = io_sq->tail & (io_sq->q_depth - 1); - u32 offset = tail_masked * io_sq->desc_entry_size; + struct ena_com_llq_info *llq_info = &io_sq->llq_info; - /* In case this queue isn't a LLQ */ - if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) - return; + u16 dst_tail_mask; + u32 dst_offset; - memcpy_toio(io_sq->desc_addr.pbuf_dev_addr + offset, - io_sq->desc_addr.virt_addr + offset, - io_sq->desc_entry_size); -} + dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1); + dst_offset = dst_tail_mask * llq_info->desc_list_entry_size; + + /* Make sure everything was written into the bounce buffer before + * writing the bounce buffer to the device + */ + wmb(); + + /* The line is completed. 
Copy it to dev */ + __iowrite64_copy(io_sq->desc_addr.pbuf_dev_addr + dst_offset, + bounce_buffer, (llq_info->desc_list_entry_size) / 8); -static inline void ena_com_sq_update_tail(struct ena_com_io_sq *io_sq) -{ io_sq->tail++; /* Switch phase bit in case of wrap around */ if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0)) io_sq->phase ^= 1; + + return 0; } -static inline int ena_com_write_header(struct ena_com_io_sq *io_sq, - u8 *head_src, u16 header_len) +static inline int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq, + u8 *header_src, + u16 header_len) { - u16 tail_masked = io_sq->tail & (io_sq->q_depth - 1); - u8 __iomem *dev_head_addr = - io_sq->header_addr + (tail_masked * io_sq->tx_max_header_size); + struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl; + struct ena_com_llq_info *llq_info = &io_sq->llq_info; + u8 *bounce_buffer = pkt_ctrl->curr_bounce_buf; + u16 header_offset; - if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) + if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)) return 0; - if (unlikely(!io_sq->header_addr)) { - pr_err("Push buffer header ptr is NULL\n"); - return -EINVAL; + header_offset = + llq_info->descs_num_before_header * io_sq->desc_entry_size; + + if (unlikely((header_offset + header_len) > + llq_info->desc_list_entry_size)) { + pr_err("trying to write header larger than llq entry can accommodate\n"); + return -EFAULT; + } + + if (unlikely(!bounce_buffer)) { + pr_err("bounce buffer is NULL\n"); + return -EFAULT; + } + + memcpy(bounce_buffer + header_offset, header_src, header_len); + + return 0; +} + +static inline void *get_sq_desc_llq(struct ena_com_io_sq *io_sq) +{ + struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl; + u8 *bounce_buffer; + void *sq_desc; + + bounce_buffer = pkt_ctrl->curr_bounce_buf; + + if (unlikely(!bounce_buffer)) { + pr_err("bounce buffer is NULL\n"); + return NULL; + } + + sq_desc = bounce_buffer + pkt_ctrl->idx * io_sq->desc_entry_size; + pkt_ctrl->idx++; + pkt_ctrl->descs_left_in_line--; + + return sq_desc; +} + +static inline int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq) +{ + struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl; + struct ena_com_llq_info *llq_info = &io_sq->llq_info; + int rc; + + if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)) + return 0; + + /* bounce buffer was used, so write it and get a new one */ + if (pkt_ctrl->idx) { + rc = ena_com_write_bounce_buffer_to_dev(io_sq, + pkt_ctrl->curr_bounce_buf); + if (unlikely(rc)) + return rc; + + pkt_ctrl->curr_bounce_buf = + ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl); + memset(io_sq->llq_buf_ctrl.curr_bounce_buf, + 0x0, llq_info->desc_list_entry_size); + } + + pkt_ctrl->idx = 0; + pkt_ctrl->descs_left_in_line = llq_info->descs_num_before_header; + return 0; +} + +static inline void *get_sq_desc(struct ena_com_io_sq *io_sq) +{ + if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) + return get_sq_desc_llq(io_sq); + + return get_sq_desc_regular_queue(io_sq); +} + +static inline int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq) +{ + struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl; + struct ena_com_llq_info *llq_info = &io_sq->llq_info; + int rc; + + if (!pkt_ctrl->descs_left_in_line) { + rc = ena_com_write_bounce_buffer_to_dev(io_sq, + pkt_ctrl->curr_bounce_buf); + if (unlikely(rc)) + return rc; + + pkt_ctrl->curr_bounce_buf = + ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl); + 
memset(io_sq->llq_buf_ctrl.curr_bounce_buf, + 0x0, llq_info->desc_list_entry_size); + + pkt_ctrl->idx = 0; + if (unlikely(llq_info->desc_stride_ctrl == ENA_ADMIN_SINGLE_DESC_PER_ENTRY)) + pkt_ctrl->descs_left_in_line = 1; + else + pkt_ctrl->descs_left_in_line = + llq_info->desc_list_entry_size / io_sq->desc_entry_size; } - memcpy_toio(dev_head_addr, head_src, header_len); + return 0; +} + +static inline int ena_com_sq_update_tail(struct ena_com_io_sq *io_sq) +{ + if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) + return ena_com_sq_update_llq_tail(io_sq); + + io_sq->tail++; + + /* Switch phase bit in case of wrap around */ + if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0)) + io_sq->phase ^= 1; return 0; } @@ -186,8 +291,8 @@ static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq, return false; } -static inline void ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq, - struct ena_com_tx_ctx *ena_tx_ctx) +static inline int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq, + struct ena_com_tx_ctx *ena_tx_ctx) { struct ena_eth_io_tx_meta_desc *meta_desc = NULL; struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta; @@ -232,8 +337,7 @@ static inline void ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *i memcpy(&io_sq->cached_tx_meta, ena_meta, sizeof(struct ena_com_tx_meta)); - ena_com_copy_curr_sq_desc_to_dev(io_sq); - ena_com_sq_update_tail(io_sq); + return ena_com_sq_update_tail(io_sq); } static inline void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx, @@ -250,6 +354,9 @@ static inline void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx, ena_rx_ctx->l4_csum_err = !!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT); + ena_rx_ctx->l4_csum_checked = + !!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_MASK) >> + ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_SHIFT); ena_rx_ctx->hash = cdesc->hash; ena_rx_ctx->frag = (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >> @@ -271,18 +378,19 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq, { struct ena_eth_io_tx_desc *desc = NULL; struct ena_com_buf *ena_bufs = ena_tx_ctx->ena_bufs; - void *push_header = ena_tx_ctx->push_header; + void *buffer_to_push = ena_tx_ctx->push_header; u16 header_len = ena_tx_ctx->header_len; u16 num_bufs = ena_tx_ctx->num_bufs; - int total_desc, i, rc; + u16 start_tail = io_sq->tail; + int i, rc; bool have_meta; u64 addr_hi; WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX, "wrong Q type"); /* num_bufs +1 for potential meta desc */ - if (ena_com_sq_empty_space(io_sq) < (num_bufs + 1)) { - pr_err("Not enough space in the tx queue\n"); + if (unlikely(!ena_com_sq_have_enough_space(io_sq, num_bufs + 1))) { + pr_debug("Not enough space in the tx queue\n"); return -ENOMEM; } @@ -292,23 +400,32 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq, return -EINVAL; } - /* start with pushing the header (if needed) */ - rc = ena_com_write_header(io_sq, push_header, header_len); + if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV && + !buffer_to_push)) + return -EINVAL; + + rc = ena_com_write_header_to_bounce(io_sq, buffer_to_push, header_len); if (unlikely(rc)) return rc; have_meta = ena_tx_ctx->meta_valid && ena_com_meta_desc_changed(io_sq, ena_tx_ctx); - if (have_meta) - ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx); + if (have_meta) { + rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx); + if (unlikely(rc)) + 
return rc; + } - /* If the caller doesn't want send packets */ + /* If the caller doesn't want to send packets */ if (unlikely(!num_bufs && !header_len)) { - *nb_hw_desc = have_meta ? 0 : 1; - return 0; + rc = ena_com_close_bounce_buffer(io_sq); + *nb_hw_desc = io_sq->tail - start_tail; + return rc; } desc = get_sq_desc(io_sq); + if (unlikely(!desc)) + return -EFAULT; memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc)); /* Set first desc when we don't have meta descriptor */ @@ -360,10 +477,14 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq, for (i = 0; i < num_bufs; i++) { /* The first desc share the same desc as the header */ if (likely(i != 0)) { - ena_com_copy_curr_sq_desc_to_dev(io_sq); - ena_com_sq_update_tail(io_sq); + rc = ena_com_sq_update_tail(io_sq); + if (unlikely(rc)) + return rc; desc = get_sq_desc(io_sq); + if (unlikely(!desc)) + return -EFAULT; + memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc)); desc->len_ctrl |= (io_sq->phase << @@ -386,15 +507,14 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq, /* set the last desc indicator */ desc->len_ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK; - ena_com_copy_curr_sq_desc_to_dev(io_sq); - - ena_com_sq_update_tail(io_sq); + rc = ena_com_sq_update_tail(io_sq); + if (unlikely(rc)) + return rc; - total_desc = max_t(u16, num_bufs, 1); - total_desc += have_meta ? 1 : 0; + rc = ena_com_close_bounce_buffer(io_sq); - *nb_hw_desc = total_desc; - return 0; + *nb_hw_desc = io_sq->tail - start_tail; + return rc; } int ena_com_rx_pkt(struct ena_com_io_cq *io_cq, @@ -453,15 +573,18 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq, WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type"); - if (unlikely(ena_com_sq_empty_space(io_sq) == 0)) + if (unlikely(!ena_com_sq_have_enough_space(io_sq, 1))) return -ENOSPC; desc = get_sq_desc(io_sq); + if (unlikely(!desc)) + return -EFAULT; + memset(desc, 0x0, sizeof(struct ena_eth_io_rx_desc)); desc->length = ena_buf->len; - desc->ctrl |= ENA_ETH_IO_RX_DESC_FIRST_MASK; + desc->ctrl = ENA_ETH_IO_RX_DESC_FIRST_MASK; desc->ctrl |= ENA_ETH_IO_RX_DESC_LAST_MASK; desc->ctrl |= io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK; desc->ctrl |= ENA_ETH_IO_RX_DESC_COMP_REQ_MASK; @@ -472,43 +595,7 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq, desc->buff_addr_hi = ((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32); - ena_com_sq_update_tail(io_sq); - - return 0; -} - -int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id) -{ - u8 expected_phase, cdesc_phase; - struct ena_eth_io_tx_cdesc *cdesc; - u16 masked_head; - - masked_head = io_cq->head & (io_cq->q_depth - 1); - expected_phase = io_cq->phase; - - cdesc = (struct ena_eth_io_tx_cdesc *) - ((uintptr_t)io_cq->cdesc_addr.virt_addr + - (masked_head * io_cq->cdesc_entry_size_in_bytes)); - - /* When the current completion descriptor phase isn't the same as the - * expected, it mean that the device still didn't update - * this completion. 
- */
-	cdesc_phase = READ_ONCE(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
-	if (cdesc_phase != expected_phase)
-		return -EAGAIN;
-
-	dma_rmb();
-	if (unlikely(cdesc->req_id >= io_cq->q_depth)) {
-		pr_err("Invalid req id %d\n", cdesc->req_id);
-		return -EINVAL;
-	}
-
-	ena_com_cq_inc_head(io_cq);
-
-	*req_id = READ_ONCE(cdesc->req_id);
-
-	return 0;
+	return ena_com_sq_update_tail(io_sq);
 }
 
 bool ena_com_cq_empty(struct ena_com_io_cq *io_cq)
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
index 2f7657227cfe..340d02b64ca6 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
@@ -67,6 +67,7 @@ struct ena_com_rx_ctx {
 	enum ena_eth_io_l4_proto_index l4_proto;
 	bool l3_csum_err;
 	bool l4_csum_err;
+	u8 l4_csum_checked;
 	/* fragmented packet */
 	bool frag;
 	u32 hash;
@@ -86,8 +87,6 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
 			       struct ena_com_buf *ena_buf,
 			       u16 req_id);
 
-int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id);
-
 bool ena_com_cq_empty(struct ena_com_io_cq *io_cq);
 
 static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq,
@@ -96,7 +95,7 @@ static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq,
 	writel(intr_reg->intr_control, io_cq->unmask_reg);
 }
 
-static inline int ena_com_sq_empty_space(struct ena_com_io_sq *io_sq)
+static inline int ena_com_free_desc(struct ena_com_io_sq *io_sq)
 {
 	u16 tail, next_to_comp, cnt;
 
@@ -107,11 +106,28 @@ static inline int ena_com_sq_empty_space(struct ena_com_io_sq *io_sq)
 	return io_sq->q_depth - 1 - cnt;
 }
 
-static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
+/* Check if the submission queue has enough space to hold required_buffers */
+static inline bool ena_com_sq_have_enough_space(struct ena_com_io_sq *io_sq,
+						u16 required_buffers)
 {
-	u16 tail;
+	int temp;
 
-	tail = io_sq->tail;
+	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
+		return ena_com_free_desc(io_sq) >= required_buffers;
+
+	/* This calculation doesn't need to be 100% accurate. So to reduce
+	 * the calculation overhead just subtract 2 lines from the free descs
+	 * (one for the header line and one to compensate for the division
+	 * rounding down).
+	 */
+	temp = required_buffers / io_sq->llq_info.descs_per_entry + 2;
+
+	return ena_com_free_desc(io_sq) > temp;
+}
+
+static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
+{
+	u16 tail = io_sq->tail;
 
 	pr_debug("write submission queue doorbell for queue: %d tail: %d\n",
 		 io_sq->qid, tail);
@@ -159,4 +175,48 @@ static inline void ena_com_comp_ack(struct ena_com_io_sq *io_sq, u16 elem)
 	io_sq->next_to_comp += elem;
 }
 
+static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq)
+{
+	io_cq->head++;
+
+	/* Switch phase bit in case of wrap around */
+	if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0))
+		io_cq->phase ^= 1;
+}
+
+static inline int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq,
+					     u16 *req_id)
+{
+	u8 expected_phase, cdesc_phase;
+	struct ena_eth_io_tx_cdesc *cdesc;
+	u16 masked_head;
+
+	masked_head = io_cq->head & (io_cq->q_depth - 1);
+	expected_phase = io_cq->phase;
+
+	cdesc = (struct ena_eth_io_tx_cdesc *)
+		((uintptr_t)io_cq->cdesc_addr.virt_addr +
+		(masked_head * io_cq->cdesc_entry_size_in_bytes));
+
+	/* When the current completion descriptor phase isn't the same as the
+	 * expected, it means that the device hasn't updated
+	 * this completion yet.
+ */ + cdesc_phase = READ_ONCE(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK; + if (cdesc_phase != expected_phase) + return -EAGAIN; + + dma_rmb(); + + *req_id = READ_ONCE(cdesc->req_id); + if (unlikely(*req_id >= io_cq->q_depth)) { + pr_err("Invalid req id %d\n", cdesc->req_id); + return -EINVAL; + } + + ena_com_cq_inc_head(io_cq); + + return 0; +} + #endif /* ENA_ETH_COM_H_ */ diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h b/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h index f320c58793a5..00e0f056a741 100644 --- a/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h +++ b/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h @@ -33,25 +33,18 @@ #define _ENA_ETH_IO_H_ enum ena_eth_io_l3_proto_index { - ENA_ETH_IO_L3_PROTO_UNKNOWN = 0, - - ENA_ETH_IO_L3_PROTO_IPV4 = 8, - - ENA_ETH_IO_L3_PROTO_IPV6 = 11, - - ENA_ETH_IO_L3_PROTO_FCOE = 21, - - ENA_ETH_IO_L3_PROTO_ROCE = 22, + ENA_ETH_IO_L3_PROTO_UNKNOWN = 0, + ENA_ETH_IO_L3_PROTO_IPV4 = 8, + ENA_ETH_IO_L3_PROTO_IPV6 = 11, + ENA_ETH_IO_L3_PROTO_FCOE = 21, + ENA_ETH_IO_L3_PROTO_ROCE = 22, }; enum ena_eth_io_l4_proto_index { - ENA_ETH_IO_L4_PROTO_UNKNOWN = 0, - - ENA_ETH_IO_L4_PROTO_TCP = 12, - - ENA_ETH_IO_L4_PROTO_UDP = 13, - - ENA_ETH_IO_L4_PROTO_ROUTEABLE_ROCE = 23, + ENA_ETH_IO_L4_PROTO_UNKNOWN = 0, + ENA_ETH_IO_L4_PROTO_TCP = 12, + ENA_ETH_IO_L4_PROTO_UDP = 13, + ENA_ETH_IO_L4_PROTO_ROUTEABLE_ROCE = 23, }; struct ena_eth_io_tx_desc { @@ -242,9 +235,13 @@ struct ena_eth_io_rx_cdesc_base { * checksum error detected, or, the controller didn't * validate the checksum. This bit is valid only when * l4_proto_idx indicates TCP/UDP packet, and, - * ipv4_frag is not set + * ipv4_frag is not set. This bit is valid only when + * l4_csum_checked below is set. * 15 : ipv4_frag - Indicates IPv4 fragmented packet - * 23:16 : reserved16 + * 16 : l4_csum_checked - L4 checksum was verified + * (could be OK or error), when cleared the status of + * checksum is unknown + * 23:17 : reserved17 - MBZ * 24 : phase * 25 : l3_csum2 - second checksum engine result * 26 : first - Indicates first descriptor in @@ -303,114 +300,116 @@ struct ena_eth_io_numa_node_cfg_reg { }; /* tx_desc */ -#define ENA_ETH_IO_TX_DESC_LENGTH_MASK GENMASK(15, 0) -#define ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT 16 -#define ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK GENMASK(21, 16) -#define ENA_ETH_IO_TX_DESC_META_DESC_SHIFT 23 -#define ENA_ETH_IO_TX_DESC_META_DESC_MASK BIT(23) -#define ENA_ETH_IO_TX_DESC_PHASE_SHIFT 24 -#define ENA_ETH_IO_TX_DESC_PHASE_MASK BIT(24) -#define ENA_ETH_IO_TX_DESC_FIRST_SHIFT 26 -#define ENA_ETH_IO_TX_DESC_FIRST_MASK BIT(26) -#define ENA_ETH_IO_TX_DESC_LAST_SHIFT 27 -#define ENA_ETH_IO_TX_DESC_LAST_MASK BIT(27) -#define ENA_ETH_IO_TX_DESC_COMP_REQ_SHIFT 28 -#define ENA_ETH_IO_TX_DESC_COMP_REQ_MASK BIT(28) -#define ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK GENMASK(3, 0) -#define ENA_ETH_IO_TX_DESC_DF_SHIFT 4 -#define ENA_ETH_IO_TX_DESC_DF_MASK BIT(4) -#define ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT 7 -#define ENA_ETH_IO_TX_DESC_TSO_EN_MASK BIT(7) -#define ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT 8 -#define ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK GENMASK(12, 8) -#define ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT 13 -#define ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK BIT(13) -#define ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT 14 -#define ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK BIT(14) -#define ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_SHIFT 15 -#define ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_MASK BIT(15) -#define ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT 17 -#define ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK BIT(17) 
-#define ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT 22 -#define ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK GENMASK(31, 22) -#define ENA_ETH_IO_TX_DESC_ADDR_HI_MASK GENMASK(15, 0) -#define ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT 24 -#define ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK GENMASK(31, 24) +#define ENA_ETH_IO_TX_DESC_LENGTH_MASK GENMASK(15, 0) +#define ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT 16 +#define ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK GENMASK(21, 16) +#define ENA_ETH_IO_TX_DESC_META_DESC_SHIFT 23 +#define ENA_ETH_IO_TX_DESC_META_DESC_MASK BIT(23) +#define ENA_ETH_IO_TX_DESC_PHASE_SHIFT 24 +#define ENA_ETH_IO_TX_DESC_PHASE_MASK BIT(24) +#define ENA_ETH_IO_TX_DESC_FIRST_SHIFT 26 +#define ENA_ETH_IO_TX_DESC_FIRST_MASK BIT(26) +#define ENA_ETH_IO_TX_DESC_LAST_SHIFT 27 +#define ENA_ETH_IO_TX_DESC_LAST_MASK BIT(27) +#define ENA_ETH_IO_TX_DESC_COMP_REQ_SHIFT 28 +#define ENA_ETH_IO_TX_DESC_COMP_REQ_MASK BIT(28) +#define ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK GENMASK(3, 0) +#define ENA_ETH_IO_TX_DESC_DF_SHIFT 4 +#define ENA_ETH_IO_TX_DESC_DF_MASK BIT(4) +#define ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT 7 +#define ENA_ETH_IO_TX_DESC_TSO_EN_MASK BIT(7) +#define ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT 8 +#define ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK GENMASK(12, 8) +#define ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT 13 +#define ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK BIT(13) +#define ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT 14 +#define ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK BIT(14) +#define ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_SHIFT 15 +#define ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_MASK BIT(15) +#define ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT 17 +#define ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK BIT(17) +#define ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT 22 +#define ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK GENMASK(31, 22) +#define ENA_ETH_IO_TX_DESC_ADDR_HI_MASK GENMASK(15, 0) +#define ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT 24 +#define ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK GENMASK(31, 24) /* tx_meta_desc */ -#define ENA_ETH_IO_TX_META_DESC_REQ_ID_LO_MASK GENMASK(9, 0) -#define ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT 14 -#define ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK BIT(14) -#define ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT 16 -#define ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK GENMASK(19, 16) -#define ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT 20 -#define ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK BIT(20) -#define ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT 21 -#define ENA_ETH_IO_TX_META_DESC_META_STORE_MASK BIT(21) -#define ENA_ETH_IO_TX_META_DESC_META_DESC_SHIFT 23 -#define ENA_ETH_IO_TX_META_DESC_META_DESC_MASK BIT(23) -#define ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT 24 -#define ENA_ETH_IO_TX_META_DESC_PHASE_MASK BIT(24) -#define ENA_ETH_IO_TX_META_DESC_FIRST_SHIFT 26 -#define ENA_ETH_IO_TX_META_DESC_FIRST_MASK BIT(26) -#define ENA_ETH_IO_TX_META_DESC_LAST_SHIFT 27 -#define ENA_ETH_IO_TX_META_DESC_LAST_MASK BIT(27) -#define ENA_ETH_IO_TX_META_DESC_COMP_REQ_SHIFT 28 -#define ENA_ETH_IO_TX_META_DESC_COMP_REQ_MASK BIT(28) -#define ENA_ETH_IO_TX_META_DESC_REQ_ID_HI_MASK GENMASK(5, 0) -#define ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK GENMASK(7, 0) -#define ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT 8 -#define ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK GENMASK(15, 8) -#define ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT 16 -#define ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK GENMASK(21, 16) -#define ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT 22 -#define ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK GENMASK(31, 22) +#define ENA_ETH_IO_TX_META_DESC_REQ_ID_LO_MASK GENMASK(9, 0) +#define 
ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT 14 +#define ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK BIT(14) +#define ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT 16 +#define ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK GENMASK(19, 16) +#define ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT 20 +#define ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK BIT(20) +#define ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT 21 +#define ENA_ETH_IO_TX_META_DESC_META_STORE_MASK BIT(21) +#define ENA_ETH_IO_TX_META_DESC_META_DESC_SHIFT 23 +#define ENA_ETH_IO_TX_META_DESC_META_DESC_MASK BIT(23) +#define ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT 24 +#define ENA_ETH_IO_TX_META_DESC_PHASE_MASK BIT(24) +#define ENA_ETH_IO_TX_META_DESC_FIRST_SHIFT 26 +#define ENA_ETH_IO_TX_META_DESC_FIRST_MASK BIT(26) +#define ENA_ETH_IO_TX_META_DESC_LAST_SHIFT 27 +#define ENA_ETH_IO_TX_META_DESC_LAST_MASK BIT(27) +#define ENA_ETH_IO_TX_META_DESC_COMP_REQ_SHIFT 28 +#define ENA_ETH_IO_TX_META_DESC_COMP_REQ_MASK BIT(28) +#define ENA_ETH_IO_TX_META_DESC_REQ_ID_HI_MASK GENMASK(5, 0) +#define ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK GENMASK(7, 0) +#define ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT 8 +#define ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK GENMASK(15, 8) +#define ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT 16 +#define ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK GENMASK(21, 16) +#define ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT 22 +#define ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK GENMASK(31, 22) /* tx_cdesc */ -#define ENA_ETH_IO_TX_CDESC_PHASE_MASK BIT(0) +#define ENA_ETH_IO_TX_CDESC_PHASE_MASK BIT(0) /* rx_desc */ -#define ENA_ETH_IO_RX_DESC_PHASE_MASK BIT(0) -#define ENA_ETH_IO_RX_DESC_FIRST_SHIFT 2 -#define ENA_ETH_IO_RX_DESC_FIRST_MASK BIT(2) -#define ENA_ETH_IO_RX_DESC_LAST_SHIFT 3 -#define ENA_ETH_IO_RX_DESC_LAST_MASK BIT(3) -#define ENA_ETH_IO_RX_DESC_COMP_REQ_SHIFT 4 -#define ENA_ETH_IO_RX_DESC_COMP_REQ_MASK BIT(4) +#define ENA_ETH_IO_RX_DESC_PHASE_MASK BIT(0) +#define ENA_ETH_IO_RX_DESC_FIRST_SHIFT 2 +#define ENA_ETH_IO_RX_DESC_FIRST_MASK BIT(2) +#define ENA_ETH_IO_RX_DESC_LAST_SHIFT 3 +#define ENA_ETH_IO_RX_DESC_LAST_MASK BIT(3) +#define ENA_ETH_IO_RX_DESC_COMP_REQ_SHIFT 4 +#define ENA_ETH_IO_RX_DESC_COMP_REQ_MASK BIT(4) /* rx_cdesc_base */ -#define ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK GENMASK(4, 0) -#define ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_SHIFT 5 -#define ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_MASK GENMASK(6, 5) -#define ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT 8 -#define ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK GENMASK(12, 8) -#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT 13 -#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK BIT(13) -#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT 14 -#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK BIT(14) -#define ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT 15 -#define ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK BIT(15) -#define ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT 24 -#define ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK BIT(24) -#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT 25 -#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_MASK BIT(25) -#define ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT 26 -#define ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK BIT(26) -#define ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT 27 -#define ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK BIT(27) -#define ENA_ETH_IO_RX_CDESC_BASE_BUFFER_SHIFT 30 -#define ENA_ETH_IO_RX_CDESC_BASE_BUFFER_MASK BIT(30) +#define ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK GENMASK(4, 0) +#define ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_SHIFT 5 +#define ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_MASK GENMASK(6, 5) 
+#define ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT 8 +#define ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK GENMASK(12, 8) +#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT 13 +#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK BIT(13) +#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT 14 +#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK BIT(14) +#define ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT 15 +#define ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK BIT(15) +#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_SHIFT 16 +#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_MASK BIT(16) +#define ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT 24 +#define ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK BIT(24) +#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT 25 +#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_MASK BIT(25) +#define ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT 26 +#define ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK BIT(26) +#define ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT 27 +#define ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK BIT(27) +#define ENA_ETH_IO_RX_CDESC_BASE_BUFFER_SHIFT 30 +#define ENA_ETH_IO_RX_CDESC_BASE_BUFFER_MASK BIT(30) /* intr_reg */ -#define ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK GENMASK(14, 0) -#define ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT 15 -#define ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK GENMASK(29, 15) -#define ENA_ETH_IO_INTR_REG_INTR_UNMASK_SHIFT 30 -#define ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK BIT(30) +#define ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK GENMASK(14, 0) +#define ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT 15 +#define ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK GENMASK(29, 15) +#define ENA_ETH_IO_INTR_REG_INTR_UNMASK_SHIFT 30 +#define ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK BIT(30) /* numa_node_cfg_reg */ -#define ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK GENMASK(7, 0) -#define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_SHIFT 31 -#define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK BIT(31) +#define ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK GENMASK(7, 0) +#define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_SHIFT 31 +#define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK BIT(31) #endif /*_ENA_ETH_IO_H_ */ diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c index 521607bc4393..f3a5a384e6e8 100644 --- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c +++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c @@ -81,6 +81,7 @@ static const struct ena_stats ena_stats_tx_strings[] = { ENA_STAT_TX_ENTRY(doorbells), ENA_STAT_TX_ENTRY(prepare_ctx_err), ENA_STAT_TX_ENTRY(bad_req_id), + ENA_STAT_TX_ENTRY(llq_buffer_copy), ENA_STAT_TX_ENTRY(missed_tx), }; @@ -96,6 +97,7 @@ static const struct ena_stats ena_stats_rx_strings[] = { ENA_STAT_RX_ENTRY(rx_copybreak_pkt), ENA_STAT_RX_ENTRY(bad_req_id), ENA_STAT_RX_ENTRY(empty_rx_ring), + ENA_STAT_RX_ENTRY(csum_unchecked), }; static const struct ena_stats ena_stats_ena_com_strings[] = { diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index d906293ce07d..284a0a612131 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -39,7 +39,6 @@ #include <linux/if_vlan.h> #include <linux/kernel.h> #include <linux/module.h> -#include <linux/moduleparam.h> #include <linux/numa.h> #include <linux/pci.h> #include <linux/utsname.h> @@ -238,6 +237,17 @@ static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid) } } + size = tx_ring->tx_max_header_size; + tx_ring->push_buf_intermediate_buf = vzalloc_node(size, node); + if (!tx_ring->push_buf_intermediate_buf) { + 
tx_ring->push_buf_intermediate_buf = vzalloc(size); + if (!tx_ring->push_buf_intermediate_buf) { + vfree(tx_ring->tx_buffer_info); + vfree(tx_ring->free_tx_ids); + return -ENOMEM; + } + } + /* Req id ring for TX out of order completions */ for (i = 0; i < tx_ring->ring_size; i++) tx_ring->free_tx_ids[i] = i; @@ -266,6 +276,9 @@ static void ena_free_tx_resources(struct ena_adapter *adapter, int qid) vfree(tx_ring->free_tx_ids); tx_ring->free_tx_ids = NULL; + + vfree(tx_ring->push_buf_intermediate_buf); + tx_ring->push_buf_intermediate_buf = NULL; } /* ena_setup_all_tx_resources - allocate I/O Tx queues resources for All queues @@ -603,6 +616,36 @@ static void ena_free_all_rx_bufs(struct ena_adapter *adapter) ena_free_rx_bufs(adapter, i); } +static inline void ena_unmap_tx_skb(struct ena_ring *tx_ring, + struct ena_tx_buffer *tx_info) +{ + struct ena_com_buf *ena_buf; + u32 cnt; + int i; + + ena_buf = tx_info->bufs; + cnt = tx_info->num_of_bufs; + + if (unlikely(!cnt)) + return; + + if (tx_info->map_linear_data) { + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(ena_buf, paddr), + dma_unmap_len(ena_buf, len), + DMA_TO_DEVICE); + ena_buf++; + cnt--; + } + + /* unmap remaining mapped pages */ + for (i = 0; i < cnt; i++) { + dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr), + dma_unmap_len(ena_buf, len), DMA_TO_DEVICE); + ena_buf++; + } +} + /* ena_free_tx_bufs - Free Tx Buffers per Queue * @tx_ring: TX ring for which buffers be freed */ @@ -613,9 +656,6 @@ static void ena_free_tx_bufs(struct ena_ring *tx_ring) for (i = 0; i < tx_ring->ring_size; i++) { struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i]; - struct ena_com_buf *ena_buf; - int nr_frags; - int j; if (!tx_info->skb) continue; @@ -631,21 +671,7 @@ static void ena_free_tx_bufs(struct ena_ring *tx_ring) tx_ring->qid, i); } - ena_buf = tx_info->bufs; - dma_unmap_single(tx_ring->dev, - ena_buf->paddr, - ena_buf->len, - DMA_TO_DEVICE); - - /* unmap remaining mapped pages */ - nr_frags = tx_info->num_of_bufs - 1; - for (j = 0; j < nr_frags; j++) { - ena_buf++; - dma_unmap_page(tx_ring->dev, - ena_buf->paddr, - ena_buf->len, - DMA_TO_DEVICE); - } + ena_unmap_tx_skb(tx_ring, tx_info); dev_kfree_skb_any(tx_info->skb); } @@ -736,8 +762,6 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget) while (tx_pkts < budget) { struct ena_tx_buffer *tx_info; struct sk_buff *skb; - struct ena_com_buf *ena_buf; - int i, nr_frags; rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id); @@ -757,24 +781,7 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget) tx_info->skb = NULL; tx_info->last_jiffies = 0; - if (likely(tx_info->num_of_bufs != 0)) { - ena_buf = tx_info->bufs; - - dma_unmap_single(tx_ring->dev, - dma_unmap_addr(ena_buf, paddr), - dma_unmap_len(ena_buf, len), - DMA_TO_DEVICE); - - /* unmap remaining mapped pages */ - nr_frags = tx_info->num_of_bufs - 1; - for (i = 0; i < nr_frags; i++) { - ena_buf++; - dma_unmap_page(tx_ring->dev, - dma_unmap_addr(ena_buf, paddr), - dma_unmap_len(ena_buf, len), - DMA_TO_DEVICE); - } - } + ena_unmap_tx_skb(tx_ring, tx_info); netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev, "tx_poll: q %d skb %p completed\n", tx_ring->qid, @@ -805,12 +812,13 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget) */ smp_mb(); - above_thresh = ena_com_sq_empty_space(tx_ring->ena_com_io_sq) > - ENA_TX_WAKEUP_THRESH; + above_thresh = ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, + ENA_TX_WAKEUP_THRESH); if (unlikely(netif_tx_queue_stopped(txq) && 
above_thresh)) { __netif_tx_lock(txq, smp_processor_id()); - above_thresh = ena_com_sq_empty_space(tx_ring->ena_com_io_sq) > - ENA_TX_WAKEUP_THRESH; + above_thresh = + ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, + ENA_TX_WAKEUP_THRESH); if (netif_tx_queue_stopped(txq) && above_thresh) { netif_tx_wake_queue(txq); u64_stats_update_begin(&tx_ring->syncp); @@ -986,8 +994,19 @@ static inline void ena_rx_checksum(struct ena_ring *rx_ring, return; } - skb->ip_summed = CHECKSUM_UNNECESSARY; + if (likely(ena_rx_ctx->l4_csum_checked)) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + } else { + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->rx_stats.csum_unchecked++; + u64_stats_update_end(&rx_ring->syncp); + skb->ip_summed = CHECKSUM_NONE; + } + } else { + skb->ip_summed = CHECKSUM_NONE; + return; } + } static void ena_set_rx_hash(struct ena_ring *rx_ring, @@ -1102,8 +1121,10 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi, rx_ring->next_to_clean = next_to_clean; - refill_required = ena_com_sq_empty_space(rx_ring->ena_com_io_sq); - refill_threshold = rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER; + refill_required = ena_com_free_desc(rx_ring->ena_com_io_sq); + refill_threshold = + min_t(int, rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER, + ENA_RX_REFILL_THRESH_PACKET); /* Optimization, try to batch new rx buffers */ if (refill_required > refill_threshold) { @@ -1300,7 +1321,6 @@ static int ena_enable_msix(struct ena_adapter *adapter, int num_queues) /* Reserved the max msix vectors we might need */ msix_vecs = ENA_MAX_MSIX_VEC(num_queues); - netif_dbg(adapter, probe, adapter->netdev, "trying to enable MSI-X, vectors %d\n", msix_vecs); @@ -1591,7 +1611,7 @@ static int ena_up_complete(struct ena_adapter *adapter) static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid) { - struct ena_com_create_io_ctx ctx = { 0 }; + struct ena_com_create_io_ctx ctx; struct ena_com_dev *ena_dev; struct ena_ring *tx_ring; u32 msix_vector; @@ -1604,6 +1624,8 @@ static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid) msix_vector = ENA_IO_IRQ_IDX(qid); ena_qid = ENA_IO_TXQ_IDX(qid); + memset(&ctx, 0x0, sizeof(ctx)); + ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX; ctx.qid = ena_qid; ctx.mem_queue_type = ena_dev->tx_mem_queue_type; @@ -1657,7 +1679,7 @@ create_err: static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid) { struct ena_com_dev *ena_dev; - struct ena_com_create_io_ctx ctx = { 0 }; + struct ena_com_create_io_ctx ctx; struct ena_ring *rx_ring; u32 msix_vector; u16 ena_qid; @@ -1669,6 +1691,8 @@ static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid) msix_vector = ENA_IO_IRQ_IDX(qid); ena_qid = ENA_IO_RXQ_IDX(qid); + memset(&ctx, 0x0, sizeof(ctx)); + ctx.qid = ena_qid; ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX; ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; @@ -1986,73 +2010,70 @@ static int ena_check_and_linearize_skb(struct ena_ring *tx_ring, return rc; } -/* Called with netif_tx_lock. 
*/ -static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev) +static int ena_tx_map_skb(struct ena_ring *tx_ring, + struct ena_tx_buffer *tx_info, + struct sk_buff *skb, + void **push_hdr, + u16 *header_len) { - struct ena_adapter *adapter = netdev_priv(dev); - struct ena_tx_buffer *tx_info; - struct ena_com_tx_ctx ena_tx_ctx; - struct ena_ring *tx_ring; - struct netdev_queue *txq; + struct ena_adapter *adapter = tx_ring->adapter; struct ena_com_buf *ena_buf; - void *push_hdr; - u32 len, last_frag; - u16 next_to_use; - u16 req_id; - u16 push_len; - u16 header_len; dma_addr_t dma; - int qid, rc, nb_hw_desc; - int i = -1; - - netif_dbg(adapter, tx_queued, dev, "%s skb %p\n", __func__, skb); - /* Determine which tx ring we will be placed on */ - qid = skb_get_queue_mapping(skb); - tx_ring = &adapter->tx_ring[qid]; - txq = netdev_get_tx_queue(dev, qid); + u32 skb_head_len, frag_len, last_frag; + u16 push_len = 0; + u16 delta = 0; + int i = 0; - rc = ena_check_and_linearize_skb(tx_ring, skb); - if (unlikely(rc)) - goto error_drop_packet; - - skb_tx_timestamp(skb); - len = skb_headlen(skb); - - next_to_use = tx_ring->next_to_use; - req_id = tx_ring->free_tx_ids[next_to_use]; - tx_info = &tx_ring->tx_buffer_info[req_id]; - tx_info->num_of_bufs = 0; - - WARN(tx_info->skb, "SKB isn't NULL req_id %d\n", req_id); - ena_buf = tx_info->bufs; + skb_head_len = skb_headlen(skb); tx_info->skb = skb; + ena_buf = tx_info->bufs; if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { - /* prepared the push buffer */ - push_len = min_t(u32, len, tx_ring->tx_max_header_size); - header_len = push_len; - push_hdr = skb->data; + /* When the device is in LLQ mode, the driver will copy + * the header into the device memory space. + * The ena_com layer assumes the header is in a linear + * memory space. + * This assumption might be wrong since part of the header + * can be in the fragmented buffers. + * Use skb_header_pointer to make sure the header is in a + * linear memory space.
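+ * If the pushed header extends beyond skb_headlen(), the copied bytes + * overlap the first fragment(s); the delta counter below records how + * many fragment bytes were already consumed by the pushed header so + * that the fragment-mapping loop can skip them.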
+ */ + + push_len = min_t(u32, skb->len, tx_ring->tx_max_header_size); + *push_hdr = skb_header_pointer(skb, 0, push_len, + tx_ring->push_buf_intermediate_buf); + *header_len = push_len; + if (unlikely(skb->data != *push_hdr)) { + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->tx_stats.llq_buffer_copy++; + u64_stats_update_end(&tx_ring->syncp); + + delta = push_len - skb_head_len; + } } else { - push_len = 0; - header_len = min_t(u32, len, tx_ring->tx_max_header_size); - push_hdr = NULL; + *push_hdr = NULL; + *header_len = min_t(u32, skb_head_len, + tx_ring->tx_max_header_size); } - netif_dbg(adapter, tx_queued, dev, + netif_dbg(adapter, tx_queued, adapter->netdev, "skb: %p header_buf->vaddr: %p push_len: %d\n", skb, - push_hdr, push_len); + *push_hdr, push_len); - if (len > push_len) { + if (skb_head_len > push_len) { dma = dma_map_single(tx_ring->dev, skb->data + push_len, - len - push_len, DMA_TO_DEVICE); - if (dma_mapping_error(tx_ring->dev, dma)) + skb_head_len - push_len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(tx_ring->dev, dma))) goto error_report_dma_error; ena_buf->paddr = dma; - ena_buf->len = len - push_len; + ena_buf->len = skb_head_len - push_len; ena_buf++; tx_info->num_of_bufs++; + tx_info->map_linear_data = 1; + } else { + tx_info->map_linear_data = 0; } last_frag = skb_shinfo(skb)->nr_frags; @@ -2060,18 +2081,75 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev) for (i = 0; i < last_frag; i++) { const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - len = skb_frag_size(frag); - dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len, - DMA_TO_DEVICE); - if (dma_mapping_error(tx_ring->dev, dma)) + frag_len = skb_frag_size(frag); + + if (unlikely(delta >= frag_len)) { + delta -= frag_len; + continue; + } + + dma = skb_frag_dma_map(tx_ring->dev, frag, delta, + frag_len - delta, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(tx_ring->dev, dma))) goto error_report_dma_error; ena_buf->paddr = dma; - ena_buf->len = len; + ena_buf->len = frag_len - delta; ena_buf++; + tx_info->num_of_bufs++; + delta = 0; } - tx_info->num_of_bufs += last_frag; + return 0; + +error_report_dma_error: + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->tx_stats.dma_mapping_err++; + u64_stats_update_end(&tx_ring->syncp); + netdev_warn(adapter->netdev, "failed to map skb\n"); + + tx_info->skb = NULL; + + tx_info->num_of_bufs += i; + ena_unmap_tx_skb(tx_ring, tx_info); + + return -EINVAL; +} + +/* Called with netif_tx_lock. 
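+ * The per-queue xmit lock serializes senders on this TX ring, so + * next_to_use and free_tx_ids can be updated without extra locking.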
*/ +static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct ena_adapter *adapter = netdev_priv(dev); + struct ena_tx_buffer *tx_info; + struct ena_com_tx_ctx ena_tx_ctx; + struct ena_ring *tx_ring; + struct netdev_queue *txq; + void *push_hdr; + u16 next_to_use, req_id, header_len; + int qid, rc, nb_hw_desc; + + netif_dbg(adapter, tx_queued, dev, "%s skb %p\n", __func__, skb); + /* Determine which tx ring we will be placed on */ + qid = skb_get_queue_mapping(skb); + tx_ring = &adapter->tx_ring[qid]; + txq = netdev_get_tx_queue(dev, qid); + + rc = ena_check_and_linearize_skb(tx_ring, skb); + if (unlikely(rc)) + goto error_drop_packet; + + skb_tx_timestamp(skb); + + next_to_use = tx_ring->next_to_use; + req_id = tx_ring->free_tx_ids[next_to_use]; + tx_info = &tx_ring->tx_buffer_info[req_id]; + tx_info->num_of_bufs = 0; + + WARN(tx_info->skb, "SKB isn't NULL req_id %d\n", req_id); + + rc = ena_tx_map_skb(tx_ring, tx_info, skb, &push_hdr, &header_len); + if (unlikely(rc)) + goto error_drop_packet; memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx)); ena_tx_ctx.ena_bufs = tx_info->bufs; @@ -2087,14 +2165,22 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev) rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, &ena_tx_ctx, &nb_hw_desc); + /* ena_com_prepare_tx() can't fail due to overflow of tx queue, + * since the number of free descriptors in the queue is checked + * after sending the previous packet. In case there isn't enough + * space in the queue for the next packet, it is stopped + * until there is again enough available space in the queue. + * All other failure reasons of ena_com_prepare_tx() are fatal + * and therefore require a device reset. + */ if (unlikely(rc)) { netif_err(adapter, tx_queued, dev, "failed to prepare tx bufs\n"); u64_stats_update_begin(&tx_ring->syncp); - tx_ring->tx_stats.queue_stop++; tx_ring->tx_stats.prepare_ctx_err++; u64_stats_update_end(&tx_ring->syncp); - netif_tx_stop_queue(txq); + adapter->reset_reason = ENA_REGS_RESET_DRIVER_INVALID_STATE; + set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); goto error_unmap_dma; } @@ -2116,8 +2202,8 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev) * to sgl_size + 2. one for the meta descriptor and one for header * (if the header is larger than tx_max_header_size). 
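 * Stopping the queue can race with a TX completion that frees * descriptors; the code below re-checks the available space after an * smp_mb() and wakes the queue back up if enough room appeared in the * meantime.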
*/ - if (unlikely(ena_com_sq_empty_space(tx_ring->ena_com_io_sq) < - (tx_ring->sgl_size + 2))) { + if (unlikely(!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, + tx_ring->sgl_size + 2))) { netif_dbg(adapter, tx_queued, dev, "%s stop queue %d\n", __func__, qid); @@ -2136,8 +2222,8 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev) */ smp_mb(); - if (ena_com_sq_empty_space(tx_ring->ena_com_io_sq) - > ENA_TX_WAKEUP_THRESH) { + if (ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, + ENA_TX_WAKEUP_THRESH)) { netif_tx_wake_queue(txq); u64_stats_update_begin(&tx_ring->syncp); tx_ring->tx_stats.queue_wakeup++; @@ -2157,35 +2243,11 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; -error_report_dma_error: - u64_stats_update_begin(&tx_ring->syncp); - tx_ring->tx_stats.dma_mapping_err++; - u64_stats_update_end(&tx_ring->syncp); - netdev_warn(adapter->netdev, "failed to map skb\n"); - - tx_info->skb = NULL; - error_unmap_dma: - if (i >= 0) { - /* save value of frag that failed */ - last_frag = i; - - /* start back at beginning and unmap skb */ - tx_info->skb = NULL; - ena_buf = tx_info->bufs; - dma_unmap_single(tx_ring->dev, dma_unmap_addr(ena_buf, paddr), - dma_unmap_len(ena_buf, len), DMA_TO_DEVICE); - - /* unmap remaining mapped pages */ - for (i = 0; i < last_frag; i++) { - ena_buf++; - dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr), - dma_unmap_len(ena_buf, len), DMA_TO_DEVICE); - } - } + ena_unmap_tx_skb(tx_ring, tx_info); + tx_info->skb = NULL; error_drop_packet: - dev_kfree_skb(skb); return NETDEV_TX_OK; } @@ -2207,7 +2269,8 @@ static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb, return qid; } -static void ena_config_host_info(struct ena_com_dev *ena_dev) +static void ena_config_host_info(struct ena_com_dev *ena_dev, + struct pci_dev *pdev) { struct ena_admin_host_info *host_info; int rc; @@ -2221,6 +2284,7 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev) host_info = ena_dev->host_attr.host_info; + host_info->bdf = (pdev->bus->number << 8) | pdev->devfn; host_info->os_type = ENA_ADMIN_OS_LINUX; host_info->kernel_ver = LINUX_VERSION_CODE; strncpy(host_info->kernel_ver_str, utsname()->version, @@ -2231,7 +2295,9 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev) host_info->driver_version = (DRV_MODULE_VER_MAJOR) | (DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) | - (DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT); + (DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT) | + ("K"[0] << ENA_ADMIN_HOST_INFO_MODULE_TYPE_SHIFT); + host_info->num_cpus = num_online_cpus(); rc = ena_com_set_host_attributes(ena_dev); if (rc) { @@ -2442,7 +2508,7 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev, } /* ENA admin level init */ - rc = ena_com_admin_init(ena_dev, &aenq_handlers, true); + rc = ena_com_admin_init(ena_dev, &aenq_handlers); if (rc) { dev_err(dev, "Can not initialize ena admin queue with device\n"); @@ -2455,7 +2521,7 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev, */ ena_com_set_admin_polling_mode(ena_dev, true); - ena_config_host_info(ena_dev); + ena_config_host_info(ena_dev, pdev); /* Get Device Attributes*/ rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx); @@ -2540,15 +2606,14 @@ static void ena_destroy_device(struct ena_adapter *adapter, bool graceful) dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags); adapter->dev_up_before_reset = 
dev_up; - if (!graceful) ena_com_set_admin_running_state(ena_dev, false); if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) ena_down(adapter); - /* Before releasing the ENA resources, a device reset is required. - * (to prevent the device from accessing them). + /* Stop the device from sending AENQ events (in case the reset flag is + * set and the device is up, ena_close has already reset the device). * In case the reset flag is set and the device is up, ena_down() * already performs the reset, so it can be skipped. */ @@ -2617,7 +2682,9 @@ static int ena_restore_device(struct ena_adapter *adapter) set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ)); - dev_err(&pdev->dev, "Device reset completed successfully\n"); + dev_err(&pdev->dev, + "Device reset completed successfully, Driver info: %s\n", + version); return rc; err_disable_msix: @@ -2810,7 +2877,7 @@ static void check_for_empty_rx_ring(struct ena_adapter *adapter) rx_ring = &adapter->rx_ring[i]; refill_required = - ena_com_sq_empty_space(rx_ring->ena_com_io_sq); + ena_com_free_desc(rx_ring->ena_com_io_sq); if (unlikely(refill_required == (rx_ring->ring_size - 1))) { rx_ring->empty_rx_queue++; @@ -2956,7 +3023,7 @@ static int ena_calc_io_queue_num(struct pci_dev *pdev, /* In case of LLQ use the llq number in the get feature cmd */ if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { - io_sq_num = get_feat_ctx->max_queues.max_llq_num; + io_sq_num = get_feat_ctx->max_queues.max_legacy_llq_num; if (io_sq_num == 0) { dev_err(&pdev->dev, @@ -2984,18 +3051,52 @@ static int ena_calc_io_queue_num(struct pci_dev *pdev, return io_queue_num; } -static void ena_set_push_mode(struct pci_dev *pdev, struct ena_com_dev *ena_dev, - struct ena_com_dev_get_features_ctx *get_feat_ctx) +static int ena_set_queues_placement_policy(struct pci_dev *pdev, + struct ena_com_dev *ena_dev, + struct ena_admin_feature_llq_desc *llq, + struct ena_llq_configurations *llq_default_configurations) { bool has_mem_bar; + int rc; + u32 llq_feature_mask; + + llq_feature_mask = 1 << ENA_ADMIN_LLQ; + if (!(ena_dev->supported_features & llq_feature_mask)) { + dev_err(&pdev->dev, + "LLQ is not supported. Fallback to host mode policy.\n"); + ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; + return 0; + } has_mem_bar = pci_select_bars(pdev, IORESOURCE_MEM) & BIT(ENA_MEM_BAR); - /* Enable push mode if device supports LLQ */ - if (has_mem_bar && (get_feat_ctx->max_queues.max_llq_num > 0)) - ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV; - else + rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations); + if (unlikely(rc)) { + dev_err(&pdev->dev, + "Failed to configure the device mode. Fallback to host mode policy.\n"); ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; + return 0; + } + + /* Nothing to config, exit */ + if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) + return 0; + + if (!has_mem_bar) { + dev_err(&pdev->dev, + "ENA device does not expose LLQ bar.
Fallback to host mode policy.\n"); + ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; + return 0; + } + + ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev, + pci_resource_start(pdev, ENA_MEM_BAR), + pci_resource_len(pdev, ENA_MEM_BAR)); + + if (!ena_dev->mem_bar) + return -EFAULT; + + return 0; } static void ena_set_dev_offloads(struct ena_com_dev_get_features_ctx *feat, @@ -3113,6 +3214,15 @@ static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev) pci_release_selected_regions(pdev, release_bars); } +static inline void set_default_llq_configurations(struct ena_llq_configurations *llq_config) +{ + llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER; + llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B; + llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY; + llq_config->llq_num_decs_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2; + llq_config->llq_ring_entry_size_value = 128; +} + static int ena_calc_queue_size(struct pci_dev *pdev, struct ena_com_dev *ena_dev, u16 *max_tx_sgl_size, @@ -3128,7 +3238,7 @@ static int ena_calc_queue_size(struct pci_dev *pdev, if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) queue_size = min_t(u32, queue_size, - get_feat_ctx->max_queues.max_llq_depth); + get_feat_ctx->max_queues.max_legacy_llq_depth); queue_size = rounddown_pow_of_two(queue_size); @@ -3161,7 +3271,9 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) static int version_printed; struct net_device *netdev; struct ena_adapter *adapter; + struct ena_llq_configurations llq_config; struct ena_com_dev *ena_dev = NULL; + char *queue_type_str; static int adapters_found; int io_queue_num, bars, rc; int queue_size; @@ -3215,16 +3327,13 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_free_region; } - ena_set_push_mode(pdev, ena_dev, &get_feat_ctx); + set_default_llq_configurations(&llq_config); - if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { - ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev, - pci_resource_start(pdev, ENA_MEM_BAR), - pci_resource_len(pdev, ENA_MEM_BAR)); - if (!ena_dev->mem_bar) { - rc = -EFAULT; - goto err_device_destroy; - } + rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx.llq, + &llq_config); + if (rc) { + dev_err(&pdev->dev, "ena device init failed\n"); + goto err_device_destroy; } /* initial Tx interrupt delay, Assumes 1 usec granularity. @@ -3239,8 +3348,10 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_device_destroy; } - dev_info(&pdev->dev, "creating %d io queues. queue size: %d\n", - io_queue_num, queue_size); + dev_info(&pdev->dev, "creating %d io queues. queue size: %d. LLQ is %s\n", + io_queue_num, queue_size, + (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) ? 
+ "ENABLED" : "DISABLED"); /* dev zeroed in init_etherdev */ netdev = alloc_etherdev_mq(sizeof(struct ena_adapter), io_queue_num); @@ -3330,9 +3441,15 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) timer_setup(&adapter->timer_service, ena_timer_service, 0); mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ)); - dev_info(&pdev->dev, "%s found at mem %lx, mac addr %pM Queues %d\n", + if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) + queue_type_str = "Regular"; + else + queue_type_str = "Low Latency"; + + dev_info(&pdev->dev, + "%s found at mem %lx, mac addr %pM Queues %d, Placement policy: %s\n", DEVICE_NAME, (long)pci_resource_start(pdev, 0), - netdev->dev_addr, io_queue_num); + netdev->dev_addr, io_queue_num, queue_type_str); set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h index 7c7ae56c52cf..521873642339 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.h +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h @@ -43,9 +43,9 @@ #include "ena_com.h" #include "ena_eth_com.h" -#define DRV_MODULE_VER_MAJOR 1 -#define DRV_MODULE_VER_MINOR 5 -#define DRV_MODULE_VER_SUBMINOR 0 +#define DRV_MODULE_VER_MAJOR 2 +#define DRV_MODULE_VER_MINOR 0 +#define DRV_MODULE_VER_SUBMINOR 1 #define DRV_MODULE_NAME "ena" #ifndef DRV_MODULE_VERSION @@ -61,6 +61,17 @@ #define ENA_ADMIN_MSIX_VEC 1 #define ENA_MAX_MSIX_VEC(io_queues) (ENA_ADMIN_MSIX_VEC + (io_queues)) +/* The ENA buffer length fields is 16 bit long. So when PAGE_SIZE == 64kB the + * driver passes 0. + * Since the max packet size the ENA handles is ~9kB limit the buffer length to + * 16kB. + */ +#if PAGE_SIZE > SZ_16K +#define ENA_PAGE_SIZE SZ_16K +#else +#define ENA_PAGE_SIZE PAGE_SIZE +#endif + #define ENA_MIN_MSIX_VEC 2 #define ENA_REG_BAR 0 @@ -70,7 +81,7 @@ #define ENA_DEFAULT_RING_SIZE (1024) #define ENA_TX_WAKEUP_THRESH (MAX_SKB_FRAGS + 2) -#define ENA_DEFAULT_RX_COPYBREAK (128 - NET_IP_ALIGN) +#define ENA_DEFAULT_RX_COPYBREAK (256 - NET_IP_ALIGN) /* limit the buffer size to 600 bytes to handle MTU changes from very * small to very large, in which case the number of buffers per packet @@ -95,10 +106,11 @@ */ #define ENA_TX_POLL_BUDGET_DIVIDER 4 -/* Refill Rx queue when number of available descriptors is below - * QUEUE_SIZE / ENA_RX_REFILL_THRESH_DIVIDER +/* Refill Rx queue when number of required descriptors is above + * QUEUE_SIZE / ENA_RX_REFILL_THRESH_DIVIDER or ENA_RX_REFILL_THRESH_PACKET */ #define ENA_RX_REFILL_THRESH_DIVIDER 8 +#define ENA_RX_REFILL_THRESH_PACKET 256 /* Number of queues to check for missing queues per timer service */ #define ENA_MONITORED_TX_QUEUES 4 @@ -151,6 +163,9 @@ struct ena_tx_buffer { /* num of buffers used by this skb */ u32 num_of_bufs; + /* Indicate if bufs[0] map the linear data of the skb. 
*/ + u8 map_linear_data; + /* Used for detect missing tx packets to limit the number of prints */ u32 print_once; /* Save the last jiffies to detect missing tx packets @@ -186,6 +201,7 @@ struct ena_stats_tx { u64 tx_poll; u64 doorbells; u64 bad_req_id; + u64 llq_buffer_copy; u64 missed_tx; }; @@ -201,6 +217,7 @@ struct ena_stats_rx { u64 rx_copybreak_pkt; u64 bad_req_id; u64 empty_rx_ring; + u64 csum_unchecked; }; struct ena_ring { @@ -257,6 +274,8 @@ struct ena_ring { struct ena_stats_tx tx_stats; struct ena_stats_rx rx_stats; }; + + u8 *push_buf_intermediate_buf; int empty_rx_queue; } ____cacheline_aligned; @@ -355,15 +374,4 @@ void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf); int ena_get_sset_count(struct net_device *netdev, int sset); -/* The ENA buffer length fields is 16 bit long. So when PAGE_SIZE == 64kB the - * driver passas 0. - * Since the max packet size the ENA handles is ~9kB limit the buffer length to - * 16kB. - */ -#if PAGE_SIZE > SZ_16K -#define ENA_PAGE_SIZE SZ_16K -#else -#define ENA_PAGE_SIZE PAGE_SIZE -#endif - #endif /* !(ENA_H) */ diff --git a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h index 48ca97fbe7bc..04fcafcc059c 100644 --- a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h +++ b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h @@ -33,137 +33,125 @@ #define _ENA_REGS_H_ enum ena_regs_reset_reason_types { - ENA_REGS_RESET_NORMAL = 0, - - ENA_REGS_RESET_KEEP_ALIVE_TO = 1, - - ENA_REGS_RESET_ADMIN_TO = 2, - - ENA_REGS_RESET_MISS_TX_CMPL = 3, - - ENA_REGS_RESET_INV_RX_REQ_ID = 4, - - ENA_REGS_RESET_INV_TX_REQ_ID = 5, - - ENA_REGS_RESET_TOO_MANY_RX_DESCS = 6, - - ENA_REGS_RESET_INIT_ERR = 7, - - ENA_REGS_RESET_DRIVER_INVALID_STATE = 8, - - ENA_REGS_RESET_OS_TRIGGER = 9, - - ENA_REGS_RESET_OS_NETDEV_WD = 10, - - ENA_REGS_RESET_SHUTDOWN = 11, - - ENA_REGS_RESET_USER_TRIGGER = 12, - - ENA_REGS_RESET_GENERIC = 13, - - ENA_REGS_RESET_MISS_INTERRUPT = 14, + ENA_REGS_RESET_NORMAL = 0, + ENA_REGS_RESET_KEEP_ALIVE_TO = 1, + ENA_REGS_RESET_ADMIN_TO = 2, + ENA_REGS_RESET_MISS_TX_CMPL = 3, + ENA_REGS_RESET_INV_RX_REQ_ID = 4, + ENA_REGS_RESET_INV_TX_REQ_ID = 5, + ENA_REGS_RESET_TOO_MANY_RX_DESCS = 6, + ENA_REGS_RESET_INIT_ERR = 7, + ENA_REGS_RESET_DRIVER_INVALID_STATE = 8, + ENA_REGS_RESET_OS_TRIGGER = 9, + ENA_REGS_RESET_OS_NETDEV_WD = 10, + ENA_REGS_RESET_SHUTDOWN = 11, + ENA_REGS_RESET_USER_TRIGGER = 12, + ENA_REGS_RESET_GENERIC = 13, + ENA_REGS_RESET_MISS_INTERRUPT = 14, }; /* ena_registers offsets */ -#define ENA_REGS_VERSION_OFF 0x0 -#define ENA_REGS_CONTROLLER_VERSION_OFF 0x4 -#define ENA_REGS_CAPS_OFF 0x8 -#define ENA_REGS_CAPS_EXT_OFF 0xc -#define ENA_REGS_AQ_BASE_LO_OFF 0x10 -#define ENA_REGS_AQ_BASE_HI_OFF 0x14 -#define ENA_REGS_AQ_CAPS_OFF 0x18 -#define ENA_REGS_ACQ_BASE_LO_OFF 0x20 -#define ENA_REGS_ACQ_BASE_HI_OFF 0x24 -#define ENA_REGS_ACQ_CAPS_OFF 0x28 -#define ENA_REGS_AQ_DB_OFF 0x2c -#define ENA_REGS_ACQ_TAIL_OFF 0x30 -#define ENA_REGS_AENQ_CAPS_OFF 0x34 -#define ENA_REGS_AENQ_BASE_LO_OFF 0x38 -#define ENA_REGS_AENQ_BASE_HI_OFF 0x3c -#define ENA_REGS_AENQ_HEAD_DB_OFF 0x40 -#define ENA_REGS_AENQ_TAIL_OFF 0x44 -#define ENA_REGS_INTR_MASK_OFF 0x4c -#define ENA_REGS_DEV_CTL_OFF 0x54 -#define ENA_REGS_DEV_STS_OFF 0x58 -#define ENA_REGS_MMIO_REG_READ_OFF 0x5c -#define ENA_REGS_MMIO_RESP_LO_OFF 0x60 -#define ENA_REGS_MMIO_RESP_HI_OFF 0x64 -#define ENA_REGS_RSS_IND_ENTRY_UPDATE_OFF 0x68 + +/* 0 base */ +#define ENA_REGS_VERSION_OFF 0x0 +#define ENA_REGS_CONTROLLER_VERSION_OFF 0x4 +#define 
ENA_REGS_CAPS_OFF 0x8 +#define ENA_REGS_CAPS_EXT_OFF 0xc +#define ENA_REGS_AQ_BASE_LO_OFF 0x10 +#define ENA_REGS_AQ_BASE_HI_OFF 0x14 +#define ENA_REGS_AQ_CAPS_OFF 0x18 +#define ENA_REGS_ACQ_BASE_LO_OFF 0x20 +#define ENA_REGS_ACQ_BASE_HI_OFF 0x24 +#define ENA_REGS_ACQ_CAPS_OFF 0x28 +#define ENA_REGS_AQ_DB_OFF 0x2c +#define ENA_REGS_ACQ_TAIL_OFF 0x30 +#define ENA_REGS_AENQ_CAPS_OFF 0x34 +#define ENA_REGS_AENQ_BASE_LO_OFF 0x38 +#define ENA_REGS_AENQ_BASE_HI_OFF 0x3c +#define ENA_REGS_AENQ_HEAD_DB_OFF 0x40 +#define ENA_REGS_AENQ_TAIL_OFF 0x44 +#define ENA_REGS_INTR_MASK_OFF 0x4c +#define ENA_REGS_DEV_CTL_OFF 0x54 +#define ENA_REGS_DEV_STS_OFF 0x58 +#define ENA_REGS_MMIO_REG_READ_OFF 0x5c +#define ENA_REGS_MMIO_RESP_LO_OFF 0x60 +#define ENA_REGS_MMIO_RESP_HI_OFF 0x64 +#define ENA_REGS_RSS_IND_ENTRY_UPDATE_OFF 0x68 /* version register */ -#define ENA_REGS_VERSION_MINOR_VERSION_MASK 0xff -#define ENA_REGS_VERSION_MAJOR_VERSION_SHIFT 8 -#define ENA_REGS_VERSION_MAJOR_VERSION_MASK 0xff00 +#define ENA_REGS_VERSION_MINOR_VERSION_MASK 0xff +#define ENA_REGS_VERSION_MAJOR_VERSION_SHIFT 8 +#define ENA_REGS_VERSION_MAJOR_VERSION_MASK 0xff00 /* controller_version register */ -#define ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK 0xff -#define ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT 8 -#define ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK 0xff00 -#define ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT 16 -#define ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK 0xff0000 -#define ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT 24 -#define ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK 0xff000000 +#define ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK 0xff +#define ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT 8 +#define ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK 0xff00 +#define ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT 16 +#define ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK 0xff0000 +#define ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT 24 +#define ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK 0xff000000 /* caps register */ -#define ENA_REGS_CAPS_CONTIGUOUS_QUEUE_REQUIRED_MASK 0x1 -#define ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT 1 -#define ENA_REGS_CAPS_RESET_TIMEOUT_MASK 0x3e -#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT 8 -#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK 0xff00 -#define ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT 16 -#define ENA_REGS_CAPS_ADMIN_CMD_TO_MASK 0xf0000 +#define ENA_REGS_CAPS_CONTIGUOUS_QUEUE_REQUIRED_MASK 0x1 +#define ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT 1 +#define ENA_REGS_CAPS_RESET_TIMEOUT_MASK 0x3e +#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT 8 +#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK 0xff00 +#define ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT 16 +#define ENA_REGS_CAPS_ADMIN_CMD_TO_MASK 0xf0000 /* aq_caps register */ -#define ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK 0xffff -#define ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT 16 -#define ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK 0xffff0000 +#define ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK 0xffff +#define ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT 16 +#define ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK 0xffff0000 /* acq_caps register */ -#define ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK 0xffff -#define ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT 16 -#define ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK 0xffff0000 +#define ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK 0xffff +#define ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT 16 +#define ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK 0xffff0000 /* aenq_caps register */ -#define ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK 0xffff -#define ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT 16 -#define 
ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK 0xffff0000 +#define ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK 0xffff +#define ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT 16 +#define ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK 0xffff0000 /* dev_ctl register */ -#define ENA_REGS_DEV_CTL_DEV_RESET_MASK 0x1 -#define ENA_REGS_DEV_CTL_AQ_RESTART_SHIFT 1 -#define ENA_REGS_DEV_CTL_AQ_RESTART_MASK 0x2 -#define ENA_REGS_DEV_CTL_QUIESCENT_SHIFT 2 -#define ENA_REGS_DEV_CTL_QUIESCENT_MASK 0x4 -#define ENA_REGS_DEV_CTL_IO_RESUME_SHIFT 3 -#define ENA_REGS_DEV_CTL_IO_RESUME_MASK 0x8 -#define ENA_REGS_DEV_CTL_RESET_REASON_SHIFT 28 -#define ENA_REGS_DEV_CTL_RESET_REASON_MASK 0xf0000000 +#define ENA_REGS_DEV_CTL_DEV_RESET_MASK 0x1 +#define ENA_REGS_DEV_CTL_AQ_RESTART_SHIFT 1 +#define ENA_REGS_DEV_CTL_AQ_RESTART_MASK 0x2 +#define ENA_REGS_DEV_CTL_QUIESCENT_SHIFT 2 +#define ENA_REGS_DEV_CTL_QUIESCENT_MASK 0x4 +#define ENA_REGS_DEV_CTL_IO_RESUME_SHIFT 3 +#define ENA_REGS_DEV_CTL_IO_RESUME_MASK 0x8 +#define ENA_REGS_DEV_CTL_RESET_REASON_SHIFT 28 +#define ENA_REGS_DEV_CTL_RESET_REASON_MASK 0xf0000000 /* dev_sts register */ -#define ENA_REGS_DEV_STS_READY_MASK 0x1 -#define ENA_REGS_DEV_STS_AQ_RESTART_IN_PROGRESS_SHIFT 1 -#define ENA_REGS_DEV_STS_AQ_RESTART_IN_PROGRESS_MASK 0x2 -#define ENA_REGS_DEV_STS_AQ_RESTART_FINISHED_SHIFT 2 -#define ENA_REGS_DEV_STS_AQ_RESTART_FINISHED_MASK 0x4 -#define ENA_REGS_DEV_STS_RESET_IN_PROGRESS_SHIFT 3 -#define ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK 0x8 -#define ENA_REGS_DEV_STS_RESET_FINISHED_SHIFT 4 -#define ENA_REGS_DEV_STS_RESET_FINISHED_MASK 0x10 -#define ENA_REGS_DEV_STS_FATAL_ERROR_SHIFT 5 -#define ENA_REGS_DEV_STS_FATAL_ERROR_MASK 0x20 -#define ENA_REGS_DEV_STS_QUIESCENT_STATE_IN_PROGRESS_SHIFT 6 -#define ENA_REGS_DEV_STS_QUIESCENT_STATE_IN_PROGRESS_MASK 0x40 -#define ENA_REGS_DEV_STS_QUIESCENT_STATE_ACHIEVED_SHIFT 7 -#define ENA_REGS_DEV_STS_QUIESCENT_STATE_ACHIEVED_MASK 0x80 +#define ENA_REGS_DEV_STS_READY_MASK 0x1 +#define ENA_REGS_DEV_STS_AQ_RESTART_IN_PROGRESS_SHIFT 1 +#define ENA_REGS_DEV_STS_AQ_RESTART_IN_PROGRESS_MASK 0x2 +#define ENA_REGS_DEV_STS_AQ_RESTART_FINISHED_SHIFT 2 +#define ENA_REGS_DEV_STS_AQ_RESTART_FINISHED_MASK 0x4 +#define ENA_REGS_DEV_STS_RESET_IN_PROGRESS_SHIFT 3 +#define ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK 0x8 +#define ENA_REGS_DEV_STS_RESET_FINISHED_SHIFT 4 +#define ENA_REGS_DEV_STS_RESET_FINISHED_MASK 0x10 +#define ENA_REGS_DEV_STS_FATAL_ERROR_SHIFT 5 +#define ENA_REGS_DEV_STS_FATAL_ERROR_MASK 0x20 +#define ENA_REGS_DEV_STS_QUIESCENT_STATE_IN_PROGRESS_SHIFT 6 +#define ENA_REGS_DEV_STS_QUIESCENT_STATE_IN_PROGRESS_MASK 0x40 +#define ENA_REGS_DEV_STS_QUIESCENT_STATE_ACHIEVED_SHIFT 7 +#define ENA_REGS_DEV_STS_QUIESCENT_STATE_ACHIEVED_MASK 0x80 /* mmio_reg_read register */ -#define ENA_REGS_MMIO_REG_READ_REQ_ID_MASK 0xffff -#define ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT 16 -#define ENA_REGS_MMIO_REG_READ_REG_OFF_MASK 0xffff0000 +#define ENA_REGS_MMIO_REG_READ_REQ_ID_MASK 0xffff +#define ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT 16 +#define ENA_REGS_MMIO_REG_READ_REG_OFF_MASK 0xffff0000 /* rss_ind_entry_update register */ -#define ENA_REGS_RSS_IND_ENTRY_UPDATE_INDEX_MASK 0xffff -#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_SHIFT 16 -#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_MASK 0xffff0000 +#define ENA_REGS_RSS_IND_ENTRY_UPDATE_INDEX_MASK 0xffff +#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_SHIFT 16 +#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_MASK 0xffff0000 #endif /*_ENA_REGS_H_ */ diff --git a/drivers/net/ethernet/amd/am79c961a.c b/drivers/net/ethernet/amd/am79c961a.c 
index 01d132c02ff9..265039c57023 100644 --- a/drivers/net/ethernet/amd/am79c961a.c +++ b/drivers/net/ethernet/amd/am79c961a.c @@ -440,7 +440,7 @@ static void am79c961_timeout(struct net_device *dev) /* * Transmit a packet */ -static int +static netdev_tx_t am79c961_sendpacket(struct sk_buff *skb, struct net_device *dev) { struct dev_priv *priv = netdev_priv(dev); diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c index c5b81268c284..d3d44e07afbc 100644 --- a/drivers/net/ethernet/amd/atarilance.c +++ b/drivers/net/ethernet/amd/atarilance.c @@ -339,7 +339,8 @@ static unsigned long lance_probe1( struct net_device *dev, struct lance_addr *init_rec ); static int lance_open( struct net_device *dev ); static void lance_init_ring( struct net_device *dev ); -static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev ); +static netdev_tx_t lance_start_xmit(struct sk_buff *skb, + struct net_device *dev); static irqreturn_t lance_interrupt( int irq, void *dev_id ); static int lance_rx( struct net_device *dev ); static int lance_close( struct net_device *dev ); @@ -769,7 +770,8 @@ static void lance_tx_timeout (struct net_device *dev) /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ -static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev ) +static netdev_tx_t +lance_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); struct lance_ioreg *IO = lp->iobase; diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c index 73ca8879ada7..7c1eb304c27e 100644 --- a/drivers/net/ethernet/amd/au1000_eth.c +++ b/drivers/net/ethernet/amd/au1000_eth.c @@ -564,17 +564,7 @@ static int au1000_mii_probe(struct net_device *dev) return PTR_ERR(phydev); } - /* mask with MAC supported features */ - phydev->supported &= (SUPPORTED_10baseT_Half - | SUPPORTED_10baseT_Full - | SUPPORTED_100baseT_Half - | SUPPORTED_100baseT_Full - | SUPPORTED_Autoneg - /* | SUPPORTED_Pause | SUPPORTED_Asym_Pause */ - | SUPPORTED_MII - | SUPPORTED_TP); - - phydev->advertising = phydev->supported; + phy_set_max_speed(phydev, SPEED_100); aup->old_link = 0; aup->old_speed = 0; diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c index 00332a1ea84b..9f23703dd509 100644 --- a/drivers/net/ethernet/amd/declance.c +++ b/drivers/net/ethernet/amd/declance.c @@ -894,7 +894,7 @@ static void lance_tx_timeout(struct net_device *dev) netif_wake_queue(dev); } -static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t lance_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); volatile struct lance_regs *ll = lp->ll; diff --git a/drivers/net/ethernet/amd/ni65.c b/drivers/net/ethernet/amd/ni65.c index e248d1ab3e47..8931ce6bab7b 100644 --- a/drivers/net/ethernet/amd/ni65.c +++ b/drivers/net/ethernet/amd/ni65.c @@ -435,10 +435,8 @@ static int __init ni65_probe1(struct net_device *dev,int ioaddr) } if(cards[i].vendor_id) { for(j=0;j<3;j++) - if(inb(ioaddr+cards[i].addr_offset+j) != cards[i].vendor_id[j]) { + if(inb(ioaddr+cards[i].addr_offset+j) != cards[i].vendor_id[j]) release_region(ioaddr, cards[i].total_size); - continue; - } } break; } diff --git a/drivers/net/ethernet/amd/sun3lance.c b/drivers/net/ethernet/amd/sun3lance.c index 77b1db267730..da7e3d4f4166 100644 --- a/drivers/net/ethernet/amd/sun3lance.c +++ b/drivers/net/ethernet/amd/sun3lance.c @@ -236,7 +236,8 @@ struct lance_private { 
static int lance_probe( struct net_device *dev); static int lance_open( struct net_device *dev ); static void lance_init_ring( struct net_device *dev ); -static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev ); +static netdev_tx_t lance_start_xmit(struct sk_buff *skb, + struct net_device *dev); static irqreturn_t lance_interrupt( int irq, void *dev_id); static int lance_rx( struct net_device *dev ); static int lance_close( struct net_device *dev ); @@ -511,7 +512,8 @@ static void lance_init_ring( struct net_device *dev ) } -static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev ) +static netdev_tx_t +lance_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); int entry, len; diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c index cdd7a611479b..b4fc0ed5bce8 100644 --- a/drivers/net/ethernet/amd/sunlance.c +++ b/drivers/net/ethernet/amd/sunlance.c @@ -1106,7 +1106,7 @@ static void lance_tx_timeout(struct net_device *dev) netif_wake_queue(dev); } -static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t lance_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); int entry, skblen, len; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 24f1053b8785..d96a84a62d78 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -2009,7 +2009,7 @@ static int xgbe_close(struct net_device *netdev) return 0; } -static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev) +static netdev_tx_t xgbe_xmit(struct sk_buff *skb, struct net_device *netdev) { struct xgbe_prv_data *pdata = netdev_priv(netdev); struct xgbe_hw_if *hw_if = &pdata->hw_if; @@ -2018,7 +2018,7 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev) struct xgbe_ring *ring; struct xgbe_packet_data *packet; struct netdev_queue *txq; - int ret; + netdev_tx_t ret; DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c index 3ceb4f95ca7c..151bdb629e8a 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c @@ -878,9 +878,10 @@ static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata) phy_write(phy_data->phydev, 0x04, 0x0d01); phy_write(phy_data->phydev, 0x00, 0x9140); - phy_data->phydev->supported = PHY_GBIT_FEATURES; - phy_data->phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; - phy_data->phydev->advertising = phy_data->phydev->supported; + phy_data->phydev->supported = PHY_10BT_FEATURES | + PHY_100BT_FEATURES | + PHY_1000BT_FEATURES; + phy_support_asym_pause(phy_data->phydev); netif_dbg(pdata, drv, pdata->netdev, "Finisar PHY quirk in place\n"); @@ -950,9 +951,10 @@ static bool xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata) reg = phy_read(phy_data->phydev, 0x00); phy_write(phy_data->phydev, 0x00, reg & ~0x00800); - phy_data->phydev->supported = PHY_GBIT_FEATURES; - phy_data->phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; - phy_data->phydev->advertising = phy_data->phydev->supported; + phy_data->phydev->supported = (PHY_10BT_FEATURES | + PHY_100BT_FEATURES | + PHY_1000BT_FEATURES); + phy_support_asym_pause(phy_data->phydev); netif_dbg(pdata, drv, pdata->netdev, "BelFuse PHY quirk in place\n"); @@ -1495,10 +1497,7 @@ static void 
xgbe_phy_phydev_flowctrl(struct xgbe_prv_data *pdata) if (!phy_data->phydev) return; - if (phy_data->phydev->advertising & ADVERTISED_Pause) - lcl_adv |= ADVERTISE_PAUSE_CAP; - if (phy_data->phydev->advertising & ADVERTISED_Asym_Pause) - lcl_adv |= ADVERTISE_PAUSE_ASYM; + lcl_adv = ethtool_adv_to_lcl_adv_t(phy_data->phydev->advertising); if (phy_data->phydev->pause) { XGBE_SET_LP_ADV(lks, Pause); diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c b/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c index 4f50f11718f4..78dd09b5beeb 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c @@ -306,45 +306,25 @@ static int xgene_set_pauseparam(struct net_device *ndev, { struct xgene_enet_pdata *pdata = netdev_priv(ndev); struct phy_device *phydev = ndev->phydev; - u32 oldadv, newadv; if (phy_interface_mode_is_rgmii(pdata->phy_mode) || pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) { if (!phydev) return -EINVAL; - if (!(phydev->supported & SUPPORTED_Pause) || - (!(phydev->supported & SUPPORTED_Asym_Pause) && - pp->rx_pause != pp->tx_pause)) + if (!phy_validate_pause(phydev, pp)) return -EINVAL; pdata->pause_autoneg = pp->autoneg; pdata->tx_pause = pp->tx_pause; pdata->rx_pause = pp->rx_pause; - oldadv = phydev->advertising; - newadv = oldadv & ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause); + phy_set_asym_pause(phydev, pp->rx_pause, pp->tx_pause); - if (pp->rx_pause) - newadv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause; - - if (pp->tx_pause) - newadv ^= ADVERTISED_Asym_Pause; - - if (oldadv ^ newadv) { - phydev->advertising = newadv; - - if (phydev->autoneg) - return phy_start_aneg(phydev); - - if (!pp->autoneg) { - pdata->mac_ops->flowctl_tx(pdata, - pdata->tx_pause); - pdata->mac_ops->flowctl_rx(pdata, - pdata->rx_pause); - } + if (!pp->autoneg) { + pdata->mac_ops->flowctl_tx(pdata, pdata->tx_pause); + pdata->mac_ops->flowctl_rx(pdata, pdata->rx_pause); } - } else { if (pp->autoneg) return -EINVAL; diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c index 078a04dc1182..e3560311711a 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c @@ -895,12 +895,10 @@ int xgene_enet_phy_connect(struct net_device *ndev) } pdata->phy_speed = SPEED_UNKNOWN; - phy_dev->supported &= ~SUPPORTED_10baseT_Half & - ~SUPPORTED_100baseT_Half & - ~SUPPORTED_1000baseT_Half; - phy_dev->supported |= SUPPORTED_Pause | - SUPPORTED_Asym_Pause; - phy_dev->advertising = phy_dev->supported; + phy_remove_link_mode(phy_dev, ETHTOOL_LINK_MODE_10baseT_Half_BIT); + phy_remove_link_mode(phy_dev, ETHTOOL_LINK_MODE_100baseT_Half_BIT); + phy_remove_link_mode(phy_dev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT); + phy_support_asym_pause(phy_dev); return 0; } diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_common.h b/drivers/net/ethernet/aquantia/atlantic/aq_common.h index d52b088ff8f0..becb578211ed 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_common.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_common.h @@ -57,4 +57,9 @@ #define AQ_NIC_RATE_1G BIT(4) #define AQ_NIC_RATE_100M BIT(5) +#define AQ_NIC_RATE_EEE_10G BIT(6) +#define AQ_NIC_RATE_EEE_5G BIT(7) +#define AQ_NIC_RATE_EEE_2GS BIT(8) +#define AQ_NIC_RATE_EEE_1G BIT(9) + #endif /* AQ_COMMON_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c index 08c9fa6ca71f..6a633c70f603 100644 --- 
a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c @@ -98,8 +98,8 @@ static void aq_ethtool_stats(struct net_device *ndev, struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic); memset(data, 0, (ARRAY_SIZE(aq_ethtool_stat_names) + - ARRAY_SIZE(aq_ethtool_queue_stat_names) * - cfg->vecs) * sizeof(u64)); + ARRAY_SIZE(aq_ethtool_queue_stat_names) * + cfg->vecs) * sizeof(u64)); aq_nic_get_stats(aq_nic, data); } @@ -285,6 +285,111 @@ static int aq_ethtool_set_coalesce(struct net_device *ndev, return aq_nic_update_interrupt_moderation_settings(aq_nic); } +static void aq_ethtool_get_wol(struct net_device *ndev, + struct ethtool_wolinfo *wol) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic); + + wol->supported = WAKE_MAGIC; + wol->wolopts = 0; + + if (cfg->wol) + wol->wolopts |= WAKE_MAGIC; +} + +static int aq_ethtool_set_wol(struct net_device *ndev, + struct ethtool_wolinfo *wol) +{ + struct pci_dev *pdev = to_pci_dev(ndev->dev.parent); + struct aq_nic_s *aq_nic = netdev_priv(ndev); + struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic); + int err = 0; + + if (wol->wolopts & WAKE_MAGIC) + cfg->wol |= AQ_NIC_WOL_ENABLED; + else + cfg->wol &= ~AQ_NIC_WOL_ENABLED; + err = device_set_wakeup_enable(&pdev->dev, wol->wolopts); + + return err; +} + +static enum hw_atl_fw2x_rate eee_mask_to_ethtool_mask(u32 speed) +{ + u32 rate = 0; + + if (speed & AQ_NIC_RATE_EEE_10G) + rate |= SUPPORTED_10000baseT_Full; + + if (speed & AQ_NIC_RATE_EEE_2GS) + rate |= SUPPORTED_2500baseX_Full; + + if (speed & AQ_NIC_RATE_EEE_1G) + rate |= SUPPORTED_1000baseT_Full; + + return rate; +} + +static int aq_ethtool_get_eee(struct net_device *ndev, struct ethtool_eee *eee) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + u32 rate, supported_rates; + int err = 0; + + if (!aq_nic->aq_fw_ops->get_eee_rate) + return -EOPNOTSUPP; + + err = aq_nic->aq_fw_ops->get_eee_rate(aq_nic->aq_hw, &rate, + &supported_rates); + if (err < 0) + return err; + + eee->supported = eee_mask_to_ethtool_mask(supported_rates); + + if (aq_nic->aq_nic_cfg.eee_speeds) + eee->advertised = eee->supported; + + eee->lp_advertised = eee_mask_to_ethtool_mask(rate); + + eee->eee_enabled = !!eee->advertised; + + eee->tx_lpi_enabled = eee->eee_enabled; + if (eee->advertised & eee->lp_advertised) + eee->eee_active = true; + + return 0; +} + +static int aq_ethtool_set_eee(struct net_device *ndev, struct ethtool_eee *eee) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + u32 rate, supported_rates; + struct aq_nic_cfg_s *cfg; + int err = 0; + + cfg = aq_nic_get_cfg(aq_nic); + + if (unlikely(!aq_nic->aq_fw_ops->get_eee_rate || + !aq_nic->aq_fw_ops->set_eee_rate)) + return -EOPNOTSUPP; + + err = aq_nic->aq_fw_ops->get_eee_rate(aq_nic->aq_hw, &rate, + &supported_rates); + if (err < 0) + return err; + + if (eee->eee_enabled) { + rate = supported_rates; + cfg->eee_speeds = rate; + } else { + rate = 0; + cfg->eee_speeds = 0; + } + + return aq_nic->aq_fw_ops->set_eee_rate(aq_nic->aq_hw, rate); +} + static int aq_ethtool_nway_reset(struct net_device *ndev) { struct aq_nic_s *aq_nic = netdev_priv(ndev); @@ -403,9 +508,13 @@ const struct ethtool_ops aq_ethtool_ops = { .get_drvinfo = aq_ethtool_get_drvinfo, .get_strings = aq_ethtool_get_strings, .get_rxfh_indir_size = aq_ethtool_get_rss_indir_size, + .get_wol = aq_ethtool_get_wol, + .set_wol = aq_ethtool_set_wol, .nway_reset = aq_ethtool_nway_reset, .get_ringparam = aq_get_ringparam, .set_ringparam = aq_set_ringparam, + 
.get_eee = aq_ethtool_get_eee, + .set_eee = aq_ethtool_set_eee, .get_pauseparam = aq_ethtool_get_pauseparam, .set_pauseparam = aq_ethtool_set_pauseparam, .get_rxfh_key_size = aq_ethtool_get_rss_key_size, diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h index 5c00671f248d..e8689241204e 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h @@ -112,7 +112,7 @@ struct aq_hw_s { const struct aq_fw_ops *aq_fw_ops; void __iomem *mmio; struct aq_hw_link_status_s aq_link_status; - struct hw_aq_atl_utils_mbox mbox; + struct hw_atl_utils_mbox mbox; struct hw_atl_stats_s last_stats; struct aq_stats_s curr_stats; u64 speed; @@ -124,7 +124,7 @@ struct aq_hw_s { u32 mbox_addr; u32 rpc_addr; u32 rpc_tid; - struct hw_aq_atl_utils_fw_rpc rpc; + struct hw_atl_utils_fw_rpc rpc; }; struct aq_ring_s; @@ -204,7 +204,6 @@ struct aq_hw_ops { int (*hw_get_fw_version)(struct aq_hw_s *self, u32 *fw_version); - int (*hw_set_power)(struct aq_hw_s *self, unsigned int power_state); }; struct aq_fw_ops { @@ -228,6 +227,14 @@ struct aq_fw_ops { int (*update_stats)(struct aq_hw_s *self); int (*set_flow_control)(struct aq_hw_s *self); + + int (*set_power)(struct aq_hw_s *self, unsigned int power_state, + u8 *mac); + + int (*set_eee_rate)(struct aq_hw_s *self, u32 speed); + + int (*get_eee_rate)(struct aq_hw_s *self, u32 *rate, + u32 *supported_rates); }; #endif /* AQ_HW_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index 26dc6782b475..5fed24446687 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -189,7 +189,7 @@ static void aq_nic_polling_timer_cb(struct timer_list *t) aq_vec_isr(i, (void *)aq_vec); mod_timer(&self->polling_timer, jiffies + - AQ_CFG_POLLING_TIMER_INTERVAL); + AQ_CFG_POLLING_TIMER_INTERVAL); } int aq_nic_ndev_register(struct aq_nic_s *self) @@ -301,13 +301,13 @@ int aq_nic_start(struct aq_nic_s *self) unsigned int i = 0U; err = self->aq_hw_ops->hw_multicast_list_set(self->aq_hw, - self->mc_list.ar, - self->mc_list.count); + self->mc_list.ar, + self->mc_list.count); if (err < 0) goto err_exit; err = self->aq_hw_ops->hw_packet_filter_set(self->aq_hw, - self->packet_filter); + self->packet_filter); if (err < 0) goto err_exit; @@ -327,7 +327,7 @@ int aq_nic_start(struct aq_nic_s *self) goto err_exit; timer_setup(&self->service_timer, aq_nic_service_timer_cb, 0); mod_timer(&self->service_timer, jiffies + - AQ_CFG_SERVICE_TIMER_INTERVAL); + AQ_CFG_SERVICE_TIMER_INTERVAL); if (self->aq_nic_cfg.is_polling) { timer_setup(&self->polling_timer, aq_nic_polling_timer_cb, 0); @@ -344,7 +344,7 @@ int aq_nic_start(struct aq_nic_s *self) } err = self->aq_hw_ops->hw_irq_enable(self->aq_hw, - AQ_CFG_IRQ_MASK); + AQ_CFG_IRQ_MASK); if (err < 0) goto err_exit; } @@ -889,11 +889,13 @@ void aq_nic_deinit(struct aq_nic_s *self) self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) aq_vec_deinit(aq_vec); - if (self->power_state == AQ_HW_POWER_STATE_D0) { - (void)self->aq_fw_ops->deinit(self->aq_hw); - } else { - (void)self->aq_hw_ops->hw_set_power(self->aq_hw, - self->power_state); + self->aq_fw_ops->deinit(self->aq_hw); + + if (self->power_state != AQ_HW_POWER_STATE_D0 || + self->aq_hw->aq_nic_cfg->wol) { + self->aq_fw_ops->set_power(self->aq_hw, + self->power_state, + self->ndev->dev_addr); } err_exit:; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h 
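The aq_fw_ops additions above (set_power, set_eee_rate, get_eee_rate) are per-firmware-generation hooks, and the 1.x ops table later in this series leaves the EEE pair NULL, so every caller has to guard the pointer before dereferencing it. A minimal sketch of that calling convention (foo_get_eee_rates is an illustrative wrapper, not a function from the patch):

#include <linux/errno.h>

static int foo_get_eee_rates(struct aq_hw_s *hw, u32 *rate, u32 *supported)
{
	const struct aq_fw_ops *fw_ops = hw->aq_fw_ops;

	/* FW 1.x registers .get_eee_rate = NULL, so report "no EEE
	 * support" instead of crashing on a NULL hook */
	if (!fw_ops->get_eee_rate)
		return -EOPNOTSUPP;

	return fw_ops->get_eee_rate(hw, rate, supported);
}

This is the same guard aq_ethtool_get_eee() and aq_ethtool_set_eee() apply before touching the hooks.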
b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h index fecfc401f95d..c1582f4e8e1b 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h @@ -36,6 +36,7 @@ struct aq_nic_cfg_s { u32 flow_control; u32 link_speed_msk; u32 vlan_id; + u32 wol; u16 is_mc_list_enabled; u16 mc_list_count; bool is_autoneg; @@ -44,6 +45,7 @@ struct aq_nic_cfg_s { bool is_lro; u8 tcs; struct aq_rss_parameters aq_rss; + u32 eee_speeds; }; #define AQ_NIC_FLAG_STARTED 0x00000004U @@ -54,6 +56,8 @@ struct aq_nic_cfg_s { #define AQ_NIC_FLAG_ERR_UNPLUG 0x40000000U #define AQ_NIC_FLAG_ERR_HW 0x80000000U +#define AQ_NIC_WOL_ENABLED BIT(0) + #define AQ_NIC_TCVEC2RING(_NIC_, _TC_, _VEC_) \ ((_TC_) * AQ_CFG_TCS_MAX + (_VEC_)) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c index 750007513f9d..1d5d6b8df855 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c @@ -84,7 +84,7 @@ static int aq_pci_probe_get_hw_by_id(struct pci_dev *pdev, const struct aq_hw_ops **ops, const struct aq_hw_caps_s **caps) { - int i = 0; + int i; if (pdev->vendor != PCI_VENDOR_ID_AQUANTIA) return -EINVAL; @@ -107,7 +107,7 @@ static int aq_pci_probe_get_hw_by_id(struct pci_dev *pdev, int aq_pci_func_init(struct pci_dev *pdev) { - int err = 0; + int err; err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); if (!err) { @@ -141,7 +141,7 @@ int aq_pci_func_alloc_irq(struct aq_nic_s *self, unsigned int i, char *name, void *aq_vec, cpumask_t *affinity_mask) { struct pci_dev *pdev = self->pdev; - int err = 0; + int err; if (pdev->msix_enabled || pdev->msi_enabled) err = request_irq(pci_irq_vector(pdev, i), aq_vec_isr, 0, @@ -164,7 +164,7 @@ int aq_pci_func_alloc_irq(struct aq_nic_s *self, unsigned int i, void aq_pci_func_free_irqs(struct aq_nic_s *self) { struct pci_dev *pdev = self->pdev; - unsigned int i = 0U; + unsigned int i; for (i = 32U; i--;) { if (!((1U << i) & self->msix_entry_mask)) @@ -194,8 +194,8 @@ static void aq_pci_free_irq_vectors(struct aq_nic_s *self) static int aq_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id) { - struct aq_nic_s *self = NULL; - int err = 0; + struct aq_nic_s *self; + int err; struct net_device *ndev; resource_size_t mmio_pa; u32 bar; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c index d1e1a0ba8615..3db91446cc67 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c @@ -29,8 +29,8 @@ static struct aq_ring_s *aq_ring_alloc(struct aq_ring_s *self, goto err_exit; } self->dx_ring = dma_alloc_coherent(aq_nic_get_dev(aq_nic), - self->size * self->dx_size, - &self->dx_ring_pa, GFP_KERNEL); + self->size * self->dx_size, + &self->dx_ring_pa, GFP_KERNEL); if (!self->dx_ring) { err = -ENOMEM; goto err_exit; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c index 97addfa6f895..2469ed4d86b9 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c @@ -49,37 +49,37 @@ const struct aq_hw_caps_s hw_atl_a0_caps_aqc100 = { DEFAULT_A0_BOARD_BASIC_CAPABILITIES, .media_type = AQ_HW_MEDIA_TYPE_FIBRE, - .link_speed_msk = HW_ATL_A0_RATE_5G | - HW_ATL_A0_RATE_2G5 | - HW_ATL_A0_RATE_1G | - HW_ATL_A0_RATE_100M, + .link_speed_msk = AQ_NIC_RATE_5G | + 
AQ_NIC_RATE_2GS | + AQ_NIC_RATE_1G | + AQ_NIC_RATE_100M, }; const struct aq_hw_caps_s hw_atl_a0_caps_aqc107 = { DEFAULT_A0_BOARD_BASIC_CAPABILITIES, .media_type = AQ_HW_MEDIA_TYPE_TP, - .link_speed_msk = HW_ATL_A0_RATE_10G | - HW_ATL_A0_RATE_5G | - HW_ATL_A0_RATE_2G5 | - HW_ATL_A0_RATE_1G | - HW_ATL_A0_RATE_100M, + .link_speed_msk = AQ_NIC_RATE_10G | + AQ_NIC_RATE_5G | + AQ_NIC_RATE_2GS | + AQ_NIC_RATE_1G | + AQ_NIC_RATE_100M, }; const struct aq_hw_caps_s hw_atl_a0_caps_aqc108 = { DEFAULT_A0_BOARD_BASIC_CAPABILITIES, .media_type = AQ_HW_MEDIA_TYPE_TP, - .link_speed_msk = HW_ATL_A0_RATE_5G | - HW_ATL_A0_RATE_2G5 | - HW_ATL_A0_RATE_1G | - HW_ATL_A0_RATE_100M, + .link_speed_msk = AQ_NIC_RATE_5G | + AQ_NIC_RATE_2GS | + AQ_NIC_RATE_1G | + AQ_NIC_RATE_100M, }; const struct aq_hw_caps_s hw_atl_a0_caps_aqc109 = { DEFAULT_A0_BOARD_BASIC_CAPABILITIES, .media_type = AQ_HW_MEDIA_TYPE_TP, - .link_speed_msk = HW_ATL_A0_RATE_2G5 | - HW_ATL_A0_RATE_1G | - HW_ATL_A0_RATE_100M, + .link_speed_msk = AQ_NIC_RATE_2GS | + AQ_NIC_RATE_1G | + AQ_NIC_RATE_100M, }; static int hw_atl_a0_hw_reset(struct aq_hw_s *self) @@ -284,7 +284,7 @@ static int hw_atl_a0_hw_init_rx_path(struct aq_hw_s *self) /* RSS Ring selection */ hw_atl_reg_rx_flr_rss_control1set(self, cfg->is_rss ? - 0xB3333333U : 0x00000000U); + 0xB3333333U : 0x00000000U); /* Multicast filters */ for (i = HW_ATL_A0_MAC_MAX; i--;) { @@ -325,7 +325,7 @@ static int hw_atl_a0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr) } h = (mac_addr[0] << 8) | (mac_addr[1]); l = (mac_addr[2] << 24) | (mac_addr[3] << 16) | - (mac_addr[4] << 8) | mac_addr[5]; + (mac_addr[4] << 8) | mac_addr[5]; hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC); hw_atl_rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_A0_MAC); @@ -519,7 +519,7 @@ static int hw_atl_a0_hw_ring_rx_init(struct aq_hw_s *self, hw_atl_rdm_rx_desc_data_buff_size_set(self, AQ_CFG_RX_FRAME_MAX / 1024U, - aq_ring->idx); + aq_ring->idx); hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx); hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx); @@ -758,7 +758,7 @@ static int hw_atl_a0_hw_packet_filter_set(struct aq_hw_s *self, hw_atl_rpfl2_uc_flr_en_set(self, (self->aq_nic_cfg->is_mc_list_enabled && (i <= self->aq_nic_cfg->mc_list_count)) ? 
- 1U : 0U, i); + 1U : 0U, i); return aq_hw_err_from_flags(self); } @@ -877,7 +877,6 @@ static int hw_atl_a0_hw_ring_rx_stop(struct aq_hw_s *self, const struct aq_hw_ops hw_atl_ops_a0 = { .hw_set_mac_address = hw_atl_a0_hw_mac_addr_set, .hw_init = hw_atl_a0_hw_init, - .hw_set_power = hw_atl_utils_hw_set_power, .hw_reset = hw_atl_a0_hw_reset, .hw_start = hw_atl_a0_hw_start, .hw_ring_tx_start = hw_atl_a0_hw_ring_tx_start, diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h index 3c94cff57876..a021dc431ef7 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h @@ -62,12 +62,6 @@ #define HW_ATL_A0_MPI_SPEED_MSK 0xFFFFU #define HW_ATL_A0_MPI_SPEED_SHIFT 16U -#define HW_ATL_A0_RATE_10G BIT(0) -#define HW_ATL_A0_RATE_5G BIT(1) -#define HW_ATL_A0_RATE_2G5 BIT(3) -#define HW_ATL_A0_RATE_1G BIT(4) -#define HW_ATL_A0_RATE_100M BIT(5) - #define HW_ATL_A0_TXBUF_MAX 160U #define HW_ATL_A0_RXBUF_MAX 320U diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c index 1d44a386e7d3..76d25d594a0f 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c @@ -51,38 +51,38 @@ const struct aq_hw_caps_s hw_atl_b0_caps_aqc100 = { DEFAULT_B0_BOARD_BASIC_CAPABILITIES, .media_type = AQ_HW_MEDIA_TYPE_FIBRE, - .link_speed_msk = HW_ATL_B0_RATE_10G | - HW_ATL_B0_RATE_5G | - HW_ATL_B0_RATE_2G5 | - HW_ATL_B0_RATE_1G | - HW_ATL_B0_RATE_100M, + .link_speed_msk = AQ_NIC_RATE_10G | + AQ_NIC_RATE_5G | + AQ_NIC_RATE_2GS | + AQ_NIC_RATE_1G | + AQ_NIC_RATE_100M, }; const struct aq_hw_caps_s hw_atl_b0_caps_aqc107 = { DEFAULT_B0_BOARD_BASIC_CAPABILITIES, .media_type = AQ_HW_MEDIA_TYPE_TP, - .link_speed_msk = HW_ATL_B0_RATE_10G | - HW_ATL_B0_RATE_5G | - HW_ATL_B0_RATE_2G5 | - HW_ATL_B0_RATE_1G | - HW_ATL_B0_RATE_100M, + .link_speed_msk = AQ_NIC_RATE_10G | + AQ_NIC_RATE_5G | + AQ_NIC_RATE_2GS | + AQ_NIC_RATE_1G | + AQ_NIC_RATE_100M, }; const struct aq_hw_caps_s hw_atl_b0_caps_aqc108 = { DEFAULT_B0_BOARD_BASIC_CAPABILITIES, .media_type = AQ_HW_MEDIA_TYPE_TP, - .link_speed_msk = HW_ATL_B0_RATE_5G | - HW_ATL_B0_RATE_2G5 | - HW_ATL_B0_RATE_1G | - HW_ATL_B0_RATE_100M, + .link_speed_msk = AQ_NIC_RATE_5G | + AQ_NIC_RATE_2GS | + AQ_NIC_RATE_1G | + AQ_NIC_RATE_100M, }; const struct aq_hw_caps_s hw_atl_b0_caps_aqc109 = { DEFAULT_B0_BOARD_BASIC_CAPABILITIES, .media_type = AQ_HW_MEDIA_TYPE_TP, - .link_speed_msk = HW_ATL_B0_RATE_2G5 | - HW_ATL_B0_RATE_1G | - HW_ATL_B0_RATE_100M, + .link_speed_msk = AQ_NIC_RATE_2GS | + AQ_NIC_RATE_1G | + AQ_NIC_RATE_100M, }; static int hw_atl_b0_hw_reset(struct aq_hw_s *self) @@ -935,7 +935,6 @@ static int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self, const struct aq_hw_ops hw_atl_ops_b0 = { .hw_set_mac_address = hw_atl_b0_hw_mac_addr_set, .hw_init = hw_atl_b0_hw_init, - .hw_set_power = hw_atl_utils_hw_set_power, .hw_reset = hw_atl_b0_hw_reset, .hw_start = hw_atl_b0_hw_start, .hw_ring_tx_start = hw_atl_b0_hw_ring_tx_start, diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h index 28568f5fa74b..b318eefd36ae 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h @@ -67,12 +67,6 @@ 
#define HW_ATL_B0_MPI_SPEED_MSK 0xFFFFU #define HW_ATL_B0_MPI_SPEED_SHIFT 16U -#define HW_ATL_B0_RATE_10G BIT(0) -#define HW_ATL_B0_RATE_5G BIT(1) -#define HW_ATL_B0_RATE_2G5 BIT(3) -#define HW_ATL_B0_RATE_1G BIT(4) -#define HW_ATL_B0_RATE_100M BIT(5) - #define HW_ATL_B0_TXBUF_MAX 160U #define HW_ATL_B0_RXBUF_MAX 320U diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c index 10ba035dadb1..be0a3a90dfad 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c @@ -1460,3 +1460,11 @@ void hw_atl_reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw, aq_hw_write_reg(aq_hw, HW_ATL_GLB_CPU_SCRATCH_SCP_ADR(scratch_scp), glb_cpu_scratch_scp); } + +void hw_atl_mcp_up_force_intr_set(struct aq_hw_s *aq_hw, u32 up_force_intr) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_MCP_UP_FORCE_INTERRUPT_ADR, + HW_ATL_MCP_UP_FORCE_INTERRUPT_MSK, + HW_ATL_MCP_UP_FORCE_INTERRUPT_SHIFT, + up_force_intr); +} diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h index dfb426f2dc2c..7056c7342afc 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h @@ -698,4 +698,7 @@ void hw_atl_msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 reg_wr_strobe); /* set pci register reset disable */ void hw_atl_pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis); +/* set uP Force Interrupt */ +void hw_atl_mcp_up_force_intr_set(struct aq_hw_s *aq_hw, u32 up_force_intr); + #endif /* HW_ATL_LLH_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h index e0cf70120f1d..716674a9b729 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h @@ -2387,4 +2387,17 @@ #define HW_ATL_GLB_CPU_SCRATCH_SCP_ADR(scratch_scp) \ (0x00000300u + (scratch_scp) * 0x4) +/* register address for bitfield uP Force Interrupt */ +#define HW_ATL_MCP_UP_FORCE_INTERRUPT_ADR 0x00000404 +/* bitmask for bitfield uP Force Interrupt */ +#define HW_ATL_MCP_UP_FORCE_INTERRUPT_MSK 0x00000002 +/* inverted bitmask for bitfield uP Force Interrupt */ +#define HW_ATL_MCP_UP_FORCE_INTERRUPT_MSKN 0xFFFFFFFD +/* lower bit position of bitfield uP Force Interrupt */ +#define HW_ATL_MCP_UP_FORCE_INTERRUPT_SHIFT 1 +/* width of bitfield uP Force Interrupt */ +#define HW_ATL_MCP_UP_FORCE_INTERRUPT_WIDTH 1 +/* default value of bitfield uP Force Interrupt */ +#define HW_ATL_MCP_UP_FORCE_INTERRUPT_DEFAULT 0x0 + #endif /* HW_ATL_LLH_INTERNAL_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c index c965e65d07db..7def1cb8ab9d 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c @@ -49,6 +49,7 @@ #define FORCE_FLASHLESS 0 static int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual); + static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self, enum hal_atl_utils_fw_state_e state); @@ -69,10 +70,10 @@ int hw_atl_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops) self->fw_ver_actual) == 0) { *fw_ops = &aq_fw_1x_ops; } else if (hw_atl_utils_ver_match(HW_ATL_FW_VER_2X, - 
self->fw_ver_actual) == 0) { + self->fw_ver_actual) == 0) { *fw_ops = &aq_fw_2x_ops; } else if (hw_atl_utils_ver_match(HW_ATL_FW_VER_3X, - self->fw_ver_actual) == 0) { + self->fw_ver_actual) == 0) { *fw_ops = &aq_fw_2x_ops; } else { aq_pr_err("Bad FW version detected: %x\n", @@ -260,7 +261,7 @@ int hw_atl_utils_soft_reset(struct aq_hw_s *self) hw_atl_utils_mpi_set_state(self, MPI_DEINIT); AQ_HW_WAIT_FOR((aq_hw_read_reg(self, HW_ATL_MPI_STATE_ADR) & - HW_ATL_MPI_STATE_MSK) == MPI_DEINIT, + HW_ATL_MPI_STATE_MSK) == MPI_DEINIT, 10, 1000U); } @@ -277,7 +278,7 @@ int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a, AQ_HW_WAIT_FOR(hw_atl_reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM) == 1U, - 1U, 10000U); + 1U, 10000U); if (err < 0) { bool is_locked; @@ -325,17 +326,31 @@ static int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p, err = -ETIME; goto err_exit; } + if (IS_CHIP_FEATURE(REVISION_B1)) { + u32 offset = 0; + + for (; offset < cnt; ++offset) { + aq_hw_write_reg(self, 0x328, p[offset]); + aq_hw_write_reg(self, 0x32C, + (0x80000000 | (0xFFFF & (offset * 4)))); + hw_atl_mcp_up_force_intr_set(self, 1); + /* 1000 times by 10us = 10ms */ + AQ_HW_WAIT_FOR((aq_hw_read_reg(self, + 0x32C) & 0xF0000000) != + 0x80000000, + 10, 1000); + } + } else { + u32 offset = 0; - aq_hw_write_reg(self, 0x00000208U, a); - - for (++cnt; --cnt;) { - u32 i = 0U; + aq_hw_write_reg(self, 0x208, a); - aq_hw_write_reg(self, 0x0000020CU, *(p++)); - aq_hw_write_reg(self, 0x00000200U, 0xC000U); + for (; offset < cnt; ++offset) { + aq_hw_write_reg(self, 0x20C, p[offset]); + aq_hw_write_reg(self, 0x200, 0xC000); - for (i = 1024U; - (0x100U & aq_hw_read_reg(self, 0x00000200U)) && --i;) { + AQ_HW_WAIT_FOR((aq_hw_read_reg(self, 0x200U) & + 0x100) == 0, 10, 1000); } } @@ -379,7 +394,7 @@ static int hw_atl_utils_init_ucp(struct aq_hw_s *self, /* check 10 times by 1ms */ AQ_HW_WAIT_FOR(0U != (self->mbox_addr = - aq_hw_read_reg(self, 0x360U)), 1000U, 10U); + aq_hw_read_reg(self, 0x360U)), 1000U, 10U); return err; } @@ -399,7 +414,7 @@ struct aq_hw_atl_utils_fw_rpc_tid_s { #define hw_atl_utils_fw_rpc_init(_H_) hw_atl_utils_fw_rpc_wait(_H_, NULL) -static int hw_atl_utils_fw_rpc_call(struct aq_hw_s *self, unsigned int rpc_size) +int hw_atl_utils_fw_rpc_call(struct aq_hw_s *self, unsigned int rpc_size) { int err = 0; struct aq_hw_atl_utils_fw_rpc_tid_s sw; @@ -411,7 +426,7 @@ static int hw_atl_utils_fw_rpc_call(struct aq_hw_s *self, unsigned int rpc_size) err = hw_atl_utils_fw_upload_dwords(self, self->rpc_addr, (u32 *)(void *)&self->rpc, (rpc_size + sizeof(u32) - - sizeof(u8)) / sizeof(u32)); + sizeof(u8)) / sizeof(u32)); if (err < 0) goto err_exit; @@ -423,8 +438,8 @@ err_exit: return err; } -static int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self, - struct hw_aq_atl_utils_fw_rpc **rpc) +int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self, + struct hw_atl_utils_fw_rpc **rpc) { int err = 0; struct aq_hw_atl_utils_fw_rpc_tid_s sw; @@ -436,7 +451,7 @@ static int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self, self->rpc_tid = sw.tid; AQ_HW_WAIT_FOR(sw.tid == - (fw.val = + (fw.val = aq_hw_read_reg(self, HW_ATL_RPC_STATE_ADR), fw.tid), 1000U, 100U); if (err < 0) @@ -459,7 +474,7 @@ static int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self, (u32 *)(void *) &self->rpc, (fw.len + sizeof(u32) - - sizeof(u8)) / + sizeof(u8)) / sizeof(u32)); if (err < 0) goto err_exit; @@ -489,16 +504,16 @@ err_exit: } int hw_atl_utils_mpi_read_mbox(struct aq_hw_s *self, - struct hw_aq_atl_utils_mbox_header *pmbox) + struct 
hw_atl_utils_mbox_header *pmbox) { return hw_atl_utils_fw_downld_dwords(self, - self->mbox_addr, - (u32 *)(void *)pmbox, - sizeof(*pmbox) / sizeof(u32)); + self->mbox_addr, + (u32 *)(void *)pmbox, + sizeof(*pmbox) / sizeof(u32)); } void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self, - struct hw_aq_atl_utils_mbox *pmbox) + struct hw_atl_utils_mbox *pmbox) { int err = 0; @@ -538,7 +553,7 @@ static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self, { int err = 0; u32 transaction_id = 0; - struct hw_aq_atl_utils_mbox_header mbox; + struct hw_atl_utils_mbox_header mbox; u32 val = aq_hw_read_reg(self, HW_ATL_MPI_CONTROL_ADR); if (state == MPI_RESET) { @@ -547,8 +562,8 @@ static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self, transaction_id = mbox.transaction_id; AQ_HW_WAIT_FOR(transaction_id != - (hw_atl_utils_mpi_read_mbox(self, &mbox), - mbox.transaction_id), + (hw_atl_utils_mpi_read_mbox(self, &mbox), + mbox.transaction_id), 1000U, 100U); if (err < 0) goto err_exit; @@ -645,9 +660,9 @@ int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self, if ((mac[0] & 0x01U) || ((mac[0] | mac[1] | mac[2]) == 0x00U)) { /* chip revision */ - l = 0xE3000000U - | (0xFFFFU & aq_hw_read_reg(self, HW_ATL_UCP_0X370_REG)) - | (0x00 << 16); + l = 0xE3000000U | + (0xFFFFU & aq_hw_read_reg(self, HW_ATL_UCP_0X370_REG)) | + (0x00 << 16); h = 0x8001300EU; mac[5] = (u8)(0xFFU & l); @@ -730,17 +745,9 @@ static int hw_atl_fw1x_deinit(struct aq_hw_s *self) return 0; } -int hw_atl_utils_hw_set_power(struct aq_hw_s *self, - unsigned int power_state) -{ - hw_atl_utils_mpi_set_speed(self, 0); - hw_atl_utils_mpi_set_state(self, MPI_POWER); - return 0; -} - int hw_atl_utils_update_stats(struct aq_hw_s *self) { - struct hw_aq_atl_utils_mbox mbox; + struct hw_atl_utils_mbox mbox; hw_atl_utils_mpi_read_stats(self, &mbox); @@ -825,6 +832,81 @@ int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version) return 0; } +static int aq_fw1x_set_wol(struct aq_hw_s *self, bool wol_enabled, u8 *mac) +{ + struct hw_atl_utils_fw_rpc *prpc = NULL; + unsigned int rpc_size = 0U; + int err = 0; + + err = hw_atl_utils_fw_rpc_wait(self, &prpc); + if (err < 0) + goto err_exit; + + memset(prpc, 0, sizeof(*prpc)); + + if (wol_enabled) { + rpc_size = sizeof(prpc->msg_id) + sizeof(prpc->msg_wol); + + prpc->msg_id = HAL_ATLANTIC_UTILS_FW_MSG_WOL_ADD; + prpc->msg_wol.priority = + HAL_ATLANTIC_UTILS_FW_MSG_WOL_PRIOR; + prpc->msg_wol.pattern_id = + HAL_ATLANTIC_UTILS_FW_MSG_WOL_PATTERN; + prpc->msg_wol.wol_packet_type = + HAL_ATLANTIC_UTILS_FW_MSG_WOL_MAG_PKT; + + ether_addr_copy((u8 *)&prpc->msg_wol.wol_pattern, mac); + } else { + rpc_size = sizeof(prpc->msg_id) + sizeof(prpc->msg_del_id); + + prpc->msg_id = HAL_ATLANTIC_UTILS_FW_MSG_WOL_DEL; + prpc->msg_wol.pattern_id = + HAL_ATLANTIC_UTILS_FW_MSG_WOL_PATTERN; + } + + err = hw_atl_utils_fw_rpc_call(self, rpc_size); + +err_exit: + return err; +} + +static int aq_fw1x_set_power(struct aq_hw_s *self, unsigned int power_state, + u8 *mac) +{ + struct hw_atl_utils_fw_rpc *prpc = NULL; + unsigned int rpc_size = 0U; + int err = 0; + + if (self->aq_nic_cfg->wol & AQ_NIC_WOL_ENABLED) { + err = aq_fw1x_set_wol(self, 1, mac); + + if (err < 0) + goto err_exit; + + rpc_size = sizeof(prpc->msg_id) + + sizeof(prpc->msg_enable_wakeup); + + err = hw_atl_utils_fw_rpc_wait(self, &prpc); + + if (err < 0) + goto err_exit; + + memset(prpc, 0, rpc_size); + + prpc->msg_id = HAL_ATLANTIC_UTILS_FW_MSG_ENABLE_WAKEUP; + prpc->msg_enable_wakeup.pattern_mask = 0x00000002; + + err = hw_atl_utils_fw_rpc_call(self, 
rpc_size); + if (err < 0) + goto err_exit; + } + hw_atl_utils_mpi_set_speed(self, 0); + hw_atl_utils_mpi_set_state(self, MPI_POWER); + +err_exit: + return err; +} + const struct aq_fw_ops aq_fw_1x_ops = { .init = hw_atl_utils_mpi_create, .deinit = hw_atl_fw1x_deinit, @@ -834,5 +916,8 @@ const struct aq_fw_ops aq_fw_1x_ops = { .set_state = hw_atl_utils_mpi_set_state, .update_link_status = hw_atl_utils_mpi_get_link_status, .update_stats = hw_atl_utils_update_stats, + .set_power = aq_fw1x_set_power, + .set_eee_rate = NULL, + .get_eee_rate = NULL, .set_flow_control = NULL, }; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h index b875590efcbd..3613fca64b58 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h @@ -75,7 +75,7 @@ union __packed ip_addr { } v4; }; -struct __packed hw_aq_atl_utils_fw_rpc { +struct __packed hw_atl_utils_fw_rpc { u32 msg_id; union { @@ -101,8 +101,6 @@ struct __packed hw_aq_atl_utils_fw_rpc { struct { u32 priority; u32 wol_packet_type; - u16 friendly_name_len; - u16 friendly_name[65]; u32 pattern_id; u32 next_wol_pattern_offset; @@ -134,25 +132,112 @@ struct __packed hw_aq_atl_utils_fw_rpc { u32 pattern_offset; u32 pattern_size; } wol_bit_map_pattern; + + struct { + u8 mac_addr[ETH_ALEN]; + } wol_magic_packet_patter; } wol_pattern; } msg_wol; struct { - u32 is_wake_on_link_down; - u32 is_wake_on_link_up; - } msg_wolink; + union { + u32 pattern_mask; + + struct { + u32 reason_arp_v4_pkt : 1; + u32 reason_ipv4_ping_pkt : 1; + u32 reason_ipv6_ns_pkt : 1; + u32 reason_ipv6_ping_pkt : 1; + u32 reason_link_up : 1; + u32 reason_link_down : 1; + u32 reason_maximum : 1; + }; + }; + + union { + u32 offload_mask; + }; + } msg_enable_wakeup; + + struct { + u32 id; + } msg_del_id; }; }; -struct __packed hw_aq_atl_utils_mbox_header { +struct __packed hw_atl_utils_mbox_header { u32 version; u32 transaction_id; u32 error; }; -struct __packed hw_aq_atl_utils_mbox { - struct hw_aq_atl_utils_mbox_header header; +struct __packed hw_aq_info { + u8 reserved[6]; + u16 phy_fault_code; + u16 phy_temperature; + u8 cable_len; + u8 reserved1; + u32 cable_diag_data[4]; + u8 reserved2[32]; + u32 caps_lo; + u32 caps_hi; +}; + +struct __packed hw_atl_utils_mbox { + struct hw_atl_utils_mbox_header header; struct hw_atl_stats_s stats; + struct hw_aq_info info; +}; + +/* fw2x */ +typedef u32 fw_offset_t; + +struct __packed offload_ip_info { + u8 v4_local_addr_count; + u8 v4_addr_count; + u8 v6_local_addr_count; + u8 v6_addr_count; + fw_offset_t v4_addr; + fw_offset_t v4_prefix; + fw_offset_t v6_addr; + fw_offset_t v6_prefix; +}; + +struct __packed offload_port_info { + u16 udp_port_count; + u16 tcp_port_count; + fw_offset_t udp_port; + fw_offset_t tcp_port; +}; + +struct __packed offload_ka_info { + u16 v4_ka_count; + u16 v6_ka_count; + u32 retry_count; + u32 retry_interval; + fw_offset_t v4_ka; + fw_offset_t v6_ka; +}; + +struct __packed offload_rr_info { + u32 rr_count; + u32 rr_buf_len; + fw_offset_t rr_id_x; + fw_offset_t rr_buf; +}; + +struct __packed offload_info { + u32 version; + u32 len; + u8 mac_addr[ETH_ALEN]; + + u8 reserved[2]; + + struct offload_ip_info ips; + struct offload_port_info ports; + struct offload_ka_info kas; + struct offload_rr_info rrs; + u8 buf[0]; }; #define HAL_ATLANTIC_UTILS_CHIP_MIPS 0x00000001U @@ -181,6 +266,21 @@ enum hal_atl_utils_fw_state_e { #define HAL_ATLANTIC_RATE_100M BIT(5) #define 
HAL_ATLANTIC_RATE_INVALID BIT(6) +#define HAL_ATLANTIC_UTILS_FW_MSG_PING 0x1U +#define HAL_ATLANTIC_UTILS_FW_MSG_ARP 0x2U +#define HAL_ATLANTIC_UTILS_FW_MSG_INJECT 0x3U +#define HAL_ATLANTIC_UTILS_FW_MSG_WOL_ADD 0x4U +#define HAL_ATLANTIC_UTILS_FW_MSG_WOL_PRIOR 0x10000000U +#define HAL_ATLANTIC_UTILS_FW_MSG_WOL_PATTERN 0x1U +#define HAL_ATLANTIC_UTILS_FW_MSG_WOL_MAG_PKT 0x2U +#define HAL_ATLANTIC_UTILS_FW_MSG_WOL_DEL 0x5U +#define HAL_ATLANTIC_UTILS_FW_MSG_ENABLE_WAKEUP 0x6U +#define HAL_ATLANTIC_UTILS_FW_MSG_MSM_PFC 0x7U +#define HAL_ATLANTIC_UTILS_FW_MSG_PROVISIONING 0x8U +#define HAL_ATLANTIC_UTILS_FW_MSG_OFFLOAD_ADD 0x9U +#define HAL_ATLANTIC_UTILS_FW_MSG_OFFLOAD_DEL 0xAU +#define HAL_ATLANTIC_UTILS_FW_MSG_CABLE_DIAG 0xDU + enum hw_atl_fw2x_rate { FW2X_RATE_100M = 0x20, FW2X_RATE_1G = 0x100, @@ -286,10 +386,10 @@ int hw_atl_utils_soft_reset(struct aq_hw_s *self); void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p); int hw_atl_utils_mpi_read_mbox(struct aq_hw_s *self, - struct hw_aq_atl_utils_mbox_header *pmbox); + struct hw_atl_utils_mbox_header *pmbox); void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self, - struct hw_aq_atl_utils_mbox *pmbox); + struct hw_atl_utils_mbox *pmbox); void hw_atl_utils_mpi_set(struct aq_hw_s *self, enum hal_atl_utils_fw_state_e state, @@ -316,9 +416,17 @@ int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version); int hw_atl_utils_update_stats(struct aq_hw_s *self); struct aq_stats_s *hw_atl_utils_get_hw_stats(struct aq_hw_s *self); + int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a, u32 *p, u32 cnt); +int hw_atl_utils_fw_set_wol(struct aq_hw_s *self, bool wol_enabled, u8 *mac); + +int hw_atl_utils_fw_rpc_call(struct aq_hw_s *self, unsigned int rpc_size); + +int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self, + struct hw_atl_utils_fw_rpc **rpc); + extern const struct aq_fw_ops aq_fw_1x_ops; extern const struct aq_fw_ops aq_fw_2x_ops; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c index e37943760a58..c0568465e10b 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c @@ -16,11 +16,13 @@ #include "../aq_pci_func.h" #include "../aq_ring.h" #include "../aq_vec.h" +#include "../aq_nic.h" #include "hw_atl_utils.h" #include "hw_atl_llh.h" #define HW_ATL_FW2X_MPI_EFUSE_ADDR 0x364 #define HW_ATL_FW2X_MPI_MBOX_ADDR 0x360 +#define HW_ATL_FW2X_MPI_RPC_ADDR 0x334 #define HW_ATL_FW2X_MPI_CONTROL_ADDR 0x368 #define HW_ATL_FW2X_MPI_CONTROL2_ADDR 0x36C @@ -28,6 +30,42 @@ #define HW_ATL_FW2X_MPI_STATE_ADDR 0x370 #define HW_ATL_FW2X_MPI_STATE2_ADDR 0x374 +#define HW_ATL_FW2X_CAP_SLEEP_PROXY BIT(CAPS_HI_SLEEP_PROXY) +#define HW_ATL_FW2X_CAP_WOL BIT(CAPS_HI_WOL) + +#define HW_ATL_FW2X_CTRL_SLEEP_PROXY BIT(CTRL_SLEEP_PROXY) +#define HW_ATL_FW2X_CTRL_WOL BIT(CTRL_WOL) +#define HW_ATL_FW2X_CTRL_LINK_DROP BIT(CTRL_LINK_DROP) +#define HW_ATL_FW2X_CTRL_PAUSE BIT(CTRL_PAUSE) +#define HW_ATL_FW2X_CTRL_ASYMMETRIC_PAUSE BIT(CTRL_ASYMMETRIC_PAUSE) +#define HW_ATL_FW2X_CTRL_FORCE_RECONNECT BIT(CTRL_FORCE_RECONNECT) + +#define HW_ATL_FW2X_CAP_EEE_1G_MASK BIT(CAPS_HI_1000BASET_FD_EEE) +#define HW_ATL_FW2X_CAP_EEE_2G5_MASK BIT(CAPS_HI_2P5GBASET_FD_EEE) +#define HW_ATL_FW2X_CAP_EEE_5G_MASK BIT(CAPS_HI_5GBASET_FD_EEE) +#define HW_ATL_FW2X_CAP_EEE_10G_MASK BIT(CAPS_HI_10GBASET_FD_EEE) + +#define HAL_ATLANTIC_WOL_FILTERS_COUNT 8 +#define 
HAL_ATLANTIC_UTILS_FW2X_MSG_WOL 0x0E + +struct __packed fw2x_msg_wol_pattern { + u8 mask[16]; + u32 crc; +}; + +struct __packed fw2x_msg_wol { + u32 msg_id; + u8 hw_addr[ETH_ALEN]; + u8 magic_packet_enabled; + u8 filter_count; + struct fw2x_msg_wol_pattern filter[HAL_ATLANTIC_WOL_FILTERS_COUNT]; + u8 link_up_enabled; + u8 link_down_enabled; + u16 reserved; + u32 link_up_timeout; + u32 link_down_timeout; +}; + static int aq_fw2x_set_link_speed(struct aq_hw_s *self, u32 speed); static int aq_fw2x_set_state(struct aq_hw_s *self, enum hal_atl_utils_fw_state_e state); @@ -38,8 +76,12 @@ static int aq_fw2x_init(struct aq_hw_s *self) /* check 10 times by 1ms */ AQ_HW_WAIT_FOR(0U != (self->mbox_addr = - aq_hw_read_reg(self, HW_ATL_FW2X_MPI_MBOX_ADDR)), + aq_hw_read_reg(self, HW_ATL_FW2X_MPI_MBOX_ADDR)), 1000U, 10U); + AQ_HW_WAIT_FOR(0U != (self->rpc_addr = + aq_hw_read_reg(self, HW_ATL_FW2X_MPI_RPC_ADDR)), + 1000U, 100U); + return err; } @@ -78,6 +120,38 @@ static enum hw_atl_fw2x_rate link_speed_mask_2fw2x_ratemask(u32 speed) return rate; } +static u32 fw2x_to_eee_mask(u32 speed) +{ + u32 rate = 0; + + if (speed & HW_ATL_FW2X_CAP_EEE_10G_MASK) + rate |= AQ_NIC_RATE_EEE_10G; + if (speed & HW_ATL_FW2X_CAP_EEE_5G_MASK) + rate |= AQ_NIC_RATE_EEE_5G; + if (speed & HW_ATL_FW2X_CAP_EEE_2G5_MASK) + rate |= AQ_NIC_RATE_EEE_2GS; + if (speed & HW_ATL_FW2X_CAP_EEE_1G_MASK) + rate |= AQ_NIC_RATE_EEE_1G; + + return rate; +} + +static u32 eee_mask_to_fw2x(u32 speed) +{ + u32 rate = 0; + + if (speed & AQ_NIC_RATE_EEE_10G) + rate |= HW_ATL_FW2X_CAP_EEE_10G_MASK; + if (speed & AQ_NIC_RATE_EEE_5G) + rate |= HW_ATL_FW2X_CAP_EEE_5G_MASK; + if (speed & AQ_NIC_RATE_EEE_2GS) + rate |= HW_ATL_FW2X_CAP_EEE_2G5_MASK; + if (speed & AQ_NIC_RATE_EEE_1G) + rate |= HW_ATL_FW2X_CAP_EEE_1G_MASK; + + return rate; +} + static int aq_fw2x_set_link_speed(struct aq_hw_s *self, u32 speed) { u32 val = link_speed_mask_2fw2x_ratemask(speed); @@ -100,14 +174,27 @@ static void aq_fw2x_set_mpi_flow_control(struct aq_hw_s *self, u32 *mpi_state) *mpi_state &= ~BIT(CAPS_HI_ASYMMETRIC_PAUSE); } +static void aq_fw2x_upd_eee_rate_bits(struct aq_hw_s *self, u32 *mpi_opts, + u32 eee_speeds) +{ + *mpi_opts &= ~(HW_ATL_FW2X_CAP_EEE_1G_MASK | + HW_ATL_FW2X_CAP_EEE_2G5_MASK | + HW_ATL_FW2X_CAP_EEE_5G_MASK | + HW_ATL_FW2X_CAP_EEE_10G_MASK); + + *mpi_opts |= eee_mask_to_fw2x(eee_speeds); +} + static int aq_fw2x_set_state(struct aq_hw_s *self, enum hal_atl_utils_fw_state_e state) { u32 mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR); + struct aq_nic_cfg_s *cfg = self->aq_nic_cfg; switch (state) { case MPI_INIT: mpi_state &= ~BIT(CAPS_HI_LINK_DROP); + aq_fw2x_upd_eee_rate_bits(self, &mpi_state, cfg->eee_speeds); aq_fw2x_set_mpi_flow_control(self, &mpi_state); break; case MPI_DEINIT: @@ -126,7 +213,7 @@ static int aq_fw2x_update_link_status(struct aq_hw_s *self) { u32 mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE_ADDR); u32 speed = mpi_state & (FW2X_RATE_100M | FW2X_RATE_1G | - FW2X_RATE_2G5 | FW2X_RATE_5G | FW2X_RATE_10G); + FW2X_RATE_2G5 | FW2X_RATE_5G | FW2X_RATE_10G); struct aq_hw_link_status_s *link_status = &self->aq_link_status; if (speed) { @@ -175,9 +262,7 @@ static int aq_fw2x_get_mac_permanent(struct aq_hw_s *self, u8 *mac) get_random_bytes(&rnd, sizeof(unsigned int)); - l = 0xE3000000U - | (0xFFFFU & rnd) - | (0x00 << 16); + l = 0xE3000000U | (0xFFFFU & rnd) | (0x00 << 16); h = 0x8001300EU; mac[5] = (u8)(0xFFU & l); @@ -194,7 +279,7 @@ static int aq_fw2x_get_mac_permanent(struct aq_hw_s *self, u8 *mac) return err; } 
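aq_fw2x_set_sleep_proxy() and aq_fw2x_set_wol_params() in the hunks that follow share one FW2X doorbell idiom: upload an RPC, raise a request bit in the CONTROL2 register, then poll until firmware mirrors that bit into STATE2. The skeleton, with FOO_CTRL_BIT standing in for a concrete HW_ATL_FW2X_CTRL_* flag (AQ_HW_WAIT_FOR assigns -ETIME to err on timeout):

static int foo_fw2x_request(struct aq_hw_s *self, u32 ctrl_bit)
{
	int err = 0;
	u32 mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);

	/* raise the request bit in 0x36C */
	mpi_opts |= ctrl_bit;
	aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts);

	/* poll 0x374 until firmware acknowledges: 1us * 10000 = 10ms budget */
	AQ_HW_WAIT_FOR(aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE2_ADDR) &
		       ctrl_bit, 1U, 10000U);

	return err;
}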
-static int aq_fw2x_update_stats(struct aq_hw_s *self) +int aq_fw2x_update_stats(struct aq_hw_s *self) { int err = 0; u32 mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR); @@ -207,7 +292,7 @@ static int aq_fw2x_update_stats(struct aq_hw_s *self) /* Wait FW to report back */ AQ_HW_WAIT_FOR(orig_stats_val != (aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE2_ADDR) & - BIT(CAPS_HI_STATISTICS)), + BIT(CAPS_HI_STATISTICS)), 1U, 10000U); if (err) return err; @@ -215,6 +300,135 @@ static int aq_fw2x_update_stats(struct aq_hw_s *self) return hw_atl_utils_update_stats(self); } +static int aq_fw2x_set_sleep_proxy(struct aq_hw_s *self, u8 *mac) +{ + struct hw_atl_utils_fw_rpc *rpc = NULL; + struct offload_info *cfg = NULL; + unsigned int rpc_size = 0U; + u32 mpi_opts; + int err = 0; + + rpc_size = sizeof(rpc->msg_id) + sizeof(*cfg); + + err = hw_atl_utils_fw_rpc_wait(self, &rpc); + if (err < 0) + goto err_exit; + + memset(rpc, 0, rpc_size); + cfg = (struct offload_info *)(&rpc->msg_id + 1); + + memcpy(cfg->mac_addr, mac, ETH_ALEN); + cfg->len = sizeof(*cfg); + + /* Clear bit 0x36C.23 and 0x36C.22 */ + mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR); + mpi_opts &= ~HW_ATL_FW2X_CTRL_SLEEP_PROXY; + mpi_opts &= ~HW_ATL_FW2X_CTRL_LINK_DROP; + + aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts); + + err = hw_atl_utils_fw_rpc_call(self, rpc_size); + if (err < 0) + goto err_exit; + + /* Set bit 0x36C.23 */ + mpi_opts |= HW_ATL_FW2X_CTRL_SLEEP_PROXY; + aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts); + + AQ_HW_WAIT_FOR((aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE2_ADDR) & + HW_ATL_FW2X_CTRL_SLEEP_PROXY), 1U, 10000U); + +err_exit: + return err; +} + +static int aq_fw2x_set_wol_params(struct aq_hw_s *self, u8 *mac) +{ + struct hw_atl_utils_fw_rpc *rpc = NULL; + struct fw2x_msg_wol *msg = NULL; + u32 mpi_opts; + int err = 0; + + err = hw_atl_utils_fw_rpc_wait(self, &rpc); + if (err < 0) + goto err_exit; + + msg = (struct fw2x_msg_wol *)rpc; + + msg->msg_id = HAL_ATLANTIC_UTILS_FW2X_MSG_WOL; + msg->magic_packet_enabled = true; + memcpy(msg->hw_addr, mac, ETH_ALEN); + + mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR); + mpi_opts &= ~(HW_ATL_FW2X_CTRL_SLEEP_PROXY | HW_ATL_FW2X_CTRL_WOL); + + aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts); + + err = hw_atl_utils_fw_rpc_call(self, sizeof(*msg)); + if (err < 0) + goto err_exit; + + /* Set bit 0x36C.24 */ + mpi_opts |= HW_ATL_FW2X_CTRL_WOL; + aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts); + + AQ_HW_WAIT_FOR((aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE2_ADDR) & + HW_ATL_FW2X_CTRL_WOL), 1U, 10000U); + +err_exit: + return err; +} + +static int aq_fw2x_set_power(struct aq_hw_s *self, unsigned int power_state, + u8 *mac) +{ + int err = 0; + + if (self->aq_nic_cfg->wol & AQ_NIC_WOL_ENABLED) { + err = aq_fw2x_set_sleep_proxy(self, mac); + if (err < 0) + goto err_exit; + err = aq_fw2x_set_wol_params(self, mac); + } + +err_exit: + return err; +} + +static int aq_fw2x_set_eee_rate(struct aq_hw_s *self, u32 speed) +{ + u32 mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR); + + aq_fw2x_upd_eee_rate_bits(self, &mpi_opts, speed); + + aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts); + + return 0; +} + +static int aq_fw2x_get_eee_rate(struct aq_hw_s *self, u32 *rate, + u32 *supported_rates) +{ + u32 mpi_state; + u32 caps_hi; + int err = 0; + u32 addr = self->mbox_addr + offsetof(struct hw_atl_utils_mbox, info) + + offsetof(struct hw_aq_info, caps_hi); + + err = 
hw_atl_utils_fw_downld_dwords(self, addr, &caps_hi, + sizeof(caps_hi) / sizeof(u32)); + + if (err) + return err; + + *supported_rates = fw2x_to_eee_mask(caps_hi); + + mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE2_ADDR); + *rate = fw2x_to_eee_mask(mpi_state); + + return err; +} + static int aq_fw2x_renegotiate(struct aq_hw_s *self) { u32 mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR); @@ -247,5 +461,8 @@ const struct aq_fw_ops aq_fw_2x_ops = { .set_state = aq_fw2x_set_state, .update_link_status = aq_fw2x_update_link_status, .update_stats = aq_fw2x_update_stats, - .set_flow_control = aq_fw2x_set_flow_control, + .set_power = aq_fw2x_set_power, + .set_eee_rate = aq_fw2x_set_eee_rate, + .get_eee_rate = aq_fw2x_get_eee_rate, + .set_flow_control = aq_fw2x_set_flow_control, }; diff --git a/drivers/net/ethernet/aquantia/atlantic/ver.h b/drivers/net/ethernet/aquantia/atlantic/ver.h index 94efc6477bdc..b48260114da3 100644 --- a/drivers/net/ethernet/aquantia/atlantic/ver.h +++ b/drivers/net/ethernet/aquantia/atlantic/ver.h @@ -12,7 +12,7 @@ #define NIC_MAJOR_DRIVER_VERSION 2 #define NIC_MINOR_DRIVER_VERSION 0 -#define NIC_BUILD_DRIVER_VERSION 3 +#define NIC_BUILD_DRIVER_VERSION 4 #define NIC_REVISION_DRIVER_VERSION 0 #define AQ_CFG_DRV_VERSION_SUFFIX "-kern" diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c index b81fbf119bce..63edc5706c09 100644 --- a/drivers/net/ethernet/atheros/atlx/atl1.c +++ b/drivers/net/ethernet/atheros/atlx/atl1.c @@ -63,7 +63,6 @@ #include <linux/jiffies.h> #include <linux/mii.h> #include <linux/module.h> -#include <linux/moduleparam.h> #include <linux/net.h> #include <linux/netdevice.h> #include <linux/pci.h> @@ -3278,7 +3277,6 @@ static int atl1_set_link_ksettings(struct net_device *netdev, u16 phy_data; int ret_val = 0; u16 old_media_type = hw->media_type; - u32 advertising; if (netif_running(adapter->netdev)) { if (netif_msg_link(adapter)) @@ -3312,25 +3310,7 @@ static int atl1_set_link_ksettings(struct net_device *netdev, hw->media_type = MEDIA_TYPE_10M_HALF; } } - switch (hw->media_type) { - case MEDIA_TYPE_AUTO_SENSOR: - advertising = - ADVERTISED_10baseT_Half | - ADVERTISED_10baseT_Full | - ADVERTISED_100baseT_Half | - ADVERTISED_100baseT_Full | - ADVERTISED_1000baseT_Full | - ADVERTISED_Autoneg | ADVERTISED_TP; - break; - case MEDIA_TYPE_1000M_FULL: - advertising = - ADVERTISED_1000baseT_Full | - ADVERTISED_Autoneg | ADVERTISED_TP; - break; - default: - advertising = 0; - break; - } + if (atl1_phy_setup_autoneg_adv(hw)) { ret_val = -EINVAL; if (netif_msg_link(adapter)) diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c index c8d1f8fa4713..6f56276015a4 100644 --- a/drivers/net/ethernet/aurora/nb8800.c +++ b/drivers/net/ethernet/aurora/nb8800.c @@ -935,18 +935,11 @@ static void nb8800_pause_adv(struct net_device *dev) { struct nb8800_priv *priv = netdev_priv(dev); struct phy_device *phydev = dev->phydev; - u32 adv = 0; if (!phydev) return; - if (priv->pause_rx) - adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause; - if (priv->pause_tx) - adv ^= ADVERTISED_Asym_Pause; - - phydev->supported |= adv; - phydev->advertising |= adv; + phy_set_asym_pause(phydev, priv->pause_rx, priv->pause_tx); } static int nb8800_open(struct net_device *dev) diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index 897302adc38e..6bae973d4dce 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ 
b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -568,12 +568,13 @@ static irqreturn_t bcm_enet_isr_dma(int irq, void *dev_id) /* * tx request callback */ -static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t +bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct bcm_enet_priv *priv; struct bcm_enet_desc *desc; u32 len_stat; - int ret; + netdev_tx_t ret; priv = netdev_priv(dev); @@ -890,19 +891,10 @@ static int bcm_enet_open(struct net_device *dev) } /* mask with MAC supported features */ - phydev->supported &= (SUPPORTED_10baseT_Half | - SUPPORTED_10baseT_Full | - SUPPORTED_100baseT_Half | - SUPPORTED_100baseT_Full | - SUPPORTED_Autoneg | - SUPPORTED_Pause | - SUPPORTED_MII); - phydev->advertising = phydev->supported; - - if (priv->pause_auto && priv->pause_rx && priv->pause_tx) - phydev->advertising |= SUPPORTED_Pause; - else - phydev->advertising &= ~SUPPORTED_Pause; + phy_support_sym_pause(phydev); + phy_set_max_speed(phydev, SPEED_100); + phy_set_sym_pause(phydev, priv->pause_rx, priv->pause_rx, + priv->pause_auto); phy_attached_info(phydev); diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index c57238fce863..4122553e224b 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -126,8 +126,8 @@ static inline void tdma_port_write_desc_addr(struct bcm_sysport_priv *priv, } /* Ethtool operations */ -static int bcm_sysport_set_rx_csum(struct net_device *dev, - netdev_features_t wanted) +static void bcm_sysport_set_rx_csum(struct net_device *dev, + netdev_features_t wanted) { struct bcm_sysport_priv *priv = netdev_priv(dev); u32 reg; @@ -157,12 +157,10 @@ static int bcm_sysport_set_rx_csum(struct net_device *dev, reg &= ~RXCHK_BRCM_TAG_EN; rxchk_writel(priv, reg, RXCHK_CONTROL); - - return 0; } -static int bcm_sysport_set_tx_csum(struct net_device *dev, - netdev_features_t wanted) +static void bcm_sysport_set_tx_csum(struct net_device *dev, + netdev_features_t wanted) { struct bcm_sysport_priv *priv = netdev_priv(dev); u32 reg; @@ -177,23 +175,24 @@ static int bcm_sysport_set_tx_csum(struct net_device *dev, else reg &= ~tdma_control_bit(priv, TSB_EN); tdma_writel(priv, reg, TDMA_CONTROL); - - return 0; } static int bcm_sysport_set_features(struct net_device *dev, netdev_features_t features) { - netdev_features_t changed = features ^ dev->features; - netdev_features_t wanted = dev->wanted_features; - int ret = 0; + struct bcm_sysport_priv *priv = netdev_priv(dev); + + /* Read CRC forward */ + if (!priv->is_lite) + priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD); + else + priv->crc_fwd = !((gib_readl(priv, GIB_CONTROL) & + GIB_FCS_STRIP) >> GIB_FCS_STRIP_SHIFT); - if (changed & NETIF_F_RXCSUM) - ret = bcm_sysport_set_rx_csum(dev, wanted); - if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) - ret = bcm_sysport_set_tx_csum(dev, wanted); + bcm_sysport_set_rx_csum(dev, features); + bcm_sysport_set_tx_csum(dev, features); - return ret; + return 0; } /* Hardware counters must be kept in sync because the order/offset @@ -285,6 +284,8 @@ static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = { STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed), STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed), STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed), + STAT_MIB_SOFT("tx_realloc_tsb", mib.tx_realloc_tsb), + STAT_MIB_SOFT("tx_realloc_tsb_failed", mib.tx_realloc_tsb_failed), /* Per TX-queue statistics 
are dynamically appended */ }; @@ -1218,6 +1219,7 @@ static void bcm_sysport_poll_controller(struct net_device *dev) static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb, struct net_device *dev) { + struct bcm_sysport_priv *priv = netdev_priv(dev); struct sk_buff *nskb; struct bcm_tsb *tsb; u32 csum_info; @@ -1228,13 +1230,16 @@ static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb, /* Re-allocate SKB if needed */ if (unlikely(skb_headroom(skb) < sizeof(*tsb))) { nskb = skb_realloc_headroom(skb, sizeof(*tsb)); - dev_kfree_skb(skb); if (!nskb) { + dev_kfree_skb_any(skb); + priv->mib.tx_realloc_tsb_failed++; dev->stats.tx_errors++; dev->stats.tx_dropped++; return NULL; } + dev_consume_skb_any(skb); skb = nskb; + priv->mib.tx_realloc_tsb++; } tsb = skb_push(skb, sizeof(*tsb)); @@ -1970,16 +1975,14 @@ static int bcm_sysport_open(struct net_device *dev) else gib_set_pad_extension(priv); + /* Apply features again in case we changed them while interface was + * down + */ + bcm_sysport_set_features(dev, dev->features); + /* Set MAC address */ umac_set_hw_addr(priv, dev->dev_addr); - /* Read CRC forward */ - if (!priv->is_lite) - priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD); - else - priv->crc_fwd = !((gib_readl(priv, GIB_CONTROL) & - GIB_FCS_STRIP) >> GIB_FCS_STRIP_SHIFT); - phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link, 0, priv->phy_interface); if (!phydev) { @@ -2508,9 +2511,10 @@ static int bcm_sysport_probe(struct platform_device *pdev) dev->netdev_ops = &bcm_sysport_netdev_ops; netif_napi_add(dev, &priv->napi, bcm_sysport_poll, 64); - /* HW supported features, none enabled by default */ - dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA | - NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + dev->features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA | + NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + dev->hw_features |= dev->features; + dev->vlan_features |= dev->features; /* Request the WOL interrupt and advertise suspend if available */ priv->wol_irq_disabled = 1; @@ -2710,7 +2714,6 @@ static int __maybe_unused bcm_sysport_resume(struct device *d) struct net_device *dev = dev_get_drvdata(d); struct bcm_sysport_priv *priv = netdev_priv(dev); unsigned int i; - u32 reg; int ret; if (!netif_running(dev)) @@ -2754,12 +2757,8 @@ static int __maybe_unused bcm_sysport_resume(struct device *d) goto out_free_rx_ring; } - /* Enable rxhck */ - if (priv->rx_chk_en) { - reg = rxchk_readl(priv, RXCHK_CONTROL); - reg |= RXCHK_EN; - rxchk_writel(priv, reg, RXCHK_CONTROL); - } + /* Restore enabled features */ + bcm_sysport_set_features(dev, dev->features); rbuf_init(priv); diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h index 046c6c1d97fd..a7a230884a87 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.h +++ b/drivers/net/ethernet/broadcom/bcmsysport.h @@ -607,6 +607,8 @@ struct bcm_sysport_mib { u32 alloc_rx_buff_failed; u32 rx_dma_failed; u32 tx_dma_failed; + u32 tx_realloc_tsb; + u32 tx_realloc_tsb_failed; }; /* HW maintains a large list of counters */ diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 4c94d9218bba..cabc8e49ad24 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -616,7 +616,6 @@ static int bgmac_dma_alloc(struct bgmac *bgmac) static const u16 ring_base[] = { BGMAC_DMA_BASE0, BGMAC_DMA_BASE1, BGMAC_DMA_BASE2, BGMAC_DMA_BASE3, }; int size; /* ring size: different for Tx and Rx */ - int err; int i; 
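The bcm_sysport_insert_tsb() fix a few hunks back is worth generalizing: the old code freed the original skb before checking whether skb_realloc_headroom() had succeeded, and used dev_kfree_skb() from xmit context. The corrected pattern, sketched with placeholder accounting:

#include <linux/skbuff.h>
#include <linux/netdevice.h>

static struct sk_buff *foo_ensure_headroom(struct sk_buff *skb,
					   unsigned int needed,
					   struct net_device *dev)
{
	struct sk_buff *nskb;

	if (likely(skb_headroom(skb) >= needed))
		return skb;

	nskb = skb_realloc_headroom(skb, needed);
	if (!nskb) {
		/* genuine drop; *_any() is safe in hard-irq context */
		dev_kfree_skb_any(skb);
		dev->stats.tx_dropped++;
		return NULL;
	}

	/* original was copied, not dropped: consume, don't "free" */
	dev_consume_skb_any(nskb == skb ? NULL : skb);
	return nskb;
}

Freeing only after the copy succeeds is what lets the driver bump the new tx_realloc_tsb/tx_realloc_tsb_failed counters accurately.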
BUILD_BUG_ON(BGMAC_MAX_TX_RINGS > ARRAY_SIZE(ring_base)); @@ -666,7 +665,6 @@ static int bgmac_dma_alloc(struct bgmac *bgmac) if (!ring->cpu_base) { dev_err(bgmac->dev, "Allocation of RX ring 0x%X failed\n", ring->mmio_base); - err = -ENOMEM; goto err_dma_free; } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index 0e508e5defce..142bc11b9fbb 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -494,6 +494,7 @@ int bnx2x_get_vf_config(struct net_device *dev, int vf, int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac); int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos, __be16 vlan_proto); +int bnx2x_set_vf_spoofchk(struct net_device *dev, int idx, bool val); /* select_queue callback */ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb, diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index fcc2328bb0d9..40093d88353f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -3536,6 +3536,16 @@ static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp) */ static void bnx2x_config_mf_bw(struct bnx2x *bp) { + /* Workaround for MFW bug. + * MFW is not supposed to generate BW attention in + * single function mode. + */ + if (!IS_MF(bp)) { + DP(BNX2X_MSG_MCP, + "Ignoring MF BW config in single function mode\n"); + return; + } + if (bp->link_vars.link_up) { bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX); bnx2x_link_sync_notify(bp); @@ -13105,6 +13115,7 @@ static const struct net_device_ops bnx2x_netdev_ops = { .ndo_set_vf_mac = bnx2x_set_vf_mac, .ndo_set_vf_vlan = bnx2x_set_vf_vlan, .ndo_get_vf_config = bnx2x_get_vf_config, + .ndo_set_vf_spoofchk = bnx2x_set_vf_spoofchk, #endif #ifdef NETDEV_FCOE_WWNN .ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn, diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index 62da46537734..c835f6c7ecd0 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -209,7 +209,10 @@ void bnx2x_vfop_qctor_prep(struct bnx2x *bp, */ __set_bit(BNX2X_Q_FLG_TX_SWITCH, &setup_p->flags); __set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags); - __set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags); + if (vf->spoofchk) + __set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags); + else + __clear_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags); /* Setup-op rx parameters */ if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) { @@ -1269,6 +1272,8 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, bnx2x_vf(bp, i, state) = VF_FREE; mutex_init(&bnx2x_vf(bp, i, op_mutex)); bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE; + /* enable spoofchk by default */ + bnx2x_vf(bp, i, spoofchk) = 1; } /* re-read the IGU CAM for VFs - index and abs_vfid must be set */ @@ -2632,7 +2637,8 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx, ivi->qos = 0; ivi->max_tx_rate = 10000; /* always 10G. TBA take from link struct */ ivi->min_tx_rate = 0; - ivi->spoofchk = 1; /*always enabled */ + ivi->spoofchk = vf->spoofchk ? 
1 : 0; + ivi->linkstate = vf->link_cfg; if (vf->state == VF_ENABLED) { /* mac and vlan are in vlan_mac objects */ if (bnx2x_validate_vf_sp_objs(bp, vf, false)) { @@ -2950,6 +2956,77 @@ out: return rc; } +int bnx2x_set_vf_spoofchk(struct net_device *dev, int idx, bool val) +{ + struct bnx2x *bp = netdev_priv(dev); + struct bnx2x_virtf *vf; + int i, rc = 0; + + vf = BP_VF(bp, idx); + if (!vf) + return -EINVAL; + + /* nothing to do */ + if (vf->spoofchk == val) + return 0; + + vf->spoofchk = val ? 1 : 0; + + DP(BNX2X_MSG_IOV, "%s spoofchk for VF %d\n", + val ? "enabling" : "disabling", idx); + + /* is vf initialized and queue set up? */ + if (vf->state != VF_ENABLED || + bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) != + BNX2X_Q_LOGICAL_STATE_ACTIVE) + return rc; + + /* User should be able to see error in system logs */ + if (!bnx2x_validate_vf_sp_objs(bp, vf, true)) + return -EINVAL; + + /* send queue update ramrods to configure spoofchk */ + for_each_vfq(vf, i) { + struct bnx2x_queue_state_params q_params = {NULL}; + struct bnx2x_queue_update_params *update_params; + + q_params.q_obj = &bnx2x_vfq(vf, i, sp_obj); + + /* validate the Q is UP */ + if (bnx2x_get_q_logical_state(bp, q_params.q_obj) != + BNX2X_Q_LOGICAL_STATE_ACTIVE) + continue; + + __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); + q_params.cmd = BNX2X_Q_CMD_UPDATE; + update_params = &q_params.params.update; + __set_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG, + &update_params->update_flags); + if (val) { + __set_bit(BNX2X_Q_UPDATE_ANTI_SPOOF, + &update_params->update_flags); + } else { + __clear_bit(BNX2X_Q_UPDATE_ANTI_SPOOF, + &update_params->update_flags); + } + + /* Update the Queue state */ + rc = bnx2x_queue_state_change(bp, &q_params); + if (rc) { + BNX2X_ERR("Failed to %s spoofchk on VF %d - vfq %d\n", + val ? "enable" : "disable", idx, i); + goto out; + } + } +out: + if (!rc) + DP(BNX2X_MSG_IOV, + "%s spoofchk for VF[%d]\n", val ? "Enabled" : "Disabled", + idx); + + return rc; +} + /* crc is the first field in the bulletin board. Compute the crc over the * entire bulletin board excluding the crc field itself. 
Use the length field * as the Bulletin Board was posted by a PF with possibly a different version diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h index eb814c65152f..b6ebd92ec565 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h @@ -142,6 +142,8 @@ struct bnx2x_virtf { bool flr_clnup_stage; /* true during flr cleanup */ bool malicious; /* true if FW indicated so, until FLR */ + /* 1(true) if spoof check is enabled */ + u8 spoofchk; /* dma */ dma_addr_t fw_stat_map; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c index 790c684f08ab..140dbd62106d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c @@ -21,9 +21,22 @@ static const struct devlink_ops bnxt_dl_ops = { #endif /* CONFIG_BNXT_SRIOV */ }; +enum bnxt_dl_param_id { + BNXT_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, + BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK, +}; + static const struct bnxt_dl_nvm_param nvm_params[] = { {DEVLINK_PARAM_GENERIC_ID_ENABLE_SRIOV, NVM_OFF_ENABLE_SRIOV, BNXT_NVM_SHARED_CFG, 1}, + {DEVLINK_PARAM_GENERIC_ID_IGNORE_ARI, NVM_OFF_IGNORE_ARI, + BNXT_NVM_SHARED_CFG, 1}, + {DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX, + NVM_OFF_MSIX_VEC_PER_PF_MAX, BNXT_NVM_SHARED_CFG, 10}, + {DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN, + NVM_OFF_MSIX_VEC_PER_PF_MIN, BNXT_NVM_SHARED_CFG, 7}, + {BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK, NVM_OFF_DIS_GRE_VER_CHECK, + BNXT_NVM_SHARED_CFG, 1}, }; static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg, @@ -55,8 +68,22 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg, idx = bp->pf.fw_fid - BNXT_FIRST_PF_FID; bytesize = roundup(nvm_param.num_bits, BITS_PER_BYTE) / BITS_PER_BYTE; - if (nvm_param.num_bits == 1) - buf = &val->vbool; + switch (bytesize) { + case 1: + if (nvm_param.num_bits == 1) + buf = &val->vbool; + else + buf = &val->vu8; + break; + case 2: + buf = &val->vu16; + break; + case 4: + buf = &val->vu32; + break; + default: + return -EFAULT; + } data_addr = dma_zalloc_coherent(&bp->pdev->dev, bytesize, &data_dma_addr, GFP_KERNEL); @@ -78,8 +105,12 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg, memcpy(buf, data_addr, bytesize); dma_free_coherent(&bp->pdev->dev, bytesize, data_addr, data_dma_addr); - if (rc) + if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) { + netdev_err(bp->dev, "PF does not have admin privileges to modify NVM config\n"); + return -EACCES; + } else if (rc) { return -EIO; + } return 0; } @@ -88,9 +119,15 @@ static int bnxt_dl_nvm_param_get(struct devlink *dl, u32 id, { struct hwrm_nvm_get_variable_input req = {0}; struct bnxt *bp = bnxt_get_bp_from_dl(dl); + int rc; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_VARIABLE, -1, -1); - return bnxt_hwrm_nvm_req(bp, id, &req, sizeof(req), &ctx->val); + rc = bnxt_hwrm_nvm_req(bp, id, &req, sizeof(req), &ctx->val); + if (!rc) + if (id == BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK) + ctx->val.vbool = !ctx->val.vbool; + + return rc; } static int bnxt_dl_nvm_param_set(struct devlink *dl, u32 id, @@ -100,14 +137,55 @@ static int bnxt_dl_nvm_param_set(struct devlink *dl, u32 id, struct bnxt *bp = bnxt_get_bp_from_dl(dl); bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_SET_VARIABLE, -1, -1); + + if (id == BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK) + ctx->val.vbool = !ctx->val.vbool; + return bnxt_hwrm_nvm_req(bp, id, 
&req, sizeof(req), &ctx->val); } +static int bnxt_dl_msix_validate(struct devlink *dl, u32 id, + union devlink_param_value val, + struct netlink_ext_ack *extack) +{ + int max_val = -1; + + if (id == DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX) + max_val = BNXT_MSIX_VEC_MAX; + + if (id == DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN) + max_val = BNXT_MSIX_VEC_MIN_MAX; + + if (val.vu32 > max_val) { + NL_SET_ERR_MSG_MOD(extack, "MSIX value is exceeding the range"); + return -EINVAL; + } + + return 0; +} + static const struct devlink_param bnxt_dl_params[] = { DEVLINK_PARAM_GENERIC(ENABLE_SRIOV, BIT(DEVLINK_PARAM_CMODE_PERMANENT), bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set, NULL), + DEVLINK_PARAM_GENERIC(IGNORE_ARI, + BIT(DEVLINK_PARAM_CMODE_PERMANENT), + bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set, + NULL), + DEVLINK_PARAM_GENERIC(MSIX_VEC_PER_PF_MAX, + BIT(DEVLINK_PARAM_CMODE_PERMANENT), + bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set, + bnxt_dl_msix_validate), + DEVLINK_PARAM_GENERIC(MSIX_VEC_PER_PF_MIN, + BIT(DEVLINK_PARAM_CMODE_PERMANENT), + bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set, + bnxt_dl_msix_validate), + DEVLINK_PARAM_DRIVER(BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK, + "gre_ver_check", DEVLINK_PARAM_TYPE_BOOL, + BIT(DEVLINK_PARAM_CMODE_PERMANENT), + bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set, + NULL), }; int bnxt_dl_register(struct bnxt *bp) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h index 2f68dc048390..5b6b2c7d97cf 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h @@ -33,8 +33,15 @@ static inline void bnxt_link_bp_to_dl(struct bnxt *bp, struct devlink *dl) } } +#define NVM_OFF_MSIX_VEC_PER_PF_MAX 108 +#define NVM_OFF_MSIX_VEC_PER_PF_MIN 114 +#define NVM_OFF_IGNORE_ARI 164 +#define NVM_OFF_DIS_GRE_VER_CHECK 171 #define NVM_OFF_ENABLE_SRIOV 401 +#define BNXT_MSIX_VEC_MAX 1280 +#define BNXT_MSIX_VEC_MIN_MAX 128 + enum bnxt_nvm_dir_type { BNXT_NVM_SHARED_CFG = 40, BNXT_NVM_PORT_CFG, diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index e1594c9df4c6..749f63beddd8 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@ -189,7 +189,6 @@ static int bnxt_tc_parse_flow(struct bnxt *bp, struct bnxt_tc_flow *flow) { struct flow_dissector *dissector = tc_flow_cmd->dissector; - u16 addr_type = 0; /* KEY_CONTROL and KEY_BASIC are needed for forming a meaningful key */ if ((dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) == 0 || @@ -199,13 +198,6 @@ static int bnxt_tc_parse_flow(struct bnxt *bp, return -EOPNOTSUPP; } - if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_CONTROL)) { - struct flow_dissector_key_control *key = - GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_CONTROL); - - addr_type = key->addr_type; - } - if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_BASIC)) { struct flow_dissector_key_basic *key = GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_BASIC); @@ -301,13 +293,6 @@ static int bnxt_tc_parse_flow(struct bnxt *bp, flow->l4_mask.icmp.code = mask->code; } - if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) { - struct flow_dissector_key_control *key = - GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_CONTROL); - - addr_type = key->addr_type; - } - if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) { struct flow_dissector_key_ipv4_addrs *key = GET_KEY(tc_flow_cmd, 
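
The bnxt_en devlink additions follow the standard devlink-param pattern: generic parameters (ignore_ari, msix_vec_per_pf_max/min) reuse well-known IDs via DEVLINK_PARAM_GENERIC(), driver-specific ones start above DEVLINK_PARAM_GENERIC_ID_MAX, and a validate callback rejects bad values with an extack message before the set callback ever runs. Note the deliberate inversion for gre_ver_check: the NVM stores a "disable" bit (NVM_OFF_DIS_GRE_VER_CHECK), so both get and set flip vbool. A rough sketch of the registration side, with hypothetical foo_* callbacks (the macros and types are the real devlink API as used above):

#include <net/devlink.h>

/* Driver-private parameter IDs must start above the generic range. */
enum foo_dl_param_id {
	FOO_DL_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
	FOO_DL_PARAM_ID_EXAMPLE,
};

static int foo_dl_get(struct devlink *dl, u32 id,
		      struct devlink_param_gset_ctx *ctx)
{
	return 0;	/* read from NVM in a real driver */
}

static int foo_dl_set(struct devlink *dl, u32 id,
		      struct devlink_param_gset_ctx *ctx)
{
	return 0;	/* write to NVM in a real driver */
}

static int foo_dl_validate(struct devlink *dl, u32 id,
			   union devlink_param_value val,
			   struct netlink_ext_ack *extack)
{
	if (val.vu32 > 1280) {		/* example limit only */
		NL_SET_ERR_MSG_MOD(extack, "value out of range");
		return -EINVAL;
	}
	return 0;
}

static const struct devlink_param foo_dl_params[] = {
	DEVLINK_PARAM_GENERIC(MSIX_VEC_PER_PF_MAX,
			      BIT(DEVLINK_PARAM_CMODE_PERMANENT),
			      foo_dl_get, foo_dl_set, foo_dl_validate),
	DEVLINK_PARAM_DRIVER(FOO_DL_PARAM_ID_EXAMPLE, "example",
			     DEVLINK_PARAM_TYPE_BOOL,
			     BIT(DEVLINK_PARAM_CMODE_PERMANENT),
			     foo_dl_get, foo_dl_set, NULL),
};

The table is registered once per device with devlink_params_register(dl, foo_dl_params, ARRAY_SIZE(foo_dl_params)).
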
FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c index e31f5d803c13..9a25c05aa571 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c @@ -209,9 +209,7 @@ struct net_device *bnxt_get_vf_rep(struct bnxt *bp, u16 cfa_code) void bnxt_vf_rep_rx(struct bnxt *bp, struct sk_buff *skb) { struct bnxt_vf_rep *vf_rep = netdev_priv(skb->dev); - struct bnxt_vf_rep_stats *rx_stats; - rx_stats = &vf_rep->rx_stats; vf_rep->rx_stats.bytes += skb->len; vf_rep->rx_stats.packets++; @@ -523,7 +521,8 @@ int bnxt_dl_eswitch_mode_get(struct devlink *devlink, u16 *mode) return 0; } -int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode) +int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode, + struct netlink_ext_ack *extack) { struct bnxt *bp = bnxt_get_bp_from_dl(devlink); int rc = 0; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h index 38b9a75ad724..d7287651422f 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h @@ -30,7 +30,8 @@ static inline u16 bnxt_vf_rep_get_fid(struct net_device *dev) bool bnxt_dev_is_vf_rep(struct net_device *dev); int bnxt_dl_eswitch_mode_get(struct devlink *devlink, u16 *mode); -int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode); +int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode, + struct netlink_ext_ack *extack); #else diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index 4241ae928d4a..b756fc79424e 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -214,7 +214,7 @@ int bcmgenet_mii_config(struct net_device *dev, bool init) case PHY_INTERFACE_MODE_MII: phy_name = "external MII"; - phydev->supported &= PHY_BASIC_FEATURES; + phy_set_max_speed(phydev, SPEED_100); bcmgenet_sys_writel(priv, PORT_MODE_EXT_EPHY, SYS_PORT_CTRL); break; @@ -226,11 +226,10 @@ int bcmgenet_mii_config(struct net_device *dev, bool init) * capabilities, use that knowledge to also configure the * Reverse MII interface correctly. 
*/ - if ((dev->phydev->supported & PHY_BASIC_FEATURES) == - PHY_BASIC_FEATURES) - port_ctrl = PORT_MODE_EXT_RVMII_25; - else + if (dev->phydev->supported & PHY_1000BT_FEATURES) port_ctrl = PORT_MODE_EXT_RVMII_50; + else + port_ctrl = PORT_MODE_EXT_RVMII_25; bcmgenet_sys_writel(priv, port_ctrl, SYS_PORT_CTRL); break; diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c index ef4a0c326736..5db9f4158e62 100644 --- a/drivers/net/ethernet/broadcom/sb1250-mac.c +++ b/drivers/net/ethernet/broadcom/sb1250-mac.c @@ -156,7 +156,7 @@ enum sbmac_state { (d)->sbdma_dscrtable : (d)->f+1) -#define NUMCACHEBLKS(x) (((x)+SMP_CACHE_BYTES-1)/SMP_CACHE_BYTES) +#define NUMCACHEBLKS(x) DIV_ROUND_UP(x, SMP_CACHE_BYTES) #define SBMAC_MAX_TXDESCR 256 #define SBMAC_MAX_RXDESCR 256 @@ -299,7 +299,7 @@ static enum sbmac_state sbmac_set_channel_state(struct sbmac_softc *, static void sbmac_promiscuous_mode(struct sbmac_softc *sc, int onoff); static uint64_t sbmac_addr2reg(unsigned char *ptr); static irqreturn_t sbmac_intr(int irq, void *dev_instance); -static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev); +static netdev_tx_t sbmac_start_tx(struct sk_buff *skb, struct net_device *dev); static void sbmac_setmulti(struct sbmac_softc *sc); static int sbmac_init(struct platform_device *pldev, long long base); static int sbmac_set_speed(struct sbmac_softc *s, enum sbmac_speed speed); @@ -2028,7 +2028,7 @@ static irqreturn_t sbmac_intr(int irq,void *dev_instance) * Return value: * nothing ********************************************************************* */ -static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t sbmac_start_tx(struct sk_buff *skb, struct net_device *dev) { struct sbmac_softc *sc = netdev_priv(dev); unsigned long flags; @@ -2357,21 +2357,11 @@ static int sbmac_mii_probe(struct net_device *dev) } /* Remove any features not supported by the controller */ - phy_dev->supported &= SUPPORTED_10baseT_Half | - SUPPORTED_10baseT_Full | - SUPPORTED_100baseT_Half | - SUPPORTED_100baseT_Full | - SUPPORTED_1000baseT_Half | - SUPPORTED_1000baseT_Full | - SUPPORTED_Autoneg | - SUPPORTED_MII | - SUPPORTED_Pause | - SUPPORTED_Asym_Pause; + phy_set_max_speed(phy_dev, SPEED_1000); + phy_support_asym_pause(phy_dev); phy_attached_info(phy_dev); - phy_dev->advertising = phy_dev->supported; - sc->phy_dev = phy_dev; return 0; diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index e6f28c7942ab..89295306f161 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -1598,7 +1598,7 @@ static int tg3_mdio_init(struct tg3 *tp) phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE; if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN)) phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE; - /* fallthru */ + /* fall through */ case PHY_ID_RTL8211C: phydev->interface = PHY_INTERFACE_MODE_RGMII; break; @@ -2122,16 +2122,14 @@ static int tg3_phy_init(struct tg3 *tp) case PHY_INTERFACE_MODE_GMII: case PHY_INTERFACE_MODE_RGMII: if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { - phydev->supported &= (PHY_GBIT_FEATURES | - SUPPORTED_Pause | - SUPPORTED_Asym_Pause); + phy_set_max_speed(phydev, SPEED_1000); + phy_support_asym_pause(phydev); break; } - /* fallthru */ + /* fall through */ case PHY_INTERFACE_MODE_MII: - phydev->supported &= (PHY_BASIC_FEATURES | - SUPPORTED_Pause | - SUPPORTED_Asym_Pause); + phy_set_max_speed(phydev, SPEED_100); + phy_support_asym_pause(phydev); break; default: 
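
A recurring conversion in this pull: drivers stop hand-editing the phydev->supported and phydev->advertising bitmaps (PHY_BASIC_FEATURES, SUPPORTED_* masks) and call phylib helpers instead, which keep the two masks consistent and ease the planned move of these fields to linkmode bitmaps. The genet, sb1250 and tg3 hunks here, and the macb one below, all reduce to the same shape; a sketch with a hypothetical foo_setup_phy() (the phy_* calls are the real phylib API shown in the hunks):

#include <linux/phy.h>

static void foo_setup_phy(struct phy_device *phydev, bool gigabit,
			  bool no_gbit_half)
{
	/* Replaces:  phydev->supported &= PHY_GBIT_FEATURES | ...;
	 *            phydev->advertising = phydev->supported;
	 */
	phy_set_max_speed(phydev, gigabit ? SPEED_1000 : SPEED_100);
	phy_support_asym_pause(phydev);	/* Pause + Asym_Pause */

	/* Individual modes can still be dropped, as macb does for its
	 * no-gigabit-half quirk:
	 */
	if (no_gbit_half)
		phy_remove_link_mode(phydev,
				     ETHTOOL_LINK_MODE_1000baseT_Half_BIT);

	phy_attached_info(phydev);
}
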
phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)); @@ -2140,8 +2138,6 @@ static int tg3_phy_init(struct tg3 *tp) tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED; - phydev->advertising = phydev->supported; - phy_attached_info(phydev); return 0; @@ -5215,7 +5211,7 @@ static int tg3_fiber_aneg_smachine(struct tg3 *tp, if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN)) ap->state = ANEG_STATE_AN_ENABLE; - /* fallthru */ + /* fall through */ case ANEG_STATE_AN_ENABLE: ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX); if (ap->flags & MR_AN_ENABLE) { @@ -5245,7 +5241,7 @@ static int tg3_fiber_aneg_smachine(struct tg3 *tp, ret = ANEG_TIMER_ENAB; ap->state = ANEG_STATE_RESTART; - /* fallthru */ + /* fall through */ case ANEG_STATE_RESTART: delta = ap->cur_time - ap->link_time; if (delta > ANEG_STATE_SETTLE_TIME) @@ -5288,7 +5284,7 @@ static int tg3_fiber_aneg_smachine(struct tg3 *tp, ap->state = ANEG_STATE_ACK_DETECT; - /* fallthru */ + /* fall through */ case ANEG_STATE_ACK_DETECT: if (ap->ack_match != 0) { if ((ap->rxconfig & ~ANEG_CFG_ACK) == @@ -12496,31 +12492,24 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam tg3_warn_mgmt_link_flap(tp); if (tg3_flag(tp, USE_PHYLIB)) { - u32 newadv; struct phy_device *phydev; phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); - if (!(phydev->supported & SUPPORTED_Pause) || - (!(phydev->supported & SUPPORTED_Asym_Pause) && - (epause->rx_pause != epause->tx_pause))) + if (!phy_validate_pause(phydev, epause)) return -EINVAL; tp->link_config.flowctrl = 0; + phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause); if (epause->rx_pause) { tp->link_config.flowctrl |= FLOW_CTRL_RX; if (epause->tx_pause) { tp->link_config.flowctrl |= FLOW_CTRL_TX; - newadv = ADVERTISED_Pause; - } else - newadv = ADVERTISED_Pause | - ADVERTISED_Asym_Pause; + } } else if (epause->tx_pause) { tp->link_config.flowctrl |= FLOW_CTRL_TX; - newadv = ADVERTISED_Asym_Pause; - } else - newadv = 0; + } if (epause->autoneg) tg3_flag_set(tp, PAUSE_AUTONEG); @@ -12528,33 +12517,19 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam tg3_flag_clear(tp, PAUSE_AUTONEG); if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) { - u32 oldadv = phydev->advertising & - (ADVERTISED_Pause | ADVERTISED_Asym_Pause); - if (oldadv != newadv) { - phydev->advertising &= - ~(ADVERTISED_Pause | - ADVERTISED_Asym_Pause); - phydev->advertising |= newadv; - if (phydev->autoneg) { - /* - * Always renegotiate the link to - * inform our link partner of our - * flow control settings, even if the - * flow control is forced. Let - * tg3_adjust_link() do the final - * flow control setup. - */ - return phy_start_aneg(phydev); - } + if (phydev->autoneg) { + /* phy_set_asym_pause() will + * renegotiate the link to inform our + * link partner of our flow control + * settings, even if the flow control + * is forced. Let tg3_adjust_link() + * do the final flow control setup. 
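
tg3's set_pauseparam shrinks considerably because two phylib helpers now carry the logic: phy_validate_pause() rejects rx/tx combinations the PHY cannot advertise, and phy_set_asym_pause() rewrites the Pause/Asym_Pause advertisement and, when autoneg is enabled, kicks off renegotiation itself, which is why the driver can simply return in the autoneg branch. Stripped to the pattern (foo_* names hypothetical, phy_* calls real):

#include <linux/ethtool.h>
#include <linux/phy.h>

static void foo_force_flow_control(struct net_device *dev, bool rx, bool tx)
{
	/* program MAC pause frames directly when autoneg is off */
}

static int foo_set_pauseparam(struct net_device *dev,
			      struct ethtool_pauseparam *ep)
{
	struct phy_device *phydev = dev->phydev;

	if (!phy_validate_pause(phydev, ep))
		return -EINVAL;	/* e.g. asym pause on a PHY without it */

	/* Updates the advertised pause bits; renegotiates if autoneg is
	 * on so the link partner learns the new flow-control settings.
	 */
	phy_set_asym_pause(phydev, ep->rx_pause, ep->tx_pause);

	if (!ep->autoneg)
		foo_force_flow_control(dev, ep->rx_pause, ep->tx_pause);

	return 0;
}
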
+ */ + return 0; } if (!epause->autoneg) tg3_setup_flow_control(tp, 0, 0); - } else { - tp->link_config.advertising &= - ~(ADVERTISED_Pause | - ADVERTISED_Asym_Pause); - tp->link_config.advertising |= newadv; } } else { int irq_sync = 0; @@ -14013,7 +13988,7 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) case SIOCGMIIPHY: data->phy_id = tp->phy_addr; - /* fallthru */ + /* fall through */ case SIOCGMIIREG: { u32 mii_regval; diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c index bba81735ce87..6d2d4527357c 100644 --- a/drivers/net/ethernet/brocade/bna/bna_enet.c +++ b/drivers/net/ethernet/brocade/bna/bna_enet.c @@ -1797,7 +1797,7 @@ bna_ucam_mod_init(struct bna_ucam_mod *ucam_mod, struct bna *bna, /* A separate queue to allow synchronous setting of a list of MACs */ INIT_LIST_HEAD(&ucam_mod->del_q); - for (i = i; i < (bna->ioceth.attr.num_ucmac * 2); i++) + for (; i < (bna->ioceth.attr.num_ucmac * 2); i++) list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->del_q); ucam_mod->bna = bna; @@ -1832,7 +1832,7 @@ bna_mcam_mod_init(struct bna_mcam_mod *mcam_mod, struct bna *bna, /* A separate queue to allow synchronous setting of a list of MACs */ INIT_LIST_HEAD(&mcam_mod->del_q); - for (i = i; i < (bna->ioceth.attr.num_mcmac * 2); i++) + for (; i < (bna->ioceth.attr.num_mcmac * 2); i++) list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->del_q); mcam_mod->bna = bna; diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 58b9744c4058..0acaef3ef548 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -544,14 +544,13 @@ static int macb_mii_probe(struct net_device *dev) /* mask with MAC supported features */ if (macb_is_gem(bp) && bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE) - phydev->supported &= PHY_GBIT_FEATURES; + phy_set_max_speed(phydev, SPEED_1000); else - phydev->supported &= PHY_BASIC_FEATURES; + phy_set_max_speed(phydev, SPEED_100); if (bp->caps & MACB_CAPS_NO_GIGABIT_HALF) - phydev->supported &= ~SUPPORTED_1000baseT_Half; - - phydev->advertising = phydev->supported; + phy_remove_link_mode(phydev, + ETHTOOL_LINK_MODE_1000baseT_Half_BIT); bp->link = 0; bp->speed = 0; diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c index 962bb62933db..fda49404968c 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c @@ -616,7 +616,7 @@ static void cn23xx_disable_vf_interrupt(struct octeon_device *oct, u8 intr_flag) int cn23xx_setup_octeon_vf_device(struct octeon_device *oct) { struct octeon_cn23xx_vf *cn23xx = (struct octeon_cn23xx_vf *)oct->chip; - u32 rings_per_vf, ring_flag; + u32 rings_per_vf; u64 reg_val; if (octeon_map_pci_barx(oct, 0, 0)) @@ -634,8 +634,6 @@ int cn23xx_setup_octeon_vf_device(struct octeon_device *oct) rings_per_vf = reg_val & CN23XX_PKT_INPUT_CTL_RPVF_MASK; - ring_flag = 0; - cn23xx->conf = oct_get_config_info(oct, LIO_23XX); if (!cn23xx->conf) { dev_err(&oct->pci_dev->dev, "%s No Config found for CN23XX\n", diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c index 8093c5eafea2..825a28e5b544 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_core.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c @@ -32,38 +32,6 @@ #define OCTNIC_MAX_SG MAX_SKB_FRAGS /** - * \brief Callback for getting interface 
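
The two bna_enet.c one-liners deserve a word: "for (i = i; ...)" initializes i with itself, which only works because i happens to hold the final index from the free-list loop just above, and it trips clang's -Wself-assign. Dropping the initializer keeps the exact behavior while making the carry-over explicit; schematically (names hypothetical):

/* 'i' intentionally continues from the previous loop; the second
 * half of the array feeds the del_q.
 */
for (; i < num_entries * 2; i++)
	list_add_tail(&entries[i].qe, &del_q);
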
configuration - * @param status status of request - * @param buf pointer to resp structure - */ -void lio_if_cfg_callback(struct octeon_device *oct, - u32 status __attribute__((unused)), void *buf) -{ - struct octeon_soft_command *sc = (struct octeon_soft_command *)buf; - struct liquidio_if_cfg_context *ctx; - struct liquidio_if_cfg_resp *resp; - - resp = (struct liquidio_if_cfg_resp *)sc->virtrptr; - ctx = (struct liquidio_if_cfg_context *)sc->ctxptr; - - oct = lio_get_device(ctx->octeon_id); - if (resp->status) - dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: %llx\n", - CVM_CAST64(resp->status)); - WRITE_ONCE(ctx->cond, 1); - - snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s", - resp->cfg_info.liquidio_firmware_version); - - /* This barrier is required to be sure that the response has been - * written fully before waking up the handler - */ - wmb(); - - wake_up_interruptible(&ctx->wc); -} - -/** * \brief Delete gather lists * @param lio per-network private data */ @@ -198,14 +166,15 @@ int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1) nctrl.ncmd.s.cmd = cmd; nctrl.ncmd.s.param1 = param1; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; - nctrl.wait_time = 100; nctrl.netpndev = (u64)netdev; nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); - if (ret < 0) { + if (ret) { dev_err(&oct->pci_dev->dev, "Feature change failed in core (ret: 0x%x)\n", ret); + if (ret > 0) + ret = -EIO; } return ret; } @@ -285,15 +254,7 @@ void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr) struct octeon_device *oct = lio->oct_dev; u8 *mac; - if (nctrl->completion && nctrl->response_code) { - /* Signal whoever is interested that the response code from the - * firmware has arrived. 
- */ - WRITE_ONCE(*nctrl->response_code, nctrl->status); - complete(nctrl->completion); - } - - if (nctrl->status) + if (nctrl->sc_status) return; switch (nctrl->ncmd.s.cmd) { @@ -464,56 +425,73 @@ void octeon_pf_changed_vf_macaddr(struct octeon_device *oct, u8 *mac) */ } +void octeon_schedule_rxq_oom_work(struct octeon_device *oct, + struct octeon_droq *droq) +{ + struct net_device *netdev = oct->props[0].netdev; + struct lio *lio = GET_LIO(netdev); + struct cavium_wq *wq = &lio->rxq_status_wq[droq->q_no]; + + queue_delayed_work(wq->wq, &wq->wk.work, + msecs_to_jiffies(LIO_OOM_POLL_INTERVAL_MS)); +} + static void octnet_poll_check_rxq_oom_status(struct work_struct *work) { struct cavium_wk *wk = (struct cavium_wk *)work; struct lio *lio = (struct lio *)wk->ctxptr; struct octeon_device *oct = lio->oct_dev; - struct octeon_droq *droq; - int q, q_no = 0; + int q_no = wk->ctxul; + struct octeon_droq *droq = oct->droq[q_no]; - if (ifstate_check(lio, LIO_IFSTATE_RUNNING)) { - for (q = 0; q < lio->linfo.num_rxpciq; q++) { - q_no = lio->linfo.rxpciq[q].s.q_no; - droq = oct->droq[q_no]; - if (!droq) - continue; - octeon_droq_check_oom(droq); - } - } - queue_delayed_work(lio->rxq_status_wq.wq, - &lio->rxq_status_wq.wk.work, - msecs_to_jiffies(LIO_OOM_POLL_INTERVAL_MS)); + if (!ifstate_check(lio, LIO_IFSTATE_RUNNING) || !droq) + return; + + if (octeon_retry_droq_refill(droq)) + octeon_schedule_rxq_oom_work(oct, droq); } int setup_rx_oom_poll_fn(struct net_device *netdev) { struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; + struct cavium_wq *wq; + int q, q_no; - lio->rxq_status_wq.wq = alloc_workqueue("rxq-oom-status", - WQ_MEM_RECLAIM, 0); - if (!lio->rxq_status_wq.wq) { - dev_err(&oct->pci_dev->dev, "unable to create cavium rxq oom status wq\n"); - return -ENOMEM; + for (q = 0; q < oct->num_oqs; q++) { + q_no = lio->linfo.rxpciq[q].s.q_no; + wq = &lio->rxq_status_wq[q_no]; + wq->wq = alloc_workqueue("rxq-oom-status", + WQ_MEM_RECLAIM, 0); + if (!wq->wq) { + dev_err(&oct->pci_dev->dev, "unable to create cavium rxq oom status wq\n"); + return -ENOMEM; + } + + INIT_DELAYED_WORK(&wq->wk.work, + octnet_poll_check_rxq_oom_status); + wq->wk.ctxptr = lio; + wq->wk.ctxul = q_no; } - INIT_DELAYED_WORK(&lio->rxq_status_wq.wk.work, - octnet_poll_check_rxq_oom_status); - lio->rxq_status_wq.wk.ctxptr = lio; - queue_delayed_work(lio->rxq_status_wq.wq, - &lio->rxq_status_wq.wk.work, - msecs_to_jiffies(LIO_OOM_POLL_INTERVAL_MS)); + return 0; } void cleanup_rx_oom_poll_fn(struct net_device *netdev) { struct lio *lio = GET_LIO(netdev); - - if (lio->rxq_status_wq.wq) { - cancel_delayed_work_sync(&lio->rxq_status_wq.wk.work); - flush_workqueue(lio->rxq_status_wq.wq); - destroy_workqueue(lio->rxq_status_wq.wq); + struct octeon_device *oct = lio->oct_dev; + struct cavium_wq *wq; + int q_no; + + for (q_no = 0; q_no < oct->num_oqs; q_no++) { + wq = &lio->rxq_status_wq[q_no]; + if (wq->wq) { + cancel_delayed_work_sync(&wq->wk.work); + flush_workqueue(wq->wq); + destroy_workqueue(wq->wq); + wq->wq = NULL; + } } } @@ -1218,30 +1196,6 @@ int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs) return 0; } -static void liquidio_change_mtu_completion(struct octeon_device *oct, - u32 status, void *buf) -{ - struct octeon_soft_command *sc = (struct octeon_soft_command *)buf; - struct liquidio_if_cfg_context *ctx; - - ctx = (struct liquidio_if_cfg_context *)sc->ctxptr; - - if (status) { - dev_err(&oct->pci_dev->dev, "MTU change failed. 
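
The rx-OOM poller above changes shape twice: one delayed work per receive queue instead of a single work item that scanned every queue, and re-arming only while octeon_retry_droq_refill() reports the ring is still starved, instead of rescheduling unconditionally every LIO_OOM_POLL_INTERVAL_MS. The generic form of that self-stopping poller, with hypothetical foo_* names (liquidio additionally uses a dedicated WQ_MEM_RECLAIM workqueue per queue; the sketch uses the system workqueue for brevity):

#include <linux/jiffies.h>
#include <linux/workqueue.h>

#define FOO_OOM_POLL_MS	100	/* example interval */

struct foo_rxq {
	struct delayed_work oom_work;
	/* ring state ... */
};

static bool foo_retry_refill(struct foo_rxq *q)
{
	return false;	/* device-specific refill; true = still starved */
}

static void foo_oom_poll(struct work_struct *work)
{
	struct foo_rxq *q = container_of(to_delayed_work(work),
					 struct foo_rxq, oom_work);

	/* Re-arm only while refills keep failing; once buffers come
	 * back the work simply stops rescheduling itself.
	 */
	if (foo_retry_refill(q))
		schedule_delayed_work(&q->oom_work,
				      msecs_to_jiffies(FOO_OOM_POLL_MS));
}

Teardown must cancel_delayed_work_sync() each queue's work before the rings go away, which is exactly what the reworked cleanup_rx_oom_poll_fn() does per q_no.
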
Status: %llx\n", - CVM_CAST64(status)); - WRITE_ONCE(ctx->cond, LIO_CHANGE_MTU_FAIL); - } else { - WRITE_ONCE(ctx->cond, LIO_CHANGE_MTU_SUCCESS); - } - - /* This barrier is required to be sure that the response has been - * written fully before waking up the handler - */ - wmb(); - - wake_up_interruptible(&ctx->wc); -} - /** * \brief Net device change_mtu * @param netdev network device @@ -1250,22 +1204,17 @@ int liquidio_change_mtu(struct net_device *netdev, int new_mtu) { struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; - struct liquidio_if_cfg_context *ctx; struct octeon_soft_command *sc; union octnet_cmd *ncmd; - int ctx_size; int ret = 0; - ctx_size = sizeof(struct liquidio_if_cfg_context); sc = (struct octeon_soft_command *) - octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, 16, ctx_size); + octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, 16, 0); ncmd = (union octnet_cmd *)sc->virtdptr; - ctx = (struct liquidio_if_cfg_context *)sc->ctxptr; - WRITE_ONCE(ctx->cond, 0); - ctx->octeon_id = lio_get_device_id(oct); - init_waitqueue_head(&ctx->wc); + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; ncmd->u64 = 0; ncmd->s.cmd = OCTNET_CMD_CHANGE_MTU; @@ -1278,28 +1227,28 @@ int liquidio_change_mtu(struct net_device *netdev, int new_mtu) octeon_prepare_soft_command(oct, sc, OPCODE_NIC, OPCODE_NIC_CMD, 0, 0, 0); - sc->callback = liquidio_change_mtu_completion; - sc->callback_arg = sc; - sc->wait_time = 100; - ret = octeon_send_soft_command(oct, sc); if (ret == IQ_SEND_FAILED) { netif_info(lio, rx_err, lio->netdev, "Failed to change MTU\n"); + octeon_free_soft_command(oct, sc); return -EINVAL; } /* Sleep on a wait queue till the cond flag indicates that the * response arrived or timed-out. */ - if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR || - ctx->cond == LIO_CHANGE_MTU_FAIL) { - octeon_free_soft_command(oct, sc); + ret = wait_for_sc_completion_timeout(oct, sc, 0); + if (ret) + return ret; + + if (sc->sc_status) { + WRITE_ONCE(sc->caller_is_done, true); return -EINVAL; } netdev->mtu = new_mtu; lio->mtu = new_mtu; - octeon_free_soft_command(oct, sc); + WRITE_ONCE(sc->caller_is_done, true); return 0; } @@ -1333,8 +1282,6 @@ octnet_nic_stats_callback(struct octeon_device *oct_dev, struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr; struct oct_nic_stats_resp *resp = (struct oct_nic_stats_resp *)sc->virtrptr; - struct oct_nic_stats_ctrl *ctrl = - (struct oct_nic_stats_ctrl *)sc->ctxptr; struct nic_rx_stats *rsp_rstats = &resp->stats.fromwire; struct nic_tx_stats *rsp_tstats = &resp->stats.fromhost; struct nic_rx_stats *rstats = &oct_dev->link_stats.fromwire; @@ -1422,93 +1369,148 @@ octnet_nic_stats_callback(struct octeon_device *oct_dev, resp->status = 1; } else { + dev_err(&oct_dev->pci_dev->dev, "sc OPCODE_NIC_PORT_STATS command failed\n"); resp->status = -1; } - complete(&ctrl->complete); } -int octnet_get_link_stats(struct net_device *netdev) +static int lio_fetch_vf_stats(struct lio *lio) { - struct lio *lio = GET_LIO(netdev); struct octeon_device *oct_dev = lio->oct_dev; struct octeon_soft_command *sc; - struct oct_nic_stats_ctrl *ctrl; - struct oct_nic_stats_resp *resp; + struct oct_nic_vf_stats_resp *resp; + int retval; /* Alloc soft command */ sc = (struct octeon_soft_command *) octeon_alloc_soft_command(oct_dev, 0, - sizeof(struct oct_nic_stats_resp), - sizeof(struct octnic_ctrl_pkt)); + sizeof(struct oct_nic_vf_stats_resp), + 0); - if (!sc) - return -ENOMEM; + if (!sc) { + dev_err(&oct_dev->pci_dev->dev, "Soft command 
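
This is the heart of the liquidio rework that runs through the rest of the series: every soft command used to carry its own context struct (octeon id, wait queue, cond flag) plus a response callback; now the completion and status live in the soft command itself, and every caller follows one skeleton. Ownership is the subtle part: if octeon_send_soft_command() fails the command was never queued and the caller frees it, but after a successful send the response machinery owns the buffer, so the caller only marks it consumable. The skeleton, using the series' own helpers (cmd_size, resp_size and opcode stand for the per-command values):

sc = octeon_alloc_soft_command(oct, cmd_size, resp_size, 0);
if (!sc)
	return -ENOMEM;

init_completion(&sc->complete);
sc->sc_status = OCTEON_REQUEST_PENDING;

octeon_prepare_soft_command(oct, sc, OPCODE_NIC, opcode, 0, 0, 0);

if (octeon_send_soft_command(oct, sc) == IQ_SEND_FAILED) {
	octeon_free_soft_command(oct, sc);	/* never queued */
	return -EIO;
}

ret = wait_for_sc_completion_timeout(oct, sc, 0);
if (ret)
	return ret;		/* sc is reclaimed later by the driver */

if (sc->sc_status) {
	WRITE_ONCE(sc->caller_is_done, true);
	return -EIO;
}

/* ... consume the response in sc->virtrptr ... */
WRITE_ONCE(sc->caller_is_done, true);
return 0;
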
allocation failed\n"); + retval = -ENOMEM; + goto lio_fetch_vf_stats_exit; + } - resp = (struct oct_nic_stats_resp *)sc->virtrptr; - memset(resp, 0, sizeof(struct oct_nic_stats_resp)); + resp = (struct oct_nic_vf_stats_resp *)sc->virtrptr; + memset(resp, 0, sizeof(struct oct_nic_vf_stats_resp)); - ctrl = (struct oct_nic_stats_ctrl *)sc->ctxptr; - memset(ctrl, 0, sizeof(struct oct_nic_stats_ctrl)); - ctrl->netdev = netdev; - init_completion(&ctrl->complete); + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; sc->iq_no = lio->linfo.txpciq[0].s.q_no; octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, - OPCODE_NIC_PORT_STATS, 0, 0, 0); - - sc->callback = octnet_nic_stats_callback; - sc->callback_arg = sc; - sc->wait_time = 500; /*in milli seconds*/ + OPCODE_NIC_VF_PORT_STATS, 0, 0, 0); retval = octeon_send_soft_command(oct_dev, sc); if (retval == IQ_SEND_FAILED) { octeon_free_soft_command(oct_dev, sc); - return -EINVAL; + goto lio_fetch_vf_stats_exit; } - wait_for_completion_timeout(&ctrl->complete, msecs_to_jiffies(1000)); + retval = + wait_for_sc_completion_timeout(oct_dev, sc, + (2 * LIO_SC_MAX_TMO_MS)); + if (retval) { + dev_err(&oct_dev->pci_dev->dev, + "sc OPCODE_NIC_VF_PORT_STATS command failed\n"); + goto lio_fetch_vf_stats_exit; + } - if (resp->status != 1) { - octeon_free_soft_command(oct_dev, sc); + if (sc->sc_status != OCTEON_REQUEST_TIMEOUT && !resp->status) { + octeon_swap_8B_data((u64 *)&resp->spoofmac_cnt, + (sizeof(u64)) >> 3); - return -EINVAL; + if (resp->spoofmac_cnt != 0) { + dev_warn(&oct_dev->pci_dev->dev, + "%llu Spoofed packets detected\n", + resp->spoofmac_cnt); + } } + WRITE_ONCE(sc->caller_is_done, 1); - octeon_free_soft_command(oct_dev, sc); - - return 0; +lio_fetch_vf_stats_exit: + return retval; } -static void liquidio_nic_seapi_ctl_callback(struct octeon_device *oct, - u32 status, - void *buf) +void lio_fetch_stats(struct work_struct *work) { - struct liquidio_nic_seapi_ctl_context *ctx; - struct octeon_soft_command *sc = buf; + struct cavium_wk *wk = (struct cavium_wk *)work; + struct lio *lio = wk->ctxptr; + struct octeon_device *oct_dev = lio->oct_dev; + struct octeon_soft_command *sc; + struct oct_nic_stats_resp *resp; + unsigned long time_in_jiffies; + int retval; + + if (OCTEON_CN23XX_PF(oct_dev)) { + /* report spoofchk every 2 seconds */ + if (!(oct_dev->vfstats_poll % LIO_VFSTATS_POLL) && + (oct_dev->fw_info.app_cap_flags & LIQUIDIO_SPOOFCHK_CAP) && + oct_dev->sriov_info.num_vfs_alloced) { + lio_fetch_vf_stats(lio); + } - ctx = sc->ctxptr; + oct_dev->vfstats_poll++; + } + + /* Alloc soft command */ + sc = (struct octeon_soft_command *) + octeon_alloc_soft_command(oct_dev, + 0, + sizeof(struct oct_nic_stats_resp), + 0); + + if (!sc) { + dev_err(&oct_dev->pci_dev->dev, "Soft command allocation failed\n"); + goto lio_fetch_stats_exit; + } + + resp = (struct oct_nic_stats_resp *)sc->virtrptr; + memset(resp, 0, sizeof(struct oct_nic_stats_resp)); + + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; - oct = lio_get_device(ctx->octeon_id); - if (status) { - dev_err(&oct->pci_dev->dev, "%s: instruction failed. 
Status: %llx\n", - __func__, - CVM_CAST64(status)); + sc->iq_no = lio->linfo.txpciq[0].s.q_no; + + octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, + OPCODE_NIC_PORT_STATS, 0, 0, 0); + + retval = octeon_send_soft_command(oct_dev, sc); + if (retval == IQ_SEND_FAILED) { + octeon_free_soft_command(oct_dev, sc); + goto lio_fetch_stats_exit; + } + + retval = wait_for_sc_completion_timeout(oct_dev, sc, + (2 * LIO_SC_MAX_TMO_MS)); + if (retval) { + dev_err(&oct_dev->pci_dev->dev, "sc OPCODE_NIC_PORT_STATS command failed\n"); + goto lio_fetch_stats_exit; } - ctx->status = status; - complete(&ctx->complete); + + octnet_nic_stats_callback(oct_dev, sc->sc_status, sc); + WRITE_ONCE(sc->caller_is_done, true); + +lio_fetch_stats_exit: + time_in_jiffies = msecs_to_jiffies(LIQUIDIO_NDEV_STATS_POLL_TIME_MS); + if (ifstate_check(lio, LIO_IFSTATE_RUNNING)) + schedule_delayed_work(&lio->stats_wk.work, time_in_jiffies); + + return; } int liquidio_set_speed(struct lio *lio, int speed) { - struct liquidio_nic_seapi_ctl_context *ctx; struct octeon_device *oct = lio->oct_dev; struct oct_nic_seapi_resp *resp; struct octeon_soft_command *sc; union octnet_cmd *ncmd; - u32 ctx_size; int retval; u32 var; @@ -1521,21 +1523,18 @@ int liquidio_set_speed(struct lio *lio, int speed) return -EOPNOTSUPP; } - ctx_size = sizeof(struct liquidio_nic_seapi_ctl_context); sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, sizeof(struct oct_nic_seapi_resp), - ctx_size); + 0); if (!sc) return -ENOMEM; ncmd = sc->virtdptr; - ctx = sc->ctxptr; resp = sc->virtrptr; memset(resp, 0, sizeof(struct oct_nic_seapi_resp)); - ctx->octeon_id = lio_get_device_id(oct); - ctx->status = 0; - init_completion(&ctx->complete); + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; ncmd->u64 = 0; ncmd->s.cmd = SEAPI_CMD_SPEED_SET; @@ -1548,30 +1547,24 @@ int liquidio_set_speed(struct lio *lio, int speed) octeon_prepare_soft_command(oct, sc, OPCODE_NIC, OPCODE_NIC_UBOOT_CTL, 0, 0, 0); - sc->callback = liquidio_nic_seapi_ctl_callback; - sc->callback_arg = sc; - sc->wait_time = 5000; - retval = octeon_send_soft_command(oct, sc); if (retval == IQ_SEND_FAILED) { dev_info(&oct->pci_dev->dev, "Failed to send soft command\n"); + octeon_free_soft_command(oct, sc); retval = -EBUSY; } else { /* Wait for response or timeout */ - if (wait_for_completion_timeout(&ctx->complete, - msecs_to_jiffies(10000)) == 0) { - dev_err(&oct->pci_dev->dev, "%s: sc timeout\n", - __func__); - octeon_free_soft_command(oct, sc); - return -EINTR; - } + retval = wait_for_sc_completion_timeout(oct, sc, 0); + if (retval) + return retval; retval = resp->status; if (retval) { dev_err(&oct->pci_dev->dev, "%s failed, retval=%d\n", __func__, retval); - octeon_free_soft_command(oct, sc); + WRITE_ONCE(sc->caller_is_done, true); + return -EIO; } @@ -1583,38 +1576,32 @@ int liquidio_set_speed(struct lio *lio, int speed) } oct->speed_setting = var; + WRITE_ONCE(sc->caller_is_done, true); } - octeon_free_soft_command(oct, sc); - return retval; } int liquidio_get_speed(struct lio *lio) { - struct liquidio_nic_seapi_ctl_context *ctx; struct octeon_device *oct = lio->oct_dev; struct oct_nic_seapi_resp *resp; struct octeon_soft_command *sc; union octnet_cmd *ncmd; - u32 ctx_size; int retval; - ctx_size = sizeof(struct liquidio_nic_seapi_ctl_context); sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, sizeof(struct oct_nic_seapi_resp), - ctx_size); + 0); if (!sc) return -ENOMEM; ncmd = sc->virtdptr; - ctx = sc->ctxptr; resp = sc->virtrptr; memset(resp, 0, sizeof(struct 
oct_nic_seapi_resp)); - ctx->octeon_id = lio_get_device_id(oct); - ctx->status = 0; - init_completion(&ctx->complete); + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; ncmd->u64 = 0; ncmd->s.cmd = SEAPI_CMD_SPEED_GET; @@ -1626,37 +1613,20 @@ int liquidio_get_speed(struct lio *lio) octeon_prepare_soft_command(oct, sc, OPCODE_NIC, OPCODE_NIC_UBOOT_CTL, 0, 0, 0); - sc->callback = liquidio_nic_seapi_ctl_callback; - sc->callback_arg = sc; - sc->wait_time = 5000; - retval = octeon_send_soft_command(oct, sc); if (retval == IQ_SEND_FAILED) { dev_info(&oct->pci_dev->dev, "Failed to send soft command\n"); - oct->no_speed_setting = 1; - oct->speed_setting = 25; - - retval = -EBUSY; + octeon_free_soft_command(oct, sc); + retval = -EIO; } else { - if (wait_for_completion_timeout(&ctx->complete, - msecs_to_jiffies(10000)) == 0) { - dev_err(&oct->pci_dev->dev, "%s: sc timeout\n", - __func__); - - oct->speed_setting = 25; - oct->no_speed_setting = 1; - - octeon_free_soft_command(oct, sc); + retval = wait_for_sc_completion_timeout(oct, sc, 0); + if (retval) + return retval; - return -EINTR; - } retval = resp->status; if (retval) { dev_err(&oct->pci_dev->dev, "%s failed retval=%d\n", __func__, retval); - oct->no_speed_setting = 1; - oct->speed_setting = 25; - octeon_free_soft_command(oct, sc); retval = -EIO; } else { u32 var; @@ -1664,16 +1634,171 @@ int liquidio_get_speed(struct lio *lio) var = be32_to_cpu((__force __be32)resp->speed); oct->speed_setting = var; if (var == 0xffff) { - oct->no_speed_setting = 1; /* unable to access boot variables * get the default value based on the NIC type */ - oct->speed_setting = 25; + if (oct->subsystem_id == + OCTEON_CN2350_25GB_SUBSYS_ID || + oct->subsystem_id == + OCTEON_CN2360_25GB_SUBSYS_ID) { + oct->no_speed_setting = 1; + oct->speed_setting = 25; + } else { + oct->speed_setting = 10; + } } + } + WRITE_ONCE(sc->caller_is_done, true); + } + + return retval; +} + +int liquidio_set_fec(struct lio *lio, int on_off) +{ + struct oct_nic_seapi_resp *resp; + struct octeon_soft_command *sc; + struct octeon_device *oct; + union octnet_cmd *ncmd; + int retval; + u32 var; + + oct = lio->oct_dev; + + if (oct->props[lio->ifidx].fec == on_off) + return 0; + + if (!OCTEON_CN23XX_PF(oct)) { + dev_err(&oct->pci_dev->dev, "%s: SET FEC only for PF\n", + __func__); + return -1; + } + + if (oct->speed_boot != 25) { + dev_err(&oct->pci_dev->dev, + "Set FEC only when link speed is 25G during insmod\n"); + return -1; + } + + sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, + sizeof(struct oct_nic_seapi_resp), 0); + + ncmd = sc->virtdptr; + resp = sc->virtrptr; + memset(resp, 0, sizeof(struct oct_nic_seapi_resp)); + + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; + + ncmd->u64 = 0; + ncmd->s.cmd = SEAPI_CMD_FEC_SET; + ncmd->s.param1 = on_off; + /* SEAPI_CMD_FEC_DISABLE(0) or SEAPI_CMD_FEC_RS(1) */ + + octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3)); + + sc->iq_no = lio->linfo.txpciq[0].s.q_no; + + octeon_prepare_soft_command(oct, sc, OPCODE_NIC, + OPCODE_NIC_UBOOT_CTL, 0, 0, 0); + + retval = octeon_send_soft_command(oct, sc); + if (retval == IQ_SEND_FAILED) { + dev_info(&oct->pci_dev->dev, "Failed to send soft command\n"); + octeon_free_soft_command(oct, sc); + return -EIO; + } + + retval = wait_for_sc_completion_timeout(oct, sc, 0); + if (retval) + return (-EIO); + + var = be32_to_cpu(resp->fec_setting); + resp->fec_setting = var; + if (var != on_off) { + dev_err(&oct->pci_dev->dev, + "Setting failed fec= %x, expect 
%x\n", + var, on_off); + oct->props[lio->ifidx].fec = var; + if (resp->fec_setting == SEAPI_CMD_FEC_SET_RS) + oct->props[lio->ifidx].fec = 1; + else + oct->props[lio->ifidx].fec = 0; + } + + WRITE_ONCE(sc->caller_is_done, true); + + if (oct->props[lio->ifidx].fec != + oct->props[lio->ifidx].fec_boot) { + dev_dbg(&oct->pci_dev->dev, + "Reload driver to change fec to %s\n", + oct->props[lio->ifidx].fec ? "on" : "off"); + } + + return retval; +} + +int liquidio_get_fec(struct lio *lio) +{ + struct oct_nic_seapi_resp *resp; + struct octeon_soft_command *sc; + struct octeon_device *oct; + union octnet_cmd *ncmd; + int retval; + u32 var; + + oct = lio->oct_dev; + + sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, + sizeof(struct oct_nic_seapi_resp), 0); + if (!sc) + return -ENOMEM; + + ncmd = sc->virtdptr; + resp = sc->virtrptr; + memset(resp, 0, sizeof(struct oct_nic_seapi_resp)); + + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; + + ncmd->u64 = 0; + ncmd->s.cmd = SEAPI_CMD_FEC_GET; + + octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3)); + + sc->iq_no = lio->linfo.txpciq[0].s.q_no; + + octeon_prepare_soft_command(oct, sc, OPCODE_NIC, + OPCODE_NIC_UBOOT_CTL, 0, 0, 0); + + retval = octeon_send_soft_command(oct, sc); + if (retval == IQ_SEND_FAILED) { + dev_info(&oct->pci_dev->dev, + "%s: Failed to send soft command\n", __func__); + octeon_free_soft_command(oct, sc); + return -EIO; } - octeon_free_soft_command(oct, sc); + retval = wait_for_sc_completion_timeout(oct, sc, 0); + if (retval) + return retval; + + var = be32_to_cpu(resp->fec_setting); + resp->fec_setting = var; + if (resp->fec_setting == SEAPI_CMD_FEC_SET_RS) + oct->props[lio->ifidx].fec = 1; + else + oct->props[lio->ifidx].fec = 0; + + WRITE_ONCE(sc->caller_is_done, true); + + if (oct->props[lio->ifidx].fec != + oct->props[lio->ifidx].fec_boot) { + dev_dbg(&oct->pci_dev->dev, + "Reload driver to change fec to %s\n", + oct->props[lio->ifidx].fec ? 
"on" : "off"); + } return retval; } diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c index 8e05afd5e39c..4c3925af53bc 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c @@ -33,25 +33,12 @@ static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs); -struct oct_intrmod_context { - int octeon_id; - wait_queue_head_t wc; - int cond; - int status; -}; - struct oct_intrmod_resp { u64 rh; struct oct_intrmod_cfg intrmod; u64 status; }; -struct oct_mdio_cmd_context { - int octeon_id; - wait_queue_head_t wc; - int cond; -}; - struct oct_mdio_cmd_resp { u64 rh; struct oct_mdio_cmd resp; @@ -257,6 +244,7 @@ static int lio_get_link_ksettings(struct net_device *netdev, linfo->link.s.if_mode == INTERFACE_MODE_XLAUI || linfo->link.s.if_mode == INTERFACE_MODE_XFI) { dev_dbg(&oct->pci_dev->dev, "ecmd->base.transceiver is XCVR_EXTERNAL\n"); + ecmd->base.transceiver = XCVR_EXTERNAL; } else { dev_err(&oct->pci_dev->dev, "Unknown link interface mode: %d\n", linfo->link.s.if_mode); @@ -290,10 +278,12 @@ static int lio_get_link_ksettings(struct net_device *netdev, 10000baseCR_Full); } - if (oct->no_speed_setting == 0) + if (oct->no_speed_setting == 0) { liquidio_get_speed(lio); - else + liquidio_get_fec(lio); + } else { oct->speed_setting = 25; + } if (oct->speed_setting == 10) { ethtool_link_ksettings_add_link_mode @@ -317,6 +307,24 @@ static int lio_get_link_ksettings(struct net_device *netdev, (ecmd, advertising, 25000baseCR_Full); } + + if (oct->no_speed_setting) + break; + + ethtool_link_ksettings_add_link_mode + (ecmd, supported, FEC_RS); + ethtool_link_ksettings_add_link_mode + (ecmd, supported, FEC_NONE); + /*FEC_OFF*/ + if (oct->props[lio->ifidx].fec == 1) { + /* ETHTOOL_FEC_RS */ + ethtool_link_ksettings_add_link_mode + (ecmd, advertising, FEC_RS); + } else { + /* ETHTOOL_FEC_OFF */ + ethtool_link_ksettings_add_link_mode + (ecmd, advertising, FEC_NONE); + } } else { /* VF */ if (linfo->link.s.speed == 10000) { ethtool_link_ksettings_add_link_mode @@ -472,12 +480,11 @@ lio_send_queue_count_update(struct net_device *netdev, uint32_t num_queues) nctrl.ncmd.s.param1 = num_queues; nctrl.ncmd.s.param2 = num_queues; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; - nctrl.wait_time = 100; nctrl.netpndev = (u64)netdev; nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); - if (ret < 0) { + if (ret) { dev_err(&oct->pci_dev->dev, "Failed to send Queue reset command (ret: 0x%x)\n", ret); return -1; @@ -708,13 +715,13 @@ static int octnet_gpio_access(struct net_device *netdev, int addr, int val) nctrl.ncmd.s.param1 = addr; nctrl.ncmd.s.param2 = val; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; - nctrl.wait_time = 100; nctrl.netpndev = (u64)netdev; nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); - if (ret < 0) { - dev_err(&oct->pci_dev->dev, "Failed to configure gpio value\n"); + if (ret) { + dev_err(&oct->pci_dev->dev, + "Failed to configure gpio value, ret=%d\n", ret); return -EINVAL; } @@ -734,41 +741,19 @@ static int octnet_id_active(struct net_device *netdev, int val) nctrl.ncmd.s.cmd = OCTNET_CMD_ID_ACTIVE; nctrl.ncmd.s.param1 = val; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; - nctrl.wait_time = 100; nctrl.netpndev = (u64)netdev; nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); - if (ret < 0) { - 
dev_err(&oct->pci_dev->dev, "Failed to configure gpio value\n"); + if (ret) { + dev_err(&oct->pci_dev->dev, + "Failed to configure gpio value, ret=%d\n", ret); return -EINVAL; } return 0; } -/* Callback for when mdio command response arrives - */ -static void octnet_mdio_resp_callback(struct octeon_device *oct, - u32 status, - void *buf) -{ - struct oct_mdio_cmd_context *mdio_cmd_ctx; - struct octeon_soft_command *sc = (struct octeon_soft_command *)buf; - - mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr; - - oct = lio_get_device(mdio_cmd_ctx->octeon_id); - if (status) { - dev_err(&oct->pci_dev->dev, "MIDO instruction failed. Status: %llx\n", - CVM_CAST64(status)); - WRITE_ONCE(mdio_cmd_ctx->cond, -1); - } else { - WRITE_ONCE(mdio_cmd_ctx->cond, 1); - } - wake_up_interruptible(&mdio_cmd_ctx->wc); -} - /* This routine provides PHY access routines for * mdio clause45 . */ @@ -778,25 +763,20 @@ octnet_mdio45_access(struct lio *lio, int op, int loc, int *value) struct octeon_device *oct_dev = lio->oct_dev; struct octeon_soft_command *sc; struct oct_mdio_cmd_resp *mdio_cmd_rsp; - struct oct_mdio_cmd_context *mdio_cmd_ctx; struct oct_mdio_cmd *mdio_cmd; int retval = 0; sc = (struct octeon_soft_command *) octeon_alloc_soft_command(oct_dev, sizeof(struct oct_mdio_cmd), - sizeof(struct oct_mdio_cmd_resp), - sizeof(struct oct_mdio_cmd_context)); + sizeof(struct oct_mdio_cmd_resp), 0); if (!sc) return -ENOMEM; - mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr; mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr; mdio_cmd = (struct oct_mdio_cmd *)sc->virtdptr; - WRITE_ONCE(mdio_cmd_ctx->cond, 0); - mdio_cmd_ctx->octeon_id = lio_get_device_id(oct_dev); mdio_cmd->op = op; mdio_cmd->mdio_addr = loc; if (op) @@ -808,42 +788,40 @@ octnet_mdio45_access(struct lio *lio, int op, int loc, int *value) octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, OPCODE_NIC_MDIO45, 0, 0, 0); - sc->wait_time = 1000; - sc->callback = octnet_mdio_resp_callback; - sc->callback_arg = sc; - - init_waitqueue_head(&mdio_cmd_ctx->wc); + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; retval = octeon_send_soft_command(oct_dev, sc); - if (retval == IQ_SEND_FAILED) { dev_err(&oct_dev->pci_dev->dev, "octnet_mdio45_access instruction failed status: %x\n", retval); - retval = -EBUSY; + octeon_free_soft_command(oct_dev, sc); + return -EBUSY; } else { /* Sleep on a wait queue till the cond flag indicates that the * response arrived */ - sleep_cond(&mdio_cmd_ctx->wc, &mdio_cmd_ctx->cond); + retval = wait_for_sc_completion_timeout(oct_dev, sc, 0); + if (retval) + return retval; + retval = mdio_cmd_rsp->status; if (retval) { - dev_err(&oct_dev->pci_dev->dev, "octnet mdio45 access failed\n"); - retval = -EBUSY; - } else { - octeon_swap_8B_data((u64 *)(&mdio_cmd_rsp->resp), - sizeof(struct oct_mdio_cmd) / 8); - - if (READ_ONCE(mdio_cmd_ctx->cond) == 1) { - if (!op) - *value = mdio_cmd_rsp->resp.value1; - } else { - retval = -EINVAL; - } + dev_err(&oct_dev->pci_dev->dev, + "octnet mdio45 access failed: %x\n", retval); + WRITE_ONCE(sc->caller_is_done, true); + return -EBUSY; } - } - octeon_free_soft_command(oct_dev, sc); + octeon_swap_8B_data((u64 *)(&mdio_cmd_rsp->resp), + sizeof(struct oct_mdio_cmd) / 8); + + if (!op) + *value = mdio_cmd_rsp->resp.value1; + + WRITE_ONCE(sc->caller_is_done, true); + } return retval; } @@ -1007,8 +985,7 @@ lio_ethtool_get_ringparam(struct net_device *netdev, static int lio_23xx_reconfigure_queue_count(struct lio *lio) { struct octeon_device *oct = lio->oct_dev; - 
struct liquidio_if_cfg_context *ctx; - u32 resp_size, ctx_size, data_size; + u32 resp_size, data_size; struct liquidio_if_cfg_resp *resp; struct octeon_soft_command *sc; union oct_nic_if_cfg if_cfg; @@ -1018,11 +995,10 @@ static int lio_23xx_reconfigure_queue_count(struct lio *lio) int j; resp_size = sizeof(struct liquidio_if_cfg_resp); - ctx_size = sizeof(struct liquidio_if_cfg_context); data_size = sizeof(struct lio_version); sc = (struct octeon_soft_command *) octeon_alloc_soft_command(oct, data_size, - resp_size, ctx_size); + resp_size, 0); if (!sc) { dev_err(&oct->pci_dev->dev, "%s: Failed to allocate soft command\n", __func__); @@ -1030,7 +1006,6 @@ static int lio_23xx_reconfigure_queue_count(struct lio *lio) } resp = (struct liquidio_if_cfg_resp *)sc->virtrptr; - ctx = (struct liquidio_if_cfg_context *)sc->ctxptr; vdata = (struct lio_version *)sc->virtdptr; vdata->major = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION); @@ -1038,9 +1013,6 @@ static int lio_23xx_reconfigure_queue_count(struct lio *lio) vdata->micro = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION); ifidx_or_pfnum = oct->pf_num; - WRITE_ONCE(ctx->cond, 0); - ctx->octeon_id = lio_get_device_id(oct); - init_waitqueue_head(&ctx->wc); if_cfg.u64 = 0; if_cfg.s.num_iqueues = oct->sriov_info.num_pf_rings; @@ -1052,27 +1024,29 @@ static int lio_23xx_reconfigure_queue_count(struct lio *lio) octeon_prepare_soft_command(oct, sc, OPCODE_NIC, OPCODE_NIC_QCOUNT_UPDATE, 0, if_cfg.u64, 0); - sc->callback = lio_if_cfg_callback; - sc->callback_arg = sc; - sc->wait_time = LIO_IFCFG_WAIT_TIME; + + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; retval = octeon_send_soft_command(oct, sc); if (retval == IQ_SEND_FAILED) { dev_err(&oct->pci_dev->dev, - "iq/oq config failed status: %x\n", + "Sending iq/oq config failed status: %x\n", retval); - goto qcount_update_fail; + octeon_free_soft_command(oct, sc); + return -EIO; } - if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) { - dev_err(&oct->pci_dev->dev, "Wait interrupted\n"); - return -1; - } + retval = wait_for_sc_completion_timeout(oct, sc, 0); + if (retval) + return retval; retval = resp->status; if (retval) { - dev_err(&oct->pci_dev->dev, "iq/oq config failed\n"); - goto qcount_update_fail; + dev_err(&oct->pci_dev->dev, + "iq/oq config failed: %x\n", retval); + WRITE_ONCE(sc->caller_is_done, true); + return -1; } octeon_swap_8B_data((u64 *)(&resp->cfg_info), @@ -1097,16 +1071,12 @@ static int lio_23xx_reconfigure_queue_count(struct lio *lio) lio->txq = lio->linfo.txpciq[0].s.q_no; lio->rxq = lio->linfo.rxpciq[0].s.q_no; - octeon_free_soft_command(oct, sc); dev_info(&oct->pci_dev->dev, "Queue count updated to %d\n", lio->linfo.num_rxpciq); - return 0; - -qcount_update_fail: - octeon_free_soft_command(oct, sc); + WRITE_ONCE(sc->caller_is_done, true); - return -1; + return 0; } static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs) @@ -1166,6 +1136,8 @@ static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs) * steps like updating sriov_info for the octeon device need to be done. 
*/ if (queue_count_update) { + cleanup_rx_oom_poll_fn(netdev); + lio_delete_glists(lio); /* Delete mbox for PF which is SRIOV disabled because sriov_info @@ -1265,6 +1237,11 @@ static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs) return -1; } + if (setup_rx_oom_poll_fn(netdev)) { + dev_err(&oct->pci_dev->dev, "lio_setup_rx_oom_poll_fn failed\n"); + return 1; + } + /* Send firmware the information about new number of queues * if the interface is a VF or a PF that is SRIOV enabled. */ @@ -1412,7 +1389,6 @@ lio_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) nctrl.ncmd.u64 = 0; nctrl.ncmd.s.cmd = OCTNET_CMD_SET_FLOW_CTL; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; - nctrl.wait_time = 100; nctrl.netpndev = (u64)netdev; nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; @@ -1433,8 +1409,9 @@ lio_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) } ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); - if (ret < 0) { - dev_err(&oct->pci_dev->dev, "Failed to set pause parameter\n"); + if (ret) { + dev_err(&oct->pci_dev->dev, + "Failed to set pause parameter, ret=%d\n", ret); return -EINVAL; } @@ -1764,7 +1741,8 @@ static void lio_vf_get_ethtool_stats(struct net_device *netdev, */ data[i++] = lstats.rx_dropped; /* sum of oct->instr_queue[iq_no]->stats.tx_dropped */ - data[i++] = lstats.tx_dropped; + data[i++] = lstats.tx_dropped + + oct_dev->link_stats.fromhost.fw_err_drop; data[i++] = oct_dev->link_stats.fromwire.fw_total_mcast; data[i++] = oct_dev->link_stats.fromhost.fw_total_mcast_sent; @@ -2013,34 +1991,11 @@ static int lio_vf_get_sset_count(struct net_device *netdev, int sset) } } -/* Callback function for intrmod */ -static void octnet_intrmod_callback(struct octeon_device *oct_dev, - u32 status, - void *ptr) -{ - struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr; - struct oct_intrmod_context *ctx; - - ctx = (struct oct_intrmod_context *)sc->ctxptr; - - ctx->status = status; - - WRITE_ONCE(ctx->cond, 1); - - /* This barrier is required to be sure that the response has been - * written fully before waking up the handler - */ - wmb(); - - wake_up_interruptible(&ctx->wc); -} - /* get interrupt moderation parameters */ static int octnet_get_intrmod_cfg(struct lio *lio, struct oct_intrmod_cfg *intr_cfg) { struct octeon_soft_command *sc; - struct oct_intrmod_context *ctx; struct oct_intrmod_resp *resp; int retval; struct octeon_device *oct_dev = lio->oct_dev; @@ -2049,8 +2004,7 @@ static int octnet_get_intrmod_cfg(struct lio *lio, sc = (struct octeon_soft_command *) octeon_alloc_soft_command(oct_dev, 0, - sizeof(struct oct_intrmod_resp), - sizeof(struct oct_intrmod_context)); + sizeof(struct oct_intrmod_resp), 0); if (!sc) return -ENOMEM; @@ -2058,20 +2012,13 @@ static int octnet_get_intrmod_cfg(struct lio *lio, resp = (struct oct_intrmod_resp *)sc->virtrptr; memset(resp, 0, sizeof(struct oct_intrmod_resp)); - ctx = (struct oct_intrmod_context *)sc->ctxptr; - memset(ctx, 0, sizeof(struct oct_intrmod_context)); - WRITE_ONCE(ctx->cond, 0); - ctx->octeon_id = lio_get_device_id(oct_dev); - init_waitqueue_head(&ctx->wc); - sc->iq_no = lio->linfo.txpciq[0].s.q_no; octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, OPCODE_NIC_INTRMOD_PARAMS, 0, 0, 0); - sc->callback = octnet_intrmod_callback; - sc->callback_arg = sc; - sc->wait_time = 1000; + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; retval = octeon_send_soft_command(oct_dev, sc); if (retval == IQ_SEND_FAILED) { @@ -2082,32 +2029,23 
@@ static int octnet_get_intrmod_cfg(struct lio *lio, /* Sleep on a wait queue till the cond flag indicates that the * response arrived or timed-out. */ - if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) { - dev_err(&oct_dev->pci_dev->dev, "Wait interrupted\n"); - goto intrmod_info_wait_intr; - } + retval = wait_for_sc_completion_timeout(oct_dev, sc, 0); + if (retval) + return -ENODEV; - retval = ctx->status || resp->status; - if (retval) { + if (resp->status) { dev_err(&oct_dev->pci_dev->dev, "Get interrupt moderation parameters failed\n"); - goto intrmod_info_wait_fail; + WRITE_ONCE(sc->caller_is_done, true); + return -ENODEV; } octeon_swap_8B_data((u64 *)&resp->intrmod, (sizeof(struct oct_intrmod_cfg)) / 8); memcpy(intr_cfg, &resp->intrmod, sizeof(struct oct_intrmod_cfg)); - octeon_free_soft_command(oct_dev, sc); + WRITE_ONCE(sc->caller_is_done, true); return 0; - -intrmod_info_wait_fail: - - octeon_free_soft_command(oct_dev, sc); - -intrmod_info_wait_intr: - - return -ENODEV; } /* Configure interrupt moderation parameters */ @@ -2115,7 +2053,6 @@ static int octnet_set_intrmod_cfg(struct lio *lio, struct oct_intrmod_cfg *intr_cfg) { struct octeon_soft_command *sc; - struct oct_intrmod_context *ctx; struct oct_intrmod_cfg *cfg; int retval; struct octeon_device *oct_dev = lio->oct_dev; @@ -2124,18 +2061,11 @@ static int octnet_set_intrmod_cfg(struct lio *lio, sc = (struct octeon_soft_command *) octeon_alloc_soft_command(oct_dev, sizeof(struct oct_intrmod_cfg), - 0, - sizeof(struct oct_intrmod_context)); + 16, 0); if (!sc) return -ENOMEM; - ctx = (struct oct_intrmod_context *)sc->ctxptr; - - WRITE_ONCE(ctx->cond, 0); - ctx->octeon_id = lio_get_device_id(oct_dev); - init_waitqueue_head(&ctx->wc); - cfg = (struct oct_intrmod_cfg *)sc->virtdptr; memcpy(cfg, intr_cfg, sizeof(struct oct_intrmod_cfg)); @@ -2146,9 +2076,8 @@ static int octnet_set_intrmod_cfg(struct lio *lio, octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, OPCODE_NIC_INTRMOD_CFG, 0, 0, 0); - sc->callback = octnet_intrmod_callback; - sc->callback_arg = sc; - sc->wait_time = 1000; + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; retval = octeon_send_soft_command(oct_dev, sc); if (retval == IQ_SEND_FAILED) { @@ -2159,26 +2088,24 @@ static int octnet_set_intrmod_cfg(struct lio *lio, /* Sleep on a wait queue till the cond flag indicates that the * response arrived or timed-out. */ - if (sleep_cond(&ctx->wc, &ctx->cond) != -EINTR) { - retval = ctx->status; - if (retval) - dev_err(&oct_dev->pci_dev->dev, - "intrmod config failed. Status: %llx\n", - CVM_CAST64(retval)); - else - dev_info(&oct_dev->pci_dev->dev, - "Rx-Adaptive Interrupt moderation %s\n", - (intr_cfg->rx_enable) ? - "enabled" : "disabled"); - - octeon_free_soft_command(oct_dev, sc); - - return ((retval) ? -ENODEV : 0); + retval = wait_for_sc_completion_timeout(oct_dev, sc, 0); + if (retval) + return retval; + + retval = sc->sc_status; + if (retval == 0) { + dev_info(&oct_dev->pci_dev->dev, + "Rx-Adaptive Interrupt moderation %s\n", + (intr_cfg->rx_enable) ? + "enabled" : "disabled"); + WRITE_ONCE(sc->caller_is_done, true); + return 0; } - dev_err(&oct_dev->pci_dev->dev, "iq/oq config failed\n"); - - return -EINTR; + dev_err(&oct_dev->pci_dev->dev, + "intrmod config failed. 
Status: %x\n", retval); + WRITE_ONCE(sc->caller_is_done, true); + return -ENODEV; } static int lio_get_intr_coalesce(struct net_device *netdev, @@ -3123,9 +3050,60 @@ static int lio_set_priv_flags(struct net_device *netdev, u32 flags) return 0; } +static int lio_get_fecparam(struct net_device *netdev, + struct ethtool_fecparam *fec) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + + fec->active_fec = ETHTOOL_FEC_NONE; + fec->fec = ETHTOOL_FEC_NONE; + + if (oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID || + oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID) { + if (oct->no_speed_setting == 1) + return 0; + + liquidio_get_fec(lio); + fec->fec = (ETHTOOL_FEC_RS | ETHTOOL_FEC_OFF); + if (oct->props[lio->ifidx].fec == 1) + fec->active_fec = ETHTOOL_FEC_RS; + else + fec->active_fec = ETHTOOL_FEC_OFF; + } + + return 0; +} + +static int lio_set_fecparam(struct net_device *netdev, + struct ethtool_fecparam *fec) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + + if (oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID || + oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID) { + if (oct->no_speed_setting == 1) + return -EOPNOTSUPP; + + if (fec->fec & ETHTOOL_FEC_OFF) + liquidio_set_fec(lio, 0); + else if (fec->fec & ETHTOOL_FEC_RS) + liquidio_set_fec(lio, 1); + else + return -EOPNOTSUPP; + } else { + return -EOPNOTSUPP; + } + + return 0; +} + static const struct ethtool_ops lio_ethtool_ops = { .get_link_ksettings = lio_get_link_ksettings, .set_link_ksettings = lio_set_link_ksettings, + .get_fecparam = lio_get_fecparam, + .set_fecparam = lio_set_fecparam, .get_link = ethtool_op_get_link, .get_drvinfo = lio_get_drvinfo, .get_ringparam = lio_ethtool_get_ringparam, diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 6fb13fa73b27..3d24133e5e49 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -99,14 +99,6 @@ struct lio_trusted_vf_ctx { int status; }; -struct liquidio_rx_ctl_context { - int octeon_id; - - wait_queue_head_t wc; - - int cond; -}; - struct oct_link_status_resp { u64 rh; struct oct_link_info link_info; @@ -642,26 +634,6 @@ static inline void update_link_status(struct net_device *netdev, } /** - * lio_sync_octeon_time_cb - callback that is invoked when soft command - * sent by lio_sync_octeon_time() has completed successfully or failed - * - * @oct - octeon device structure - * @status - indicates success or failure - * @buf - pointer to the command that was sent to firmware - **/ -static void lio_sync_octeon_time_cb(struct octeon_device *oct, - u32 status, void *buf) -{ - struct octeon_soft_command *sc = (struct octeon_soft_command *)buf; - - if (status) - dev_err(&oct->pci_dev->dev, - "Failed to sync time to octeon; error=%d\n", status); - - octeon_free_soft_command(oct, sc); -} - -/** * lio_sync_octeon_time - send latest localtime to octeon firmware so that * firmware will correct it's time, in case there is a time skew * @@ -677,7 +649,7 @@ static void lio_sync_octeon_time(struct work_struct *work) struct lio_time *lt; int ret; - sc = octeon_alloc_soft_command(oct, sizeof(struct lio_time), 0, 0); + sc = octeon_alloc_soft_command(oct, sizeof(struct lio_time), 16, 0); if (!sc) { dev_err(&oct->pci_dev->dev, "Failed to sync time to octeon: soft command allocation failed\n"); @@ -696,15 +668,16 @@ static void lio_sync_octeon_time(struct work_struct *work) octeon_prepare_soft_command(oct, sc, 
OPCODE_NIC, OPCODE_NIC_SYNC_OCTEON_TIME, 0, 0, 0); - sc->callback = lio_sync_octeon_time_cb; - sc->callback_arg = sc; - sc->wait_time = 1000; + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; ret = octeon_send_soft_command(oct, sc); if (ret == IQ_SEND_FAILED) { dev_err(&oct->pci_dev->dev, "Failed to sync time to octeon: failed to send soft command\n"); octeon_free_soft_command(oct, sc); + } else { + WRITE_ONCE(sc->caller_is_done, true); } queue_delayed_work(lio->sync_octeon_time_wq.wq, @@ -1037,12 +1010,12 @@ static void octeon_destroy_resources(struct octeon_device *oct) /* fallthrough */ case OCT_DEV_IO_QUEUES_DONE: - if (wait_for_pending_requests(oct)) - dev_err(&oct->pci_dev->dev, "There were pending requests\n"); - if (lio_wait_for_instr_fetch(oct)) dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n"); + if (wait_for_pending_requests(oct)) + dev_err(&oct->pci_dev->dev, "There were pending requests\n"); + /* Disable the input and output queues now. No more packets will * arrive from Octeon, but we should wait for all packet * processing to finish. @@ -1052,6 +1025,31 @@ static void octeon_destroy_resources(struct octeon_device *oct) if (lio_wait_for_oq_pkts(oct)) dev_err(&oct->pci_dev->dev, "OQ had pending packets\n"); + /* Force all requests waiting to be fetched by OCTEON to + * complete. + */ + for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { + struct octeon_instr_queue *iq; + + if (!(oct->io_qmask.iq & BIT_ULL(i))) + continue; + iq = oct->instr_queue[i]; + + if (atomic_read(&iq->instr_pending)) { + spin_lock_bh(&iq->lock); + iq->fill_cnt = 0; + iq->octeon_read_index = iq->host_write_index; + iq->stats.instr_processed += + atomic_read(&iq->instr_pending); + lio_process_iq_request_list(oct, iq, 0); + spin_unlock_bh(&iq->lock); + } + } + + lio_process_ordered_list(oct, 1); + octeon_free_sc_done_list(oct); + octeon_free_sc_zombie_list(oct); + /* fallthrough */ case OCT_DEV_INTR_SET_DONE: /* Disable interrupts */ @@ -1178,34 +1176,6 @@ static void octeon_destroy_resources(struct octeon_device *oct) } /** - * \brief Callback for rx ctrl - * @param status status of request - * @param buf pointer to resp structure - */ -static void rx_ctl_callback(struct octeon_device *oct, - u32 status, - void *buf) -{ - struct octeon_soft_command *sc = (struct octeon_soft_command *)buf; - struct liquidio_rx_ctl_context *ctx; - - ctx = (struct liquidio_rx_ctl_context *)sc->ctxptr; - - oct = lio_get_device(ctx->octeon_id); - if (status) - dev_err(&oct->pci_dev->dev, "rx ctl instruction failed. 
Status: %llx\n", - CVM_CAST64(status)); - WRITE_ONCE(ctx->cond, 1); - - /* This barrier is required to be sure that the response has been - * written fully before waking up the handler - */ - wmb(); - - wake_up_interruptible(&ctx->wc); -} - -/** * \brief Send Rx control command * @param lio per-network private data * @param start_stop whether to start or stop @@ -1213,9 +1183,7 @@ static void rx_ctl_callback(struct octeon_device *oct, static void send_rx_ctrl_cmd(struct lio *lio, int start_stop) { struct octeon_soft_command *sc; - struct liquidio_rx_ctl_context *ctx; union octnet_cmd *ncmd; - int ctx_size = sizeof(struct liquidio_rx_ctl_context); struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; int retval; @@ -1224,14 +1192,9 @@ static void send_rx_ctrl_cmd(struct lio *lio, int start_stop) sc = (struct octeon_soft_command *) octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, - 16, ctx_size); + 16, 0); ncmd = (union octnet_cmd *)sc->virtdptr; - ctx = (struct liquidio_rx_ctl_context *)sc->ctxptr; - - WRITE_ONCE(ctx->cond, 0); - ctx->octeon_id = lio_get_device_id(oct); - init_waitqueue_head(&ctx->wc); ncmd->u64 = 0; ncmd->s.cmd = OCTNET_CMD_RX_CTL; @@ -1244,23 +1207,25 @@ static void send_rx_ctrl_cmd(struct lio *lio, int start_stop) octeon_prepare_soft_command(oct, sc, OPCODE_NIC, OPCODE_NIC_CMD, 0, 0, 0); - sc->callback = rx_ctl_callback; - sc->callback_arg = sc; - sc->wait_time = 5000; + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; retval = octeon_send_soft_command(oct, sc); if (retval == IQ_SEND_FAILED) { netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n"); + octeon_free_soft_command(oct, sc); + return; } else { /* Sleep on a wait queue till the cond flag indicates that the * response arrived or timed-out. 
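
Every hunk in this series that touches a soft command makes the same conversion, so a condensed sketch of the new idiom may be easier to follow than any single call site. All identifiers below (sc->complete, sc->sc_status, sc->caller_is_done, wait_for_sc_completion_timeout()) come from this patch set; only the wrapper function name is invented:

    static int example_send_sc(struct octeon_device *oct,
                               struct octeon_soft_command *sc)
    {
            int rc;

            /* replaces the per-command context struct that carried a
             * wait queue, a cond flag and a wake-up callback
             */
            init_completion(&sc->complete);
            sc->sc_status = OCTEON_REQUEST_PENDING;

            rc = octeon_send_soft_command(oct, sc);
            if (rc == IQ_SEND_FAILED) {
                    /* never queued, so still owned by the caller */
                    octeon_free_soft_command(oct, sc);
                    return -EIO;
            }

            /* on timeout the sc is parked on the zombie list and freed
             * later; the caller must not free it on this path
             */
            rc = wait_for_sc_completion_timeout(oct, sc, 0);
            if (rc)
                    return rc;

            /* ... consume the response at sc->virtrptr here ... */

            /* hand the sc back to the response list for reclaim */
            WRITE_ONCE(sc->caller_is_done, true);
            return 0;
    }
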
*/ - if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) + retval = wait_for_sc_completion_timeout(oct, sc, 0); + if (retval) return; + oct->props[lio->ifidx].rx_on = start_stop; + WRITE_ONCE(sc->caller_is_done, true); } - - octeon_free_soft_command(oct, sc); } /** @@ -1274,8 +1239,10 @@ static void send_rx_ctrl_cmd(struct lio *lio, int start_stop) static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx) { struct net_device *netdev = oct->props[ifidx].netdev; - struct lio *lio; + struct octeon_device_priv *oct_priv = + (struct octeon_device_priv *)oct->priv; struct napi_struct *napi, *n; + struct lio *lio; if (!netdev) { dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n", @@ -1304,6 +1271,8 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx) list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) netif_napi_del(napi); + tasklet_enable(&oct_priv->droq_tasklet); + if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) unregister_netdev(netdev); @@ -1840,9 +1809,13 @@ static int liquidio_open(struct net_device *netdev) { struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; + struct octeon_device_priv *oct_priv = + (struct octeon_device_priv *)oct->priv; struct napi_struct *napi, *n; if (oct->props[lio->ifidx].napi_enabled == 0) { + tasklet_disable(&oct_priv->droq_tasklet); + list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) napi_enable(napi); @@ -1876,6 +1849,12 @@ static int liquidio_open(struct net_device *netdev) /* tell Octeon to start forwarding packets to host */ send_rx_ctrl_cmd(lio, 1); + /* start periodical statistics fetch */ + INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats); + lio->stats_wk.ctxptr = lio; + schedule_delayed_work(&lio->stats_wk.work, msecs_to_jiffies + (LIQUIDIO_NDEV_STATS_POLL_TIME_MS)); + dev_info(&oct->pci_dev->dev, "%s interface is opened\n", netdev->name); @@ -1890,6 +1869,8 @@ static int liquidio_stop(struct net_device *netdev) { struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; + struct octeon_device_priv *oct_priv = + (struct octeon_device_priv *)oct->priv; struct napi_struct *napi, *n; ifstate_reset(lio, LIO_IFSTATE_RUNNING); @@ -1916,6 +1897,8 @@ static int liquidio_stop(struct net_device *netdev) cleanup_tx_poll_fn(netdev); } + cancel_delayed_work_sync(&lio->stats_wk.work); + if (lio->ptp_clock) { ptp_clock_unregister(lio->ptp_clock); lio->ptp_clock = NULL; @@ -1934,6 +1917,8 @@ static int liquidio_stop(struct net_device *netdev) if (OCTEON_CN23XX_PF(oct)) oct->droq[0]->ops.poll_mode = 0; + + tasklet_enable(&oct_priv->droq_tasklet); } dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name); @@ -2014,10 +1999,9 @@ static void liquidio_set_mcast_list(struct net_device *netdev) /* Apparently, any activity in this call from the kernel has to * be atomic. So we won't wait for response. */ - nctrl.wait_time = 0; ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); - if (ret < 0) { + if (ret) { dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n", ret); } @@ -2046,8 +2030,6 @@ static int liquidio_set_mac(struct net_device *netdev, void *p) nctrl.ncmd.s.more = 1; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; nctrl.netpndev = (u64)netdev; - nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; - nctrl.wait_time = 100; nctrl.udd[0] = 0; /* The MAC Address is presented in network byte order. 
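
Dropping octnet_get_link_stats() from liquidio_get_stats64() works because the statistics are now refreshed in the background: open() schedules lio_fetch_stats() on a delayed workqueue and stop() cancels it, so the atomic-context ndo only reads cached counters. Condensed from the hunks above (lio->stats_wk and LIQUIDIO_NDEV_STATS_POLL_TIME_MS are the driver's own names):

    /* ndo_open(): start the periodic firmware stats fetch */
    INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats);
    lio->stats_wk.ctxptr = lio;
    schedule_delayed_work(&lio->stats_wk.work,
                          msecs_to_jiffies(LIQUIDIO_NDEV_STATS_POLL_TIME_MS));

    /* ndo_stop(): make sure no fetch is in flight before teardown */
    cancel_delayed_work_sync(&lio->stats_wk.work);
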
*/ @@ -2058,6 +2040,14 @@ static int liquidio_set_mac(struct net_device *netdev, void *p) dev_err(&oct->pci_dev->dev, "MAC Address change failed\n"); return -ENOMEM; } + + if (nctrl.sc_status) { + dev_err(&oct->pci_dev->dev, + "%s: MAC Address change failed. sc return=%x\n", + __func__, nctrl.sc_status); + return -EIO; + } + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); memcpy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data, ETH_ALEN); @@ -2111,7 +2101,6 @@ liquidio_get_stats64(struct net_device *netdev, lstats->rx_packets = pkts; lstats->rx_dropped = drop; - octnet_get_link_stats(netdev); lstats->multicast = oct->link_stats.fromwire.fw_total_mcast; lstats->collisions = oct->link_stats.fromhost.total_collisions; @@ -2324,7 +2313,7 @@ static inline int send_nic_timestamp_pkt(struct octeon_device *oct, * @returns whether the packet was transmitted to the device okay or not * (NETDEV_TX_OK or NETDEV_TX_BUSY) */ -static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) +static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) { struct lio *lio; struct octnet_buf_free_info *finfo; @@ -2598,14 +2587,15 @@ static int liquidio_vlan_rx_add_vid(struct net_device *netdev, nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER; nctrl.ncmd.s.param1 = vid; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; - nctrl.wait_time = 100; nctrl.netpndev = (u64)netdev; nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); - if (ret < 0) { + if (ret) { dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n", ret); + if (ret > 0) + ret = -EIO; } return ret; @@ -2626,14 +2616,15 @@ static int liquidio_vlan_rx_kill_vid(struct net_device *netdev, nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER; nctrl.ncmd.s.param1 = vid; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; - nctrl.wait_time = 100; nctrl.netpndev = (u64)netdev; nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); - if (ret < 0) { + if (ret) { dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n", ret); + if (ret > 0) + ret = -EIO; } return ret; } @@ -2659,15 +2650,16 @@ static int liquidio_set_rxcsum_command(struct net_device *netdev, int command, nctrl.ncmd.s.cmd = command; nctrl.ncmd.s.param1 = rx_cmd; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; - nctrl.wait_time = 100; nctrl.netpndev = (u64)netdev; nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); - if (ret < 0) { + if (ret) { dev_err(&oct->pci_dev->dev, "DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n", ret); + if (ret > 0) + ret = -EIO; } return ret; } @@ -2695,15 +2687,16 @@ static int liquidio_vxlan_port_command(struct net_device *netdev, int command, nctrl.ncmd.s.more = vxlan_cmd_bit; nctrl.ncmd.s.param1 = vxlan_port; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; - nctrl.wait_time = 100; nctrl.netpndev = (u64)netdev; nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); - if (ret < 0) { + if (ret) { dev_err(&oct->pci_dev->dev, "VxLAN port add/delete failed in core (ret:0x%x)\n", ret); + if (ret > 0) + ret = -EIO; } return ret; } @@ -2826,6 +2819,7 @@ static int __liquidio_set_vf_mac(struct net_device *netdev, int vfidx, struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; struct octnic_ctrl_pkt nctrl; + int ret = 0; if (!is_valid_ether_addr(mac)) return -EINVAL; @@ -2839,12 +2833,13 @@ static int 
__liquidio_set_vf_mac(struct net_device *netdev, int vfidx, nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR; /* vfidx is 0 based, but vf_num (param1) is 1 based */ nctrl.ncmd.s.param1 = vfidx + 1; - nctrl.ncmd.s.param2 = (is_admin_assigned ? 1 : 0); nctrl.ncmd.s.more = 1; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; nctrl.netpndev = (u64)netdev; - nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; - nctrl.wait_time = LIO_CMD_WAIT_TM; + if (is_admin_assigned) { + nctrl.ncmd.s.param2 = true; + nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; + } nctrl.udd[0] = 0; /* The MAC Address is presented in network byte order. */ @@ -2852,9 +2847,11 @@ static int __liquidio_set_vf_mac(struct net_device *netdev, int vfidx, oct->sriov_info.vf_macaddr[vfidx] = nctrl.udd[0]; - octnet_send_nic_ctrl_pkt(oct, &nctrl); + ret = octnet_send_nic_ctrl_pkt(oct, &nctrl); + if (ret > 0) + ret = -EIO; - return 0; + return ret; } static int liquidio_set_vf_mac(struct net_device *netdev, int vfidx, u8 *mac) @@ -2873,6 +2870,62 @@ static int liquidio_set_vf_mac(struct net_device *netdev, int vfidx, u8 *mac) return retval; } +static int liquidio_set_vf_spoofchk(struct net_device *netdev, int vfidx, + bool enable) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct octnic_ctrl_pkt nctrl; + int retval; + + if (!(oct->fw_info.app_cap_flags & LIQUIDIO_SPOOFCHK_CAP)) { + netif_info(lio, drv, lio->netdev, + "firmware does not support spoofchk\n"); + return -EOPNOTSUPP; + } + + if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) { + netif_info(lio, drv, lio->netdev, "Invalid vfidx %d\n", vfidx); + return -EINVAL; + } + + if (enable) { + if (oct->sriov_info.vf_spoofchk[vfidx]) + return 0; + } else { + /* Clear */ + if (!oct->sriov_info.vf_spoofchk[vfidx]) + return 0; + } + + memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); + nctrl.ncmd.s.cmdgroup = OCTNET_CMD_GROUP1; + nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_SPOOFCHK; + nctrl.ncmd.s.param1 = + vfidx + 1; /* vfidx is 0 based, + * but vf_num (param1) is 1 based + */ + nctrl.ncmd.s.param2 = enable; + nctrl.ncmd.s.more = 0; + nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; + nctrl.cb_fn = 0; + + retval = octnet_send_nic_ctrl_pkt(oct, &nctrl); + + if (retval) { + netif_info(lio, drv, lio->netdev, + "Failed to set VF %d spoofchk %s\n", vfidx, + enable ? "on" : "off"); + return -1; + } + + oct->sriov_info.vf_spoofchk[vfidx] = enable; + netif_info(lio, drv, lio->netdev, "VF %u spoofchk is %s\n", vfidx, + enable ? 
"on" : "off"); + + return 0; +} + static int liquidio_set_vf_vlan(struct net_device *netdev, int vfidx, u16 vlan, u8 qos, __be16 vlan_proto) { @@ -2880,6 +2933,7 @@ static int liquidio_set_vf_vlan(struct net_device *netdev, int vfidx, struct octeon_device *oct = lio->oct_dev; struct octnic_ctrl_pkt nctrl; u16 vlantci; + int ret = 0; if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) return -EINVAL; @@ -2911,13 +2965,17 @@ static int liquidio_set_vf_vlan(struct net_device *netdev, int vfidx, nctrl.ncmd.s.more = 0; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; nctrl.cb_fn = NULL; - nctrl.wait_time = LIO_CMD_WAIT_TM; - octnet_send_nic_ctrl_pkt(oct, &nctrl); + ret = octnet_send_nic_ctrl_pkt(oct, &nctrl); + if (ret) { + if (ret > 0) + ret = -EIO; + return ret; + } oct->sriov_info.vf_vlantci[vfidx] = vlantci; - return 0; + return ret; } static int liquidio_get_vf_config(struct net_device *netdev, int vfidx, @@ -2930,6 +2988,8 @@ static int liquidio_get_vf_config(struct net_device *netdev, int vfidx, if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) return -EINVAL; + memset(ivi, 0, sizeof(struct ifla_vf_info)); + ivi->vf = vfidx; macaddr = 2 + (u8 *)&oct->sriov_info.vf_macaddr[vfidx]; ether_addr_copy(&ivi->mac[0], macaddr); @@ -2941,33 +3001,22 @@ static int liquidio_get_vf_config(struct net_device *netdev, int vfidx, else ivi->trusted = false; ivi->linkstate = oct->sriov_info.vf_linkstate[vfidx]; - return 0; -} - -static void trusted_vf_callback(struct octeon_device *oct_dev, - u32 status, void *ptr) -{ - struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr; - struct lio_trusted_vf_ctx *ctx; + ivi->spoofchk = oct->sriov_info.vf_spoofchk[vfidx]; + ivi->max_tx_rate = lio->linfo.link.s.speed; + ivi->min_tx_rate = 0; - ctx = (struct lio_trusted_vf_ctx *)sc->ctxptr; - ctx->status = status; - - complete(&ctx->complete); + return 0; } static int liquidio_send_vf_trust_cmd(struct lio *lio, int vfidx, bool trusted) { struct octeon_device *oct = lio->oct_dev; - struct lio_trusted_vf_ctx *ctx; struct octeon_soft_command *sc; - int ctx_size, retval; - - ctx_size = sizeof(struct lio_trusted_vf_ctx); - sc = octeon_alloc_soft_command(oct, 0, 0, ctx_size); + int retval; - ctx = (struct lio_trusted_vf_ctx *)sc->ctxptr; - init_completion(&ctx->complete); + sc = octeon_alloc_soft_command(oct, 0, 16, 0); + if (!sc) + return -ENOMEM; sc->iq_no = lio->linfo.txpciq[0].s.q_no; @@ -2976,23 +3025,21 @@ static int liquidio_send_vf_trust_cmd(struct lio *lio, int vfidx, bool trusted) OPCODE_NIC_SET_TRUSTED_VF, 0, vfidx + 1, trusted); - sc->callback = trusted_vf_callback; - sc->callback_arg = sc; - sc->wait_time = 1000; + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; retval = octeon_send_soft_command(oct, sc); if (retval == IQ_SEND_FAILED) { + octeon_free_soft_command(oct, sc); retval = -1; } else { /* Wait for response or timeout */ - if (wait_for_completion_timeout(&ctx->complete, - msecs_to_jiffies(2000))) - retval = ctx->status; - else - retval = -1; - } + retval = wait_for_sc_completion_timeout(oct, sc, 0); + if (retval) + return (retval); - octeon_free_soft_command(oct, sc); + WRITE_ONCE(sc->caller_is_done, true); + } return retval; } @@ -3055,6 +3102,7 @@ static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx, struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; struct octnic_ctrl_pkt nctrl; + int ret = 0; if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) return -EINVAL; @@ -3070,13 +3118,15 @@ static int 
liquidio_set_vf_link_state(struct net_device *netdev, int vfidx, nctrl.ncmd.s.more = 0; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; nctrl.cb_fn = NULL; - nctrl.wait_time = LIO_CMD_WAIT_TM; - octnet_send_nic_ctrl_pkt(oct, &nctrl); + ret = octnet_send_nic_ctrl_pkt(oct, &nctrl); - oct->sriov_info.vf_linkstate[vfidx] = linkstate; + if (!ret) + oct->sriov_info.vf_linkstate[vfidx] = linkstate; + else if (ret > 0) + ret = -EIO; - return 0; + return ret; } static int @@ -3094,7 +3144,8 @@ liquidio_eswitch_mode_get(struct devlink *devlink, u16 *mode) } static int -liquidio_eswitch_mode_set(struct devlink *devlink, u16 mode) +liquidio_eswitch_mode_set(struct devlink *devlink, u16 mode, + struct netlink_ext_ack *extack) { struct lio_devlink_priv *priv; struct octeon_device *oct; @@ -3204,6 +3255,7 @@ static const struct net_device_ops lionetdevops = { .ndo_set_vf_mac = liquidio_set_vf_mac, .ndo_set_vf_vlan = liquidio_set_vf_vlan, .ndo_get_vf_config = liquidio_get_vf_config, + .ndo_set_vf_spoofchk = liquidio_set_vf_spoofchk, .ndo_set_vf_trust = liquidio_set_vf_trust, .ndo_set_vf_link_state = liquidio_set_vf_link_state, .ndo_get_vf_stats = liquidio_get_vf_stats, @@ -3307,7 +3359,6 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) unsigned long micro; u32 cur_ver; struct octeon_soft_command *sc; - struct liquidio_if_cfg_context *ctx; struct liquidio_if_cfg_resp *resp; struct octdev_props *props; int retval, num_iqueues, num_oqueues; @@ -3315,7 +3366,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) union oct_nic_if_cfg if_cfg; unsigned int base_queue; unsigned int gmx_port_id; - u32 resp_size, ctx_size, data_size; + u32 resp_size, data_size; u32 ifidx_or_pfnum; struct lio_version *vdata; struct devlink *devlink; @@ -3340,13 +3391,11 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) for (i = 0; i < octeon_dev->ifcount; i++) { resp_size = sizeof(struct liquidio_if_cfg_resp); - ctx_size = sizeof(struct liquidio_if_cfg_context); data_size = sizeof(struct lio_version); sc = (struct octeon_soft_command *) octeon_alloc_soft_command(octeon_dev, data_size, - resp_size, ctx_size); + resp_size, 0); resp = (struct liquidio_if_cfg_resp *)sc->virtrptr; - ctx = (struct liquidio_if_cfg_context *)sc->ctxptr; vdata = (struct lio_version *)sc->virtdptr; *((u64 *)vdata) = 0; @@ -3376,9 +3425,6 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) dev_dbg(&octeon_dev->pci_dev->dev, "requesting config for interface %d, iqs %d, oqs %d\n", ifidx_or_pfnum, num_iqueues, num_oqueues); - WRITE_ONCE(ctx->cond, 0); - ctx->octeon_id = lio_get_device_id(octeon_dev); - init_waitqueue_head(&ctx->wc); if_cfg.u64 = 0; if_cfg.s.num_iqueues = num_iqueues; @@ -3392,9 +3438,8 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) OPCODE_NIC_IF_CFG, 0, if_cfg.u64, 0); - sc->callback = lio_if_cfg_callback; - sc->callback_arg = sc; - sc->wait_time = LIO_IFCFG_WAIT_TIME; + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; retval = octeon_send_soft_command(octeon_dev, sc); if (retval == IQ_SEND_FAILED) { @@ -3402,22 +3447,26 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) "iq/oq config failed status: %x\n", retval); /* Soft instr is freed by driver in case of failure. */ - goto setup_nic_dev_fail; + octeon_free_soft_command(octeon_dev, sc); + return(-EIO); } /* Sleep on a wait queue till the cond flag indicates that the * response arrived or timed-out. 
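
The repeated change from `if (ret < 0)` to `if (ret)` follows from octnet_send_nic_ctrl_pkt() now reporting the firmware completion status as well: negative values are host-side errnos, positive values are firmware status codes that mean nothing to the network stack. A sketch of the convention these call sites settle on (one reading of the hunks, not quoted from any single one):

    ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
    if (ret) {
            dev_err(&oct->pci_dev->dev,
                    "command failed in core (ret: 0x%x)\n", ret);
            if (ret > 0)
                    ret = -EIO;     /* fold firmware status into an errno */
    }
    return ret;
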
*/ - if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) { - dev_err(&octeon_dev->pci_dev->dev, "Wait interrupted\n"); - goto setup_nic_wait_intr; - } + retval = wait_for_sc_completion_timeout(octeon_dev, sc, 0); + if (retval) + return retval; retval = resp->status; if (retval) { dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n"); - goto setup_nic_dev_fail; + WRITE_ONCE(sc->caller_is_done, true); + goto setup_nic_dev_done; } + snprintf(octeon_dev->fw_info.liquidio_firmware_version, + 32, "%s", + resp->cfg_info.liquidio_firmware_version); /* Verify f/w version (in case of 'auto' loading from flash) */ fw_ver = octeon_dev->fw_info.liquidio_firmware_version; @@ -3427,7 +3476,8 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) dev_err(&octeon_dev->pci_dev->dev, "Unmatched firmware version. Expected %s.x, got %s.\n", LIQUIDIO_BASE_VERSION, fw_ver); - goto setup_nic_dev_fail; + WRITE_ONCE(sc->caller_is_done, true); + goto setup_nic_dev_done; } else if (atomic_read(octeon_dev->adapter_fw_state) == FW_IS_PRELOADED) { dev_info(&octeon_dev->pci_dev->dev, @@ -3454,7 +3504,8 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) "Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n", resp->cfg_info.iqmask, resp->cfg_info.oqmask); - goto setup_nic_dev_fail; + WRITE_ONCE(sc->caller_is_done, true); + goto setup_nic_dev_done; } if (OCTEON_CN6XXX(octeon_dev)) { @@ -3473,7 +3524,8 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) if (!netdev) { dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n"); - goto setup_nic_dev_fail; + WRITE_ONCE(sc->caller_is_done, true); + goto setup_nic_dev_done; } SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev); @@ -3488,14 +3540,16 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) if (retval) { dev_err(&octeon_dev->pci_dev->dev, "setting real number rx failed\n"); - goto setup_nic_dev_fail; + WRITE_ONCE(sc->caller_is_done, true); + goto setup_nic_dev_free; } retval = netif_set_real_num_tx_queues(netdev, num_iqueues); if (retval) { dev_err(&octeon_dev->pci_dev->dev, "setting real number tx failed\n"); - goto setup_nic_dev_fail; + WRITE_ONCE(sc->caller_is_done, true); + goto setup_nic_dev_free; } lio = GET_LIO(netdev); @@ -3522,6 +3576,8 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) lio->linfo.gmxport = resp->cfg_info.linfo.gmxport; lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64; + WRITE_ONCE(sc->caller_is_done, true); + lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); if (OCTEON_CN23XX_PF(octeon_dev) || @@ -3588,7 +3644,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) dev_err(&octeon_dev->pci_dev->dev, "Error setting VF%d MAC address\n", j); - goto setup_nic_dev_fail; + goto setup_nic_dev_free; } } @@ -3610,7 +3666,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) lio->linfo.num_txpciq, lio->linfo.num_rxpciq)) { dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n"); - goto setup_nic_dev_fail; + goto setup_nic_dev_free; } ifstate_set(lio, LIO_IFSTATE_DROQ_OPS); @@ -3621,7 +3677,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) if (lio_setup_glists(octeon_dev, lio, num_iqueues)) { dev_err(&octeon_dev->pci_dev->dev, "Gather list allocation failed\n"); - goto setup_nic_dev_fail; + goto setup_nic_dev_free; } /* Register ethtool support */ @@ -3643,20 +3699,20 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) OCTNET_CMD_VERBOSE_ENABLE, 0); if (setup_link_status_change_wq(netdev)) 
- goto setup_nic_dev_fail; + goto setup_nic_dev_free; if ((octeon_dev->fw_info.app_cap_flags & LIQUIDIO_TIME_SYNC_CAP) && setup_sync_octeon_time_wq(netdev)) - goto setup_nic_dev_fail; + goto setup_nic_dev_free; if (setup_rx_oom_poll_fn(netdev)) - goto setup_nic_dev_fail; + goto setup_nic_dev_free; /* Register the network device with the OS */ if (register_netdev(netdev)) { dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n"); - goto setup_nic_dev_fail; + goto setup_nic_dev_free; } dev_dbg(&octeon_dev->pci_dev->dev, @@ -3679,8 +3735,6 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) dev_dbg(&octeon_dev->pci_dev->dev, "NIC ifidx:%d Setup successful\n", i); - octeon_free_soft_command(octeon_dev, sc); - if (octeon_dev->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID || octeon_dev->subsystem_id == @@ -3709,13 +3763,20 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) } octeon_dev->speed_boot = octeon_dev->speed_setting; + /* don't read FEC setting if unsupported by f/w (see above) */ + if (octeon_dev->speed_boot == 25 && + !octeon_dev->no_speed_setting) { + liquidio_get_fec(lio); + octeon_dev->props[lio->ifidx].fec_boot = + octeon_dev->props[lio->ifidx].fec; + } } devlink = devlink_alloc(&liquidio_devlink_ops, sizeof(struct lio_devlink_priv)); if (!devlink) { dev_err(&octeon_dev->pci_dev->dev, "devlink alloc failed\n"); - goto setup_nic_wait_intr; + goto setup_nic_dev_free; } lio_devlink = devlink_priv(devlink); @@ -3725,7 +3786,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) devlink_free(devlink); dev_err(&octeon_dev->pci_dev->dev, "devlink registration failed\n"); - goto setup_nic_wait_intr; + goto setup_nic_dev_free; } octeon_dev->devlink = devlink; @@ -3733,17 +3794,16 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) return 0; -setup_nic_dev_fail: - - octeon_free_soft_command(octeon_dev, sc); - -setup_nic_wait_intr: +setup_nic_dev_free: while (i--) { dev_err(&octeon_dev->pci_dev->dev, "NIC ifidx:%d Setup failed\n", i); liquidio_destroy_nic_device(octeon_dev, i); } + +setup_nic_dev_done: + return -ENODEV; } diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index b77835724dc8..54b245797d2e 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -40,14 +40,6 @@ MODULE_PARM_DESC(debug, "NETIF_MSG debug bits"); #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) -struct liquidio_rx_ctl_context { - int octeon_id; - - wait_queue_head_t wc; - - int cond; -}; - struct oct_timestamp_resp { u64 rh; u64 timestamp; @@ -452,6 +444,8 @@ static void octeon_pci_flr(struct octeon_device *oct) */ static void octeon_destroy_resources(struct octeon_device *oct) { + struct octeon_device_priv *oct_priv = + (struct octeon_device_priv *)oct->priv; struct msix_entry *msix_entries; int i; @@ -471,12 +465,12 @@ static void octeon_destroy_resources(struct octeon_device *oct) case OCT_DEV_HOST_OK: /* fallthrough */ case OCT_DEV_IO_QUEUES_DONE: - if (wait_for_pending_requests(oct)) - dev_err(&oct->pci_dev->dev, "There were pending requests\n"); - if (lio_wait_for_instr_fetch(oct)) dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n"); + if (wait_for_pending_requests(oct)) + dev_err(&oct->pci_dev->dev, "There were pending requests\n"); + /* Disable the input and output queues now. 
No more packets will * arrive from Octeon, but we should wait for all packet * processing to finish. @@ -485,7 +479,33 @@ static void octeon_destroy_resources(struct octeon_device *oct) if (lio_wait_for_oq_pkts(oct)) dev_err(&oct->pci_dev->dev, "OQ had pending packets\n"); - /* fall through */ + + /* Force all requests waiting to be fetched by OCTEON to + * complete. + */ + for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { + struct octeon_instr_queue *iq; + + if (!(oct->io_qmask.iq & BIT_ULL(i))) + continue; + iq = oct->instr_queue[i]; + + if (atomic_read(&iq->instr_pending)) { + spin_lock_bh(&iq->lock); + iq->fill_cnt = 0; + iq->octeon_read_index = iq->host_write_index; + iq->stats.instr_processed += + atomic_read(&iq->instr_pending); + lio_process_iq_request_list(oct, iq, 0); + spin_unlock_bh(&iq->lock); + } + } + + lio_process_ordered_list(oct, 1); + octeon_free_sc_done_list(oct); + octeon_free_sc_zombie_list(oct); + + /* fall through */ case OCT_DEV_INTR_SET_DONE: /* Disable interrupts */ oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR); @@ -569,33 +589,8 @@ static void octeon_destroy_resources(struct octeon_device *oct) /* Nothing to be done here either */ break; } -} - -/** - * \brief Callback for rx ctrl - * @param status status of request - * @param buf pointer to resp structure - */ -static void rx_ctl_callback(struct octeon_device *oct, - u32 status, void *buf) -{ - struct octeon_soft_command *sc = (struct octeon_soft_command *)buf; - struct liquidio_rx_ctl_context *ctx; - - ctx = (struct liquidio_rx_ctl_context *)sc->ctxptr; - oct = lio_get_device(ctx->octeon_id); - if (status) - dev_err(&oct->pci_dev->dev, "rx ctl instruction failed. Status: %llx\n", - CVM_CAST64(status)); - WRITE_ONCE(ctx->cond, 1); - - /* This barrier is required to be sure that the response has been - * written fully before waking up the handler - */ - wmb(); - - wake_up_interruptible(&ctx->wc); + tasklet_kill(&oct_priv->droq_tasklet); } /** @@ -606,8 +601,6 @@ static void rx_ctl_callback(struct octeon_device *oct, static void send_rx_ctrl_cmd(struct lio *lio, int start_stop) { struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; - int ctx_size = sizeof(struct liquidio_rx_ctl_context); - struct liquidio_rx_ctl_context *ctx; struct octeon_soft_command *sc; union octnet_cmd *ncmd; int retval; @@ -617,14 +610,9 @@ static void send_rx_ctrl_cmd(struct lio *lio, int start_stop) sc = (struct octeon_soft_command *) octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, - 16, ctx_size); + 16, 0); ncmd = (union octnet_cmd *)sc->virtdptr; - ctx = (struct liquidio_rx_ctl_context *)sc->ctxptr; - - WRITE_ONCE(ctx->cond, 0); - ctx->octeon_id = lio_get_device_id(oct); - init_waitqueue_head(&ctx->wc); ncmd->u64 = 0; ncmd->s.cmd = OCTNET_CMD_RX_CTL; @@ -637,23 +625,24 @@ static void send_rx_ctrl_cmd(struct lio *lio, int start_stop) octeon_prepare_soft_command(oct, sc, OPCODE_NIC, OPCODE_NIC_CMD, 0, 0, 0); - sc->callback = rx_ctl_callback; - sc->callback_arg = sc; - sc->wait_time = 5000; + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; retval = octeon_send_soft_command(oct, sc); if (retval == IQ_SEND_FAILED) { netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n"); + octeon_free_soft_command(oct, sc); } else { /* Sleep on a wait queue till the cond flag indicates that the * response arrived or timed-out. 
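
The teardown drain added to both octeon_destroy_resources() implementations (PF earlier, VF here) exists because completion-based soft commands would otherwise sleep forever on instructions the hardware will never fetch. Condensed, together with the forced flush of the ordered list that follows it:

    for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
            struct octeon_instr_queue *iq;

            if (!(oct->io_qmask.iq & BIT_ULL(i)))
                    continue;
            iq = oct->instr_queue[i];
            if (atomic_read(&iq->instr_pending)) {
                    spin_lock_bh(&iq->lock);
                    iq->fill_cnt = 0;
                    /* pretend the device consumed everything queued */
                    iq->octeon_read_index = iq->host_write_index;
                    iq->stats.instr_processed +=
                            atomic_read(&iq->instr_pending);
                    lio_process_iq_request_list(oct, iq, 0);
                    spin_unlock_bh(&iq->lock);
            }
    }
    lio_process_ordered_list(oct, 1);  /* nonzero apparently forces timeout */
    octeon_free_sc_done_list(oct);
    octeon_free_sc_zombie_list(oct);
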
*/ - if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) + retval = wait_for_sc_completion_timeout(oct, sc, 0); + if (retval) return; + oct->props[lio->ifidx].rx_on = start_stop; + WRITE_ONCE(sc->caller_is_done, true); } - - octeon_free_soft_command(oct, sc); } /** @@ -667,6 +656,8 @@ static void send_rx_ctrl_cmd(struct lio *lio, int start_stop) static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx) { struct net_device *netdev = oct->props[ifidx].netdev; + struct octeon_device_priv *oct_priv = + (struct octeon_device_priv *)oct->priv; struct napi_struct *napi, *n; struct lio *lio; @@ -696,6 +687,8 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx) list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) netif_napi_del(napi); + tasklet_enable(&oct_priv->droq_tasklet); + if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) unregister_netdev(netdev); @@ -913,9 +906,13 @@ static int liquidio_open(struct net_device *netdev) { struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; + struct octeon_device_priv *oct_priv = + (struct octeon_device_priv *)oct->priv; struct napi_struct *napi, *n; if (!oct->props[lio->ifidx].napi_enabled) { + tasklet_disable(&oct_priv->droq_tasklet); + list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) napi_enable(napi); @@ -932,6 +929,11 @@ static int liquidio_open(struct net_device *netdev) netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n"); start_txqs(netdev); + INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats); + lio->stats_wk.ctxptr = lio; + schedule_delayed_work(&lio->stats_wk.work, msecs_to_jiffies + (LIQUIDIO_NDEV_STATS_POLL_TIME_MS)); + /* tell Octeon to start forwarding packets to host */ send_rx_ctrl_cmd(lio, 1); @@ -948,6 +950,8 @@ static int liquidio_stop(struct net_device *netdev) { struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; + struct octeon_device_priv *oct_priv = + (struct octeon_device_priv *)oct->priv; struct napi_struct *napi, *n; /* tell Octeon to stop forwarding packets to host */ @@ -977,8 +981,12 @@ static int liquidio_stop(struct net_device *netdev) oct->props[lio->ifidx].napi_enabled = 0; oct->droq[0]->ops.poll_mode = 0; + + tasklet_enable(&oct_priv->droq_tasklet); } + cancel_delayed_work_sync(&lio->stats_wk.work); + dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name); return 0; @@ -1093,10 +1101,9 @@ static void liquidio_set_mcast_list(struct net_device *netdev) /* Apparently, any activity in this call from the kernel has to * be atomic. So we won't wait for response. */ - nctrl.wait_time = 0; ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); - if (ret < 0) { + if (ret) { dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n", ret); } @@ -1133,8 +1140,6 @@ static int liquidio_set_mac(struct net_device *netdev, void *p) nctrl.ncmd.s.more = 1; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; nctrl.netpndev = (u64)netdev; - nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; - nctrl.wait_time = 100; nctrl.udd[0] = 0; /* The MAC Address is presented in network byte order. 
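
Both drivers now treat the droq tasklet and NAPI as mutually exclusive owners of the receive queues: open() disables the tasklet before enabling NAPI, stop() and the nic-destroy path re-enable it, and the VF teardown finally kills it. This single-owner rule is presumably what makes the droq->lock removal later in the series safe. From the open() hunks above:

    if (!oct->props[lio->ifidx].napi_enabled) {
            /* NAPI takes over RX; park the tasklet until ndo_stop() */
            tasklet_disable(&oct_priv->droq_tasklet);

            list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
                    napi_enable(napi);

            oct->props[lio->ifidx].napi_enabled = 1;
    }
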
*/ @@ -1145,6 +1150,13 @@ static int liquidio_set_mac(struct net_device *netdev, void *p) dev_err(&oct->pci_dev->dev, "MAC Address change failed\n"); return -ENOMEM; } + + if (nctrl.sc_status == + FIRMWARE_STATUS_CODE(OCTEON_REQUEST_NO_PERMISSION)) { + dev_err(&oct->pci_dev->dev, "MAC Address change failed: no permission\n"); + return -EPERM; + } + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); ether_addr_copy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data); @@ -1198,7 +1210,6 @@ liquidio_get_stats64(struct net_device *netdev, lstats->rx_packets = pkts; lstats->rx_dropped = drop; - octnet_get_link_stats(netdev); lstats->multicast = oct->link_stats.fromwire.fw_total_mcast; /* detailed rx_errors: */ @@ -1390,7 +1401,7 @@ static int send_nic_timestamp_pkt(struct octeon_device *oct, * @returns whether the packet was transmitted to the device okay or not * (NETDEV_TX_OK or NETDEV_TX_BUSY) */ -static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) +static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) { struct octnet_buf_free_info *finfo; union octnic_cmd_setup cmdsetup; @@ -1638,8 +1649,6 @@ liquidio_vlan_rx_add_vid(struct net_device *netdev, struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; struct octnic_ctrl_pkt nctrl; - struct completion compl; - u16 response_code; int ret = 0; memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); @@ -1648,26 +1657,15 @@ liquidio_vlan_rx_add_vid(struct net_device *netdev, nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER; nctrl.ncmd.s.param1 = vid; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; - nctrl.wait_time = 100; nctrl.netpndev = (u64)netdev; nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; - init_completion(&compl); - nctrl.completion = &compl; - nctrl.response_code = &response_code; ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); - if (ret < 0) { + if (ret) { dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n", ret); - return -EIO; - } - - if (!wait_for_completion_timeout(&compl, - msecs_to_jiffies(nctrl.wait_time))) - return -EPERM; - - if (READ_ONCE(response_code)) return -EPERM; + } return 0; } @@ -1687,14 +1685,15 @@ liquidio_vlan_rx_kill_vid(struct net_device *netdev, nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER; nctrl.ncmd.s.param1 = vid; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; - nctrl.wait_time = 100; nctrl.netpndev = (u64)netdev; nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); - if (ret < 0) { + if (ret) { dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n", ret); + if (ret > 0) + ret = -EIO; } return ret; } @@ -1720,14 +1719,15 @@ static int liquidio_set_rxcsum_command(struct net_device *netdev, int command, nctrl.ncmd.s.cmd = command; nctrl.ncmd.s.param1 = rx_cmd; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; - nctrl.wait_time = 100; nctrl.netpndev = (u64)netdev; nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); - if (ret < 0) { + if (ret) { dev_err(&oct->pci_dev->dev, "DEVFLAGS RXCSUM change failed in core (ret:0x%x)\n", ret); + if (ret > 0) + ret = -EIO; } return ret; } @@ -1755,15 +1755,16 @@ static int liquidio_vxlan_port_command(struct net_device *netdev, int command, nctrl.ncmd.s.more = vxlan_cmd_bit; nctrl.ncmd.s.param1 = vxlan_port; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; - nctrl.wait_time = 100; nctrl.netpndev = (u64)netdev; nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; ret = 
octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); - if (ret < 0) { + if (ret) { dev_err(&oct->pci_dev->dev, "DEVFLAGS VxLAN port add/delete failed in core (ret : 0x%x)\n", ret); + if (ret > 0) + ret = -EIO; } return ret; } @@ -1924,8 +1925,7 @@ nic_info_err: static int setup_nic_devices(struct octeon_device *octeon_dev) { int retval, num_iqueues, num_oqueues; - struct liquidio_if_cfg_context *ctx; - u32 resp_size, ctx_size, data_size; + u32 resp_size, data_size; struct liquidio_if_cfg_resp *resp; struct octeon_soft_command *sc; union oct_nic_if_cfg if_cfg; @@ -1956,13 +1956,11 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) for (i = 0; i < octeon_dev->ifcount; i++) { resp_size = sizeof(struct liquidio_if_cfg_resp); - ctx_size = sizeof(struct liquidio_if_cfg_context); data_size = sizeof(struct lio_version); sc = (struct octeon_soft_command *) octeon_alloc_soft_command(octeon_dev, data_size, - resp_size, ctx_size); + resp_size, 0); resp = (struct liquidio_if_cfg_resp *)sc->virtrptr; - ctx = (struct liquidio_if_cfg_context *)sc->ctxptr; vdata = (struct lio_version *)sc->virtdptr; *((u64 *)vdata) = 0; @@ -1970,10 +1968,6 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION); vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION); - WRITE_ONCE(ctx->cond, 0); - ctx->octeon_id = lio_get_device_id(octeon_dev); - init_waitqueue_head(&ctx->wc); - if_cfg.u64 = 0; if_cfg.s.num_iqueues = octeon_dev->sriov_info.rings_per_vf; @@ -1986,32 +1980,37 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) OPCODE_NIC_IF_CFG, 0, if_cfg.u64, 0); - sc->callback = lio_if_cfg_callback; - sc->callback_arg = sc; - sc->wait_time = 5000; + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; retval = octeon_send_soft_command(octeon_dev, sc); if (retval == IQ_SEND_FAILED) { dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed status: %x\n", retval); /* Soft instr is freed by driver in case of failure. */ - goto setup_nic_dev_fail; + octeon_free_soft_command(octeon_dev, sc); + return(-EIO); } /* Sleep on a wait queue till the cond flag indicates that the * response arrived or timed-out. 
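
On the VF side the firmware can refuse a MAC change for an untrusted function; the status now arrives in nctrl.sc_status instead of through the private completion the old VLAN path open-coded. Sketch of the mapping; OCTEON_REQUEST_NO_PERMISSION is defined in liquidio_common.h below, while FIRMWARE_STATUS_CODE() is the driver's wrapper for raw firmware status (its definition is outside this excerpt):

    ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
    if (ret < 0)
            return -ENOMEM;         /* could not even send the command */

    if (nctrl.sc_status ==
        FIRMWARE_STATUS_CODE(OCTEON_REQUEST_NO_PERMISSION))
            return -EPERM;          /* PF/firmware denied this VF */
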
*/ - if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) { - dev_err(&octeon_dev->pci_dev->dev, "Wait interrupted\n"); - goto setup_nic_wait_intr; - } + retval = wait_for_sc_completion_timeout(octeon_dev, sc, 0); + if (retval) + return retval; retval = resp->status; if (retval) { - dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n"); - goto setup_nic_dev_fail; + dev_err(&octeon_dev->pci_dev->dev, + "iq/oq config failed, retval = %d\n", retval); + WRITE_ONCE(sc->caller_is_done, true); + return -EIO; } + snprintf(octeon_dev->fw_info.liquidio_firmware_version, + 32, "%s", + resp->cfg_info.liquidio_firmware_version); + octeon_swap_8B_data((u64 *)(&resp->cfg_info), (sizeof(struct liquidio_if_cfg_info)) >> 3); @@ -2022,7 +2021,8 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) dev_err(&octeon_dev->pci_dev->dev, "Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n", resp->cfg_info.iqmask, resp->cfg_info.oqmask); - goto setup_nic_dev_fail; + WRITE_ONCE(sc->caller_is_done, true); + goto setup_nic_dev_done; } dev_dbg(&octeon_dev->pci_dev->dev, "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n", @@ -2033,7 +2033,8 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) if (!netdev) { dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n"); - goto setup_nic_dev_fail; + WRITE_ONCE(sc->caller_is_done, true); + goto setup_nic_dev_done; } SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev); @@ -2070,6 +2071,8 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64; lio->linfo.macaddr_is_admin_asgnd = resp->cfg_info.linfo.macaddr_is_admin_asgnd; + lio->linfo.macaddr_spoofchk = + resp->cfg_info.linfo.macaddr_spoofchk; lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); @@ -2109,6 +2112,8 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) netdev->min_mtu = LIO_MIN_MTU_SIZE; netdev->max_mtu = LIO_MAX_MTU_SIZE; + WRITE_ONCE(sc->caller_is_done, true); + /* Point to the properties for octeon device to which this * interface belongs. 
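
The placement of WRITE_ONCE(sc->caller_is_done, true) in these interface-config paths looks load-bearing (one reading of the hunks above): resp aliases the soft command's DMA response buffer, so every field has to be copied out of resp->cfg_info before the flag lets the response list recycle the sc.

    /* last reads of resp precede the hand-back */
    lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;
    lio->linfo.macaddr_spoofchk =
            resp->cfg_info.linfo.macaddr_spoofchk;

    WRITE_ONCE(sc->caller_is_done, true);  /* resp is dead past this point */
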
*/ @@ -2132,7 +2137,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) lio->linfo.num_txpciq, lio->linfo.num_rxpciq)) { dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n"); - goto setup_nic_dev_fail; + goto setup_nic_dev_free; } ifstate_set(lio, LIO_IFSTATE_DROQ_OPS); @@ -2155,7 +2160,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) if (lio_setup_glists(octeon_dev, lio, num_iqueues)) { dev_err(&octeon_dev->pci_dev->dev, "Gather list allocation failed\n"); - goto setup_nic_dev_fail; + goto setup_nic_dev_free; } /* Register ethtool support */ @@ -2170,15 +2175,15 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) OCTNIC_LROIPV4 | OCTNIC_LROIPV6); if (setup_link_status_change_wq(netdev)) - goto setup_nic_dev_fail; + goto setup_nic_dev_free; if (setup_rx_oom_poll_fn(netdev)) - goto setup_nic_dev_fail; + goto setup_nic_dev_free; /* Register the network device with the OS */ if (register_netdev(netdev)) { dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n"); - goto setup_nic_dev_fail; + goto setup_nic_dev_free; } dev_dbg(&octeon_dev->pci_dev->dev, @@ -2201,24 +2206,21 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) dev_dbg(&octeon_dev->pci_dev->dev, "NIC ifidx:%d Setup successful\n", i); - octeon_free_soft_command(octeon_dev, sc); - octeon_dev->no_speed_setting = 1; } return 0; -setup_nic_dev_fail: - - octeon_free_soft_command(octeon_dev, sc); - -setup_nic_wait_intr: +setup_nic_dev_free: while (i--) { dev_err(&octeon_dev->pci_dev->dev, "NIC ifidx:%d Setup failed\n", i); liquidio_destroy_nic_device(octeon_dev, i); } + +setup_nic_dev_done: + return -ENODEV; } diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c index ddd7431579f4..ea9859e028d4 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c @@ -27,11 +27,11 @@ #include "octeon_network.h" #include <net/switchdev.h> #include "lio_vf_rep.h" -#include "octeon_network.h" static int lio_vf_rep_open(struct net_device *ndev); static int lio_vf_rep_stop(struct net_device *ndev); -static int lio_vf_rep_pkt_xmit(struct sk_buff *skb, struct net_device *ndev); +static netdev_tx_t lio_vf_rep_pkt_xmit(struct sk_buff *skb, + struct net_device *ndev); static void lio_vf_rep_tx_timeout(struct net_device *netdev); static int lio_vf_rep_phys_port_name(struct net_device *dev, char *buf, size_t len); @@ -49,44 +49,25 @@ static const struct net_device_ops lio_vf_rep_ndev_ops = { .ndo_change_mtu = lio_vf_rep_change_mtu, }; -static void -lio_vf_rep_send_sc_complete(struct octeon_device *oct, - u32 status, void *ptr) -{ - struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr; - struct lio_vf_rep_sc_ctx *ctx = - (struct lio_vf_rep_sc_ctx *)sc->ctxptr; - struct lio_vf_rep_resp *resp = - (struct lio_vf_rep_resp *)sc->virtrptr; - - if (status != OCTEON_REQUEST_TIMEOUT && READ_ONCE(resp->status)) - WRITE_ONCE(resp->status, 0); - - complete(&ctx->complete); -} - static int lio_vf_rep_send_soft_command(struct octeon_device *oct, void *req, int req_size, void *resp, int resp_size) { int tot_resp_size = sizeof(struct lio_vf_rep_resp) + resp_size; - int ctx_size = sizeof(struct lio_vf_rep_sc_ctx); struct octeon_soft_command *sc = NULL; struct lio_vf_rep_resp *rep_resp; - struct lio_vf_rep_sc_ctx *ctx; void *sc_req; int err; sc = (struct octeon_soft_command *) octeon_alloc_soft_command(oct, req_size, - tot_resp_size, ctx_size); + tot_resp_size, 
0); if (!sc) return -ENOMEM; - ctx = (struct lio_vf_rep_sc_ctx *)sc->ctxptr; - memset(ctx, 0, ctx_size); - init_completion(&ctx->complete); + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; sc_req = (struct lio_vf_rep_req *)sc->virtdptr; memcpy(sc_req, req, req_size); @@ -98,23 +79,24 @@ lio_vf_rep_send_soft_command(struct octeon_device *oct, sc->iq_no = 0; octeon_prepare_soft_command(oct, sc, OPCODE_NIC, OPCODE_NIC_VF_REP_CMD, 0, 0, 0); - sc->callback = lio_vf_rep_send_sc_complete; - sc->callback_arg = sc; - sc->wait_time = LIO_VF_REP_REQ_TMO_MS; err = octeon_send_soft_command(oct, sc); if (err == IQ_SEND_FAILED) goto free_buff; - wait_for_completion_timeout(&ctx->complete, - msecs_to_jiffies - (2 * LIO_VF_REP_REQ_TMO_MS)); + err = wait_for_sc_completion_timeout(oct, sc, 0); + if (err) + return err; + err = READ_ONCE(rep_resp->status) ? -EBUSY : 0; if (err) dev_err(&oct->pci_dev->dev, "VF rep send config failed\n"); - - if (resp) + else if (resp) memcpy(resp, (rep_resp + 1), resp_size); + + WRITE_ONCE(sc->caller_is_done, true); + return err; + free_buff: octeon_free_soft_command(oct, sc); @@ -380,7 +362,7 @@ lio_vf_rep_packet_sent_callback(struct octeon_device *oct, netif_wake_queue(ndev); } -static int +static netdev_tx_t lio_vf_rep_pkt_xmit(struct sk_buff *skb, struct net_device *ndev) { struct lio_vf_rep_desc *vf_rep = netdev_priv(ndev); @@ -404,7 +386,7 @@ lio_vf_rep_pkt_xmit(struct sk_buff *skb, struct net_device *ndev) } sc = (struct octeon_soft_command *) - octeon_alloc_soft_command(oct, 0, 0, 0); + octeon_alloc_soft_command(oct, 0, 16, 0); if (!sc) { dev_err(&oct->pci_dev->dev, "VF rep: Soft command alloc failed\n"); goto xmit_failed; @@ -413,6 +395,7 @@ lio_vf_rep_pkt_xmit(struct sk_buff *skb, struct net_device *ndev) /* Multiple buffers are not used for vf_rep packets. */ if (skb_shinfo(skb)->nr_frags != 0) { dev_err(&oct->pci_dev->dev, "VF rep: nr_frags != 0. 
Dropping packet\n"); + octeon_free_soft_command(oct, sc); goto xmit_failed; } @@ -420,6 +403,7 @@ lio_vf_rep_pkt_xmit(struct sk_buff *skb, struct net_device *ndev) skb->data, skb->len, DMA_TO_DEVICE); if (dma_mapping_error(&oct->pci_dev->dev, sc->dmadptr)) { dev_err(&oct->pci_dev->dev, "VF rep: DMA mapping failed\n"); + octeon_free_soft_command(oct, sc); goto xmit_failed; } @@ -440,6 +424,7 @@ lio_vf_rep_pkt_xmit(struct sk_buff *skb, struct net_device *ndev) if (status == IQ_SEND_FAILED) { dma_unmap_single(&oct->pci_dev->dev, sc->dmadptr, sc->datasize, DMA_TO_DEVICE); + octeon_free_soft_command(oct, sc); goto xmit_failed; } diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h index 7407fcd338e9..a5e0e9f17959 100644 --- a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h +++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h @@ -118,6 +118,10 @@ enum octeon_tag_type { /* App specific capabilities from firmware to pf driver */ #define LIQUIDIO_TIME_SYNC_CAP 0x1 #define LIQUIDIO_SWITCHDEV_CAP 0x2 +#define LIQUIDIO_SPOOFCHK_CAP 0x4 + +/* error status return from firmware */ +#define OCTEON_REQUEST_NO_PERMISSION 0xc static inline u32 incr_index(u32 index, u32 count, u32 max) { @@ -241,6 +245,10 @@ static inline void add_sg_size(struct octeon_sg_entry *sg_entry, #define OCTNET_CMD_QUEUE_COUNT_CTL 0x1f +#define OCTNET_CMD_GROUP1 1 +#define OCTNET_CMD_SET_VF_SPOOFCHK 0x1 +#define OCTNET_GROUP1_LAST_CMD OCTNET_CMD_SET_VF_SPOOFCHK + #define OCTNET_CMD_VXLAN_PORT_ADD 0x0 #define OCTNET_CMD_VXLAN_PORT_DEL 0x1 #define OCTNET_CMD_RXCSUM_ENABLE 0x0 @@ -250,9 +258,18 @@ static inline void add_sg_size(struct octeon_sg_entry *sg_entry, #define OCTNET_CMD_VLAN_FILTER_ENABLE 0x1 #define OCTNET_CMD_VLAN_FILTER_DISABLE 0x0 +#define OCTNET_CMD_FAIL 0x1 + +#define SEAPI_CMD_FEC_SET 0x0 +#define SEAPI_CMD_FEC_SET_DISABLE 0x0 +#define SEAPI_CMD_FEC_SET_RS 0x1 +#define SEAPI_CMD_FEC_GET 0x1 + #define SEAPI_CMD_SPEED_SET 0x2 #define SEAPI_CMD_SPEED_GET 0x3 +#define OPCODE_NIC_VF_PORT_STATS 0x22 + #define LIO_CMD_WAIT_TM 100 /* RX(packets coming from wire) Checksum verification flags */ @@ -301,7 +318,8 @@ union octnet_cmd { u64 more:6; /* How many udd words follow the command */ - u64 reserved:29; + u64 cmdgroup:8; + u64 reserved:21; u64 param1:16; @@ -313,7 +331,8 @@ union octnet_cmd { u64 param1:16; - u64 reserved:29; + u64 reserved:21; + u64 cmdgroup:8; u64 more:6; @@ -757,13 +776,17 @@ struct oct_link_info { #ifdef __BIG_ENDIAN_BITFIELD u64 gmxport:16; u64 macaddr_is_admin_asgnd:1; - u64 rsvd:31; + u64 rsvd:13; + u64 macaddr_spoofchk:1; + u64 rsvd1:17; u64 num_txpciq:8; u64 num_rxpciq:8; #else u64 num_rxpciq:8; u64 num_txpciq:8; - u64 rsvd:31; + u64 rsvd1:17; + u64 macaddr_spoofchk:1; + u64 rsvd:13; u64 macaddr_is_admin_asgnd:1; u64 gmxport:16; #endif diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_config.h b/drivers/net/ethernet/cavium/liquidio/octeon_config.h index ceac74388e09..24c212001212 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_config.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_config.h @@ -438,9 +438,10 @@ struct octeon_config { #define MAX_BAR1_IOREMAP_SIZE (16 * OCTEON_BAR1_ENTRY_SIZE) /* Response lists - 1 ordered, 1 unordered-blocking, 1 unordered-nonblocking + * 1 process done list, 1 zombie lists(timeouted sc list) * NoResponse Lists are now maintained with each IQ. (Dec' 2007). */ -#define MAX_RESPONSE_LISTS 4 +#define MAX_RESPONSE_LISTS 6 /* Opcode hash bits. 
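
The bitfield change in union octnet_cmd splits the 29 reserved bits into an 8-bit cmdgroup plus 21 reserved bits, mirrored in both endian layouts, so each group gets its own command namespace; group 1 currently carries only OCTNET_CMD_SET_VF_SPOOFCHK. A sketch of how a group-1 command is encoded (the spoofchk ndo earlier builds exactly this through struct octnic_ctrl_pkt; cmd.s.cmd is the pre-existing opcode field, not shown in this hunk):

    union octnet_cmd cmd;

    cmd.u64 = 0;
    cmd.s.cmdgroup = OCTNET_CMD_GROUP1;     /* new 8-bit group field */
    cmd.s.cmd = OCTNET_CMD_SET_VF_SPOOFCHK;
    cmd.s.param1 = vfidx + 1;               /* VF numbers are 1-based */
    cmd.s.param2 = enable;                  /* 1 = enforce, 0 = clear */
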
The opcode is hashed on the lower 6-bits to lookup the * dispatch table. diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c index f878a552fef3..ce8c3f818666 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c @@ -1044,8 +1044,7 @@ void octeon_delete_dispatch_list(struct octeon_device *oct) dispatch = &oct->dispatch.dlist[i].list; while (dispatch->next != dispatch) { temp = dispatch->next; - list_del(temp); - list_add_tail(temp, &freelist); + list_move_tail(temp, &freelist); } oct->dispatch.dlist[i].opcode = 0; @@ -1440,18 +1439,15 @@ void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq) /* the whole thing needs to be atomic, ideally */ if (droq) { pkts_pend = (u32)atomic_read(&droq->pkts_pending); - spin_lock_bh(&droq->lock); writel(droq->pkt_count - pkts_pend, droq->pkts_sent_reg); droq->pkt_count = pkts_pend; - /* this write needs to be flushed before we release the lock */ - mmiowb(); - spin_unlock_bh(&droq->lock); oct = droq->oct_dev; } if (iq) { spin_lock_bh(&iq->lock); - writel(iq->pkt_in_done, iq->inst_cnt_reg); - iq->pkt_in_done = 0; + writel(iq->pkts_processed, iq->inst_cnt_reg); + iq->pkt_in_done -= iq->pkts_processed; + iq->pkts_processed = 0; /* this write needs to be flushed before we release the lock */ mmiowb(); spin_unlock_bh(&iq->lock); diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h index d99ca6ba23a4..3d01d3602d8f 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h @@ -316,6 +316,8 @@ struct octdev_props { * device pointer (used for OS specific calls). */ int rx_on; + int fec; + int fec_boot; int napi_enabled; int gmxport; struct net_device *netdev; @@ -397,6 +399,8 @@ struct octeon_sriov_info { int vf_linkstate[MAX_POSSIBLE_VFS]; + bool vf_spoofchk[MAX_POSSIBLE_VFS]; + u64 vf_drv_loaded_mask; }; @@ -607,6 +611,9 @@ struct octeon_device { u8 speed_boot; u8 speed_setting; u8 no_speed_setting; + + u32 vfstats_poll; +#define LIO_VFSTATS_POLL 10 }; #define OCT_DRV_ONLINE 1 diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c index a71dbb7ab6af..a0c099f71524 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c @@ -301,8 +301,6 @@ int octeon_init_droq(struct octeon_device *oct, dev_dbg(&oct->pci_dev->dev, "DROQ INIT: max_empty_descs: %d\n", droq->max_empty_descs); - spin_lock_init(&droq->lock); - INIT_LIST_HEAD(&droq->dispatch_list); /* For 56xx Pass1, this function won't be called, so no checks. */ @@ -333,8 +331,6 @@ init_droq_fail: * Returns: * Success: Pointer to recv_info_t * Failure: NULL. - * Locks: - * The droq->lock is held when this routine is called. */ static inline struct octeon_recv_info *octeon_create_recv_info( struct octeon_device *octeon_dev, @@ -433,8 +429,6 @@ octeon_droq_refill_pullup_descs(struct octeon_droq *droq, * up buffers (that were not dispatched) to form a contiguous ring. * Returns: * No of descriptors refilled. - * Locks: - * This routine is called with droq->lock held. 
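
lio_enable_irq() now acknowledges only iq->pkts_processed instead of the whole pkt_in_done count, so instructions that arrived after processing stopped stay accounted for; the droq half of the function loses its lock and mmiowb() entirely because a single context updates those registers now. The IQ half, condensed:

    spin_lock_bh(&iq->lock);
    /* acknowledge only what was actually processed ... */
    writel(iq->pkts_processed, iq->inst_cnt_reg);
    /* ... and keep counting the remainder */
    iq->pkt_in_done -= iq->pkts_processed;
    iq->pkts_processed = 0;
    mmiowb();       /* flush the MMIO write before unlocking */
    spin_unlock_bh(&iq->lock);
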
*/ static u32 octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq) @@ -449,8 +443,7 @@ octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq) while (droq->refill_count && (desc_refilled < droq->max_count)) { /* If a valid buffer exists (happens if there is no dispatch), - * reuse - * the buffer, else allocate. + * reuse the buffer, else allocate. */ if (!droq->recv_buf_list[droq->refill_idx].buffer) { pg_info = @@ -503,34 +496,37 @@ octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq) /** check if we can allocate packets to get out of oom. * @param droq - Droq being checked. - * @return does not return anything + * @return 1 if it fails to refill the minimum */ -void octeon_droq_check_oom(struct octeon_droq *droq) +int octeon_retry_droq_refill(struct octeon_droq *droq) { - int desc_refilled; struct octeon_device *oct = droq->oct_dev; + int desc_refilled, reschedule = 1; + u32 pkts_credit; + + pkts_credit = readl(droq->pkts_credit_reg); + desc_refilled = octeon_droq_refill(oct, droq); + if (desc_refilled) { + /* Flush the droq descriptor data to memory to be sure + * that when we update the credits the data in memory + * is accurate. + */ + wmb(); + writel(desc_refilled, droq->pkts_credit_reg); + /* make sure mmio write completes */ + mmiowb(); - if (readl(droq->pkts_credit_reg) <= CN23XX_SLI_DEF_BP) { - spin_lock_bh(&droq->lock); - desc_refilled = octeon_droq_refill(oct, droq); - if (desc_refilled) { - /* Flush the droq descriptor data to memory to be sure - * that when we update the credits the data in memory - * is accurate. - */ - wmb(); - writel(desc_refilled, droq->pkts_credit_reg); - /* make sure mmio write completes */ - mmiowb(); - } - spin_unlock_bh(&droq->lock); + if (pkts_credit + desc_refilled >= CN23XX_SLI_DEF_BP) + reschedule = 0; } + + return reschedule; } static inline u32 octeon_droq_get_bufcount(u32 buf_size, u32 total_len) { - return ((total_len + buf_size - 1) / buf_size); + return DIV_ROUND_UP(total_len, buf_size); } static int @@ -603,9 +599,9 @@ octeon_droq_fast_process_packets(struct octeon_device *oct, struct octeon_droq *droq, u32 pkts_to_process) { + u32 pkt, total_len = 0, pkt_count, retval; struct octeon_droq_info *info; union octeon_rh *rh; - u32 pkt, total_len = 0, pkt_count; pkt_count = pkts_to_process; @@ -709,30 +705,43 @@ octeon_droq_fast_process_packets(struct octeon_device *oct, if (droq->refill_count >= droq->refill_threshold) { int desc_refilled = octeon_droq_refill(oct, droq); - /* Flush the droq descriptor data to memory to be sure - * that when we update the credits the data in memory - * is accurate. - */ - wmb(); - writel((desc_refilled), droq->pkts_credit_reg); - /* make sure mmio write completes */ - mmiowb(); + if (desc_refilled) { + /* Flush the droq descriptor data to memory to + * be sure that when we update the credits the + * data in memory is accurate. + */ + wmb(); + writel(desc_refilled, droq->pkts_credit_reg); + /* make sure mmio write completes */ + mmiowb(); + } } - } /* for (each packet)... */ /* Increment refill_count by the number of buffers processed. 
*/ droq->stats.pkts_received += pkt; droq->stats.bytes_received += total_len; + retval = pkt; if ((droq->ops.drop_on_max) && (pkts_to_process - pkt)) { octeon_droq_drop_packets(oct, droq, (pkts_to_process - pkt)); droq->stats.dropped_toomany += (pkts_to_process - pkt); - return pkts_to_process; + retval = pkts_to_process; + } + + atomic_sub(retval, &droq->pkts_pending); + + if (droq->refill_count >= droq->refill_threshold && + readl(droq->pkts_credit_reg) < CN23XX_SLI_DEF_BP) { + octeon_droq_check_hw_for_pkts(droq); + + /* Make sure there are no pkts_pending */ + if (!atomic_read(&droq->pkts_pending)) + octeon_schedule_rxq_oom_work(oct, droq); } - return pkt; + return retval; } int @@ -740,29 +749,19 @@ octeon_droq_process_packets(struct octeon_device *oct, struct octeon_droq *droq, u32 budget) { - u32 pkt_count = 0, pkts_processed = 0; + u32 pkt_count = 0; struct list_head *tmp, *tmp2; - /* Grab the droq lock */ - spin_lock(&droq->lock); - octeon_droq_check_hw_for_pkts(droq); pkt_count = atomic_read(&droq->pkts_pending); - if (!pkt_count) { - spin_unlock(&droq->lock); + if (!pkt_count) return 0; - } if (pkt_count > budget) pkt_count = budget; - pkts_processed = octeon_droq_fast_process_packets(oct, droq, pkt_count); - - atomic_sub(pkts_processed, &droq->pkts_pending); - - /* Release the spin lock */ - spin_unlock(&droq->lock); + octeon_droq_fast_process_packets(oct, droq, pkt_count); list_for_each_safe(tmp, tmp2, &droq->dispatch_list) { struct __dispatch *rdisp = (struct __dispatch *)tmp; @@ -798,8 +797,6 @@ octeon_droq_process_poll_pkts(struct octeon_device *oct, if (budget > droq->max_count) budget = droq->max_count; - spin_lock(&droq->lock); - while (total_pkts_processed < budget) { octeon_droq_check_hw_for_pkts(droq); @@ -813,13 +810,9 @@ octeon_droq_process_poll_pkts(struct octeon_device *oct, octeon_droq_fast_process_packets(oct, droq, pkts_available); - atomic_sub(pkts_processed, &droq->pkts_pending); - total_pkts_processed += pkts_processed; } - spin_unlock(&droq->lock); - list_for_each_safe(tmp, tmp2, &droq->dispatch_list) { struct __dispatch *rdisp = (struct __dispatch *)tmp; @@ -879,9 +872,8 @@ octeon_enable_irq(struct octeon_device *oct, u32 q_no) int octeon_register_droq_ops(struct octeon_device *oct, u32 q_no, struct octeon_droq_ops *ops) { - struct octeon_droq *droq; - unsigned long flags; struct octeon_config *oct_cfg = NULL; + struct octeon_droq *droq; oct_cfg = octeon_get_conf(oct); @@ -901,21 +893,15 @@ int octeon_register_droq_ops(struct octeon_device *oct, u32 q_no, } droq = oct->droq[q_no]; - - spin_lock_irqsave(&droq->lock, flags); - memcpy(&droq->ops, ops, sizeof(struct octeon_droq_ops)); - spin_unlock_irqrestore(&droq->lock, flags); - return 0; } int octeon_unregister_droq_ops(struct octeon_device *oct, u32 q_no) { - unsigned long flags; - struct octeon_droq *droq; struct octeon_config *oct_cfg = NULL; + struct octeon_droq *droq; oct_cfg = octeon_get_conf(oct); @@ -936,14 +922,10 @@ int octeon_unregister_droq_ops(struct octeon_device *oct, u32 q_no) return 0; } - spin_lock_irqsave(&droq->lock, flags); - droq->ops.fptr = NULL; droq->ops.farg = NULL; droq->ops.drop_on_max = 0; - spin_unlock_irqrestore(&droq->lock, flags); - return 0; } diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h index f28f262d4ab6..c9b19e624dce 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h @@ -245,9 +245,6 @@ struct octeon_droq_ops { * Octeon DROQ. 
*/ struct octeon_droq { - /** A spinlock to protect access to this ring. */ - spinlock_t lock; - u32 q_no; u32 pkt_count; @@ -414,6 +411,6 @@ int octeon_droq_process_poll_pkts(struct octeon_device *oct, int octeon_enable_irq(struct octeon_device *oct, u32 q_no); -void octeon_droq_check_oom(struct octeon_droq *droq); +int octeon_retry_droq_refill(struct octeon_droq *droq); #endif /*__OCTEON_DROQ_H__ */ diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h index 2327062e8af6..bebf3bd349c6 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h @@ -94,6 +94,8 @@ struct octeon_instr_queue { u32 pkt_in_done; + u32 pkts_processed; + /** A spinlock to protect access to the input ring.*/ spinlock_t iq_flush_running_lock; @@ -290,13 +292,19 @@ struct octeon_soft_command { u32 ctxsize; /** Time out and callback */ - size_t wait_time; - size_t timeout; + size_t expiry_time; u32 iq_no; void (*callback)(struct octeon_device *, u32, void *); void *callback_arg; + + int caller_is_done; + u32 sc_status; + struct completion complete; }; +/* max timeout (in milliseconds) for a soft request */ +#define LIO_SC_MAX_TMO_MS 60000 + /** Maximum number of buffers to allocate into soft command buffer pool */ #define MAX_SOFT_COMMAND_BUFFERS 256 @@ -317,6 +325,8 @@ struct octeon_sc_buffer_pool { (((octeon_dev_ptr)->instr_queue[iq_no]->stats.field) += count) int octeon_setup_sc_buffer_pool(struct octeon_device *oct); +int octeon_free_sc_done_list(struct octeon_device *oct); +int octeon_free_sc_zombie_list(struct octeon_device *oct); int octeon_free_sc_buffer_pool(struct octeon_device *oct); struct octeon_soft_command * octeon_alloc_soft_command(struct octeon_device *oct, @@ -368,6 +378,9 @@ int octeon_send_command(struct octeon_device *oct, u32 iq_no, u32 force_db, void *cmd, void *buf, u32 datasize, u32 reqtype); +void octeon_dump_soft_command(struct octeon_device *oct, + struct octeon_soft_command *sc); + void octeon_prepare_soft_command(struct octeon_device *oct, struct octeon_soft_command *sc, u8 opcode, u8 subcode, diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_main.h b/drivers/net/ethernet/cavium/liquidio/octeon_main.h index c846eec11a45..073d0647b439 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_main.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_main.h @@ -70,6 +70,10 @@ void octeon_update_tx_completion_counters(void *buf, int reqtype, void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl, unsigned int bytes_compl); void octeon_pf_changed_vf_macaddr(struct octeon_device *oct, u8 *mac); + +void octeon_schedule_rxq_oom_work(struct octeon_device *oct, + struct octeon_droq *droq); + /** Swap 8B blocks */ static inline void octeon_swap_8B_data(u64 *data, u32 blocks) { @@ -146,46 +150,70 @@ err_release_region: return 1; } +/* input parameter: + * sc: pointer to a soft request + * timeout: time in milliseconds that an application wants to wait for the + response of the request. + * 0: the request will wait until its response gets back + * from the firmware within LIO_SC_MAX_TMO_MS milliseconds. + * If the response does not return within + * LIO_SC_MAX_TMO_MS milliseconds, lio_process_ordered_list() + * will move the request to the zombie response list. + * + * return value: + * 0: got the response from firmware for the sc request. + * errno -EINTR: the user aborted the command. + * errno -ETIME: the user-specified timeout value has expired. 
+ * errno -EBUSY: the response of the request did not return in + * a reasonable time (LIO_SC_MAX_TMO_MS). + * the sc will be moved to the zombie response list by + * lio_process_ordered_list() + * + * For a request with a non-zero return value, sc->caller_is_done + * will be marked 1. + * When a request returns zero, the requestor + * should mark sc->caller_is_done with 1 after examining the + * response of sc. + * lio_process_ordered_list() will free the soft command on behalf + * of the soft command requestor. + * This fixes the possible race condition where both the timeout process + * and lio_process_ordered_list()/the callback function free the same + * sc structure. + */ static inline int -sleep_cond(wait_queue_head_t *wait_queue, int *condition) +wait_for_sc_completion_timeout(struct octeon_device *oct_dev, + struct octeon_soft_command *sc, + unsigned long timeout) { int errno = 0; - wait_queue_entry_t we; - - init_waitqueue_entry(&we, current); - add_wait_queue(wait_queue, &we); - while (!(READ_ONCE(*condition))) { - set_current_state(TASK_INTERRUPTIBLE); - if (signal_pending(current)) { - errno = -EINTR; - goto out; - } - schedule(); + long timeout_jiff; + + if (timeout) + timeout_jiff = msecs_to_jiffies(timeout); + else + timeout_jiff = MAX_SCHEDULE_TIMEOUT; + + timeout_jiff = + wait_for_completion_interruptible_timeout(&sc->complete, + timeout_jiff); + if (timeout_jiff == 0) { + dev_err(&oct_dev->pci_dev->dev, "%s: sc timed out\n", + __func__); + WRITE_ONCE(sc->caller_is_done, true); + errno = -ETIME; + } else if (timeout_jiff == -ERESTARTSYS) { + dev_err(&oct_dev->pci_dev->dev, "%s: sc is interrupted\n", + __func__); + WRITE_ONCE(sc->caller_is_done, true); + errno = -EINTR; + } else if (sc->sc_status == OCTEON_REQUEST_TIMEOUT) { + dev_err(&oct_dev->pci_dev->dev, "%s: sc has fatal timeout\n", + __func__); + WRITE_ONCE(sc->caller_is_done, true); + errno = -EBUSY; } -out: - set_current_state(TASK_RUNNING); - remove_wait_queue(wait_queue, &we); - return errno; -} -/* Gives up the CPU for a timeout period. - * Check that the condition is not true before we go to sleep for a - * timeout period. - */ -static inline void -sleep_timeout_cond(wait_queue_head_t *wait_queue, - int *condition, - int timeout) -{ - wait_queue_entry_t we; - - init_waitqueue_entry(&we, current); - add_wait_queue(wait_queue, &we); - set_current_state(TASK_INTERRUPTIBLE); - if (!(*condition)) - schedule_timeout(timeout); - set_current_state(TASK_RUNNING); - remove_wait_queue(wait_queue, &we); + return errno; } #ifndef ROUNDUP4 diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h index d7a3916fe877..50201fc86dcf 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h @@ -35,12 +35,6 @@ #define LIO_IFSTATE_RX_TIMESTAMP_ENABLED 0x08 #define LIO_IFSTATE_RESETTING 0x10 -struct liquidio_if_cfg_context { - u32 octeon_id; - wait_queue_head_t wc; - int cond; -}; - struct liquidio_if_cfg_resp { u64 rh; struct liquidio_if_cfg_info cfg_info; u64 status; }; #define LIO_IFCFG_WAIT_TIME 3000 /* In milli seconds */ +#define LIQUIDIO_NDEV_STATS_POLL_TIME_MS 200 /* Structure of a node in list of gather components maintained by * NIC driver for each network device. 
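The wait_for_sc_completion_timeout() helper added above is the replacement for the removed sleep_cond()/sleep_timeout_cond() wait-queue helpers: a requestor now blocks on the struct completion embedded in the soft command, and lio_process_ordered_list() signals it once a response (or a timeout) has been recorded. For readers unfamiliar with the pattern, here is a minimal self-contained sketch of the same handshake; demo_req, demo_wait and demo_complete are illustrative names, not part of the driver:

    #include <linux/completion.h>
    #include <linux/jiffies.h>
    #include <linux/sched.h>
    #include <linux/errno.h>

    struct demo_req {
            struct completion done; /* signalled by the responder */
            int status;             /* filled in before complete() */
    };

    /* Requestor side: same return convention as the helper above. */
    static int demo_wait(struct demo_req *req, unsigned long timeout_ms)
    {
            long t = timeout_ms ? msecs_to_jiffies(timeout_ms)
                                : MAX_SCHEDULE_TIMEOUT;

            t = wait_for_completion_interruptible_timeout(&req->done, t);
            if (t == 0)                     /* timer expired first */
                    return -ETIME;
            if (t == -ERESTARTSYS)          /* interrupted by a signal */
                    return -EINTR;
            return 0;                       /* responder called complete() */
    }

    /* Responder side: what lio_process_ordered_list() effectively does. */
    static void demo_complete(struct demo_req *req, int status)
    {
            req->status = status;
            complete(&req->done);
    }

The completion must be initialized with init_completion() before the request is posted; in this patch that happens when the soft command is prepared (see octnic_alloc_ctrl_pkt_sc() further below).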
@@ -76,6 +71,12 @@ struct oct_nic_stats_resp { u64 status; }; +struct oct_nic_vf_stats_resp { + u64 rh; + u64 spoofmac_cnt; + u64 status; +}; + struct oct_nic_stats_ctrl { struct completion complete; struct net_device *netdev; @@ -83,16 +84,13 @@ struct oct_nic_stats_ctrl { struct oct_nic_seapi_resp { u64 rh; - u32 speed; + union { + u32 fec_setting; + u32 speed; + }; u64 status; }; -struct liquidio_nic_seapi_ctl_context { - int octeon_id; - u32 status; - struct completion complete; -}; - /** LiquidIO per-interface network private data */ struct lio { /** State of the interface. Rx/Tx happens only in the RUNNING state. */ @@ -178,7 +176,7 @@ struct lio { struct cavium_wq txq_status_wq; /* work queue for rxq oom status */ - struct cavium_wq rxq_status_wq; + struct cavium_wq rxq_status_wq[MAX_POSSIBLE_OCTEON_OUTPUT_QUEUES]; /* work queue for link status */ struct cavium_wq link_status_wq; @@ -187,6 +185,7 @@ struct lio { struct cavium_wq sync_octeon_time_wq; int netdev_uc_count; + struct cavium_wk stats_wk; }; #define LIO_SIZE (sizeof(struct lio)) @@ -225,7 +224,7 @@ irqreturn_t liquidio_msix_intr_handler(int irq __attribute__((unused)), int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs); -int octnet_get_link_stats(struct net_device *netdev); +void lio_fetch_stats(struct work_struct *work); int lio_wait_for_clean_oq(struct octeon_device *oct); /** @@ -234,16 +233,14 @@ int lio_wait_for_clean_oq(struct octeon_device *oct); */ void liquidio_set_ethtool_ops(struct net_device *netdev); -void lio_if_cfg_callback(struct octeon_device *oct, - u32 status __attribute__((unused)), - void *buf); - void lio_delete_glists(struct lio *lio); int lio_setup_glists(struct octeon_device *oct, struct lio *lio, int num_qs); int liquidio_get_speed(struct lio *lio); int liquidio_set_speed(struct lio *lio, int speed); +int liquidio_get_fec(struct lio *lio); +int liquidio_set_fec(struct lio *lio, int on_off); /** * \brief Net device change_mtu diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.c b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c index 150609bd8849..1a706f81bbb0 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_nic.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c @@ -75,8 +75,7 @@ octeon_alloc_soft_command_resp(struct octeon_device *oct, else sc->cmd.cmd2.rptr = sc->dmarptr; - sc->wait_time = 1000; - sc->timeout = jiffies + sc->wait_time; + sc->expiry_time = jiffies + msecs_to_jiffies(LIO_SC_MAX_TMO_MS); return sc; } @@ -92,29 +91,6 @@ int octnet_send_nic_data_pkt(struct octeon_device *oct, ndata->reqtype); } -static void octnet_link_ctrl_callback(struct octeon_device *oct, - u32 status, - void *sc_ptr) -{ - struct octeon_soft_command *sc = (struct octeon_soft_command *)sc_ptr; - struct octnic_ctrl_pkt *nctrl; - - nctrl = (struct octnic_ctrl_pkt *)sc->ctxptr; - - /* Call the callback function if status is zero (meaning OK) or status - * contains a firmware status code bigger than zero (meaning the - * firmware is reporting an error). - * If no response was expected, status is OK if the command was posted - * successfully. 
- */ - if ((!status || status > FIRMWARE_STATUS_CODE(0)) && nctrl->cb_fn) { - nctrl->status = status; - nctrl->cb_fn(nctrl); - } - - octeon_free_soft_command(oct, sc); -} - static inline struct octeon_soft_command *octnic_alloc_ctrl_pkt_sc(struct octeon_device *oct, struct octnic_ctrl_pkt *nctrl) @@ -127,17 +103,14 @@ static inline struct octeon_soft_command uddsize = (u32)(nctrl->ncmd.s.more * 8); datasize = OCTNET_CMD_SIZE + uddsize; - rdatasize = (nctrl->wait_time) ? 16 : 0; + rdatasize = 16; sc = (struct octeon_soft_command *) - octeon_alloc_soft_command(oct, datasize, rdatasize, - sizeof(struct octnic_ctrl_pkt)); + octeon_alloc_soft_command(oct, datasize, rdatasize, 0); if (!sc) return NULL; - memcpy(sc->ctxptr, nctrl, sizeof(struct octnic_ctrl_pkt)); - data = (u8 *)sc->virtdptr; memcpy(data, &nctrl->ncmd, OCTNET_CMD_SIZE); @@ -154,9 +127,8 @@ static inline struct octeon_soft_command octeon_prepare_soft_command(oct, sc, OPCODE_NIC, OPCODE_NIC_CMD, 0, 0, 0); - sc->callback = octnet_link_ctrl_callback; - sc->callback_arg = sc; - sc->wait_time = nctrl->wait_time; + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; return sc; } @@ -199,5 +171,28 @@ octnet_send_nic_ctrl_pkt(struct octeon_device *oct, } spin_unlock_bh(&oct->cmd_resp_wqlock); + + if (nctrl->ncmd.s.cmdgroup == 0) { + switch (nctrl->ncmd.s.cmd) { + /* caller holds lock, can not sleep */ + case OCTNET_CMD_CHANGE_DEVFLAGS: + case OCTNET_CMD_SET_MULTI_LIST: + case OCTNET_CMD_SET_UC_LIST: + WRITE_ONCE(sc->caller_is_done, true); + return retval; + } + } + + retval = wait_for_sc_completion_timeout(oct, sc, 0); + if (retval) + return (retval); + + nctrl->sc_status = sc->sc_status; + retval = nctrl->sc_status; + if (nctrl->cb_fn) + nctrl->cb_fn(nctrl); + + WRITE_ONCE(sc->caller_is_done, true); + return retval; } diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.h b/drivers/net/ethernet/cavium/liquidio/octeon_nic.h index de4130d26a98..87dd6f89ce51 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_nic.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.h @@ -52,20 +52,13 @@ struct octnic_ctrl_pkt { /** Input queue to use to send this command. */ u64 iq_no; - /** Time to wait for Octeon software to respond to this control command. - * If wait_time is 0, OSI assumes no response is expected. - */ - size_t wait_time; - /** The network device that issued the control command. 
*/ u64 netpndev; /** Callback function called when the command has been fetched */ octnic_ctrl_pkt_cb_fn_t cb_fn; - u32 status; - u16 *response_code; - struct completion *completion; + u32 sc_status; }; #define MAX_UDD_SIZE(nctrl) (sizeof((nctrl)->udd)) diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c index 8f746e1348d4..c6f4cbda040f 100644 --- a/drivers/net/ethernet/cavium/liquidio/request_manager.c +++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c @@ -123,6 +123,7 @@ int octeon_init_instr_queue(struct octeon_device *oct, iq->do_auto_flush = 1; iq->db_timeout = (u32)conf->db_timeout; atomic_set(&iq->instr_pending, 0); + iq->pkts_processed = 0; /* Initialize the spinlock for this instruction queue */ spin_lock_init(&iq->lock); @@ -379,7 +380,6 @@ lio_process_iq_request_list(struct octeon_device *oct, u32 inst_count = 0; unsigned int pkts_compl = 0, bytes_compl = 0; struct octeon_soft_command *sc; - struct octeon_instr_irh *irh; unsigned long flags; while (old != iq->octeon_read_index) { @@ -401,40 +401,21 @@ lio_process_iq_request_list(struct octeon_device *oct, case REQTYPE_RESP_NET: case REQTYPE_SOFT_COMMAND: sc = buf; - - if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) - irh = (struct octeon_instr_irh *) - &sc->cmd.cmd3.irh; - else - irh = (struct octeon_instr_irh *) - &sc->cmd.cmd2.irh; - if (irh->rflag) { - /* We're expecting a response from Octeon. - * It's up to lio_process_ordered_list() to - * process sc. Add sc to the ordered soft - * command response list because we expect - * a response from Octeon. - */ - spin_lock_irqsave - (&oct->response_list - [OCTEON_ORDERED_SC_LIST].lock, - flags); - atomic_inc(&oct->response_list - [OCTEON_ORDERED_SC_LIST]. - pending_req_count); - list_add_tail(&sc->node, &oct->response_list - [OCTEON_ORDERED_SC_LIST].head); - spin_unlock_irqrestore - (&oct->response_list - [OCTEON_ORDERED_SC_LIST].lock, - flags); - } else { - if (sc->callback) { - /* This callback must not sleep */ - sc->callback(oct, OCTEON_REQUEST_DONE, - sc->callback_arg); - } - } + /* We're expecting a response from Octeon. + * It's up to lio_process_ordered_list() to + * process sc. Add sc to the ordered soft + * command response list because we expect + * a response from Octeon. 
+ */ + spin_lock_irqsave(&oct->response_list + [OCTEON_ORDERED_SC_LIST].lock, flags); + atomic_inc(&oct->response_list + [OCTEON_ORDERED_SC_LIST].pending_req_count); + list_add_tail(&sc->node, &oct->response_list + [OCTEON_ORDERED_SC_LIST].head); + spin_unlock_irqrestore(&oct->response_list + [OCTEON_ORDERED_SC_LIST].lock, + flags); break; default: dev_err(&oct->pci_dev->dev, @@ -459,7 +440,7 @@ lio_process_iq_request_list(struct octeon_device *oct, if (atomic_read(&oct->response_list [OCTEON_ORDERED_SC_LIST].pending_req_count)) - queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(1)); + queue_work(cwq->wq, &cwq->wk.work.work); return inst_count; } @@ -495,6 +476,7 @@ octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq, lio_process_iq_request_list(oct, iq, 0); if (inst_processed) { + iq->pkts_processed += inst_processed; atomic_sub(inst_processed, &iq->instr_pending); iq->stats.instr_processed += inst_processed; } @@ -753,8 +735,7 @@ int octeon_send_soft_command(struct octeon_device *oct, len = (u32)ih2->dlengsz; } - if (sc->wait_time) - sc->timeout = jiffies + sc->wait_time; + sc->expiry_time = jiffies + msecs_to_jiffies(LIO_SC_MAX_TMO_MS); return (octeon_send_command(oct, sc->iq_no, 1, &sc->cmd, sc, len, REQTYPE_SOFT_COMMAND)); @@ -789,11 +770,76 @@ int octeon_setup_sc_buffer_pool(struct octeon_device *oct) return 0; } +int octeon_free_sc_done_list(struct octeon_device *oct) +{ + struct octeon_response_list *done_sc_list, *zombie_sc_list; + struct octeon_soft_command *sc; + struct list_head *tmp, *tmp2; + spinlock_t *sc_lists_lock; /* lock for response_list */ + + done_sc_list = &oct->response_list[OCTEON_DONE_SC_LIST]; + zombie_sc_list = &oct->response_list[OCTEON_ZOMBIE_SC_LIST]; + + if (!atomic_read(&done_sc_list->pending_req_count)) + return 0; + + sc_lists_lock = &oct->response_list[OCTEON_ORDERED_SC_LIST].lock; + + spin_lock_bh(sc_lists_lock); + + list_for_each_safe(tmp, tmp2, &done_sc_list->head) { + sc = list_entry(tmp, struct octeon_soft_command, node); + + if (READ_ONCE(sc->caller_is_done)) { + list_del(&sc->node); + atomic_dec(&done_sc_list->pending_req_count); + + if (*sc->status_word == COMPLETION_WORD_INIT) { + /* timeout; move sc to zombie list */ + list_add_tail(&sc->node, &zombie_sc_list->head); + atomic_inc(&zombie_sc_list->pending_req_count); + } else { + octeon_free_soft_command(oct, sc); + } + } + } + + spin_unlock_bh(sc_lists_lock); + + return 0; +} + +int octeon_free_sc_zombie_list(struct octeon_device *oct) +{ + struct octeon_response_list *zombie_sc_list; + struct octeon_soft_command *sc; + struct list_head *tmp, *tmp2; + spinlock_t *sc_lists_lock; /* lock for response_list */ + + zombie_sc_list = &oct->response_list[OCTEON_ZOMBIE_SC_LIST]; + sc_lists_lock = &oct->response_list[OCTEON_ORDERED_SC_LIST].lock; + + spin_lock_bh(sc_lists_lock); + + list_for_each_safe(tmp, tmp2, &zombie_sc_list->head) { + list_del(tmp); + atomic_dec(&zombie_sc_list->pending_req_count); + sc = list_entry(tmp, struct octeon_soft_command, node); + octeon_free_soft_command(oct, sc); + } + + spin_unlock_bh(sc_lists_lock); + + return 0; +} + int octeon_free_sc_buffer_pool(struct octeon_device *oct) { struct list_head *tmp, *tmp2; struct octeon_soft_command *sc; + octeon_free_sc_zombie_list(oct); + spin_lock_bh(&oct->sc_buf_pool.lock); list_for_each_safe(tmp, tmp2, &oct->sc_buf_pool.head) { @@ -822,6 +868,9 @@ struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct, struct octeon_soft_command *sc = NULL; struct list_head *tmp; + if 
(!rdatasize) + rdatasize = 16; + WARN_ON((offset + datasize + rdatasize + ctxsize) > SOFT_COMMAND_BUFFER_SIZE); diff --git a/drivers/net/ethernet/cavium/liquidio/response_manager.c b/drivers/net/ethernet/cavium/liquidio/response_manager.c index fe5b53700576..ac7747ccf56a 100644 --- a/drivers/net/ethernet/cavium/liquidio/response_manager.c +++ b/drivers/net/ethernet/cavium/liquidio/response_manager.c @@ -69,6 +69,8 @@ int lio_process_ordered_list(struct octeon_device *octeon_dev, u32 status; u64 status64; + octeon_free_sc_done_list(octeon_dev); + ordered_sc_list = &octeon_dev->response_list[OCTEON_ORDERED_SC_LIST]; do { @@ -111,26 +113,88 @@ int lio_process_ordered_list(struct octeon_device *octeon_dev, } } } - } else if (force_quit || (sc->timeout && - time_after(jiffies, (unsigned long)sc->timeout))) { - dev_err(&octeon_dev->pci_dev->dev, "%s: cmd failed, timeout (%ld, %ld)\n", - __func__, (long)jiffies, (long)sc->timeout); + } else if (unlikely(force_quit) || (sc->expiry_time && + time_after(jiffies, (unsigned long)sc->expiry_time))) { + struct octeon_instr_irh *irh = + (struct octeon_instr_irh *)&sc->cmd.cmd3.irh; + + dev_err(&octeon_dev->pci_dev->dev, "%s: ", __func__); + dev_err(&octeon_dev->pci_dev->dev, + "cmd %x/%x/%llx/%llx failed, ", + irh->opcode, irh->subcode, + sc->cmd.cmd3.ossp[0], sc->cmd.cmd3.ossp[1]); + dev_err(&octeon_dev->pci_dev->dev, + "timeout (%ld, %ld)\n", + (long)jiffies, (long)sc->expiry_time); status = OCTEON_REQUEST_TIMEOUT; } if (status != OCTEON_REQUEST_PENDING) { + sc->sc_status = status; + /* we have received a response or we have timed out */ /* remove node from linked list */ list_del(&sc->node); atomic_dec(&octeon_dev->response_list - [OCTEON_ORDERED_SC_LIST]. - pending_req_count); - spin_unlock_bh - (&ordered_sc_list->lock); + [OCTEON_ORDERED_SC_LIST]. + pending_req_count); + + if (!sc->callback) { + atomic_inc(&octeon_dev->response_list + [OCTEON_DONE_SC_LIST]. + pending_req_count); + list_add_tail(&sc->node, + &octeon_dev->response_list + [OCTEON_DONE_SC_LIST].head); + + if (unlikely(READ_ONCE(sc->caller_is_done))) { + /* caller does not wait for response + * from firmware + */ + if (status != OCTEON_REQUEST_DONE) { + struct octeon_instr_irh *irh; + + irh = + (struct octeon_instr_irh *) + &sc->cmd.cmd3.irh; + dev_dbg + (&octeon_dev->pci_dev->dev, + "%s: sc failed: opcode=%x, ", + __func__, irh->opcode); + dev_dbg + (&octeon_dev->pci_dev->dev, + "subcode=%x, ossp[0]=%llx, ", + irh->subcode, + sc->cmd.cmd3.ossp[0]); + dev_dbg + (&octeon_dev->pci_dev->dev, + "ossp[1]=%llx, status=%d\n", + sc->cmd.cmd3.ossp[1], + status); + } + } else { + complete(&sc->complete); + } + + spin_unlock_bh(&ordered_sc_list->lock); + } else { + /* sc with callback function */ + if (status == OCTEON_REQUEST_TIMEOUT) { + atomic_inc(&octeon_dev->response_list + [OCTEON_ZOMBIE_SC_LIST]. + pending_req_count); + list_add_tail(&sc->node, + &octeon_dev->response_list + [OCTEON_ZOMBIE_SC_LIST]. 
+ head); + } + + spin_unlock_bh(&ordered_sc_list->lock); - if (sc->callback) sc->callback(octeon_dev, status, sc->callback_arg); + /* sc is freed by caller */ + } request_complete++; diff --git a/drivers/net/ethernet/cavium/liquidio/response_manager.h b/drivers/net/ethernet/cavium/liquidio/response_manager.h index 9169c2815dba..ed4020d26fae 100644 --- a/drivers/net/ethernet/cavium/liquidio/response_manager.h +++ b/drivers/net/ethernet/cavium/liquidio/response_manager.h @@ -53,7 +53,9 @@ enum { OCTEON_ORDERED_LIST = 0, OCTEON_UNORDERED_NONBLOCKING_LIST = 1, OCTEON_UNORDERED_BLOCKING_LIST = 2, - OCTEON_ORDERED_SC_LIST = 3 + OCTEON_ORDERED_SC_LIST = 3, + OCTEON_DONE_SC_LIST = 4, + OCTEON_ZOMBIE_SC_LIST = 5 }; /** Response Order values for a Octeon Request. */ diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c index bb43ddb7539e..4b3aecf98f2a 100644 --- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c +++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c @@ -1268,12 +1268,13 @@ static int octeon_mgmt_stop(struct net_device *netdev) return 0; } -static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev) +static netdev_tx_t +octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev) { struct octeon_mgmt *p = netdev_priv(netdev); union mgmt_port_ring_entry re; unsigned long flags; - int rv = NETDEV_TX_BUSY; + netdev_tx_t rv = NETDEV_TX_BUSY; re.d64 = 0; re.s.tstamp = ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) != 0); diff --git a/drivers/net/ethernet/chelsio/Kconfig b/drivers/net/ethernet/chelsio/Kconfig index e2cdfa75673f..75c1c5ed2387 100644 --- a/drivers/net/ethernet/chelsio/Kconfig +++ b/drivers/net/ethernet/chelsio/Kconfig @@ -67,6 +67,7 @@ config CHELSIO_T3 config CHELSIO_T4 tristate "Chelsio Communications T4/T5/T6 Ethernet support" depends on PCI && (IPV6 || IPV6=n) + depends on THERMAL || !THERMAL select FW_LOADER select MDIO select ZLIB_DEFLATE diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c index c34ea385fe4a..1e82b9efe447 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c @@ -33,7 +33,6 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> -#include <linux/moduleparam.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/dma-mapping.h> @@ -3440,8 +3439,7 @@ static void remove_one(struct pci_dev *pdev) free_netdev(adapter->port[i]); iounmap(adapter->regs); - if (adapter->nofail_skb) - kfree_skb(adapter->nofail_skb); + kfree_skb(adapter->nofail_skb); kfree(adapter); pci_release_regions(pdev); pci_disable_device(pdev); diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c index 50cd660732c5..84604aff53ce 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c @@ -1302,8 +1302,7 @@ void cxgb3_offload_deactivate(struct adapter *adapter) rcu_read_unlock(); RCU_INIT_POINTER(tdev->l2opt, NULL); call_rcu(&d->rcu_head, clean_l2_data); - if (t->nofail_skb) - kfree_skb(t->nofail_skb); + kfree_skb(t->nofail_skb); kfree(t); } diff --git a/drivers/net/ethernet/chelsio/cxgb4/Makefile b/drivers/net/ethernet/chelsio/cxgb4/Makefile index bea6a059a8f1..78e5d17a1d5f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/Makefile +++ b/drivers/net/ethernet/chelsio/cxgb4/Makefile @@ -12,3 +12,6 @@ cxgb4-objs := cxgb4_main.o l2t.o smt.o t4_hw.o 
sge.o clip_tbl.o cxgb4_ethtool.o cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o cxgb4-$(CONFIG_CHELSIO_T4_FCOE) += cxgb4_fcoe.o cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o +ifdef CONFIG_THERMAL +cxgb4-objs += cxgb4_thermal.o +endif diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h index 36d25883d123..b2d617abcf49 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h @@ -315,6 +315,48 @@ struct cudbg_pbt_tables { u32 pbt_data[CUDBG_PBT_DATA_ENTRIES]; }; +enum cudbg_qdesc_qtype { + CUDBG_QTYPE_UNKNOWN = 0, + CUDBG_QTYPE_NIC_TXQ, + CUDBG_QTYPE_NIC_RXQ, + CUDBG_QTYPE_NIC_FLQ, + CUDBG_QTYPE_CTRLQ, + CUDBG_QTYPE_FWEVTQ, + CUDBG_QTYPE_INTRQ, + CUDBG_QTYPE_PTP_TXQ, + CUDBG_QTYPE_OFLD_TXQ, + CUDBG_QTYPE_RDMA_RXQ, + CUDBG_QTYPE_RDMA_FLQ, + CUDBG_QTYPE_RDMA_CIQ, + CUDBG_QTYPE_ISCSI_RXQ, + CUDBG_QTYPE_ISCSI_FLQ, + CUDBG_QTYPE_ISCSIT_RXQ, + CUDBG_QTYPE_ISCSIT_FLQ, + CUDBG_QTYPE_CRYPTO_TXQ, + CUDBG_QTYPE_CRYPTO_RXQ, + CUDBG_QTYPE_CRYPTO_FLQ, + CUDBG_QTYPE_TLS_RXQ, + CUDBG_QTYPE_TLS_FLQ, + CUDBG_QTYPE_MAX, +}; + +#define CUDBG_QDESC_REV 1 + +struct cudbg_qdesc_entry { + u32 data_size; + u32 qtype; + u32 qid; + u32 desc_size; + u32 num_desc; + u8 data[0]; /* Must be last */ +}; + +struct cudbg_qdesc_info { + u32 qdesc_entry_size; + u32 num_queues; + u8 data[0]; /* Must be last */ +}; + #define IREG_NUM_ELEM 4 static const u32 t6_tp_pio_array[][IREG_NUM_ELEM] = { diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h index 215fe6260fd7..dec63c15c0ba 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h @@ -81,7 +81,8 @@ enum cudbg_dbg_entity_type { CUDBG_MBOX_LOG = 66, CUDBG_HMA_INDIRECT = 67, CUDBG_HMA = 68, - CUDBG_MAX_ENTITY = 70, + CUDBG_QDESC = 70, + CUDBG_MAX_ENTITY = 71, }; struct cudbg_init { diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c index d97e0d7e541a..7c49681407ad 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c @@ -19,6 +19,7 @@ #include "t4_regs.h" #include "cxgb4.h" +#include "cxgb4_cudbg.h" #include "cudbg_if.h" #include "cudbg_lib_common.h" #include "cudbg_entity.h" @@ -2890,3 +2891,240 @@ int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init, } return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); } + +void cudbg_fill_qdesc_num_and_size(const struct adapter *padap, + u32 *num, u32 *size) +{ + u32 tot_entries = 0, tot_size = 0; + + /* NIC TXQ, RXQ, FLQ, and CTRLQ */ + tot_entries += MAX_ETH_QSETS * 3; + tot_entries += MAX_CTRL_QUEUES; + + tot_size += MAX_ETH_QSETS * MAX_TXQ_ENTRIES * MAX_TXQ_DESC_SIZE; + tot_size += MAX_ETH_QSETS * MAX_RSPQ_ENTRIES * MAX_RXQ_DESC_SIZE; + tot_size += MAX_ETH_QSETS * MAX_RX_BUFFERS * MAX_FL_DESC_SIZE; + tot_size += MAX_CTRL_QUEUES * MAX_CTRL_TXQ_ENTRIES * + MAX_CTRL_TXQ_DESC_SIZE; + + /* FW_EVTQ and INTRQ */ + tot_entries += INGQ_EXTRAS; + tot_size += INGQ_EXTRAS * MAX_RSPQ_ENTRIES * MAX_RXQ_DESC_SIZE; + + /* PTP_TXQ */ + tot_entries += 1; + tot_size += MAX_TXQ_ENTRIES * MAX_TXQ_DESC_SIZE; + + /* ULD TXQ, RXQ, and FLQ */ + tot_entries += CXGB4_TX_MAX * MAX_OFLD_QSETS; + tot_entries += CXGB4_ULD_MAX * MAX_ULD_QSETS * 2; + + tot_size += CXGB4_TX_MAX * MAX_OFLD_QSETS * MAX_TXQ_ENTRIES * + MAX_TXQ_DESC_SIZE; + tot_size += CXGB4_ULD_MAX * MAX_ULD_QSETS * MAX_RSPQ_ENTRIES * + 
MAX_RXQ_DESC_SIZE; + tot_size += CXGB4_ULD_MAX * MAX_ULD_QSETS * MAX_RX_BUFFERS * + MAX_FL_DESC_SIZE; + + /* ULD CIQ */ + tot_entries += CXGB4_ULD_MAX * MAX_ULD_QSETS; + tot_size += CXGB4_ULD_MAX * MAX_ULD_QSETS * SGE_MAX_IQ_SIZE * + MAX_RXQ_DESC_SIZE; + + tot_size += sizeof(struct cudbg_ver_hdr) + + sizeof(struct cudbg_qdesc_info) + + sizeof(struct cudbg_qdesc_entry) * tot_entries; + + if (num) + *num = tot_entries; + + if (size) + *size = tot_size; +} + +int cudbg_collect_qdesc(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + u32 num_queues = 0, tot_entries = 0, size = 0; + struct adapter *padap = pdbg_init->adap; + struct cudbg_buffer temp_buff = { 0 }; + struct cudbg_qdesc_entry *qdesc_entry; + struct cudbg_qdesc_info *qdesc_info; + struct cudbg_ver_hdr *ver_hdr; + struct sge *s = &padap->sge; + u32 i, j, cur_off, tot_len; + u8 *data; + int rc; + + cudbg_fill_qdesc_num_and_size(padap, &tot_entries, &size); + size = min_t(u32, size, CUDBG_DUMP_BUFF_SIZE); + tot_len = size; + data = kvzalloc(size, GFP_KERNEL); + if (!data) + return -ENOMEM; + + ver_hdr = (struct cudbg_ver_hdr *)data; + ver_hdr->signature = CUDBG_ENTITY_SIGNATURE; + ver_hdr->revision = CUDBG_QDESC_REV; + ver_hdr->size = sizeof(struct cudbg_qdesc_info); + size -= sizeof(*ver_hdr); + + qdesc_info = (struct cudbg_qdesc_info *)(data + + sizeof(*ver_hdr)); + size -= sizeof(*qdesc_info); + qdesc_entry = (struct cudbg_qdesc_entry *)qdesc_info->data; + +#define QDESC_GET(q, desc, type, label) do { \ + if (size <= 0) { \ + goto label; \ + } \ + if (desc) { \ + cudbg_fill_qdesc_##q(q, type, qdesc_entry); \ + size -= sizeof(*qdesc_entry) + qdesc_entry->data_size; \ + num_queues++; \ + qdesc_entry = cudbg_next_qdesc(qdesc_entry); \ + } \ +} while (0) + +#define QDESC_GET_TXQ(q, type, label) do { \ + struct sge_txq *txq = (struct sge_txq *)q; \ + QDESC_GET(txq, txq->desc, type, label); \ +} while (0) + +#define QDESC_GET_RXQ(q, type, label) do { \ + struct sge_rspq *rxq = (struct sge_rspq *)q; \ + QDESC_GET(rxq, rxq->desc, type, label); \ +} while (0) + +#define QDESC_GET_FLQ(q, type, label) do { \ + struct sge_fl *flq = (struct sge_fl *)q; \ + QDESC_GET(flq, flq->desc, type, label); \ +} while (0) + + /* NIC TXQ */ + for (i = 0; i < s->ethqsets; i++) + QDESC_GET_TXQ(&s->ethtxq[i].q, CUDBG_QTYPE_NIC_TXQ, out); + + /* NIC RXQ */ + for (i = 0; i < s->ethqsets; i++) + QDESC_GET_RXQ(&s->ethrxq[i].rspq, CUDBG_QTYPE_NIC_RXQ, out); + + /* NIC FLQ */ + for (i = 0; i < s->ethqsets; i++) + QDESC_GET_FLQ(&s->ethrxq[i].fl, CUDBG_QTYPE_NIC_FLQ, out); + + /* NIC CTRLQ */ + for (i = 0; i < padap->params.nports; i++) + QDESC_GET_TXQ(&s->ctrlq[i].q, CUDBG_QTYPE_CTRLQ, out); + + /* FW_EVTQ */ + QDESC_GET_RXQ(&s->fw_evtq, CUDBG_QTYPE_FWEVTQ, out); + + /* INTRQ */ + QDESC_GET_RXQ(&s->intrq, CUDBG_QTYPE_INTRQ, out); + + /* PTP_TXQ */ + QDESC_GET_TXQ(&s->ptptxq.q, CUDBG_QTYPE_PTP_TXQ, out); + + /* ULD Queues */ + mutex_lock(&uld_mutex); + + if (s->uld_txq_info) { + struct sge_uld_txq_info *utxq; + + /* ULD TXQ */ + for (j = 0; j < CXGB4_TX_MAX; j++) { + if (!s->uld_txq_info[j]) + continue; + + utxq = s->uld_txq_info[j]; + for (i = 0; i < utxq->ntxq; i++) + QDESC_GET_TXQ(&utxq->uldtxq[i].q, + cudbg_uld_txq_to_qtype(j), + out_unlock); + } + } + + if (s->uld_rxq_info) { + struct sge_uld_rxq_info *urxq; + u32 base; + + /* ULD RXQ */ + for (j = 0; j < CXGB4_ULD_MAX; j++) { + if (!s->uld_rxq_info[j]) + continue; + + urxq = s->uld_rxq_info[j]; + for (i = 0; i < urxq->nrxq; i++) + 
QDESC_GET_RXQ(&urxq->uldrxq[i].rspq, + cudbg_uld_rxq_to_qtype(j), + out_unlock); + } + + /* ULD FLQ */ + for (j = 0; j < CXGB4_ULD_MAX; j++) { + if (!s->uld_rxq_info[j]) + continue; + + urxq = s->uld_rxq_info[j]; + for (i = 0; i < urxq->nrxq; i++) + QDESC_GET_FLQ(&urxq->uldrxq[i].fl, + cudbg_uld_flq_to_qtype(j), + out_unlock); + } + + /* ULD CIQ */ + for (j = 0; j < CXGB4_ULD_MAX; j++) { + if (!s->uld_rxq_info[j]) + continue; + + urxq = s->uld_rxq_info[j]; + base = urxq->nrxq; + for (i = 0; i < urxq->nciq; i++) + QDESC_GET_RXQ(&urxq->uldrxq[base + i].rspq, + cudbg_uld_ciq_to_qtype(j), + out_unlock); + } + } + +out_unlock: + mutex_unlock(&uld_mutex); + +out: + qdesc_info->qdesc_entry_size = sizeof(*qdesc_entry); + qdesc_info->num_queues = num_queues; + cur_off = 0; + while (tot_len) { + u32 chunk_size = min_t(u32, tot_len, CUDBG_CHUNK_SIZE); + + rc = cudbg_get_buff(pdbg_init, dbg_buff, chunk_size, + &temp_buff); + if (rc) { + cudbg_err->sys_warn = CUDBG_STATUS_PARTIAL_DATA; + goto out_free; + } + + memcpy(temp_buff.data, data + cur_off, chunk_size); + tot_len -= chunk_size; + cur_off += chunk_size; + rc = cudbg_write_and_release_buff(pdbg_init, &temp_buff, + dbg_buff); + if (rc) { + cudbg_put_buff(pdbg_init, &temp_buff); + cudbg_err->sys_warn = CUDBG_STATUS_PARTIAL_DATA; + goto out_free; + } + } + +out_free: + if (data) + kvfree(data); + +#undef QDESC_GET_FLQ +#undef QDESC_GET_RXQ +#undef QDESC_GET_TXQ +#undef QDESC_GET + + return rc; +} diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h index eebefe7cd18e..f047a01a3e5b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h @@ -171,6 +171,9 @@ int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init, int cudbg_collect_hma_meminfo(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err); +int cudbg_collect_qdesc(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); struct cudbg_entity_hdr *cudbg_get_entity_hdr(void *outbuf, int i); void cudbg_align_debug_buffer(struct cudbg_buffer *dbg_buff, @@ -182,4 +185,107 @@ int cudbg_fill_meminfo(struct adapter *padap, struct cudbg_meminfo *meminfo_buff); void cudbg_fill_le_tcam_info(struct adapter *padap, struct cudbg_tcam *tcam_region); +void cudbg_fill_qdesc_num_and_size(const struct adapter *padap, + u32 *num, u32 *size); + +static inline u32 cudbg_uld_txq_to_qtype(u32 uld) +{ + switch (uld) { + case CXGB4_TX_OFLD: + return CUDBG_QTYPE_OFLD_TXQ; + case CXGB4_TX_CRYPTO: + return CUDBG_QTYPE_CRYPTO_TXQ; + } + + return CUDBG_QTYPE_UNKNOWN; +} + +static inline u32 cudbg_uld_rxq_to_qtype(u32 uld) +{ + switch (uld) { + case CXGB4_ULD_RDMA: + return CUDBG_QTYPE_RDMA_RXQ; + case CXGB4_ULD_ISCSI: + return CUDBG_QTYPE_ISCSI_RXQ; + case CXGB4_ULD_ISCSIT: + return CUDBG_QTYPE_ISCSIT_RXQ; + case CXGB4_ULD_CRYPTO: + return CUDBG_QTYPE_CRYPTO_RXQ; + case CXGB4_ULD_TLS: + return CUDBG_QTYPE_TLS_RXQ; + } + + return CUDBG_QTYPE_UNKNOWN; +} + +static inline u32 cudbg_uld_flq_to_qtype(u32 uld) +{ + switch (uld) { + case CXGB4_ULD_RDMA: + return CUDBG_QTYPE_RDMA_FLQ; + case CXGB4_ULD_ISCSI: + return CUDBG_QTYPE_ISCSI_FLQ; + case CXGB4_ULD_ISCSIT: + return CUDBG_QTYPE_ISCSIT_FLQ; + case CXGB4_ULD_CRYPTO: + return CUDBG_QTYPE_CRYPTO_FLQ; + case CXGB4_ULD_TLS: + return CUDBG_QTYPE_TLS_FLQ; + } + + return CUDBG_QTYPE_UNKNOWN; +} + +static inline u32 cudbg_uld_ciq_to_qtype(u32 uld) +{ + switch (uld) { + case CXGB4_ULD_RDMA: + 
return CUDBG_QTYPE_RDMA_CIQ; + } + + return CUDBG_QTYPE_UNKNOWN; +} + +static inline void cudbg_fill_qdesc_txq(const struct sge_txq *txq, + enum cudbg_qdesc_qtype type, + struct cudbg_qdesc_entry *entry) +{ + entry->qtype = type; + entry->qid = txq->cntxt_id; + entry->desc_size = sizeof(struct tx_desc); + entry->num_desc = txq->size; + entry->data_size = txq->size * sizeof(struct tx_desc); + memcpy(entry->data, txq->desc, entry->data_size); +} + +static inline void cudbg_fill_qdesc_rxq(const struct sge_rspq *rxq, + enum cudbg_qdesc_qtype type, + struct cudbg_qdesc_entry *entry) +{ + entry->qtype = type; + entry->qid = rxq->cntxt_id; + entry->desc_size = rxq->iqe_len; + entry->num_desc = rxq->size; + entry->data_size = rxq->size * rxq->iqe_len; + memcpy(entry->data, rxq->desc, entry->data_size); +} + +static inline void cudbg_fill_qdesc_flq(const struct sge_fl *flq, + enum cudbg_qdesc_qtype type, + struct cudbg_qdesc_entry *entry) +{ + entry->qtype = type; + entry->qid = flq->cntxt_id; + entry->desc_size = sizeof(__be64); + entry->num_desc = flq->size; + entry->data_size = flq->size * sizeof(__be64); + memcpy(entry->data, flq->desc, entry->data_size); +} + +static inline +struct cudbg_qdesc_entry *cudbg_next_qdesc(struct cudbg_qdesc_entry *e) +{ + return (struct cudbg_qdesc_entry *) + ((u8 *)e + sizeof(*e) + e->data_size); +} #endif /* __CUDBG_LIB_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 76d16747f513..b16f4b3ef4c5 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -52,6 +52,7 @@ #include <linux/ptp_clock_kernel.h> #include <linux/ptp_classify.h> #include <linux/crash_dump.h> +#include <linux/thermal.h> #include <asm/io.h> #include "t4_chip_type.h" #include "cxgb4_uld.h" @@ -533,6 +534,13 @@ enum { }; enum { + MAX_TXQ_DESC_SIZE = 64, + MAX_RXQ_DESC_SIZE = 128, + MAX_FL_DESC_SIZE = 8, + MAX_CTRL_TXQ_DESC_SIZE = 64, +}; + +enum { INGQ_EXTRAS = 2, /* firmware event queue and */ /* forwarded interrupts */ MAX_INGQ = MAX_ETH_QSETS + INGQ_EXTRAS, @@ -685,6 +693,7 @@ struct sge_eth_stats { /* Ethernet queue statistics */ unsigned long rx_cso; /* # of Rx checksum offloads */ unsigned long vlan_ex; /* # of Rx VLAN extractions */ unsigned long rx_drops; /* # of packets dropped due to no mem */ + unsigned long bad_rx_pkts; /* # of packets with err_vec!=0 */ }; struct sge_eth_rxq { /* SW Ethernet Rx queue */ @@ -882,6 +891,14 @@ struct mps_encap_entry { atomic_t refcnt; }; +#if IS_ENABLED(CONFIG_THERMAL) +struct ch_thermal { + struct thermal_zone_device *tzdev; + int trip_temp; + int trip_type; +}; +#endif + struct adapter { void __iomem *regs; void __iomem *bar2; @@ -1000,6 +1017,9 @@ struct adapter { /* Dump buffer for collecting logs in kdump kernel */ struct vmcoredd_data vmcoredd; +#if IS_ENABLED(CONFIG_THERMAL) + struct ch_thermal ch_thermal; +#endif }; /* Support for "sched-class" command to allow a TX Scheduling Class to be @@ -1854,4 +1874,8 @@ void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n); int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf, u16 vlan); int cxgb4_dcb_enabled(const struct net_device *dev); + +int cxgb4_thermal_init(struct adapter *adap); +int cxgb4_thermal_remove(struct adapter *adap); + #endif /* __CXGB4_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c index 5f01c0a7fd98..972f0a124714 100644 --- 
a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c @@ -30,6 +30,7 @@ static const struct cxgb4_collect_entity cxgb4_collect_mem_dump[] = { static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = { { CUDBG_MBOX_LOG, cudbg_collect_mbox_log }, + { CUDBG_QDESC, cudbg_collect_qdesc }, { CUDBG_DEV_LOG, cudbg_collect_fw_devlog }, { CUDBG_REG_DUMP, cudbg_collect_reg_dump }, { CUDBG_CIM_LA, cudbg_collect_cim_la }, @@ -311,6 +312,9 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity) } len = cudbg_mbytes_to_bytes(len); break; + case CUDBG_QDESC: + cudbg_fill_qdesc_num_and_size(adap, NULL, &len); + break; default: break; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c index b34f0f077a31..9bd5f755a0e0 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c @@ -114,6 +114,24 @@ void cxgb4_dcb_reset(struct net_device *dev) cxgb4_dcb_state_init(dev); } +/* Update the DCB support advertised for the port: if the negotiated + * version is IEEE, set DCB_CAP_DCBX_VER_IEEE and clear DCB_CAP_DCBX_VER_CEE + * if it was set; if the negotiated version is CEE, set DCB_CAP_DCBX_VER_CEE + * and clear DCB_CAP_DCBX_VER_IEEE if it was set. + */ +static inline void cxgb4_dcb_update_support(struct port_dcb_info *dcb) +{ + if (dcb->dcb_version == FW_PORT_DCB_VER_IEEE) { + if (dcb->supported & DCB_CAP_DCBX_VER_CEE) + dcb->supported &= ~DCB_CAP_DCBX_VER_CEE; + dcb->supported |= DCB_CAP_DCBX_VER_IEEE; + } else if (dcb->dcb_version == FW_PORT_DCB_VER_CEE1D01) { + if (dcb->supported & DCB_CAP_DCBX_VER_IEEE) + dcb->supported &= ~DCB_CAP_DCBX_VER_IEEE; + dcb->supported |= DCB_CAP_DCBX_VER_CEE; + } +} + /* Finite State machine for Data Center Bridging. */ void cxgb4_dcb_state_fsm(struct net_device *dev, @@ -165,6 +183,15 @@ void cxgb4_dcb_state_fsm(struct net_device *dev, } case CXGB4_DCB_STATE_FW_INCOMPLETE: { + if (transition_to != CXGB4_DCB_INPUT_FW_DISABLED) { + /* While in the CXGB4_DCB_STATE_FW_INCOMPLETE state, + * check whether the dcb version has changed (there can + * be a mismatch between the default config and the + * switch configuration negotiated at the FW), and + * update the dcb support accordingly. + */ + cxgb4_dcb_update_support(dcb); + } switch (transition_to) { case CXGB4_DCB_INPUT_FW_ENABLED: { /* we're already in firmware DCB mode */ @@ -273,8 +300,8 @@ void cxgb4_dcb_handle_fw_update(struct adapter *adap, enum cxgb4_dcb_state_input input = ((pcmd->u.dcb.control.all_syncd_pkd & FW_PORT_CMD_ALL_SYNCD_F) - ? CXGB4_DCB_STATE_FW_ALLSYNCED - : CXGB4_DCB_STATE_FW_INCOMPLETE); + ? CXGB4_DCB_INPUT_FW_ALLSYNCED + : CXGB4_DCB_INPUT_FW_INCOMPLETE); if (dcb->dcb_version != FW_PORT_DCB_VER_UNKNOWN) { dcb_running_version = FW_PORT_CMD_DCB_VERSION_G( diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h index 02040b99c78a..484ee8290090 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h @@ -67,7 +67,7 @@ do { \ if ((__dcb)->dcb_version == FW_PORT_DCB_VER_IEEE) \ cxgb4_dcb_state_fsm((__dev), \ - CXGB4_DCB_STATE_FW_ALLSYNCED); \ + CXGB4_DCB_INPUT_FW_ALLSYNCED); \ } while (0) /* States we can be in for a port's Data Center Bridging. 
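Beyond the version bookkeeping, the cxgb4_dcb.c/cxgb4_dcb.h hunks above fix an enum mix-up: cxgb4_dcb_state_fsm() takes a transition input, yet call sites were passing constants from the state enumeration (CXGB4_DCB_STATE_FW_ALLSYNCED, CXGB4_DCB_STATE_FW_INCOMPLETE) instead of the corresponding CXGB4_DCB_INPUT_* values. C converts between enum types implicitly, so nothing flags this at build time unless a warning such as -Wenum-conversion is enabled. A reduced illustration, with all names invented for the example:

    enum fsm_state { STATE_DISABLED, STATE_ALLSYNCED };
    enum fsm_input { INPUT_DISABLED, INPUT_ALLSYNCED };

    static void fsm_step(enum fsm_input in)
    {
            /* ...drive the state machine from 'in'... */
            (void)in;
    }

    int main(void)
    {
            fsm_step(INPUT_ALLSYNCED);  /* intended usage */
            fsm_step(STATE_ALLSYNCED);  /* the bug pattern: compiles silently and
                                         * only behaves correctly when the two
                                         * enumerations happen to share values */
            return 0;
    }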
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index 0f72f9c4ec74..cab492ec8f59 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -2784,6 +2784,7 @@ do { \ RL("LROmerged:", stats.lro_merged); RL("LROpackets:", stats.lro_pkts); RL("RxDrops:", stats.rx_drops); + RL("RxBadPkts:", stats.bad_rx_pkts); TL("TSO:", tso); TL("TxCSO:", tx_cso); TL("VLANins:", vlan_ins); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 961e3087d1d3..2de0590a62c4 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -62,7 +62,6 @@ #include <net/netevent.h> #include <net/addrconf.h> #include <net/bonding.h> -#include <net/addrconf.h> #include <linux/uaccess.h> #include <linux/crash_dump.h> #include <net/udp_tunnel.h> @@ -2749,6 +2748,27 @@ static int cxgb4_mgmt_set_vf_rate(struct net_device *dev, int vf, return -EINVAL; } + if (max_tx_rate == 0) { + /* unbind VF from any Traffic Class */ + fw_pfvf = + (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH)); + fw_class = 0xffffffff; + ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1, + &fw_pfvf, &fw_class); + if (ret) { + dev_err(adap->pdev_dev, + "Err %d in unbinding PF %d VF %d from TX Rate Limiting\n", + ret, adap->pf, vf); + return -EINVAL; + } + dev_info(adap->pdev_dev, + "PF %d VF %d is unbound from TX Rate Limiting\n", + adap->pf, vf); + adap->vfinfo[vf].tx_rate = 0; + return 0; + } + ret = t4_get_link_params(pi, &link_ok, &speed, &mtu); if (ret != FW_SUCCESS) { dev_err(adap->pdev_dev, @@ -2798,8 +2818,8 @@ static int cxgb4_mgmt_set_vf_rate(struct net_device *dev, int vf, &fw_class); if (ret) { dev_err(adap->pdev_dev, - "Err %d in binding VF %d to Traffic Class %d\n", - ret, vf, class_id); + "Err %d in binding PF %d VF %d to Traffic Class %d\n", + ret, adap->pf, vf, class_id); return -EINVAL; } dev_info(adap->pdev_dev, "PF %d VF %d is bound to Class %d\n", @@ -5844,6 +5864,10 @@ fw_attach_fail: if (!is_t4(adapter->params.chip)) cxgb4_ptp_init(adapter); + if (IS_ENABLED(CONFIG_THERMAL) && + !is_t4(adapter->params.chip) && (adapter->flags & FW_OK)) + cxgb4_thermal_init(adapter); + print_adapter_info(adapter); return 0; @@ -5909,6 +5933,8 @@ static void remove_one(struct pci_dev *pdev) if (!is_t4(adapter->params.chip)) cxgb4_ptp_stop(adapter); + if (IS_ENABLED(CONFIG_THERMAL)) + cxgb4_thermal_remove(adapter); /* If we allocated filters, free up state associated with any * valid filters ... diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c new file mode 100644 index 000000000000..28052e7504e5 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c @@ -0,0 +1,114 @@ +/* + * Copyright (C) 2017 Chelsio Communications. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Written by: Ganesh Goudar (ganeshgr@chelsio.com) + */ + +#include "cxgb4.h" + +#define CXGB4_NUM_TRIPS 1 + +static int cxgb4_thermal_get_temp(struct thermal_zone_device *tzdev, + int *temp) +{ + struct adapter *adap = tzdev->devdata; + u32 param, val; + int ret; + + param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DIAG) | + FW_PARAMS_PARAM_Y_V(FW_PARAM_DEV_DIAG_TMP)); + + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, + &param, &val); + if (ret < 0 || val == 0) + return -1; + + *temp = val * 1000; + return 0; +} + +static int cxgb4_thermal_get_trip_type(struct thermal_zone_device *tzdev, + int trip, enum thermal_trip_type *type) +{ + struct adapter *adap = tzdev->devdata; + + if (!adap->ch_thermal.trip_temp) + return -EINVAL; + + *type = adap->ch_thermal.trip_type; + return 0; +} + +static int cxgb4_thermal_get_trip_temp(struct thermal_zone_device *tzdev, + int trip, int *temp) +{ + struct adapter *adap = tzdev->devdata; + + if (!adap->ch_thermal.trip_temp) + return -EINVAL; + + *temp = adap->ch_thermal.trip_temp; + return 0; +} + +static struct thermal_zone_device_ops cxgb4_thermal_ops = { + .get_temp = cxgb4_thermal_get_temp, + .get_trip_type = cxgb4_thermal_get_trip_type, + .get_trip_temp = cxgb4_thermal_get_trip_temp, +}; + +int cxgb4_thermal_init(struct adapter *adap) +{ + struct ch_thermal *ch_thermal = &adap->ch_thermal; + int num_trip = CXGB4_NUM_TRIPS; + u32 param, val; + int ret; + + /* On older firmware we may not get the trip temperature; + * set the number of trips to 0. + */ + param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DIAG) | + FW_PARAMS_PARAM_Y_V(FW_PARAM_DEV_DIAG_MAXTMPTHRESH)); + + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, + &param, &val); + if (ret < 0) { + num_trip = 0; /* could not get trip temperature */ + } else { + ch_thermal->trip_temp = val * 1000; + ch_thermal->trip_type = THERMAL_TRIP_CRITICAL; + } + + ch_thermal->tzdev = thermal_zone_device_register("cxgb4", num_trip, + 0, adap, + &cxgb4_thermal_ops, + NULL, 0, 0); + if (IS_ERR(ch_thermal->tzdev)) { + ret = PTR_ERR(ch_thermal->tzdev); + dev_err(adap->pdev_dev, "Failed to register thermal zone\n"); + ch_thermal->tzdev = NULL; + return ret; + } + return 0; +} + +int cxgb4_thermal_remove(struct adapter *adap) +{ + if (adap->ch_thermal.tzdev) + thermal_zone_device_unregister(adap->ch_thermal.tzdev); + return 0; +} diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c index 301c4df8a566..99022c0898b5 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c +++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c @@ -433,10 +433,12 @@ struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh, else lport = netdev2pinfo(physdev)->lport; - if (is_vlan_dev(neigh->dev)) + if (is_vlan_dev(neigh->dev)) { vlan = vlan_dev_vlan_id(neigh->dev); - else + vlan |= vlan_dev_get_egress_qos_mask(neigh->dev, priority); + } else { vlan = VLAN_NONE; + } write_lock_bh(&d->lock); for (e = d->l2tab[hash].first; e; e = e->next) diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c index 7fc656680299..52edb688942b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sched.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sched.c @@ -38,7 +38,6 @@ #include "cxgb4.h" #include "sched.h" -/* Spinlock must be held by caller */ static int 
t4_sched_class_fw_cmd(struct port_info *pi, struct ch_sched_params *p, enum sched_fw_ops op) @@ -67,7 +66,6 @@ static int t4_sched_class_fw_cmd(struct port_info *pi, return err; } -/* Spinlock must be held by caller */ static int t4_sched_bind_unbind_op(struct port_info *pi, void *arg, enum sched_bind_type type, bool bind) { @@ -163,7 +161,6 @@ static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p) if (e && index >= 0) { int i = 0; - spin_lock(&e->lock); list_for_each_entry(qe, &e->queue_list, list) { if (i == index) break; @@ -171,10 +168,8 @@ static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p) } err = t4_sched_bind_unbind_op(pi, (void *)qe, SCHED_QUEUE, false); - if (err) { - spin_unlock(&e->lock); - goto out; - } + if (err) + return err; list_del(&qe->list); kvfree(qe); @@ -182,9 +177,7 @@ static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p) e->state = SCHED_STATE_UNUSED; memset(&e->info, 0, sizeof(e->info)); } - spin_unlock(&e->lock); } -out: return err; } @@ -210,10 +203,8 @@ static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p) /* Unbind queue from any existing class */ err = t4_sched_queue_unbind(pi, p); - if (err) { - kvfree(qe); - goto out; - } + if (err) + goto out_err; /* Bind queue to specified class */ memset(qe, 0, sizeof(*qe)); @@ -221,18 +212,16 @@ static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p) memcpy(&qe->param, p, sizeof(qe->param)); e = &s->tab[qe->param.class]; - spin_lock(&e->lock); err = t4_sched_bind_unbind_op(pi, (void *)qe, SCHED_QUEUE, true); - if (err) { - kvfree(qe); - spin_unlock(&e->lock); - goto out; - } + if (err) + goto out_err; list_add_tail(&qe->list, &e->queue_list); atomic_inc(&e->refcnt); - spin_unlock(&e->lock); -out: + return err; + +out_err: + kvfree(qe); return err; } @@ -296,8 +285,6 @@ int cxgb4_sched_class_bind(struct net_device *dev, void *arg, enum sched_bind_type type) { struct port_info *pi = netdev2pinfo(dev); - struct sched_table *s; - int err = 0; u8 class_id; if (!can_sched(dev)) @@ -323,12 +310,8 @@ int cxgb4_sched_class_bind(struct net_device *dev, void *arg, if (class_id == SCHED_CLS_NONE) return -ENOTSUPP; - s = pi->sched_tbl; - write_lock(&s->rw_lock); - err = t4_sched_class_bind_unbind_op(pi, arg, type, true); - write_unlock(&s->rw_lock); + return t4_sched_class_bind_unbind_op(pi, arg, type, true); - return err; } /** @@ -343,8 +326,6 @@ int cxgb4_sched_class_unbind(struct net_device *dev, void *arg, enum sched_bind_type type) { struct port_info *pi = netdev2pinfo(dev); - struct sched_table *s; - int err = 0; u8 class_id; if (!can_sched(dev)) @@ -367,12 +348,7 @@ int cxgb4_sched_class_unbind(struct net_device *dev, void *arg, if (!valid_class_id(dev, class_id)) return -EINVAL; - s = pi->sched_tbl; - write_lock(&s->rw_lock); - err = t4_sched_class_bind_unbind_op(pi, arg, type, false); - write_unlock(&s->rw_lock); - - return err; + return t4_sched_class_bind_unbind_op(pi, arg, type, false); } /* If @p is NULL, fetch any available unused class */ @@ -425,7 +401,6 @@ static struct sched_class *t4_sched_class_lookup(struct port_info *pi, static struct sched_class *t4_sched_class_alloc(struct port_info *pi, struct ch_sched_params *p) { - struct sched_table *s = pi->sched_tbl; struct sched_class *e; u8 class_id; int err; @@ -441,7 +416,6 @@ static struct sched_class *t4_sched_class_alloc(struct port_info *pi, if (class_id != SCHED_CLS_NONE) return NULL; - write_lock(&s->rw_lock); /* See if there's an 
existing class with same * requested sched params */ @@ -452,27 +426,19 @@ static struct sched_class *t4_sched_class_alloc(struct port_info *pi, /* Fetch any available unused class */ e = t4_sched_class_lookup(pi, NULL); if (!e) - goto out; + return NULL; memcpy(&np, p, sizeof(np)); np.u.params.class = e->idx; - - spin_lock(&e->lock); /* New class */ err = t4_sched_class_fw_cmd(pi, &np, SCHED_FW_OP_ADD); - if (err) { - spin_unlock(&e->lock); - e = NULL; - goto out; - } + if (err) + return NULL; memcpy(&e->info, &np, sizeof(e->info)); atomic_set(&e->refcnt, 0); e->state = SCHED_STATE_ACTIVE; - spin_unlock(&e->lock); } -out: - write_unlock(&s->rw_lock); return e; } @@ -517,14 +483,12 @@ struct sched_table *t4_init_sched(unsigned int sched_size) return NULL; s->sched_size = sched_size; - rwlock_init(&s->rw_lock); for (i = 0; i < s->sched_size; i++) { memset(&s->tab[i], 0, sizeof(struct sched_class)); s->tab[i].idx = i; s->tab[i].state = SCHED_STATE_UNUSED; INIT_LIST_HEAD(&s->tab[i].queue_list); - spin_lock_init(&s->tab[i].lock); atomic_set(&s->tab[i].refcnt, 0); } return s; @@ -545,11 +509,9 @@ void t4_cleanup_sched(struct adapter *adap) for (i = 0; i < s->sched_size; i++) { struct sched_class *e; - write_lock(&s->rw_lock); e = &s->tab[i]; if (e->state == SCHED_STATE_ACTIVE) t4_sched_class_free(pi, e); - write_unlock(&s->rw_lock); } kvfree(s); } diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.h b/drivers/net/ethernet/chelsio/cxgb4/sched.h index 3a49e00a38a1..168fb4ce3759 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sched.h +++ b/drivers/net/ethernet/chelsio/cxgb4/sched.h @@ -69,13 +69,11 @@ struct sched_class { u8 idx; struct ch_sched_params info; struct list_head queue_list; - spinlock_t lock; /* Per class lock */ atomic_t refcnt; }; struct sched_table { /* per port scheduling table */ u8 sched_size; - rwlock_t rw_lock; /* Table lock */ struct sched_class tab[0]; }; diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 6807bc3a44fb..b90188401d4a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -2830,6 +2830,10 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, csum_ok = pkt->csum_calc && !err_vec && (q->netdev->features & NETIF_F_RXCSUM); + + if (err_vec) + rxq->stats.bad_rx_pkts++; + if (((pkt->l2info & htonl(RXF_TCP_F)) || tnl_hdr_len) && (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) { diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 5fe5d16dee72..cb523949c812 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -3889,7 +3889,7 @@ int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op) c.param[0].mnem = cpu_to_be32(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FWCACHE)); - c.param[0].val = (__force __be32)op; + c.param[0].val = cpu_to_be32(op); return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL); } @@ -4204,6 +4204,7 @@ int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox, */ int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port) { + unsigned int fw_caps = adap->params.fw_caps_support; struct fw_port_cmd c; memset(&c, 0, sizeof(c)); @@ -4211,9 +4212,14 @@ int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port) FW_CMD_REQUEST_F | FW_CMD_EXEC_F | FW_PORT_CMD_PORTID_V(port)); c.action_to_len16 = -
cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) | + cpu_to_be32(FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16 + ? FW_PORT_ACTION_L1_CFG + : FW_PORT_ACTION_L1_CFG32) | FW_LEN16(c)); - c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP32_ANEG); + if (fw_caps == FW_CAPS16) + c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP_ANEG); + else + c.u.l1cfg32.rcap32 = cpu_to_be32(FW_PORT_CAP32_ANEG); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } @@ -10209,7 +10215,9 @@ int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf, FW_ACL_VLAN_CMD_VFN_V(vf)); vlan_cmd.en_to_len16 = cpu_to_be32(enable | FW_LEN16(vlan_cmd)); /* Drop all packets that do not match vlan id */ - vlan_cmd.dropnovlan_fm = FW_ACL_VLAN_CMD_FM_F; + vlan_cmd.dropnovlan_fm = (enable + ? (FW_ACL_VLAN_CMD_DROPNOVLAN_F | + FW_ACL_VLAN_CMD_FM_F) : 0); if (enable != 0) { vlan_cmd.nvlan = 1; vlan_cmd.vlanid[0] = cpu_to_be16(vlan); diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index 5dc6c4154af8..57584ab32043 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h @@ -1332,6 +1332,7 @@ enum fw_params_param_dev_phyfw { enum fw_params_param_dev_diag { FW_PARAM_DEV_DIAG_TMP = 0x00, FW_PARAM_DEV_DIAG_VDD = 0x01, + FW_PARAM_DEV_DIAG_MAXTMPTHRESH = 0x02, }; enum fw_params_param_dev_fwcache { @@ -2464,6 +2465,7 @@ struct fw_acl_vlan_cmd { #define FW_ACL_VLAN_CMD_DROPNOVLAN_S 7 #define FW_ACL_VLAN_CMD_DROPNOVLAN_V(x) ((x) << FW_ACL_VLAN_CMD_DROPNOVLAN_S) +#define FW_ACL_VLAN_CMD_DROPNOVLAN_F FW_ACL_VLAN_CMD_DROPNOVLAN_V(1U) #define FW_ACL_VLAN_CMD_FM_S 6 #define FW_ACL_VLAN_CMD_FM_M 0x1 diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c index 1c9ad3630c77..ceec467f590d 100644 --- a/drivers/net/ethernet/cortina/gemini.c +++ b/drivers/net/ethernet/cortina/gemini.c @@ -372,9 +372,8 @@ static int gmac_setup_phy(struct net_device *netdev) return -ENODEV; netdev->phydev = phy; - phy->supported &= PHY_GBIT_FEATURES; - phy->supported |= SUPPORTED_Asym_Pause | SUPPORTED_Pause; - phy->advertising = phy->supported; + phy_set_max_speed(phy, SPEED_1000); + phy_support_asym_pause(phy); /* set PHY interface type */ switch (phy->interface) { diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c index 5a847941c46b..79521e27f0d1 100644 --- a/drivers/net/ethernet/dnet.c +++ b/drivers/net/ethernet/dnet.c @@ -284,13 +284,11 @@ static int dnet_mii_probe(struct net_device *dev) /* mask with MAC supported features */ if (bp->capabilities & DNET_HAS_GIGABIT) - phydev->supported &= PHY_GBIT_FEATURES; + phy_set_max_speed(phydev, SPEED_1000); else - phydev->supported &= PHY_BASIC_FEATURES; + phy_set_max_speed(phydev, SPEED_100); - phydev->supported |= SUPPORTED_Asym_Pause | SUPPORTED_Pause; - - phydev->advertising = phydev->supported; + phy_support_asym_pause(phydev); bp->link = 0; bp->speed = 0; diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c index 60da0499ad66..0f3e7f21c6fa 100644 --- a/drivers/net/ethernet/ethoc.c +++ b/drivers/net/ethernet/ethoc.c @@ -721,10 +721,7 @@ static int ethoc_mdio_probe(struct net_device *dev) return err; } - phy->advertising &= ~(ADVERTISED_1000baseT_Full | - ADVERTISED_1000baseT_Half); - phy->supported &= ~(SUPPORTED_1000baseT_Full | - SUPPORTED_1000baseT_Half); + phy_set_max_speed(phy, SPEED_100); return 0; } diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c index ed6c76d20b45..4d673225ed3e
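The FW_ACL_VLAN_CMD_DROPNOVLAN_F addition above completes the firmware API's usual macro trio: an _S shift constant, a _V value-placing macro, and an _F single-bit flag defined as _V(1U). A minimal standalone illustration of how the trio composes (constant names mirrored from the hunk; the demo program itself is hypothetical):

#include <stdio.h>
#include <stdint.h>

/* Shift (_S), value (_V) and flag (_F) macros, in the t4fw_api.h style */
#define DROPNOVLAN_S	7
#define DROPNOVLAN_V(x)	((x) << DROPNOVLAN_S)
#define DROPNOVLAN_F	DROPNOVLAN_V(1U)

#define FM_S		6
#define FM_V(x)		((x) << FM_S)
#define FM_F		FM_V(1U)

int main(void)
{
	/* Mirrors the enable case in t4_set_vlan_acl(): both flags set */
	uint8_t dropnovlan_fm = DROPNOVLAN_F | FM_F;

	printf("dropnovlan_fm = 0x%02x\n", dropnovlan_fm); /* prints 0xc0 */
	return 0;
}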
100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c @@ -712,8 +712,8 @@ static bool ftgmac100_prep_tx_csum(struct sk_buff *skb, u32 *csum_vlan) return skb_checksum_help(skb) == 0; } -static int ftgmac100_hard_start_xmit(struct sk_buff *skb, - struct net_device *netdev) +static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb, + struct net_device *netdev) { struct ftgmac100 *priv = netdev_priv(netdev); struct ftgmac100_txdes *txdes, *first; @@ -1079,8 +1079,7 @@ static int ftgmac100_mii_probe(struct ftgmac100 *priv, phy_interface_t intf) /* Indicate that we support PAUSE frames (see comment in * Documentation/networking/phy.txt) */ - phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; - phydev->advertising = phydev->supported; + phy_support_asym_pause(phydev); /* Display what we found */ phy_attached_info(phydev); @@ -1220,22 +1219,11 @@ static int ftgmac100_set_pauseparam(struct net_device *netdev, priv->tx_pause = pause->tx_pause; priv->rx_pause = pause->rx_pause; - if (phydev) { - phydev->advertising &= ~ADVERTISED_Pause; - phydev->advertising &= ~ADVERTISED_Asym_Pause; + if (phydev) + phy_set_asym_pause(phydev, pause->rx_pause, pause->tx_pause); - if (pause->rx_pause) { - phydev->advertising |= ADVERTISED_Pause; - phydev->advertising |= ADVERTISED_Asym_Pause; - } - - if (pause->tx_pause) - phydev->advertising ^= ADVERTISED_Asym_Pause; - } if (netif_running(netdev)) { - if (phydev && priv->aneg_pause) - phy_start_aneg(phydev); - else + if (!(phydev && priv->aneg_pause)) ftgmac100_config_pause(priv); } diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c index a1197d3adbe0..570caeb8ee9e 100644 --- a/drivers/net/ethernet/faraday/ftmac100.c +++ b/drivers/net/ethernet/faraday/ftmac100.c @@ -634,8 +634,8 @@ static void ftmac100_tx_complete(struct ftmac100 *priv) ; } -static int ftmac100_xmit(struct ftmac100 *priv, struct sk_buff *skb, - dma_addr_t map) +static netdev_tx_t ftmac100_xmit(struct ftmac100 *priv, struct sk_buff *skb, + dma_addr_t map) { struct net_device *netdev = priv->netdev; struct ftmac100_txdes *txdes; @@ -1016,7 +1016,8 @@ static int ftmac100_stop(struct net_device *netdev) return 0; } -static int ftmac100_hard_start_xmit(struct sk_buff *skb, struct net_device *netdev) +static netdev_tx_t +ftmac100_hard_start_xmit(struct sk_buff *skb, struct net_device *netdev) { struct ftmac100 *priv = netdev_priv(netdev); dma_addr_t map; diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig index a580a3dcbe59..d3a62bc1f1c6 100644 --- a/drivers/net/ethernet/freescale/Kconfig +++ b/drivers/net/ethernet/freescale/Kconfig @@ -96,5 +96,6 @@ config GIANFAR on the 8540. 
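The ftgmac100 and ftmac100 changes above belong to a tree-wide conversion of transmit handlers from int to netdev_tx_t. A minimal sketch of the expected shape, with a hypothetical driver name and the actual hardware interaction elided:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Sketch only: .ndo_start_xmit must return netdev_tx_t
 * (NETDEV_TX_OK or NETDEV_TX_BUSY), not a plain int.
 */
static netdev_tx_t foo_hard_start_xmit(struct sk_buff *skb,
				       struct net_device *netdev)
{
	/* A real driver would queue the skb to its TX ring here and
	 * return NETDEV_TX_BUSY only if the ring were unexpectedly full.
	 */
	dev_kfree_skb_any(skb);	/* placeholder for the real TX path */
	return NETDEV_TX_OK;
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_start_xmit	= foo_hard_start_xmit,
};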
source "drivers/net/ethernet/freescale/dpaa/Kconfig" +source "drivers/net/ethernet/freescale/dpaa2/Kconfig" endif # NET_VENDOR_FREESCALE diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile index 0914a3ea4405..3b4ff08e3841 100644 --- a/drivers/net/ethernet/freescale/Makefile +++ b/drivers/net/ethernet/freescale/Makefile @@ -21,3 +21,5 @@ ucc_geth_driver-objs := ucc_geth.o ucc_geth_ethtool.o obj-$(CONFIG_FSL_FMAN) += fman/ obj-$(CONFIG_FSL_DPAA_ETH) += dpaa/ + +obj-$(CONFIG_FSL_DPAA2_ETH) += dpaa2/ diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index 65a22cd9aef2..6e0f47f2c8a3 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -1280,7 +1280,7 @@ static int dpaa_bman_release(const struct dpaa_bp *dpaa_bp, err = bman_release(dpaa_bp->pool, bmb, cnt); /* Should never occur, address anyway to avoid leaking the buffers */ - if (unlikely(WARN_ON(err)) && dpaa_bp->free_buf_cb) + if (WARN_ON(err) && dpaa_bp->free_buf_cb) while (cnt-- > 0) dpaa_bp->free_buf_cb(dpaa_bp, &bmb[cnt]); @@ -1704,10 +1704,8 @@ static struct sk_buff *contig_fd_to_skb(const struct dpaa_priv *priv, skb = build_skb(vaddr, dpaa_bp->size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); - if (unlikely(!skb)) { - WARN_ONCE(1, "Build skb failure on Rx\n"); + if (WARN_ONCE(!skb, "Build skb failure on Rx\n")) goto free_buffer; - } WARN_ON(fd_off != priv->rx_headroom); skb_reserve(skb, fd_off); skb_put(skb, qm_fd_get_length(fd)); @@ -1770,7 +1768,7 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv, sz = dpaa_bp->size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); skb = build_skb(sg_vaddr, sz); - if (WARN_ON(unlikely(!skb))) + if (WARN_ON(!skb)) goto free_buffers; skb->ip_summed = rx_csum_offload(priv, fd); @@ -2046,7 +2044,8 @@ static inline int dpaa_xmit(struct dpaa_priv *priv, return 0; } -static int dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev) +static netdev_tx_t +dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev) { const int queue_mapping = skb_get_queue_mapping(skb); bool nonlinear = skb_is_nonlinear(skb); @@ -2493,8 +2492,7 @@ static int dpaa_phy_init(struct net_device *net_dev) /* Remove any features not supported by the controller */ phy_dev->supported &= mac_dev->if_support; - phy_dev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause); - phy_dev->advertising = phy_dev->supported; + phy_support_asym_pause(phy_dev); mac_dev->phy_dev = phy_dev; net_dev->phydev = phy_dev; @@ -2733,8 +2731,6 @@ out_error: return err; } -static const struct of_device_id dpaa_match[]; - static inline u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl) { u16 headroom; diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c index 3184c8f7cdd0..13d6e2272ece 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c @@ -182,7 +182,6 @@ static int dpaa_set_pauseparam(struct net_device *net_dev, struct phy_device *phydev; bool rx_pause, tx_pause; struct dpaa_priv *priv; - u32 newadv, oldadv; int err; priv = netdev_priv(net_dev); @@ -194,9 +193,7 @@ static int dpaa_set_pauseparam(struct net_device *net_dev, return -ENODEV; } - if (!(phydev->supported & SUPPORTED_Pause) || - (!(phydev->supported & SUPPORTED_Asym_Pause) && - (epause->rx_pause != epause->tx_pause))) + if (!phy_validate_pause(phydev, epause)) 
return -EINVAL; /* The MAC should know how to handle PAUSE frame autonegotiation before @@ -210,29 +207,8 @@ static int dpaa_set_pauseparam(struct net_device *net_dev, /* Determine the sym/asym advertised PAUSE capabilities from the desired * rx/tx pause settings. */ - newadv = 0; - if (epause->rx_pause) - newadv = ADVERTISED_Pause | ADVERTISED_Asym_Pause; - if (epause->tx_pause) - newadv ^= ADVERTISED_Asym_Pause; - oldadv = phydev->advertising & - (ADVERTISED_Pause | ADVERTISED_Asym_Pause); - - /* If there are differences between the old and the new advertised - * values, restart PHY autonegotiation and advertise the new values. - */ - if (oldadv != newadv) { - phydev->advertising &= ~(ADVERTISED_Pause - | ADVERTISED_Asym_Pause); - phydev->advertising |= newadv; - if (phydev->autoneg) { - err = phy_start_aneg(phydev); - if (err < 0) - netdev_err(net_dev, "phy_start_aneg() = %d\n", - err); - } - } + phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause); fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause); err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause); diff --git a/drivers/net/ethernet/freescale/dpaa2/Kconfig b/drivers/net/ethernet/freescale/dpaa2/Kconfig new file mode 100644 index 000000000000..a7f365db0eed --- /dev/null +++ b/drivers/net/ethernet/freescale/dpaa2/Kconfig @@ -0,0 +1,17 @@ +config FSL_DPAA2_ETH + tristate "Freescale DPAA2 Ethernet" + depends on FSL_MC_BUS && FSL_MC_DPIO + depends on ARCH_LAYERSCAPE || COMPILE_TEST + help + This is the DPAA2 Ethernet driver supporting Freescale SoCs + with DPAA2 (DataPath Acceleration Architecture v2). + The driver manages network objects discovered on the Freescale + MC bus. + +config FSL_DPAA2_PTP_CLOCK + tristate "Freescale DPAA2 PTP Clock" + depends on FSL_DPAA2_ETH && POSIX_TIMERS + select PTP_1588_CLOCK + help + This driver adds support for using the DPAA2 1588 timer module + as a PTP clock. diff --git a/drivers/net/ethernet/freescale/dpaa2/Makefile b/drivers/net/ethernet/freescale/dpaa2/Makefile new file mode 100644 index 000000000000..2f424e0a8225 --- /dev/null +++ b/drivers/net/ethernet/freescale/dpaa2/Makefile @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the Freescale DPAA2 Ethernet controller +# + +obj-$(CONFIG_FSL_DPAA2_ETH) += fsl-dpaa2-eth.o +obj-$(CONFIG_FSL_DPAA2_PTP_CLOCK) += fsl-dpaa2-ptp.o + +fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o +fsl-dpaa2-ptp-objs := dpaa2-ptp.o dprtc.o + +# Needed by the tracing framework +CFLAGS_dpaa2-eth.o := -I$(src) diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h index 9801528db2a5..9801528db2a5 100644 --- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index 9329fcad95ac..156080d42a6c 100644 --- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -289,10 +289,11 @@ err_frame_format: * * Observance of NAPI budget is not our concern, leaving that to the caller. 
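dpaa_set_pauseparam() above now delegates all Pause/Asym_Pause advertisement bookkeeping to phylib. A sketch of the resulting pattern for a generic MAC driver (driver name hypothetical, surrounding MAC configuration elided):

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/phy.h>

static int foo_set_pauseparam(struct net_device *dev,
			      struct ethtool_pauseparam *pause)
{
	struct phy_device *phydev = dev->phydev;

	if (!phydev)
		return -ENODEV;

	/* Reject rx/tx combinations the PHY cannot advertise */
	if (!phy_validate_pause(phydev, pause))
		return -EINVAL;

	/* Update the Pause/Asym_Pause advertisement; phylib restarts
	 * autonegotiation on its own if anything actually changed.
	 */
	phy_set_asym_pause(phydev, pause->rx_pause, pause->tx_pause);

	return 0;
}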
*/ -static int consume_frames(struct dpaa2_eth_channel *ch) +static int consume_frames(struct dpaa2_eth_channel *ch, + enum dpaa2_eth_fq_type *type) { struct dpaa2_eth_priv *priv = ch->priv; - struct dpaa2_eth_fq *fq; + struct dpaa2_eth_fq *fq = NULL; struct dpaa2_dq *dq; const struct dpaa2_fd *fd; int cleaned = 0; @@ -311,12 +312,23 @@ static int consume_frames(struct dpaa2_eth_channel *ch) fd = dpaa2_dq_fd(dq); fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq); - fq->stats.frames++; fq->consume(priv, ch, fd, &ch->napi, fq->flowid); cleaned++; } while (!is_last); + if (!cleaned) + return 0; + + fq->stats.frames += cleaned; + ch->stats.frames += cleaned; + + /* A dequeue operation only pulls frames from a single queue + * into the store. Return the frame queue type as an out param. + */ + if (type) + *type = fq->type; + return cleaned; } @@ -921,14 +933,16 @@ static int pull_channel(struct dpaa2_eth_channel *ch) static int dpaa2_eth_poll(struct napi_struct *napi, int budget) { struct dpaa2_eth_channel *ch; - int cleaned = 0, store_cleaned; struct dpaa2_eth_priv *priv; + int rx_cleaned = 0, txconf_cleaned = 0; + enum dpaa2_eth_fq_type type; + int store_cleaned; int err; ch = container_of(napi, struct dpaa2_eth_channel, napi); priv = ch->priv; - while (cleaned < budget) { + do { err = pull_channel(ch); if (unlikely(err)) break; @@ -936,30 +950,32 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget) /* Refill pool if appropriate */ refill_pool(priv, ch, priv->bpid); - store_cleaned = consume_frames(ch); - cleaned += store_cleaned; + store_cleaned = consume_frames(ch, &type); + if (type == DPAA2_RX_FQ) + rx_cleaned += store_cleaned; + else + txconf_cleaned += store_cleaned; - /* If we have enough budget left for a full store, - * try a new pull dequeue, otherwise we're done here + /* If we either consumed the whole NAPI budget with Rx frames + * or we reached the Tx confirmations threshold, we're done. 
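The consume_frames()/dpaa2_eth_poll() rework in this hunk keeps to the standard NAPI contract: return the full budget to stay scheduled, otherwise report the work done, complete NAPI and re-arm the hardware notification. A condensed sketch of that contract (the foo_* helpers are hypothetical stand-ins):

#include <linux/netdevice.h>

static int foo_clean_rx(struct napi_struct *napi, int budget);
static void foo_rearm_notifications(struct napi_struct *napi);

static int foo_poll(struct napi_struct *napi, int budget)
{
	/* Drain up to 'budget' received frames */
	int work_done = foo_clean_rx(napi, budget);

	/* Under budget: NAPI is done, re-enable the data-available
	 * notification so the next frame schedules us again.
	 */
	if (work_done < budget && napi_complete_done(napi, work_done))
		foo_rearm_notifications(napi);

	/* Returning 'budget' keeps the poll scheduled for another pass */
	return work_done;
}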
*/ - if (store_cleaned == 0 || - cleaned > budget - DPAA2_ETH_STORE_SIZE) - break; - } + if (rx_cleaned >= budget || + txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI) + return budget; + } while (store_cleaned); - if (cleaned < budget && napi_complete_done(napi, cleaned)) { - /* Re-enable data available notifications */ - do { - err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx); - cpu_relax(); - } while (err == -EBUSY); - WARN_ONCE(err, "CDAN notifications rearm failed on core %d", - ch->nctx.desired_cpu); - } - - ch->stats.frames += cleaned; + /* We didn't consume the entire budget, so finish napi and + * re-enable data availability notifications + */ + napi_complete_done(napi, rx_cleaned); + do { + err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx); + cpu_relax(); + } while (err == -EBUSY); + WARN_ONCE(err, "CDAN notifications rearm failed on core %d", + ch->nctx.desired_cpu); - return cleaned; + return max(rx_cleaned, 1); } static void enable_ch_napi(struct dpaa2_eth_priv *priv) @@ -1076,7 +1092,7 @@ static u32 drain_channel(struct dpaa2_eth_priv *priv, do { pull_channel(ch); - drained = consume_frames(ch); + drained = consume_frames(ch, NULL); total += drained; } while (drained); @@ -1143,34 +1159,6 @@ static int dpaa2_eth_stop(struct net_device *net_dev) return 0; } -static int dpaa2_eth_init(struct net_device *net_dev) -{ - u64 supported = 0; - u64 not_supported = 0; - struct dpaa2_eth_priv *priv = netdev_priv(net_dev); - u32 options = priv->dpni_attrs.options; - - /* Capabilities listing */ - supported |= IFF_LIVE_ADDR_CHANGE; - - if (options & DPNI_OPT_NO_MAC_FILTER) - not_supported |= IFF_UNICAST_FLT; - else - supported |= IFF_UNICAST_FLT; - - net_dev->priv_flags |= supported; - net_dev->priv_flags &= ~not_supported; - - /* Features */ - net_dev->features = NETIF_F_RXCSUM | - NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | - NETIF_F_SG | NETIF_F_HIGHDMA | - NETIF_F_LLTX; - net_dev->hw_features = net_dev->features; - - return 0; -} - static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr) { struct dpaa2_eth_priv *priv = netdev_priv(net_dev); @@ -1418,7 +1406,6 @@ static const struct net_device_ops dpaa2_eth_ops = { .ndo_open = dpaa2_eth_open, .ndo_start_xmit = dpaa2_eth_tx, .ndo_stop = dpaa2_eth_stop, - .ndo_init = dpaa2_eth_init, .ndo_set_mac_address = dpaa2_eth_set_addr, .ndo_get_stats64 = dpaa2_eth_get_stats, .ndo_set_rx_mode = dpaa2_eth_set_rx_mode, @@ -1926,6 +1913,11 @@ static int setup_dpni(struct fsl_mc_device *ls_dev) if (err) goto close; + priv->cls_rules = devm_kzalloc(dev, sizeof(struct dpaa2_eth_cls_rule) * + dpaa2_eth_fs_count(priv), GFP_KERNEL); + if (!priv->cls_rules) + goto close; + return 0; close: @@ -2032,9 +2024,33 @@ static int setup_tx_flow(struct dpaa2_eth_priv *priv, return 0; } -/* Hash key is a 5-tuple: IPsrc, IPdst, IPnextproto, L4src, L4dst */ -static const struct dpaa2_eth_hash_fields hash_fields[] = { +/* Supported header fields for Rx hash distribution key */ +static const struct dpaa2_eth_dist_fields dist_fields[] = { { + /* L2 header */ + .rxnfc_field = RXH_L2DA, + .cls_prot = NET_PROT_ETH, + .cls_field = NH_FLD_ETH_DA, + .size = 6, + }, { + .cls_prot = NET_PROT_ETH, + .cls_field = NH_FLD_ETH_SA, + .size = 6, + }, { + /* This is the last ethertype field parsed: + * depending on frame format, it can be the MAC ethertype + * or the VLAN etype. 
+ */ + .cls_prot = NET_PROT_ETH, + .cls_field = NH_FLD_ETH_TYPE, + .size = 2, + }, { + /* VLAN header */ + .rxnfc_field = RXH_VLAN, + .cls_prot = NET_PROT_VLAN, + .cls_field = NH_FLD_VLAN_TCI, + .size = 2, + }, { /* IP header */ .rxnfc_field = RXH_IP_SRC, .cls_prot = NET_PROT_IP, @@ -2066,32 +2082,122 @@ static const struct dpaa2_eth_hash_fields hash_fields[] = { }, }; -/* Set RX hash options +/* Configure the Rx hash key using the legacy API */ +static int config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key) +{ + struct device *dev = priv->net_dev->dev.parent; + struct dpni_rx_tc_dist_cfg dist_cfg; + int err; + + memset(&dist_cfg, 0, sizeof(dist_cfg)); + + dist_cfg.key_cfg_iova = key; + dist_cfg.dist_size = dpaa2_eth_queue_count(priv); + dist_cfg.dist_mode = DPNI_DIST_MODE_HASH; + + err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg); + if (err) + dev_err(dev, "dpni_set_rx_tc_dist failed\n"); + + return err; +} + +/* Configure the Rx hash key using the new API */ +static int config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key) +{ + struct device *dev = priv->net_dev->dev.parent; + struct dpni_rx_dist_cfg dist_cfg; + int err; + + memset(&dist_cfg, 0, sizeof(dist_cfg)); + + dist_cfg.key_cfg_iova = key; + dist_cfg.dist_size = dpaa2_eth_queue_count(priv); + dist_cfg.enable = 1; + + err = dpni_set_rx_hash_dist(priv->mc_io, 0, priv->mc_token, &dist_cfg); + if (err) + dev_err(dev, "dpni_set_rx_hash_dist failed\n"); + + return err; +} + +/* Configure the Rx flow classification key */ +static int config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key) +{ + struct device *dev = priv->net_dev->dev.parent; + struct dpni_rx_dist_cfg dist_cfg; + int err; + + memset(&dist_cfg, 0, sizeof(dist_cfg)); + + dist_cfg.key_cfg_iova = key; + dist_cfg.dist_size = dpaa2_eth_queue_count(priv); + dist_cfg.enable = 1; + + err = dpni_set_rx_fs_dist(priv->mc_io, 0, priv->mc_token, &dist_cfg); + if (err) + dev_err(dev, "dpni_set_rx_fs_dist failed\n"); + + return err; +} + +/* Size of the Rx flow classification key */ +int dpaa2_eth_cls_key_size(void) +{ + int i, size = 0; + + for (i = 0; i < ARRAY_SIZE(dist_fields); i++) + size += dist_fields[i].size; + + return size; +} + +/* Offset of header field in Rx classification key */ +int dpaa2_eth_cls_fld_off(int prot, int field) +{ + int i, off = 0; + + for (i = 0; i < ARRAY_SIZE(dist_fields); i++) { + if (dist_fields[i].cls_prot == prot && + dist_fields[i].cls_field == field) + return off; + off += dist_fields[i].size; + } + + WARN_ONCE(1, "Unsupported header field used for Rx flow cls\n"); + return 0; +} + +/* Set Rx distribution (hash or flow classification) key * flags is a combination of RXH_ bits */ -static int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags) +int dpaa2_eth_set_dist_key(struct net_device *net_dev, + enum dpaa2_eth_rx_dist type, u64 flags) { struct device *dev = net_dev->dev.parent; struct dpaa2_eth_priv *priv = netdev_priv(net_dev); struct dpkg_profile_cfg cls_cfg; - struct dpni_rx_tc_dist_cfg dist_cfg; + u32 rx_hash_fields = 0; + dma_addr_t key_iova; u8 *dma_mem; int i; int err = 0; - if (!dpaa2_eth_hash_enabled(priv)) { - dev_dbg(dev, "Hashing support is not enabled\n"); - return 0; - } - memset(&cls_cfg, 0, sizeof(cls_cfg)); - for (i = 0; i < ARRAY_SIZE(hash_fields); i++) { + for (i = 0; i < ARRAY_SIZE(dist_fields); i++) { struct dpkg_extract *key = &cls_cfg.extracts[cls_cfg.num_extracts]; - if (!(flags & hash_fields[i].rxnfc_field)) - continue; + /* For Rx hashing key we set only the selected 
fields. + * For Rx flow classification key we set all supported fields + */ + if (type == DPAA2_ETH_RX_DIST_HASH) { + if (!(flags & dist_fields[i].rxnfc_field)) + continue; + rx_hash_fields |= dist_fields[i].rxnfc_field; + } if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) { dev_err(dev, "error adding key extraction rule, too many rules?\n"); @@ -2099,12 +2205,10 @@ static int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags) } key->type = DPKG_EXTRACT_FROM_HDR; - key->extract.from_hdr.prot = hash_fields[i].cls_prot; + key->extract.from_hdr.prot = dist_fields[i].cls_prot; key->extract.from_hdr.type = DPKG_FULL_FIELD; - key->extract.from_hdr.field = hash_fields[i].cls_field; + key->extract.from_hdr.field = dist_fields[i].cls_field; cls_cfg.num_extracts++; - - priv->rx_hash_fields |= hash_fields[i].rxnfc_field; } dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL); @@ -2114,36 +2218,73 @@ static int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags) err = dpni_prepare_key_cfg(&cls_cfg, dma_mem); if (err) { dev_err(dev, "dpni_prepare_key_cfg error %d\n", err); - goto err_prep_key; + goto free_key; } - memset(&dist_cfg, 0, sizeof(dist_cfg)); - /* Prepare for setting the rx dist */ - dist_cfg.key_cfg_iova = dma_map_single(dev, dma_mem, - DPAA2_CLASSIFIER_DMA_SIZE, - DMA_TO_DEVICE); - if (dma_mapping_error(dev, dist_cfg.key_cfg_iova)) { + key_iova = dma_map_single(dev, dma_mem, DPAA2_CLASSIFIER_DMA_SIZE, + DMA_TO_DEVICE); + if (dma_mapping_error(dev, key_iova)) { dev_err(dev, "DMA mapping failed\n"); err = -ENOMEM; - goto err_dma_map; + goto free_key; } - dist_cfg.dist_size = dpaa2_eth_queue_count(priv); - dist_cfg.dist_mode = DPNI_DIST_MODE_HASH; + if (type == DPAA2_ETH_RX_DIST_HASH) { + if (dpaa2_eth_has_legacy_dist(priv)) + err = config_legacy_hash_key(priv, key_iova); + else + err = config_hash_key(priv, key_iova); + } else { + err = config_cls_key(priv, key_iova); + } - err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg); - dma_unmap_single(dev, dist_cfg.key_cfg_iova, - DPAA2_CLASSIFIER_DMA_SIZE, DMA_TO_DEVICE); - if (err) - dev_err(dev, "dpni_set_rx_tc_dist() error %d\n", err); + dma_unmap_single(dev, key_iova, DPAA2_CLASSIFIER_DMA_SIZE, + DMA_TO_DEVICE); + if (!err && type == DPAA2_ETH_RX_DIST_HASH) + priv->rx_hash_fields = rx_hash_fields; -err_dma_map: -err_prep_key: +free_key: kfree(dma_mem); return err; } +int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags) +{ + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + + if (!dpaa2_eth_hash_enabled(priv)) + return -EOPNOTSUPP; + + return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_HASH, flags); +} + +static int dpaa2_eth_set_cls(struct dpaa2_eth_priv *priv) +{ + struct device *dev = priv->net_dev->dev.parent; + + /* Check if we actually support Rx flow classification */ + if (dpaa2_eth_has_legacy_dist(priv)) { + dev_dbg(dev, "Rx cls not supported by current MC version\n"); + return -EOPNOTSUPP; + } + + if (priv->dpni_attrs.options & DPNI_OPT_NO_FS || + !(priv->dpni_attrs.options & DPNI_OPT_HAS_KEY_MASKING)) { + dev_dbg(dev, "Rx cls disabled in DPNI options\n"); + return -EOPNOTSUPP; + } + + if (!dpaa2_eth_hash_enabled(priv)) { + dev_dbg(dev, "Rx cls disabled for single queue DPNIs\n"); + return -EOPNOTSUPP; + } + + priv->rx_cls_enabled = 1; + + return dpaa2_eth_set_dist_key(priv->net_dev, DPAA2_ETH_RX_DIST_CLS, 0); +} + /* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs, * frame queues and channels */ @@ -2170,9 +2311,16 @@ static int bind_dpni(struct 
dpaa2_eth_priv *priv) * the default hash key */ err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_DEFAULT); - if (err) + if (err && err != -EOPNOTSUPP) dev_err(dev, "Failed to configure hashing\n"); + /* Configure the flow classification key; it includes all + * supported header fields and cannot be modified at runtime + */ + err = dpaa2_eth_set_cls(priv); + if (err && err != -EOPNOTSUPP) + dev_err(dev, "Failed to configure Rx classification key\n"); + /* Configure handling of error frames */ err_cfg.errors = DPAA2_FAS_RX_ERR_MASK; err_cfg.set_frame_annotation = 1; @@ -2316,11 +2464,14 @@ static int netdev_init(struct net_device *net_dev) { struct device *dev = net_dev->dev.parent; struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + u32 options = priv->dpni_attrs.options; + u64 supported = 0, not_supported = 0; u8 bcast_addr[ETH_ALEN]; u8 num_queues; int err; net_dev->netdev_ops = &dpaa2_eth_ops; + net_dev->ethtool_ops = &dpaa2_ethtool_ops; err = set_mac_addr(priv); if (err) @@ -2356,12 +2507,23 @@ static int netdev_init(struct net_device *net_dev) return err; } - /* Our .ndo_init will be called herein */ - err = register_netdev(net_dev); - if (err < 0) { - dev_err(dev, "register_netdev() failed\n"); - return err; - } + /* Capabilities listing */ + supported |= IFF_LIVE_ADDR_CHANGE; + + if (options & DPNI_OPT_NO_MAC_FILTER) + not_supported |= IFF_UNICAST_FLT; + else + supported |= IFF_UNICAST_FLT; + + net_dev->priv_flags |= supported; + net_dev->priv_flags &= ~not_supported; + + /* Features */ + net_dev->features = NETIF_F_RXCSUM | + NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_SG | NETIF_F_HIGHDMA | + NETIF_F_LLTX; + net_dev->hw_features = net_dev->features; return 0; } @@ -2561,28 +2723,36 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev) if (err) goto err_alloc_rings; - net_dev->ethtool_ops = &dpaa2_ethtool_ops; - err = setup_irqs(dpni_dev); if (err) { netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n"); priv->poll_thread = kthread_run(poll_link_state, priv, "%s_poll_link", net_dev->name); if (IS_ERR(priv->poll_thread)) { - netdev_err(net_dev, "Error starting polling thread\n"); + dev_err(dev, "Error starting polling thread\n"); goto err_poll_thread; } priv->do_link_poll = true; } + err = register_netdev(net_dev); + if (err < 0) { + dev_err(dev, "register_netdev() failed\n"); + goto err_netdev_reg; + } + dev_info(dev, "Probed interface %s\n", net_dev->name); return 0; +err_netdev_reg: + if (priv->do_link_poll) + kthread_stop(priv->poll_thread); + else + fsl_mc_free_irqs(dpni_dev); err_poll_thread: free_rings(priv); err_alloc_rings: err_csum: - unregister_netdev(net_dev); err_netdev_init: free_percpu(priv->percpu_extras); err_alloc_percpu_extras: diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h index d54cb0b99d08..452a8e9c4f0e 100644 --- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h @@ -40,6 +40,11 @@ */ #define DPAA2_ETH_TAILDROP_THRESH (64 * 1024) +/* Maximum number of Tx confirmation frames to be processed + * in a single NAPI call + */ +#define DPAA2_ETH_TXCONF_PER_NAPI 256 + /* Buffer quota per queue. 
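Moving register_netdev() to the very end of probe, as done above, follows the usual ordering rule: the netdev is registered only once the device is fully set up, and the error path unwinds everything that preceded it in reverse order. A compressed sketch of the pattern (all foo_* names hypothetical):

#include <linux/netdevice.h>

struct foo_priv {
	struct net_device *net_dev;
};

static int foo_setup_resources(struct foo_priv *priv);
static void foo_free_resources(struct foo_priv *priv);
static int foo_setup_irqs(struct foo_priv *priv);
static void foo_free_irqs(struct foo_priv *priv);

static int foo_probe(struct foo_priv *priv)
{
	int err;

	err = foo_setup_resources(priv);
	if (err)
		return err;

	err = foo_setup_irqs(priv);
	if (err)
		goto err_free_resources;

	/* Register last: from here on the stack may call into us */
	err = register_netdev(priv->net_dev);
	if (err)
		goto err_free_irqs;

	return 0;

err_free_irqs:
	foo_free_irqs(priv);
err_free_resources:
	foo_free_resources(priv);
	return err;
}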
Must be large enough such that for minimum sized * frames taildrop kicks in before the bpool gets depleted, so we compute * how many 64B frames fit inside the taildrop threshold and add a margin @@ -290,13 +295,18 @@ struct dpaa2_eth_channel { struct dpaa2_eth_ch_stats stats; }; -struct dpaa2_eth_hash_fields { +struct dpaa2_eth_dist_fields { u64 rxnfc_field; enum net_prot cls_prot; int cls_field; int size; }; +struct dpaa2_eth_cls_rule { + struct ethtool_rx_flow_spec fs; + u8 in_use; +}; + /* Driver private data */ struct dpaa2_eth_priv { struct net_device *net_dev; @@ -340,6 +350,8 @@ struct dpaa2_eth_priv { /* enabled ethtool hashing bits */ u64 rx_hash_fields; + struct dpaa2_eth_cls_rule *cls_rules; + u8 rx_cls_enabled; }; #define DPAA2_RXH_SUPPORTED (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO \ @@ -367,6 +379,24 @@ static inline int dpaa2_eth_cmp_dpni_ver(struct dpaa2_eth_priv *priv, return priv->dpni_ver_major - ver_major; } +/* Minimum firmware version that supports a more flexible API + * for configuring the Rx flow hash key + */ +#define DPNI_RX_DIST_KEY_VER_MAJOR 7 +#define DPNI_RX_DIST_KEY_VER_MINOR 5 + +#define dpaa2_eth_has_legacy_dist(priv) \ + (dpaa2_eth_cmp_dpni_ver((priv), DPNI_RX_DIST_KEY_VER_MAJOR, \ + DPNI_RX_DIST_KEY_VER_MINOR) < 0) + +#define dpaa2_eth_fs_count(priv) \ + ((priv)->dpni_attrs.fs_entries) + +enum dpaa2_eth_rx_dist { + DPAA2_ETH_RX_DIST_HASH, + DPAA2_ETH_RX_DIST_CLS +}; + /* Hardware only sees DPAA2_ETH_RX_BUF_SIZE, but the skb built around * the buffer also needs space for its shared info struct, and we need * to allocate enough to accommodate hardware alignment restrictions @@ -409,4 +439,8 @@ static int dpaa2_eth_queue_count(struct dpaa2_eth_priv *priv) return priv->dpni_attrs.num_queues; } +int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags); +int dpaa2_eth_cls_key_size(void); +int dpaa2_eth_cls_fld_off(int prot, int field); + #endif /* __DPAA2_H */ diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c new file mode 100644 index 000000000000..26bd5a2bd8ed --- /dev/null +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c @@ -0,0 +1,630 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* Copyright 2014-2016 Freescale Semiconductor Inc. 
+ * Copyright 2016 NXP + */ + +#include <linux/net_tstamp.h> + +#include "dpni.h" /* DPNI_LINK_OPT_* */ +#include "dpaa2-eth.h" + +/* To be kept in sync with DPNI statistics */ +static char dpaa2_ethtool_stats[][ETH_GSTRING_LEN] = { + "[hw] rx frames", + "[hw] rx bytes", + "[hw] rx mcast frames", + "[hw] rx mcast bytes", + "[hw] rx bcast frames", + "[hw] rx bcast bytes", + "[hw] tx frames", + "[hw] tx bytes", + "[hw] tx mcast frames", + "[hw] tx mcast bytes", + "[hw] tx bcast frames", + "[hw] tx bcast bytes", + "[hw] rx filtered frames", + "[hw] rx discarded frames", + "[hw] rx nobuffer discards", + "[hw] tx discarded frames", + "[hw] tx confirmed frames", +}; + +#define DPAA2_ETH_NUM_STATS ARRAY_SIZE(dpaa2_ethtool_stats) + +static char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = { + /* per-cpu stats */ + "[drv] tx conf frames", + "[drv] tx conf bytes", + "[drv] tx sg frames", + "[drv] tx sg bytes", + "[drv] tx realloc frames", + "[drv] rx sg frames", + "[drv] rx sg bytes", + "[drv] enqueue portal busy", + /* Channel stats */ + "[drv] dequeue portal busy", + "[drv] channel pull errors", + "[drv] cdan", +}; + +#define DPAA2_ETH_NUM_EXTRA_STATS ARRAY_SIZE(dpaa2_ethtool_extras) + +static void dpaa2_eth_get_drvinfo(struct net_device *net_dev, + struct ethtool_drvinfo *drvinfo) +{ + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + + strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver)); + + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), + "%u.%u", priv->dpni_ver_major, priv->dpni_ver_minor); + + strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent), + sizeof(drvinfo->bus_info)); +} + +static int +dpaa2_eth_get_link_ksettings(struct net_device *net_dev, + struct ethtool_link_ksettings *link_settings) +{ + struct dpni_link_state state = {0}; + int err = 0; + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + + err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state); + if (err) { + netdev_err(net_dev, "ERROR %d getting link state\n", err); + goto out; + } + + /* At the moment, we have no way of interrogating the DPMAC + * from the DPNI side - and for that matter there may exist + * no DPMAC at all. So for now we just don't report anything + * beyond the DPNI attributes. + */ + if (state.options & DPNI_LINK_OPT_AUTONEG) + link_settings->base.autoneg = AUTONEG_ENABLE; + if (!(state.options & DPNI_LINK_OPT_HALF_DUPLEX)) + link_settings->base.duplex = DUPLEX_FULL; + link_settings->base.speed = state.rate; + +out: + return err; +} + +#define DPNI_DYNAMIC_LINK_SET_VER_MAJOR 7 +#define DPNI_DYNAMIC_LINK_SET_VER_MINOR 1 +static int +dpaa2_eth_set_link_ksettings(struct net_device *net_dev, + const struct ethtool_link_ksettings *link_settings) +{ + struct dpni_link_cfg cfg = {0}; + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + int err = 0; + + /* If using an older MC version, the DPNI must be down + * in order to be able to change link settings. Taking steps to let + * the user know that. 
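Both dpaa2_eth_has_legacy_dist() and the dynamic-link-settings guard used here reduce to a major/minor comparison against a reference MC firmware version. A standalone illustration of that comparison logic, mirroring dpaa2_eth_cmp_dpni_ver() (the demo program itself is hypothetical):

#include <stdio.h>

/* Compare major first, then minor; negative means "older than reference" */
static int cmp_ver(int maj, int min, int ref_maj, int ref_min)
{
	if (maj == ref_maj)
		return min - ref_min;
	return maj - ref_maj;
}

int main(void)
{
	/* The flexible Rx key API needs DPNI version 7.5 or newer */
	printf("7.4 legacy? %d\n", cmp_ver(7, 4, 7, 5) < 0); /* 1 */
	printf("7.5 legacy? %d\n", cmp_ver(7, 5, 7, 5) < 0); /* 0 */
	printf("8.0 legacy? %d\n", cmp_ver(8, 0, 7, 5) < 0); /* 0 */
	return 0;
}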
+ */ + if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_DYNAMIC_LINK_SET_VER_MAJOR, + DPNI_DYNAMIC_LINK_SET_VER_MINOR) < 0) { + if (netif_running(net_dev)) { + netdev_info(net_dev, "Interface must be brought down first.\n"); + return -EACCES; + } + } + + cfg.rate = link_settings->base.speed; + if (link_settings->base.autoneg == AUTONEG_ENABLE) + cfg.options |= DPNI_LINK_OPT_AUTONEG; + else + cfg.options &= ~DPNI_LINK_OPT_AUTONEG; + if (link_settings->base.duplex == DUPLEX_HALF) + cfg.options |= DPNI_LINK_OPT_HALF_DUPLEX; + else + cfg.options &= ~DPNI_LINK_OPT_HALF_DUPLEX; + + err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg); + if (err) + /* ethtool will be loud enough if we return an error; no point + * in putting our own error message on the console by default + */ + netdev_dbg(net_dev, "ERROR %d setting link cfg\n", err); + + return err; +} + +static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < DPAA2_ETH_NUM_STATS; i++) { + strlcpy(p, dpaa2_ethtool_stats[i], ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < DPAA2_ETH_NUM_EXTRA_STATS; i++) { + strlcpy(p, dpaa2_ethtool_extras[i], ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + break; + } +} + +static int dpaa2_eth_get_sset_count(struct net_device *net_dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: /* ethtool_get_stats(), ethtool_get_drvinfo() */ + return DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS; + default: + return -EOPNOTSUPP; + } +} + +/** Fill in hardware counters, as returned by MC. + */ +static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev, + struct ethtool_stats *stats, + u64 *data) +{ + int i = 0; + int j, k, err; + int num_cnt; + union dpni_statistics dpni_stats; + u64 cdan = 0; + u64 portal_busy = 0, pull_err = 0; + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + struct dpaa2_eth_drv_stats *extras; + struct dpaa2_eth_ch_stats *ch_stats; + + memset(data, 0, + sizeof(u64) * (DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS)); + + /* Print standard counters, from DPNI statistics */ + for (j = 0; j <= 2; j++) { + err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token, + j, &dpni_stats); + if (err != 0) + netdev_warn(net_dev, "dpni_get_stats(%d) failed\n", j); + switch (j) { + case 0: + num_cnt = sizeof(dpni_stats.page_0) / sizeof(u64); + break; + case 1: + num_cnt = sizeof(dpni_stats.page_1) / sizeof(u64); + break; + case 2: + num_cnt = sizeof(dpni_stats.page_2) / sizeof(u64); + break; + } + for (k = 0; k < num_cnt; k++) + *(data + i++) = dpni_stats.raw.counter[k]; + } + + /* Print per-cpu extra stats */ + for_each_online_cpu(k) { + extras = per_cpu_ptr(priv->percpu_extras, k); + for (j = 0; j < sizeof(*extras) / sizeof(__u64); j++) + *((__u64 *)data + i + j) += *((__u64 *)extras + j); + } + i += j; + + for (j = 0; j < priv->num_channels; j++) { + ch_stats = &priv->channel[j]->stats; + cdan += ch_stats->cdan; + portal_busy += ch_stats->dequeue_portal_busy; + pull_err += ch_stats->pull_err; + } + + *(data + i++) = portal_busy; + *(data + i++) = pull_err; + *(data + i++) = cdan; +} + +static int prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask, + void *key, void *mask) +{ + int off; + + if (eth_mask->h_proto) { + off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE); + *(__be16 *)(key + off) = eth_value->h_proto; + *(__be16 *)(mask + off) = eth_mask->h_proto; + } + + if (!is_zero_ether_addr(eth_mask->h_source)) { + off = 
dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_SA); + ether_addr_copy(key + off, eth_value->h_source); + ether_addr_copy(mask + off, eth_mask->h_source); + } + + if (!is_zero_ether_addr(eth_mask->h_dest)) { + off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA); + ether_addr_copy(key + off, eth_value->h_dest); + ether_addr_copy(mask + off, eth_mask->h_dest); + } + + return 0; +} + +static int prep_uip_rule(struct ethtool_usrip4_spec *uip_value, + struct ethtool_usrip4_spec *uip_mask, + void *key, void *mask) +{ + int off; + u32 tmp_value, tmp_mask; + + if (uip_mask->tos || uip_mask->ip_ver) + return -EOPNOTSUPP; + + if (uip_mask->ip4src) { + off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC); + *(__be32 *)(key + off) = uip_value->ip4src; + *(__be32 *)(mask + off) = uip_mask->ip4src; + } + + if (uip_mask->ip4dst) { + off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST); + *(__be32 *)(key + off) = uip_value->ip4dst; + *(__be32 *)(mask + off) = uip_mask->ip4dst; + } + + if (uip_mask->proto) { + off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO); + *(u8 *)(key + off) = uip_value->proto; + *(u8 *)(mask + off) = uip_mask->proto; + } + + if (uip_mask->l4_4_bytes) { + tmp_value = be32_to_cpu(uip_value->l4_4_bytes); + tmp_mask = be32_to_cpu(uip_mask->l4_4_bytes); + + off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC); + *(__be16 *)(key + off) = htons(tmp_value >> 16); + *(__be16 *)(mask + off) = htons(tmp_mask >> 16); + + off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST); + *(__be16 *)(key + off) = htons(tmp_value & 0xFFFF); + *(__be16 *)(mask + off) = htons(tmp_mask & 0xFFFF); + } + + /* Only apply the rule for IPv4 frames */ + off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE); + *(__be16 *)(key + off) = htons(ETH_P_IP); + *(__be16 *)(mask + off) = htons(0xFFFF); + + return 0; +} + +static int prep_l4_rule(struct ethtool_tcpip4_spec *l4_value, + struct ethtool_tcpip4_spec *l4_mask, + void *key, void *mask, u8 l4_proto) +{ + int off; + + if (l4_mask->tos) + return -EOPNOTSUPP; + + if (l4_mask->ip4src) { + off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC); + *(__be32 *)(key + off) = l4_value->ip4src; + *(__be32 *)(mask + off) = l4_mask->ip4src; + } + + if (l4_mask->ip4dst) { + off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST); + *(__be32 *)(key + off) = l4_value->ip4dst; + *(__be32 *)(mask + off) = l4_mask->ip4dst; + } + + if (l4_mask->psrc) { + off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC); + *(__be16 *)(key + off) = l4_value->psrc; + *(__be16 *)(mask + off) = l4_mask->psrc; + } + + if (l4_mask->pdst) { + off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST); + *(__be16 *)(key + off) = l4_value->pdst; + *(__be16 *)(mask + off) = l4_mask->pdst; + } + + /* Only apply the rule for IPv4 frames with the specified L4 proto */ + off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE); + *(__be16 *)(key + off) = htons(ETH_P_IP); + *(__be16 *)(mask + off) = htons(0xFFFF); + + off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO); + *(u8 *)(key + off) = l4_proto; + *(u8 *)(mask + off) = 0xFF; + + return 0; +} + +static int prep_ext_rule(struct ethtool_flow_ext *ext_value, + struct ethtool_flow_ext *ext_mask, + void *key, void *mask) +{ + int off; + + if (ext_mask->vlan_etype) + return -EOPNOTSUPP; + + if (ext_mask->vlan_tci) { + off = dpaa2_eth_cls_fld_off(NET_PROT_VLAN, NH_FLD_VLAN_TCI); + *(__be16 *)(key + off) = ext_value->vlan_tci; + *(__be16 *)(mask + off) = ext_mask->vlan_tci; + } + + return 0; +} + 
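Each prep_*_rule() helper above writes a matched field into the key buffer at the offset dpaa2_eth_cls_fld_off() computes, mirroring the same bytes into a parallel mask buffer. A standalone illustration of the running-offset layout; the IPv4/L4 entry sizes are assumed to follow the usual 4/4/1/2/2 pattern, since the tail of dist_fields[] is truncated in the listing above:

#include <stdio.h>
#include <string.h>

/* Key layout = concatenation of dist_fields[] sizes:
 * ETH_DA(6) ETH_SA(6) ETH_TYPE(2) VLAN_TCI(2)
 * IP_SRC(4) IP_DST(4) IP_PROTO(1) L4_SRC(2) L4_DST(2)   <- sizes assumed
 */
static const int field_size[] = { 6, 6, 2, 2, 4, 4, 1, 2, 2 };
#define NUM_FIELDS (int)(sizeof(field_size) / sizeof(field_size[0]))

int main(void)
{
	unsigned char key[32] = { 0 }, mask[32] = { 0 };
	int i, off = 0, l4_dst_off = 0;

	for (i = 0; i < NUM_FIELDS; i++) {
		if (i == NUM_FIELDS - 1)	/* L4 destination port */
			l4_dst_off = off;
		off += field_size[i];
	}
	printf("key size %d bytes, L4 dst port at offset %d\n",
	       off, l4_dst_off);	/* 29 and 27 */

	/* Match TCP port 80: big-endian value in key, all-ones in mask */
	key[l4_dst_off] = 0x00;
	key[l4_dst_off + 1] = 0x50;
	memset(mask + l4_dst_off, 0xff, 2);
	return 0;
}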
+static int prep_mac_ext_rule(struct ethtool_flow_ext *ext_value, + struct ethtool_flow_ext *ext_mask, + void *key, void *mask) +{ + int off; + + if (!is_zero_ether_addr(ext_mask->h_dest)) { + off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA); + ether_addr_copy(key + off, ext_value->h_dest); + ether_addr_copy(mask + off, ext_mask->h_dest); + } + + return 0; +} + +static int prep_cls_rule(struct ethtool_rx_flow_spec *fs, void *key, void *mask) +{ + int err; + + switch (fs->flow_type & 0xFF) { + case ETHER_FLOW: + err = prep_eth_rule(&fs->h_u.ether_spec, &fs->m_u.ether_spec, + key, mask); + break; + case IP_USER_FLOW: + err = prep_uip_rule(&fs->h_u.usr_ip4_spec, + &fs->m_u.usr_ip4_spec, key, mask); + break; + case TCP_V4_FLOW: + err = prep_l4_rule(&fs->h_u.tcp_ip4_spec, &fs->m_u.tcp_ip4_spec, + key, mask, IPPROTO_TCP); + break; + case UDP_V4_FLOW: + err = prep_l4_rule(&fs->h_u.udp_ip4_spec, &fs->m_u.udp_ip4_spec, + key, mask, IPPROTO_UDP); + break; + case SCTP_V4_FLOW: + err = prep_l4_rule(&fs->h_u.sctp_ip4_spec, + &fs->m_u.sctp_ip4_spec, key, mask, + IPPROTO_SCTP); + break; + default: + return -EOPNOTSUPP; + } + + if (err) + return err; + + if (fs->flow_type & FLOW_EXT) { + err = prep_ext_rule(&fs->h_ext, &fs->m_ext, key, mask); + if (err) + return err; + } + + if (fs->flow_type & FLOW_MAC_EXT) { + err = prep_mac_ext_rule(&fs->h_ext, &fs->m_ext, key, mask); + if (err) + return err; + } + + return 0; +} + +static int do_cls_rule(struct net_device *net_dev, + struct ethtool_rx_flow_spec *fs, + bool add) +{ + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + struct device *dev = net_dev->dev.parent; + struct dpni_rule_cfg rule_cfg = { 0 }; + struct dpni_fs_action_cfg fs_act = { 0 }; + dma_addr_t key_iova; + void *key_buf; + int err; + + if (fs->ring_cookie != RX_CLS_FLOW_DISC && + fs->ring_cookie >= dpaa2_eth_queue_count(priv)) + return -EINVAL; + + rule_cfg.key_size = dpaa2_eth_cls_key_size(); + + /* allocate twice the key size, for the actual key and for mask */ + key_buf = kzalloc(rule_cfg.key_size * 2, GFP_KERNEL); + if (!key_buf) + return -ENOMEM; + + /* Fill the key and mask memory areas */ + err = prep_cls_rule(fs, key_buf, key_buf + rule_cfg.key_size); + if (err) + goto free_mem; + + key_iova = dma_map_single(dev, key_buf, rule_cfg.key_size * 2, + DMA_TO_DEVICE); + if (dma_mapping_error(dev, key_iova)) { + err = -ENOMEM; + goto free_mem; + } + + rule_cfg.key_iova = key_iova; + rule_cfg.mask_iova = key_iova + rule_cfg.key_size; + + if (add) { + if (fs->ring_cookie == RX_CLS_FLOW_DISC) + fs_act.options |= DPNI_FS_OPT_DISCARD; + else + fs_act.flow_id = fs->ring_cookie; + err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token, 0, + fs->location, &rule_cfg, &fs_act); + } else { + err = dpni_remove_fs_entry(priv->mc_io, 0, priv->mc_token, 0, + &rule_cfg); + } + + dma_unmap_single(dev, key_iova, rule_cfg.key_size * 2, DMA_TO_DEVICE); + +free_mem: + kfree(key_buf); + + return err; +} + +static int update_cls_rule(struct net_device *net_dev, + struct ethtool_rx_flow_spec *new_fs, + int location) +{ + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + struct dpaa2_eth_cls_rule *rule; + int err = -EINVAL; + + if (!priv->rx_cls_enabled) + return -EOPNOTSUPP; + + if (location >= dpaa2_eth_fs_count(priv)) + return -EINVAL; + + rule = &priv->cls_rules[location]; + + /* If a rule is present at the specified location, delete it. 
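do_cls_rule() above packs the key and its mask back to back in a single allocation and maps both with one dma_map_single() call, pointing the firmware command at iova and iova + key_size. A sketch of that pattern (function and variable names hypothetical):

#include <linux/dma-mapping.h>
#include <linux/slab.h>

static int foo_add_rule(struct device *dev, size_t key_size)
{
	dma_addr_t iova;
	void *buf;
	int err = 0;

	/* One buffer, twice the key size: key first, mask right after */
	buf = kzalloc(key_size * 2, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* fill buf (key) and buf + key_size (mask) here */

	iova = dma_map_single(dev, buf, key_size * 2, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, iova)) {
		err = -ENOMEM;
		goto free_buf;
	}

	/* issue the firmware command with iova / iova + key_size here */

	dma_unmap_single(dev, iova, key_size * 2, DMA_TO_DEVICE);
free_buf:
	kfree(buf);
	return err;
}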
*/ + if (rule->in_use) { + err = do_cls_rule(net_dev, &rule->fs, false); + if (err) + return err; + + rule->in_use = 0; + } + + /* If no new entry to add, return here */ + if (!new_fs) + return err; + + err = do_cls_rule(net_dev, new_fs, true); + if (err) + return err; + + rule->in_use = 1; + rule->fs = *new_fs; + + return 0; +} + +static int dpaa2_eth_get_rxnfc(struct net_device *net_dev, + struct ethtool_rxnfc *rxnfc, u32 *rule_locs) +{ + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + int max_rules = dpaa2_eth_fs_count(priv); + int i, j = 0; + + switch (rxnfc->cmd) { + case ETHTOOL_GRXFH: + /* we purposely ignore cmd->flow_type for now, because the + * classifier only supports a single set of fields for all + * protocols + */ + rxnfc->data = priv->rx_hash_fields; + break; + case ETHTOOL_GRXRINGS: + rxnfc->data = dpaa2_eth_queue_count(priv); + break; + case ETHTOOL_GRXCLSRLCNT: + rxnfc->rule_cnt = 0; + for (i = 0; i < max_rules; i++) + if (priv->cls_rules[i].in_use) + rxnfc->rule_cnt++; + rxnfc->data = max_rules; + break; + case ETHTOOL_GRXCLSRULE: + if (rxnfc->fs.location >= max_rules) + return -EINVAL; + if (!priv->cls_rules[rxnfc->fs.location].in_use) + return -EINVAL; + rxnfc->fs = priv->cls_rules[rxnfc->fs.location].fs; + break; + case ETHTOOL_GRXCLSRLALL: + for (i = 0; i < max_rules; i++) { + if (!priv->cls_rules[i].in_use) + continue; + if (j == rxnfc->rule_cnt) + return -EMSGSIZE; + rule_locs[j++] = i; + } + rxnfc->rule_cnt = j; + rxnfc->data = max_rules; + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static int dpaa2_eth_set_rxnfc(struct net_device *net_dev, + struct ethtool_rxnfc *rxnfc) +{ + int err = 0; + + switch (rxnfc->cmd) { + case ETHTOOL_SRXFH: + if ((rxnfc->data & DPAA2_RXH_SUPPORTED) != rxnfc->data) + return -EOPNOTSUPP; + err = dpaa2_eth_set_hash(net_dev, rxnfc->data); + break; + case ETHTOOL_SRXCLSRLINS: + err = update_cls_rule(net_dev, &rxnfc->fs, rxnfc->fs.location); + break; + case ETHTOOL_SRXCLSRLDEL: + err = update_cls_rule(net_dev, NULL, rxnfc->fs.location); + break; + default: + err = -EOPNOTSUPP; + } + + return err; +} + +int dpaa2_phc_index = -1; +EXPORT_SYMBOL(dpaa2_phc_index); + +static int dpaa2_eth_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + info->phc_index = dpaa2_phc_index; + + info->tx_types = (1 << HWTSTAMP_TX_OFF) | + (1 << HWTSTAMP_TX_ON); + + info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_ALL); + return 0; +} + +const struct ethtool_ops dpaa2_ethtool_ops = { + .get_drvinfo = dpaa2_eth_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_link_ksettings = dpaa2_eth_get_link_ksettings, + .set_link_ksettings = dpaa2_eth_set_link_ksettings, + .get_sset_count = dpaa2_eth_get_sset_count, + .get_ethtool_stats = dpaa2_eth_get_ethtool_stats, + .get_strings = dpaa2_eth_get_strings, + .get_rxnfc = dpaa2_eth_get_rxnfc, + .set_rxnfc = dpaa2_eth_set_rxnfc, + .get_ts_info = dpaa2_eth_get_ts_info, +}; diff --git a/drivers/staging/fsl-dpaa2/rtc/rtc.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c index 0d52cb85441f..84b942b1eccc 100644 --- a/drivers/staging/fsl-dpaa2/rtc/rtc.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c @@ -9,10 +9,10 @@ #include <linux/ptp_clock_kernel.h> #include <linux/fsl/mc.h> -#include "rtc.h" +#include "dpaa2-ptp.h" struct ptp_dpaa2_priv { - struct fsl_mc_device *rtc_mc_dev; + struct fsl_mc_device *ptp_mc_dev; struct 
ptp_clock *clock; struct ptp_clock_info caps; u32 freq_comp; @@ -23,7 +23,7 @@ static int ptp_dpaa2_adjfreq(struct ptp_clock_info *ptp, s32 ppb) { struct ptp_dpaa2_priv *ptp_dpaa2 = container_of(ptp, struct ptp_dpaa2_priv, caps); - struct fsl_mc_device *mc_dev = ptp_dpaa2->rtc_mc_dev; + struct fsl_mc_device *mc_dev = ptp_dpaa2->ptp_mc_dev; struct device *dev = &mc_dev->dev; u64 adj; u32 diff, tmr_add; @@ -46,14 +46,14 @@ static int ptp_dpaa2_adjfreq(struct ptp_clock_info *ptp, s32 ppb) mc_dev->mc_handle, tmr_add); if (err) dev_err(dev, "dprtc_set_freq_compensation err %d\n", err); - return 0; + return err; } static int ptp_dpaa2_adjtime(struct ptp_clock_info *ptp, s64 delta) { struct ptp_dpaa2_priv *ptp_dpaa2 = container_of(ptp, struct ptp_dpaa2_priv, caps); - struct fsl_mc_device *mc_dev = ptp_dpaa2->rtc_mc_dev; + struct fsl_mc_device *mc_dev = ptp_dpaa2->ptp_mc_dev; struct device *dev = &mc_dev->dev; s64 now; int err = 0; @@ -61,24 +61,22 @@ static int ptp_dpaa2_adjtime(struct ptp_clock_info *ptp, s64 delta) err = dprtc_get_time(mc_dev->mc_io, 0, mc_dev->mc_handle, &now); if (err) { dev_err(dev, "dprtc_get_time err %d\n", err); - return 0; + return err; } now += delta; err = dprtc_set_time(mc_dev->mc_io, 0, mc_dev->mc_handle, now); - if (err) { + if (err) dev_err(dev, "dprtc_set_time err %d\n", err); - return 0; - } - return 0; + return err; } static int ptp_dpaa2_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) { struct ptp_dpaa2_priv *ptp_dpaa2 = container_of(ptp, struct ptp_dpaa2_priv, caps); - struct fsl_mc_device *mc_dev = ptp_dpaa2->rtc_mc_dev; + struct fsl_mc_device *mc_dev = ptp_dpaa2->ptp_mc_dev; struct device *dev = &mc_dev->dev; u64 ns; u32 remainder; @@ -87,12 +85,12 @@ static int ptp_dpaa2_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) err = dprtc_get_time(mc_dev->mc_io, 0, mc_dev->mc_handle, &ns); if (err) { dev_err(dev, "dprtc_get_time err %d\n", err); - return 0; + return err; } ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder); ts->tv_nsec = remainder; - return 0; + return err; } static int ptp_dpaa2_settime(struct ptp_clock_info *ptp, @@ -100,7 +98,7 @@ static int ptp_dpaa2_settime(struct ptp_clock_info *ptp, { struct ptp_dpaa2_priv *ptp_dpaa2 = container_of(ptp, struct ptp_dpaa2_priv, caps); - struct fsl_mc_device *mc_dev = ptp_dpaa2->rtc_mc_dev; + struct fsl_mc_device *mc_dev = ptp_dpaa2->ptp_mc_dev; struct device *dev = &mc_dev->dev; u64 ns; int err = 0; @@ -111,10 +109,10 @@ static int ptp_dpaa2_settime(struct ptp_clock_info *ptp, err = dprtc_set_time(mc_dev->mc_io, 0, mc_dev->mc_handle, ns); if (err) dev_err(dev, "dprtc_set_time err %d\n", err); - return 0; + return err; } -static struct ptp_clock_info ptp_dpaa2_caps = { +static const struct ptp_clock_info ptp_dpaa2_caps = { .owner = THIS_MODULE, .name = "DPAA2 PTP Clock", .max_adj = 512000, @@ -129,14 +127,14 @@ static struct ptp_clock_info ptp_dpaa2_caps = { .settime64 = ptp_dpaa2_settime, }; -static int rtc_probe(struct fsl_mc_device *mc_dev) +static int dpaa2_ptp_probe(struct fsl_mc_device *mc_dev) { struct device *dev = &mc_dev->dev; struct ptp_dpaa2_priv *ptp_dpaa2; u32 tmr_add = 0; int err; - ptp_dpaa2 = kzalloc(sizeof(*ptp_dpaa2), GFP_KERNEL); + ptp_dpaa2 = devm_kzalloc(dev, sizeof(*ptp_dpaa2), GFP_KERNEL); if (!ptp_dpaa2) return -ENOMEM; @@ -153,7 +151,7 @@ static int rtc_probe(struct fsl_mc_device *mc_dev) goto err_free_mcp; } - ptp_dpaa2->rtc_mc_dev = mc_dev; + ptp_dpaa2->ptp_mc_dev = mc_dev; err = dprtc_get_freq_compensation(mc_dev->mc_io, 0, mc_dev->mc_handle, &tmr_add); @@ 
-182,12 +180,10 @@ err_close: err_free_mcp: fsl_mc_portal_free(mc_dev->mc_io); err_exit: - kfree(ptp_dpaa2); - dev_set_drvdata(dev, NULL); return err; } -static int rtc_remove(struct fsl_mc_device *mc_dev) +static int dpaa2_ptp_remove(struct fsl_mc_device *mc_dev) { struct ptp_dpaa2_priv *ptp_dpaa2; struct device *dev = &mc_dev->dev; @@ -198,32 +194,29 @@ static int rtc_remove(struct fsl_mc_device *mc_dev) dprtc_close(mc_dev->mc_io, 0, mc_dev->mc_handle); fsl_mc_portal_free(mc_dev->mc_io); - kfree(ptp_dpaa2); - dev_set_drvdata(dev, NULL); - return 0; } -static const struct fsl_mc_device_id rtc_match_id_table[] = { +static const struct fsl_mc_device_id dpaa2_ptp_match_id_table[] = { { .vendor = FSL_MC_VENDOR_FREESCALE, .obj_type = "dprtc", }, {} }; -MODULE_DEVICE_TABLE(fslmc, rtc_match_id_table); +MODULE_DEVICE_TABLE(fslmc, dpaa2_ptp_match_id_table); -static struct fsl_mc_driver rtc_drv = { +static struct fsl_mc_driver dpaa2_ptp_drv = { .driver = { .name = KBUILD_MODNAME, .owner = THIS_MODULE, }, - .probe = rtc_probe, - .remove = rtc_remove, - .match_id_table = rtc_match_id_table, + .probe = dpaa2_ptp_probe, + .remove = dpaa2_ptp_remove, + .match_id_table = dpaa2_ptp_match_id_table, }; -module_fsl_mc_driver(rtc_drv); +module_fsl_mc_driver(dpaa2_ptp_drv); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("DPAA2 PTP Clock Driver"); diff --git a/drivers/staging/fsl-dpaa2/rtc/rtc.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.h index ff2e177395d4..ff2e177395d4 100644 --- a/drivers/staging/fsl-dpaa2/rtc/rtc.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.h diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpkg.h b/drivers/net/ethernet/freescale/dpaa2/dpkg.h index 6de613b13e4d..6de613b13e4d 100644 --- a/drivers/staging/fsl-dpaa2/ethernet/dpkg.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpkg.h diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h index 83698abce8b4..7b44d7d9b19a 100644 --- a/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h @@ -82,6 +82,9 @@ #define DPNI_CMDID_GET_OFFLOAD DPNI_CMD(0x26B) #define DPNI_CMDID_SET_OFFLOAD DPNI_CMD(0x26C) +#define DPNI_CMDID_SET_RX_FS_DIST DPNI_CMD(0x273) +#define DPNI_CMDID_SET_RX_HASH_DIST DPNI_CMD(0x274) + /* Macros for accessing command fields smaller than 1byte */ #define DPNI_MASK(field) \ GENMASK(DPNI_##field##_SHIFT + DPNI_##field##_SIZE - 1, \ @@ -515,4 +518,52 @@ struct dpni_rsp_get_api_version { __le16 minor; }; +#define DPNI_RX_FS_DIST_ENABLE_SHIFT 0 +#define DPNI_RX_FS_DIST_ENABLE_SIZE 1 +struct dpni_cmd_set_rx_fs_dist { + __le16 dist_size; + u8 enable; + u8 tc; + __le16 miss_flow_id; + __le16 pad; + __le64 key_cfg_iova; +}; + +#define DPNI_RX_HASH_DIST_ENABLE_SHIFT 0 +#define DPNI_RX_HASH_DIST_ENABLE_SIZE 1 +struct dpni_cmd_set_rx_hash_dist { + __le16 dist_size; + u8 enable; + u8 tc; + __le32 pad; + __le64 key_cfg_iova; +}; + +struct dpni_cmd_add_fs_entry { + /* cmd word 0 */ + __le16 options; + u8 tc_id; + u8 key_size; + __le16 index; + __le16 flow_id; + /* cmd word 1 */ + __le64 key_iova; + /* cmd word 2 */ + __le64 mask_iova; + /* cmd word 3 */ + __le64 flc; +}; + +struct dpni_cmd_remove_fs_entry { + /* cmd word 0 */ + __le16 pad0; + u8 tc_id; + u8 key_size; + __le32 pad1; + /* cmd word 1 */ + __le64 key_iova; + /* cmd word 2 */ + __le64 mask_iova; +}; + #endif /* _FSL_DPNI_CMD_H */ diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpni.c b/drivers/net/ethernet/freescale/dpaa2/dpni.c index d6ac26797cec..220dfc806a24 100644 --- 
a/drivers/staging/fsl-dpaa2/ethernet/dpni.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpni.c @@ -1598,3 +1598,155 @@ int dpni_get_api_version(struct fsl_mc_io *mc_io, return 0; } + +/** + * dpni_set_rx_fs_dist() - Set Rx flow steering distribution + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @cfg: Distribution configuration + * + * If the FS is already enabled with a previous call the classification + * key will be changed but all the table rules are kept. If the + * existing rules do not match the key the results will not be + * predictable. It is the user responsibility to keep key integrity. + * If cfg.enable is set to 1 the command will create a flow steering table + * and will classify packets according to this table. The packets that + * miss all the table rules will be classified according to settings + * made in dpni_set_rx_hash_dist() + * If cfg.enable is set to 0 the command will clear flow steering table. + * The packets will be classified according to settings made in + * dpni_set_rx_hash_dist() + */ +int dpni_set_rx_fs_dist(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + const struct dpni_rx_dist_cfg *cfg) +{ + struct dpni_cmd_set_rx_fs_dist *cmd_params; + struct fsl_mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_FS_DIST, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_rx_fs_dist *)cmd.params; + cmd_params->dist_size = cpu_to_le16(cfg->dist_size); + dpni_set_field(cmd_params->enable, RX_FS_DIST_ENABLE, cfg->enable); + cmd_params->tc = cfg->tc; + cmd_params->miss_flow_id = cpu_to_le16(cfg->fs_miss_flow_id); + cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_set_rx_hash_dist() - Set Rx hash distribution + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @cfg: Distribution configuration + * If cfg.enable is set to 1 the packets will be classified using a hash + * function based on the key received in cfg.key_cfg_iova parameter. + * If cfg.enable is set to 0 the packets will be sent to the default queue + */ +int dpni_set_rx_hash_dist(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + const struct dpni_rx_dist_cfg *cfg) +{ + struct dpni_cmd_set_rx_hash_dist *cmd_params; + struct fsl_mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_HASH_DIST, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_rx_hash_dist *)cmd.params; + cmd_params->dist_size = cpu_to_le16(cfg->dist_size); + dpni_set_field(cmd_params->enable, RX_HASH_DIST_ENABLE, cfg->enable); + cmd_params->tc = cfg->tc; + cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_add_fs_entry() - Add Flow Steering entry for a specific traffic class + * (to select a flow ID) + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @tc_id: Traffic class selection (0-7) + * @index: Location in the FS table where to insert the entry. + * Only relevant if MASKING is enabled for FS + * classification on this DPNI, it is ignored for exact match. 
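
A minimal caller-side sketch of the new Rx distribution API, assuming an already-open DPNI (mc_io/token) and a DMA-able key-composition buffer prepared with dpni_prepare_key_cfg(); the wrapper name is hypothetical:

	#include "dpni.h"

	/* Enable flow steering on TC 0, dropping unclassified frames. */
	static int example_enable_rx_fs(struct fsl_mc_io *mc_io, u16 token,
					u64 key_cfg_iova, u16 num_queues)
	{
		struct dpni_rx_dist_cfg dist_cfg = {
			.enable = 1,
			.tc = 0,
			.dist_size = num_queues,
			.key_cfg_iova = key_cfg_iova,
			/* no hash fallback: frames missing every rule are dropped */
			.fs_miss_flow_id = DPNI_FS_MISS_DROP,
		};

		return dpni_set_rx_fs_dist(mc_io, 0, token, &dist_cfg);
	}

dpni_set_rx_hash_dist() takes the same structure; enabling it instead sends frames that miss every steering rule to a hash-selected queue rather than the miss action.
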
+ * @cfg: Flow steering rule to add + * @action: Action to be taken as result of a classification hit + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_add_fs_entry(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 tc_id, + u16 index, + const struct dpni_rule_cfg *cfg, + const struct dpni_fs_action_cfg *action) +{ + struct dpni_cmd_add_fs_entry *cmd_params; + struct fsl_mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_FS_ENT, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_add_fs_entry *)cmd.params; + cmd_params->tc_id = tc_id; + cmd_params->key_size = cfg->key_size; + cmd_params->index = cpu_to_le16(index); + cmd_params->key_iova = cpu_to_le64(cfg->key_iova); + cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova); + cmd_params->options = cpu_to_le16(action->options); + cmd_params->flow_id = cpu_to_le16(action->flow_id); + cmd_params->flc = cpu_to_le64(action->flc); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_remove_fs_entry() - Remove Flow Steering entry from a specific + * traffic class + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @tc_id: Traffic class selection (0-7) + * @cfg: Flow steering rule to remove + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_remove_fs_entry(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 tc_id, + const struct dpni_rule_cfg *cfg) +{ + struct dpni_cmd_remove_fs_entry *cmd_params; + struct fsl_mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_FS_ENT, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_remove_fs_entry *)cmd.params; + cmd_params->tc_id = tc_id; + cmd_params->key_size = cfg->key_size; + cmd_params->key_iova = cpu_to_le64(cfg->key_iova); + cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpni.h b/drivers/net/ethernet/freescale/dpaa2/dpni.h index b378a00c7c53..a521242e2353 100644 --- a/drivers/staging/fsl-dpaa2/ethernet/dpni.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpni.h @@ -629,6 +629,45 @@ int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io, const struct dpni_rx_tc_dist_cfg *cfg); /** + * When used for fs_miss_flow_id in function dpni_set_rx_dist, + * will signal to dpni to drop all unclassified frames + */ +#define DPNI_FS_MISS_DROP ((uint16_t)-1) + +/** + * struct dpni_rx_dist_cfg - Rx distribution configuration + * @dist_size: distribution size + * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with + * the extractions to be used for the distribution key by calling + * dpni_prepare_key_cfg(); relevant only when enable!=0 otherwise + * it can be '0' + * @enable: enable/disable the distribution. + * @tc: TC id for which distribution is set + * @fs_miss_flow_id: when packet misses all rules from flow steering table and + * hash is disabled it will be put into this queue id; use + * DPNI_FS_MISS_DROP to drop frames. 
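
Inserting a rule then follows the usual two-buffer pattern, with key and mask laid out in DMA-able memory; a sketch, with the iova parameters assumed to be already mapped:

	/* Steer TC 0 traffic matching key/mask to Rx queue 3. */
	static int example_add_rule(struct fsl_mc_io *mc_io, u16 token,
				    u64 key_iova, u64 mask_iova,
				    u8 key_size, u16 index)
	{
		struct dpni_rule_cfg rule = {
			.key_iova = key_iova,
			.mask_iova = mask_iova,
			.key_size = key_size,
		};
		struct dpni_fs_action_cfg action = {
			.flow_id = 3,	/* must be below num_queues */
		};

		return dpni_add_fs_entry(mc_io, 0, token, 0, index,
					 &rule, &action);
	}

Setting DPNI_FS_OPT_DISCARD in action.options would drop matching traffic instead, taking precedence over the flow_id.
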
The value of this field is + * used only when flow steering distribution is enabled and hash + * distribution is disabled + */ +struct dpni_rx_dist_cfg { + u16 dist_size; + u64 key_cfg_iova; + u8 enable; + u8 tc; + u16 fs_miss_flow_id; +}; + +int dpni_set_rx_fs_dist(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + const struct dpni_rx_dist_cfg *cfg); + +int dpni_set_rx_hash_dist(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + const struct dpni_rx_dist_cfg *cfg); + +/** * enum dpni_dest - DPNI destination types * @DPNI_DEST_NONE: Unassigned destination; The queue is set in parked mode and * does not generate FQDAN notifications; user is expected to @@ -816,6 +855,64 @@ struct dpni_rule_cfg { u8 key_size; }; +/** + * Discard matching traffic. If set, this takes precedence over any other + * configuration and matching traffic is always discarded. + */ + #define DPNI_FS_OPT_DISCARD 0x1 + +/** + * Set FLC value. If set, flc member of struct dpni_fs_action_cfg is used to + * override the FLC value set per queue. + * For more details check the Frame Descriptor section in the hardware + * documentation. + */ +#define DPNI_FS_OPT_SET_FLC 0x2 + +/** + * Indicates whether the 6 lowest significant bits of FLC are used for stash + * control. If set, the 6 least significant bits in value are interpreted as + * follows: + * - bits 0-1: indicates the number of 64 byte units of context that are + * stashed. FLC value is interpreted as a memory address in this case, + * excluding the 6 LS bits. + * - bits 2-3: indicates the number of 64 byte units of frame annotation + * to be stashed. Annotation is placed at FD[ADDR]. + * - bits 4-5: indicates the number of 64 byte units of frame data to be + * stashed. Frame data is placed at FD[ADDR] + FD[OFFSET]. + * This flag is ignored if DPNI_FS_OPT_SET_FLC is not specified. + */ +#define DPNI_FS_OPT_SET_STASH_CONTROL 0x4 + +/** + * struct dpni_fs_action_cfg - Action configuration for table look-up + * @flc: FLC value for traffic matching this rule. Please check the + * Frame Descriptor section in the hardware documentation for + * more information. + * @flow_id: Identifies the Rx queue used for matching traffic. Supported + * values are in range 0 to num_queue-1. + * @options: Any combination of DPNI_FS_OPT_ values. + */ +struct dpni_fs_action_cfg { + u64 flc; + u16 flow_id; + u16 options; +}; + +int dpni_add_fs_entry(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 tc_id, + u16 index, + const struct dpni_rule_cfg *cfg, + const struct dpni_fs_action_cfg *action); + +int dpni_remove_fs_entry(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 tc_id, + const struct dpni_rule_cfg *cfg); + int dpni_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 *major_ver, diff --git a/drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h new file mode 100644 index 000000000000..9af4ac71f347 --- /dev/null +++ b/drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2013-2016 Freescale Semiconductor Inc. 
+ * Copyright 2016-2018 NXP + */ + +#ifndef _FSL_DPRTC_CMD_H +#define _FSL_DPRTC_CMD_H + +/* Command versioning */ +#define DPRTC_CMD_BASE_VERSION 1 +#define DPRTC_CMD_ID_OFFSET 4 + +#define DPRTC_CMD(id) (((id) << DPRTC_CMD_ID_OFFSET) | DPRTC_CMD_BASE_VERSION) + +/* Command IDs */ +#define DPRTC_CMDID_CLOSE DPRTC_CMD(0x800) +#define DPRTC_CMDID_OPEN DPRTC_CMD(0x810) + +#define DPRTC_CMDID_SET_FREQ_COMPENSATION DPRTC_CMD(0x1d1) +#define DPRTC_CMDID_GET_FREQ_COMPENSATION DPRTC_CMD(0x1d2) +#define DPRTC_CMDID_GET_TIME DPRTC_CMD(0x1d3) +#define DPRTC_CMDID_SET_TIME DPRTC_CMD(0x1d4) + +#pragma pack(push, 1) +struct dprtc_cmd_open { + __le32 dprtc_id; +}; + +struct dprtc_get_freq_compensation { + __le32 freq_compensation; +}; + +struct dprtc_time { + __le64 time; +}; + +#pragma pack(pop) + +#endif /* _FSL_DPRTC_CMD_H */ diff --git a/drivers/net/ethernet/freescale/dpaa2/dprtc.c b/drivers/net/ethernet/freescale/dpaa2/dprtc.c new file mode 100644 index 000000000000..c13e09bc7b9d --- /dev/null +++ b/drivers/net/ethernet/freescale/dpaa2/dprtc.c @@ -0,0 +1,194 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2013-2016 Freescale Semiconductor Inc. + * Copyright 2016-2018 NXP + */ + +#include <linux/fsl/mc.h> + +#include "dprtc.h" +#include "dprtc-cmd.h" + +/** + * dprtc_open() - Open a control session for the specified object. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @dprtc_id: DPRTC unique ID + * @token: Returned token; use in subsequent API calls + * + * This function can be used to open a control session for an + * already created object; an object may have been declared in + * the DPL or by calling the dprtc_create function. + * This function returns a unique authentication token, + * associated with the specific object ID and the specific MC + * portal; this token must be used in all subsequent commands for + * this specific object + * + * Return: '0' on Success; Error code otherwise. + */ +int dprtc_open(struct fsl_mc_io *mc_io, + u32 cmd_flags, + int dprtc_id, + u16 *token) +{ + struct dprtc_cmd_open *cmd_params; + struct fsl_mc_command cmd = { 0 }; + int err; + + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_OPEN, + cmd_flags, + 0); + cmd_params = (struct dprtc_cmd_open *)cmd.params; + cmd_params->dprtc_id = cpu_to_le32(dprtc_id); + + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + *token = mc_cmd_hdr_read_token(&cmd); + + return 0; +} + +/** + * dprtc_close() - Close the control session of the object + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPRTC object + * + * After this function is called, no further operations are + * allowed on the object without opening a new control session. + * + * Return: '0' on Success; Error code otherwise. + */ +int dprtc_close(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token) +{ + struct fsl_mc_command cmd = { 0 }; + + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CLOSE, cmd_flags, + token); + + return mc_send_command(mc_io, &cmd); +} + +/** + * dprtc_set_freq_compensation() - Sets a new frequency compensation value. + * + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPRTC object + * @freq_compensation: The new frequency compensation value to set. + * + * Return: '0' on Success; Error code otherwise. 
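
On top of these wrappers the dpaa2-ptp callbacks shown earlier reduce to thin marshalling around the MC portal. A condensed sketch of the adjfreq path, assuming the driver's private struct with its cached freq_comp baseline; the key point after this series is that the dprtc error code now reaches the PTP core instead of being masked with 0:

	static int example_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
	{
		struct ptp_dpaa2_priv *priv =
			container_of(ptp, struct ptp_dpaa2_priv, caps);
		struct fsl_mc_device *mc_dev = priv->ptp_mc_dev;
		u64 adj = priv->freq_comp;
		u32 diff, tmr_add;

		diff = div_u64(adj * abs(ppb), 1000000000);
		tmr_add = ppb < 0 ? priv->freq_comp - diff
				  : priv->freq_comp + diff;

		/* propagate the MC failure instead of swallowing it */
		return dprtc_set_freq_compensation(mc_dev->mc_io, 0,
						   mc_dev->mc_handle,
						   tmr_add);
	}
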
+ */ +int dprtc_set_freq_compensation(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u32 freq_compensation) +{ + struct dprtc_get_freq_compensation *cmd_params; + struct fsl_mc_command cmd = { 0 }; + + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_FREQ_COMPENSATION, + cmd_flags, + token); + cmd_params = (struct dprtc_get_freq_compensation *)cmd.params; + cmd_params->freq_compensation = cpu_to_le32(freq_compensation); + + return mc_send_command(mc_io, &cmd); +} + +/** + * dprtc_get_freq_compensation() - Retrieves the frequency compensation value + * + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPRTC object + * @freq_compensation: Frequency compensation value + * + * Return: '0' on Success; Error code otherwise. + */ +int dprtc_get_freq_compensation(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u32 *freq_compensation) +{ + struct dprtc_get_freq_compensation *rsp_params; + struct fsl_mc_command cmd = { 0 }; + int err; + + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_FREQ_COMPENSATION, + cmd_flags, + token); + + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + rsp_params = (struct dprtc_get_freq_compensation *)cmd.params; + *freq_compensation = le32_to_cpu(rsp_params->freq_compensation); + + return 0; +} + +/** + * dprtc_get_time() - Returns the current RTC time. + * + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPRTC object + * @time: Current RTC time. + * + * Return: '0' on Success; Error code otherwise. + */ +int dprtc_get_time(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + uint64_t *time) +{ + struct dprtc_time *rsp_params; + struct fsl_mc_command cmd = { 0 }; + int err; + + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_TIME, + cmd_flags, + token); + + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + rsp_params = (struct dprtc_time *)cmd.params; + *time = le64_to_cpu(rsp_params->time); + + return 0; +} + +/** + * dprtc_set_time() - Updates current RTC time. + * + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPRTC object + * @time: New RTC time. + * + * Return: '0' on Success; Error code otherwise. + */ +int dprtc_set_time(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + uint64_t time) +{ + struct dprtc_time *cmd_params; + struct fsl_mc_command cmd = { 0 }; + + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_TIME, + cmd_flags, + token); + cmd_params = (struct dprtc_time *)cmd.params; + cmd_params->time = cpu_to_le64(time); + + return mc_send_command(mc_io, &cmd); +} diff --git a/drivers/net/ethernet/freescale/dpaa2/dprtc.h b/drivers/net/ethernet/freescale/dpaa2/dprtc.h new file mode 100644 index 000000000000..fe19618d6cdf --- /dev/null +++ b/drivers/net/ethernet/freescale/dpaa2/dprtc.h @@ -0,0 +1,45 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2013-2016 Freescale Semiconductor Inc. 
+ * Copyright 2016-2018 NXP + */ + +#ifndef __FSL_DPRTC_H +#define __FSL_DPRTC_H + +/* Data Path Real Time Counter API + * Contains initialization APIs and runtime control APIs for RTC + */ + +struct fsl_mc_io; + +int dprtc_open(struct fsl_mc_io *mc_io, + u32 cmd_flags, + int dprtc_id, + u16 *token); + +int dprtc_close(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token); + +int dprtc_set_freq_compensation(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u32 freq_compensation); + +int dprtc_get_freq_compensation(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u32 *freq_compensation); + +int dprtc_get_time(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + uint64_t *time); + +int dprtc_set_time(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + uint64_t time); + +#endif /* __FSL_DPRTC_H */ diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index bf9b9fd6d2a0..a17cc973d9a3 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -1946,16 +1946,15 @@ static int fec_enet_mii_probe(struct net_device *ndev) /* mask with MAC supported features */ if (fep->quirks & FEC_QUIRK_HAS_GBIT) { - phy_dev->supported &= PHY_GBIT_FEATURES; - phy_dev->supported &= ~SUPPORTED_1000baseT_Half; + phy_set_max_speed(phy_dev, 1000); + phy_remove_link_mode(phy_dev, + ETHTOOL_LINK_MODE_1000baseT_Half_BIT); #if !defined(CONFIG_M5272) - phy_dev->supported |= SUPPORTED_Pause; + phy_support_sym_pause(phy_dev); #endif } else - phy_dev->supported &= PHY_BASIC_FEATURES; - - phy_dev->advertising = phy_dev->supported; + phy_set_max_speed(phy_dev, 100); fep->link = 0; fep->full_duplex = 0; @@ -2055,8 +2054,7 @@ static int fec_enet_mii_init(struct platform_device *pdev) node = of_get_child_by_name(pdev->dev.of_node, "mdio"); err = of_mdiobus_register(fep->mii_bus, node); - if (node) - of_node_put(node); + of_node_put(node); if (err) goto err_out_free_mdiobus; @@ -2230,13 +2228,8 @@ static int fec_enet_set_pauseparam(struct net_device *ndev, fep->pause_flag |= pause->rx_pause ? FEC_PAUSE_FLAG_ENABLE : 0; fep->pause_flag |= pause->autoneg ? FEC_PAUSE_FLAG_AUTONEG : 0; - if (pause->rx_pause || pause->autoneg) { - ndev->phydev->supported |= ADVERTISED_Pause; - ndev->phydev->advertising |= ADVERTISED_Pause; - } else { - ndev->phydev->supported &= ~ADVERTISED_Pause; - ndev->phydev->advertising &= ~ADVERTISED_Pause; - } + phy_set_sym_pause(ndev->phydev, pause->rx_pause, pause->tx_pause, + pause->autoneg); if (pause->autoneg) { if (netif_running(ndev)) diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c index 6d7269d87a85..b90bab72efdb 100644 --- a/drivers/net/ethernet/freescale/fec_mpc52xx.c +++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c @@ -305,7 +305,8 @@ static int mpc52xx_fec_close(struct net_device *dev) * invariant will hold if you make sure that the netif_*_queue() * calls are done at the proper times. 
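
Several drivers in this series get the same mechanical fix: .ndo_start_xmit implementations are converted from int to netdev_tx_t, so the compiler can check that they return the NETDEV_TX_* contract values rather than arbitrary integers. The expected shape, with the example_* ring helpers hypothetical:

	static netdev_tx_t example_start_xmit(struct sk_buff *skb,
					      struct net_device *dev)
	{
		struct example_priv *priv = netdev_priv(dev);

		if (!example_ring_has_room(priv)) {
			netif_stop_queue(dev);
			return NETDEV_TX_BUSY;	/* core requeues the skb */
		}

		example_queue_for_dma(priv, skb);
		return NETDEV_TX_OK;		/* skb consumed */
	}
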
*/ -static int mpc52xx_fec_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t +mpc52xx_fec_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct mpc52xx_fec_priv *priv = netdev_priv(dev); struct bcom_fec_bd *bd; diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c index a847b9c3b31a..d79e4e009d63 100644 --- a/drivers/net/ethernet/freescale/fman/mac.c +++ b/drivers/net/ethernet/freescale/fman/mac.c @@ -393,11 +393,7 @@ void fman_get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause, */ /* get local capabilities */ - lcl_adv = 0; - if (phy_dev->advertising & ADVERTISED_Pause) - lcl_adv |= ADVERTISE_PAUSE_CAP; - if (phy_dev->advertising & ADVERTISED_Asym_Pause) - lcl_adv |= ADVERTISE_PAUSE_ASYM; + lcl_adv = ethtool_adv_to_lcl_adv_t(phy_dev->advertising); /* get link partner capabilities */ rmt_adv = 0; diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c index 2c2976a2dda6..7c548ed535da 100644 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c @@ -481,7 +481,8 @@ static struct sk_buff *tx_skb_align_workaround(struct net_device *dev, } #endif -static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t +fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); cbd_t __iomem *bdp; diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c index ac2c3f6a12bc..82722d05fedb 100644 --- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c +++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c @@ -446,8 +446,8 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev) goto error; } - snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s@%llx", np->name, - (unsigned long long)res.start); + snprintf(new_bus->id, MII_BUS_ID_SIZE, "%pOFn@%llx", np, + (unsigned long long)res.start); priv->map = of_iomap(np, 0); if (!priv->map) { diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index f27f9bae1a4a..3c8da1a18ba0 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -102,8 +102,6 @@ #include <linux/phy_fixed.h> #include <linux/of.h> #include <linux/of_net.h> -#include <linux/of_address.h> -#include <linux/of_irq.h> #include "gianfar.h" @@ -112,7 +110,7 @@ const char gfar_driver_version[] = "2.0"; static int gfar_enet_open(struct net_device *dev); -static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev); +static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev); static void gfar_reset_task(struct work_struct *work); static void gfar_timeout(struct net_device *dev); static int gfar_close(struct net_device *dev); @@ -1814,8 +1812,8 @@ static int init_phy(struct net_device *dev) phydev->supported &= (GFAR_SUPPORTED | gigabit_support); phydev->advertising = phydev->supported; - /* Add support for flow control, but don't advertise it by default */ - phydev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause); + /* Add support for flow control */ + phy_support_asym_pause(phydev); /* disable EEE autoneg, EEE not supported by eTSEC */ memset(&edata, 0, sizeof(struct ethtool_eee)); @@ -2334,7 +2332,7 @@ static inline bool gfar_csum_errata_76(struct gfar_private *priv, /* This is called by the kernel when a frame is ready for transmission. 
* It is pointed to by the dev->hard_start_xmit function pointer */ -static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct gfar_private *priv = netdev_priv(dev); struct gfar_priv_tx_q *tx_queue = NULL; @@ -3658,12 +3656,7 @@ static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv) if (phydev->asym_pause) rmt_adv |= LPA_PAUSE_ASYM; - lcl_adv = 0; - if (phydev->advertising & ADVERTISED_Pause) - lcl_adv |= ADVERTISE_PAUSE_CAP; - if (phydev->advertising & ADVERTISED_Asym_Pause) - lcl_adv |= ADVERTISE_PAUSE_ASYM; - + lcl_adv = ethtool_adv_to_lcl_adv_t(phydev->advertising); flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv); if (flowctrl & FLOW_CTRL_TX) val |= MACCFG1_TX_FLOW; diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index 395a5266ea30..0d76e15cd6dd 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -230,7 +230,7 @@ static unsigned int gfar_usecs2ticks(struct gfar_private *priv, /* Make sure we return a number greater than 0 * if usecs > 0 */ - return (usecs * 1000 + count - 1) / count; + return DIV_ROUND_UP(usecs * 1000, count); } /* Convert ethernet clock ticks to microseconds */ @@ -503,65 +503,44 @@ static int gfar_spauseparam(struct net_device *dev, struct gfar_private *priv = netdev_priv(dev); struct phy_device *phydev = dev->phydev; struct gfar __iomem *regs = priv->gfargrp[0].regs; - u32 oldadv, newadv; if (!phydev) return -ENODEV; - if (!(phydev->supported & SUPPORTED_Pause) || - (!(phydev->supported & SUPPORTED_Asym_Pause) && - (epause->rx_pause != epause->tx_pause))) + if (!phy_validate_pause(phydev, epause)) return -EINVAL; priv->rx_pause_en = priv->tx_pause_en = 0; + phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause); if (epause->rx_pause) { priv->rx_pause_en = 1; if (epause->tx_pause) { priv->tx_pause_en = 1; - /* FLOW_CTRL_RX & TX */ - newadv = ADVERTISED_Pause; - } else /* FLOW_CTLR_RX */ - newadv = ADVERTISED_Pause | ADVERTISED_Asym_Pause; + } } else if (epause->tx_pause) { priv->tx_pause_en = 1; - /* FLOW_CTLR_TX */ - newadv = ADVERTISED_Asym_Pause; - } else - newadv = 0; + } if (epause->autoneg) priv->pause_aneg_en = 1; else priv->pause_aneg_en = 0; - oldadv = phydev->advertising & - (ADVERTISED_Pause | ADVERTISED_Asym_Pause); - if (oldadv != newadv) { - phydev->advertising &= - ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause); - phydev->advertising |= newadv; - if (phydev->autoneg) - /* inform link partner of our - * new flow ctrl settings - */ - return phy_start_aneg(phydev); - - if (!epause->autoneg) { - u32 tempval; - tempval = gfar_read(®s->maccfg1); - tempval &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW); - - priv->tx_actual_en = 0; - if (priv->tx_pause_en) { - priv->tx_actual_en = 1; - tempval |= MACCFG1_TX_FLOW; - } + if (!epause->autoneg) { + u32 tempval = gfar_read(®s->maccfg1); - if (priv->rx_pause_en) - tempval |= MACCFG1_RX_FLOW; - gfar_write(®s->maccfg1, tempval); + tempval &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW); + + priv->tx_actual_en = 0; + if (priv->tx_pause_en) { + priv->tx_actual_en = 1; + tempval |= MACCFG1_TX_FLOW; } + + if (priv->rx_pause_en) + tempval |= MACCFG1_RX_FLOW; + gfar_write(®s->maccfg1, tempval); } return 0; diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index 22a817da861e..32e02700feaa 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c 
+++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -1742,12 +1742,7 @@ static int init_phy(struct net_device *dev) if (priv->phy_interface == PHY_INTERFACE_MODE_SGMII) uec_configure_serdes(dev); - phydev->supported &= (SUPPORTED_MII | - SUPPORTED_Autoneg | - ADVERTISED_10baseT_Half | - ADVERTISED_10baseT_Full | - ADVERTISED_100baseT_Half | - ADVERTISED_100baseT_Full); + phy_set_max_speed(phydev, SPEED_100); if (priv->max_speed == SPEED_1000) phydev->supported |= ADVERTISED_1000baseT_Full; @@ -3083,7 +3078,8 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) /* This is called by the kernel when a frame is ready for transmission. */ /* It is pointed to by the dev->hard_start_xmit function pointer */ -static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t +ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct ucc_geth_private *ugeth = netdev_priv(dev); #ifdef CONFIG_UGETH_TX_ON_DEMAND diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c index 14374a856d30..be268dcde8fa 100644 --- a/drivers/net/ethernet/hisilicon/hip04_eth.c +++ b/drivers/net/ethernet/hisilicon/hip04_eth.c @@ -422,7 +422,8 @@ static void hip04_start_tx_timer(struct hip04_priv *priv) ns, HRTIMER_MODE_REL); } -static int hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev) +static netdev_tx_t +hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct hip04_priv *priv = netdev_priv(ndev); struct net_device_stats *stats = &ndev->stats; diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c index c5727003af8c..471805ea363b 100644 --- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c +++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c @@ -736,7 +736,7 @@ static int hix5hd2_fill_sg_desc(struct hix5hd2_priv *priv, return 0; } -static int hix5hd2_net_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t hix5hd2_net_xmit(struct sk_buff *skb, struct net_device *dev) { struct hix5hd2_priv *priv = netdev_priv(dev); struct hix5hd2_desc *desc; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c index 09e4061d1fa6..aaf72c055711 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c @@ -319,7 +319,7 @@ static void hns_gmac_set_promisc(void *mac_drv, u8 en) hns_gmac_set_uc_match(mac_drv, en); } -int hns_gmac_wait_fifo_clean(void *mac_drv) +static int hns_gmac_wait_fifo_clean(void *mac_drv) { struct mac_driver *drv = (struct mac_driver *)mac_drv; int wait_cnt; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c index 6ed6f142427e..3613e400e816 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c @@ -837,8 +837,8 @@ static int hns_mac_get_info(struct hns_mac_cb *mac_cb) */ put_device(&mac_cb->phy_dev->mdio.dev); - dev_dbg(mac_cb->dev, "mac%d phy_node: %s\n", - mac_cb->mac_id, np->name); + dev_dbg(mac_cb->dev, "mac%d phy_node: %pOFn\n", + mac_cb->mac_id, np); } of_node_put(np); @@ -855,8 +855,8 @@ static int hns_mac_get_info(struct hns_mac_cb *mac_cb) * if the phy_dev is found */ put_device(&mac_cb->phy_dev->mdio.dev); - dev_dbg(mac_cb->dev, "mac%d phy_node: %s\n", - mac_cb->mac_id, np->name); + dev_dbg(mac_cb->dev, "mac%d phy_node: %pOFn\n", + mac_cb->mac_id, np); } 
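
The np->name dereferences in log messages and bus-id strings are replaced with the %pOFn vsprintf specifier, part of the wider effort to drop the cached name pointer from struct device_node. A sketch:

	/* %pOFn prints the node name, %pOF the full path */
	static void example_log_node(struct device *dev,
				     struct device_node *np)
	{
		dev_dbg(dev, "phy_node: %pOFn (%pOF)\n", np, np);
	}
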
of_node_put(np); diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h index be9dc08ccf67..038326cfda93 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h +++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h @@ -46,9 +46,6 @@ enum hclge_mbx_mac_vlan_subcode { HCLGE_MBX_MAC_VLAN_MC_MODIFY, /* modify MC mac addr */ HCLGE_MBX_MAC_VLAN_MC_ADD, /* add new MC mac addr */ HCLGE_MBX_MAC_VLAN_MC_REMOVE, /* remove MC mac addr */ - HCLGE_MBX_MAC_VLAN_MC_FUNC_MTA_ENABLE, /* config func MTA enable */ - HCLGE_MBX_MAC_VLAN_MTA_TYPE_READ, /* read func MTA type */ - HCLGE_MBX_MAC_VLAN_MTA_STATUS_UPDATE, /* update MTA status */ }; /* below are per-VF vlan cfg subcodes */ diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c index fff5be8078ac..781e5dee3c70 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c @@ -29,8 +29,8 @@ static bool hnae3_client_match(enum hnae3_client_type client_type, return false; } -static void hnae3_set_client_init_flag(struct hnae3_client *client, - struct hnae3_ae_dev *ae_dev, int inited) +void hnae3_set_client_init_flag(struct hnae3_client *client, + struct hnae3_ae_dev *ae_dev, int inited) { switch (client->type) { case HNAE3_CLIENT_KNIC: @@ -46,6 +46,7 @@ static void hnae3_set_client_init_flag(struct hnae3_client *client, break; } } +EXPORT_SYMBOL(hnae3_set_client_init_flag); static int hnae3_get_client_init_flag(struct hnae3_client *client, struct hnae3_ae_dev *ae_dev) @@ -86,14 +87,11 @@ static int hnae3_match_n_instantiate(struct hnae3_client *client, /* now, (un-)instantiate client by calling lower layer */ if (is_reg) { ret = ae_dev->ops->init_client_instance(client, ae_dev); - if (ret) { + if (ret) dev_err(&ae_dev->pdev->dev, "fail to instantiate client, ret = %d\n", ret); - return ret; - } - hnae3_set_client_init_flag(client, ae_dev, 1); - return 0; + return ret; } if (hnae3_get_client_init_flag(client, ae_dev)) { diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index 67befff0bfc5..c3bd2a10bc7d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -51,6 +51,7 @@ #define HNAE3_KNIC_CLIENT_INITED_B 0x3 #define HNAE3_UNIC_CLIENT_INITED_B 0x4 #define HNAE3_ROCE_CLIENT_INITED_B 0x5 +#define HNAE3_DEV_SUPPORT_FD_B 0x6 #define HNAE3_DEV_SUPPORT_ROCE_DCB_BITS (BIT(HNAE3_DEV_SUPPORT_DCB_B) |\ BIT(HNAE3_DEV_SUPPORT_ROCE_B)) @@ -61,6 +62,9 @@ #define hnae3_dev_dcb_supported(hdev) \ hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_DCB_B) +#define hnae3_dev_fd_supported(hdev) \ + hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B) + #define ring_ptr_move_fw(ring, p) \ ((ring)->p = ((ring)->p + 1) % (ring)->desc_num) #define ring_ptr_move_bw(ring, p) \ @@ -84,10 +88,11 @@ struct hnae3_queue { /*hnae3 loop mode*/ enum hnae3_loop { - HNAE3_MAC_INTER_LOOP_MAC, - HNAE3_MAC_INTER_LOOP_SERDES, - HNAE3_MAC_INTER_LOOP_PHY, - HNAE3_MAC_LOOP_NONE, + HNAE3_LOOP_APP, + HNAE3_LOOP_SERIAL_SERDES, + HNAE3_LOOP_PARALLEL_SERDES, + HNAE3_LOOP_PHY, + HNAE3_LOOP_NONE, }; enum hnae3_client_type { @@ -107,6 +112,7 @@ enum hnae3_media_type { HNAE3_MEDIA_TYPE_FIBER, HNAE3_MEDIA_TYPE_COPPER, HNAE3_MEDIA_TYPE_BACKPLANE, + HNAE3_MEDIA_TYPE_NONE, }; enum hnae3_reset_notify_type { @@ -173,6 +179,7 @@ struct hnae3_ae_dev { struct list_head node; u32 flag; enum hnae3_dev_type dev_type; + enum hnae3_reset_type reset_type; void 
*priv; }; @@ -337,6 +344,8 @@ struct hnae3_ae_ops { void (*get_mac_addr)(struct hnae3_handle *handle, u8 *p); int (*set_mac_addr)(struct hnae3_handle *handle, void *p, bool is_first); + int (*do_ioctl)(struct hnae3_handle *handle, + struct ifreq *ifr, int cmd); int (*add_uc_addr)(struct hnae3_handle *handle, const unsigned char *addr); int (*rm_uc_addr)(struct hnae3_handle *handle, @@ -346,8 +355,6 @@ struct hnae3_ae_ops { const unsigned char *addr); int (*rm_mc_addr)(struct hnae3_handle *handle, const unsigned char *addr); - int (*update_mta_status)(struct hnae3_handle *handle); - void (*set_tso_stats)(struct hnae3_handle *handle, int enable); void (*update_stats)(struct hnae3_handle *handle, struct net_device_stats *net_stats); @@ -399,7 +406,7 @@ struct hnae3_ae_ops { void (*get_channels)(struct hnae3_handle *handle, struct ethtool_channels *ch); void (*get_tqps_and_rss_info)(struct hnae3_handle *h, - u16 *free_tqps, u16 *max_rss_size); + u16 *alloc_tqps, u16 *max_rss_size); int (*set_channels)(struct hnae3_handle *handle, u32 new_tqps_num); void (*get_flowctrl_adv)(struct hnae3_handle *handle, u32 *flowctrl_adv); @@ -408,7 +415,20 @@ struct hnae3_ae_ops { void (*get_link_mode)(struct hnae3_handle *handle, unsigned long *supported, unsigned long *advertising); - void (*get_port_type)(struct hnae3_handle *handle, u8 *port_type); + int (*add_fd_entry)(struct hnae3_handle *handle, + struct ethtool_rxnfc *cmd); + int (*del_fd_entry)(struct hnae3_handle *handle, + struct ethtool_rxnfc *cmd); + void (*del_all_fd_entries)(struct hnae3_handle *handle, + bool clear_list); + int (*get_fd_rule_cnt)(struct hnae3_handle *handle, + struct ethtool_rxnfc *cmd); + int (*get_fd_rule_info)(struct hnae3_handle *handle, + struct ethtool_rxnfc *cmd); + int (*get_fd_all_rules)(struct hnae3_handle *handle, + struct ethtool_rxnfc *cmd, u32 *rule_locs); + int (*restore_fd_rules)(struct hnae3_handle *handle); + void (*enable_fd)(struct hnae3_handle *handle, bool enable); }; struct hnae3_dcb_ops { @@ -459,6 +479,7 @@ struct hnae3_knic_private_info { const struct hnae3_dcb_ops *dcb_ops; u16 int_rl_setting; + enum pkt_hash_types rss_type; }; struct hnae3_roce_private_info { @@ -476,10 +497,20 @@ struct hnae3_unic_private_info { struct hnae3_queue **tqp; /* array base of all TQPs of this instance */ }; -#define HNAE3_SUPPORT_MAC_LOOPBACK BIT(0) +#define HNAE3_SUPPORT_APP_LOOPBACK BIT(0) #define HNAE3_SUPPORT_PHY_LOOPBACK BIT(1) -#define HNAE3_SUPPORT_SERDES_LOOPBACK BIT(2) +#define HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK BIT(2) #define HNAE3_SUPPORT_VF BIT(3) +#define HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK BIT(4) + +#define HNAE3_USER_UPE BIT(0) /* unicast promisc enabled by user */ +#define HNAE3_USER_MPE BIT(1) /* mulitcast promisc enabled by user */ +#define HNAE3_BPE BIT(2) /* broadcast promisc enable */ +#define HNAE3_OVERFLOW_UPE BIT(3) /* unicast mac vlan overflow */ +#define HNAE3_OVERFLOW_MPE BIT(4) /* multicast mac vlan overflow */ +#define HNAE3_VLAN_FLTR BIT(5) /* enable vlan filter */ +#define HNAE3_UPE (HNAE3_USER_UPE | HNAE3_OVERFLOW_UPE) +#define HNAE3_MPE (HNAE3_USER_MPE | HNAE3_OVERFLOW_MPE) struct hnae3_handle { struct hnae3_client *client; @@ -499,6 +530,8 @@ struct hnae3_handle { }; u32 numa_node_mask; /* for multi-chip support */ + + u8 netdev_flags; }; #define hnae3_set_field(origin, mask, shift, val) \ @@ -521,4 +554,7 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo); void hnae3_unregister_client(struct hnae3_client *client); int hnae3_register_client(struct hnae3_client *client); + 
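
hnae3_set_client_init_flag() is no longer static: with the flag-setting removed from hnae3_match_n_instantiate(), each client marks itself initialized only once its own setup has actually succeeded. A client-side sketch, with example_setup() hypothetical:

	static int example_client_init_instance(struct hnae3_handle *handle,
						struct hnae3_client *client)
	{
		struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
		int ret;

		ret = example_setup(handle);
		if (ret)
			return ret;

		/* only reached on success, unlike the old central bookkeeping */
		hnae3_set_client_init_flag(client, ae_dev, 1);
		return 0;
	}
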
+void hnae3_set_client_init_flag(struct hnae3_client *client, + struct hnae3_ae_dev *ae_dev, int inited); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 955c4ab18b03..ce93fcce2732 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -21,6 +21,7 @@ static void hns3_clear_all_ring(struct hnae3_handle *h); static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h); +static void hns3_remove_hw_addr(struct net_device *netdev); static const char hns3_driver_name[] = "hns3"; const char hns3_driver_version[] = VERMAGIC_STRING; @@ -66,6 +67,23 @@ static irqreturn_t hns3_irq_handle(int irq, void *vector) return IRQ_HANDLED; } +/* This callback function is used to set affinity changes to the irq affinity + * masks when the irq_set_affinity_notifier function is used. + */ +static void hns3_nic_irq_affinity_notify(struct irq_affinity_notify *notify, + const cpumask_t *mask) +{ + struct hns3_enet_tqp_vector *tqp_vectors = + container_of(notify, struct hns3_enet_tqp_vector, + affinity_notify); + + tqp_vectors->affinity_mask = *mask; +} + +static void hns3_nic_irq_affinity_release(struct kref *ref) +{ +} + static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv) { struct hns3_enet_tqp_vector *tqp_vectors; @@ -77,6 +95,10 @@ static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv) if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED) continue; + /* clear the affinity notifier and affinity mask */ + irq_set_affinity_notifier(tqp_vectors->vector_irq, NULL); + irq_set_affinity_hint(tqp_vectors->vector_irq, NULL); + /* release the irq resource */ free_irq(tqp_vectors->vector_irq, tqp_vectors); tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED; @@ -127,6 +149,15 @@ static int hns3_nic_init_irq(struct hns3_nic_priv *priv) return ret; } + tqp_vectors->affinity_notify.notify = + hns3_nic_irq_affinity_notify; + tqp_vectors->affinity_notify.release = + hns3_nic_irq_affinity_release; + irq_set_affinity_notifier(tqp_vectors->vector_irq, + &tqp_vectors->affinity_notify); + irq_set_affinity_hint(tqp_vectors->vector_irq, + &tqp_vectors->affinity_mask); + tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED; } @@ -195,8 +226,6 @@ void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector, static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector, struct hns3_nic_priv *priv) { - struct hnae3_handle *h = priv->ae_handle; - /* initialize the configuration for interrupt coalescing. * 1. GL (Interrupt Gap Limiter) * 2. 
RL (Interrupt Rate Limiter) @@ -209,9 +238,6 @@ static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector, tqp_vector->tx_group.coal.int_gl = HNS3_INT_GL_50K; tqp_vector->rx_group.coal.int_gl = HNS3_INT_GL_50K; - /* Default: disable RL */ - h->kinfo.int_rl_setting = 0; - tqp_vector->int_adapt_down = HNS3_INT_ADAPT_DOWN_START; tqp_vector->rx_group.coal.flow_level = HNS3_FLOW_LOW; tqp_vector->tx_group.coal.flow_level = HNS3_FLOW_LOW; @@ -277,12 +303,12 @@ static int hns3_nic_set_real_num_queue(struct net_device *netdev) static u16 hns3_get_max_available_channels(struct hnae3_handle *h) { - u16 free_tqps, max_rss_size, max_tqps; + u16 alloc_tqps, max_rss_size, rss_size; - h->ae_algo->ops->get_tqps_and_rss_info(h, &free_tqps, &max_rss_size); - max_tqps = h->kinfo.num_tc * max_rss_size; + h->ae_algo->ops->get_tqps_and_rss_info(h, &alloc_tqps, &max_rss_size); + rss_size = alloc_tqps / h->kinfo.num_tc; - return min_t(u16, max_tqps, (free_tqps + h->kinfo.num_tqps)); + return min_t(u16, rss_size, max_rss_size); } static int hns3_nic_net_up(struct net_device *netdev) @@ -433,26 +459,81 @@ static int hns3_nic_mc_unsync(struct net_device *netdev, return 0; } +static u8 hns3_get_netdev_flags(struct net_device *netdev) +{ + u8 flags = 0; + + if (netdev->flags & IFF_PROMISC) { + flags = HNAE3_USER_UPE | HNAE3_USER_MPE; + } else { + flags |= HNAE3_VLAN_FLTR; + if (netdev->flags & IFF_ALLMULTI) + flags |= HNAE3_USER_MPE; + } + + return flags; +} + static void hns3_nic_set_rx_mode(struct net_device *netdev) { struct hnae3_handle *h = hns3_get_handle(netdev); + u8 new_flags; + int ret; - if (h->ae_algo->ops->set_promisc_mode) { - if (netdev->flags & IFF_PROMISC) - h->ae_algo->ops->set_promisc_mode(h, true, true); - else if (netdev->flags & IFF_ALLMULTI) - h->ae_algo->ops->set_promisc_mode(h, false, true); - else - h->ae_algo->ops->set_promisc_mode(h, false, false); - } - if (__dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync)) + new_flags = hns3_get_netdev_flags(netdev); + + ret = __dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync); + if (ret) { netdev_err(netdev, "sync uc address fail\n"); + if (ret == -ENOSPC) + new_flags |= HNAE3_OVERFLOW_UPE; + } + if (netdev->flags & IFF_MULTICAST) { - if (__dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync)) + ret = __dev_mc_sync(netdev, hns3_nic_mc_sync, + hns3_nic_mc_unsync); + if (ret) { netdev_err(netdev, "sync mc address fail\n"); + if (ret == -ENOSPC) + new_flags |= HNAE3_OVERFLOW_MPE; + } + } - if (h->ae_algo->ops->update_mta_status) - h->ae_algo->ops->update_mta_status(h); + hns3_update_promisc_mode(netdev, new_flags); + /* User mode Promisc mode enable and vlan filtering is disabled to + * let all packets in. MAC-VLAN Table overflow Promisc enabled and + * vlan fitering is enabled + */ + hns3_enable_vlan_filter(netdev, new_flags & HNAE3_VLAN_FLTR); + h->netdev_flags = new_flags; +} + +void hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + + if (h->ae_algo->ops->set_promisc_mode) { + h->ae_algo->ops->set_promisc_mode(h, + promisc_flags & HNAE3_UPE, + promisc_flags & HNAE3_MPE); + } +} + +void hns3_enable_vlan_filter(struct net_device *netdev, bool enable) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + bool last_state; + + if (h->pdev->revision >= 0x21 && h->ae_algo->ops->enable_vlan_filter) { + last_state = h->netdev_flags & HNAE3_VLAN_FLTR ? 
true : false; + if (enable != last_state) { + netdev_info(netdev, + "%s vlan filter\n", + enable ? "enable" : "disable"); + h->ae_algo->ops->enable_vlan_filter(h, enable); + } } } @@ -1044,7 +1125,7 @@ static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum, /* No. of segments (plus a header) */ buf_num = skb_shinfo(skb)->nr_frags + 1; - if (buf_num > ring_space(ring)) + if (unlikely(ring_space(ring) < buf_num)) return -EBUSY; *bnum = buf_num; @@ -1209,6 +1290,20 @@ static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p) return 0; } +static int hns3_nic_do_ioctl(struct net_device *netdev, + struct ifreq *ifr, int cmd) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (!netif_running(netdev)) + return -EINVAL; + + if (!h->ae_algo->ops->do_ioctl) + return -EOPNOTSUPP; + + return h->ae_algo->ops->do_ioctl(h, ifr, cmd); +} + static int hns3_nic_set_features(struct net_device *netdev, netdev_features_t features) { @@ -1246,6 +1341,13 @@ static int hns3_nic_set_features(struct net_device *netdev, return ret; } + if ((changed & NETIF_F_NTUPLE) && h->ae_algo->ops->enable_fd) { + if (features & NETIF_F_NTUPLE) + h->ae_algo->ops->enable_fd(h, true); + else + h->ae_algo->ops->enable_fd(h, false); + } + netdev->features = features; return 0; } @@ -1447,13 +1549,11 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu) } ret = h->ae_algo->ops->set_mtu(h, new_mtu); - if (ret) { + if (ret) netdev_err(netdev, "failed to change MTU in hardware %d\n", ret); - return ret; - } - - netdev->mtu = new_mtu; + else + netdev->mtu = new_mtu; /* if the netdev was running earlier, bring it up again */ if (if_running && hns3_nic_net_open(netdev)) @@ -1535,6 +1635,7 @@ static const struct net_device_ops hns3_nic_netdev_ops = { .ndo_start_xmit = hns3_nic_net_xmit, .ndo_tx_timeout = hns3_nic_net_timeout, .ndo_set_mac_address = hns3_nic_net_set_mac_address, + .ndo_do_ioctl = hns3_nic_do_ioctl, .ndo_change_mtu = hns3_nic_change_mtu, .ndo_set_features = hns3_nic_set_features, .ndo_get_stats64 = hns3_nic_get_stats64, @@ -1584,6 +1685,13 @@ static void hns3_disable_sriov(struct pci_dev *pdev) pci_disable_sriov(pdev); } +static void hns3_get_dev_capability(struct pci_dev *pdev, + struct hnae3_ae_dev *ae_dev) +{ + if (pdev->revision >= 0x21) + hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B, 1); +} + /* hns3_probe - Device initialization routine * @pdev: PCI device information struct * @ent: entry in hns3_pci_tbl @@ -1609,6 +1717,8 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ae_dev->pdev = pdev; ae_dev->flag = ent->driver_data; ae_dev->dev_type = HNAE3_DEV_KNIC; + ae_dev->reset_type = HNAE3_NONE_RESET; + hns3_get_dev_capability(pdev, ae_dev); pci_set_drvdata(pdev, ae_dev); hnae3_register_ae_dev(ae_dev); @@ -1662,11 +1772,24 @@ static int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) return 0; } +static void hns3_shutdown(struct pci_dev *pdev) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); + + hnae3_unregister_ae_dev(ae_dev); + devm_kfree(&pdev->dev, ae_dev); + pci_set_drvdata(pdev, NULL); + + if (system_state == SYSTEM_POWER_OFF) + pci_set_power_state(pdev, PCI_D3hot); +} + static struct pci_driver hns3_driver = { .name = hns3_driver_name, .id_table = hns3_pci_tbl, .probe = hns3_probe, .remove = hns3_remove, + .shutdown = hns3_shutdown, .sriov_configure = hns3_pci_sriov_configure, }; @@ -1682,7 +1805,7 @@ static void hns3_set_default_feature(struct net_device *netdev) NETIF_F_RXCSUM | NETIF_F_SG | 
NETIF_F_GSO | NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_UDP_TUNNEL_CSUM; + NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC; netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID; @@ -1694,24 +1817,30 @@ static void hns3_set_default_feature(struct net_device *netdev) NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_UDP_TUNNEL_CSUM; + NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC; netdev->vlan_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_UDP_TUNNEL_CSUM; + NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC; netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_UDP_TUNNEL_CSUM; + NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC; - if (pdev->revision != 0x20) + if (pdev->revision >= 0x21) { netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; + + if (!(h->flags & HNAE3_SUPPORT_VF)) { + netdev->hw_features |= NETIF_F_NTUPLE; + netdev->features |= NETIF_F_NTUPLE; + } + } } static int hns3_alloc_buffer(struct hns3_enet_ring *ring, @@ -1749,7 +1878,7 @@ static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb) cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0, cb->length, ring_to_dma_dir(ring)); - if (dma_mapping_error(ring_to_dev(ring), cb->dma)) + if (unlikely(dma_mapping_error(ring_to_dev(ring), cb->dma))) return -EIO; return 0; @@ -1912,9 +2041,10 @@ static int is_valid_clean_head(struct hns3_enet_ring *ring, int h) return u > c ? (h > c && h <= u) : (h > c || h <= u); } -bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget) +void hns3_clean_tx_ring(struct hns3_enet_ring *ring) { struct net_device *netdev = ring->tqp->handle->kinfo.netdev; + struct hns3_nic_priv *priv = netdev_priv(netdev); struct netdev_queue *dev_queue; int bytes, pkts; int head; @@ -1923,7 +2053,7 @@ bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget) rmb(); /* Make sure head is ready before touch any data */ if (is_ring_empty(ring) || head == ring->next_to_clean) - return true; /* no data to poll */ + return; /* no data to poll */ if (unlikely(!is_valid_clean_head(ring, head))) { netdev_err(netdev, "wrong head (%d, %d-%d)\n", head, @@ -1932,16 +2062,15 @@ bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget) u64_stats_update_begin(&ring->syncp); ring->stats.io_err_cnt++; u64_stats_update_end(&ring->syncp); - return true; + return; } bytes = 0; pkts = 0; - while (head != ring->next_to_clean && budget) { + while (head != ring->next_to_clean) { hns3_nic_reclaim_one_desc(ring, &bytes, &pkts); /* Issue prefetch for next Tx descriptor */ prefetch(&ring->desc_cb[ring->next_to_clean]); - budget--; } ring->tqp_vector->tx_group.total_bytes += bytes; @@ -1961,13 +2090,12 @@ bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget) * sees the new next_to_clean. 
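
With hns3_clean_tx_ring() no longer taking a budget, Tx completion is drained in full on every poll and only the Rx side counts against the NAPI budget, matching the common convention that Tx reclaim is cheap bookkeeping. A generic poll sketch, with the example_* helpers hypothetical:

	static int example_napi_poll(struct napi_struct *napi, int budget)
	{
		struct example_vector *v =
			container_of(napi, struct example_vector, napi);
		int work_done;

		example_clean_tx(v);		/* unbudgeted, always in full */

		work_done = example_clean_rx(v, budget);
		if (work_done < budget &&
		    napi_complete_done(napi, work_done))
			example_irq_enable(v);

		return work_done;
	}
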
*/ smp_mb(); - if (netif_tx_queue_stopped(dev_queue)) { + if (netif_tx_queue_stopped(dev_queue) && + !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) { netif_tx_wake_queue(dev_queue); ring->stats.restart_queue++; } } - - return !!budget; } static int hns3_desc_unused(struct hns3_enet_ring *ring) @@ -2092,7 +2220,6 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, hnae3_get_bit(l234info, HNS3_RXD_L4E_B) || hnae3_get_bit(l234info, HNS3_RXD_OL3E_B) || hnae3_get_bit(l234info, HNS3_RXD_OL4E_B))) { - netdev_err(netdev, "L3/L4 error pkt\n"); u64_stats_update_begin(&ring->syncp); ring->stats.l3l4_csum_err++; u64_stats_update_end(&ring->syncp); @@ -2121,6 +2248,8 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, l4_type == HNS3_L4_TYPE_SCTP)) skb->ip_summed = CHECKSUM_UNNECESSARY; break; + default: + break; } } @@ -2129,18 +2258,18 @@ static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb) napi_gro_receive(&ring->tqp_vector->napi, skb); } -static u16 hns3_parse_vlan_tag(struct hns3_enet_ring *ring, - struct hns3_desc *desc, u32 l234info) +static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring, + struct hns3_desc *desc, u32 l234info, + u16 *vlan_tag) { struct pci_dev *pdev = ring->tqp->handle->pdev; - u16 vlan_tag; if (pdev->revision == 0x20) { - vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); - if (!(vlan_tag & VLAN_VID_MASK)) - vlan_tag = le16_to_cpu(desc->rx.vlan_tag); + *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); + if (!(*vlan_tag & VLAN_VID_MASK)) + *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); - return vlan_tag; + return (*vlan_tag != 0); } #define HNS3_STRP_OUTER_VLAN 0x1 @@ -2149,17 +2278,29 @@ static u16 hns3_parse_vlan_tag(struct hns3_enet_ring *ring, switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M, HNS3_RXD_STRP_TAGP_S)) { case HNS3_STRP_OUTER_VLAN: - vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); - break; + *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); + return true; case HNS3_STRP_INNER_VLAN: - vlan_tag = le16_to_cpu(desc->rx.vlan_tag); - break; + *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); + return true; default: - vlan_tag = 0; - break; + return false; } +} - return vlan_tag; +static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring, + struct sk_buff *skb) +{ + struct hns3_desc *desc = &ring->desc[ring->next_to_clean]; + struct hnae3_handle *handle = ring->tqp->handle; + enum pkt_hash_types rss_type; + + if (le32_to_cpu(desc->rx.rss_hash)) + rss_type = handle->kinfo.rss_type; + else + rss_type = PKT_HASH_TYPE_NONE; + + skb_set_hash(skb, le32_to_cpu(desc->rx.rss_hash), rss_type); } static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, @@ -2261,16 +2402,13 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) { u16 vlan_tag; - vlan_tag = hns3_parse_vlan_tag(ring, desc, l234info); - if (vlan_tag & VLAN_VID_MASK) + if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag)) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); } if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B))) { - netdev_err(netdev, "no valid bd,%016llx,%016llx\n", - ((u64 *)desc)[0], ((u64 *)desc)[1]); u64_stats_update_begin(&ring->syncp); ring->stats.non_vld_descs++; u64_stats_update_end(&ring->syncp); @@ -2281,7 +2419,6 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, if (unlikely((!desc->rx.pkt_len) || hnae3_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) { - netdev_err(netdev, "truncated pkt\n"); u64_stats_update_begin(&ring->syncp); 
ring->stats.err_pkt_len++; u64_stats_update_end(&ring->syncp); @@ -2291,7 +2428,6 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, } if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L2E_B))) { - netdev_err(netdev, "L2 error pkt\n"); u64_stats_update_begin(&ring->syncp); ring->stats.l2_err++; u64_stats_update_end(&ring->syncp); @@ -2308,6 +2444,8 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, ring->tqp_vector->rx_group.total_bytes += skb->len; hns3_rx_checksum(ring, skb, desc); + hns3_set_rx_skb_rss_type(ring, skb); + return 0; } @@ -2501,10 +2639,8 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget) /* Since the actual Tx work is minimal, we can give the Tx a larger * budget and be more aggressive about cleaning up the Tx descriptors. */ - hns3_for_each_ring(ring, tqp_vector->tx_group) { - if (!hns3_clean_tx_ring(ring, budget)) - clean_complete = false; - } + hns3_for_each_ring(ring, tqp_vector->tx_group) + hns3_clean_tx_ring(ring); /* make sure rx ring budget not smaller than 1 */ rx_budget = max(budget / tqp_vector->num_tqps, 1); @@ -2627,6 +2763,23 @@ static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group, group->count++; } +static void hns3_nic_set_cpumask(struct hns3_nic_priv *priv) +{ + struct pci_dev *pdev = priv->ae_handle->pdev; + struct hns3_enet_tqp_vector *tqp_vector; + int num_vectors = priv->vector_num; + int numa_node; + int vector_i; + + numa_node = dev_to_node(&pdev->dev); + + for (vector_i = 0; vector_i < num_vectors; vector_i++) { + tqp_vector = &priv->tqp_vector[vector_i]; + cpumask_set_cpu(cpumask_local_spread(vector_i, numa_node), + &tqp_vector->affinity_mask); + } +} + static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv) { struct hnae3_ring_chain_node vector_ring_chain; @@ -2635,6 +2788,8 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv) int ret = 0; u16 i; + hns3_nic_set_cpumask(priv); + for (i = 0; i < priv->vector_num; i++) { tqp_vector = &priv->tqp_vector[i]; hns3_vector_gl_rl_init_hw(tqp_vector, priv); @@ -3069,13 +3224,23 @@ static void hns3_init_mac_addr(struct net_device *netdev, bool init) } -static void hns3_uninit_mac_addr(struct net_device *netdev) +static int hns3_restore_fd_rules(struct net_device *netdev) { - struct hns3_nic_priv *priv = netdev_priv(netdev); - struct hnae3_handle *h = priv->ae_handle; + struct hnae3_handle *h = hns3_get_handle(netdev); + int ret = 0; - if (h->ae_algo->ops->rm_uc_addr) - h->ae_algo->ops->rm_uc_addr(h, netdev->dev_addr); + if (h->ae_algo->ops->restore_fd_rules) + ret = h->ae_algo->ops->restore_fd_rules(h); + + return ret; +} + +static void hns3_del_all_fd_rules(struct net_device *netdev, bool clear_list) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (h->ae_algo->ops->del_all_fd_entries) + h->ae_algo->ops->del_all_fd_entries(h, clear_list); } static void hns3_nic_set_priv_ops(struct net_device *netdev) @@ -3095,12 +3260,14 @@ static void hns3_nic_set_priv_ops(struct net_device *netdev) static int hns3_client_init(struct hnae3_handle *handle) { struct pci_dev *pdev = handle->pdev; + u16 alloc_tqps, max_rss_size; struct hns3_nic_priv *priv; struct net_device *netdev; int ret; - netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv), - hns3_get_max_available_channels(handle)); + handle->ae_algo->ops->get_tqps_and_rss_info(handle, &alloc_tqps, + &max_rss_size); + netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv), alloc_tqps); if (!netdev) return -ENOMEM; @@ -3189,9 +3356,13 @@ static void hns3_client_uninit(struct 
hnae3_handle *handle, bool reset) struct hns3_nic_priv *priv = netdev_priv(netdev); int ret; + hns3_remove_hw_addr(netdev); + if (netdev->reg_state != NETREG_UNINITIALIZED) unregister_netdev(netdev); + hns3_del_all_fd_rules(netdev, true); + hns3_force_clear_all_rx_ring(handle); ret = hns3_nic_uninit_vector_data(priv); @@ -3210,8 +3381,6 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset) priv->ring_data = NULL; - hns3_uninit_mac_addr(netdev); - free_netdev(netdev); } @@ -3283,6 +3452,25 @@ static void hns3_recover_hw_addr(struct net_device *ndev) hns3_nic_mc_sync(ndev, ha->addr); } +static void hns3_remove_hw_addr(struct net_device *netdev) +{ + struct netdev_hw_addr_list *list; + struct netdev_hw_addr *ha, *tmp; + + hns3_nic_uc_unsync(netdev, netdev->dev_addr); + + /* go through and unsync uc_addr entries to the device */ + list = &netdev->uc; + list_for_each_entry_safe(ha, tmp, &list->list, list) + hns3_nic_uc_unsync(netdev, ha->addr); + + /* go through and unsync mc_addr entries to the device */ + list = &netdev->mc; + list_for_each_entry_safe(ha, tmp, &list->list, list) + if (ha->refcount > 1) + hns3_nic_mc_unsync(netdev, ha->addr); +} + static void hns3_clear_tx_ring(struct hns3_enet_ring *ring) { while (ring->next_to_clean != ring->next_to_use) { @@ -3419,6 +3607,31 @@ int hns3_nic_reset_all_ring(struct hnae3_handle *h) return 0; } +static void hns3_store_coal(struct hns3_nic_priv *priv) +{ + /* ethtool only support setting and querying one coal + * configuation for now, so save the vector 0' coal + * configuation here in order to restore it. + */ + memcpy(&priv->tx_coal, &priv->tqp_vector[0].tx_group.coal, + sizeof(struct hns3_enet_coalesce)); + memcpy(&priv->rx_coal, &priv->tqp_vector[0].rx_group.coal, + sizeof(struct hns3_enet_coalesce)); +} + +static void hns3_restore_coal(struct hns3_nic_priv *priv) +{ + u16 vector_num = priv->vector_num; + int i; + + for (i = 0; i < vector_num; i++) { + memcpy(&priv->tqp_vector[i].tx_group.coal, &priv->tx_coal, + sizeof(struct hns3_enet_coalesce)); + memcpy(&priv->tqp_vector[i].rx_group.coal, &priv->rx_coal, + sizeof(struct hns3_enet_coalesce)); + } +} + static int hns3_reset_notify_down_enet(struct hnae3_handle *handle) { struct hnae3_knic_private_info *kinfo = &handle->kinfo; @@ -3452,19 +3665,27 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle) { struct net_device *netdev = handle->kinfo.netdev; struct hns3_nic_priv *priv = netdev_priv(netdev); + bool vlan_filter_enable; int ret; hns3_init_mac_addr(netdev, false); - hns3_nic_set_rx_mode(netdev); hns3_recover_hw_addr(netdev); + hns3_update_promisc_mode(netdev, handle->netdev_flags); + vlan_filter_enable = netdev->flags & IFF_PROMISC ? 
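hns3_remove_hw_addr() above open-codes list_for_each_entry_safe() over netdev->uc and netdev->mc. For reference, the netdevice iteration helpers express the same walk; a sketch with a hypothetical unsync callback (the _safe variant is only required when entries are removed while iterating):

#include <linux/netdevice.h>

static void sketch_unsync_all(struct net_device *ndev,
			      void (*unsync)(struct net_device *, const u8 *))
{
	struct netdev_hw_addr *ha;

	netdev_for_each_uc_addr(ha, ndev)	/* walks &ndev->uc */
		unsync(ndev, ha->addr);
	netdev_for_each_mc_addr(ha, ndev)	/* walks &ndev->mc */
		unsync(ndev, ha->addr);
}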
false : true; + hns3_enable_vlan_filter(netdev, vlan_filter_enable); + /* Hardware table is only clear when pf resets */ if (!(handle->flags & HNAE3_SUPPORT_VF)) hns3_restore_vlan(netdev); + hns3_restore_fd_rules(netdev); + /* Carrier off reporting is important to ethtool even BEFORE open */ netif_carrier_off(netdev); + hns3_restore_coal(priv); + ret = hns3_nic_init_vector_data(priv); if (ret) return ret; @@ -3480,6 +3701,7 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle) static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle) { + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); struct net_device *netdev = handle->kinfo.netdev; struct hns3_nic_priv *priv = netdev_priv(netdev); int ret; @@ -3492,11 +3714,20 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle) return ret; } + hns3_store_coal(priv); + ret = hns3_uninit_all_ring(priv); if (ret) netdev_err(netdev, "uninit ring error\n"); - hns3_uninit_mac_addr(netdev); + /* it is cumbersome for hardware to pick-and-choose entries for deletion + * from table space. Hence, for function reset software intervention is + * required to delete the entries + */ + if (hns3_dev_ongoing_func_reset(ae_dev)) { + hns3_remove_hw_addr(netdev); + hns3_del_all_fd_rules(netdev, false); + } return ret; } @@ -3526,24 +3757,7 @@ static int hns3_reset_notify(struct hnae3_handle *handle, return ret; } -static void hns3_restore_coal(struct hns3_nic_priv *priv, - struct hns3_enet_coalesce *tx, - struct hns3_enet_coalesce *rx) -{ - u16 vector_num = priv->vector_num; - int i; - - for (i = 0; i < vector_num; i++) { - memcpy(&priv->tqp_vector[i].tx_group.coal, tx, - sizeof(struct hns3_enet_coalesce)); - memcpy(&priv->tqp_vector[i].rx_group.coal, rx, - sizeof(struct hns3_enet_coalesce)); - } -} - -static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num, - struct hns3_enet_coalesce *tx, - struct hns3_enet_coalesce *rx) +static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num) { struct hns3_nic_priv *priv = netdev_priv(netdev); struct hnae3_handle *h = hns3_get_handle(netdev); @@ -3561,7 +3775,7 @@ static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num, if (ret) goto err_alloc_vector; - hns3_restore_coal(priv, tx, rx); + hns3_restore_coal(priv); ret = hns3_nic_init_vector_data(priv); if (ret) @@ -3593,7 +3807,6 @@ int hns3_set_channels(struct net_device *netdev, struct hns3_nic_priv *priv = netdev_priv(netdev); struct hnae3_handle *h = hns3_get_handle(netdev); struct hnae3_knic_private_info *kinfo = &h->kinfo; - struct hns3_enet_coalesce tx_coal, rx_coal; bool if_running = netif_running(netdev); u32 new_tqp_num = ch->combined_count; u16 org_tqp_num; @@ -3625,15 +3838,7 @@ int hns3_set_channels(struct net_device *netdev, goto open_netdev; } - /* Changing the tqp num may also change the vector num, - * ethtool only support setting and querying one coal - * configuation for now, so save the vector 0' coal - * configuation here in order to restore it. 
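The uninit path above scrubs the MAC and flow-director tables only when hns3_dev_ongoing_func_reset() reports a function-level reset; other reset types wipe those hardware tables anyway. A condensed sketch of the guard, reusing the patch's HNAE3_FUNC_RESET value but with hypothetical scrub helpers:

/* After a function reset the hardware keeps stale MAC/FD entries, so
 * software must delete them itself; global resets clear the tables in
 * hardware.
 */
static void sketch_scrub_after_reset(struct hnae3_ae_dev *ae_dev,
				     struct net_device *netdev)
{
	if (!ae_dev || ae_dev->reset_type != HNAE3_FUNC_RESET)
		return;

	sketch_remove_hw_addrs(netdev);		/* hypothetical */
	sketch_del_fd_rules(netdev, false);	/* hypothetical */
}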
- */ - memcpy(&tx_coal, &priv->tqp_vector[0].tx_group.coal, - sizeof(struct hns3_enet_coalesce)); - memcpy(&rx_coal, &priv->tqp_vector[0].rx_group.coal, - sizeof(struct hns3_enet_coalesce)); + hns3_store_coal(priv); hns3_nic_dealloc_vector_data(priv); @@ -3641,10 +3846,9 @@ int hns3_set_channels(struct net_device *netdev, hns3_put_ring_config(priv); org_tqp_num = h->kinfo.num_tqps; - ret = hns3_modify_tqp_num(netdev, new_tqp_num, &tx_coal, &rx_coal); + ret = hns3_modify_tqp_num(netdev, new_tqp_num); if (ret) { - ret = hns3_modify_tqp_num(netdev, org_tqp_num, - &tx_coal, &rx_coal); + ret = hns3_modify_tqp_num(netdev, org_tqp_num); if (ret) { /* If revert to old tqp failed, fatal error occurred */ dev_err(&netdev->dev, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index cb450d7ec8c1..f25b281ff2aa 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -491,7 +491,9 @@ struct hns3_enet_tqp_vector { struct hns3_enet_ring_group rx_group; struct hns3_enet_ring_group tx_group; + cpumask_t affinity_mask; u16 num_tqps; /* total number of tqps in TQP vector */ + struct irq_affinity_notify affinity_notify; char name[HNAE3_INT_NAME_LEN]; @@ -541,6 +543,8 @@ struct hns3_nic_priv { /* Vxlan/Geneve information */ struct hns3_udp_tunnel udp_tnl[HNS3_UDP_TNL_MAX]; unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + struct hns3_enet_coalesce tx_coal; + struct hns3_enet_coalesce rx_coal; }; union l3_hdr_info { @@ -581,6 +585,11 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value) writel(value, reg_addr + reg); } +static inline bool hns3_dev_ongoing_func_reset(struct hnae3_ae_dev *ae_dev) +{ + return (ae_dev && (ae_dev->reset_type == HNAE3_FUNC_RESET)); +} + #define hns3_write_dev(a, reg, value) \ hns3_write_reg((a)->io_base, (reg), (value)) @@ -615,7 +624,7 @@ void hns3_ethtool_set_ops(struct net_device *netdev); int hns3_set_channels(struct net_device *netdev, struct ethtool_channels *ch); -bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget); +void hns3_clean_tx_ring(struct hns3_enet_ring *ring); int hns3_init_all_ring(struct hns3_nic_priv *priv); int hns3_uninit_all_ring(struct hns3_nic_priv *priv); int hns3_nic_reset_all_ring(struct hnae3_handle *h); @@ -631,6 +640,9 @@ void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector, void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector, u32 rl_value); +void hns3_enable_vlan_filter(struct net_device *netdev, bool enable); +void hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags); + #ifdef CONFIG_HNS3_DCB void hns3_dcbnl_setup(struct hnae3_handle *handle); #else diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index f70ee6910ee2..a4762c2b8ba1 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -22,13 +22,13 @@ struct hns3_stats { static const struct hns3_stats hns3_txq_stats[] = { /* Tx per-queue statistics */ HNS3_TQP_STAT("io_err_cnt", io_err_cnt), - HNS3_TQP_STAT("tx_dropped", sw_err_cnt), + HNS3_TQP_STAT("dropped", sw_err_cnt), HNS3_TQP_STAT("seg_pkt_cnt", seg_pkt_cnt), HNS3_TQP_STAT("packets", tx_pkts), HNS3_TQP_STAT("bytes", tx_bytes), HNS3_TQP_STAT("errors", tx_err_cnt), - HNS3_TQP_STAT("tx_wake", restart_queue), - HNS3_TQP_STAT("tx_busy", tx_busy), + HNS3_TQP_STAT("wake", restart_queue), + 
HNS3_TQP_STAT("busy", tx_busy), }; #define HNS3_TXQ_STATS_COUNT ARRAY_SIZE(hns3_txq_stats) @@ -36,7 +36,7 @@ static const struct hns3_stats hns3_txq_stats[] = { static const struct hns3_stats hns3_rxq_stats[] = { /* Rx per-queue statistics */ HNS3_TQP_STAT("io_err_cnt", io_err_cnt), - HNS3_TQP_STAT("rx_dropped", sw_err_cnt), + HNS3_TQP_STAT("dropped", sw_err_cnt), HNS3_TQP_STAT("seg_pkt_cnt", seg_pkt_cnt), HNS3_TQP_STAT("packets", rx_pkts), HNS3_TQP_STAT("bytes", rx_bytes), @@ -53,7 +53,7 @@ static const struct hns3_stats hns3_rxq_stats[] = { #define HNS3_TQP_STATS_COUNT (HNS3_TXQ_STATS_COUNT + HNS3_RXQ_STATS_COUNT) -#define HNS3_SELF_TEST_TYPE_NUM 2 +#define HNS3_SELF_TEST_TYPE_NUM 3 #define HNS3_NIC_LB_TEST_PKT_NUM 1 #define HNS3_NIC_LB_TEST_RING_ID 0 #define HNS3_NIC_LB_TEST_PACKET_SIZE 128 @@ -71,6 +71,7 @@ struct hns3_link_mode_mapping { static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en) { struct hnae3_handle *h = hns3_get_handle(ndev); + bool vlan_filter_enable; int ret; if (!h->ae_algo->ops->set_loopback || @@ -78,8 +79,9 @@ static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en) return -EOPNOTSUPP; switch (loop) { - case HNAE3_MAC_INTER_LOOP_SERDES: - case HNAE3_MAC_INTER_LOOP_MAC: + case HNAE3_LOOP_SERIAL_SERDES: + case HNAE3_LOOP_PARALLEL_SERDES: + case HNAE3_LOOP_APP: ret = h->ae_algo->ops->set_loopback(h, loop, en); break; default: @@ -90,7 +92,14 @@ static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en) if (ret) return ret; - h->ae_algo->ops->set_promisc_mode(h, en, en); + if (en) { + h->ae_algo->ops->set_promisc_mode(h, true, true); + } else { + /* recover promisc mode before loopback test */ + hns3_update_promisc_mode(ndev, h->netdev_flags); + vlan_filter_enable = ndev->flags & IFF_PROMISC ? 
false : true; + hns3_enable_vlan_filter(ndev, vlan_filter_enable); + } return ret; } @@ -100,41 +109,26 @@ static int hns3_lp_up(struct net_device *ndev, enum hnae3_loop loop_mode) struct hnae3_handle *h = hns3_get_handle(ndev); int ret; - if (!h->ae_algo->ops->start) - return -EOPNOTSUPP; - ret = hns3_nic_reset_all_ring(h); if (ret) return ret; - ret = h->ae_algo->ops->start(h); - if (ret) { - netdev_err(ndev, - "hns3_lb_up ae start return error: %d\n", ret); - return ret; - } - ret = hns3_lp_setup(ndev, loop_mode, true); usleep_range(10000, 20000); - return ret; + return 0; } static int hns3_lp_down(struct net_device *ndev, enum hnae3_loop loop_mode) { - struct hnae3_handle *h = hns3_get_handle(ndev); int ret; - if (!h->ae_algo->ops->stop) - return -EOPNOTSUPP; - ret = hns3_lp_setup(ndev, loop_mode, false); if (ret) { netdev_err(ndev, "lb_setup return error: %d\n", ret); return ret; } - h->ae_algo->ops->stop(h); usleep_range(10000, 20000); return 0; @@ -152,6 +146,7 @@ static void hns3_lp_setup_skb(struct sk_buff *skb) packet = skb_put(skb, HNS3_NIC_LB_TEST_PACKET_SIZE); memcpy(ethh->h_dest, ndev->dev_addr, ETH_ALEN); + ethh->h_dest[5] += 0x1f; eth_zero_addr(ethh->h_source); ethh->h_proto = htons(ETH_P_ARP); skb_reset_mac_header(skb); @@ -214,7 +209,7 @@ static void hns3_lb_clear_tx_ring(struct hns3_nic_priv *priv, u32 start_ringid, for (i = start_ringid; i <= end_ringid; i++) { struct hns3_enet_ring *ring = priv->ring_data[i].ring; - hns3_clean_tx_ring(ring, budget); + hns3_clean_tx_ring(ring); } } @@ -300,16 +295,21 @@ static void hns3_self_test(struct net_device *ndev, if (eth_test->flags != ETH_TEST_FL_OFFLINE) return; - st_param[HNAE3_MAC_INTER_LOOP_MAC][0] = HNAE3_MAC_INTER_LOOP_MAC; - st_param[HNAE3_MAC_INTER_LOOP_MAC][1] = - h->flags & HNAE3_SUPPORT_MAC_LOOPBACK; + st_param[HNAE3_LOOP_APP][0] = HNAE3_LOOP_APP; + st_param[HNAE3_LOOP_APP][1] = + h->flags & HNAE3_SUPPORT_APP_LOOPBACK; - st_param[HNAE3_MAC_INTER_LOOP_SERDES][0] = HNAE3_MAC_INTER_LOOP_SERDES; - st_param[HNAE3_MAC_INTER_LOOP_SERDES][1] = - h->flags & HNAE3_SUPPORT_SERDES_LOOPBACK; + st_param[HNAE3_LOOP_SERIAL_SERDES][0] = HNAE3_LOOP_SERIAL_SERDES; + st_param[HNAE3_LOOP_SERIAL_SERDES][1] = + h->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK; + + st_param[HNAE3_LOOP_PARALLEL_SERDES][0] = + HNAE3_LOOP_PARALLEL_SERDES; + st_param[HNAE3_LOOP_PARALLEL_SERDES][1] = + h->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK; if (if_running) - dev_close(ndev); + ndev->netdev_ops->ndo_stop(ndev); #if IS_ENABLED(CONFIG_VLAN_8021Q) /* Disable the vlan filter for selftest does not support it */ @@ -347,7 +347,7 @@ static void hns3_self_test(struct net_device *ndev, #endif if (if_running) - dev_open(ndev); + ndev->netdev_ops->ndo_open(ndev); } static int hns3_get_sset_count(struct net_device *netdev, int stringset) @@ -365,9 +365,10 @@ static int hns3_get_sset_count(struct net_device *netdev, int stringset) case ETH_SS_TEST: return ops->get_sset_count(h, stringset); - } - return 0; + default: + return -EOPNOTSUPP; + } } static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats, @@ -383,7 +384,7 @@ static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats, data[ETH_GSTRING_LEN - 1] = '\0'; /* first, prepend the prefix string */ - n1 = snprintf(data, MAX_PREFIX_SIZE, "%s#%d_", + n1 = snprintf(data, MAX_PREFIX_SIZE, "%s%d_", prefix, i); n1 = min_t(uint, n1, MAX_PREFIX_SIZE - 1); size_left = (ETH_GSTRING_LEN - 1) - n1; @@ -431,6 +432,8 @@ static void hns3_get_strings(struct net_device *netdev, u32 stringset, u8 
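hns3_lp_setup_skb() above copies the port MAC into the test frame and then offsets the last destination byte by 0x1f, so the loopback frame no longer matches the port's own address and is presumably delivered only because promiscuous mode is enabled for the test. A self-contained sketch of that frame construction; sizes and names are illustrative:

#include <linux/etherdevice.h>
#include <linux/skbuff.h>

static struct sk_buff *sketch_build_lb_skb(struct net_device *ndev, int len)
{
	struct sk_buff *skb = alloc_skb(ETH_HLEN + len, GFP_KERNEL);
	struct ethhdr *eth;

	if (!skb)
		return NULL;

	eth = skb_put(skb, ETH_HLEN);
	ether_addr_copy(eth->h_dest, ndev->dev_addr);
	eth->h_dest[ETH_ALEN - 1] += 0x1f;	/* deliberate mismatch */
	eth_zero_addr(eth->h_source);
	eth->h_proto = htons(ETH_P_ARP);
	skb_put_zero(skb, len);			/* dummy payload */

	return skb;
}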
*data) case ETH_SS_TEST: ops->get_strings(h, stringset, data); break; + default: + break; } } @@ -556,30 +559,72 @@ static int hns3_set_pauseparam(struct net_device *netdev, return -EOPNOTSUPP; } +static void hns3_get_ksettings(struct hnae3_handle *h, + struct ethtool_link_ksettings *cmd) +{ + const struct hnae3_ae_ops *ops = h->ae_algo->ops; + + /* 1.auto_neg & speed & duplex from cmd */ + if (ops->get_ksettings_an_result) + ops->get_ksettings_an_result(h, + &cmd->base.autoneg, + &cmd->base.speed, + &cmd->base.duplex); + + /* 2.get link mode*/ + if (ops->get_link_mode) + ops->get_link_mode(h, + cmd->link_modes.supported, + cmd->link_modes.advertising); + + /* 3.mdix_ctrl&mdix get from phy reg */ + if (ops->get_mdix_mode) + ops->get_mdix_mode(h, &cmd->base.eth_tp_mdix_ctrl, + &cmd->base.eth_tp_mdix); +} + static int hns3_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd) { struct hnae3_handle *h = hns3_get_handle(netdev); - u32 flowctrl_adv = 0; + const struct hnae3_ae_ops *ops; + u8 media_type; u8 link_stat; if (!h->ae_algo || !h->ae_algo->ops) return -EOPNOTSUPP; - /* 1.auto_neg & speed & duplex from cmd */ - if (netdev->phydev) { + ops = h->ae_algo->ops; + if (ops->get_media_type) + ops->get_media_type(h, &media_type); + else + return -EOPNOTSUPP; + + switch (media_type) { + case HNAE3_MEDIA_TYPE_NONE: + cmd->base.port = PORT_NONE; + hns3_get_ksettings(h, cmd); + break; + case HNAE3_MEDIA_TYPE_FIBER: + cmd->base.port = PORT_FIBRE; + hns3_get_ksettings(h, cmd); + break; + case HNAE3_MEDIA_TYPE_COPPER: + if (!netdev->phydev) + return -EOPNOTSUPP; + + cmd->base.port = PORT_TP; phy_ethtool_ksettings_get(netdev->phydev, cmd); + break; + default: + + netdev_warn(netdev, "Unknown media type"); return 0; } - if (h->ae_algo->ops->get_ksettings_an_result) - h->ae_algo->ops->get_ksettings_an_result(h, - &cmd->base.autoneg, - &cmd->base.speed, - &cmd->base.duplex); - else - return -EOPNOTSUPP; + /* mdio_support */ + cmd->base.mdio_support = ETH_MDIO_SUPPORTS_C22; link_stat = hns3_get_link(netdev); if (!link_stat) { @@ -587,36 +632,6 @@ static int hns3_get_link_ksettings(struct net_device *netdev, cmd->base.duplex = DUPLEX_UNKNOWN; } - /* 2.get link mode and port type*/ - if (h->ae_algo->ops->get_link_mode) - h->ae_algo->ops->get_link_mode(h, - cmd->link_modes.supported, - cmd->link_modes.advertising); - - cmd->base.port = PORT_NONE; - if (h->ae_algo->ops->get_port_type) - h->ae_algo->ops->get_port_type(h, - &cmd->base.port); - - /* 3.mdix_ctrl&mdix get from phy reg */ - if (h->ae_algo->ops->get_mdix_mode) - h->ae_algo->ops->get_mdix_mode(h, &cmd->base.eth_tp_mdix_ctrl, - &cmd->base.eth_tp_mdix); - /* 4.mdio_support */ - cmd->base.mdio_support = ETH_MDIO_SUPPORTS_C22; - - /* 5.get flow control setttings */ - if (h->ae_algo->ops->get_flowctrl_adv) - h->ae_algo->ops->get_flowctrl_adv(h, &flowctrl_adv); - - if (flowctrl_adv & ADVERTISED_Pause) - ethtool_link_ksettings_add_link_mode(cmd, advertising, - Pause); - - if (flowctrl_adv & ADVERTISED_Asym_Pause) - ethtool_link_ksettings_add_link_mode(cmd, advertising, - Asym_Pause); - return 0; } @@ -671,12 +686,13 @@ static int hns3_set_rss(struct net_device *netdev, const u32 *indir, if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->set_rss) return -EOPNOTSUPP; - /* currently we only support Toeplitz hash */ - if ((hfunc != ETH_RSS_HASH_NO_CHANGE) && (hfunc != ETH_RSS_HASH_TOP)) { - netdev_err(netdev, - "hash func not supported (only Toeplitz hash)\n"); + if ((h->pdev->revision == 0x20 && + hfunc != ETH_RSS_HASH_TOP) || 
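hns3_set_rss() above now accepts ETH_RSS_HASH_XOR in addition to Toeplitz, except on revision 0x20 hardware, which only supports Toeplitz. From userspace this maps to `ethtool -X <dev> hfunc xor` (or `hfunc toeplitz`). A sketch of the hfunc-to-hardware mapping with hypothetical HW_ALGO_* register values:

#include <linux/ethtool.h>
#include <linux/errno.h>

#define HW_ALGO_TOEPLITZ	0	/* hypothetical register values */
#define HW_ALGO_SIMPLE_XOR	1

static int sketch_map_hfunc(u8 hfunc, u8 cur_algo, u8 *hw_algo)
{
	switch (hfunc) {
	case ETH_RSS_HASH_TOP:
		*hw_algo = HW_ALGO_TOEPLITZ;
		return 0;
	case ETH_RSS_HASH_XOR:
		*hw_algo = HW_ALGO_SIMPLE_XOR;
		return 0;
	case ETH_RSS_HASH_NO_CHANGE:
		*hw_algo = cur_algo;	/* keep the current algorithm */
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}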
(hfunc != ETH_RSS_HASH_NO_CHANGE && + hfunc != ETH_RSS_HASH_TOP && hfunc != ETH_RSS_HASH_XOR)) { + netdev_err(netdev, "hash func not supported\n"); return -EOPNOTSUPP; } + if (!indir) { netdev_err(netdev, "set rss failed for indir is empty\n"); @@ -692,20 +708,33 @@ static int hns3_get_rxnfc(struct net_device *netdev, { struct hnae3_handle *h = hns3_get_handle(netdev); - if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->get_rss_tuple) + if (!h->ae_algo || !h->ae_algo->ops) return -EOPNOTSUPP; switch (cmd->cmd) { case ETHTOOL_GRXRINGS: - cmd->data = h->kinfo.rss_size; - break; + cmd->data = h->kinfo.num_tqps; + return 0; case ETHTOOL_GRXFH: - return h->ae_algo->ops->get_rss_tuple(h, cmd); + if (h->ae_algo->ops->get_rss_tuple) + return h->ae_algo->ops->get_rss_tuple(h, cmd); + return -EOPNOTSUPP; + case ETHTOOL_GRXCLSRLCNT: + if (h->ae_algo->ops->get_fd_rule_cnt) + return h->ae_algo->ops->get_fd_rule_cnt(h, cmd); + return -EOPNOTSUPP; + case ETHTOOL_GRXCLSRULE: + if (h->ae_algo->ops->get_fd_rule_info) + return h->ae_algo->ops->get_fd_rule_info(h, cmd); + return -EOPNOTSUPP; + case ETHTOOL_GRXCLSRLALL: + if (h->ae_algo->ops->get_fd_all_rules) + return h->ae_algo->ops->get_fd_all_rules(h, cmd, + rule_locs); + return -EOPNOTSUPP; default: return -EOPNOTSUPP; } - - return 0; } static int hns3_change_all_ring_bd_num(struct hns3_nic_priv *priv, @@ -788,12 +817,22 @@ static int hns3_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) { struct hnae3_handle *h = hns3_get_handle(netdev); - if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->set_rss_tuple) + if (!h->ae_algo || !h->ae_algo->ops) return -EOPNOTSUPP; switch (cmd->cmd) { case ETHTOOL_SRXFH: - return h->ae_algo->ops->set_rss_tuple(h, cmd); + if (h->ae_algo->ops->set_rss_tuple) + return h->ae_algo->ops->set_rss_tuple(h, cmd); + return -EOPNOTSUPP; + case ETHTOOL_SRXCLSRLINS: + if (h->ae_algo->ops->add_fd_entry) + return h->ae_algo->ops->add_fd_entry(h, cmd); + return -EOPNOTSUPP; + case ETHTOOL_SRXCLSRLDEL: + if (h->ae_algo->ops->del_fd_entry) + return h->ae_algo->ops->del_fd_entry(h, cmd); + return -EOPNOTSUPP; default: return -EOPNOTSUPP; } @@ -1047,6 +1086,7 @@ static const struct ethtool_ops hns3vf_ethtool_ops = { .get_ethtool_stats = hns3_get_stats, .get_sset_count = hns3_get_sset_count, .get_rxnfc = hns3_get_rxnfc, + .set_rxnfc = hns3_set_rxnfc, .get_rxfh_key_size = hns3_get_rss_key_size, .get_rxfh_indir_size = hns3_get_rss_indir_size, .get_rxfh = hns3_get_rss, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index 821d4c2f84bd..1ccde67db770 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -175,21 +175,22 @@ enum hclge_opcode_type { HCLGE_OPC_MAC_VLAN_REMOVE = 0x1001, HCLGE_OPC_MAC_VLAN_TYPE_ID = 0x1002, HCLGE_OPC_MAC_VLAN_INSERT = 0x1003, + HCLGE_OPC_MAC_VLAN_ALLOCATE = 0x1004, HCLGE_OPC_MAC_ETHTYPE_ADD = 0x1010, HCLGE_OPC_MAC_ETHTYPE_REMOVE = 0x1011, - HCLGE_OPC_MAC_VLAN_MASK_SET = 0x1012, - - /* Multicast linear table commands */ - HCLGE_OPC_MTA_MAC_MODE_CFG = 0x1020, - HCLGE_OPC_MTA_MAC_FUNC_CFG = 0x1021, - HCLGE_OPC_MTA_TBL_ITEM_CFG = 0x1022, - HCLGE_OPC_MTA_TBL_ITEM_QUERY = 0x1023, /* VLAN commands */ HCLGE_OPC_VLAN_FILTER_CTRL = 0x1100, HCLGE_OPC_VLAN_FILTER_PF_CFG = 0x1101, HCLGE_OPC_VLAN_FILTER_VF_CFG = 0x1102, + /* Flow Director commands */ + HCLGE_OPC_FD_MODE_CTRL = 0x1200, + HCLGE_OPC_FD_GET_ALLOCATION = 0x1201, + HCLGE_OPC_FD_KEY_CONFIG = 
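The new ETHTOOL_{S,G}RXCLSRL* cases above expose the flow director through the standard ntuple interface, e.g. `ethtool -N <dev> flow-type tcp4 dst-port 80 action 2` to steer to queue 2, `action -1` to drop, and `ethtool -n <dev>` to list rules. A sketch of pulling the tcp4 tuple out of the ethtool flow spec; the struct fields are the real uapi, the function itself is illustrative:

#include <linux/ethtool.h>

/* Values arrive big-endian from userspace; ring_cookie selects the Rx
 * queue, and RX_CLS_FLOW_DISC marks a drop rule.
 */
static void sketch_parse_tcp4_rule(const struct ethtool_rx_flow_spec *fs,
				   __be32 *dip, __be16 *dport, u64 *ring)
{
	const struct ethtool_tcpip4_spec *l4 = &fs->h_u.tcp_ip4_spec;

	*dip = l4->ip4dst;
	*dport = l4->pdst;
	*ring = fs->ring_cookie;
}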
0x1202, + HCLGE_OPC_FD_TCAM_OP = 0x1203, + HCLGE_OPC_FD_AD_OP = 0x1204, + /* MDIO command */ HCLGE_OPC_MDIO_CONFIG = 0x1900, @@ -395,6 +396,8 @@ struct hclge_pf_res_cmd { #define HCLGE_CFG_RSS_SIZE_M GENMASK(31, 24) #define HCLGE_CFG_SPEED_ABILITY_S 0 #define HCLGE_CFG_SPEED_ABILITY_M GENMASK(7, 0) +#define HCLGE_CFG_UMV_TBL_SPACE_S 16 +#define HCLGE_CFG_UMV_TBL_SPACE_M GENMASK(31, 16) struct hclge_cfg_param_cmd { __le32 offset; @@ -584,13 +587,12 @@ struct hclge_mac_vlan_tbl_entry_cmd { u8 rsv2[6]; }; -#define HCLGE_VLAN_MASK_EN_B 0 -struct hclge_mac_vlan_mask_entry_cmd { - u8 rsv0[2]; - u8 vlan_mask; - u8 rsv1; - u8 mac_mask[6]; - u8 rsv2[14]; +#define HCLGE_UMV_SPC_ALC_B 0 +struct hclge_umv_spc_alc_cmd { + u8 allocate; + u8 rsv1[3]; + __le32 space_size; + u8 rsv2[16]; }; #define HCLGE_MAC_MGR_MASK_VLAN_B BIT(0) @@ -615,30 +617,6 @@ struct hclge_mac_mgr_tbl_entry_cmd { u8 rsv3[2]; }; -#define HCLGE_CFG_MTA_MAC_SEL_S 0 -#define HCLGE_CFG_MTA_MAC_SEL_M GENMASK(1, 0) -#define HCLGE_CFG_MTA_MAC_EN_B 7 -struct hclge_mta_filter_mode_cmd { - u8 dmac_sel_en; /* Use lowest 2 bit as sel_mode, bit 7 as enable */ - u8 rsv[23]; -}; - -#define HCLGE_CFG_FUNC_MTA_ACCEPT_B 0 -struct hclge_cfg_func_mta_filter_cmd { - u8 accept; /* Only used lowest 1 bit */ - u8 function_id; - u8 rsv[22]; -}; - -#define HCLGE_CFG_MTA_ITEM_ACCEPT_B 0 -#define HCLGE_CFG_MTA_ITEM_IDX_S 0 -#define HCLGE_CFG_MTA_ITEM_IDX_M GENMASK(11, 0) -struct hclge_cfg_func_mta_item_cmd { - __le16 item_idx; /* Only used lowest 12 bit */ - u8 accept; /* Only used lowest 1 bit */ - u8 rsv[21]; -}; - struct hclge_mac_vlan_add_cmd { __le16 flags; __le16 mac_addr_hi16; @@ -778,6 +756,7 @@ struct hclge_reset_cmd { }; #define HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B BIT(0) +#define HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B BIT(2) #define HCLGE_CMD_SERDES_DONE_B BIT(0) #define HCLGE_CMD_SERDES_SUCCESS_B BIT(1) struct hclge_serdes_lb_cmd { @@ -818,6 +797,76 @@ struct hclge_set_led_state_cmd { u8 rsv2[20]; }; +struct hclge_get_fd_mode_cmd { + u8 mode; + u8 enable; + u8 rsv[22]; +}; + +struct hclge_get_fd_allocation_cmd { + __le32 stage1_entry_num; + __le32 stage2_entry_num; + __le16 stage1_counter_num; + __le16 stage2_counter_num; + u8 rsv[12]; +}; + +struct hclge_set_fd_key_config_cmd { + u8 stage; + u8 key_select; + u8 inner_sipv6_word_en; + u8 inner_dipv6_word_en; + u8 outer_sipv6_word_en; + u8 outer_dipv6_word_en; + u8 rsv1[2]; + __le32 tuple_mask; + __le32 meta_data_mask; + u8 rsv2[8]; +}; + +#define HCLGE_FD_EPORT_SW_EN_B 0 +struct hclge_fd_tcam_config_1_cmd { + u8 stage; + u8 xy_sel; + u8 port_info; + u8 rsv1[1]; + __le32 index; + u8 entry_vld; + u8 rsv2[7]; + u8 tcam_data[8]; +}; + +struct hclge_fd_tcam_config_2_cmd { + u8 tcam_data[24]; +}; + +struct hclge_fd_tcam_config_3_cmd { + u8 tcam_data[20]; + u8 rsv[4]; +}; + +#define HCLGE_FD_AD_DROP_B 0 +#define HCLGE_FD_AD_DIRECT_QID_B 1 +#define HCLGE_FD_AD_QID_S 2 +#define HCLGE_FD_AD_QID_M GENMASK(12, 2) +#define HCLGE_FD_AD_USE_COUNTER_B 12 +#define HCLGE_FD_AD_COUNTER_NUM_S 13 +#define HCLGE_FD_AD_COUNTER_NUM_M GENMASK(20, 13) +#define HCLGE_FD_AD_NXT_STEP_B 20 +#define HCLGE_FD_AD_NXT_KEY_S 21 +#define HCLGE_FD_AD_NXT_KEY_M GENMASK(26, 21) +#define HCLGE_FD_AD_WR_RULE_ID_B 0 +#define HCLGE_FD_AD_RULE_ID_S 1 +#define HCLGE_FD_AD_RULE_ID_M GENMASK(13, 1) + +struct hclge_fd_ad_config_cmd { + u8 stage; + u8 rsv1[3]; + __le32 index; + __le64 ad_data; + u8 rsv2[8]; +}; + int hclge_cmd_init(struct hclge_dev *hdev); static inline void hclge_write_reg(void __iomem *base, u32 reg, u32 value) { diff --git 
a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c index f08ebb7caaaf..e72f724123d7 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c @@ -73,6 +73,7 @@ static int hclge_ieee_getets(struct hnae3_handle *h, struct ieee_ets *ets) static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets, u8 *tc, bool *changed) { + bool has_ets_tc = false; u32 total_ets_bw = 0; u8 max_tc = 0; u8 i; @@ -100,13 +101,14 @@ static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets, *changed = true; total_ets_bw += ets->tc_tx_bw[i]; - break; + has_ets_tc = true; + break; default: return -EINVAL; } } - if (total_ets_bw != BW_PERCENT) + if (has_ets_tc && total_ets_bw != BW_PERCENT) return -EINVAL; *tc = max_tc + 1; @@ -182,7 +184,9 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets) if (ret) return ret; - hclge_tm_schd_info_update(hdev, num_tc); + ret = hclge_tm_schd_info_update(hdev, num_tc); + if (ret) + return ret; ret = hclge_ieee_ets_to_tm_info(hdev, ets); if (ret) @@ -308,7 +312,9 @@ static int hclge_setup_tc(struct hnae3_handle *h, u8 tc, u8 *prio_tc) return -EINVAL; } - hclge_tm_schd_info_update(hdev, tc); + ret = hclge_tm_schd_info_update(hdev, tc); + if (ret) + return ret; ret = hclge_tm_prio_tc_info_update(hdev, prio_tc); if (ret) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 8577dfc799ad..1bd83e8268fc 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -24,15 +24,12 @@ #define HCLGE_NAME "hclge" #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset)))) #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f)) -#define HCLGE_64BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_64_bit_stats, f)) -#define HCLGE_32BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_32_bit_stats, f)) -static int hclge_set_mta_filter_mode(struct hclge_dev *hdev, - enum hclge_mta_dmac_sel_type mta_mac_sel, - bool enable); static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu); static int hclge_init_vlan_config(struct hclge_dev *hdev); static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev); +static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size, + u16 *allocated_size, bool is_alloc); static struct hnae3_ae_algo ae_algo; @@ -51,175 +48,12 @@ static const struct pci_device_id ae_algo_pci_tbl[] = { MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl); static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = { - "Mac Loopback test", - "Serdes Loopback test", + "App Loopback test", + "Serdes serial Loopback test", + "Serdes parallel Loopback test", "Phy Loopback test" }; -static const struct hclge_comm_stats_str g_all_64bit_stats_string[] = { - {"igu_rx_oversize_pkt", - HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_oversize_pkt)}, - {"igu_rx_undersize_pkt", - HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_undersize_pkt)}, - {"igu_rx_out_all_pkt", - HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_out_all_pkt)}, - {"igu_rx_uni_pkt", - HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_uni_pkt)}, - {"igu_rx_multi_pkt", - HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_multi_pkt)}, - {"igu_rx_broad_pkt", - HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_broad_pkt)}, - {"egu_tx_out_all_pkt", - HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_out_all_pkt)}, - {"egu_tx_uni_pkt", - HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_uni_pkt)}, - 
{"egu_tx_multi_pkt", - HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_multi_pkt)}, - {"egu_tx_broad_pkt", - HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_broad_pkt)}, - {"ssu_ppp_mac_key_num", - HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_mac_key_num)}, - {"ssu_ppp_host_key_num", - HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_host_key_num)}, - {"ppp_ssu_mac_rlt_num", - HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_mac_rlt_num)}, - {"ppp_ssu_host_rlt_num", - HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_host_rlt_num)}, - {"ssu_tx_in_num", - HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_in_num)}, - {"ssu_tx_out_num", - HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_out_num)}, - {"ssu_rx_in_num", - HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_in_num)}, - {"ssu_rx_out_num", - HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_out_num)} -}; - -static const struct hclge_comm_stats_str g_all_32bit_stats_string[] = { - {"igu_rx_err_pkt", - HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_err_pkt)}, - {"igu_rx_no_eof_pkt", - HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_eof_pkt)}, - {"igu_rx_no_sof_pkt", - HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_sof_pkt)}, - {"egu_tx_1588_pkt", - HCLGE_32BIT_STATS_FIELD_OFF(egu_tx_1588_pkt)}, - {"ssu_full_drop_num", - HCLGE_32BIT_STATS_FIELD_OFF(ssu_full_drop_num)}, - {"ssu_part_drop_num", - HCLGE_32BIT_STATS_FIELD_OFF(ssu_part_drop_num)}, - {"ppp_key_drop_num", - HCLGE_32BIT_STATS_FIELD_OFF(ppp_key_drop_num)}, - {"ppp_rlt_drop_num", - HCLGE_32BIT_STATS_FIELD_OFF(ppp_rlt_drop_num)}, - {"ssu_key_drop_num", - HCLGE_32BIT_STATS_FIELD_OFF(ssu_key_drop_num)}, - {"pkt_curr_buf_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_cnt)}, - {"qcn_fb_rcv_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_rcv_cnt)}, - {"qcn_fb_drop_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_drop_cnt)}, - {"qcn_fb_invaild_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_invaild_cnt)}, - {"rx_packet_tc0_in_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_in_cnt)}, - {"rx_packet_tc1_in_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_in_cnt)}, - {"rx_packet_tc2_in_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_in_cnt)}, - {"rx_packet_tc3_in_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_in_cnt)}, - {"rx_packet_tc4_in_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_in_cnt)}, - {"rx_packet_tc5_in_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_in_cnt)}, - {"rx_packet_tc6_in_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_in_cnt)}, - {"rx_packet_tc7_in_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_in_cnt)}, - {"rx_packet_tc0_out_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_out_cnt)}, - {"rx_packet_tc1_out_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_out_cnt)}, - {"rx_packet_tc2_out_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_out_cnt)}, - {"rx_packet_tc3_out_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_out_cnt)}, - {"rx_packet_tc4_out_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_out_cnt)}, - {"rx_packet_tc5_out_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_out_cnt)}, - {"rx_packet_tc6_out_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_out_cnt)}, - {"rx_packet_tc7_out_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_out_cnt)}, - {"tx_packet_tc0_in_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_in_cnt)}, - {"tx_packet_tc1_in_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_in_cnt)}, - {"tx_packet_tc2_in_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_in_cnt)}, - {"tx_packet_tc3_in_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_in_cnt)}, - {"tx_packet_tc4_in_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_in_cnt)}, - {"tx_packet_tc5_in_cnt", - 
HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_in_cnt)}, - {"tx_packet_tc6_in_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_in_cnt)}, - {"tx_packet_tc7_in_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_in_cnt)}, - {"tx_packet_tc0_out_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_out_cnt)}, - {"tx_packet_tc1_out_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_out_cnt)}, - {"tx_packet_tc2_out_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_out_cnt)}, - {"tx_packet_tc3_out_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_out_cnt)}, - {"tx_packet_tc4_out_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_out_cnt)}, - {"tx_packet_tc5_out_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_out_cnt)}, - {"tx_packet_tc6_out_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_out_cnt)}, - {"tx_packet_tc7_out_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_out_cnt)}, - {"pkt_curr_buf_tc0_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc0_cnt)}, - {"pkt_curr_buf_tc1_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc1_cnt)}, - {"pkt_curr_buf_tc2_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc2_cnt)}, - {"pkt_curr_buf_tc3_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc3_cnt)}, - {"pkt_curr_buf_tc4_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc4_cnt)}, - {"pkt_curr_buf_tc5_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc5_cnt)}, - {"pkt_curr_buf_tc6_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc6_cnt)}, - {"pkt_curr_buf_tc7_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc7_cnt)}, - {"mb_uncopy_num", - HCLGE_32BIT_STATS_FIELD_OFF(mb_uncopy_num)}, - {"lo_pri_unicast_rlt_drop_num", - HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_unicast_rlt_drop_num)}, - {"hi_pri_multicast_rlt_drop_num", - HCLGE_32BIT_STATS_FIELD_OFF(hi_pri_multicast_rlt_drop_num)}, - {"lo_pri_multicast_rlt_drop_num", - HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_multicast_rlt_drop_num)}, - {"rx_oq_drop_pkt_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_oq_drop_pkt_cnt)}, - {"tx_oq_drop_pkt_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(tx_oq_drop_pkt_cnt)}, - {"nic_l2_err_drop_pkt_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(nic_l2_err_drop_pkt_cnt)}, - {"roc_l2_err_drop_pkt_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(roc_l2_err_drop_pkt_cnt)} -}; - static const struct hclge_comm_stats_str g_mac_stats_string[] = { {"mac_tx_mac_pause_num", HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)}, @@ -394,109 +228,6 @@ static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = { }, }; -static int hclge_64_bit_update_stats(struct hclge_dev *hdev) -{ -#define HCLGE_64_BIT_CMD_NUM 5 -#define HCLGE_64_BIT_RTN_DATANUM 4 - u64 *data = (u64 *)(&hdev->hw_stats.all_64_bit_stats); - struct hclge_desc desc[HCLGE_64_BIT_CMD_NUM]; - __le64 *desc_data; - int i, k, n; - int ret; - - hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_64_BIT, true); - ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_64_BIT_CMD_NUM); - if (ret) { - dev_err(&hdev->pdev->dev, - "Get 64 bit pkt stats fail, status = %d.\n", ret); - return ret; - } - - for (i = 0; i < HCLGE_64_BIT_CMD_NUM; i++) { - if (unlikely(i == 0)) { - desc_data = (__le64 *)(&desc[i].data[0]); - n = HCLGE_64_BIT_RTN_DATANUM - 1; - } else { - desc_data = (__le64 *)(&desc[i]); - n = HCLGE_64_BIT_RTN_DATANUM; - } - for (k = 0; k < n; k++) { - *data++ += le64_to_cpu(*desc_data); - desc_data++; - } - } - - return 0; -} - -static void hclge_reset_partial_32bit_counter(struct hclge_32_bit_stats *stats) -{ - stats->pkt_curr_buf_cnt = 0; - stats->pkt_curr_buf_tc0_cnt = 0; - stats->pkt_curr_buf_tc1_cnt = 0; - stats->pkt_curr_buf_tc2_cnt = 
0; - stats->pkt_curr_buf_tc3_cnt = 0; - stats->pkt_curr_buf_tc4_cnt = 0; - stats->pkt_curr_buf_tc5_cnt = 0; - stats->pkt_curr_buf_tc6_cnt = 0; - stats->pkt_curr_buf_tc7_cnt = 0; -} - -static int hclge_32_bit_update_stats(struct hclge_dev *hdev) -{ -#define HCLGE_32_BIT_CMD_NUM 8 -#define HCLGE_32_BIT_RTN_DATANUM 8 - - struct hclge_desc desc[HCLGE_32_BIT_CMD_NUM]; - struct hclge_32_bit_stats *all_32_bit_stats; - __le32 *desc_data; - int i, k, n; - u64 *data; - int ret; - - all_32_bit_stats = &hdev->hw_stats.all_32_bit_stats; - data = (u64 *)(&all_32_bit_stats->egu_tx_1588_pkt); - - hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_32_BIT, true); - ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_32_BIT_CMD_NUM); - if (ret) { - dev_err(&hdev->pdev->dev, - "Get 32 bit pkt stats fail, status = %d.\n", ret); - - return ret; - } - - hclge_reset_partial_32bit_counter(all_32_bit_stats); - for (i = 0; i < HCLGE_32_BIT_CMD_NUM; i++) { - if (unlikely(i == 0)) { - __le16 *desc_data_16bit; - - all_32_bit_stats->igu_rx_err_pkt += - le32_to_cpu(desc[i].data[0]); - - desc_data_16bit = (__le16 *)&desc[i].data[1]; - all_32_bit_stats->igu_rx_no_eof_pkt += - le16_to_cpu(*desc_data_16bit); - - desc_data_16bit++; - all_32_bit_stats->igu_rx_no_sof_pkt += - le16_to_cpu(*desc_data_16bit); - - desc_data = &desc[i].data[2]; - n = HCLGE_32_BIT_RTN_DATANUM - 4; - } else { - desc_data = (__le32 *)&desc[i]; - n = HCLGE_32_BIT_RTN_DATANUM; - } - for (k = 0; k < n; k++) { - *data++ += le32_to_cpu(*desc_data); - desc_data++; - } - } - - return 0; -} - static int hclge_mac_update_stats(struct hclge_dev *hdev) { #define HCLGE_MAC_CMD_NUM 21 @@ -623,7 +354,7 @@ static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data) for (i = 0; i < kinfo->num_tqps; i++) { struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i], struct hclge_tqp, q); - snprintf(buff, ETH_GSTRING_LEN, "txq#%d_pktnum_rcd", + snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd", tqp->index); buff = buff + ETH_GSTRING_LEN; } @@ -631,7 +362,7 @@ static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data) for (i = 0; i < kinfo->num_tqps; i++) { struct hclge_tqp *tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q); - snprintf(buff, ETH_GSTRING_LEN, "rxq#%d_pktnum_rcd", + snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd", tqp->index); buff = buff + ETH_GSTRING_LEN; } @@ -675,14 +406,8 @@ static void hclge_update_netstat(struct hclge_hw_stats *hw_stats, struct net_device_stats *net_stats) { net_stats->tx_dropped = 0; - net_stats->rx_dropped = hw_stats->all_32_bit_stats.ssu_full_drop_num; - net_stats->rx_dropped += hw_stats->all_32_bit_stats.ppp_key_drop_num; - net_stats->rx_dropped += hw_stats->all_32_bit_stats.ssu_key_drop_num; - net_stats->rx_errors = hw_stats->mac_stats.mac_rx_oversize_pkt_num; net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num; - net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_eof_pkt; - net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_sof_pkt; net_stats->rx_errors += hw_stats->mac_stats.mac_rx_fcs_err_pkt_num; net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num; @@ -717,12 +442,6 @@ static void hclge_update_stats_for_all(struct hclge_dev *hdev) dev_err(&hdev->pdev->dev, "Update MAC stats fail, status = %d.\n", status); - status = hclge_32_bit_update_stats(hdev); - if (status) - dev_err(&hdev->pdev->dev, - "Update 32 bit stats fail, status = %d.\n", - status); - hclge_update_netstat(&hdev->hw_stats, &handle->kinfo.netdev->stats); } @@ -743,18 +462,6 @@ static 
void hclge_update_stats(struct hnae3_handle *handle, "Update MAC stats fail, status = %d.\n", status); - status = hclge_32_bit_update_stats(hdev); - if (status) - dev_err(&hdev->pdev->dev, - "Update 32 bit stats fail, status = %d.\n", - status); - - status = hclge_64_bit_update_stats(hdev); - if (status) - dev_err(&hdev->pdev->dev, - "Update 64 bit stats fail, status = %d.\n", - status); - status = hclge_tqps_update_stats(handle); if (status) dev_err(&hdev->pdev->dev, @@ -768,7 +475,10 @@ static void hclge_update_stats(struct hnae3_handle *handle, static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset) { -#define HCLGE_LOOPBACK_TEST_FLAGS 0x7 +#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\ + HNAE3_SUPPORT_PHY_LOOPBACK |\ + HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\ + HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; @@ -782,19 +492,19 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset) if (stringset == ETH_SS_TEST) { /* clear loopback bit flags at first */ handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS)); - if (hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M || + if (hdev->pdev->revision >= 0x21 || + hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M || hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M || hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) { count += 1; - handle->flags |= HNAE3_SUPPORT_MAC_LOOPBACK; + handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK; } - count++; - handle->flags |= HNAE3_SUPPORT_SERDES_LOOPBACK; + count += 2; + handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK; + handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK; } else if (stringset == ETH_SS_STATS) { count = ARRAY_SIZE(g_mac_stats_string) + - ARRAY_SIZE(g_all_32bit_stats_string) + - ARRAY_SIZE(g_all_64bit_stats_string) + hclge_tqps_get_sset_count(handle, stringset); } @@ -814,33 +524,29 @@ static void hclge_get_strings(struct hnae3_handle *handle, g_mac_stats_string, size, p); - size = ARRAY_SIZE(g_all_32bit_stats_string); - p = hclge_comm_get_strings(stringset, - g_all_32bit_stats_string, - size, - p); - size = ARRAY_SIZE(g_all_64bit_stats_string); - p = hclge_comm_get_strings(stringset, - g_all_64bit_stats_string, - size, - p); p = hclge_tqps_get_strings(handle, p); } else if (stringset == ETH_SS_TEST) { - if (handle->flags & HNAE3_SUPPORT_MAC_LOOPBACK) { + if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) { + memcpy(p, + hns3_nic_test_strs[HNAE3_LOOP_APP], + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) { memcpy(p, - hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_MAC], + hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES], ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } - if (handle->flags & HNAE3_SUPPORT_SERDES_LOOPBACK) { + if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) { memcpy(p, - hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_SERDES], + hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES], ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) { memcpy(p, - hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_PHY], + hns3_nic_test_strs[HNAE3_LOOP_PHY], ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } @@ -857,14 +563,6 @@ static void hclge_get_stats(struct hnae3_handle *handle, u64 *data) g_mac_stats_string, ARRAY_SIZE(g_mac_stats_string), data); - p = hclge_comm_get_stats(&hdev->hw_stats.all_32_bit_stats, - g_all_32bit_stats_string, - ARRAY_SIZE(g_all_32bit_stats_string), - p); - p = 
hclge_comm_get_stats(&hdev->hw_stats.all_64_bit_stats, - g_all_64bit_stats_string, - ARRAY_SIZE(g_all_64bit_stats_string), - p); p = hclge_tqps_get_stats(handle, p); } @@ -1079,6 +777,11 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc) cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]), HCLGE_CFG_SPEED_ABILITY_M, HCLGE_CFG_SPEED_ABILITY_S); + cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]), + HCLGE_CFG_UMV_TBL_SPACE_M, + HCLGE_CFG_UMV_TBL_SPACE_S); + if (!cfg->umv_space) + cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF; } /* hclge_get_cfg: query the static parameter from flash @@ -1157,6 +860,7 @@ static int hclge_configure(struct hclge_dev *hdev) hdev->tm_info.num_pg = 1; hdev->tc_max = cfg.tc_num; hdev->tm_info.hw_pfc_map = 0; + hdev->wanted_umv_size = cfg.umv_space; ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed); if (ret) { @@ -1657,11 +1361,13 @@ static int hclge_tx_buffer_calc(struct hclge_dev *hdev, static int hclge_rx_buffer_calc(struct hclge_dev *hdev, struct hclge_pkt_buf_alloc *buf_alloc) { - u32 rx_all = hdev->pkt_buf_size; +#define HCLGE_BUF_SIZE_UNIT 128 + u32 rx_all = hdev->pkt_buf_size, aligned_mps; int no_pfc_priv_num, pfc_priv_num; struct hclge_priv_buf *priv; int i; + aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT); rx_all -= hclge_get_tx_buff_alloced(buf_alloc); /* When DCB is not supported, rx private @@ -1680,13 +1386,13 @@ static int hclge_rx_buffer_calc(struct hclge_dev *hdev, if (hdev->hw_tc_map & BIT(i)) { priv->enable = 1; if (hdev->tm_info.hw_pfc_map & BIT(i)) { - priv->wl.low = hdev->mps; - priv->wl.high = priv->wl.low + hdev->mps; + priv->wl.low = aligned_mps; + priv->wl.high = priv->wl.low + aligned_mps; priv->buf_size = priv->wl.high + HCLGE_DEFAULT_DV; } else { priv->wl.low = 0; - priv->wl.high = 2 * hdev->mps; + priv->wl.high = 2 * aligned_mps; priv->buf_size = priv->wl.high; } } else { @@ -1718,11 +1424,11 @@ static int hclge_rx_buffer_calc(struct hclge_dev *hdev, if (hdev->tm_info.hw_pfc_map & BIT(i)) { priv->wl.low = 128; - priv->wl.high = priv->wl.low + hdev->mps; + priv->wl.high = priv->wl.low + aligned_mps; priv->buf_size = priv->wl.high + HCLGE_DEFAULT_DV; } else { priv->wl.low = 0; - priv->wl.high = hdev->mps; + priv->wl.high = aligned_mps; priv->buf_size = priv->wl.high; } } @@ -2066,19 +1772,17 @@ static int hclge_init_msi(struct hclge_dev *hdev) return 0; } -static void hclge_check_speed_dup(struct hclge_dev *hdev, int duplex, int speed) +static u8 hclge_check_speed_dup(u8 duplex, int speed) { - struct hclge_mac *mac = &hdev->hw.mac; - if ((speed == HCLGE_MAC_SPEED_10M) || (speed == HCLGE_MAC_SPEED_100M)) - mac->duplex = (u8)duplex; - else - mac->duplex = HCLGE_MAC_FULL; + if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M)) + duplex = HCLGE_MAC_FULL; - mac->speed = speed; + return duplex; } -int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex) +static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed, + u8 duplex) { struct hclge_config_mac_speed_dup_cmd *req; struct hclge_desc desc; @@ -2138,7 +1842,23 @@ int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex) return ret; } - hclge_check_speed_dup(hdev, duplex, speed); + return 0; +} + +int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex) +{ + int ret; + + duplex = hclge_check_speed_dup(duplex, speed); + if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex) + return 0; + + ret = hclge_cfg_mac_speed_dup_hw(hdev, 
speed, duplex); + if (ret) + return ret; + + hdev->hw.mac.speed = speed; + hdev->hw.mac.duplex = duplex; return 0; } @@ -2224,42 +1944,17 @@ static int hclge_get_autoneg(struct hnae3_handle *handle) return hdev->hw.mac.autoneg; } -static int hclge_set_default_mac_vlan_mask(struct hclge_dev *hdev, - bool mask_vlan, - u8 *mac_mask) -{ - struct hclge_mac_vlan_mask_entry_cmd *req; - struct hclge_desc desc; - int status; - - req = (struct hclge_mac_vlan_mask_entry_cmd *)desc.data; - hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_MASK_SET, false); - - hnae3_set_bit(req->vlan_mask, HCLGE_VLAN_MASK_EN_B, - mask_vlan ? 1 : 0); - ether_addr_copy(req->mac_mask, mac_mask); - - status = hclge_cmd_send(&hdev->hw, &desc, 1); - if (status) - dev_err(&hdev->pdev->dev, - "Config mac_vlan_mask failed for cmd_send, ret =%d\n", - status); - - return status; -} - static int hclge_mac_init(struct hclge_dev *hdev) { struct hnae3_handle *handle = &hdev->vport[0].nic; struct net_device *netdev = handle->kinfo.netdev; struct hclge_mac *mac = &hdev->hw.mac; - u8 mac_mask[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; - struct hclge_vport *vport; int mtu; int ret; - int i; - ret = hclge_cfg_mac_speed_dup(hdev, hdev->hw.mac.speed, HCLGE_MAC_FULL); + hdev->hw.mac.duplex = HCLGE_MAC_FULL; + ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed, + hdev->hw.mac.duplex); if (ret) { dev_err(&hdev->pdev->dev, "Config mac speed dup fail ret=%d\n", ret); @@ -2268,39 +1963,6 @@ static int hclge_mac_init(struct hclge_dev *hdev) mac->link = 0; - /* Initialize the MTA table work mode */ - hdev->enable_mta = true; - hdev->mta_mac_sel_type = HCLGE_MAC_ADDR_47_36; - - ret = hclge_set_mta_filter_mode(hdev, - hdev->mta_mac_sel_type, - hdev->enable_mta); - if (ret) { - dev_err(&hdev->pdev->dev, "set mta filter mode failed %d\n", - ret); - return ret; - } - - for (i = 0; i < hdev->num_alloc_vport; i++) { - vport = &hdev->vport[i]; - vport->accept_mta_mc = false; - - memset(vport->mta_shadow, 0, sizeof(vport->mta_shadow)); - ret = hclge_cfg_func_mta_filter(hdev, vport->vport_id, false); - if (ret) { - dev_err(&hdev->pdev->dev, - "set mta filter mode fail ret=%d\n", ret); - return ret; - } - } - - ret = hclge_set_default_mac_vlan_mask(hdev, true, mac_mask); - if (ret) { - dev_err(&hdev->pdev->dev, - "set default mac_vlan_mask fail ret=%d\n", ret); - return ret; - } - if (netdev) mtu = netdev->mtu; else @@ -2360,10 +2022,13 @@ static int hclge_get_mac_phy_link(struct hclge_dev *hdev) int mac_state; int link_stat; + if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) + return 0; + mac_state = hclge_get_mac_link_status(hdev); if (hdev->hw.mac.phydev) { - if (!genphy_read_status(hdev->hw.mac.phydev)) + if (hdev->hw.mac.phydev->state == PHY_RUNNING) link_stat = mac_state & hdev->hw.mac.phydev->link; else @@ -2415,13 +2080,11 @@ static int hclge_update_speed_duplex(struct hclge_dev *hdev) return ret; } - if ((mac.speed != speed) || (mac.duplex != duplex)) { - ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex); - if (ret) { - dev_err(&hdev->pdev->dev, - "mac speed/duplex config failed %d\n", ret); - return ret; - } + ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex); + if (ret) { + dev_err(&hdev->pdev->dev, + "mac speed/duplex config failed %d\n", ret); + return ret; } return 0; @@ -2520,6 +2183,8 @@ static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type, case HCLGE_VECTOR0_EVENT_MBX: hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr); break; + default: + break; } } @@ -2793,8 +2458,13 @@ static void 
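hclge_cfg_mac_speed_dup() above was split so the hardware command lives in a _hw helper while the wrapper skips the command when the cached speed/duplex already match, updating the cache only after the hardware accepted the change. The idempotent-config shape, reduced to a sketch with hypothetical types:

#include <linux/types.h>
#include <linux/errno.h>

struct sketch_mac { int speed; u8 duplex; };

static int sketch_cfg_speed_dup(struct sketch_mac *mac, int speed, u8 duplex,
				int (*apply_hw)(int speed, u8 duplex))
{
	int ret;

	if (mac->speed == speed && mac->duplex == duplex)
		return 0;	/* nothing to do, skip the firmware cmd */

	ret = apply_hw(speed, duplex);
	if (ret)
		return ret;	/* cache keeps the last good value */

	mac->speed = speed;
	mac->duplex = duplex;
	return 0;
}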
hclge_clear_reset_cause(struct hclge_dev *hdev) static void hclge_reset(struct hclge_dev *hdev) { + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); struct hnae3_handle *handle; + /* Initialize ae_dev reset status as well, in case enet layer wants to + * know if device is undergoing reset + */ + ae_dev->reset_type = hdev->reset_type; /* perform reset of the stack & ae device for a client */ handle = &hdev->vport[0].nic; rtnl_lock(); @@ -2815,6 +2485,7 @@ static void hclge_reset(struct hclge_dev *hdev) hclge_notify_client(hdev, HNAE3_UP_CLIENT); handle->last_reset_time = jiffies; rtnl_unlock(); + ae_dev->reset_type = HNAE3_NONE_RESET; } static void hclge_reset_event(struct hnae3_handle *handle) @@ -3102,6 +2773,22 @@ static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid, return ret; } +static void hclge_get_rss_type(struct hclge_vport *vport) +{ + if (vport->rss_tuple_sets.ipv4_tcp_en || + vport->rss_tuple_sets.ipv4_udp_en || + vport->rss_tuple_sets.ipv4_sctp_en || + vport->rss_tuple_sets.ipv6_tcp_en || + vport->rss_tuple_sets.ipv6_udp_en || + vport->rss_tuple_sets.ipv6_sctp_en) + vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4; + else if (vport->rss_tuple_sets.ipv4_fragment_en || + vport->rss_tuple_sets.ipv6_fragment_en) + vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3; + else + vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE; +} + static int hclge_set_rss_input_tuple(struct hclge_dev *hdev) { struct hclge_rss_input_tuple_cmd *req; @@ -3121,6 +2808,7 @@ static int hclge_set_rss_input_tuple(struct hclge_dev *hdev) req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en; req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en; req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en; + hclge_get_rss_type(&hdev->vport[0]); ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) dev_err(&hdev->pdev->dev, @@ -3135,8 +2823,19 @@ static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir, int i; /* Get hash algorithm */ - if (hfunc) - *hfunc = vport->rss_algo; + if (hfunc) { + switch (vport->rss_algo) { + case HCLGE_RSS_HASH_ALGO_TOEPLITZ: + *hfunc = ETH_RSS_HASH_TOP; + break; + case HCLGE_RSS_HASH_ALGO_SIMPLE: + *hfunc = ETH_RSS_HASH_XOR; + break; + default: + *hfunc = ETH_RSS_HASH_UNKNOWN; + break; + } + } /* Get the RSS Key required by the user */ if (key) @@ -3160,12 +2859,20 @@ static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir, /* Set the RSS Hash Key if specififed by the user */ if (key) { - - if (hfunc == ETH_RSS_HASH_TOP || - hfunc == ETH_RSS_HASH_NO_CHANGE) + switch (hfunc) { + case ETH_RSS_HASH_TOP: hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ; - else + break; + case ETH_RSS_HASH_XOR: + hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE; + break; + case ETH_RSS_HASH_NO_CHANGE: + hash_algo = vport->rss_algo; + break; + default: return -EINVAL; + } + ret = hclge_set_rss_algo_key(hdev, hash_algo, key); if (ret) return ret; @@ -3283,6 +2990,7 @@ static int hclge_set_rss_tuple(struct hnae3_handle *handle, vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en; vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en; vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en; + hclge_get_rss_type(vport); return 0; } @@ -3608,6 +3316,1281 @@ static void hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc, hclge_cmd_set_promisc_mode(hdev, ¶m); } +static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode) +{ + struct hclge_get_fd_mode_cmd *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, 
HCLGE_OPC_FD_MODE_CTRL, true); + + req = (struct hclge_get_fd_mode_cmd *)desc.data; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret); + return ret; + } + + *fd_mode = req->mode; + + return ret; +} + +static int hclge_get_fd_allocation(struct hclge_dev *hdev, + u32 *stage1_entry_num, + u32 *stage2_entry_num, + u16 *stage1_counter_num, + u16 *stage2_counter_num) +{ + struct hclge_get_fd_allocation_cmd *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true); + + req = (struct hclge_get_fd_allocation_cmd *)desc.data; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n", + ret); + return ret; + } + + *stage1_entry_num = le32_to_cpu(req->stage1_entry_num); + *stage2_entry_num = le32_to_cpu(req->stage2_entry_num); + *stage1_counter_num = le16_to_cpu(req->stage1_counter_num); + *stage2_counter_num = le16_to_cpu(req->stage2_counter_num); + + return ret; +} + +static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num) +{ + struct hclge_set_fd_key_config_cmd *req; + struct hclge_fd_key_cfg *stage; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false); + + req = (struct hclge_set_fd_key_config_cmd *)desc.data; + stage = &hdev->fd_cfg.key_cfg[stage_num]; + req->stage = stage_num; + req->key_select = stage->key_sel; + req->inner_sipv6_word_en = stage->inner_sipv6_word_en; + req->inner_dipv6_word_en = stage->inner_dipv6_word_en; + req->outer_sipv6_word_en = stage->outer_sipv6_word_en; + req->outer_dipv6_word_en = stage->outer_dipv6_word_en; + req->tuple_mask = cpu_to_le32(~stage->tuple_active); + req->meta_data_mask = cpu_to_le32(~stage->meta_data_active); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret); + + return ret; +} + +static int hclge_init_fd_config(struct hclge_dev *hdev) +{ +#define LOW_2_WORDS 0x03 + struct hclge_fd_key_cfg *key_cfg; + int ret; + + if (!hnae3_dev_fd_supported(hdev)) + return 0; + + ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode); + if (ret) + return ret; + + switch (hdev->fd_cfg.fd_mode) { + case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1: + hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH; + break; + case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1: + hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2; + break; + default: + dev_err(&hdev->pdev->dev, + "Unsupported flow director mode %d\n", + hdev->fd_cfg.fd_mode); + return -EOPNOTSUPP; + } + + hdev->fd_cfg.fd_en = true; + hdev->fd_cfg.proto_support = + TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW | + UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW; + key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1]; + key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE, + key_cfg->inner_sipv6_word_en = LOW_2_WORDS; + key_cfg->inner_dipv6_word_en = LOW_2_WORDS; + key_cfg->outer_sipv6_word_en = 0; + key_cfg->outer_dipv6_word_en = 0; + + key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) | + BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) | + BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) | + BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT); + + /* If use max 400bit key, we can support tuples for ether type */ + if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) { + hdev->fd_cfg.proto_support |= ETHER_FLOW; + key_cfg->tuple_active |= + BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC); + } + + /* roce_type 
is used to filter roce frames + * dst_vport is used to specify the rule + */ + key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT); + + ret = hclge_get_fd_allocation(hdev, + &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1], + &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2], + &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1], + &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]); + if (ret) + return ret; + + return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1); +} + +static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x, + int loc, u8 *key, bool is_add) +{ + struct hclge_fd_tcam_config_1_cmd *req1; + struct hclge_fd_tcam_config_2_cmd *req2; + struct hclge_fd_tcam_config_3_cmd *req3; + struct hclge_desc desc[3]; + int ret; + + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false); + desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false); + desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false); + + req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data; + req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data; + req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data; + + req1->stage = stage; + req1->xy_sel = sel_x ? 1 : 0; + hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0); + req1->index = cpu_to_le32(loc); + req1->entry_vld = sel_x ? is_add : 0; + + if (key) { + memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data)); + memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)], + sizeof(req2->tcam_data)); + memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) + + sizeof(req2->tcam_data)], sizeof(req3->tcam_data)); + } + + ret = hclge_cmd_send(&hdev->hw, desc, 3); + if (ret) + dev_err(&hdev->pdev->dev, + "config tcam key fail, ret=%d\n", + ret); + + return ret; +} + +static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc, + struct hclge_fd_ad_data *action) +{ + struct hclge_fd_ad_config_cmd *req; + struct hclge_desc desc; + u64 ad_data = 0; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false); + + req = (struct hclge_fd_ad_config_cmd *)desc.data; + req->index = cpu_to_le32(loc); + req->stage = stage; + + hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B, + action->write_rule_id_to_bd); + hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S, + action->rule_id); + ad_data <<= 32; + hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet); + hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B, + action->forward_to_direct_queue); + hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S, + action->queue_id); + hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter); + hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M, + HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id); + hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage); + hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S, + action->counter_id); + + req->ad_data = cpu_to_le64(ad_data); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret); + + return ret; +} + +static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y, + struct hclge_fd_rule *rule) +{ + u16 tmp_x_s, tmp_y_s; + u32 tmp_x_l, tmp_y_l; + int i; + + if (rule->unused_tuple & tuple_bit) + return true; + + switch (tuple_bit) { + case 0: + return false; + case BIT(INNER_DST_MAC): + for (i = 0; i < 6; i++) { + calc_x(key_x[5 - i], 
rule->tuples.dst_mac[i], + rule->tuples_mask.dst_mac[i]); + calc_y(key_y[5 - i], rule->tuples.dst_mac[i], + rule->tuples_mask.dst_mac[i]); + } + + return true; + case BIT(INNER_SRC_MAC): + for (i = 0; i < 6; i++) { + calc_x(key_x[5 - i], rule->tuples.src_mac[i], + rule->tuples_mask.src_mac[i]); + calc_y(key_y[5 - i], rule->tuples.src_mac[i], + rule->tuples_mask.src_mac[i]); + } + + return true; + case BIT(INNER_VLAN_TAG_FST): + calc_x(tmp_x_s, rule->tuples.vlan_tag1, + rule->tuples_mask.vlan_tag1); + calc_y(tmp_y_s, rule->tuples.vlan_tag1, + rule->tuples_mask.vlan_tag1); + *(__le16 *)key_x = cpu_to_le16(tmp_x_s); + *(__le16 *)key_y = cpu_to_le16(tmp_y_s); + + return true; + case BIT(INNER_ETH_TYPE): + calc_x(tmp_x_s, rule->tuples.ether_proto, + rule->tuples_mask.ether_proto); + calc_y(tmp_y_s, rule->tuples.ether_proto, + rule->tuples_mask.ether_proto); + *(__le16 *)key_x = cpu_to_le16(tmp_x_s); + *(__le16 *)key_y = cpu_to_le16(tmp_y_s); + + return true; + case BIT(INNER_IP_TOS): + calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos); + calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos); + + return true; + case BIT(INNER_IP_PROTO): + calc_x(*key_x, rule->tuples.ip_proto, + rule->tuples_mask.ip_proto); + calc_y(*key_y, rule->tuples.ip_proto, + rule->tuples_mask.ip_proto); + + return true; + case BIT(INNER_SRC_IP): + calc_x(tmp_x_l, rule->tuples.src_ip[3], + rule->tuples_mask.src_ip[3]); + calc_y(tmp_y_l, rule->tuples.src_ip[3], + rule->tuples_mask.src_ip[3]); + *(__le32 *)key_x = cpu_to_le32(tmp_x_l); + *(__le32 *)key_y = cpu_to_le32(tmp_y_l); + + return true; + case BIT(INNER_DST_IP): + calc_x(tmp_x_l, rule->tuples.dst_ip[3], + rule->tuples_mask.dst_ip[3]); + calc_y(tmp_y_l, rule->tuples.dst_ip[3], + rule->tuples_mask.dst_ip[3]); + *(__le32 *)key_x = cpu_to_le32(tmp_x_l); + *(__le32 *)key_y = cpu_to_le32(tmp_y_l); + + return true; + case BIT(INNER_SRC_PORT): + calc_x(tmp_x_s, rule->tuples.src_port, + rule->tuples_mask.src_port); + calc_y(tmp_y_s, rule->tuples.src_port, + rule->tuples_mask.src_port); + *(__le16 *)key_x = cpu_to_le16(tmp_x_s); + *(__le16 *)key_y = cpu_to_le16(tmp_y_s); + + return true; + case BIT(INNER_DST_PORT): + calc_x(tmp_x_s, rule->tuples.dst_port, + rule->tuples_mask.dst_port); + calc_y(tmp_y_s, rule->tuples.dst_port, + rule->tuples_mask.dst_port); + *(__le16 *)key_x = cpu_to_le16(tmp_x_s); + *(__le16 *)key_y = cpu_to_le16(tmp_y_s); + + return true; + default: + return false; + } +} + +static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id, + u8 vf_id, u8 network_port_id) +{ + u32 port_number = 0; + + if (port_type == HOST_PORT) { + hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S, + pf_id); + hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S, + vf_id); + hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT); + } else { + hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M, + HCLGE_NETWORK_PORT_ID_S, network_port_id); + hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT); + } + + return port_number; +} + +static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg, + __le32 *key_x, __le32 *key_y, + struct hclge_fd_rule *rule) +{ + u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number; + u8 cur_pos = 0, tuple_size, shift_bits; + int i; + + for (i = 0; i < MAX_META_DATA; i++) { + tuple_size = meta_data_key_info[i].key_length; + tuple_bit = key_cfg->meta_data_active & BIT(i); + + switch (tuple_bit) { + case BIT(ROCE_TYPE): + hnae3_set_bit(meta_data, cur_pos, NIC_PACKET); + cur_pos += tuple_size; +
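/* ROCE_TYPE is fixed to NIC_PACKET here, so FD rules never match RoCE frames */ +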
break; + case BIT(DST_VPORT): + port_number = hclge_get_port_number(HOST_PORT, 0, + rule->vf_id, 0); + hnae3_set_field(meta_data, + GENMASK(cur_pos + tuple_size, cur_pos), + cur_pos, port_number); + cur_pos += tuple_size; + break; + default: + break; + } + } + + calc_x(tmp_x, meta_data, 0xFFFFFFFF); + calc_y(tmp_y, meta_data, 0xFFFFFFFF); + shift_bits = sizeof(meta_data) * 8 - cur_pos; + + *key_x = cpu_to_le32(tmp_x << shift_bits); + *key_y = cpu_to_le32(tmp_y << shift_bits); +} + +/* A complete key is combined with meta data key and tuple key. + * Meta data key is stored at the MSB region, and tuple key is stored at + * the LSB region, unused bits will be filled 0. + */ +static int hclge_config_key(struct hclge_dev *hdev, u8 stage, + struct hclge_fd_rule *rule) +{ + struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage]; + u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES]; + u8 *cur_key_x, *cur_key_y; + int i, ret, tuple_size; + u8 meta_data_region; + + memset(key_x, 0, sizeof(key_x)); + memset(key_y, 0, sizeof(key_y)); + cur_key_x = key_x; + cur_key_y = key_y; + + for (i = 0 ; i < MAX_TUPLE; i++) { + bool tuple_valid; + u32 check_tuple; + + tuple_size = tuple_key_info[i].key_length / 8; + check_tuple = key_cfg->tuple_active & BIT(i); + + tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x, + cur_key_y, rule); + if (tuple_valid) { + cur_key_x += tuple_size; + cur_key_y += tuple_size; + } + } + + meta_data_region = hdev->fd_cfg.max_key_length / 8 - + MAX_META_DATA_LENGTH / 8; + + hclge_fd_convert_meta_data(key_cfg, + (__le32 *)(key_x + meta_data_region), + (__le32 *)(key_y + meta_data_region), + rule); + + ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y, + true); + if (ret) { + dev_err(&hdev->pdev->dev, + "fd key_y config fail, loc=%d, ret=%d\n", + rule->queue_id, ret); + return ret; + } + + ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x, + true); + if (ret) + dev_err(&hdev->pdev->dev, + "fd key_x config fail, loc=%d, ret=%d\n", + rule->queue_id, ret); + return ret; +} + +static int hclge_config_action(struct hclge_dev *hdev, u8 stage, + struct hclge_fd_rule *rule) +{ + struct hclge_fd_ad_data ad_data; + + ad_data.ad_id = rule->location; + + if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) { + ad_data.drop_packet = true; + ad_data.forward_to_direct_queue = false; + ad_data.queue_id = 0; + } else { + ad_data.drop_packet = false; + ad_data.forward_to_direct_queue = true; + ad_data.queue_id = rule->queue_id; + } + + ad_data.use_counter = false; + ad_data.counter_id = 0; + + ad_data.use_next_stage = false; + ad_data.next_input_key = 0; + + ad_data.write_rule_id_to_bd = true; + ad_data.rule_id = rule->location; + + return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data); +} + +static int hclge_fd_check_spec(struct hclge_dev *hdev, + struct ethtool_rx_flow_spec *fs, u32 *unused) +{ + struct ethtool_tcpip4_spec *tcp_ip4_spec; + struct ethtool_usrip4_spec *usr_ip4_spec; + struct ethtool_tcpip6_spec *tcp_ip6_spec; + struct ethtool_usrip6_spec *usr_ip6_spec; + struct ethhdr *ether_spec; + + if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) + return -EINVAL; + + if (!(fs->flow_type & hdev->fd_cfg.proto_support)) + return -EOPNOTSUPP; + + if ((fs->flow_type & FLOW_EXT) && + (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) { + dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n"); + return -EOPNOTSUPP; + } + + switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { + case SCTP_V4_FLOW: + case TCP_V4_FLOW: + case 
UDP_V4_FLOW: + tcp_ip4_spec = &fs->h_u.tcp_ip4_spec; + *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC); + + if (!tcp_ip4_spec->ip4src) + *unused |= BIT(INNER_SRC_IP); + + if (!tcp_ip4_spec->ip4dst) + *unused |= BIT(INNER_DST_IP); + + if (!tcp_ip4_spec->psrc) + *unused |= BIT(INNER_SRC_PORT); + + if (!tcp_ip4_spec->pdst) + *unused |= BIT(INNER_DST_PORT); + + if (!tcp_ip4_spec->tos) + *unused |= BIT(INNER_IP_TOS); + + break; + case IP_USER_FLOW: + usr_ip4_spec = &fs->h_u.usr_ip4_spec; + *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | + BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT); + + if (!usr_ip4_spec->ip4src) + *unused |= BIT(INNER_SRC_IP); + + if (!usr_ip4_spec->ip4dst) + *unused |= BIT(INNER_DST_IP); + + if (!usr_ip4_spec->tos) + *unused |= BIT(INNER_IP_TOS); + + if (!usr_ip4_spec->proto) + *unused |= BIT(INNER_IP_PROTO); + + if (usr_ip4_spec->l4_4_bytes) + return -EOPNOTSUPP; + + if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4) + return -EOPNOTSUPP; + + break; + case SCTP_V6_FLOW: + case TCP_V6_FLOW: + case UDP_V6_FLOW: + tcp_ip6_spec = &fs->h_u.tcp_ip6_spec; + *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | + BIT(INNER_IP_TOS); + + if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] && + !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3]) + *unused |= BIT(INNER_SRC_IP); + + if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] && + !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3]) + *unused |= BIT(INNER_DST_IP); + + if (!tcp_ip6_spec->psrc) + *unused |= BIT(INNER_SRC_PORT); + + if (!tcp_ip6_spec->pdst) + *unused |= BIT(INNER_DST_PORT); + + if (tcp_ip6_spec->tclass) + return -EOPNOTSUPP; + + break; + case IPV6_USER_FLOW: + usr_ip6_spec = &fs->h_u.usr_ip6_spec; + *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | + BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | + BIT(INNER_DST_PORT); + + if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] && + !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3]) + *unused |= BIT(INNER_SRC_IP); + + if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] && + !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3]) + *unused |= BIT(INNER_DST_IP); + + if (!usr_ip6_spec->l4_proto) + *unused |= BIT(INNER_IP_PROTO); + + if (usr_ip6_spec->tclass) + return -EOPNOTSUPP; + + if (usr_ip6_spec->l4_4_bytes) + return -EOPNOTSUPP; + + break; + case ETHER_FLOW: + ether_spec = &fs->h_u.ether_spec; + *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) | + BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) | + BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO); + + if (is_zero_ether_addr(ether_spec->h_source)) + *unused |= BIT(INNER_SRC_MAC); + + if (is_zero_ether_addr(ether_spec->h_dest)) + *unused |= BIT(INNER_DST_MAC); + + if (!ether_spec->h_proto) + *unused |= BIT(INNER_ETH_TYPE); + + break; + default: + return -EOPNOTSUPP; + } + + if ((fs->flow_type & FLOW_EXT)) { + if (fs->h_ext.vlan_etype) + return -EOPNOTSUPP; + if (!fs->h_ext.vlan_tci) + *unused |= BIT(INNER_VLAN_TAG_FST); + + if (fs->m_ext.vlan_tci) { + if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) + return -EINVAL; + } + } else { + *unused |= BIT(INNER_VLAN_TAG_FST); + } + + if (fs->flow_type & FLOW_MAC_EXT) { + if (!(hdev->fd_cfg.proto_support & ETHER_FLOW)) + return -EOPNOTSUPP; + + if (is_zero_ether_addr(fs->h_ext.h_dest)) + *unused |= BIT(INNER_DST_MAC); + else + *unused &= ~(BIT(INNER_DST_MAC)); + } + + return 0; +} + +static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location) +{ + struct hclge_fd_rule *rule = NULL; + struct hlist_node *node2; + + hlist_for_each_entry_safe(rule, node2, 
&hdev->fd_rule_list, rule_node) { + if (rule->location >= location) + break; + } + + return rule && rule->location == location; +} + +static int hclge_fd_update_rule_list(struct hclge_dev *hdev, + struct hclge_fd_rule *new_rule, + u16 location, + bool is_add) +{ + struct hclge_fd_rule *rule = NULL, *parent = NULL; + struct hlist_node *node2; + + if (is_add && !new_rule) + return -EINVAL; + + hlist_for_each_entry_safe(rule, node2, + &hdev->fd_rule_list, rule_node) { + if (rule->location >= location) + break; + parent = rule; + } + + if (rule && rule->location == location) { + hlist_del(&rule->rule_node); + kfree(rule); + hdev->hclge_fd_rule_num--; + + if (!is_add) + return 0; + + } else if (!is_add) { + dev_err(&hdev->pdev->dev, + "delete fail, rule %d is inexistent\n", + location); + return -EINVAL; + } + + INIT_HLIST_NODE(&new_rule->rule_node); + + if (parent) + hlist_add_behind(&new_rule->rule_node, &parent->rule_node); + else + hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list); + + hdev->hclge_fd_rule_num++; + + return 0; +} + +static int hclge_fd_get_tuple(struct hclge_dev *hdev, + struct ethtool_rx_flow_spec *fs, + struct hclge_fd_rule *rule) +{ + u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT); + + switch (flow_type) { + case SCTP_V4_FLOW: + case TCP_V4_FLOW: + case UDP_V4_FLOW: + rule->tuples.src_ip[3] = + be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src); + rule->tuples_mask.src_ip[3] = + be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src); + + rule->tuples.dst_ip[3] = + be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst); + rule->tuples_mask.dst_ip[3] = + be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst); + + rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc); + rule->tuples_mask.src_port = + be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc); + + rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst); + rule->tuples_mask.dst_port = + be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst); + + rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos; + rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos; + + rule->tuples.ether_proto = ETH_P_IP; + rule->tuples_mask.ether_proto = 0xFFFF; + + break; + case IP_USER_FLOW: + rule->tuples.src_ip[3] = + be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src); + rule->tuples_mask.src_ip[3] = + be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src); + + rule->tuples.dst_ip[3] = + be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst); + rule->tuples_mask.dst_ip[3] = + be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst); + + rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos; + rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos; + + rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto; + rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto; + + rule->tuples.ether_proto = ETH_P_IP; + rule->tuples_mask.ether_proto = 0xFFFF; + + break; + case SCTP_V6_FLOW: + case TCP_V6_FLOW: + case UDP_V6_FLOW: + be32_to_cpu_array(rule->tuples.src_ip, + fs->h_u.tcp_ip6_spec.ip6src, 4); + be32_to_cpu_array(rule->tuples_mask.src_ip, + fs->m_u.tcp_ip6_spec.ip6src, 4); + + be32_to_cpu_array(rule->tuples.dst_ip, + fs->h_u.tcp_ip6_spec.ip6dst, 4); + be32_to_cpu_array(rule->tuples_mask.dst_ip, + fs->m_u.tcp_ip6_spec.ip6dst, 4); + + rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc); + rule->tuples_mask.src_port = + be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc); + + rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst); + rule->tuples_mask.dst_port = + be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst); + + rule->tuples.ether_proto = ETH_P_IPV6; + rule->tuples_mask.ether_proto = 0xFFFF; + + break; + case IPV6_USER_FLOW: + 
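/* user-defined IPv6 flows carry all four 32-bit words of each address; the IPv4 cases above fill only index 3 of src_ip/dst_ip */ +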
be32_to_cpu_array(rule->tuples.src_ip, + fs->h_u.usr_ip6_spec.ip6src, 4); + be32_to_cpu_array(rule->tuples_mask.src_ip, + fs->m_u.usr_ip6_spec.ip6src, 4); + + be32_to_cpu_array(rule->tuples.dst_ip, + fs->h_u.usr_ip6_spec.ip6dst, 4); + be32_to_cpu_array(rule->tuples_mask.dst_ip, + fs->m_u.usr_ip6_spec.ip6dst, 4); + + rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto; + rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto; + + rule->tuples.ether_proto = ETH_P_IPV6; + rule->tuples_mask.ether_proto = 0xFFFF; + + break; + case ETHER_FLOW: + ether_addr_copy(rule->tuples.src_mac, + fs->h_u.ether_spec.h_source); + ether_addr_copy(rule->tuples_mask.src_mac, + fs->m_u.ether_spec.h_source); + + ether_addr_copy(rule->tuples.dst_mac, + fs->h_u.ether_spec.h_dest); + ether_addr_copy(rule->tuples_mask.dst_mac, + fs->m_u.ether_spec.h_dest); + + rule->tuples.ether_proto = + be16_to_cpu(fs->h_u.ether_spec.h_proto); + rule->tuples_mask.ether_proto = + be16_to_cpu(fs->m_u.ether_spec.h_proto); + + break; + default: + return -EOPNOTSUPP; + } + + switch (flow_type) { + case SCTP_V4_FLOW: + case SCTP_V6_FLOW: + rule->tuples.ip_proto = IPPROTO_SCTP; + rule->tuples_mask.ip_proto = 0xFF; + break; + case TCP_V4_FLOW: + case TCP_V6_FLOW: + rule->tuples.ip_proto = IPPROTO_TCP; + rule->tuples_mask.ip_proto = 0xFF; + break; + case UDP_V4_FLOW: + case UDP_V6_FLOW: + rule->tuples.ip_proto = IPPROTO_UDP; + rule->tuples_mask.ip_proto = 0xFF; + break; + default: + break; + } + + if ((fs->flow_type & FLOW_EXT)) { + rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci); + rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci); + } + + if (fs->flow_type & FLOW_MAC_EXT) { + ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest); + ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest); + } + + return 0; +} + +static int hclge_add_fd_entry(struct hnae3_handle *handle, + struct ethtool_rxnfc *cmd) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + u16 dst_vport_id = 0, q_index = 0; + struct ethtool_rx_flow_spec *fs; + struct hclge_fd_rule *rule; + u32 unused = 0; + u8 action; + int ret; + + if (!hnae3_dev_fd_supported(hdev)) + return -EOPNOTSUPP; + + if (!hdev->fd_cfg.fd_en) { + dev_warn(&hdev->pdev->dev, + "Please enable flow director first\n"); + return -EOPNOTSUPP; + } + + fs = (struct ethtool_rx_flow_spec *)&cmd->fs; + + ret = hclge_fd_check_spec(hdev, fs, &unused); + if (ret) { + dev_err(&hdev->pdev->dev, "Check fd spec failed\n"); + return ret; + } + + if (fs->ring_cookie == RX_CLS_FLOW_DISC) { + action = HCLGE_FD_ACTION_DROP_PACKET; + } else { + u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie); + u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie); + u16 tqps; + + dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id; + tqps = vf ? 
hdev->vport[vf].alloc_tqps : vport->alloc_tqps; + + if (ring >= tqps) { + dev_err(&hdev->pdev->dev, + "Error: queue id (%d) > max tqp num (%d)\n", + ring, tqps - 1); + return -EINVAL; + } + + if (vf > hdev->num_req_vfs) { + dev_err(&hdev->pdev->dev, + "Error: vf id (%d) > max vf num (%d)\n", + vf, hdev->num_req_vfs); + return -EINVAL; + } + + action = HCLGE_FD_ACTION_ACCEPT_PACKET; + q_index = ring; + } + + rule = kzalloc(sizeof(*rule), GFP_KERNEL); + if (!rule) + return -ENOMEM; + + ret = hclge_fd_get_tuple(hdev, fs, rule); + if (ret) + goto free_rule; + + rule->flow_type = fs->flow_type; + + rule->location = fs->location; + rule->unused_tuple = unused; + rule->vf_id = dst_vport_id; + rule->queue_id = q_index; + rule->action = action; + + ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule); + if (ret) + goto free_rule; + + ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule); + if (ret) + goto free_rule; + + ret = hclge_fd_update_rule_list(hdev, rule, fs->location, true); + if (ret) + goto free_rule; + + return ret; + +free_rule: + kfree(rule); + return ret; +} + +static int hclge_del_fd_entry(struct hnae3_handle *handle, + struct ethtool_rxnfc *cmd) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct ethtool_rx_flow_spec *fs; + int ret; + + if (!hnae3_dev_fd_supported(hdev)) + return -EOPNOTSUPP; + + fs = (struct ethtool_rx_flow_spec *)&cmd->fs; + + if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) + return -EINVAL; + + if (!hclge_fd_rule_exist(hdev, fs->location)) { + dev_err(&hdev->pdev->dev, + "Delete fail, rule %d is inexistent\n", + fs->location); + return -ENOENT; + } + + ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, + fs->location, NULL, false); + if (ret) + return ret; + + return hclge_fd_update_rule_list(hdev, NULL, fs->location, + false); +} + +static void hclge_del_all_fd_entries(struct hnae3_handle *handle, + bool clear_list) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct hclge_fd_rule *rule; + struct hlist_node *node; + + if (!hnae3_dev_fd_supported(hdev)) + return; + + if (clear_list) { + hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, + rule_node) { + hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, + rule->location, NULL, false); + hlist_del(&rule->rule_node); + kfree(rule); + hdev->hclge_fd_rule_num--; + } + } else { + hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, + rule_node) + hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, + rule->location, NULL, false); + } +} + +static int hclge_restore_fd_entries(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct hclge_fd_rule *rule; + struct hlist_node *node; + int ret; + + if (!hnae3_dev_fd_supported(hdev)) + return -EOPNOTSUPP; + + hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { + ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule); + if (!ret) + ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule); + + if (ret) { + dev_warn(&hdev->pdev->dev, + "Restore rule %d failed, remove it\n", + rule->location); + hlist_del(&rule->rule_node); + kfree(rule); + hdev->hclge_fd_rule_num--; + } + } + return 0; +} + +static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle, + struct ethtool_rxnfc *cmd) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + if (!hnae3_dev_fd_supported(hdev)) + return -EOPNOTSUPP; + + 
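/* rule_cnt reports the rules currently installed, data the stage-1 table capacity */ +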
cmd->rule_cnt = hdev->hclge_fd_rule_num; + cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]; + + return 0; +} + +static int hclge_get_fd_rule_info(struct hnae3_handle *handle, + struct ethtool_rxnfc *cmd) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_fd_rule *rule = NULL; + struct hclge_dev *hdev = vport->back; + struct ethtool_rx_flow_spec *fs; + struct hlist_node *node2; + + if (!hnae3_dev_fd_supported(hdev)) + return -EOPNOTSUPP; + + fs = (struct ethtool_rx_flow_spec *)&cmd->fs; + + hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) { + if (rule->location >= fs->location) + break; + } + + if (!rule || fs->location != rule->location) + return -ENOENT; + + fs->flow_type = rule->flow_type; + switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { + case SCTP_V4_FLOW: + case TCP_V4_FLOW: + case UDP_V4_FLOW: + fs->h_u.tcp_ip4_spec.ip4src = + cpu_to_be32(rule->tuples.src_ip[3]); + fs->m_u.tcp_ip4_spec.ip4src = + rule->unused_tuple & BIT(INNER_SRC_IP) ? + 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]); + + fs->h_u.tcp_ip4_spec.ip4dst = + cpu_to_be32(rule->tuples.dst_ip[3]); + fs->m_u.tcp_ip4_spec.ip4dst = + rule->unused_tuple & BIT(INNER_DST_IP) ? + 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]); + + fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port); + fs->m_u.tcp_ip4_spec.psrc = + rule->unused_tuple & BIT(INNER_SRC_PORT) ? + 0 : cpu_to_be16(rule->tuples_mask.src_port); + + fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port); + fs->m_u.tcp_ip4_spec.pdst = + rule->unused_tuple & BIT(INNER_DST_PORT) ? + 0 : cpu_to_be16(rule->tuples_mask.dst_port); + + fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos; + fs->m_u.tcp_ip4_spec.tos = + rule->unused_tuple & BIT(INNER_IP_TOS) ? + 0 : rule->tuples_mask.ip_tos; + + break; + case IP_USER_FLOW: + fs->h_u.usr_ip4_spec.ip4src = + cpu_to_be32(rule->tuples.src_ip[3]); + fs->m_u.tcp_ip4_spec.ip4src = + rule->unused_tuple & BIT(INNER_SRC_IP) ? + 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]); + + fs->h_u.usr_ip4_spec.ip4dst = + cpu_to_be32(rule->tuples.dst_ip[3]); + fs->m_u.usr_ip4_spec.ip4dst = + rule->unused_tuple & BIT(INNER_DST_IP) ? + 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]); + + fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos; + fs->m_u.usr_ip4_spec.tos = + rule->unused_tuple & BIT(INNER_IP_TOS) ? + 0 : rule->tuples_mask.ip_tos; + + fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto; + fs->m_u.usr_ip4_spec.proto = + rule->unused_tuple & BIT(INNER_IP_PROTO) ? + 0 : rule->tuples_mask.ip_proto; + + fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; + + break; + case SCTP_V6_FLOW: + case TCP_V6_FLOW: + case UDP_V6_FLOW: + cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src, + rule->tuples.src_ip, 4); + if (rule->unused_tuple & BIT(INNER_SRC_IP)) + memset(fs->m_u.tcp_ip6_spec.ip6src, 0, sizeof(int) * 4); + else + cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src, + rule->tuples_mask.src_ip, 4); + + cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst, + rule->tuples.dst_ip, 4); + if (rule->unused_tuple & BIT(INNER_DST_IP)) + memset(fs->m_u.tcp_ip6_spec.ip6dst, 0, sizeof(int) * 4); + else + cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst, + rule->tuples_mask.dst_ip, 4); + + fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port); + fs->m_u.tcp_ip6_spec.psrc = + rule->unused_tuple & BIT(INNER_SRC_PORT) ? + 0 : cpu_to_be16(rule->tuples_mask.src_port); + + fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port); + fs->m_u.tcp_ip6_spec.pdst = + rule->unused_tuple & BIT(INNER_DST_PORT) ? 
+ 0 : cpu_to_be16(rule->tuples_mask.dst_port); + + break; + case IPV6_USER_FLOW: + cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src, + rule->tuples.src_ip, 4); + if (rule->unused_tuple & BIT(INNER_SRC_IP)) + memset(fs->m_u.usr_ip6_spec.ip6src, 0, sizeof(int) * 4); + else + cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src, + rule->tuples_mask.src_ip, 4); + + cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst, + rule->tuples.dst_ip, 4); + if (rule->unused_tuple & BIT(INNER_DST_IP)) + memset(fs->m_u.usr_ip6_spec.ip6dst, 0, sizeof(int) * 4); + else + cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst, + rule->tuples_mask.dst_ip, 4); + + fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto; + fs->m_u.usr_ip6_spec.l4_proto = + rule->unused_tuple & BIT(INNER_IP_PROTO) ? + 0 : rule->tuples_mask.ip_proto; + + break; + case ETHER_FLOW: + ether_addr_copy(fs->h_u.ether_spec.h_source, + rule->tuples.src_mac); + if (rule->unused_tuple & BIT(INNER_SRC_MAC)) + eth_zero_addr(fs->m_u.ether_spec.h_source); + else + ether_addr_copy(fs->m_u.ether_spec.h_source, + rule->tuples_mask.src_mac); + + ether_addr_copy(fs->h_u.ether_spec.h_dest, + rule->tuples.dst_mac); + if (rule->unused_tuple & BIT(INNER_DST_MAC)) + eth_zero_addr(fs->m_u.ether_spec.h_dest); + else + ether_addr_copy(fs->m_u.ether_spec.h_dest, + rule->tuples_mask.dst_mac); + + fs->h_u.ether_spec.h_proto = + cpu_to_be16(rule->tuples.ether_proto); + fs->m_u.ether_spec.h_proto = + rule->unused_tuple & BIT(INNER_ETH_TYPE) ? + 0 : cpu_to_be16(rule->tuples_mask.ether_proto); + + break; + default: + return -EOPNOTSUPP; + } + + if (fs->flow_type & FLOW_EXT) { + fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1); + fs->m_ext.vlan_tci = + rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ? + cpu_to_be16(VLAN_VID_MASK) : + cpu_to_be16(rule->tuples_mask.vlan_tag1); + } + + if (fs->flow_type & FLOW_MAC_EXT) { + ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac); + if (rule->unused_tuple & BIT(INNER_DST_MAC)) + eth_zero_addr(fs->m_u.ether_spec.h_dest); + else + ether_addr_copy(fs->m_u.ether_spec.h_dest, + rule->tuples_mask.dst_mac); + } + + if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) { + fs->ring_cookie = RX_CLS_FLOW_DISC; + } else { + u64 vf_id; + + fs->ring_cookie = rule->queue_id; + vf_id = rule->vf_id; + vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; + fs->ring_cookie |= vf_id; + } + + return 0; +} + +static int hclge_get_all_rules(struct hnae3_handle *handle, + struct ethtool_rxnfc *cmd, u32 *rule_locs) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct hclge_fd_rule *rule; + struct hlist_node *node2; + int cnt = 0; + + if (!hnae3_dev_fd_supported(hdev)) + return -EOPNOTSUPP; + + cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]; + + hlist_for_each_entry_safe(rule, node2, + &hdev->fd_rule_list, rule_node) { + if (cnt == cmd->rule_cnt) + return -EMSGSIZE; + + rule_locs[cnt] = rule->location; + cnt++; + } + + cmd->rule_cnt = cnt; + + return 0; +} + +static void hclge_enable_fd(struct hnae3_handle *handle, bool enable) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + hdev->fd_cfg.fd_en = enable; + if (!enable) + hclge_del_all_fd_entries(handle, false); + else + hclge_restore_fd_entries(handle); +} + static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable) { struct hclge_desc desc; @@ -3639,7 +4622,7 @@ static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable) "mac enable fail, ret =%d.\n", ret); } -static int hclge_set_mac_loopback(struct 
hclge_dev *hdev, bool en) +static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en) { struct hclge_config_mac_mode_cmd *req; struct hclge_desc desc; @@ -3659,6 +4642,8 @@ static int hclge_set_mac_loopback(struct hclge_dev *hdev, bool en) /* 2 Then setup the loopback flag */ loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en); hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0); + hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0); + hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0); req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); @@ -3673,22 +4658,37 @@ static int hclge_set_mac_loopback(struct hclge_dev *hdev, bool en) return ret; } -static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en) +static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en, + enum hnae3_loop loop_mode) { #define HCLGE_SERDES_RETRY_MS 10 #define HCLGE_SERDES_RETRY_NUM 100 struct hclge_serdes_lb_cmd *req; struct hclge_desc desc; int ret, i = 0; + u8 loop_mode_b; - req = (struct hclge_serdes_lb_cmd *)&desc.data[0]; + req = (struct hclge_serdes_lb_cmd *)desc.data; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false); + switch (loop_mode) { + case HNAE3_LOOP_SERIAL_SERDES: + loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B; + break; + case HNAE3_LOOP_PARALLEL_SERDES: + loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B; + break; + default: + dev_err(&hdev->pdev->dev, + "unsupported serdes loopback mode %d\n", loop_mode); + return -ENOTSUPP; + } + if (en) { - req->enable = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B; - req->mask = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B; + req->enable = loop_mode_b; + req->mask = loop_mode_b; } else { - req->mask = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B; + req->mask = loop_mode_b; } ret = hclge_cmd_send(&hdev->hw, &desc, 1); @@ -3719,33 +4719,10 @@ static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en) return -EIO; } + hclge_cfg_mac_mode(hdev, en); return 0; } -static int hclge_set_loopback(struct hnae3_handle *handle, - enum hnae3_loop loop_mode, bool en) -{ - struct hclge_vport *vport = hclge_get_vport(handle); - struct hclge_dev *hdev = vport->back; - int ret; - - switch (loop_mode) { - case HNAE3_MAC_INTER_LOOP_MAC: - ret = hclge_set_mac_loopback(hdev, en); - break; - case HNAE3_MAC_INTER_LOOP_SERDES: - ret = hclge_set_serdes_loopback(hdev, en); - break; - default: - ret = -ENOTSUPP; - dev_err(&hdev->pdev->dev, - "loop_mode %d is not supported\n", loop_mode); - break; - } - - return ret; -} - static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id, int stream_id, bool enable) { @@ -3766,6 +4743,37 @@ static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id, return ret; } +static int hclge_set_loopback(struct hnae3_handle *handle, + enum hnae3_loop loop_mode, bool en) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + int i, ret; + + switch (loop_mode) { + case HNAE3_LOOP_APP: + ret = hclge_set_app_loopback(hdev, en); + break; + case HNAE3_LOOP_SERIAL_SERDES: + case HNAE3_LOOP_PARALLEL_SERDES: + ret = hclge_set_serdes_loopback(hdev, en, loop_mode); + break; + default: + ret = -ENOTSUPP; + dev_err(&hdev->pdev->dev, + "loop_mode %d is not supported\n", loop_mode); + break; + } + + for (i = 0; i < vport->alloc_tqps; i++) { + ret = hclge_tqp_enable(hdev, i, 0, en); + if (ret) + return ret; + } + + return 0; +} + static void hclge_reset_tqp_stats(struct hnae3_handle *handle) { struct hclge_vport *vport = hclge_get_vport(handle); @@ -3809,6 +4817,8 @@ static void 
hclge_ae_stop(struct hnae3_handle *handle) struct hclge_dev *hdev = vport->back; int i; + set_bit(HCLGE_STATE_DOWN, &hdev->state); + del_timer_sync(&hdev->service_timer); cancel_work_sync(&hdev->service_task); clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state); @@ -3950,174 +4960,6 @@ static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req, new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff); } -static u16 hclge_get_mac_addr_to_mta_index(struct hclge_vport *vport, - const u8 *addr) -{ - u16 high_val = addr[1] | (addr[0] << 8); - struct hclge_dev *hdev = vport->back; - u32 rsh = 4 - hdev->mta_mac_sel_type; - u16 ret_val = (high_val >> rsh) & 0xfff; - - return ret_val; -} - -static int hclge_set_mta_filter_mode(struct hclge_dev *hdev, - enum hclge_mta_dmac_sel_type mta_mac_sel, - bool enable) -{ - struct hclge_mta_filter_mode_cmd *req; - struct hclge_desc desc; - int ret; - - req = (struct hclge_mta_filter_mode_cmd *)desc.data; - hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_MODE_CFG, false); - - hnae3_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B, - enable); - hnae3_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M, - HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel); - - ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) - dev_err(&hdev->pdev->dev, - "Config mat filter mode failed for cmd_send, ret =%d.\n", - ret); - - return ret; -} - -int hclge_cfg_func_mta_filter(struct hclge_dev *hdev, - u8 func_id, - bool enable) -{ - struct hclge_cfg_func_mta_filter_cmd *req; - struct hclge_desc desc; - int ret; - - req = (struct hclge_cfg_func_mta_filter_cmd *)desc.data; - hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_FUNC_CFG, false); - - hnae3_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B, - enable); - req->function_id = func_id; - - ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) - dev_err(&hdev->pdev->dev, - "Config func_id enable failed for cmd_send, ret =%d.\n", - ret); - - return ret; -} - -static int hclge_set_mta_table_item(struct hclge_vport *vport, - u16 idx, - bool enable) -{ - struct hclge_dev *hdev = vport->back; - struct hclge_cfg_func_mta_item_cmd *req; - struct hclge_desc desc; - u16 item_idx = 0; - int ret; - - req = (struct hclge_cfg_func_mta_item_cmd *)desc.data; - hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_TBL_ITEM_CFG, false); - hnae3_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable); - - hnae3_set_field(item_idx, HCLGE_CFG_MTA_ITEM_IDX_M, - HCLGE_CFG_MTA_ITEM_IDX_S, idx); - req->item_idx = cpu_to_le16(item_idx); - - ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) { - dev_err(&hdev->pdev->dev, - "Config mta table item failed for cmd_send, ret =%d.\n", - ret); - return ret; - } - - if (enable) - set_bit(idx, vport->mta_shadow); - else - clear_bit(idx, vport->mta_shadow); - - return 0; -} - -static int hclge_update_mta_status(struct hnae3_handle *handle) -{ - unsigned long mta_status[BITS_TO_LONGS(HCLGE_MTA_TBL_SIZE)]; - struct hclge_vport *vport = hclge_get_vport(handle); - struct net_device *netdev = handle->kinfo.netdev; - struct netdev_hw_addr *ha; - u16 tbl_idx; - - memset(mta_status, 0, sizeof(mta_status)); - - /* update mta_status from mc addr list */ - netdev_for_each_mc_addr(ha, netdev) { - tbl_idx = hclge_get_mac_addr_to_mta_index(vport, ha->addr); - set_bit(tbl_idx, mta_status); - } - - return hclge_update_mta_status_common(vport, mta_status, - 0, HCLGE_MTA_TBL_SIZE, true); -} - -int hclge_update_mta_status_common(struct hclge_vport *vport, - unsigned long *status, - u16 idx, - u16 count, - bool 
update_filter) -{ - struct hclge_dev *hdev = vport->back; - u16 update_max = idx + count; - u16 check_max; - int ret = 0; - bool used; - u16 i; - - /* setup mta check range */ - if (update_filter) { - i = 0; - check_max = HCLGE_MTA_TBL_SIZE; - } else { - i = idx; - check_max = update_max; - } - - used = false; - /* check and update all mta item */ - for (; i < check_max; i++) { - /* ignore unused item */ - if (!test_bit(i, vport->mta_shadow)) - continue; - - /* if i in update range then update it */ - if (i >= idx && i < update_max) - if (!test_bit(i - idx, status)) - hclge_set_mta_table_item(vport, i, false); - - if (!used && test_bit(i, vport->mta_shadow)) - used = true; - } - - /* no longer use mta, disable it */ - if (vport->accept_mta_mc && update_filter && !used) { - ret = hclge_cfg_func_mta_filter(hdev, - vport->vport_id, - false); - if (ret) - dev_err(&hdev->pdev->dev, - "disable func mta filter fail ret=%d\n", - ret); - else - vport->accept_mta_mc = false; - } - - return ret; -} - static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport, struct hclge_mac_vlan_tbl_entry_cmd *req) { @@ -4241,6 +5083,118 @@ static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport, return cfg_status; } +static int hclge_init_umv_space(struct hclge_dev *hdev) +{ + u16 allocated_size = 0; + int ret; + + ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size, + true); + if (ret) + return ret; + + if (allocated_size < hdev->wanted_umv_size) + dev_warn(&hdev->pdev->dev, + "Alloc umv space failed, want %d, get %d\n", + hdev->wanted_umv_size, allocated_size); + + mutex_init(&hdev->umv_mutex); + hdev->max_umv_size = allocated_size; + hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2); + hdev->share_umv_size = hdev->priv_umv_size + + hdev->max_umv_size % (hdev->num_req_vfs + 2); + + return 0; +} + +static int hclge_uninit_umv_space(struct hclge_dev *hdev) +{ + int ret; + + if (hdev->max_umv_size > 0) { + ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL, + false); + if (ret) + return ret; + hdev->max_umv_size = 0; + } + mutex_destroy(&hdev->umv_mutex); + + return 0; +} + +static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size, + u16 *allocated_size, bool is_alloc) +{ + struct hclge_umv_spc_alc_cmd *req; + struct hclge_desc desc; + int ret; + + req = (struct hclge_umv_spc_alc_cmd *)desc.data; + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false); + hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, !is_alloc); + req->space_size = cpu_to_le32(space_size); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "%s umv space failed for cmd_send, ret =%d\n", + is_alloc ? 
"allocate" : "free", ret); + return ret; + } + + if (is_alloc && allocated_size) + *allocated_size = le32_to_cpu(desc.data[1]); + + return 0; +} + +static void hclge_reset_umv_space(struct hclge_dev *hdev) +{ + struct hclge_vport *vport; + int i; + + for (i = 0; i < hdev->num_alloc_vport; i++) { + vport = &hdev->vport[i]; + vport->used_umv_num = 0; + } + + mutex_lock(&hdev->umv_mutex); + hdev->share_umv_size = hdev->priv_umv_size + + hdev->max_umv_size % (hdev->num_req_vfs + 2); + mutex_unlock(&hdev->umv_mutex); +} + +static bool hclge_is_umv_space_full(struct hclge_vport *vport) +{ + struct hclge_dev *hdev = vport->back; + bool is_full; + + mutex_lock(&hdev->umv_mutex); + is_full = (vport->used_umv_num >= hdev->priv_umv_size && + hdev->share_umv_size == 0); + mutex_unlock(&hdev->umv_mutex); + + return is_full; +} + +static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free) +{ + struct hclge_dev *hdev = vport->back; + + mutex_lock(&hdev->umv_mutex); + if (is_free) { + if (vport->used_umv_num > hdev->priv_umv_size) + hdev->share_umv_size++; + vport->used_umv_num--; + } else { + if (vport->used_umv_num >= hdev->priv_umv_size) + hdev->share_umv_size--; + vport->used_umv_num++; + } + mutex_unlock(&hdev->umv_mutex); +} + static int hclge_add_uc_addr(struct hnae3_handle *handle, const unsigned char *addr) { @@ -4286,8 +5240,19 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport, * is not allowed in the mac vlan table. */ ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false); - if (ret == -ENOENT) - return hclge_add_mac_vlan_tbl(vport, &req, NULL); + if (ret == -ENOENT) { + if (!hclge_is_umv_space_full(vport)) { + ret = hclge_add_mac_vlan_tbl(vport, &req, NULL); + if (!ret) + hclge_update_umv_space(vport, false); + return ret; + } + + dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n", + hdev->priv_umv_size); + + return -ENOSPC; + } /* check if we just hit the duplicate */ if (!ret) @@ -4330,6 +5295,8 @@ int hclge_rm_uc_addr_common(struct hclge_vport *vport, hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); hclge_prepare_mac_addr(&req, addr); ret = hclge_remove_mac_vlan_tbl(vport, &req); + if (!ret) + hclge_update_umv_space(vport, true); return ret; } @@ -4348,7 +5315,6 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport, struct hclge_dev *hdev = vport->back; struct hclge_mac_vlan_tbl_entry_cmd req; struct hclge_desc desc[3]; - u16 tbl_idx; int status; /* mac addr check */ @@ -4362,7 +5328,7 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport, hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); - hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0); + hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1); hclge_prepare_mac_addr(&req, addr); status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); if (!status) { @@ -4378,25 +5344,8 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport, status = hclge_add_mac_vlan_tbl(vport, &req, desc); } - /* If mc mac vlan table is full, use MTA table */ - if (status == -ENOSPC) { - if (!vport->accept_mta_mc) { - status = hclge_cfg_func_mta_filter(hdev, - vport->vport_id, - true); - if (status) { - dev_err(&hdev->pdev->dev, - "set mta filter mode fail ret=%d\n", - status); - return status; - } - vport->accept_mta_mc = true; - } - - /* Set MTA table for this MAC address */ - tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr); - status = hclge_set_mta_table_item(vport, 
tbl_idx, true); - } + if (status == -ENOSPC) + dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n"); return status; } @@ -4429,7 +5378,7 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport, hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); - hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0); + hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1); hclge_prepare_mac_addr(&req, addr); status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); if (!status) { @@ -4598,8 +5547,20 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p, return 0; } +static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr, + int cmd) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + if (!hdev->hw.mac.phydev) + return -EOPNOTSUPP; + + return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd); +} + static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type, - bool filter_en) + u8 fe_type, bool filter_en) { struct hclge_vlan_filter_ctrl_cmd *req; struct hclge_desc desc; @@ -4609,7 +5570,7 @@ static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type, req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data; req->vlan_type = vlan_type; - req->vlan_fe = filter_en; + req->vlan_fe = filter_en ? fe_type : 0; ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) @@ -4621,13 +5582,34 @@ static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type, #define HCLGE_FILTER_TYPE_VF 0 #define HCLGE_FILTER_TYPE_PORT 1 +#define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0) +#define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0) +#define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1) +#define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2) +#define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3) +#define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \ + | HCLGE_FILTER_FE_ROCE_EGRESS_B) +#define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \ + | HCLGE_FILTER_FE_ROCE_INGRESS_B) static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable) { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; - hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, enable); + if (hdev->pdev->revision >= 0x21) { + hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, + HCLGE_FILTER_FE_EGRESS, enable); + hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, + HCLGE_FILTER_FE_INGRESS, enable); + } else { + hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, + HCLGE_FILTER_FE_EGRESS_V1_B, enable); + } + if (enable) + handle->netdev_flags |= HNAE3_VLAN_FLTR; + else + handle->netdev_flags &= ~HNAE3_VLAN_FLTR; } static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid, @@ -4686,9 +5668,17 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid, "Add vf vlan filter fail, ret =%d.\n", req0->resp_code); } else { +#define HCLGE_VF_VLAN_DEL_NO_FOUND 1 if (!req0->resp_code) return 0; + if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) { + dev_warn(&hdev->pdev->dev, + "vlan %d filter is not in vf vlan table\n", + vlan); + return 0; + } + dev_err(&hdev->pdev->dev, "Kill vf vlan filter fail, ret =%d.\n", req0->resp_code); @@ -4732,6 +5722,9 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto, u16 vport_idx, vport_num = 0; int ret; + if (is_kill && !vlan_id) + return 0; + ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, 
vlan_id, 0, proto); if (ret) { @@ -4761,7 +5754,7 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto, return -EINVAL; } - for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], VLAN_N_VID) + for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM) vport_num++; if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1)) @@ -4896,7 +5889,7 @@ static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev) hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false); - tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)&desc.data; + tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data; tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type); tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type); @@ -4913,18 +5906,30 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev) { #define HCLGE_DEF_VLAN_TYPE 0x8100 - struct hnae3_handle *handle; + struct hnae3_handle *handle = &hdev->vport[0].nic; struct hclge_vport *vport; int ret; int i; - ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, true); - if (ret) - return ret; + if (hdev->pdev->revision >= 0x21) { + ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, + HCLGE_FILTER_FE_EGRESS, true); + if (ret) + return ret; - ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, true); - if (ret) - return ret; + ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, + HCLGE_FILTER_FE_INGRESS, true); + if (ret) + return ret; + } else { + ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, + HCLGE_FILTER_FE_EGRESS_V1_B, + true); + if (ret) + return ret; + } + + handle->netdev_flags |= HNAE3_VLAN_FLTR; hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE; hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE; @@ -4970,7 +5975,6 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev) return ret; } - handle = &hdev->vport[0].nic; return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false); } @@ -5187,20 +6191,6 @@ static u32 hclge_get_fw_version(struct hnae3_handle *handle) return hdev->fw_version; } -static void hclge_get_flowctrl_adv(struct hnae3_handle *handle, - u32 *flowctrl_adv) -{ - struct hclge_vport *vport = hclge_get_vport(handle); - struct hclge_dev *hdev = vport->back; - struct phy_device *phydev = hdev->hw.mac.phydev; - - if (!phydev) - return; - - *flowctrl_adv |= (phydev->advertising & ADVERTISED_Pause) | - (phydev->advertising & ADVERTISED_Asym_Pause); -} - static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) { struct phy_device *phydev = hdev->hw.mac.phydev; @@ -5208,13 +6198,7 @@ static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) if (!phydev) return; - phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause); - - if (rx_en) - phydev->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause; - - if (tx_en) - phydev->advertising ^= ADVERTISED_Asym_Pause; + phy_set_asym_pause(phydev, rx_en, tx_en); } static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) @@ -5256,11 +6240,7 @@ int hclge_cfg_flowctrl(struct hclge_dev *hdev) if (!phydev->link || !phydev->autoneg) return 0; - if (phydev->advertising & ADVERTISED_Pause) - local_advertising = ADVERTISE_PAUSE_CAP; - - if (phydev->advertising & ADVERTISED_Asym_Pause) - local_advertising |= ADVERTISE_PAUSE_ASYM; + local_advertising = ethtool_adv_to_lcl_adv_t(phydev->advertising); if (phydev->pause) remote_advertising = LPA_PAUSE_CAP; @@ -5444,26 +6424,31 
@@ static int hclge_init_client_instance(struct hnae3_client *client, vport->nic.client = client; ret = client->ops->init_instance(&vport->nic); if (ret) - return ret; + goto clear_nic; ret = hclge_init_instance_hw(hdev); if (ret) { client->ops->uninit_instance(&vport->nic, 0); - return ret; + goto clear_nic; } + hnae3_set_client_init_flag(client, ae_dev, 1); + if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) { struct hnae3_client *rc = hdev->roce_client; ret = hclge_init_roce_base_info(vport); if (ret) - return ret; + goto clear_roce; ret = rc->ops->init_instance(&vport->roce); if (ret) - return ret; + goto clear_roce; + + hnae3_set_client_init_flag(hdev->roce_client, + ae_dev, 1); } break; @@ -5473,7 +6458,9 @@ static int hclge_init_client_instance(struct hnae3_client *client, ret = client->ops->init_instance(&vport->nic); if (ret) - return ret; + goto clear_nic; + + hnae3_set_client_init_flag(client, ae_dev, 1); break; case HNAE3_CLIENT_ROCE: @@ -5485,16 +6472,31 @@ static int hclge_init_client_instance(struct hnae3_client *client, if (hdev->roce_client && hdev->nic_client) { ret = hclge_init_roce_base_info(vport); if (ret) - return ret; + goto clear_roce; ret = client->ops->init_instance(&vport->roce); if (ret) - return ret; + goto clear_roce; + + hnae3_set_client_init_flag(client, ae_dev, 1); } + + break; + default: + return -EINVAL; } } return 0; + +clear_nic: + hdev->nic_client = NULL; + vport->nic.client = NULL; + return ret; +clear_roce: + hdev->roce_client = NULL; + vport->roce.client = NULL; + return ret; } static void hclge_uninit_client_instance(struct hnae3_client *client, @@ -5514,7 +6516,7 @@ static void hclge_uninit_client_instance(struct hnae3_client *client, } if (client->type == HNAE3_CLIENT_ROCE) return; - if (client->ops->uninit_instance) { + if (hdev->nic_client && client->ops->uninit_instance) { hclge_uninit_instance_hw(hdev); client->ops->uninit_instance(&vport->nic, 0); hdev->nic_client = NULL; @@ -5697,6 +6699,12 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) } } + ret = hclge_init_umv_space(hdev); + if (ret) { + dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret); + goto err_msi_irq_uninit; + } + ret = hclge_mac_init(hdev); if (ret) { dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); @@ -5734,6 +6742,13 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) goto err_mdiobus_unreg; } + ret = hclge_init_fd_config(hdev); + if (ret) { + dev_err(&pdev->dev, + "fd table init fail, ret=%d\n", ret); + goto err_mdiobus_unreg; + } + hclge_dcb_ops_set(hdev); timer_setup(&hdev->service_timer, hclge_service_timer, 0); @@ -5810,6 +6825,8 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) return ret; } + hclge_reset_umv_space(hdev); + ret = hclge_mac_init(hdev); if (ret) { dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); @@ -5840,6 +6857,13 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) return ret; } + ret = hclge_init_fd_config(hdev); + if (ret) { + dev_err(&pdev->dev, + "fd table init fail, ret=%d\n", ret); + return ret; + } + dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n", HCLGE_DRIVER_NAME); @@ -5856,6 +6880,8 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) if (mac->phydev) mdiobus_unregister(mac->mdio_bus); + hclge_uninit_umv_space(hdev); + /* Disable MISC vector(vector0) */ hclge_enable_vector(&hdev->misc_vector, false); synchronize_irq(hdev->misc_vector.vector_irq); @@ -5887,18 +6913,12 @@ static void hclge_get_channels(struct hnae3_handle *handle, } 
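For context, a minimal user-space sketch (not part of this patch) of how the flow director entry points added above are reached through the standard ethtool ioctl; the interface name, port, queue and rule location below are illustrative assumptions:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_rxnfc nfc;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&nfc, 0, sizeof(nfc));
	nfc.cmd = ETHTOOL_SRXCLSRLINS;		/* dispatched to the driver's add_fd_entry hook */
	nfc.fs.flow_type = TCP_V4_FLOW;
	nfc.fs.h_u.tcp_ip4_spec.pdst = htons(8000);	/* match TCP dst port 8000 */
	nfc.fs.m_u.tcp_ip4_spec.pdst = 0xffff;		/* all 16 port bits significant */
	nfc.fs.ring_cookie = 3;			/* steer matching packets to queue 3 */
	nfc.fs.location = 0;			/* stage-1 TCAM entry index */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&nfc;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("ETHTOOL_SRXCLSRLINS");
	close(fd);
	return 0;
}

The same rule can be installed from the shell with "ethtool -N eth0 flow-type tcp4 dst-port 8000 action 3 loc 0".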
static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle, - u16 *free_tqps, u16 *max_rss_size) + u16 *alloc_tqps, u16 *max_rss_size) { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; - u16 temp_tqps = 0; - int i; - for (i = 0; i < hdev->num_tqps; i++) { - if (!hdev->htqp[i].alloced) - temp_tqps++; - } - *free_tqps = temp_tqps; + *alloc_tqps = vport->alloc_tqps; *max_rss_size = hdev->rss_size_max; } @@ -6228,27 +7248,6 @@ static void hclge_get_link_mode(struct hnae3_handle *handle, } } -static void hclge_get_port_type(struct hnae3_handle *handle, - u8 *port_type) -{ - struct hclge_vport *vport = hclge_get_vport(handle); - struct hclge_dev *hdev = vport->back; - u8 media_type = hdev->hw.mac.media_type; - - switch (media_type) { - case HNAE3_MEDIA_TYPE_FIBER: - *port_type = PORT_FIBRE; - break; - case HNAE3_MEDIA_TYPE_COPPER: - *port_type = PORT_TP; - break; - case HNAE3_MEDIA_TYPE_UNKNOWN: - default: - *port_type = PORT_OTHER; - break; - } -} - static const struct hnae3_ae_ops hclge_ops = { .init_ae_dev = hclge_init_ae_dev, .uninit_ae_dev = hclge_uninit_ae_dev, @@ -6276,11 +7275,11 @@ static const struct hnae3_ae_ops hclge_ops = { .get_tc_size = hclge_get_tc_size, .get_mac_addr = hclge_get_mac_addr, .set_mac_addr = hclge_set_mac_addr, + .do_ioctl = hclge_do_ioctl, .add_uc_addr = hclge_add_uc_addr, .rm_uc_addr = hclge_rm_uc_addr, .add_mc_addr = hclge_add_mc_addr, .rm_mc_addr = hclge_rm_mc_addr, - .update_mta_status = hclge_update_mta_status, .set_autoneg = hclge_set_autoneg, .get_autoneg = hclge_get_autoneg, .get_pauseparam = hclge_get_pauseparam, @@ -6301,12 +7300,18 @@ static const struct hnae3_ae_ops hclge_ops = { .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info, .set_channels = hclge_set_channels, .get_channels = hclge_get_channels, - .get_flowctrl_adv = hclge_get_flowctrl_adv, .get_regs_len = hclge_get_regs_len, .get_regs = hclge_get_regs, .set_led_id = hclge_set_led_id, .get_link_mode = hclge_get_link_mode, - .get_port_type = hclge_get_port_type, + .add_fd_entry = hclge_add_fd_entry, + .del_fd_entry = hclge_del_fd_entry, + .del_all_fd_entries = hclge_del_all_fd_entries, + .get_fd_rule_cnt = hclge_get_fd_rule_cnt, + .get_fd_rule_info = hclge_get_fd_rule_info, + .get_fd_all_rules = hclge_get_all_rules, + .restore_fd_rules = hclge_restore_fd_entries, + .enable_fd = hclge_enable_fd, }; static struct hnae3_ae_algo ae_algo = { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index 1528fb3fa6be..e3dfd654eca9 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -14,6 +14,8 @@ #define HCLGE_MOD_VERSION "1.0" #define HCLGE_DRIVER_NAME "hclge" +#define HCLGE_MAX_PF_NUM 8 + #define HCLGE_INVALID_VPORT 0xffff #define HCLGE_PF_CFG_BLOCK_SIZE 32 @@ -53,7 +55,9 @@ #define HCLGE_RSS_TC_SIZE_6 64 #define HCLGE_RSS_TC_SIZE_7 128 -#define HCLGE_MTA_TBL_SIZE 4096 +#define HCLGE_UMV_TBL_SIZE 3072 +#define HCLGE_DEFAULT_UMV_SPACE_PER_PF \ + (HCLGE_UMV_TBL_SIZE / HCLGE_MAX_PF_NUM) #define HCLGE_TQP_RESET_TRY_TIMES 10 @@ -79,6 +83,19 @@ #define HCLGE_VF_NUM_PER_CMD 64 #define HCLGE_VF_NUM_PER_BYTE 8 +enum HLCGE_PORT_TYPE { + HOST_PORT, + NETWORK_PORT +}; + +#define HCLGE_PF_ID_S 0 +#define HCLGE_PF_ID_M GENMASK(2, 0) +#define HCLGE_VF_ID_S 3 +#define HCLGE_VF_ID_M GENMASK(10, 3) +#define HCLGE_PORT_TYPE_B 11 +#define HCLGE_NETWORK_PORT_ID_S 0 +#define HCLGE_NETWORK_PORT_ID_M GENMASK(3, 0) + /* 
Reset related Registers */ #define HCLGE_MISC_RESET_STS_REG 0x20700 #define HCLGE_MISC_VECTOR_INT_STS 0x20800 @@ -149,13 +166,6 @@ enum HCLGE_MAC_DUPLEX { HCLGE_MAC_FULL }; -enum hclge_mta_dmac_sel_type { - HCLGE_MAC_ADDR_47_36, - HCLGE_MAC_ADDR_46_35, - HCLGE_MAC_ADDR_45_34, - HCLGE_MAC_ADDR_44_33, -}; - struct hclge_mac { u8 phy_addr; u8 flag; @@ -238,6 +248,7 @@ struct hclge_cfg { u8 default_speed; u32 numa_node_map; u8 speed_ability; + u16 umv_space; }; struct hclge_tm_info { @@ -256,109 +267,6 @@ struct hclge_comm_stats_str { unsigned long offset; }; -/* all 64bit stats, opcode id: 0x0030 */ -struct hclge_64_bit_stats { - /* query_igu_stat */ - u64 igu_rx_oversize_pkt; - u64 igu_rx_undersize_pkt; - u64 igu_rx_out_all_pkt; - u64 igu_rx_uni_pkt; - u64 igu_rx_multi_pkt; - u64 igu_rx_broad_pkt; - u64 rsv0; - - /* query_egu_stat */ - u64 egu_tx_out_all_pkt; - u64 egu_tx_uni_pkt; - u64 egu_tx_multi_pkt; - u64 egu_tx_broad_pkt; - - /* ssu_ppp packet stats */ - u64 ssu_ppp_mac_key_num; - u64 ssu_ppp_host_key_num; - u64 ppp_ssu_mac_rlt_num; - u64 ppp_ssu_host_rlt_num; - - /* ssu_tx_in_out_dfx_stats */ - u64 ssu_tx_in_num; - u64 ssu_tx_out_num; - /* ssu_rx_in_out_dfx_stats */ - u64 ssu_rx_in_num; - u64 ssu_rx_out_num; -}; - -/* all 32bit stats, opcode id: 0x0031 */ -struct hclge_32_bit_stats { - u64 igu_rx_err_pkt; - u64 igu_rx_no_eof_pkt; - u64 igu_rx_no_sof_pkt; - u64 egu_tx_1588_pkt; - u64 egu_tx_err_pkt; - u64 ssu_full_drop_num; - u64 ssu_part_drop_num; - u64 ppp_key_drop_num; - u64 ppp_rlt_drop_num; - u64 ssu_key_drop_num; - u64 pkt_curr_buf_cnt; - u64 qcn_fb_rcv_cnt; - u64 qcn_fb_drop_cnt; - u64 qcn_fb_invaild_cnt; - u64 rsv0; - u64 rx_packet_tc0_in_cnt; - u64 rx_packet_tc1_in_cnt; - u64 rx_packet_tc2_in_cnt; - u64 rx_packet_tc3_in_cnt; - u64 rx_packet_tc4_in_cnt; - u64 rx_packet_tc5_in_cnt; - u64 rx_packet_tc6_in_cnt; - u64 rx_packet_tc7_in_cnt; - u64 rx_packet_tc0_out_cnt; - u64 rx_packet_tc1_out_cnt; - u64 rx_packet_tc2_out_cnt; - u64 rx_packet_tc3_out_cnt; - u64 rx_packet_tc4_out_cnt; - u64 rx_packet_tc5_out_cnt; - u64 rx_packet_tc6_out_cnt; - u64 rx_packet_tc7_out_cnt; - - /* Tx packet level statistics */ - u64 tx_packet_tc0_in_cnt; - u64 tx_packet_tc1_in_cnt; - u64 tx_packet_tc2_in_cnt; - u64 tx_packet_tc3_in_cnt; - u64 tx_packet_tc4_in_cnt; - u64 tx_packet_tc5_in_cnt; - u64 tx_packet_tc6_in_cnt; - u64 tx_packet_tc7_in_cnt; - u64 tx_packet_tc0_out_cnt; - u64 tx_packet_tc1_out_cnt; - u64 tx_packet_tc2_out_cnt; - u64 tx_packet_tc3_out_cnt; - u64 tx_packet_tc4_out_cnt; - u64 tx_packet_tc5_out_cnt; - u64 tx_packet_tc6_out_cnt; - u64 tx_packet_tc7_out_cnt; - - /* packet buffer statistics */ - u64 pkt_curr_buf_tc0_cnt; - u64 pkt_curr_buf_tc1_cnt; - u64 pkt_curr_buf_tc2_cnt; - u64 pkt_curr_buf_tc3_cnt; - u64 pkt_curr_buf_tc4_cnt; - u64 pkt_curr_buf_tc5_cnt; - u64 pkt_curr_buf_tc6_cnt; - u64 pkt_curr_buf_tc7_cnt; - - u64 mb_uncopy_num; - u64 lo_pri_unicast_rlt_drop_num; - u64 hi_pri_multicast_rlt_drop_num; - u64 lo_pri_multicast_rlt_drop_num; - u64 rx_oq_drop_pkt_cnt; - u64 tx_oq_drop_pkt_cnt; - u64 nic_l2_err_drop_pkt_cnt; - u64 roc_l2_err_drop_pkt_cnt; -}; - /* mac stats ,opcode id: 0x0032 */ struct hclge_mac_stats { u64 mac_tx_mac_pause_num; @@ -450,8 +358,6 @@ struct hclge_mac_stats { #define HCLGE_STATS_TIMER_INTERVAL (60 * 5) struct hclge_hw_stats { struct hclge_mac_stats mac_stats; - struct hclge_64_bit_stats all_64_bit_stats; - struct hclge_32_bit_stats all_32_bit_stats; u32 stats_timer; }; @@ -464,6 +370,221 @@ struct hclge_vlan_type_cfg { u16 tx_in_vlan_type; }; +enum 
HCLGE_FD_MODE { + HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1, + HCLGE_FD_MODE_DEPTH_1K_WIDTH_400B_STAGE_2, + HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1, + HCLGE_FD_MODE_DEPTH_2K_WIDTH_200B_STAGE_2, +}; + +enum HCLGE_FD_KEY_TYPE { + HCLGE_FD_KEY_BASE_ON_PTYPE, + HCLGE_FD_KEY_BASE_ON_TUPLE, +}; + +enum HCLGE_FD_STAGE { + HCLGE_FD_STAGE_1, + HCLGE_FD_STAGE_2, +}; + +/* OUTER_XXX indicates tuples in tunnel header of tunnel packet + * INNER_XXX indicate tuples in tunneled header of tunnel packet or + * tuples of non-tunnel packet + */ +enum HCLGE_FD_TUPLE { + OUTER_DST_MAC, + OUTER_SRC_MAC, + OUTER_VLAN_TAG_FST, + OUTER_VLAN_TAG_SEC, + OUTER_ETH_TYPE, + OUTER_L2_RSV, + OUTER_IP_TOS, + OUTER_IP_PROTO, + OUTER_SRC_IP, + OUTER_DST_IP, + OUTER_L3_RSV, + OUTER_SRC_PORT, + OUTER_DST_PORT, + OUTER_L4_RSV, + OUTER_TUN_VNI, + OUTER_TUN_FLOW_ID, + INNER_DST_MAC, + INNER_SRC_MAC, + INNER_VLAN_TAG_FST, + INNER_VLAN_TAG_SEC, + INNER_ETH_TYPE, + INNER_L2_RSV, + INNER_IP_TOS, + INNER_IP_PROTO, + INNER_SRC_IP, + INNER_DST_IP, + INNER_L3_RSV, + INNER_SRC_PORT, + INNER_DST_PORT, + INNER_L4_RSV, + MAX_TUPLE, +}; + +enum HCLGE_FD_META_DATA { + PACKET_TYPE_ID, + IP_FRAGEMENT, + ROCE_TYPE, + NEXT_KEY, + VLAN_NUMBER, + SRC_VPORT, + DST_VPORT, + TUNNEL_PACKET, + MAX_META_DATA, +}; + +struct key_info { + u8 key_type; + u8 key_length; +}; + +static const struct key_info meta_data_key_info[] = { + { PACKET_TYPE_ID, 6}, + { IP_FRAGEMENT, 1}, + { ROCE_TYPE, 1}, + { NEXT_KEY, 5}, + { VLAN_NUMBER, 2}, + { SRC_VPORT, 12}, + { DST_VPORT, 12}, + { TUNNEL_PACKET, 1}, +}; + +static const struct key_info tuple_key_info[] = { + { OUTER_DST_MAC, 48}, + { OUTER_SRC_MAC, 48}, + { OUTER_VLAN_TAG_FST, 16}, + { OUTER_VLAN_TAG_SEC, 16}, + { OUTER_ETH_TYPE, 16}, + { OUTER_L2_RSV, 16}, + { OUTER_IP_TOS, 8}, + { OUTER_IP_PROTO, 8}, + { OUTER_SRC_IP, 32}, + { OUTER_DST_IP, 32}, + { OUTER_L3_RSV, 16}, + { OUTER_SRC_PORT, 16}, + { OUTER_DST_PORT, 16}, + { OUTER_L4_RSV, 32}, + { OUTER_TUN_VNI, 24}, + { OUTER_TUN_FLOW_ID, 8}, + { INNER_DST_MAC, 48}, + { INNER_SRC_MAC, 48}, + { INNER_VLAN_TAG_FST, 16}, + { INNER_VLAN_TAG_SEC, 16}, + { INNER_ETH_TYPE, 16}, + { INNER_L2_RSV, 16}, + { INNER_IP_TOS, 8}, + { INNER_IP_PROTO, 8}, + { INNER_SRC_IP, 32}, + { INNER_DST_IP, 32}, + { INNER_L3_RSV, 16}, + { INNER_SRC_PORT, 16}, + { INNER_DST_PORT, 16}, + { INNER_L4_RSV, 32}, +}; + +#define MAX_KEY_LENGTH 400 +#define MAX_KEY_DWORDS DIV_ROUND_UP(MAX_KEY_LENGTH / 8, 4) +#define MAX_KEY_BYTES (MAX_KEY_DWORDS * 4) +#define MAX_META_DATA_LENGTH 32 + +enum HCLGE_FD_PACKET_TYPE { + NIC_PACKET, + ROCE_PACKET, +}; + +enum HCLGE_FD_ACTION { + HCLGE_FD_ACTION_ACCEPT_PACKET, + HCLGE_FD_ACTION_DROP_PACKET, +}; + +struct hclge_fd_key_cfg { + u8 key_sel; + u8 inner_sipv6_word_en; + u8 inner_dipv6_word_en; + u8 outer_sipv6_word_en; + u8 outer_dipv6_word_en; + u32 tuple_active; + u32 meta_data_active; +}; + +struct hclge_fd_cfg { + u8 fd_mode; + u8 fd_en; + u16 max_key_length; + u32 proto_support; + u32 rule_num[2]; /* rule entry number */ + u16 cnt_num[2]; /* rule hit counter number */ + struct hclge_fd_key_cfg key_cfg[2]; +}; + +struct hclge_fd_rule_tuples { + u8 src_mac[6]; + u8 dst_mac[6]; + u32 src_ip[4]; + u32 dst_ip[4]; + u16 src_port; + u16 dst_port; + u16 vlan_tag1; + u16 ether_proto; + u8 ip_tos; + u8 ip_proto; +}; + +struct hclge_fd_rule { + struct hlist_node rule_node; + struct hclge_fd_rule_tuples tuples; + struct hclge_fd_rule_tuples tuples_mask; + u32 unused_tuple; + u32 flow_type; + u8 action; + u16 vf_id; + u16 queue_id; + u16 location; +}; + +struct 
hclge_fd_ad_data { + u16 ad_id; + u8 drop_packet; + u8 forward_to_direct_queue; + u16 queue_id; + u8 use_counter; + u8 counter_id; + u8 use_next_stage; + u8 write_rule_id_to_bd; + u8 next_input_key; + u16 rule_id; +}; + +/* For each bit of TCAM entry, it uses a pair of 'x' and + * 'y' to indicate which value to match, like below: + * ---------------------------------- + * | bit x | bit y | search value | + * ---------------------------------- + * | 0 | 0 | always hit | + * ---------------------------------- + * | 1 | 0 | match '0' | + * ---------------------------------- + * | 0 | 1 | match '1' | + * ---------------------------------- + * | 1 | 1 | invalid | + * ---------------------------------- + * Then for input key(k) and mask(v), we can calculate the value by + * the formulae: + * x = (~k) & v + * y = (k ^ ~v) & k + */ +#define calc_x(x, k, v) ((x) = (~(k) & (v))) +#define calc_y(y, k, v) \ + do { \ + const typeof(k) _k_ = (k); \ + const typeof(v) _v_ = (v); \ + (y) = (_k_ ^ ~_v_) & (_k_); \ + } while (0) + #define HCLGE_VPORT_NUM 256 struct hclge_dev { struct pci_dev *pdev; @@ -547,12 +668,22 @@ struct hclge_dev { u32 pkt_buf_size; /* Total pf buf size for tx/rx */ u32 mps; /* Max packet size */ - enum hclge_mta_dmac_sel_type mta_mac_sel_type; - bool enable_mta; /* Multicast filter enable */ - struct hclge_vlan_type_cfg vlan_type_cfg; unsigned long vlan_table[VLAN_N_VID][BITS_TO_LONGS(HCLGE_VPORT_NUM)]; + + struct hclge_fd_cfg fd_cfg; + struct hlist_head fd_rule_list; + u16 hclge_fd_rule_num; + + u16 wanted_umv_size; + /* max available unicast mac vlan space */ + u16 max_umv_size; + /* private unicast mac vlan space, it's same for PF and its VFs */ + u16 priv_umv_size; + /* unicast mac vlan space shared by PF and its VFs */ + u16 share_umv_size; + struct mutex umv_mutex; /* protect share_umv_size */ }; /* VPort level vlan tag configuration for TX direction */ @@ -605,13 +736,12 @@ struct hclge_vport { struct hclge_tx_vtag_cfg txvlan_cfg; struct hclge_rx_vtag_cfg rxvlan_cfg; + u16 used_umv_num; + int vport_id; struct hclge_dev *back; /* Back reference to associated dev */ struct hnae3_handle nic; struct hnae3_handle roce; - - bool accept_mta_mc; /* whether to accept mta filter multicast */ - unsigned long mta_shadow[BITS_TO_LONGS(HCLGE_MTA_TBL_SIZE)]; }; void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc, @@ -626,15 +756,6 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport, int hclge_rm_mc_addr_common(struct hclge_vport *vport, const unsigned char *addr); -int hclge_cfg_func_mta_filter(struct hclge_dev *hdev, - u8 func_id, - bool enable); -int hclge_update_mta_status_common(struct hclge_vport *vport, - unsigned long *status, - u16 idx, - u16 count, - bool update_filter); - struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle); int hclge_bind_ring_with_vector(struct hclge_vport *vport, int vector_id, bool en, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index f34851c91eb3..04462a347a94 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -233,43 +233,6 @@ static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport, return 0; } -static int hclge_set_vf_mc_mta_status(struct hclge_vport *vport, - u8 *msg, u8 idx, bool is_end) -{ -#define HCLGE_MTA_STATUS_MSG_SIZE 13 -#define HCLGE_MTA_STATUS_MSG_BITS \ - (HCLGE_MTA_STATUS_MSG_SIZE * BITS_PER_BYTE) -#define HCLGE_MTA_STATUS_MSG_END_BITS \ 
- (HCLGE_MTA_TBL_SIZE % HCLGE_MTA_STATUS_MSG_BITS) - unsigned long status[BITS_TO_LONGS(HCLGE_MTA_STATUS_MSG_BITS)]; - u16 tbl_cnt; - u16 tbl_idx; - u8 msg_ofs; - u8 msg_bit; - - tbl_cnt = is_end ? HCLGE_MTA_STATUS_MSG_END_BITS : - HCLGE_MTA_STATUS_MSG_BITS; - - /* set msg field */ - msg_ofs = 0; - msg_bit = 0; - memset(status, 0, sizeof(status)); - for (tbl_idx = 0; tbl_idx < tbl_cnt; tbl_idx++) { - if (msg[msg_ofs] & BIT(msg_bit)) - set_bit(tbl_idx, status); - - msg_bit++; - if (msg_bit == BITS_PER_BYTE) { - msg_bit = 0; - msg_ofs++; - } - } - - return hclge_update_mta_status_common(vport, - status, idx * HCLGE_MTA_STATUS_MSG_BITS, - tbl_cnt, is_end); -} - static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport, struct hclge_mbx_vf_to_pf_cmd *mbx_req, bool gen_resp) @@ -284,27 +247,6 @@ static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport, status = hclge_add_mc_addr_common(vport, mac_addr); } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_REMOVE) { status = hclge_rm_mc_addr_common(vport, mac_addr); - } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_FUNC_MTA_ENABLE) { - u8 func_id = vport->vport_id; - bool enable = mbx_req->msg[2]; - - status = hclge_cfg_func_mta_filter(hdev, func_id, enable); - } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MTA_TYPE_READ) { - resp_data = hdev->mta_mac_sel_type; - resp_len = sizeof(u8); - gen_resp = true; - status = 0; - } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MTA_STATUS_UPDATE) { - /* mta status update msg format - * msg[2.6 : 2.0] msg index - * msg[2.7] msg is end - * msg[15 : 3] mta status bits[103 : 0] - */ - bool is_end = (mbx_req->msg[2] & 0x80) ? true : false; - - status = hclge_set_vf_mc_mta_status(vport, &mbx_req->msg[3], - mbx_req->msg[2] & 0x7F, - is_end); } else { dev_err(&hdev->pdev->dev, "failed to set mcast mac addr, unknown subcode %d\n", diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c index 398971a062f4..24b1f2a0c32a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c @@ -10,8 +10,6 @@ #define HCLGE_PHY_SUPPORTED_FEATURES (SUPPORTED_Autoneg | \ SUPPORTED_TP | \ - SUPPORTED_Pause | \ - SUPPORTED_Asym_Pause | \ PHY_10BT_FEATURES | \ PHY_100BT_FEATURES | \ PHY_1000BT_FEATURES) @@ -213,7 +211,7 @@ int hclge_mac_connect_phy(struct hclge_dev *hdev) } phydev->supported &= HCLGE_PHY_SUPPORTED_FEATURES; - phydev->advertising = phydev->supported; + phy_support_asym_pause(phydev); return 0; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c index 5db70a1451c5..aa5cb9834d73 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c @@ -172,7 +172,7 @@ static int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap, u8 pfc_bitmap) { struct hclge_desc desc; - struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)&desc.data; + struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)desc.data; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PFC_PAUSE_EN, false); @@ -188,11 +188,12 @@ static int hclge_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr, struct hclge_cfg_pause_param_cmd *pause_param; struct hclge_desc desc; - pause_param = (struct hclge_cfg_pause_param_cmd *)&desc.data; + pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, 
false); ether_addr_copy(pause_param->mac_addr, addr); + ether_addr_copy(pause_param->mac_addr_extra, addr); pause_param->pause_trans_gap = pause_trans_gap; pause_param->pause_trans_time = cpu_to_le16(pause_trans_time); @@ -207,7 +208,7 @@ int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr) u8 trans_gap; int ret; - pause_param = (struct hclge_cfg_pause_param_cmd *)&desc.data; + pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true); @@ -297,7 +298,7 @@ static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev, } static int hclge_tm_q_to_qs_map_cfg(struct hclge_dev *hdev, - u8 q_id, u16 qs_id) + u16 q_id, u16 qs_id) { struct hclge_nq_to_qs_link_cmd *map; struct hclge_desc desc; @@ -1279,10 +1280,15 @@ int hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc) return 0; } -void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc) +int hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc) { u8 i, bit_map = 0; + for (i = 0; i < hdev->num_alloc_vport; i++) { + if (num_tc > hdev->vport[i].alloc_tqps) + return -EINVAL; + } + hdev->tm_info.num_tc = num_tc; for (i = 0; i < hdev->tm_info.num_tc; i++) @@ -1296,6 +1302,8 @@ void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc) hdev->hw_tc_map = bit_map; hclge_tm_schd_info_init(hdev); + + return 0; } int hclge_tm_init_hw(struct hclge_dev *hdev) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h index dd4c194747c1..25eef13a3e14 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h @@ -106,6 +106,10 @@ struct hclge_cfg_pause_param_cmd { u8 pause_trans_gap; u8 rsvd; __le16 pause_trans_time; + u8 rsvd1[6]; + /* extra mac address to do double check for pause frame */ + u8 mac_addr_extra[ETH_ALEN]; + u16 rsvd2; }; struct hclge_pfc_stats_cmd { @@ -128,7 +132,7 @@ int hclge_tm_schd_init(struct hclge_dev *hdev); int hclge_pause_setup_hw(struct hclge_dev *hdev); int hclge_tm_schd_mode_hw(struct hclge_dev *hdev); int hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc); -void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc); +int hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc); int hclge_tm_dwrr_cfg(struct hclge_dev *hdev); int hclge_tm_map_cfg(struct hclge_dev *hdev); int hclge_tm_init_hw(struct hclge_dev *hdev); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c index fb471fe2c494..0d3b445f6799 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c @@ -132,9 +132,9 @@ static int hclgevf_init_cmd_queue(struct hclgevf_dev *hdev, reg_val |= HCLGEVF_NIC_CMQ_ENABLE; hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, reg_val); - hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0); hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0); - break; + hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0); + return 0; case HCLGEVF_TYPE_CRQ: reg_val = (u32)ring->desc_dma_addr; hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, reg_val); @@ -145,12 +145,12 @@ static int hclgevf_init_cmd_queue(struct hclgevf_dev *hdev, reg_val |= HCLGEVF_NIC_CMQ_ENABLE; hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, reg_val); - hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0); hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0); - break; + 
hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0); + return 0; + default: + return -EINVAL; } - - return 0; } void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h index 19b32860309c..bc294b0c8b62 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h @@ -89,6 +89,7 @@ enum hclgevf_opcode_type { HCLGEVF_OPC_CFG_COM_TQP_QUEUE = 0x0B20, /* RSS cmd */ HCLGEVF_OPC_RSS_GENERIC_CONFIG = 0x0D01, + HCLGEVF_OPC_RSS_INPUT_TUPLE = 0x0D02, HCLGEVF_OPC_RSS_INDIR_TABLE = 0x0D07, HCLGEVF_OPC_RSS_TC_MODE = 0x0D08, /* Mailbox cmd */ @@ -148,7 +149,8 @@ struct hclgevf_query_res_cmd { __le16 rsv[7]; }; -#define HCLGEVF_RSS_HASH_KEY_OFFSET 4 +#define HCLGEVF_RSS_DEFAULT_OUTPORT_B 4 +#define HCLGEVF_RSS_HASH_KEY_OFFSET_B 4 #define HCLGEVF_RSS_HASH_KEY_NUM 16 struct hclgevf_rss_config_cmd { u8 hash_config; @@ -159,11 +161,11 @@ struct hclgevf_rss_config_cmd { struct hclgevf_rss_input_tuple_cmd { u8 ipv4_tcp_en; u8 ipv4_udp_en; - u8 ipv4_stcp_en; + u8 ipv4_sctp_en; u8 ipv4_fragment_en; u8 ipv6_tcp_en; u8 ipv6_udp_en; - u8 ipv6_stcp_en; + u8 ipv6_sctp_en; u8 ipv6_fragment_en; u8 rsv[16]; }; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 9c0091f2addf..ac67fecb9408 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -31,16 +31,15 @@ static inline struct hclgevf_dev *hclgevf_ae_get_hdev( static int hclgevf_tqps_update_stats(struct hnae3_handle *handle) { + struct hnae3_knic_private_info *kinfo = &handle->kinfo; struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - struct hnae3_queue *queue; struct hclgevf_desc desc; struct hclgevf_tqp *tqp; int status; int i; - for (i = 0; i < hdev->num_tqps; i++) { - queue = handle->kinfo.tqp[i]; - tqp = container_of(queue, struct hclgevf_tqp, q); + for (i = 0; i < kinfo->num_tqps; i++) { + tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_RX_STATUS, true); @@ -77,17 +76,16 @@ static int hclgevf_tqps_update_stats(struct hnae3_handle *handle) static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data) { struct hnae3_knic_private_info *kinfo = &handle->kinfo; - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); struct hclgevf_tqp *tqp; u64 *buff = data; int i; - for (i = 0; i < hdev->num_tqps; i++) { - tqp = container_of(handle->kinfo.tqp[i], struct hclgevf_tqp, q); + for (i = 0; i < kinfo->num_tqps; i++) { + tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd; } for (i = 0; i < kinfo->num_tqps; i++) { - tqp = container_of(handle->kinfo.tqp[i], struct hclgevf_tqp, q); + tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd; } @@ -96,29 +94,29 @@ static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data) static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset) { - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hnae3_knic_private_info *kinfo = &handle->kinfo; - return hdev->num_tqps * 2; + return kinfo->num_tqps * 2; } static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data) { - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + 
struct hnae3_knic_private_info *kinfo = &handle->kinfo; u8 *buff = data; int i = 0; - for (i = 0; i < hdev->num_tqps; i++) { - struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i], - struct hclgevf_tqp, q); - snprintf(buff, ETH_GSTRING_LEN, "txq#%d_pktnum_rcd", + for (i = 0; i < kinfo->num_tqps; i++) { + struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i], + struct hclgevf_tqp, q); + snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd", tqp->index); buff += ETH_GSTRING_LEN; } - for (i = 0; i < hdev->num_tqps; i++) { - struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i], - struct hclgevf_tqp, q); - snprintf(buff, ETH_GSTRING_LEN, "rxq#%d_pktnum_rcd", + for (i = 0; i < kinfo->num_tqps; i++) { + struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i], + struct hclgevf_tqp, q); + snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd", tqp->index); buff += ETH_GSTRING_LEN; } @@ -182,7 +180,7 @@ static int hclgevf_get_tc_info(struct hclgevf_dev *hdev) return 0; } -static int hclge_get_queue_info(struct hclgevf_dev *hdev) +static int hclgevf_get_queue_info(struct hclgevf_dev *hdev) { #define HCLGEVF_TQPS_RSS_INFO_LEN 8 u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN]; @@ -299,6 +297,9 @@ void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state) client = handle->client; + link_state = + test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state; + if (link_state != hdev->hw.mac.link) { client->ops->link_status_change(handle, !!link_state); hdev->hw.mac.link = link_state; @@ -385,6 +386,47 @@ static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector) return -EINVAL; } +static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev, + const u8 hfunc, const u8 *key) +{ + struct hclgevf_rss_config_cmd *req; + struct hclgevf_desc desc; + int key_offset; + int key_size; + int ret; + + req = (struct hclgevf_rss_config_cmd *)desc.data; + + for (key_offset = 0; key_offset < 3; key_offset++) { + hclgevf_cmd_setup_basic_desc(&desc, + HCLGEVF_OPC_RSS_GENERIC_CONFIG, + false); + + req->hash_config |= (hfunc & HCLGEVF_RSS_HASH_ALGO_MASK); + req->hash_config |= + (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B); + + if (key_offset == 2) + key_size = + HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2; + else + key_size = HCLGEVF_RSS_HASH_KEY_NUM; + + memcpy(req->hash_key, + key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size); + + ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Configure RSS config fail, status = %d\n", + ret); + return ret; + } + } + + return 0; +} + static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle) { return HCLGEVF_RSS_KEY_SIZE; @@ -465,68 +507,40 @@ static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size) return status; } -static int hclgevf_get_rss_hw_cfg(struct hnae3_handle *handle, u8 *hash, - u8 *key) +static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key, + u8 *hfunc) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - struct hclgevf_rss_config_cmd *req; - int lkup_times = key ? 3 : 1; - struct hclgevf_desc desc; - int key_offset; - int key_size; - int status; - - req = (struct hclgevf_rss_config_cmd *)desc.data; - lkup_times = (lkup_times == 3) ? 3 : ((hash) ? 
1 : 0); - - for (key_offset = 0; key_offset < lkup_times; key_offset++) { - hclgevf_cmd_setup_basic_desc(&desc, - HCLGEVF_OPC_RSS_GENERIC_CONFIG, - true); - req->hash_config |= (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET); + struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; + int i; - status = hclgevf_cmd_send(&hdev->hw, &desc, 1); - if (status) { - dev_err(&hdev->pdev->dev, - "failed to get hardware RSS cfg, status = %d\n", - status); - return status; + if (handle->pdev->revision >= 0x21) { + /* Get hash algorithm */ + if (hfunc) { + switch (rss_cfg->hash_algo) { + case HCLGEVF_RSS_HASH_ALGO_TOEPLITZ: + *hfunc = ETH_RSS_HASH_TOP; + break; + case HCLGEVF_RSS_HASH_ALGO_SIMPLE: + *hfunc = ETH_RSS_HASH_XOR; + break; + default: + *hfunc = ETH_RSS_HASH_UNKNOWN; + break; + } } - if (key_offset == 2) - key_size = - HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2; - else - key_size = HCLGEVF_RSS_HASH_KEY_NUM; - + /* Get the RSS Key required by the user */ if (key) - memcpy(key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, - req->hash_key, - key_size); - } - - if (hash) { - if ((req->hash_config & 0xf) == HCLGEVF_RSS_HASH_ALGO_TOEPLITZ) - *hash = ETH_RSS_HASH_TOP; - else - *hash = ETH_RSS_HASH_UNKNOWN; + memcpy(key, rss_cfg->rss_hash_key, + HCLGEVF_RSS_KEY_SIZE); } - return 0; -} - -static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key, - u8 *hfunc) -{ - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; - int i; - if (indir) for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) indir[i] = rss_cfg->rss_indirection_tbl[i]; - return hclgevf_get_rss_hw_cfg(handle, hfunc, key); + return 0; } static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir, @@ -534,7 +548,36 @@ static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir, { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; - int i; + int ret, i; + + if (handle->pdev->revision >= 0x21) { + /* Set the RSS Hash Key if specififed by the user */ + if (key) { + switch (hfunc) { + case ETH_RSS_HASH_TOP: + rss_cfg->hash_algo = + HCLGEVF_RSS_HASH_ALGO_TOEPLITZ; + break; + case ETH_RSS_HASH_XOR: + rss_cfg->hash_algo = + HCLGEVF_RSS_HASH_ALGO_SIMPLE; + break; + case ETH_RSS_HASH_NO_CHANGE: + break; + default: + return -EINVAL; + } + + ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo, + key); + if (ret) + return ret; + + /* Update the shadow RSS key with user specified qids */ + memcpy(rss_cfg->rss_hash_key, key, + HCLGEVF_RSS_KEY_SIZE); + } + } /* update the shadow RSS table with user specified qids */ for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) @@ -544,6 +587,193 @@ static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir, return hclgevf_set_rss_indir_table(hdev); } +static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc) +{ + u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? 
HCLGEVF_S_PORT_BIT : 0; + + if (nfc->data & RXH_L4_B_2_3) + hash_sets |= HCLGEVF_D_PORT_BIT; + else + hash_sets &= ~HCLGEVF_D_PORT_BIT; + + if (nfc->data & RXH_IP_SRC) + hash_sets |= HCLGEVF_S_IP_BIT; + else + hash_sets &= ~HCLGEVF_S_IP_BIT; + + if (nfc->data & RXH_IP_DST) + hash_sets |= HCLGEVF_D_IP_BIT; + else + hash_sets &= ~HCLGEVF_D_IP_BIT; + + if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW) + hash_sets |= HCLGEVF_V_TAG_BIT; + + return hash_sets; +} + +static int hclgevf_set_rss_tuple(struct hnae3_handle *handle, + struct ethtool_rxnfc *nfc) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; + struct hclgevf_rss_input_tuple_cmd *req; + struct hclgevf_desc desc; + u8 tuple_sets; + int ret; + + if (handle->pdev->revision == 0x20) + return -EOPNOTSUPP; + + if (nfc->data & + ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)) + return -EINVAL; + + req = (struct hclgevf_rss_input_tuple_cmd *)desc.data; + hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false); + + req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en; + req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en; + req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en; + req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en; + req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en; + req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en; + req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en; + req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en; + + tuple_sets = hclgevf_get_rss_hash_bits(nfc); + switch (nfc->flow_type) { + case TCP_V4_FLOW: + req->ipv4_tcp_en = tuple_sets; + break; + case TCP_V6_FLOW: + req->ipv6_tcp_en = tuple_sets; + break; + case UDP_V4_FLOW: + req->ipv4_udp_en = tuple_sets; + break; + case UDP_V6_FLOW: + req->ipv6_udp_en = tuple_sets; + break; + case SCTP_V4_FLOW: + req->ipv4_sctp_en = tuple_sets; + break; + case SCTP_V6_FLOW: + if ((nfc->data & RXH_L4_B_0_1) || + (nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + + req->ipv6_sctp_en = tuple_sets; + break; + case IPV4_FLOW: + req->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; + break; + case IPV6_FLOW: + req->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; + break; + default: + return -EINVAL; + } + + ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Set rss tuple fail, status = %d\n", ret); + return ret; + } + + rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en; + rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en; + rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en; + rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en; + rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en; + rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en; + rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en; + rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en; + return 0; +} + +static int hclgevf_get_rss_tuple(struct hnae3_handle *handle, + struct ethtool_rxnfc *nfc) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; + u8 tuple_sets; + + if (handle->pdev->revision == 0x20) + return -EOPNOTSUPP; + + nfc->data = 0; + + switch (nfc->flow_type) { + case TCP_V4_FLOW: + tuple_sets = rss_cfg->rss_tuple_sets.ipv4_tcp_en; + break; + case UDP_V4_FLOW: + tuple_sets = rss_cfg->rss_tuple_sets.ipv4_udp_en; + break; + case TCP_V6_FLOW: + tuple_sets = rss_cfg->rss_tuple_sets.ipv6_tcp_en; + break; + case 
UDP_V6_FLOW: + tuple_sets = rss_cfg->rss_tuple_sets.ipv6_udp_en; + break; + case SCTP_V4_FLOW: + tuple_sets = rss_cfg->rss_tuple_sets.ipv4_sctp_en; + break; + case SCTP_V6_FLOW: + tuple_sets = rss_cfg->rss_tuple_sets.ipv6_sctp_en; + break; + case IPV4_FLOW: + case IPV6_FLOW: + tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT; + break; + default: + return -EINVAL; + } + + if (!tuple_sets) + return 0; + + if (tuple_sets & HCLGEVF_D_PORT_BIT) + nfc->data |= RXH_L4_B_2_3; + if (tuple_sets & HCLGEVF_S_PORT_BIT) + nfc->data |= RXH_L4_B_0_1; + if (tuple_sets & HCLGEVF_D_IP_BIT) + nfc->data |= RXH_IP_DST; + if (tuple_sets & HCLGEVF_S_IP_BIT) + nfc->data |= RXH_IP_SRC; + + return 0; +} + +static int hclgevf_set_rss_input_tuple(struct hclgevf_dev *hdev, + struct hclgevf_rss_cfg *rss_cfg) +{ + struct hclgevf_rss_input_tuple_cmd *req; + struct hclgevf_desc desc; + int ret; + + hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false); + + req = (struct hclgevf_rss_input_tuple_cmd *)desc.data; + + req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en; + req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en; + req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en; + req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en; + req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en; + req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en; + req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en; + req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en; + + ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "Configure rss input fail, status = %d\n", ret); + return ret; +} + static int hclgevf_get_tc_size(struct hnae3_handle *handle) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); @@ -735,138 +965,16 @@ static int hclgevf_get_queue_id(struct hnae3_queue *queue) static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle) { - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - struct hnae3_queue *queue; + struct hnae3_knic_private_info *kinfo = &handle->kinfo; struct hclgevf_tqp *tqp; int i; - for (i = 0; i < hdev->num_tqps; i++) { - queue = handle->kinfo.tqp[i]; - tqp = container_of(queue, struct hclgevf_tqp, q); + for (i = 0; i < kinfo->num_tqps; i++) { + tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats)); } } -static int hclgevf_cfg_func_mta_type(struct hclgevf_dev *hdev) -{ - u8 resp_msg = HCLGEVF_MTA_TYPE_SEL_MAX; - int ret; - - ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST, - HCLGE_MBX_MAC_VLAN_MTA_TYPE_READ, - NULL, 0, true, &resp_msg, sizeof(u8)); - - if (ret) { - dev_err(&hdev->pdev->dev, - "Read mta type fail, ret=%d.\n", ret); - return ret; - } - - if (resp_msg > HCLGEVF_MTA_TYPE_SEL_MAX) { - dev_err(&hdev->pdev->dev, - "Read mta type invalid, resp=%d.\n", resp_msg); - return -EINVAL; - } - - hdev->mta_mac_sel_type = resp_msg; - - return 0; -} - -static u16 hclgevf_get_mac_addr_to_mta_index(struct hclgevf_dev *hdev, - const u8 *addr) -{ - u32 rsh = HCLGEVF_MTA_TYPE_SEL_MAX - hdev->mta_mac_sel_type; - u16 high_val = addr[1] | (addr[0] << 8); - - return (high_val >> rsh) & 0xfff; -} - -static int hclgevf_do_update_mta_status(struct hclgevf_dev *hdev, - unsigned long *status) -{ -#define HCLGEVF_MTA_STATUS_MSG_SIZE 13 -#define HCLGEVF_MTA_STATUS_MSG_BITS \ - (HCLGEVF_MTA_STATUS_MSG_SIZE * BITS_PER_BYTE) -#define HCLGEVF_MTA_STATUS_MSG_END_BITS \ - (HCLGEVF_MTA_TBL_SIZE % HCLGEVF_MTA_STATUS_MSG_BITS) - u16 tbl_cnt; - u16 tbl_idx; - u8 
msg_cnt; - u8 msg_idx; - int ret; - - msg_cnt = DIV_ROUND_UP(HCLGEVF_MTA_TBL_SIZE, - HCLGEVF_MTA_STATUS_MSG_BITS); - tbl_idx = 0; - msg_idx = 0; - while (msg_cnt--) { - u8 msg[HCLGEVF_MTA_STATUS_MSG_SIZE + 1]; - u8 *p = &msg[1]; - u8 msg_ofs; - u8 msg_bit; - - memset(msg, 0, sizeof(msg)); - - /* set index field */ - msg[0] = 0x7F & msg_idx; - - /* set end flag field */ - if (msg_cnt == 0) { - msg[0] |= 0x80; - tbl_cnt = HCLGEVF_MTA_STATUS_MSG_END_BITS; - } else { - tbl_cnt = HCLGEVF_MTA_STATUS_MSG_BITS; - } - - /* set status field */ - msg_ofs = 0; - msg_bit = 0; - while (tbl_cnt--) { - if (test_bit(tbl_idx, status)) - p[msg_ofs] |= BIT(msg_bit); - - tbl_idx++; - - msg_bit++; - if (msg_bit == BITS_PER_BYTE) { - msg_bit = 0; - msg_ofs++; - } - } - - ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST, - HCLGE_MBX_MAC_VLAN_MTA_STATUS_UPDATE, - msg, sizeof(msg), false, NULL, 0); - if (ret) - break; - - msg_idx++; - } - - return ret; -} - -static int hclgevf_update_mta_status(struct hnae3_handle *handle) -{ - unsigned long mta_status[BITS_TO_LONGS(HCLGEVF_MTA_TBL_SIZE)]; - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - struct net_device *netdev = hdev->nic.kinfo.netdev; - struct netdev_hw_addr *ha; - u16 tbl_idx; - - /* clear status */ - memset(mta_status, 0, sizeof(mta_status)); - - /* update status from mc addr list */ - netdev_for_each_mc_addr(ha, netdev) { - tbl_idx = hclgevf_get_mac_addr_to_mta_index(hdev, ha->addr); - set_bit(tbl_idx, mta_status); - } - - return hclgevf_do_update_mta_status(hdev, mta_status); -} - static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); @@ -1341,8 +1449,10 @@ static int hclgevf_configure(struct hclgevf_dev *hdev) { int ret; + hdev->hw.mac.media_type = HNAE3_MEDIA_TYPE_NONE; + /* get queue configuration from PF */ - ret = hclge_get_queue_info(hdev); + ret = hclgevf_get_queue_info(hdev); if (ret) return ret; /* get tc configuration from PF */ @@ -1395,6 +1505,39 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev) rss_cfg->rss_size = hdev->rss_size_max; + if (hdev->pdev->revision >= 0x21) { + rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ; + netdev_rss_key_fill(rss_cfg->rss_hash_key, + HCLGEVF_RSS_KEY_SIZE); + + ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo, + rss_cfg->rss_hash_key); + if (ret) + return ret; + + rss_cfg->rss_tuple_sets.ipv4_tcp_en = + HCLGEVF_RSS_INPUT_TUPLE_OTHER; + rss_cfg->rss_tuple_sets.ipv4_udp_en = + HCLGEVF_RSS_INPUT_TUPLE_OTHER; + rss_cfg->rss_tuple_sets.ipv4_sctp_en = + HCLGEVF_RSS_INPUT_TUPLE_SCTP; + rss_cfg->rss_tuple_sets.ipv4_fragment_en = + HCLGEVF_RSS_INPUT_TUPLE_OTHER; + rss_cfg->rss_tuple_sets.ipv6_tcp_en = + HCLGEVF_RSS_INPUT_TUPLE_OTHER; + rss_cfg->rss_tuple_sets.ipv6_udp_en = + HCLGEVF_RSS_INPUT_TUPLE_OTHER; + rss_cfg->rss_tuple_sets.ipv6_sctp_en = + HCLGEVF_RSS_INPUT_TUPLE_SCTP; + rss_cfg->rss_tuple_sets.ipv6_fragment_en = + HCLGEVF_RSS_INPUT_TUPLE_OTHER; + + ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg); + if (ret) + return ret; + + } + /* Initialize RSS indirect table for each vport */ for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max; @@ -1417,12 +1560,13 @@ static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev) static int hclgevf_ae_start(struct hnae3_handle *handle) { + struct hnae3_knic_private_info *kinfo = &handle->kinfo; struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); int i, queue_id; - for (i = 0; i < handle->kinfo.num_tqps; i++) { 
+ for (i = 0; i < kinfo->num_tqps; i++) { /* ring enable */ - queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]); + queue_id = hclgevf_get_queue_id(kinfo->tqp[i]); if (queue_id < 0) { dev_warn(&hdev->pdev->dev, "Get invalid queue id, ignore it\n"); @@ -1445,12 +1589,15 @@ static int hclgevf_ae_start(struct hnae3_handle *handle) static void hclgevf_ae_stop(struct hnae3_handle *handle) { + struct hnae3_knic_private_info *kinfo = &handle->kinfo; struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); int i, queue_id; - for (i = 0; i < hdev->num_tqps; i++) { + set_bit(HCLGEVF_STATE_DOWN, &hdev->state); + + for (i = 0; i < kinfo->num_tqps; i++) { /* Ring disable */ - queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]); + queue_id = hclgevf_get_queue_id(kinfo->tqp[i]); if (queue_id < 0) { dev_warn(&hdev->pdev->dev, "Get invalid queue id, ignore it\n"); @@ -1619,17 +1766,22 @@ static int hclgevf_init_client_instance(struct hnae3_client *client, ret = client->ops->init_instance(&hdev->nic); if (ret) - return ret; + goto clear_nic; + + hnae3_set_client_init_flag(client, ae_dev, 1); if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) { struct hnae3_client *rc = hdev->roce_client; ret = hclgevf_init_roce_base_info(hdev); if (ret) - return ret; + goto clear_roce; ret = rc->ops->init_instance(&hdev->roce); if (ret) - return ret; + goto clear_roce; + + hnae3_set_client_init_flag(hdev->roce_client, ae_dev, + 1); } break; case HNAE3_CLIENT_UNIC: @@ -1638,7 +1790,9 @@ static int hclgevf_init_client_instance(struct hnae3_client *client, ret = client->ops->init_instance(&hdev->nic); if (ret) - return ret; + goto clear_nic; + + hnae3_set_client_init_flag(client, ae_dev, 1); break; case HNAE3_CLIENT_ROCE: if (hnae3_dev_roce_supported(hdev)) { @@ -1649,15 +1803,29 @@ static int hclgevf_init_client_instance(struct hnae3_client *client, if (hdev->roce_client && hdev->nic_client) { ret = hclgevf_init_roce_base_info(hdev); if (ret) - return ret; + goto clear_roce; ret = client->ops->init_instance(&hdev->roce); if (ret) - return ret; + goto clear_roce; } + + hnae3_set_client_init_flag(client, ae_dev, 1); + break; + default: + return -EINVAL; } return 0; + +clear_nic: + hdev->nic_client = NULL; + hdev->nic.client = NULL; + return ret; +clear_roce: + hdev->roce_client = NULL; + hdev->roce.client = NULL; + return ret; } static void hclgevf_uninit_client_instance(struct hnae3_client *client, @@ -1666,13 +1834,19 @@ static void hclgevf_uninit_client_instance(struct hnae3_client *client, struct hclgevf_dev *hdev = ae_dev->priv; /* un-init roce, if it exists */ - if (hdev->roce_client) + if (hdev->roce_client) { hdev->roce_client->ops->uninit_instance(&hdev->roce, 0); + hdev->roce_client = NULL; + hdev->roce.client = NULL; + } /* un-init nic/unic, if this was not called by roce client */ - if ((client->ops->uninit_instance) && - (client->type != HNAE3_CLIENT_ROCE)) + if (client->ops->uninit_instance && hdev->nic_client && + client->type != HNAE3_CLIENT_ROCE) { client->ops->uninit_instance(&hdev->nic, 0); + hdev->nic_client = NULL; + hdev->nic.client = NULL; + } } static int hclgevf_pci_init(struct hclgevf_dev *hdev) @@ -1839,14 +2013,6 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) goto err_config; } - /* Initialize mta type for this VF */ - ret = hclgevf_cfg_func_mta_type(hdev); - if (ret) { - dev_err(&hdev->pdev->dev, - "failed(%d) to initialize MTA type\n", ret); - goto err_config; - } - /* Initialize RSS for this VF */ ret = hclgevf_rss_init_hw(hdev); if (ret) { @@ -1943,11 +2109,11 @@ static void 
hclgevf_get_channels(struct hnae3_handle *handle, } static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle, - u16 *free_tqps, u16 *max_rss_size) + u16 *alloc_tqps, u16 *max_rss_size) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - *free_tqps = 0; + *alloc_tqps = hdev->num_tqps; *max_rss_size = hdev->rss_size_max; } @@ -1979,6 +2145,14 @@ void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed, hdev->hw.mac.duplex = duplex; } +static void hclgevf_get_media_type(struct hnae3_handle *handle, + u8 *media_type) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + if (media_type) + *media_type = hdev->hw.mac.media_type; +} + static const struct hnae3_ae_ops hclgevf_ops = { .init_ae_dev = hclgevf_init_ae_dev, .uninit_ae_dev = hclgevf_uninit_ae_dev, @@ -1998,7 +2172,6 @@ static const struct hnae3_ae_ops hclgevf_ops = { .rm_uc_addr = hclgevf_rm_uc_addr, .add_mc_addr = hclgevf_add_mc_addr, .rm_mc_addr = hclgevf_rm_mc_addr, - .update_mta_status = hclgevf_update_mta_status, .get_stats = hclgevf_get_stats, .update_stats = hclgevf_update_stats, .get_strings = hclgevf_get_strings, @@ -2007,6 +2180,8 @@ static const struct hnae3_ae_ops hclgevf_ops = { .get_rss_indir_size = hclgevf_get_rss_indir_size, .get_rss = hclgevf_get_rss, .set_rss = hclgevf_set_rss, + .get_rss_tuple = hclgevf_get_rss_tuple, + .set_rss_tuple = hclgevf_set_rss_tuple, .get_tc_size = hclgevf_get_tc_size, .get_fw_version = hclgevf_get_fw_version, .set_vlan_filter = hclgevf_set_vlan_filter, @@ -2016,6 +2191,7 @@ static const struct hnae3_ae_ops hclgevf_ops = { .get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info, .get_status = hclgevf_get_status, .get_ksettings_an_result = hclgevf_get_ksettings_an_result, + .get_media_type = hclgevf_get_media_type, }; static struct hnae3_ae_algo ae_algovf = { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h index b23ba171473c..aed241e8ffab 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h @@ -46,9 +46,13 @@ #define HCLGEVF_RSS_HASH_ALGO_MASK 0xf #define HCLGEVF_RSS_CFG_TBL_NUM \ (HCLGEVF_RSS_IND_TBL_SIZE / HCLGEVF_RSS_CFG_TBL_SIZE) - -#define HCLGEVF_MTA_TBL_SIZE 4096 -#define HCLGEVF_MTA_TYPE_SEL_MAX 4 +#define HCLGEVF_RSS_INPUT_TUPLE_OTHER GENMASK(3, 0) +#define HCLGEVF_RSS_INPUT_TUPLE_SCTP GENMASK(4, 0) +#define HCLGEVF_D_PORT_BIT BIT(0) +#define HCLGEVF_S_PORT_BIT BIT(1) +#define HCLGEVF_D_IP_BIT BIT(2) +#define HCLGEVF_S_IP_BIT BIT(3) +#define HCLGEVF_V_TAG_BIT BIT(4) /* states of hclgevf device & tasks */ enum hclgevf_states { @@ -66,6 +70,7 @@ enum hclgevf_states { #define HCLGEVF_MPF_ENBALE 1 struct hclgevf_mac { + u8 media_type; u8 mac_addr[ETH_ALEN]; int link; u8 duplex; @@ -108,12 +113,24 @@ struct hclgevf_cfg { u32 numa_node_map; }; +struct hclgevf_rss_tuple_cfg { + u8 ipv4_tcp_en; + u8 ipv4_udp_en; + u8 ipv4_sctp_en; + u8 ipv4_fragment_en; + u8 ipv6_tcp_en; + u8 ipv6_udp_en; + u8 ipv6_sctp_en; + u8 ipv6_fragment_en; +}; + struct hclgevf_rss_cfg { u8 rss_hash_key[HCLGEVF_RSS_KEY_SIZE]; /* user configured hash keys */ u32 hash_algo; u32 rss_size; u8 hw_tc_map; u8 rss_indirection_tbl[HCLGEVF_RSS_IND_TBL_SIZE]; /* shadow table */ + struct hclgevf_rss_tuple_cfg rss_tuple_sets; }; struct hclgevf_misc_vector { @@ -156,8 +173,6 @@ struct hclgevf_dev { u16 *vector_status; int *vector_irq; - bool accept_mta_mc; /* whether to accept mta filter multicast */ - u8 mta_mac_sel_type; bool 
mbx_event_pending; struct hclgevf_mbx_resp_status mbx_resp; /* mailbox response */ struct hclgevf_mbx_arq_ring arq; /* mailbox async rx queue */ diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index 03f64f40b2a3..3baabdc89726 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c @@ -778,12 +778,11 @@ static void check_sqs(struct ehea_port *port) { struct ehea_swqe *swqe; int swqe_index; - int i, k; + int i; for (i = 0; i < port->num_def_qps; i++) { struct ehea_port_res *pr = &port->port_res[i]; int ret; - k = 0; swqe = ehea_get_swqe(pr->qp, &swqe_index); memset(swqe, 0, SWQE_HEADER_SIZE); atomic_dec(&pr->swqe_avail); @@ -2027,7 +2026,7 @@ static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev, dev_consume_skb_any(skb); } -static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t ehea_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct ehea_port *port = netdev_priv(dev); struct ehea_swqe *swqe; diff --git a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c index a0820f72b25c..5e4e37132bf2 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c @@ -125,7 +125,7 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter, struct ehea_cq *cq; struct h_epa epa; u64 *cq_handle_ref, hret, rpage; - u32 act_nr_of_entries, act_pages, counter; + u32 counter; int ret; void *vpage; @@ -140,8 +140,6 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter, cq->adapter = adapter; cq_handle_ref = &cq->fw_handle; - act_nr_of_entries = 0; - act_pages = 0; hret = ehea_h_alloc_resource_cq(adapter->handle, &cq->attr, &cq->fw_handle, &cq->epas); diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index 129f4e9f38da..760b2ad8e295 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@ -423,7 +423,7 @@ static void emac_hash_mc(struct emac_instance *dev) { const int regs = EMAC_XAHT_REGS(dev); u32 *gaht_base = emac_gaht_base(dev); - u32 gaht_temp[regs]; + u32 gaht_temp[EMAC_XAHT_MAX_REGS]; struct netdev_hw_addr *ha; int i; @@ -1409,7 +1409,7 @@ static inline u16 emac_tx_csum(struct emac_instance *dev, return 0; } -static inline int emac_xmit_finish(struct emac_instance *dev, int len) +static inline netdev_tx_t emac_xmit_finish(struct emac_instance *dev, int len) { struct emac_regs __iomem *p = dev->emacp; struct net_device *ndev = dev->ndev; @@ -1436,7 +1436,7 @@ static inline int emac_xmit_finish(struct emac_instance *dev, int len) } /* Tx lock BH */ -static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev) +static netdev_tx_t emac_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct emac_instance *dev = netdev_priv(ndev); unsigned int len = skb->len; @@ -1494,7 +1494,8 @@ static inline int emac_xmit_split(struct emac_instance *dev, int slot, } /* Tx lock BH disabled (SG version for TAH equipped EMACs) */ -static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev) +static netdev_tx_t +emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev) { struct emac_instance *dev = netdev_priv(ndev); int nr_frags = skb_shinfo(skb)->nr_frags; @@ -2969,6 +2970,10 @@ static int emac_init_config(struct emac_instance *dev) dev->xaht_width_shift = EMAC4_XAHT_WIDTH_SHIFT; } + /* This should never happen */ + if (WARN_ON(EMAC_XAHT_REGS(dev) > 
EMAC_XAHT_MAX_REGS)) + return -ENXIO; + DBG(dev, "features : 0x%08x / 0x%08x\n", dev->features, EMAC_FTRS_POSSIBLE); DBG(dev, "tx_fifo_size : %d (%d gige)\n", dev->tx_fifo_size, dev->tx_fifo_size_gige); DBG(dev, "rx_fifo_size : %d (%d gige)\n", dev->rx_fifo_size, dev->rx_fifo_size_gige); diff --git a/drivers/net/ethernet/ibm/emac/core.h b/drivers/net/ethernet/ibm/emac/core.h index 369de2cfb15b..84caa4a3fc52 100644 --- a/drivers/net/ethernet/ibm/emac/core.h +++ b/drivers/net/ethernet/ibm/emac/core.h @@ -390,6 +390,9 @@ static inline int emac_has_feature(struct emac_instance *dev, #define EMAC4SYNC_XAHT_SLOTS_SHIFT 8 #define EMAC4SYNC_XAHT_WIDTH_SHIFT 5 +/* The largest span between slots and widths above is 3 */ +#define EMAC_XAHT_MAX_REGS (1 << 3) + #define EMAC_XAHT_SLOTS(dev) (1 << (dev)->xaht_slots_shift) #define EMAC_XAHT_WIDTH(dev) (1 << (dev)->xaht_width_shift) #define EMAC_XAHT_REGS(dev) (1 << ((dev)->xaht_slots_shift - \ diff --git a/drivers/net/ethernet/ibm/emac/mal.h b/drivers/net/ethernet/ibm/emac/mal.h index eeade2ea8334..e4c20f0024f6 100644 --- a/drivers/net/ethernet/ibm/emac/mal.h +++ b/drivers/net/ethernet/ibm/emac/mal.h @@ -136,7 +136,7 @@ static inline int mal_rx_size(int len) static inline int mal_tx_chunks(int len) { - return (len + MAL_MAX_TX_SIZE - 1) / MAL_MAX_TX_SIZE; + return DIV_ROUND_UP(len, MAL_MAX_TX_SIZE); } #define MAL_CHAN_MASK(n) (0x80000000 >> (n)) diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 525d8b89187b..a4681780a55d 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -24,7 +24,6 @@ */ #include <linux/module.h> -#include <linux/moduleparam.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/dma-mapping.h> diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 699ef942b615..7893beffcc71 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -1428,7 +1428,7 @@ static int ibmvnic_xmit_workarounds(struct sk_buff *skb, return 0; } -static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) +static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) { struct ibmvnic_adapter *adapter = netdev_priv(netdev); int queue_num = skb_get_queue_mapping(skb); @@ -1452,7 +1452,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) u64 *handle_array; int index = 0; u8 proto = 0; - int ret = 0; + netdev_tx_t ret = NETDEV_TX_OK; if (adapter->resetting) { if (!netif_subqueue_stopped(netdev, skb)) @@ -2348,8 +2348,13 @@ static void ibmvnic_get_ringparam(struct net_device *netdev, { struct ibmvnic_adapter *adapter = netdev_priv(netdev); - ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq; - ring->tx_max_pending = adapter->max_tx_entries_per_subcrq; + if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) { + ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq; + ring->tx_max_pending = adapter->max_tx_entries_per_subcrq; + } else { + ring->rx_max_pending = IBMVNIC_MAX_QUEUE_SZ; + ring->tx_max_pending = IBMVNIC_MAX_QUEUE_SZ; + } ring->rx_mini_max_pending = 0; ring->rx_jumbo_max_pending = 0; ring->rx_pending = adapter->req_rx_add_entries_per_subcrq; @@ -2362,21 +2367,23 @@ static int ibmvnic_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) { struct ibmvnic_adapter *adapter = netdev_priv(netdev); + int ret; - if (ring->rx_pending > adapter->max_rx_add_entries_per_subcrq || - ring->tx_pending > 
adapter->max_tx_entries_per_subcrq) { - netdev_err(netdev, "Invalid request.\n"); - netdev_err(netdev, "Max tx buffers = %llu\n", - adapter->max_rx_add_entries_per_subcrq); - netdev_err(netdev, "Max rx buffers = %llu\n", - adapter->max_tx_entries_per_subcrq); - return -EINVAL; - } - + ret = 0; adapter->desired.rx_entries = ring->rx_pending; adapter->desired.tx_entries = ring->tx_pending; - return wait_for_reset(adapter); + ret = wait_for_reset(adapter); + + if (!ret && + (adapter->req_rx_add_entries_per_subcrq != ring->rx_pending || + adapter->req_tx_entries_per_subcrq != ring->tx_pending)) + netdev_info(netdev, + "Could not match full ringsize request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n", + ring->rx_pending, ring->tx_pending, + adapter->req_rx_add_entries_per_subcrq, + adapter->req_tx_entries_per_subcrq); + return ret; } static void ibmvnic_get_channels(struct net_device *netdev, @@ -2384,8 +2391,14 @@ static void ibmvnic_get_channels(struct net_device *netdev, { struct ibmvnic_adapter *adapter = netdev_priv(netdev); - channels->max_rx = adapter->max_rx_queues; - channels->max_tx = adapter->max_tx_queues; + if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) { + channels->max_rx = adapter->max_rx_queues; + channels->max_tx = adapter->max_tx_queues; + } else { + channels->max_rx = IBMVNIC_MAX_QUEUES; + channels->max_tx = IBMVNIC_MAX_QUEUES; + } + channels->max_other = 0; channels->max_combined = 0; channels->rx_count = adapter->req_rx_queues; @@ -2398,11 +2411,23 @@ static int ibmvnic_set_channels(struct net_device *netdev, struct ethtool_channels *channels) { struct ibmvnic_adapter *adapter = netdev_priv(netdev); + int ret; + ret = 0; adapter->desired.rx_queues = channels->rx_count; adapter->desired.tx_queues = channels->tx_count; - return wait_for_reset(adapter); + ret = wait_for_reset(adapter); + + if (!ret && + (adapter->req_rx_queues != channels->rx_count || + adapter->req_tx_queues != channels->tx_count)) + netdev_info(netdev, + "Could not match full channels request. 
Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n", + channels->rx_count, channels->tx_count, + adapter->req_rx_queues, adapter->req_tx_queues); + return ret; + } static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data) @@ -2410,32 +2435,43 @@ static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data) struct ibmvnic_adapter *adapter = netdev_priv(dev); int i; - if (stringset != ETH_SS_STATS) - return; + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); + i++, data += ETH_GSTRING_LEN) + memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN); - for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN) - memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN); + for (i = 0; i < adapter->req_tx_queues; i++) { + snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i); + data += ETH_GSTRING_LEN; - for (i = 0; i < adapter->req_tx_queues; i++) { - snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i); - data += ETH_GSTRING_LEN; + snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i); + data += ETH_GSTRING_LEN; - snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i); - data += ETH_GSTRING_LEN; + snprintf(data, ETH_GSTRING_LEN, + "tx%d_dropped_packets", i); + data += ETH_GSTRING_LEN; + } - snprintf(data, ETH_GSTRING_LEN, "tx%d_dropped_packets", i); - data += ETH_GSTRING_LEN; - } + for (i = 0; i < adapter->req_rx_queues; i++) { + snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i); + data += ETH_GSTRING_LEN; - for (i = 0; i < adapter->req_rx_queues; i++) { - snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i); - data += ETH_GSTRING_LEN; + snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i); + data += ETH_GSTRING_LEN; - snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i); - data += ETH_GSTRING_LEN; + snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i); + data += ETH_GSTRING_LEN; + } + break; - snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i); - data += ETH_GSTRING_LEN; + case ETH_SS_PRIV_FLAGS: + for (i = 0; i < ARRAY_SIZE(ibmvnic_priv_flags); i++) + strcpy(data + i * ETH_GSTRING_LEN, + ibmvnic_priv_flags[i]); + break; + default: + return; } } @@ -2448,6 +2484,8 @@ static int ibmvnic_get_sset_count(struct net_device *dev, int sset) return ARRAY_SIZE(ibmvnic_stats) + adapter->req_tx_queues * NUM_TX_STATS + adapter->req_rx_queues * NUM_RX_STATS; + case ETH_SS_PRIV_FLAGS: + return ARRAY_SIZE(ibmvnic_priv_flags); default: return -EOPNOTSUPP; } @@ -2498,6 +2536,25 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev, } } +static u32 ibmvnic_get_priv_flags(struct net_device *netdev) +{ + struct ibmvnic_adapter *adapter = netdev_priv(netdev); + + return adapter->priv_flags; +} + +static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags) +{ + struct ibmvnic_adapter *adapter = netdev_priv(netdev); + bool which_maxes = !!(flags & IBMVNIC_USE_SERVER_MAXES); + + if (which_maxes) + adapter->priv_flags |= IBMVNIC_USE_SERVER_MAXES; + else + adapter->priv_flags &= ~IBMVNIC_USE_SERVER_MAXES; + + return 0; +} static const struct ethtool_ops ibmvnic_ethtool_ops = { .get_drvinfo = ibmvnic_get_drvinfo, .get_msglevel = ibmvnic_get_msglevel, @@ -2511,6 +2568,8 @@ static const struct ethtool_ops ibmvnic_ethtool_ops = { .get_sset_count = ibmvnic_get_sset_count, .get_ethtool_stats = ibmvnic_get_ethtool_stats, .get_link_ksettings = ibmvnic_get_link_ksettings, + .get_priv_flags = ibmvnic_get_priv_flags, + .set_priv_flags = ibmvnic_set_priv_flags, }; /* Routines for managing CRQs/sCRQs */ diff --git 
a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index f06eec145ca6..18103b811d4d 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -39,7 +39,8 @@ #define IBMVNIC_RX_WEIGHT 16 /* when changing this, update IBMVNIC_IO_ENTITLEMENT_DEFAULT */ #define IBMVNIC_BUFFS_PER_POOL 100 -#define IBMVNIC_MAX_QUEUES 10 +#define IBMVNIC_MAX_QUEUES 16 +#define IBMVNIC_MAX_QUEUE_SZ 4096 #define IBMVNIC_TSO_BUF_SZ 65536 #define IBMVNIC_TSO_BUFS 64 @@ -48,6 +49,11 @@ #define IBMVNIC_MAX_LTB_SIZE ((1 << (MAX_ORDER - 1)) * PAGE_SIZE) #define IBMVNIC_BUFFER_HLEN 500 +static const char ibmvnic_priv_flags[][ETH_GSTRING_LEN] = { +#define IBMVNIC_USE_SERVER_MAXES 0x1 + "use-server-maxes" +}; + struct ibmvnic_login_buffer { __be32 len; __be32 version; @@ -969,6 +975,7 @@ struct ibmvnic_adapter { struct ibmvnic_control_ip_offload_buffer ip_offload_ctrl; dma_addr_t ip_offload_ctrl_tok; u32 msg_enable; + u32 priv_flags; /* Vital Product Data (VPD) */ struct ibmvnic_vpd *vpd; diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index 1ab613eb5796..b542aba6f0e8 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig @@ -235,20 +235,27 @@ config I40E_DCB If unsure, say N. +# this is here to allow seamless migration from I40EVF --> IAVF name +# so that CONFIG_IAVF symbol will always mirror the state of CONFIG_I40EVF +config IAVF + tristate config I40EVF tristate "Intel(R) Ethernet Adaptive Virtual Function support" + select IAVF depends on PCI_MSI ---help--- This driver supports virtual functions for Intel XL710, - X710, X722, and all devices advertising support for Intel - Ethernet Adaptive Virtual Function devices. For more + X710, X722, XXV710, and all devices advertising support for + Intel Ethernet Adaptive Virtual Function devices. For more information on how to identify your adapter, go to the Adapter & Driver ID Guide that can be located at: - <http://support.intel.com> + <https://support.intel.com> + + This driver was formerly named i40evf. To compile this driver as a module, choose M here. The module - will be called i40evf. MSI-X interrupt support is required + will be called iavf. MSI-X interrupt support is required for this driver to work correctly. 
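The ibmvnic private-flags hunks above follow the standard ethtool contract: the driver publishes one name per flag bit via ETH_SS_PRIV_FLAGS (the ibmvnic_priv_flags table, counted by get_sset_count and copied out by get_strings), while get_priv_flags/set_priv_flags exchange the corresponding bit vector with userspace. A minimal sketch of that pattern, with a hypothetical demo_* driver standing in for ibmvnic (everything except the ethtool callback signatures is illustrative, not taken from the patch):

#include <linux/bits.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>

/* One string per flag; index N in this table corresponds to BIT(N)
 * in the u32 exchanged with ethtool.
 */
static const char demo_priv_flags[][ETH_GSTRING_LEN] = {
	"use-server-maxes",	/* BIT(0) */
};

struct demo_adapter {
	u32 priv_flags;		/* driver-private flag word */
};

static u32 demo_get_priv_flags(struct net_device *netdev)
{
	struct demo_adapter *adapter = netdev_priv(netdev);

	return adapter->priv_flags;
}

static int demo_set_priv_flags(struct net_device *netdev, u32 flags)
{
	struct demo_adapter *adapter = netdev_priv(netdev);

	/* Refuse any bit that has no name in demo_priv_flags. */
	if (flags & ~(BIT(ARRAY_SIZE(demo_priv_flags)) - 1))
		return -EOPNOTSUPP;

	adapter->priv_flags = flags;
	return 0;
}

With those two hooks wired into the driver's ethtool_ops, userspace toggles the flag by name, e.g. ethtool --set-priv-flags eth0 use-server-maxes on, and reads it back with ethtool --show-priv-flags eth0.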
config ICE diff --git a/drivers/net/ethernet/intel/Makefile b/drivers/net/ethernet/intel/Makefile index 807a4f8c7e4e..b91153df6ee8 100644 --- a/drivers/net/ethernet/intel/Makefile +++ b/drivers/net/ethernet/intel/Makefile @@ -12,6 +12,6 @@ obj-$(CONFIG_IXGBE) += ixgbe/ obj-$(CONFIG_IXGBEVF) += ixgbevf/ obj-$(CONFIG_I40E) += i40e/ obj-$(CONFIG_IXGB) += ixgb/ -obj-$(CONFIG_I40EVF) += i40evf/ +obj-$(CONFIG_IAVF) += iavf/ obj-$(CONFIG_FM10K) += fm10k/ obj-$(CONFIG_ICE) += ice/ diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index 27d5f27163d2..7c4b55482f72 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c @@ -164,7 +164,7 @@ MODULE_DESCRIPTION(DRV_DESCRIPTION); MODULE_AUTHOR(DRV_COPYRIGHT); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); MODULE_VERSION(DRV_VERSION); MODULE_FIRMWARE(FIRMWARE_D101M); MODULE_FIRMWARE(FIRMWARE_D101S); diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index 2110d5f2da19..43b6d3cec3b3 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -195,7 +195,7 @@ static struct pci_driver e1000_driver = { MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver"); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); MODULE_VERSION(DRV_VERSION); #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) @@ -2433,7 +2433,6 @@ static void e1000_watchdog(struct work_struct *work) if (link) { if (!netif_carrier_ok(netdev)) { u32 ctrl; - bool txb2b = true; /* update snapshot of PHY registers on LSC */ e1000_get_speed_and_duplex(hw, &adapter->link_speed, @@ -2455,11 +2454,9 @@ static void e1000_watchdog(struct work_struct *work) adapter->tx_timeout_factor = 1; switch (adapter->link_speed) { case SPEED_10: - txb2b = false; adapter->tx_timeout_factor = 16; break; case SPEED_100: - txb2b = false; /* maybe add some timeout factor ? 
*/ break; } diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 3ba0c90e7055..c0f9faca70c4 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -7592,7 +7592,7 @@ module_exit(e1000_exit_module); MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver"); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); MODULE_VERSION(DRV_VERSION); /* netdev.c */ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index 3f536541f45f..503bbc017792 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -21,7 +21,7 @@ static const char fm10k_copyright[] = MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); MODULE_DESCRIPTION(DRV_SUMMARY); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); MODULE_VERSION(DRV_VERSION); /* single workqueue for entire fm10k driver */ diff --git a/drivers/net/ethernet/intel/i40e/Makefile b/drivers/net/ethernet/intel/i40e/Makefile index 14397e7e9925..50590e8d1fd1 100644 --- a/drivers/net/ethernet/intel/i40e/Makefile +++ b/drivers/net/ethernet/intel/i40e/Makefile @@ -22,6 +22,7 @@ i40e-objs := i40e_main.o \ i40e_txrx.o \ i40e_ptp.o \ i40e_client.o \ - i40e_virtchnl_pf.o + i40e_virtchnl_pf.o \ + i40e_xsk.o i40e-$(CONFIG_I40E_DCB) += i40e_dcb.o i40e_dcb_nl.o diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 7a80652e2500..876cac317e79 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -786,6 +786,11 @@ struct i40e_vsi { /* VSI specific handlers */ irqreturn_t (*irq_handler)(int irq, void *data); + + /* AF_XDP zero-copy */ + struct xdp_umem **xsk_umems; + u16 num_xsk_umems_used; + u16 num_xsk_umems; } ____cacheline_internodealigned_in_smp; struct i40e_netdev_priv { @@ -1090,6 +1095,20 @@ static inline bool i40e_enabled_xdp_vsi(struct i40e_vsi *vsi) return !!vsi->xdp_prog; } +static inline struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring) +{ + bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi); + int qid = ring->queue_index; + + if (ring_is_xdp(ring)) + qid -= ring->vsi->alloc_queue_pairs; + + if (!ring->vsi->xsk_umems || !ring->vsi->xsk_umems[qid] || !xdp_on) + return NULL; + + return ring->vsi->xsk_umems[qid]; +} + int i40e_create_queue_channel(struct i40e_vsi *vsi, struct i40e_channel *ch); int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate); int i40e_add_del_cloud_filter(struct i40e_vsi *vsi, diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 56b911a5dd8b..a20d1cf058ad 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -132,8 +132,6 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) dev_info(&pf->pdev->dev, " vlan_features = 0x%08lx\n", (unsigned long int)nd->vlan_features); } - dev_info(&pf->pdev->dev, " active_vlans is %s\n", - vsi->active_vlans ? 
"<valid>" : "<null>"); dev_info(&pf->pdev->dev, " flags = 0x%08lx, netdev_registered = %i, current_netdev_flags = 0x%04x\n", vsi->flags, vsi->netdev_registered, vsi->current_netdev_flags); diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 5ff6caa83948..9f8464f80783 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -5,26 +5,227 @@ #include "i40e.h" #include "i40e_diag.h" +#include "i40e_txrx_common.h" +/* ethtool statistics helpers */ + +/** + * struct i40e_stats - definition for an ethtool statistic + * @stat_string: statistic name to display in ethtool -S output + * @sizeof_stat: the sizeof() the stat, must be no greater than sizeof(u64) + * @stat_offset: offsetof() the stat from a base pointer + * + * This structure defines a statistic to be added to the ethtool stats buffer. + * It defines a statistic as offset from a common base pointer. Stats should + * be defined in constant arrays using the I40E_STAT macro, with every element + * of the array using the same _type for calculating the sizeof_stat and + * stat_offset. + * + * The @sizeof_stat is expected to be sizeof(u8), sizeof(u16), sizeof(u32) or + * sizeof(u64). Other sizes are not expected and will produce a WARN_ONCE from + * the i40e_add_ethtool_stat() helper function. + * + * The @stat_string is interpreted as a format string, allowing formatted + * values to be inserted while looping over multiple structures for a given + * statistics array. Thus, every statistic string in an array should have the + * same type and number of format specifiers, to be formatted by variadic + * arguments to the i40e_add_stat_string() helper function. + **/ struct i40e_stats { - /* The stat_string is expected to be a format string formatted using - * vsnprintf by i40e_add_stat_strings. Every member of a stats array - * should use the same format specifiers as they will be formatted - * using the same variadic arguments. - */ char stat_string[ETH_GSTRING_LEN]; int sizeof_stat; int stat_offset; }; +/* Helper macro to define an i40e_stat structure with proper size and type. + * Use this when defining constant statistics arrays. Note that @_type expects + * only a type name and is used multiple times. + */ #define I40E_STAT(_type, _name, _stat) { \ .stat_string = _name, \ .sizeof_stat = FIELD_SIZEOF(_type, _stat), \ .stat_offset = offsetof(_type, _stat) \ } +/* Helper macro for defining some statistics directly copied from the netdev + * stats structure. + */ #define I40E_NETDEV_STAT(_net_stat) \ I40E_STAT(struct rtnl_link_stats64, #_net_stat, _net_stat) + +/* Helper macro for defining some statistics related to queues */ +#define I40E_QUEUE_STAT(_name, _stat) \ + I40E_STAT(struct i40e_ring, _name, _stat) + +/* Stats associated with a Tx or Rx ring */ +static const struct i40e_stats i40e_gstrings_queue_stats[] = { + I40E_QUEUE_STAT("%s-%u.packets", stats.packets), + I40E_QUEUE_STAT("%s-%u.bytes", stats.bytes), +}; + +/** + * i40e_add_one_ethtool_stat - copy the stat into the supplied buffer + * @data: location to store the stat value + * @pointer: basis for where to copy from + * @stat: the stat definition + * + * Copies the stat data defined by the pointer and stat structure pair into + * the memory supplied as data. Used to implement i40e_add_ethtool_stats and + * i40e_add_queue_stats. If the pointer is null, data will be zero'd. 
+ */ +static void +i40e_add_one_ethtool_stat(u64 *data, void *pointer, + const struct i40e_stats *stat) +{ + char *p; + + if (!pointer) { + /* ensure that the ethtool data buffer is zero'd for any stats + * which don't have a valid pointer. + */ + *data = 0; + return; + } + + p = (char *)pointer + stat->stat_offset; + switch (stat->sizeof_stat) { + case sizeof(u64): + *data = *((u64 *)p); + break; + case sizeof(u32): + *data = *((u32 *)p); + break; + case sizeof(u16): + *data = *((u16 *)p); + break; + case sizeof(u8): + *data = *((u8 *)p); + break; + default: + WARN_ONCE(1, "unexpected stat size for %s", + stat->stat_string); + *data = 0; + } +} + +/** + * __i40e_add_ethtool_stats - copy stats into the ethtool supplied buffer + * @data: ethtool stats buffer + * @pointer: location to copy stats from + * @stats: array of stats to copy + * @size: the size of the stats definition + * + * Copy the stats defined by the stats array using the pointer as a base into + * the data buffer supplied by ethtool. Updates the data pointer to point to + * the next empty location for successive calls to __i40e_add_ethtool_stats. + * If pointer is null, set the data values to zero and update the pointer to + * skip these stats. + **/ +static void +__i40e_add_ethtool_stats(u64 **data, void *pointer, + const struct i40e_stats stats[], + const unsigned int size) +{ + unsigned int i; + + for (i = 0; i < size; i++) + i40e_add_one_ethtool_stat((*data)++, pointer, &stats[i]); +} + +/** + * i40e_add_ethtool_stats - copy stats into ethtool supplied buffer + * @data: ethtool stats buffer + * @pointer: location where stats are stored + * @stats: static const array of stat definitions + * + * Macro to ease the use of __i40e_add_ethtool_stats by taking a static + * constant stats array and passing the ARRAY_SIZE(). This avoids typos by + * ensuring that we pass the size associated with the given stats array. + * + * The parameter @stats is evaluated twice, so parameters with side effects + * should be avoided. + **/ +#define i40e_add_ethtool_stats(data, pointer, stats) \ + __i40e_add_ethtool_stats(data, pointer, stats, ARRAY_SIZE(stats)) + +/** + * i40e_add_queue_stats - copy queue statistics into supplied buffer + * @data: ethtool stats buffer + * @ring: the ring to copy + * + * Queue statistics must be copied while protected by + * u64_stats_fetch_begin_irq, so we can't directly use i40e_add_ethtool_stats. + * Assumes that queue stats are defined in i40e_gstrings_queue_stats. If the + * ring pointer is null, zero out the queue stat values and update the data + * pointer. Otherwise safely copy the stats from the ring into the supplied + * buffer and update the data pointer when finished. + * + * This function expects to be called while under rcu_read_lock(). + **/ +static void +i40e_add_queue_stats(u64 **data, struct i40e_ring *ring) +{ + const unsigned int size = ARRAY_SIZE(i40e_gstrings_queue_stats); + const struct i40e_stats *stats = i40e_gstrings_queue_stats; + unsigned int start; + unsigned int i; + + /* To avoid invalid statistics values, ensure that we keep retrying + * the copy until we get a consistent value according to + * u64_stats_fetch_retry_irq. But first, make sure our ring is + * non-null before attempting to access its syncp. + */ + do { + start = !ring ? 
0 : u64_stats_fetch_begin_irq(&ring->syncp); + for (i = 0; i < size; i++) { + i40e_add_one_ethtool_stat(&(*data)[i], ring, + &stats[i]); + } + } while (ring && u64_stats_fetch_retry_irq(&ring->syncp, start)); + + /* Once we successfully copy the stats in, update the data pointer */ + *data += size; +} + +/** + * __i40e_add_stat_strings - copy stat strings into ethtool buffer + * @p: ethtool supplied buffer + * @stats: stat definitions array + * @size: size of the stats array + * + * Format and copy the strings described by stats into the buffer pointed at + * by p. + **/ +static void __i40e_add_stat_strings(u8 **p, const struct i40e_stats stats[], + const unsigned int size, ...) +{ + unsigned int i; + + for (i = 0; i < size; i++) { + va_list args; + + va_start(args, size); + vsnprintf(*p, ETH_GSTRING_LEN, stats[i].stat_string, args); + *p += ETH_GSTRING_LEN; + va_end(args); + } +} + +/** + * i40e_add_stat_strings - copy stat strings into ethtool buffer + * @p: ethtool supplied buffer + * @stats: stat definitions array + * + * Format and copy the strings described by the const static stats value into + * the buffer pointed at by p. + * + * The parameter @stats is evaluated twice, so parameters with side effects + * should be avoided. Additionally, stats must be an array such that + * ARRAY_SIZE can be called on it. + **/ +#define i40e_add_stat_strings(p, stats, ...) \ + __i40e_add_stat_strings(p, stats, ARRAY_SIZE(stats), ## __VA_ARGS__) + #define I40E_PF_STAT(_name, _stat) \ I40E_STAT(struct i40e_pf, _name, _stat) #define I40E_VSI_STAT(_name, _stat) \ @@ -33,6 +234,8 @@ struct i40e_stats { I40E_STAT(struct i40e_veb, _name, _stat) #define I40E_PFC_STAT(_name, _stat) \ I40E_STAT(struct i40e_pfc_stats, _name, _stat) +#define I40E_QUEUE_STAT(_name, _stat) \ + I40E_STAT(struct i40e_ring, _name, _stat) static const struct i40e_stats i40e_gstrings_net_stats[] = { I40E_NETDEV_STAT(rx_packets), @@ -171,20 +374,11 @@ static const struct i40e_stats i40e_gstrings_pfc_stats[] = { I40E_PFC_STAT("port.rx_priority_%u_xon_2_xoff", priority_xon_2_xoff), }; -/* We use num_tx_queues here as a proxy for the maximum number of queues - * available because we always allocate queues symmetrically. 
- */ -#define I40E_MAX_NUM_QUEUES(n) ((n)->num_tx_queues) -#define I40E_QUEUE_STATS_LEN(n) \ - (I40E_MAX_NUM_QUEUES(n) \ - * 2 /* Tx and Rx together */ \ - * (sizeof(struct i40e_queue_stats) / sizeof(u64))) -#define I40E_GLOBAL_STATS_LEN ARRAY_SIZE(i40e_gstrings_stats) #define I40E_NETDEV_STATS_LEN ARRAY_SIZE(i40e_gstrings_net_stats) + #define I40E_MISC_STATS_LEN ARRAY_SIZE(i40e_gstrings_misc_stats) -#define I40E_VSI_STATS_LEN(n) (I40E_NETDEV_STATS_LEN + \ - I40E_MISC_STATS_LEN + \ - I40E_QUEUE_STATS_LEN((n))) + +#define I40E_VSI_STATS_LEN (I40E_NETDEV_STATS_LEN + I40E_MISC_STATS_LEN) #define I40E_PFC_STATS_LEN (ARRAY_SIZE(i40e_gstrings_pfc_stats) * \ I40E_MAX_USER_PRIORITY) @@ -193,10 +387,15 @@ static const struct i40e_stats i40e_gstrings_pfc_stats[] = { (ARRAY_SIZE(i40e_gstrings_veb_tc_stats) * \ I40E_MAX_TRAFFIC_CLASS)) -#define I40E_PF_STATS_LEN(n) (I40E_GLOBAL_STATS_LEN + \ +#define I40E_GLOBAL_STATS_LEN ARRAY_SIZE(i40e_gstrings_stats) + +#define I40E_PF_STATS_LEN (I40E_GLOBAL_STATS_LEN + \ I40E_PFC_STATS_LEN + \ I40E_VEB_STATS_LEN + \ - I40E_VSI_STATS_LEN((n))) + I40E_VSI_STATS_LEN) + +/* Length of stats for a single queue */ +#define I40E_QUEUE_STATS_LEN ARRAY_SIZE(i40e_gstrings_queue_stats) enum i40e_ethtool_test_id { I40E_ETH_TEST_REG = 0, @@ -1512,6 +1711,13 @@ static int i40e_set_ringparam(struct net_device *netdev, (new_rx_count == vsi->rx_rings[0]->count)) return 0; + /* If there is an AF_XDP UMEM attached to any of the Rx rings, + * disallow changing the number of descriptors -- regardless + * of whether the netdev is running or not. + */ + if (i40e_xsk_any_rx_ring_enabled(vsi)) + return -EBUSY; + while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) { timeout--; if (!timeout) @@ -1701,11 +1907,30 @@ static int i40e_get_stats_count(struct net_device *netdev) struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; + int stats_len; if (vsi == pf->vsi[pf->lan_vsi] && pf->hw.partition_id == 1) - return I40E_PF_STATS_LEN(netdev); + stats_len = I40E_PF_STATS_LEN; else - return I40E_VSI_STATS_LEN(netdev); + stats_len = I40E_VSI_STATS_LEN; + + /* The number of stats reported for a given net_device must remain + * constant throughout the life of that device. + * + * This is because the API for obtaining the size, strings, and stats + * is spread out over three separate ethtool ioctls. There is no safe + * way to lock the number of stats across these calls, so we must + * assume that they will never change. + * + * Due to this, we report the maximum number of queues, even if not + * every queue is currently configured. Since we always allocate + * queues in pairs, we'll just use netdev->num_tx_queues * 2. This + * works because num_tx_queues is set at device creation and never + * changes. + */ + stats_len += I40E_QUEUE_STATS_LEN * 2 * netdev->num_tx_queues; + + return stats_len; } static int i40e_get_sset_count(struct net_device *netdev, int sset) @@ -1728,89 +1953,6 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset) } /** - * i40e_add_one_ethtool_stat - copy the stat into the supplied buffer - * @data: location to store the stat value - * @pointer: basis for where to copy from - * @stat: the stat definition - * - * Copies the stat data defined by the pointer and stat structure pair into - * the memory supplied as data. Used to implement i40e_add_ethtool_stats. - * If the pointer is null, data will be zero'd. 
- */ -static inline void -i40e_add_one_ethtool_stat(u64 *data, void *pointer, - const struct i40e_stats *stat) -{ - char *p; - - if (!pointer) { - /* ensure that the ethtool data buffer is zero'd for any stats - * which don't have a valid pointer. - */ - *data = 0; - return; - } - - p = (char *)pointer + stat->stat_offset; - switch (stat->sizeof_stat) { - case sizeof(u64): - *data = *((u64 *)p); - break; - case sizeof(u32): - *data = *((u32 *)p); - break; - case sizeof(u16): - *data = *((u16 *)p); - break; - case sizeof(u8): - *data = *((u8 *)p); - break; - default: - WARN_ONCE(1, "unexpected stat size for %s", - stat->stat_string); - *data = 0; - } -} - -/** - * __i40e_add_ethtool_stats - copy stats into the ethtool supplied buffer - * @data: ethtool stats buffer - * @pointer: location to copy stats from - * @stats: array of stats to copy - * @size: the size of the stats definition - * - * Copy the stats defined by the stats array using the pointer as a base into - * the data buffer supplied by ethtool. Updates the data pointer to point to - * the next empty location for successive calls to __i40e_add_ethtool_stats. - * If pointer is null, set the data values to zero and update the pointer to - * skip these stats. - **/ -static inline void -__i40e_add_ethtool_stats(u64 **data, void *pointer, - const struct i40e_stats stats[], - const unsigned int size) -{ - unsigned int i; - - for (i = 0; i < size; i++) - i40e_add_one_ethtool_stat((*data)++, pointer, &stats[i]); -} - -/** - * i40e_add_ethtool_stats - copy stats into ethtool supplied buffer - * @data: ethtool stats buffer - * @pointer: location where stats are stored - * @stats: static const array of stat definitions - * - * Macro to ease the use of __i40e_add_ethtool_stats by taking a static - * constant stats array and passing the ARRAY_SIZE(). This avoids typos by - * ensuring that we pass the size associated with the given stats array. - * Assumes that stats is an array. 
- **/ -#define i40e_add_ethtool_stats(data, pointer, stats) \ - __i40e_add_ethtool_stats(data, pointer, stats, ARRAY_SIZE(stats)) - -/** * i40e_get_pfc_stats - copy HW PFC statistics to formatted structure * @pf: the PF device structure * @i: the priority value to copy @@ -1853,12 +1995,10 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data) { struct i40e_netdev_priv *np = netdev_priv(netdev); - struct i40e_ring *tx_ring, *rx_ring; struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; struct i40e_veb *veb = pf->veb[pf->lan_veb]; unsigned int i; - unsigned int start; bool veb_stats; u64 *p = data; @@ -1870,38 +2010,12 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, i40e_add_ethtool_stats(&data, vsi, i40e_gstrings_misc_stats); rcu_read_lock(); - for (i = 0; i < I40E_MAX_NUM_QUEUES(netdev) ; i++) { - tx_ring = READ_ONCE(vsi->tx_rings[i]); - - if (!tx_ring) { - /* Bump the stat counter to skip these stats, and make - * sure the memory is zero'd - */ - *(data++) = 0; - *(data++) = 0; - *(data++) = 0; - *(data++) = 0; - continue; - } - - /* process Tx ring statistics */ - do { - start = u64_stats_fetch_begin_irq(&tx_ring->syncp); - data[0] = tx_ring->stats.packets; - data[1] = tx_ring->stats.bytes; - } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start)); - data += 2; - - /* Rx ring is the 2nd half of the queue pair */ - rx_ring = &tx_ring[1]; - do { - start = u64_stats_fetch_begin_irq(&rx_ring->syncp); - data[0] = rx_ring->stats.packets; - data[1] = rx_ring->stats.bytes; - } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start)); - data += 2; + for (i = 0; i < netdev->num_tx_queues; i++) { + i40e_add_queue_stats(&data, READ_ONCE(vsi->tx_rings[i])); + i40e_add_queue_stats(&data, READ_ONCE(vsi->rx_rings[i])); } rcu_read_unlock(); + if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1) goto check_data_pointer; @@ -1933,42 +2047,6 @@ check_data_pointer: } /** - * __i40e_add_stat_strings - copy stat strings into ethtool buffer - * @p: ethtool supplied buffer - * @stats: stat definitions array - * @size: size of the stats array - * - * Format and copy the strings described by stats into the buffer pointed at - * by p. - **/ -static void __i40e_add_stat_strings(u8 **p, const struct i40e_stats stats[], - const unsigned int size, ...) -{ - unsigned int i; - - for (i = 0; i < size; i++) { - va_list args; - - va_start(args, size); - vsnprintf(*p, ETH_GSTRING_LEN, stats[i].stat_string, args); - *p += ETH_GSTRING_LEN; - va_end(args); - } -} - -/** - * 40e_add_stat_strings - copy stat strings into ethtool buffer - * @p: ethtool supplied buffer - * @stats: stat definitions array - * - * Format and copy the strings described by the const static stats value into - * the buffer pointed at by p. Assumes that stats can have ARRAY_SIZE called - * for it. - **/ -#define i40e_add_stat_strings(p, stats, ...) 
\ - __i40e_add_stat_strings(p, stats, ARRAY_SIZE(stats), ## __VA_ARGS__) - -/** * i40e_get_stat_strings - copy stat strings into supplied buffer * @netdev: the netdev to collect strings for * @data: supplied buffer to copy strings into @@ -1990,16 +2068,13 @@ static void i40e_get_stat_strings(struct net_device *netdev, u8 *data) i40e_add_stat_strings(&data, i40e_gstrings_misc_stats); - for (i = 0; i < I40E_MAX_NUM_QUEUES(netdev); i++) { - snprintf(data, ETH_GSTRING_LEN, "tx-%u.tx_packets", i); - data += ETH_GSTRING_LEN; - snprintf(data, ETH_GSTRING_LEN, "tx-%u.tx_bytes", i); - data += ETH_GSTRING_LEN; - snprintf(data, ETH_GSTRING_LEN, "rx-%u.rx_packets", i); - data += ETH_GSTRING_LEN; - snprintf(data, ETH_GSTRING_LEN, "rx-%u.rx_bytes", i); - data += ETH_GSTRING_LEN; + for (i = 0; i < netdev->num_tx_queues; i++) { + i40e_add_stat_strings(&data, i40e_gstrings_queue_stats, + "tx", i); + i40e_add_stat_strings(&data, i40e_gstrings_queue_stats, + "rx", i); } + if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1) return; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index ac685ad4d877..330bafe6a689 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -9,7 +9,9 @@ /* Local includes */ #include "i40e.h" #include "i40e_diag.h" +#include "i40e_xsk.h" #include <net/udp_tunnel.h> +#include <net/xdp_sock.h> /* All i40e tracepoints are defined by the include below, which * must be included exactly once across the whole kernel with * CREATE_TRACE_POINTS defined @@ -89,7 +91,7 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>"); MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver"); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); MODULE_VERSION(DRV_VERSION); static struct workqueue_struct *i40e_wq; @@ -420,9 +422,9 @@ static void i40e_get_netdev_stats_struct(struct net_device *netdev, struct rtnl_link_stats64 *stats) { struct i40e_netdev_priv *np = netdev_priv(netdev); - struct i40e_ring *tx_ring, *rx_ring; struct i40e_vsi *vsi = np->vsi; struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi); + struct i40e_ring *ring; int i; if (test_bit(__I40E_VSI_DOWN, vsi->state)) @@ -436,24 +438,26 @@ static void i40e_get_netdev_stats_struct(struct net_device *netdev, u64 bytes, packets; unsigned int start; - tx_ring = READ_ONCE(vsi->tx_rings[i]); - if (!tx_ring) + ring = READ_ONCE(vsi->tx_rings[i]); + if (!ring) continue; - i40e_get_netdev_stats_struct_tx(tx_ring, stats); + i40e_get_netdev_stats_struct_tx(ring, stats); - rx_ring = &tx_ring[1]; + if (i40e_enabled_xdp_vsi(vsi)) { + ring++; + i40e_get_netdev_stats_struct_tx(ring, stats); + } + ring++; do { - start = u64_stats_fetch_begin_irq(&rx_ring->syncp); - packets = rx_ring->stats.packets; - bytes = rx_ring->stats.bytes; - } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start)); + start = u64_stats_fetch_begin_irq(&ring->syncp); + packets = ring->stats.packets; + bytes = ring->stats.bytes; + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); stats->rx_packets += packets; stats->rx_bytes += bytes; - if (i40e_enabled_xdp_vsi(vsi)) - i40e_get_netdev_stats_struct_tx(&rx_ring[1], stats); } rcu_read_unlock(); @@ -1528,8 +1532,8 @@ static int i40e_set_mac(struct net_device *netdev, void *p) return 0; } - if (test_bit(__I40E_VSI_DOWN, vsi->back->state) || - test_bit(__I40E_RESET_RECOVERY_PENDING, 
vsi->back->state)) + if (test_bit(__I40E_DOWN, pf->state) || + test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) return -EADDRNOTAVAIL; if (ether_addr_equal(hw->mac.addr, addr->sa_data)) @@ -1553,8 +1557,7 @@ static int i40e_set_mac(struct net_device *netdev, void *p) if (vsi->type == I40E_VSI_MAIN) { i40e_status ret; - ret = i40e_aq_mac_address_write(&vsi->back->hw, - I40E_AQC_WRITE_TYPE_LAA_WOL, + ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL, addr->sa_data, NULL); if (ret) netdev_info(netdev, "Ignoring error from firmware on LAA update, status %s, AQ ret %s\n", @@ -1565,7 +1568,7 @@ static int i40e_set_mac(struct net_device *netdev, void *p) /* schedule our worker thread which will take care of * applying the new filter changes */ - i40e_service_event_schedule(vsi->back); + i40e_service_event_schedule(pf); return 0; } @@ -3072,6 +3075,9 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring) i40e_status err = 0; u32 qtx_ctl = 0; + if (ring_is_xdp(ring)) + ring->xsk_umem = i40e_xsk_umem(ring); + /* some ATR related tx ring init */ if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) { ring->atr_sample_rate = vsi->back->atr_sample_rate; @@ -3181,13 +3187,46 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring) struct i40e_hw *hw = &vsi->back->hw; struct i40e_hmc_obj_rxq rx_ctx; i40e_status err = 0; + bool ok; + int ret; bitmap_zero(ring->state, __I40E_RING_STATE_NBITS); /* clear the context structure first */ memset(&rx_ctx, 0, sizeof(rx_ctx)); - ring->rx_buf_len = vsi->rx_buf_len; + if (ring->vsi->type == I40E_VSI_MAIN) + xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); + + ring->xsk_umem = i40e_xsk_umem(ring); + if (ring->xsk_umem) { + ring->rx_buf_len = ring->xsk_umem->chunk_size_nohr - + XDP_PACKET_HEADROOM; + /* For AF_XDP ZC, we disallow packets to span + * multiple buffers, thus letting us skip that + * handling in the fast-path. + */ + chain_len = 1; + ring->zca.free = i40e_zca_free; + ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, + MEM_TYPE_ZERO_COPY, + &ring->zca); + if (ret) + return ret; + dev_info(&vsi->back->pdev->dev, + "Registered XDP mem model MEM_TYPE_ZERO_COPY on Rx ring %d\n", + ring->queue_index); + + } else { + ring->rx_buf_len = vsi->rx_buf_len; + if (ring->vsi->type == I40E_VSI_MAIN) { + ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, + MEM_TYPE_PAGE_SHARED, + NULL); + if (ret) + return ret; + } + } rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len, BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT)); @@ -3243,7 +3282,15 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring) ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q); writel(0, ring->tail); - i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring)); + ok = ring->xsk_umem ? + i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring)) : + !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring)); + if (!ok) { + dev_info(&vsi->back->pdev->dev, + "Failed to allocate some buffers on %sRx ring %d (pf_q %d)\n", + ring->xsk_umem ? 
"UMEM enabled " : "", + ring->queue_index, pf_q); + } return 0; } @@ -6384,7 +6431,10 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup) char *req_fec = ""; char *an = ""; - new_speed = pf->hw.phy.link_info.link_speed; + if (isup) + new_speed = pf->hw.phy.link_info.link_speed; + else + new_speed = I40E_LINK_SPEED_UNKNOWN; if ((vsi->current_isup == isup) && (vsi->current_speed == new_speed)) return; @@ -6568,6 +6618,24 @@ static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up) struct i40e_hw *hw = &pf->hw; i40e_status err; u64 mask; + u8 speed; + + /* Card might've been put in an unstable state by other drivers + * and applications, which causes incorrect speed values being + * set on startup. In order to clear speed registers, we call + * get_phy_capabilities twice, once to get initial state of + * available speeds, and once to get current PHY config. + */ + err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, + NULL); + if (err) { + dev_err(&pf->pdev->dev, + "failed to get phy cap., ret = %s last_status = %s\n", + i40e_stat_str(hw, err), + i40e_aq_str(hw, hw->aq.asq_last_status)); + return err; + } + speed = abilities.link_speed; /* Get the current phy config */ err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, @@ -6581,9 +6649,9 @@ static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up) } /* If link needs to go up, but was not forced to go down, - * no need for a flap + * and its speed values are OK, no need for a flap */ - if (is_up && abilities.phy_type != 0) + if (is_up && abilities.phy_type != 0 && abilities.link_speed != 0) return I40E_SUCCESS; /* To force link we need to set bits for all supported PHY types, @@ -6595,7 +6663,10 @@ static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up) config.phy_type_ext = is_up ? (u8)((mask >> 32) & 0xff) : 0; /* Copy the old settings, except of phy_type */ config.abilities = abilities.abilities; - config.link_speed = abilities.link_speed; + if (abilities.link_speed != 0) + config.link_speed = abilities.link_speed; + else + config.link_speed = speed; config.eee_capability = abilities.eee_capability; config.eeer = abilities.eeer_val; config.low_power_ctrl = abilities.d3_lpan; @@ -8440,14 +8511,9 @@ static void i40e_link_event(struct i40e_pf *pf) i40e_status status; bool new_link, old_link; - /* save off old link status information */ - pf->hw.phy.link_info_old = pf->hw.phy.link_info; - /* set this to force the get_link_status call to refresh state */ pf->hw.phy.get_link_info = true; - old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP); - status = i40e_get_link_status(&pf->hw, &new_link); /* On success, disable temp link polling */ @@ -11828,6 +11894,256 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, } /** + * i40e_enter_busy_conf - Enters busy config state + * @vsi: vsi + * + * Returns 0 on success, <0 for failure. 
+ **/ +static int i40e_enter_busy_conf(struct i40e_vsi *vsi) +{ + struct i40e_pf *pf = vsi->back; + int timeout = 50; + + while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) { + timeout--; + if (!timeout) + return -EBUSY; + usleep_range(1000, 2000); + } + + return 0; +} + +/** + * i40e_exit_busy_conf - Exits busy config state + * @vsi: vsi + **/ +static void i40e_exit_busy_conf(struct i40e_vsi *vsi) +{ + struct i40e_pf *pf = vsi->back; + + clear_bit(__I40E_CONFIG_BUSY, pf->state); +} + +/** + * i40e_queue_pair_reset_stats - Resets all statistics for a queue pair + * @vsi: vsi + * @queue_pair: queue pair + **/ +static void i40e_queue_pair_reset_stats(struct i40e_vsi *vsi, int queue_pair) +{ + memset(&vsi->rx_rings[queue_pair]->rx_stats, 0, + sizeof(vsi->rx_rings[queue_pair]->rx_stats)); + memset(&vsi->tx_rings[queue_pair]->stats, 0, + sizeof(vsi->tx_rings[queue_pair]->stats)); + if (i40e_enabled_xdp_vsi(vsi)) { + memset(&vsi->xdp_rings[queue_pair]->stats, 0, + sizeof(vsi->xdp_rings[queue_pair]->stats)); + } +} + +/** + * i40e_queue_pair_clean_rings - Cleans all the rings of a queue pair + * @vsi: vsi + * @queue_pair: queue pair + **/ +static void i40e_queue_pair_clean_rings(struct i40e_vsi *vsi, int queue_pair) +{ + i40e_clean_tx_ring(vsi->tx_rings[queue_pair]); + if (i40e_enabled_xdp_vsi(vsi)) + i40e_clean_tx_ring(vsi->xdp_rings[queue_pair]); + i40e_clean_rx_ring(vsi->rx_rings[queue_pair]); +} + +/** + * i40e_queue_pair_toggle_napi - Enables/disables NAPI for a queue pair + * @vsi: vsi + * @queue_pair: queue pair + * @enable: true for enable, false for disable + **/ +static void i40e_queue_pair_toggle_napi(struct i40e_vsi *vsi, int queue_pair, + bool enable) +{ + struct i40e_ring *rxr = vsi->rx_rings[queue_pair]; + struct i40e_q_vector *q_vector = rxr->q_vector; + + if (!vsi->netdev) + return; + + /* All rings in a qp belong to the same qvector. */ + if (q_vector->rx.ring || q_vector->tx.ring) { + if (enable) + napi_enable(&q_vector->napi); + else + napi_disable(&q_vector->napi); + } +} + +/** + * i40e_queue_pair_toggle_rings - Enables/disables all rings for a queue pair + * @vsi: vsi + * @queue_pair: queue pair + * @enable: true for enable, false for disable + * + * Returns 0 on success, <0 on failure. + **/ +static int i40e_queue_pair_toggle_rings(struct i40e_vsi *vsi, int queue_pair, + bool enable) +{ + struct i40e_pf *pf = vsi->back; + int pf_q, ret = 0; + + pf_q = vsi->base_queue + queue_pair; + ret = i40e_control_wait_tx_q(vsi->seid, pf, pf_q, + false /*is xdp*/, enable); + if (ret) { + dev_info(&pf->pdev->dev, + "VSI seid %d Tx ring %d %sable timeout\n", + vsi->seid, pf_q, (enable ? "en" : "dis")); + return ret; + } + + i40e_control_rx_q(pf, pf_q, enable); + ret = i40e_pf_rxq_wait(pf, pf_q, enable); + if (ret) { + dev_info(&pf->pdev->dev, + "VSI seid %d Rx ring %d %sable timeout\n", + vsi->seid, pf_q, (enable ? "en" : "dis")); + return ret; + } + + /* Due to HW errata, on Rx disable only, the register can + * indicate done before it really is. Needs 50ms to be sure + */ + if (!enable) + mdelay(50); + + if (!i40e_enabled_xdp_vsi(vsi)) + return ret; + + ret = i40e_control_wait_tx_q(vsi->seid, pf, + pf_q + vsi->alloc_queue_pairs, + true /*is xdp*/, enable); + if (ret) { + dev_info(&pf->pdev->dev, + "VSI seid %d XDP Tx ring %d %sable timeout\n", + vsi->seid, pf_q, (enable ? 
"en" : "dis")); + } + + return ret; +} + +/** + * i40e_queue_pair_enable_irq - Enables interrupts for a queue pair + * @vsi: vsi + * @queue_pair: queue_pair + **/ +static void i40e_queue_pair_enable_irq(struct i40e_vsi *vsi, int queue_pair) +{ + struct i40e_ring *rxr = vsi->rx_rings[queue_pair]; + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + + /* All rings in a qp belong to the same qvector. */ + if (pf->flags & I40E_FLAG_MSIX_ENABLED) + i40e_irq_dynamic_enable(vsi, rxr->q_vector->v_idx); + else + i40e_irq_dynamic_enable_icr0(pf); + + i40e_flush(hw); +} + +/** + * i40e_queue_pair_disable_irq - Disables interrupts for a queue pair + * @vsi: vsi + * @queue_pair: queue_pair + **/ +static void i40e_queue_pair_disable_irq(struct i40e_vsi *vsi, int queue_pair) +{ + struct i40e_ring *rxr = vsi->rx_rings[queue_pair]; + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + + /* For simplicity, instead of removing the qp interrupt causes + * from the interrupt linked list, we simply disable the interrupt, and + * leave the list intact. + * + * All rings in a qp belong to the same qvector. + */ + if (pf->flags & I40E_FLAG_MSIX_ENABLED) { + u32 intpf = vsi->base_vector + rxr->q_vector->v_idx; + + wr32(hw, I40E_PFINT_DYN_CTLN(intpf - 1), 0); + i40e_flush(hw); + synchronize_irq(pf->msix_entries[intpf].vector); + } else { + /* Legacy and MSI mode - this stops all interrupt handling */ + wr32(hw, I40E_PFINT_ICR0_ENA, 0); + wr32(hw, I40E_PFINT_DYN_CTL0, 0); + i40e_flush(hw); + synchronize_irq(pf->pdev->irq); + } +} + +/** + * i40e_queue_pair_disable - Disables a queue pair + * @vsi: vsi + * @queue_pair: queue pair + * + * Returns 0 on success, <0 on failure. + **/ +int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair) +{ + int err; + + err = i40e_enter_busy_conf(vsi); + if (err) + return err; + + i40e_queue_pair_disable_irq(vsi, queue_pair); + err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false /* off */); + i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */); + i40e_queue_pair_clean_rings(vsi, queue_pair); + i40e_queue_pair_reset_stats(vsi, queue_pair); + + return err; +} + +/** + * i40e_queue_pair_enable - Enables a queue pair + * @vsi: vsi + * @queue_pair: queue pair + * + * Returns 0 on success, <0 on failure. + **/ +int i40e_queue_pair_enable(struct i40e_vsi *vsi, int queue_pair) +{ + int err; + + err = i40e_configure_tx_ring(vsi->tx_rings[queue_pair]); + if (err) + return err; + + if (i40e_enabled_xdp_vsi(vsi)) { + err = i40e_configure_tx_ring(vsi->xdp_rings[queue_pair]); + if (err) + return err; + } + + err = i40e_configure_rx_ring(vsi->rx_rings[queue_pair]); + if (err) + return err; + + err = i40e_queue_pair_toggle_rings(vsi, queue_pair, true /* on */); + i40e_queue_pair_toggle_napi(vsi, queue_pair, true /* on */); + i40e_queue_pair_enable_irq(vsi, queue_pair); + + i40e_exit_busy_conf(vsi); + + return err; +} + +/** * i40e_xdp - implements ndo_bpf for i40e * @dev: netdevice * @xdp: XDP command @@ -11847,6 +12163,12 @@ static int i40e_xdp(struct net_device *dev, case XDP_QUERY_PROG: xdp->prog_id = vsi->xdp_prog ? 
vsi->xdp_prog->aux->id : 0; return 0; + case XDP_QUERY_XSK_UMEM: + return i40e_xsk_umem_query(vsi, &xdp->xsk.umem, + xdp->xsk.queue_id); + case XDP_SETUP_XSK_UMEM: + return i40e_xsk_umem_setup(vsi, xdp->xsk.umem, + xdp->xsk.queue_id); default: return -EINVAL; } @@ -11886,6 +12208,7 @@ static const struct net_device_ops i40e_netdev_ops = { .ndo_bridge_setlink = i40e_ndo_bridge_setlink, .ndo_bpf = i40e_xdp, .ndo_xdp_xmit = i40e_xdp_xmit, + .ndo_xsk_async_xmit = i40e_xsk_async_xmit, }; /** @@ -13033,7 +13356,7 @@ struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++) if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid) break; - if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) { + if (vsi_idx == pf->num_alloc_vsi && vsi_seid != 0) { dev_info(&pf->pdev->dev, "vsi seid %d not found\n", vsi_seid); return NULL; @@ -14159,6 +14482,7 @@ static void i40e_remove(struct pci_dev *pdev) mutex_destroy(&hw->aq.asq_mutex); /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */ + rtnl_lock(); i40e_clear_interrupt_scheme(pf); for (i = 0; i < pf->num_alloc_vsi; i++) { if (pf->vsi[i]) { @@ -14167,6 +14491,7 @@ static void i40e_remove(struct pci_dev *pdev) pf->vsi[i] = NULL; } } + rtnl_unlock(); for (i = 0; i < I40E_MAX_VEB; i++) { kfree(pf->veb[i]); @@ -14378,7 +14703,13 @@ static void i40e_shutdown(struct pci_dev *pdev) wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0)); + /* Since we're going to destroy queues during the + * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this + * whole section + */ + rtnl_lock(); i40e_clear_interrupt_scheme(pf); + rtnl_unlock(); if (system_state == SYSTEM_POWER_OFF) { pci_wake_from_d3(pdev, pf->wol_en); diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index 35f2866b38c6..1199f0502d6d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -694,7 +694,8 @@ static long i40e_ptp_create_clock(struct i40e_pf *pf) if (!IS_ERR_OR_NULL(pf->ptp_clock)) return 0; - strncpy(pf->ptp_caps.name, i40e_driver_name, sizeof(pf->ptp_caps.name)); + strncpy(pf->ptp_caps.name, i40e_driver_name, + sizeof(pf->ptp_caps.name) - 1); pf->ptp_caps.owner = THIS_MODULE; pf->ptp_caps.max_adj = 999999999; pf->ptp_caps.n_ext_ts = 0; diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index b5042d1a63c0..740ea58ba938 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -8,16 +8,8 @@ #include "i40e.h" #include "i40e_trace.h" #include "i40e_prototype.h" - -static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size, - u32 td_tag) -{ - return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA | - ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) | - ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) | - ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) | - ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT)); -} +#include "i40e_txrx_common.h" +#include "i40e_xsk.h" #define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS) /** @@ -536,8 +528,8 @@ int i40e_add_del_fdir(struct i40e_vsi *vsi, * This is used to verify if the FD programming or invalidation * requested by SW to the HW is successful or not and take actions accordingly. 
**/ -static void i40e_fd_handle_status(struct i40e_ring *rx_ring, - union i40e_rx_desc *rx_desc, u8 prog_id) +void i40e_fd_handle_status(struct i40e_ring *rx_ring, + union i40e_rx_desc *rx_desc, u8 prog_id) { struct i40e_pf *pf = rx_ring->vsi->back; struct pci_dev *pdev = pf->pdev; @@ -644,13 +636,18 @@ void i40e_clean_tx_ring(struct i40e_ring *tx_ring) unsigned long bi_size; u16 i; - /* ring already cleared, nothing to do */ - if (!tx_ring->tx_bi) - return; + if (ring_is_xdp(tx_ring) && tx_ring->xsk_umem) { + i40e_xsk_clean_tx_ring(tx_ring); + } else { + /* ring already cleared, nothing to do */ + if (!tx_ring->tx_bi) + return; - /* Free all the Tx ring sk_buffs */ - for (i = 0; i < tx_ring->count; i++) - i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]); + /* Free all the Tx ring sk_buffs */ + for (i = 0; i < tx_ring->count; i++) + i40e_unmap_and_free_tx_resource(tx_ring, + &tx_ring->tx_bi[i]); + } bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count; memset(tx_ring->tx_bi, 0, bi_size); @@ -767,8 +764,6 @@ void i40e_detect_recover_hung(struct i40e_vsi *vsi) } } -#define WB_STRIDE 4 - /** * i40e_clean_tx_irq - Reclaim resources after transmit completes * @vsi: the VSI we care about @@ -873,27 +868,8 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, i += tx_ring->count; tx_ring->next_to_clean = i; - u64_stats_update_begin(&tx_ring->syncp); - tx_ring->stats.bytes += total_bytes; - tx_ring->stats.packets += total_packets; - u64_stats_update_end(&tx_ring->syncp); - tx_ring->q_vector->tx.total_bytes += total_bytes; - tx_ring->q_vector->tx.total_packets += total_packets; - - if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) { - /* check to see if there are < 4 descriptors - * waiting to be written back, then kick the hardware to force - * them to be written back in case we stay in NAPI. - * In this mode on X722 we do not enable Interrupt. - */ - unsigned int j = i40e_get_tx_pending(tx_ring, false); - - if (budget && - ((j / WB_STRIDE) == 0) && (j > 0) && - !test_bit(__I40E_VSI_DOWN, vsi->state) && - (I40E_DESC_UNUSED(tx_ring) != tx_ring->count)) - tx_ring->arm_wb = true; - } + i40e_update_tx_stats(tx_ring, total_packets, total_bytes); + i40e_arm_wb(tx_ring, vsi, budget); if (ring_is_xdp(tx_ring)) return !!budget; @@ -1244,6 +1220,11 @@ static void i40e_reuse_rx_page(struct i40e_ring *rx_ring, new_buff->page = old_buff->page; new_buff->page_offset = old_buff->page_offset; new_buff->pagecnt_bias = old_buff->pagecnt_bias; + + rx_ring->rx_stats.page_reuse_count++; + + /* clear contents of buffer_info */ + old_buff->page = NULL; } /** @@ -1266,7 +1247,7 @@ static inline bool i40e_rx_is_programming_status(u64 qw) } /** - * i40e_clean_programming_status - clean the programming status descriptor + * i40e_clean_programming_status - try to clean the programming status descriptor * @rx_ring: the rx ring that has this descriptor * @rx_desc: the rx descriptor written back by HW * @qw: qword representing status_error_len in CPU ordering @@ -1275,15 +1256,22 @@ static inline bool i40e_rx_is_programming_status(u64 qw) * status being successful or not and take actions accordingly. FCoE should * handle its context/filter programming/invalidation status and take actions. * + * Returns an i40e_rx_buffer to reuse if the cleanup occurred, otherwise NULL. 
**/ -static void i40e_clean_programming_status(struct i40e_ring *rx_ring, - union i40e_rx_desc *rx_desc, - u64 qw) +struct i40e_rx_buffer *i40e_clean_programming_status( + struct i40e_ring *rx_ring, + union i40e_rx_desc *rx_desc, + u64 qw) { struct i40e_rx_buffer *rx_buffer; - u32 ntc = rx_ring->next_to_clean; + u32 ntc; u8 id; + if (!i40e_rx_is_programming_status(qw)) + return NULL; + + ntc = rx_ring->next_to_clean; + /* fetch, update, and store next to clean */ rx_buffer = &rx_ring->rx_bi[ntc++]; ntc = (ntc < rx_ring->count) ? ntc : 0; @@ -1291,18 +1279,13 @@ static void i40e_clean_programming_status(struct i40e_ring *rx_ring, prefetch(I40E_RX_DESC(rx_ring, ntc)); - /* place unused page back on the ring */ - i40e_reuse_rx_page(rx_ring, rx_buffer); - rx_ring->rx_stats.page_reuse_count++; - - /* clear contents of buffer_info */ - rx_buffer->page = NULL; - id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >> I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT; if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS) i40e_fd_handle_status(rx_ring, rx_desc, id); + + return rx_buffer; } /** @@ -1372,6 +1355,11 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring) rx_ring->skb = NULL; } + if (rx_ring->xsk_umem) { + i40e_xsk_clean_rx_ring(rx_ring); + goto skip_free; + } + /* Free all the Rx ring sk_buffs */ for (i = 0; i < rx_ring->count; i++) { struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i]; @@ -1400,6 +1388,7 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring) rx_bi->page_offset = 0; } +skip_free: bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count; memset(rx_ring->rx_bi, 0, bi_size); @@ -1492,7 +1481,7 @@ err: * @rx_ring: ring to bump * @val: new head index **/ -static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) +void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) { rx_ring->next_to_use = val; @@ -1576,8 +1565,8 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring, * @skb: packet to send up * @vlan_tag: vlan tag for packet **/ -static void i40e_receive_skb(struct i40e_ring *rx_ring, - struct sk_buff *skb, u16 vlan_tag) +void i40e_receive_skb(struct i40e_ring *rx_ring, + struct sk_buff *skb, u16 vlan_tag) { struct i40e_q_vector *q_vector = rx_ring->q_vector; @@ -1804,7 +1793,6 @@ static inline void i40e_rx_hash(struct i40e_ring *ring, * order to populate the hash, checksum, VLAN, protocol, and * other fields within the skb. 
**/ -static inline void i40e_process_skb_fields(struct i40e_ring *rx_ring, union i40e_rx_desc *rx_desc, struct sk_buff *skb, u8 rx_ptype) @@ -2152,7 +2140,6 @@ static void i40e_put_rx_buffer(struct i40e_ring *rx_ring, if (i40e_can_reuse_rx_page(rx_buffer)) { /* hand second half of page back to the ring */ i40e_reuse_rx_page(rx_ring, rx_buffer); - rx_ring->rx_stats.page_reuse_count++; } else { /* we are not reusing the buffer so unmap it */ dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, @@ -2160,10 +2147,9 @@ static void i40e_put_rx_buffer(struct i40e_ring *rx_ring, DMA_FROM_DEVICE, I40E_RX_DMA_ATTR); __page_frag_cache_drain(rx_buffer->page, rx_buffer->pagecnt_bias); + /* clear contents of buffer_info */ + rx_buffer->page = NULL; } - - /* clear contents of buffer_info */ - rx_buffer->page = NULL; } /** @@ -2199,16 +2185,10 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring, return true; } -#define I40E_XDP_PASS 0 -#define I40E_XDP_CONSUMED BIT(0) -#define I40E_XDP_TX BIT(1) -#define I40E_XDP_REDIR BIT(2) - static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf, struct i40e_ring *xdp_ring); -static int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, - struct i40e_ring *xdp_ring) +int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring) { struct xdp_frame *xdpf = convert_to_xdp_frame(xdp); @@ -2287,7 +2267,13 @@ static void i40e_rx_buffer_flip(struct i40e_ring *rx_ring, #endif } -static inline void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring) +/** + * i40e_xdp_ring_update_tail - Updates the XDP Tx ring tail register + * @xdp_ring: XDP Tx ring + * + * This function updates the XDP Tx ring tail register. + **/ +void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring) { /* Force memory writes to complete before letting h/w * know there are new descriptors to fetch. @@ -2297,6 +2283,48 @@ static inline void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring) } /** + * i40e_update_rx_stats - Update Rx ring statistics + * @rx_ring: rx descriptor ring + * @total_rx_bytes: number of bytes received + * @total_rx_packets: number of packets received + * + * This function updates the Rx ring statistics. + **/ +void i40e_update_rx_stats(struct i40e_ring *rx_ring, + unsigned int total_rx_bytes, + unsigned int total_rx_packets) +{ + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->stats.packets += total_rx_packets; + rx_ring->stats.bytes += total_rx_bytes; + u64_stats_update_end(&rx_ring->syncp); + rx_ring->q_vector->rx.total_packets += total_rx_packets; + rx_ring->q_vector->rx.total_bytes += total_rx_bytes; +} + +/** + * i40e_finalize_xdp_rx - Bump XDP Tx tail and/or flush redirect map + * @rx_ring: Rx ring + * @xdp_res: Result of the receive batch + * + * This function bumps the XDP Tx tail and/or flushes the redirect map, and + * should be called when a batch of packets has been processed in the + * napi loop. 
+ **/ +void i40e_finalize_xdp_rx(struct i40e_ring *rx_ring, unsigned int xdp_res) +{ + if (xdp_res & I40E_XDP_REDIR) + xdp_do_flush_map(); + + if (xdp_res & I40E_XDP_TX) { + struct i40e_ring *xdp_ring = + rx_ring->vsi->xdp_rings[rx_ring->queue_index]; + + i40e_xdp_ring_update_tail(xdp_ring); + } +} + +/** * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf * @rx_ring: rx descriptor ring to transact packets on * @budget: Total limit on number of packets to process @@ -2349,11 +2377,14 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) */ dma_rmb(); - if (unlikely(i40e_rx_is_programming_status(qword))) { - i40e_clean_programming_status(rx_ring, rx_desc, qword); + rx_buffer = i40e_clean_programming_status(rx_ring, rx_desc, + qword); + if (unlikely(rx_buffer)) { + i40e_reuse_rx_page(rx_ring, rx_buffer); cleaned_count++; continue; } + size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> I40E_RXD_QW1_LENGTH_PBUF_SHIFT; if (!size) @@ -2432,24 +2463,10 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) total_rx_packets++; } - if (xdp_xmit & I40E_XDP_REDIR) - xdp_do_flush_map(); - - if (xdp_xmit & I40E_XDP_TX) { - struct i40e_ring *xdp_ring = - rx_ring->vsi->xdp_rings[rx_ring->queue_index]; - - i40e_xdp_ring_update_tail(xdp_ring); - } - + i40e_finalize_xdp_rx(rx_ring, xdp_xmit); rx_ring->skb = skb; - u64_stats_update_begin(&rx_ring->syncp); - rx_ring->stats.packets += total_rx_packets; - rx_ring->stats.bytes += total_rx_bytes; - u64_stats_update_end(&rx_ring->syncp); - rx_ring->q_vector->rx.total_packets += total_rx_packets; - rx_ring->q_vector->rx.total_bytes += total_rx_bytes; + i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets); /* guarantee a trip back through this routine if there was a failure */ return failure ? budget : (int)total_rx_packets; @@ -2587,7 +2604,11 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) * budget and be more aggressive about cleaning up the Tx descriptors. */ i40e_for_each_ring(ring, q_vector->tx) { - if (!i40e_clean_tx_irq(vsi, ring, budget)) { + bool wd = ring->xsk_umem ? + i40e_clean_xdp_tx_irq(vsi, ring, budget) : + i40e_clean_tx_irq(vsi, ring, budget); + + if (!wd) { clean_complete = false; continue; } @@ -2605,7 +2626,9 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) budget_per_ring = max(budget/q_vector->num_ringpairs, 1); i40e_for_each_ring(ring, q_vector->rx) { - int cleaned = i40e_clean_rx_irq(ring, budget_per_ring); + int cleaned = ring->xsk_umem ? 
+ i40e_clean_rx_irq_zc(ring, budget_per_ring) : + i40e_clean_rx_irq(ring, budget_per_ring); work_done += cleaned; /* if we clean as many as budgeted, we must not be done */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index bb04f6a731fe..100e92d2982f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -296,13 +296,17 @@ struct i40e_tx_buffer { struct i40e_rx_buffer { dma_addr_t dma; - struct page *page; -#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) - __u32 page_offset; -#else - __u16 page_offset; -#endif - __u16 pagecnt_bias; + union { + struct { + struct page *page; + __u32 page_offset; + __u16 pagecnt_bias; + }; + struct { + void *addr; + u64 handle; + }; + }; }; struct i40e_queue_stats { @@ -414,6 +418,8 @@ struct i40e_ring { struct i40e_channel *ch; struct xdp_rxq_info xdp_rxq; + struct xdp_umem *xsk_umem; + struct zero_copy_allocator zca; /* ZC allocator anchor */ } ____cacheline_internodealigned_in_smp; static inline bool ring_uses_build_skb(struct i40e_ring *ring) diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h b/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h new file mode 100644 index 000000000000..09809dffe399 --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h @@ -0,0 +1,94 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2018 Intel Corporation. */ + +#ifndef I40E_TXRX_COMMON_ +#define I40E_TXRX_COMMON_ + +void i40e_fd_handle_status(struct i40e_ring *rx_ring, + union i40e_rx_desc *rx_desc, u8 prog_id); +int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring); +struct i40e_rx_buffer *i40e_clean_programming_status( + struct i40e_ring *rx_ring, + union i40e_rx_desc *rx_desc, + u64 qw); +void i40e_process_skb_fields(struct i40e_ring *rx_ring, + union i40e_rx_desc *rx_desc, struct sk_buff *skb, + u8 rx_ptype); +void i40e_receive_skb(struct i40e_ring *rx_ring, + struct sk_buff *skb, u16 vlan_tag); +void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring); +void i40e_update_rx_stats(struct i40e_ring *rx_ring, + unsigned int total_rx_bytes, + unsigned int total_rx_packets); +void i40e_finalize_xdp_rx(struct i40e_ring *rx_ring, unsigned int xdp_res); +void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val); + +#define I40E_XDP_PASS 0 +#define I40E_XDP_CONSUMED BIT(0) +#define I40E_XDP_TX BIT(1) +#define I40E_XDP_REDIR BIT(2) + +/** + * build_ctob - Builds the Tx descriptor (cmd, offset and type) qword + **/ +static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size, + u32 td_tag) +{ + return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA | + ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) | + ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) | + ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) | + ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT)); +} + +/** + * i40e_update_tx_stats - Update the egress statistics for the Tx ring + * @tx_ring: Tx ring to update + * @total_packets: total packets sent + * @total_bytes: total bytes sent + **/ +static inline void i40e_update_tx_stats(struct i40e_ring *tx_ring, + unsigned int total_packets, + unsigned int total_bytes) +{ + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->stats.bytes += total_bytes; + tx_ring->stats.packets += total_packets; + u64_stats_update_end(&tx_ring->syncp); + tx_ring->q_vector->tx.total_bytes += total_bytes; + tx_ring->q_vector->tx.total_packets += total_packets; +} + +#define WB_STRIDE 4 + +/** + * i40e_arm_wb - (Possibly) arms Tx 
+
+/**
+ * i40e_arm_wb - (Possibly) arms Tx write-back
+ * @tx_ring: Tx ring to update
+ * @vsi: the VSI
+ * @budget: the NAPI budget left
+ **/
+static inline void i40e_arm_wb(struct i40e_ring *tx_ring,
+			       struct i40e_vsi *vsi,
+			       int budget)
+{
+	if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
+		/* check to see if there are < 4 descriptors
+		 * waiting to be written back, then kick the hardware to force
+		 * them to be written back in case we stay in NAPI.
+		 * In this mode on X722 we do not enable interrupts.
+		 */
+		unsigned int j = i40e_get_tx_pending(tx_ring, false);
+
+		if (budget &&
+		    ((j / WB_STRIDE) == 0) && j > 0 &&
+		    !test_bit(__I40E_VSI_DOWN, vsi->state) &&
+		    (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
+			tx_ring->arm_wb = true;
+	}
+}
+
+void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring);
+void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring);
+bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi);
+
+#endif /* I40E_TXRX_COMMON_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index c6d24eaede18..81b0e1f8d14b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -1084,6 +1084,136 @@ static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
 	return -EIO;
 }

+static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi);
+
+/**
+ * i40e_config_vf_promiscuous_mode
+ * @vf: pointer to the VF info
+ * @vsi_id: VSI id
+ * @allmulti: set MAC L2 layer multicast promiscuous enable/disable
+ * @alluni: set MAC L2 layer unicast promiscuous enable/disable
+ *
+ * Called from the VF to configure the promiscuous mode of
+ * VF VSIs and from the VF reset path to reset promiscuous mode.
+ **/
+static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
+						   u16 vsi_id,
+						   bool allmulti,
+						   bool alluni)
+{
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_hw *hw = &pf->hw;
+	struct i40e_mac_filter *f;
+	i40e_status aq_ret = 0;
+	struct i40e_vsi *vsi;
+	int bkt;
+
+	vsi = i40e_find_vsi_from_id(pf, vsi_id);
+	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id) || !vsi)
+		return I40E_ERR_PARAM;
+
+	if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
+		dev_err(&pf->pdev->dev,
+			"Unprivileged VF %d is attempting to configure promiscuous mode\n",
+			vf->vf_id);
+		/* Lie to the VF on purpose.
*/ + return 0; + } + + if (vf->port_vlan_id) { + aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, vsi->seid, + allmulti, + vf->port_vlan_id, + NULL); + if (aq_ret) { + int aq_err = pf->hw.aq.asq_last_status; + + dev_err(&pf->pdev->dev, + "VF %d failed to set multicast promiscuous mode err %s aq_err %s\n", + vf->vf_id, + i40e_stat_str(&pf->hw, aq_ret), + i40e_aq_str(&pf->hw, aq_err)); + return aq_ret; + } + + aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, vsi->seid, + alluni, + vf->port_vlan_id, + NULL); + if (aq_ret) { + int aq_err = pf->hw.aq.asq_last_status; + + dev_err(&pf->pdev->dev, + "VF %d failed to set unicast promiscuous mode err %s aq_err %s\n", + vf->vf_id, + i40e_stat_str(&pf->hw, aq_ret), + i40e_aq_str(&pf->hw, aq_err)); + } + return aq_ret; + } else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) { + hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) { + if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID) + continue; + aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, + vsi->seid, + allmulti, + f->vlan, + NULL); + if (aq_ret) { + int aq_err = pf->hw.aq.asq_last_status; + + dev_err(&pf->pdev->dev, + "Could not add VLAN %d to multicast promiscuous domain err %s aq_err %s\n", + f->vlan, + i40e_stat_str(&pf->hw, aq_ret), + i40e_aq_str(&pf->hw, aq_err)); + } + + aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, + vsi->seid, + alluni, + f->vlan, + NULL); + if (aq_ret) { + int aq_err = pf->hw.aq.asq_last_status; + + dev_err(&pf->pdev->dev, + "Could not add VLAN %d to Unicast promiscuous domain err %s aq_err %s\n", + f->vlan, + i40e_stat_str(&pf->hw, aq_ret), + i40e_aq_str(&pf->hw, aq_err)); + } + } + return aq_ret; + } + aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, allmulti, + NULL); + if (aq_ret) { + int aq_err = pf->hw.aq.asq_last_status; + + dev_err(&pf->pdev->dev, + "VF %d failed to set multicast promiscuous mode err %s aq_err %s\n", + vf->vf_id, + i40e_stat_str(&pf->hw, aq_ret), + i40e_aq_str(&pf->hw, aq_err)); + return aq_ret; + } + + aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid, alluni, + NULL, true); + if (aq_ret) { + int aq_err = pf->hw.aq.asq_last_status; + + dev_err(&pf->pdev->dev, + "VF %d failed to set unicast promiscuous mode err %s aq_err %s\n", + vf->vf_id, + i40e_stat_str(&pf->hw, aq_ret), + i40e_aq_str(&pf->hw, aq_err)); + } + + return aq_ret; +} + /** * i40e_trigger_vf_reset * @vf: pointer to the VF structure @@ -1145,6 +1275,9 @@ static void i40e_cleanup_reset_vf(struct i40e_vf *vf) struct i40e_hw *hw = &pf->hw; u32 reg; + /* disable promisc modes in case they were enabled */ + i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id, false, false); + /* free VF resources to begin resetting the VSI state */ i40e_free_vf_res(vf); @@ -1840,143 +1973,55 @@ static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi) * i40e_vc_config_promiscuous_mode_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer - * @msglen: msg length * * called from the VF to configure the promiscuous mode of * VF vsis **/ -static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, - u8 *msg, u16 msglen) +static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg) { struct virtchnl_promisc_info *info = (struct virtchnl_promisc_info *)msg; struct i40e_pf *pf = vf->pf; - struct i40e_hw *hw = &pf->hw; - struct i40e_mac_filter *f; i40e_status aq_ret = 0; bool allmulti = false; - struct i40e_vsi *vsi; bool alluni = false; - int aq_err = 0; - int bkt; - vsi = i40e_find_vsi_from_id(pf, info->vsi_id); - if (!test_bit(I40E_VF_STATE_ACTIVE, 
&vf->vf_states) || - !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) || - !vsi) { - aq_ret = I40E_ERR_PARAM; - goto error_param; - } - if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { - dev_err(&pf->pdev->dev, - "Unprivileged VF %d is attempting to configure promiscuous mode\n", - vf->vf_id); - /* Lie to the VF on purpose. */ - aq_ret = 0; - goto error_param; - } + if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) + return I40E_ERR_PARAM; + /* Multicast promiscuous handling*/ if (info->flags & FLAG_VF_MULTICAST_PROMISC) allmulti = true; - if (vf->port_vlan_id) { - aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, vsi->seid, - allmulti, - vf->port_vlan_id, - NULL); - } else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) { - hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) { - if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID) - continue; - aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, - vsi->seid, - allmulti, - f->vlan, - NULL); - aq_err = pf->hw.aq.asq_last_status; - if (aq_ret) { - dev_err(&pf->pdev->dev, - "Could not add VLAN %d to multicast promiscuous domain err %s aq_err %s\n", - f->vlan, - i40e_stat_str(&pf->hw, aq_ret), - i40e_aq_str(&pf->hw, aq_err)); - break; - } - } - } else { - aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, - allmulti, NULL); - aq_err = pf->hw.aq.asq_last_status; - if (aq_ret) { - dev_err(&pf->pdev->dev, - "VF %d failed to set multicast promiscuous mode err %s aq_err %s\n", - vf->vf_id, - i40e_stat_str(&pf->hw, aq_ret), - i40e_aq_str(&pf->hw, aq_err)); - goto error_param; - } - } - + if (info->flags & FLAG_VF_UNICAST_PROMISC) + alluni = true; + aq_ret = i40e_config_vf_promiscuous_mode(vf, info->vsi_id, allmulti, + alluni); if (!aq_ret) { - dev_info(&pf->pdev->dev, - "VF %d successfully set multicast promiscuous mode\n", - vf->vf_id); - if (allmulti) + if (allmulti) { + dev_info(&pf->pdev->dev, + "VF %d successfully set multicast promiscuous mode\n", + vf->vf_id); set_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states); - else + } else { + dev_info(&pf->pdev->dev, + "VF %d successfully unset multicast promiscuous mode\n", + vf->vf_id); clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states); - } - - if (info->flags & FLAG_VF_UNICAST_PROMISC) - alluni = true; - if (vf->port_vlan_id) { - aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, vsi->seid, - alluni, - vf->port_vlan_id, - NULL); - } else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) { - hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) { - if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID) - continue; - aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, - vsi->seid, - alluni, - f->vlan, - NULL); - aq_err = pf->hw.aq.asq_last_status; - if (aq_ret) - dev_err(&pf->pdev->dev, - "Could not add VLAN %d to Unicast promiscuous domain err %s aq_err %s\n", - f->vlan, - i40e_stat_str(&pf->hw, aq_ret), - i40e_aq_str(&pf->hw, aq_err)); - } - } else { - aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid, - alluni, NULL, - true); - aq_err = pf->hw.aq.asq_last_status; - if (aq_ret) { - dev_err(&pf->pdev->dev, - "VF %d failed to set unicast promiscuous mode %8.8x err %s aq_err %s\n", - vf->vf_id, info->flags, - i40e_stat_str(&pf->hw, aq_ret), - i40e_aq_str(&pf->hw, aq_err)); - goto error_param; } - } - - if (!aq_ret) { - dev_info(&pf->pdev->dev, - "VF %d successfully set unicast promiscuous mode\n", - vf->vf_id); - if (alluni) + if (alluni) { + dev_info(&pf->pdev->dev, + "VF %d successfully set unicast promiscuous mode\n", + vf->vf_id); set_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states); - else + } else { + 
dev_info(&pf->pdev->dev, + "VF %d successfully unset unicast promiscuous mode\n", + vf->vf_id); clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states); + } } -error_param: /* send the response to the VF */ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, @@ -1987,12 +2032,11 @@ error_param: * i40e_vc_config_queues_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer - * @msglen: msg length * * called from the VF to configure the rx/tx * queues **/ -static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) +static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg) { struct virtchnl_vsi_queue_config_info *qci = (struct virtchnl_vsi_queue_config_info *)msg; @@ -2105,12 +2149,11 @@ static int i40e_validate_queue_map(struct i40e_vf *vf, u16 vsi_id, * i40e_vc_config_irq_map_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer - * @msglen: msg length * * called from the VF to configure the irq to * queue map **/ -static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) +static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg) { struct virtchnl_irq_map_info *irqmap_info = (struct virtchnl_irq_map_info *)msg; @@ -2202,11 +2245,10 @@ static int i40e_ctrl_vf_rx_rings(struct i40e_vsi *vsi, unsigned long q_map, * i40e_vc_enable_queues_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer - * @msglen: msg length * * called from the VF to enable all or specific queue(s) **/ -static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) +static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg) { struct virtchnl_queue_select *vqs = (struct virtchnl_queue_select *)msg; @@ -2261,12 +2303,11 @@ error_param: * i40e_vc_disable_queues_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer - * @msglen: msg length * * called from the VF to disable all or specific * queue(s) **/ -static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) +static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg) { struct virtchnl_queue_select *vqs = (struct virtchnl_queue_select *)msg; @@ -2309,14 +2350,13 @@ error_param: * i40e_vc_request_queues_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer - * @msglen: msg length * * VFs get a default number of queues but can use this message to request a * different number. If the request is successful, PF will reset the VF and * return 0. If unsuccessful, PF will send message informing VF of number of * available queues and return result of sending VF a message. 
**/ -static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg, int msglen) +static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg) { struct virtchnl_vf_res_request *vfres = (struct virtchnl_vf_res_request *)msg; @@ -2360,11 +2400,10 @@ static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg, int msglen) * i40e_vc_get_stats_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer - * @msglen: msg length * * called from the VF to get vsi stats **/ -static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) +static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg) { struct virtchnl_queue_select *vqs = (struct virtchnl_queue_select *)msg; @@ -2458,7 +2497,7 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf, !is_multicast_ether_addr(addr) && vf->pf_set_mac && !ether_addr_equal(addr, vf->default_lan_addr.addr)) { dev_err(&pf->pdev->dev, - "VF attempting to override administratively set MAC address, reload the VF driver to resume normal operation\n"); + "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n"); return -EPERM; } } @@ -2470,11 +2509,10 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf, * i40e_vc_add_mac_addr_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer - * @msglen: msg length * * add guest mac address filter **/ -static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) +static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg) { struct virtchnl_ether_addr_list *al = (struct virtchnl_ether_addr_list *)msg; @@ -2541,11 +2579,10 @@ error_param: * i40e_vc_del_mac_addr_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer - * @msglen: msg length * * remove guest mac address filter **/ -static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) +static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg) { struct virtchnl_ether_addr_list *al = (struct virtchnl_ether_addr_list *)msg; @@ -2569,6 +2606,16 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) ret = I40E_ERR_INVALID_MAC_ADDR; goto error_param; } + + if (vf->pf_set_mac && + ether_addr_equal(al->list[i].addr, + vf->default_lan_addr.addr)) { + dev_err(&pf->pdev->dev, + "MAC addr %pM has been set by PF, cannot delete it for VF %d, reset VF to change MAC addr\n", + vf->default_lan_addr.addr, vf->vf_id); + ret = I40E_ERR_PARAM; + goto error_param; + } } vsi = pf->vsi[vf->lan_vsi_idx]; @@ -2601,11 +2648,10 @@ error_param: * i40e_vc_add_vlan_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer - * @msglen: msg length * * program guest vlan id **/ -static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) +static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg) { struct virtchnl_vlan_filter_list *vfl = (struct virtchnl_vlan_filter_list *)msg; @@ -2674,11 +2720,10 @@ error_param: * i40e_vc_remove_vlan_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer - * @msglen: msg length * * remove programmed guest vlan id **/ -static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) +static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg) { struct virtchnl_vlan_filter_list *vfl = (struct virtchnl_vlan_filter_list *)msg; @@ -2761,13 +2806,11 @@ error_param: * i40e_vc_iwarp_qvmap_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer - * @msglen: msg length * @config: config qvmap or 
release it * * called from the VF for the iwarp msgs **/ -static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, u16 msglen, - bool config) +static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, bool config) { struct virtchnl_iwarp_qvlist_info *qvlist_info = (struct virtchnl_iwarp_qvlist_info *)msg; @@ -2798,11 +2841,10 @@ error_param: * i40e_vc_config_rss_key * @vf: pointer to the VF info * @msg: pointer to the msg buffer - * @msglen: msg length * * Configure the VF's RSS key **/ -static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg, u16 msglen) +static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg) { struct virtchnl_rss_key *vrk = (struct virtchnl_rss_key *)msg; @@ -2830,11 +2872,10 @@ err: * i40e_vc_config_rss_lut * @vf: pointer to the VF info * @msg: pointer to the msg buffer - * @msglen: msg length * * Configure the VF's RSS LUT **/ -static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg, u16 msglen) +static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg) { struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg; @@ -2862,11 +2903,10 @@ err: * i40e_vc_get_rss_hena * @vf: pointer to the VF info * @msg: pointer to the msg buffer - * @msglen: msg length * * Return the RSS HENA bits allowed by the hardware **/ -static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen) +static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg) { struct virtchnl_rss_hena *vrh = NULL; struct i40e_pf *pf = vf->pf; @@ -2898,11 +2938,10 @@ err: * i40e_vc_set_rss_hena * @vf: pointer to the VF info * @msg: pointer to the msg buffer - * @msglen: msg length * * Set the RSS HENA bits for the VF **/ -static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen) +static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg) { struct virtchnl_rss_hena *vrh = (struct virtchnl_rss_hena *)msg; @@ -2927,12 +2966,10 @@ err: * i40e_vc_enable_vlan_stripping * @vf: pointer to the VF info * @msg: pointer to the msg buffer - * @msglen: msg length * * Enable vlan header stripping for the VF **/ -static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg, - u16 msglen) +static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg) { struct i40e_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx]; i40e_status aq_ret = 0; @@ -2954,12 +2991,10 @@ err: * i40e_vc_disable_vlan_stripping * @vf: pointer to the VF info * @msg: pointer to the msg buffer - * @msglen: msg length * * Disable vlan header stripping for the VF **/ -static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg, - u16 msglen) +static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg) { struct i40e_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx]; i40e_status aq_ret = 0; @@ -3659,65 +3694,65 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode, ret = 0; break; case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: - ret = i40e_vc_config_promiscuous_mode_msg(vf, msg, msglen); + ret = i40e_vc_config_promiscuous_mode_msg(vf, msg); break; case VIRTCHNL_OP_CONFIG_VSI_QUEUES: - ret = i40e_vc_config_queues_msg(vf, msg, msglen); + ret = i40e_vc_config_queues_msg(vf, msg); break; case VIRTCHNL_OP_CONFIG_IRQ_MAP: - ret = i40e_vc_config_irq_map_msg(vf, msg, msglen); + ret = i40e_vc_config_irq_map_msg(vf, msg); break; case VIRTCHNL_OP_ENABLE_QUEUES: - ret = i40e_vc_enable_queues_msg(vf, msg, msglen); + ret = i40e_vc_enable_queues_msg(vf, msg); i40e_vc_notify_vf_link_state(vf); break; case VIRTCHNL_OP_DISABLE_QUEUES: - ret = i40e_vc_disable_queues_msg(vf, msg, msglen); 
+ ret = i40e_vc_disable_queues_msg(vf, msg); break; case VIRTCHNL_OP_ADD_ETH_ADDR: - ret = i40e_vc_add_mac_addr_msg(vf, msg, msglen); + ret = i40e_vc_add_mac_addr_msg(vf, msg); break; case VIRTCHNL_OP_DEL_ETH_ADDR: - ret = i40e_vc_del_mac_addr_msg(vf, msg, msglen); + ret = i40e_vc_del_mac_addr_msg(vf, msg); break; case VIRTCHNL_OP_ADD_VLAN: - ret = i40e_vc_add_vlan_msg(vf, msg, msglen); + ret = i40e_vc_add_vlan_msg(vf, msg); break; case VIRTCHNL_OP_DEL_VLAN: - ret = i40e_vc_remove_vlan_msg(vf, msg, msglen); + ret = i40e_vc_remove_vlan_msg(vf, msg); break; case VIRTCHNL_OP_GET_STATS: - ret = i40e_vc_get_stats_msg(vf, msg, msglen); + ret = i40e_vc_get_stats_msg(vf, msg); break; case VIRTCHNL_OP_IWARP: ret = i40e_vc_iwarp_msg(vf, msg, msglen); break; case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP: - ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, true); + ret = i40e_vc_iwarp_qvmap_msg(vf, msg, true); break; case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP: - ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, false); + ret = i40e_vc_iwarp_qvmap_msg(vf, msg, false); break; case VIRTCHNL_OP_CONFIG_RSS_KEY: - ret = i40e_vc_config_rss_key(vf, msg, msglen); + ret = i40e_vc_config_rss_key(vf, msg); break; case VIRTCHNL_OP_CONFIG_RSS_LUT: - ret = i40e_vc_config_rss_lut(vf, msg, msglen); + ret = i40e_vc_config_rss_lut(vf, msg); break; case VIRTCHNL_OP_GET_RSS_HENA_CAPS: - ret = i40e_vc_get_rss_hena(vf, msg, msglen); + ret = i40e_vc_get_rss_hena(vf, msg); break; case VIRTCHNL_OP_SET_RSS_HENA: - ret = i40e_vc_set_rss_hena(vf, msg, msglen); + ret = i40e_vc_set_rss_hena(vf, msg); break; case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: - ret = i40e_vc_enable_vlan_stripping(vf, msg, msglen); + ret = i40e_vc_enable_vlan_stripping(vf, msg); break; case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: - ret = i40e_vc_disable_vlan_stripping(vf, msg, msglen); + ret = i40e_vc_disable_vlan_stripping(vf, msg); break; case VIRTCHNL_OP_REQUEST_QUEUES: - ret = i40e_vc_request_queues_msg(vf, msg, msglen); + ret = i40e_vc_request_queues_msg(vf, msg); break; case VIRTCHNL_OP_ENABLE_CHANNELS: ret = i40e_vc_add_qch_msg(vf, msg); @@ -3786,6 +3821,35 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf) } /** + * i40e_validate_vf + * @pf: the physical function + * @vf_id: VF identifier + * + * Check that the VF is enabled and the VSI exists. + * + * Returns 0 on success, negative on failure + **/ +static int i40e_validate_vf(struct i40e_pf *pf, int vf_id) +{ + struct i40e_vsi *vsi; + struct i40e_vf *vf; + int ret = 0; + + if (vf_id >= pf->num_alloc_vfs) { + dev_err(&pf->pdev->dev, + "Invalid VF Identifier %d\n", vf_id); + ret = -EINVAL; + goto err_out; + } + vf = &pf->vf[vf_id]; + vsi = i40e_find_vsi_from_id(pf, vf->lan_vsi_id); + if (!vsi) + ret = -EINVAL; +err_out: + return ret; +} + +/** * i40e_ndo_set_vf_mac * @netdev: network interface device structure * @vf_id: VF identifier @@ -3806,14 +3870,11 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) u8 i; /* validate the request */ - if (vf_id >= pf->num_alloc_vfs) { - dev_err(&pf->pdev->dev, - "Invalid VF Identifier %d\n", vf_id); - ret = -EINVAL; + ret = i40e_validate_vf(pf, vf_id); + if (ret) goto error_param; - } - vf = &(pf->vf[vf_id]); + vf = &pf->vf[vf_id]; vsi = pf->vsi[vf->lan_vsi_idx]; /* When the VF is resetting wait until it is done. 
@@ -3873,9 +3934,11 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) mac, vf_id); } - /* Force the VF driver stop so it has to reload with new MAC address */ + /* Force the VF interface down so it has to bring up with new MAC + * address + */ i40e_vc_disable_vf(vf); - dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n"); + dev_info(&pf->pdev->dev, "Bring down and up the VF interface to make this change effective.\n"); error_param: return ret; @@ -3930,11 +3993,9 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id, int ret = 0; /* validate the request */ - if (vf_id >= pf->num_alloc_vfs) { - dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); - ret = -EINVAL; + ret = i40e_validate_vf(pf, vf_id); + if (ret) goto error_pvid; - } if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) { dev_err(&pf->pdev->dev, "Invalid VF Parameters\n"); @@ -3948,7 +4009,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id, goto error_pvid; } - vf = &(pf->vf[vf_id]); + vf = &pf->vf[vf_id]; vsi = pf->vsi[vf->lan_vsi_idx]; if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n", @@ -4068,11 +4129,9 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, int ret = 0; /* validate the request */ - if (vf_id >= pf->num_alloc_vfs) { - dev_err(&pf->pdev->dev, "Invalid VF Identifier %d.\n", vf_id); - ret = -EINVAL; + ret = i40e_validate_vf(pf, vf_id); + if (ret) goto error; - } if (min_tx_rate) { dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for VF %d.\n", @@ -4080,7 +4139,7 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, return -EINVAL; } - vf = &(pf->vf[vf_id]); + vf = &pf->vf[vf_id]; vsi = pf->vsi[vf->lan_vsi_idx]; if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n", @@ -4116,13 +4175,11 @@ int i40e_ndo_get_vf_config(struct net_device *netdev, int ret = 0; /* validate the request */ - if (vf_id >= pf->num_alloc_vfs) { - dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); - ret = -EINVAL; + ret = i40e_validate_vf(pf, vf_id); + if (ret) goto error_param; - } - vf = &(pf->vf[vf_id]); + vf = &pf->vf[vf_id]; /* first vsi is always the LAN vsi */ vsi = pf->vsi[vf->lan_vsi_idx]; if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { @@ -4199,7 +4256,7 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link) vf->link_forced = true; vf->link_up = true; pfe.event_data.link_event.link_status = true; - pfe.event_data.link_event.link_speed = I40E_LINK_SPEED_40GB; + pfe.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_40GB; break; case IFLA_VF_LINK_STATE_DISABLE: vf->link_forced = true; diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c new file mode 100644 index 000000000000..add1e457886d --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c @@ -0,0 +1,967 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2018 Intel Corporation. 
*/
+
+#include <linux/bpf_trace.h>
+#include <net/xdp_sock.h>
+#include <net/xdp.h>
+
+#include "i40e.h"
+#include "i40e_txrx_common.h"
+#include "i40e_xsk.h"
+
+/**
+ * i40e_alloc_xsk_umems - Allocate an array to store per ring UMEMs
+ * @vsi: Current VSI
+ *
+ * Returns 0 on success, <0 on failure
+ **/
+static int i40e_alloc_xsk_umems(struct i40e_vsi *vsi)
+{
+	if (vsi->xsk_umems)
+		return 0;
+
+	vsi->num_xsk_umems_used = 0;
+	vsi->num_xsk_umems = vsi->alloc_queue_pairs;
+	vsi->xsk_umems = kcalloc(vsi->num_xsk_umems, sizeof(*vsi->xsk_umems),
+				 GFP_KERNEL);
+	if (!vsi->xsk_umems) {
+		vsi->num_xsk_umems = 0;
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/**
+ * i40e_add_xsk_umem - Store an UMEM for a certain ring/qid
+ * @vsi: Current VSI
+ * @umem: UMEM to store
+ * @qid: Ring/qid to associate with the UMEM
+ *
+ * Returns 0 on success, <0 on failure
+ **/
+static int i40e_add_xsk_umem(struct i40e_vsi *vsi, struct xdp_umem *umem,
+			     u16 qid)
+{
+	int err;
+
+	err = i40e_alloc_xsk_umems(vsi);
+	if (err)
+		return err;
+
+	vsi->xsk_umems[qid] = umem;
+	vsi->num_xsk_umems_used++;
+
+	return 0;
+}
+
+/**
+ * i40e_remove_xsk_umem - Remove an UMEM for a certain ring/qid
+ * @vsi: Current VSI
+ * @qid: Ring/qid associated with the UMEM
+ **/
+static void i40e_remove_xsk_umem(struct i40e_vsi *vsi, u16 qid)
+{
+	vsi->xsk_umems[qid] = NULL;
+	vsi->num_xsk_umems_used--;
+
+	if (vsi->num_xsk_umems == 0) {
+		kfree(vsi->xsk_umems);
+		vsi->xsk_umems = NULL;
+		vsi->num_xsk_umems = 0;
+	}
+}
+
+/**
+ * i40e_xsk_umem_dma_map - DMA maps all UMEM memory for the netdev
+ * @vsi: Current VSI
+ * @umem: UMEM to DMA map
+ *
+ * Returns 0 on success, <0 on failure
+ **/
+static int i40e_xsk_umem_dma_map(struct i40e_vsi *vsi, struct xdp_umem *umem)
+{
+	struct i40e_pf *pf = vsi->back;
+	struct device *dev;
+	unsigned int i, j;
+	dma_addr_t dma;
+
+	dev = &pf->pdev->dev;
+	for (i = 0; i < umem->npgs; i++) {
+		dma = dma_map_page_attrs(dev, umem->pgs[i], 0, PAGE_SIZE,
+					 DMA_BIDIRECTIONAL, I40E_RX_DMA_ATTR);
+		if (dma_mapping_error(dev, dma))
+			goto out_unmap;
+
+		umem->pages[i].dma = dma;
+	}
+
+	return 0;
+
+out_unmap:
+	for (j = 0; j < i; j++) {
+		dma_unmap_page_attrs(dev, umem->pages[j].dma, PAGE_SIZE,
+				     DMA_BIDIRECTIONAL, I40E_RX_DMA_ATTR);
+		umem->pages[j].dma = 0;
+	}
+
+	return -1;
+}
+
+/**
+ * i40e_xsk_umem_dma_unmap - DMA unmaps all UMEM memory for the netdev
+ * @vsi: Current VSI
+ * @umem: UMEM to DMA unmap
+ **/
+static void i40e_xsk_umem_dma_unmap(struct i40e_vsi *vsi, struct xdp_umem *umem)
+{
+	struct i40e_pf *pf = vsi->back;
+	struct device *dev;
+	unsigned int i;
+
+	dev = &pf->pdev->dev;
+
+	for (i = 0; i < umem->npgs; i++) {
+		dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE,
+				     DMA_BIDIRECTIONAL, I40E_RX_DMA_ATTR);
+
+		umem->pages[i].dma = 0;
+	}
+}
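
The error path in i40e_xsk_umem_dma_map() above unwinds only the pages that were mapped before the failure, indexing with the unwind counter j rather than the failed index i. The same partial-unwind idiom in isolation, as a minimal sketch with hypothetical map()/unmap() helpers (not driver API):

	/* Illustrative partial-unwind pattern; map/unmap are stand-ins. */
	for (i = 0; i < n; i++) {
		if (map(&res[i]))
			goto out_unmap;
	}
	return 0;

out_unmap:
	for (j = 0; j < i; j++)		/* only [0, i) was ever mapped */
		unmap(&res[j]);
	return -1;
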
+
+/**
+ * i40e_xsk_umem_enable - Enable/associate an UMEM to a certain ring/qid
+ * @vsi: Current VSI
+ * @umem: UMEM
+ * @qid: Rx ring to associate UMEM to
+ *
+ * Returns 0 on success, <0 on failure
+ **/
+static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
+				u16 qid)
+{
+	struct xdp_umem_fq_reuse *reuseq;
+	bool if_running;
+	int err;
+
+	if (vsi->type != I40E_VSI_MAIN)
+		return -EINVAL;
+
+	if (qid >= vsi->num_queue_pairs)
+		return -EINVAL;
+
+	if (vsi->xsk_umems) {
+		if (qid >= vsi->num_xsk_umems)
+			return -EINVAL;
+		if (vsi->xsk_umems[qid])
+			return -EBUSY;
+	}
+
+	reuseq = xsk_reuseq_prepare(vsi->rx_rings[0]->count);
+	if (!reuseq)
+		return -ENOMEM;
+
+	xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq));
+
+	err = i40e_xsk_umem_dma_map(vsi, umem);
+	if (err)
+		return err;
+
+	if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);
+
+	if (if_running) {
+		err = i40e_queue_pair_disable(vsi, qid);
+		if (err)
+			return err;
+	}
+
+	err = i40e_add_xsk_umem(vsi, umem, qid);
+	if (err)
+		return err;
+
+	if (if_running) {
+		err = i40e_queue_pair_enable(vsi, qid);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+/**
+ * i40e_xsk_umem_disable - Disassociate an UMEM from a certain ring/qid
+ * @vsi: Current VSI
+ * @qid: Rx ring to disassociate UMEM from
+ *
+ * Returns 0 on success, <0 on failure
+ **/
+static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid)
+{
+	bool if_running;
+	int err;
+
+	if (!vsi->xsk_umems || qid >= vsi->num_xsk_umems ||
+	    !vsi->xsk_umems[qid])
+		return -EINVAL;
+
+	if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);
+
+	if (if_running) {
+		err = i40e_queue_pair_disable(vsi, qid);
+		if (err)
+			return err;
+	}
+
+	i40e_xsk_umem_dma_unmap(vsi, vsi->xsk_umems[qid]);
+	i40e_remove_xsk_umem(vsi, qid);
+
+	if (if_running) {
+		err = i40e_queue_pair_enable(vsi, qid);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+/**
+ * i40e_xsk_umem_query - Queries a certain ring/qid for its UMEM
+ * @vsi: Current VSI
+ * @umem: UMEM associated to the ring, if any
+ * @qid: Rx ring to query
+ *
+ * This function will store, if any, the UMEM associated to a certain ring.
+ *
+ * Returns 0 on success, <0 on failure
+ **/
+int i40e_xsk_umem_query(struct i40e_vsi *vsi, struct xdp_umem **umem,
+			u16 qid)
+{
+	if (vsi->type != I40E_VSI_MAIN)
+		return -EINVAL;
+
+	if (qid >= vsi->num_queue_pairs)
+		return -EINVAL;
+
+	if (vsi->xsk_umems) {
+		if (qid >= vsi->num_xsk_umems)
+			return -EINVAL;
+		*umem = vsi->xsk_umems[qid];
+		return 0;
+	}
+
+	*umem = NULL;
+	return 0;
+}
+
+/**
+ * i40e_xsk_umem_setup - Enable/disable an UMEM for a certain ring/qid
+ * @vsi: Current VSI
+ * @umem: UMEM to enable/associate to a ring, or NULL to disable
+ * @qid: Rx ring to (dis)associate UMEM (from)to
+ *
+ * This function enables or disables an UMEM to a certain ring.
+ *
+ * Returns 0 on success, <0 on failure
+ **/
+int i40e_xsk_umem_setup(struct i40e_vsi *vsi, struct xdp_umem *umem,
+			u16 qid)
+{
+	return umem ? i40e_xsk_umem_enable(vsi, umem, qid) :
+		i40e_xsk_umem_disable(vsi, qid);
+}
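
i40e_xsk_umem_setup() is the driver's entry point for the XDP_SETUP_XSK_UMEM command delivered through ndo_bpf. A reduced sketch of how such a dispatch might look; the handler name is hypothetical (the real routing lives in the driver's ndo_bpf implementation), while struct netdev_bpf does carry the umem and queue id for these commands:

	/* Hedged sketch of the ndo_bpf routing, not the driver's code. */
	static int i40e_xdp_umem_cmd(struct i40e_vsi *vsi, struct netdev_bpf *bpf)
	{
		switch (bpf->command) {
		case XDP_SETUP_XSK_UMEM:
			/* a NULL umem disables zero-copy on that queue */
			return i40e_xsk_umem_setup(vsi, bpf->xsk.umem,
						   bpf->xsk.queue_id);
		case XDP_QUERY_XSK_UMEM:
			return i40e_xsk_umem_query(vsi, &bpf->xsk.umem,
						   bpf->xsk.queue_id);
		default:
			return -EINVAL;
		}
	}
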
+
+/**
+ * i40e_run_xdp_zc - Executes an XDP program on an xdp_buff
+ * @rx_ring: Rx ring
+ * @xdp: xdp_buff used as input to the XDP program
+ *
+ * This function executes the XDP program on the received frame.
+ *
+ * Returns any of I40E_XDP_{PASS, CONSUMED, TX, REDIR}
+ **/
+static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
+{
+	int err, result = I40E_XDP_PASS;
+	struct i40e_ring *xdp_ring;
+	struct bpf_prog *xdp_prog;
+	u32 act;
+
+	rcu_read_lock();
+	/* NB! xdp_prog will always be !NULL, due to the fact that
+	 * this path is enabled by setting an XDP program.
+	 */
+	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
+	act = bpf_prog_run_xdp(xdp_prog, xdp);
+	xdp->handle += xdp->data - xdp->data_hard_start;
+	switch (act) {
+	case XDP_PASS:
+		break;
+	case XDP_TX:
+		xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
+		result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
+		break;
+	case XDP_REDIRECT:
+		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
+		result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
+		break;
+	default:
+		bpf_warn_invalid_xdp_action(act);
+	case XDP_ABORTED:
+		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
+		/* fallthrough -- handle aborts by dropping packet */
+	case XDP_DROP:
+		result = I40E_XDP_CONSUMED;
+		break;
+	}
+	rcu_read_unlock();
+	return result;
+}
+
+/**
+ * i40e_alloc_buffer_zc - Allocates an i40e_rx_buffer
+ * @rx_ring: Rx ring
+ * @bi: Rx buffer to populate
+ *
+ * This function allocates an Rx buffer. The buffer can come from fill
+ * queue, or via the recycle queue (next_to_alloc).
+ *
+ * Returns true for a successful allocation, false otherwise
+ **/
+static bool i40e_alloc_buffer_zc(struct i40e_ring *rx_ring,
+				 struct i40e_rx_buffer *bi)
+{
+	struct xdp_umem *umem = rx_ring->xsk_umem;
+	void *addr = bi->addr;
+	u64 handle, hr;
+
+	if (addr) {
+		rx_ring->rx_stats.page_reuse_count++;
+		return true;
+	}
+
+	if (!xsk_umem_peek_addr(umem, &handle)) {
+		rx_ring->rx_stats.alloc_page_failed++;
+		return false;
+	}
+
+	hr = umem->headroom + XDP_PACKET_HEADROOM;
+
+	bi->dma = xdp_umem_get_dma(umem, handle);
+	bi->dma += hr;
+
+	bi->addr = xdp_umem_get_data(umem, handle);
+	bi->addr += hr;
+
+	bi->handle = handle + umem->headroom;
+
+	xsk_umem_discard_addr(umem);
+	return true;
+}
+
+/**
+ * i40e_alloc_buffer_slow_zc - Allocates an i40e_rx_buffer
+ * @rx_ring: Rx ring
+ * @bi: Rx buffer to populate
+ *
+ * This function allocates an Rx buffer. The buffer can come from fill
+ * queue, or via the reuse queue.
+ *
+ * Returns true for a successful allocation, false otherwise
+ **/
+static bool i40e_alloc_buffer_slow_zc(struct i40e_ring *rx_ring,
+				      struct i40e_rx_buffer *bi)
+{
+	struct xdp_umem *umem = rx_ring->xsk_umem;
+	u64 handle, hr;
+
+	if (!xsk_umem_peek_addr_rq(umem, &handle)) {
+		rx_ring->rx_stats.alloc_page_failed++;
+		return false;
+	}
+
+	handle &= rx_ring->xsk_umem->chunk_mask;
+
+	hr = umem->headroom + XDP_PACKET_HEADROOM;
+
+	bi->dma = xdp_umem_get_dma(umem, handle);
+	bi->dma += hr;
+
+	bi->addr = xdp_umem_get_data(umem, handle);
+	bi->addr += hr;
+
+	bi->handle = handle + umem->headroom;
+
+	xsk_umem_discard_addr_rq(umem);
+	return true;
+}
+
+static __always_inline bool
+__i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count,
+			   bool alloc(struct i40e_ring *rx_ring,
+				      struct i40e_rx_buffer *bi))
+{
+	u16 ntu = rx_ring->next_to_use;
+	union i40e_rx_desc *rx_desc;
+	struct i40e_rx_buffer *bi;
+	bool ok = true;
+
+	rx_desc = I40E_RX_DESC(rx_ring, ntu);
+	bi = &rx_ring->rx_bi[ntu];
+	do {
+		if (!alloc(rx_ring, bi)) {
+			ok = false;
+			goto no_buffers;
+		}
+
+		dma_sync_single_range_for_device(rx_ring->dev, bi->dma, 0,
+						 rx_ring->rx_buf_len,
+						 DMA_BIDIRECTIONAL);
+
+		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
+
+		rx_desc++;
+		bi++;
+		ntu++;
+
+		if (unlikely(ntu == rx_ring->count)) {
+			rx_desc = I40E_RX_DESC(rx_ring, 0);
+			bi = rx_ring->rx_bi;
+			ntu = 0;
+		}
+
+		rx_desc->wb.qword1.status_error_len = 0;
+		count--;
+	} while (count);
+
+no_buffers:
+	if (rx_ring->next_to_use != ntu)
+		i40e_release_rx_desc(rx_ring, ntu);
+
+	return ok;
+}
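
Note the technique in __i40e_alloc_rx_buffers_zc(): the allocator is passed as a function parameter to an __always_inline helper, so each wrapper compiles to a direct, specialized call rather than an indirect one. The pattern in isolation, as a hedged sketch with stand-in fill helpers (assumes the kernel's __always_inline):

	/* Illustrative specialization via __always_inline + function arg. */
	static bool fill_fast(int slot) { return true; }
	static bool fill_slow(int slot) { return true; }

	static __always_inline int fill_ring(int n, bool fill(int slot))
	{
		int i;

		for (i = 0; i < n; i++)
			if (!fill(i))
				break;
		return i;
	}

	/* Each wrapper is compiled as its own direct-call loop. */
	static int fill_ring_fast(int n) { return fill_ring(n, fill_fast); }
	static int fill_ring_slow(int n) { return fill_ring(n, fill_slow); }
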
+
+/**
+ * i40e_alloc_rx_buffers_zc - Allocates a number of Rx buffers
+ * @rx_ring: Rx ring
+ * @count: The number of buffers to allocate
+ *
+ * This function allocates a number of Rx buffers from the reuse queue
+ * or fill ring and places them on the Rx ring.
+ *
+ * Returns true for a successful allocation, false otherwise
+ **/
+bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
+{
+	return __i40e_alloc_rx_buffers_zc(rx_ring, count,
+					  i40e_alloc_buffer_slow_zc);
+}
+
+/**
+ * i40e_alloc_rx_buffers_fast_zc - Allocates a number of Rx buffers
+ * @rx_ring: Rx ring
+ * @count: The number of buffers to allocate
+ *
+ * This function allocates a number of Rx buffers from the fill ring
+ * or the internal recycle mechanism and places them on the Rx ring.
+ *
+ * Returns true for a successful allocation, false otherwise
+ **/
+static bool i40e_alloc_rx_buffers_fast_zc(struct i40e_ring *rx_ring, u16 count)
+{
+	return __i40e_alloc_rx_buffers_zc(rx_ring, count,
+					  i40e_alloc_buffer_zc);
+}
+
+/**
+ * i40e_get_rx_buffer_zc - Return the current Rx buffer
+ * @rx_ring: Rx ring
+ * @size: The size of the rx buffer (read from descriptor)
+ *
+ * This function returns the current, received Rx buffer, and also
+ * does DMA synchronization on it.
+ *
+ * Returns the received Rx buffer
+ **/
+static struct i40e_rx_buffer *i40e_get_rx_buffer_zc(struct i40e_ring *rx_ring,
+						    const unsigned int size)
+{
+	struct i40e_rx_buffer *bi;
+
+	bi = &rx_ring->rx_bi[rx_ring->next_to_clean];
+
+	/* we are reusing so sync this buffer for CPU use */
+	dma_sync_single_range_for_cpu(rx_ring->dev,
+				      bi->dma, 0,
+				      size,
+				      DMA_BIDIRECTIONAL);
+
+	return bi;
+}
+
+/**
+ * i40e_reuse_rx_buffer_zc - Recycle an Rx buffer
+ * @rx_ring: Rx ring
+ * @old_bi: The Rx buffer to recycle
+ *
+ * This function recycles a finished Rx buffer, and places it on the
+ * recycle queue (next_to_alloc).
+ **/
+static void i40e_reuse_rx_buffer_zc(struct i40e_ring *rx_ring,
+				    struct i40e_rx_buffer *old_bi)
+{
+	struct i40e_rx_buffer *new_bi = &rx_ring->rx_bi[rx_ring->next_to_alloc];
+	unsigned long mask = (unsigned long)rx_ring->xsk_umem->chunk_mask;
+	u64 hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM;
+	u16 nta = rx_ring->next_to_alloc;
+
+	/* update, and store next to alloc */
+	nta++;
+	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
+
+	/* transfer page from old buffer to new buffer */
+	new_bi->dma = old_bi->dma & mask;
+	new_bi->dma += hr;
+
+	new_bi->addr = (void *)((unsigned long)old_bi->addr & mask);
+	new_bi->addr += hr;
+
+	new_bi->handle = old_bi->handle & mask;
+	new_bi->handle += rx_ring->xsk_umem->headroom;
+
+	old_bi->addr = NULL;
+}
+
+/**
+ * i40e_zca_free - Free callback for MEM_TYPE_ZERO_COPY allocations
+ * @alloc: Zero-copy allocator
+ * @handle: Buffer handle
+ **/
+void i40e_zca_free(struct zero_copy_allocator *alloc, unsigned long handle)
+{
+	struct i40e_rx_buffer *bi;
+	struct i40e_ring *rx_ring;
+	u64 hr, mask;
+	u16 nta;
+
+	rx_ring = container_of(alloc, struct i40e_ring, zca);
+	hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM;
+	mask = rx_ring->xsk_umem->chunk_mask;
+
+	nta = rx_ring->next_to_alloc;
+	bi = &rx_ring->rx_bi[nta];
+
+	nta++;
+	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
+
+	handle &= mask;
+
+	bi->dma = xdp_umem_get_dma(rx_ring->xsk_umem, handle);
+	bi->dma += hr;
+
+	bi->addr = xdp_umem_get_data(rx_ring->xsk_umem, handle);
+	bi->addr += hr;
+
+	bi->handle = (u64)handle + rx_ring->xsk_umem->headroom;
+}
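
All of the recycling helpers above derive dma, addr and handle from a UMEM chunk handle with the same mask-and-headroom arithmetic. A worked example with assumed numbers (2 KiB chunks, 256 B of UMEM headroom, 256 B of XDP_PACKET_HEADROOM):

	/* Worked example; every value here is assumed for illustration. */
	u64 chunk_mask = ~(2048ULL - 1);	/* align down to a 2 KiB chunk */
	u64 hr = 256 + 256;			/* umem headroom + XDP headroom */
	u64 handle = 0x5180;			/* points inside some chunk */
	u64 base = handle & chunk_mask;		/* 0x5000: chunk start */
	u64 data = base + hr;			/* 0x5200: where frame data begins */
	u64 new_handle = base + 256;		/* 0x5100: chunk start + umem headroom */
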
+
+/**
+ * i40e_construct_skb_zc - Create skbuff from zero-copy Rx buffer
+ * @rx_ring: Rx ring
+ * @bi: Rx buffer
+ * @xdp: xdp_buff
+ *
+ * This function allocates a new skb from a zero-copy Rx buffer.
+ *
+ * Returns the skb, or NULL on failure.
+ **/
+static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring,
+					     struct i40e_rx_buffer *bi,
+					     struct xdp_buff *xdp)
+{
+	unsigned int metasize = xdp->data - xdp->data_meta;
+	unsigned int datasize = xdp->data_end - xdp->data;
+	struct sk_buff *skb;
+
+	/* allocate a skb to store the frags */
+	skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
+			       xdp->data_end - xdp->data_hard_start,
+			       GFP_ATOMIC | __GFP_NOWARN);
+	if (unlikely(!skb))
+		return NULL;
+
+	skb_reserve(skb, xdp->data - xdp->data_hard_start);
+	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
+	if (metasize)
+		skb_metadata_set(skb, metasize);
+
+	i40e_reuse_rx_buffer_zc(rx_ring, bi);
+	return skb;
+}
+
+/**
+ * i40e_inc_ntc - Advance the next_to_clean index
+ * @rx_ring: Rx ring
+ **/
+static void i40e_inc_ntc(struct i40e_ring *rx_ring)
+{
+	u32 ntc = rx_ring->next_to_clean + 1;
+
+	ntc = (ntc < rx_ring->count) ? ntc : 0;
+	rx_ring->next_to_clean = ntc;
+	prefetch(I40E_RX_DESC(rx_ring, ntc));
+}
+
+/**
+ * i40e_clean_rx_irq_zc - Consumes Rx packets from the hardware ring
+ * @rx_ring: Rx ring
+ * @budget: NAPI budget
+ *
+ * Returns amount of work completed
+ **/
+int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
+{
+	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
+	unsigned int xdp_res, xdp_xmit = 0;
+	bool failure = false;
+	struct sk_buff *skb;
+	struct xdp_buff xdp;
+
+	xdp.rxq = &rx_ring->xdp_rxq;
+
+	while (likely(total_rx_packets < (unsigned int)budget)) {
+		struct i40e_rx_buffer *bi;
+		union i40e_rx_desc *rx_desc;
+		unsigned int size;
+		u16 vlan_tag;
+		u8 rx_ptype;
+		u64 qword;
+
+		if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
+			failure = failure ||
+				  !i40e_alloc_rx_buffers_fast_zc(rx_ring,
+								 cleaned_count);
+			cleaned_count = 0;
+		}
+
+		rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
+		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+
+		/* This memory barrier is needed to keep us from reading
+		 * any other fields out of the rx_desc until we have
+		 * verified the descriptor has been written back.
+		 */
+		dma_rmb();
+
+		bi = i40e_clean_programming_status(rx_ring, rx_desc,
+						   qword);
+		if (unlikely(bi)) {
+			i40e_reuse_rx_buffer_zc(rx_ring, bi);
+			cleaned_count++;
+			continue;
+		}
+
+		size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+		       I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+		if (!size)
+			break;
+
+		bi = i40e_get_rx_buffer_zc(rx_ring, size);
+		xdp.data = bi->addr;
+		xdp.data_meta = xdp.data;
+		xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
+		xdp.data_end = xdp.data + size;
+		xdp.handle = bi->handle;
+
+		xdp_res = i40e_run_xdp_zc(rx_ring, &xdp);
+		if (xdp_res) {
+			if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) {
+				xdp_xmit |= xdp_res;
+				bi->addr = NULL;
+			} else {
+				i40e_reuse_rx_buffer_zc(rx_ring, bi);
+			}
+
+			total_rx_bytes += size;
+			total_rx_packets++;
+
+			cleaned_count++;
+			i40e_inc_ntc(rx_ring);
+			continue;
+		}
+
+		/* XDP_PASS path */
+
+		/* NB! We are not checking for errors using
+		 * i40e_test_staterr with
+		 * BIT(I40E_RXD_QW1_ERROR_SHIFT). This is due to that
+		 * SBP is *not* set in PRT_SBPVSI (default not set).
+		 */
+		skb = i40e_construct_skb_zc(rx_ring, bi, &xdp);
+		if (!skb) {
+			rx_ring->rx_stats.alloc_buff_failed++;
+			break;
+		}
+
+		cleaned_count++;
+		i40e_inc_ntc(rx_ring);
+
+		if (eth_skb_pad(skb))
+			continue;
+
+		total_rx_bytes += skb->len;
+		total_rx_packets++;
+
+		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+		rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
+			   I40E_RXD_QW1_PTYPE_SHIFT;
+		i40e_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
+
+		vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
+			   le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
+		i40e_receive_skb(rx_ring, skb, vlan_tag);
+	}
+
+	i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
+	i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);
+	return failure ? budget : (int)total_rx_packets;
+}
+
+/**
+ * i40e_xmit_zc - Performs zero-copy AF_XDP Tx
+ * @xdp_ring: XDP Tx ring
+ * @budget: NAPI budget
+ *
+ * Returns true if the work is finished.
+ **/
+static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
+{
+	struct i40e_tx_desc *tx_desc = NULL;
+	struct i40e_tx_buffer *tx_bi;
+	bool work_done = true;
+	dma_addr_t dma;
+	u32 len;
+
+	while (budget-- > 0) {
+		if (unlikely(!I40E_DESC_UNUSED(xdp_ring))) {
+			xdp_ring->tx_stats.tx_busy++;
+			work_done = false;
+			break;
+		}
+
+		if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &dma, &len))
+			break;
+
+		dma_sync_single_for_device(xdp_ring->dev, dma, len,
+					   DMA_BIDIRECTIONAL);
+
+		tx_bi = &xdp_ring->tx_bi[xdp_ring->next_to_use];
+		tx_bi->bytecount = len;
+
+		tx_desc = I40E_TX_DESC(xdp_ring, xdp_ring->next_to_use);
+		tx_desc->buffer_addr = cpu_to_le64(dma);
+		tx_desc->cmd_type_offset_bsz =
+			build_ctob(I40E_TX_DESC_CMD_ICRC
+				   | I40E_TX_DESC_CMD_EOP,
+				   0, len, 0);
+
+		xdp_ring->next_to_use++;
+		if (xdp_ring->next_to_use == xdp_ring->count)
+			xdp_ring->next_to_use = 0;
+	}
+
+	if (tx_desc) {
+		/* Request an interrupt for the last frame and bump tail ptr. */
+		tx_desc->cmd_type_offset_bsz |= (I40E_TX_DESC_CMD_RS <<
+						 I40E_TXD_QW1_CMD_SHIFT);
+		i40e_xdp_ring_update_tail(xdp_ring);
+
+		xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
+	}
+
+	return !!budget && work_done;
+}
+
+/**
+ * i40e_clean_xdp_tx_buffer - Frees and unmaps an XDP Tx entry
+ * @tx_ring: XDP Tx ring
+ * @tx_bi: Tx buffer info to clean
+ **/
+static void i40e_clean_xdp_tx_buffer(struct i40e_ring *tx_ring,
+				     struct i40e_tx_buffer *tx_bi)
+{
+	xdp_return_frame(tx_bi->xdpf);
+	dma_unmap_single(tx_ring->dev,
+			 dma_unmap_addr(tx_bi, dma),
+			 dma_unmap_len(tx_bi, len), DMA_TO_DEVICE);
+	dma_unmap_len_set(tx_bi, len, 0);
+}
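
i40e_xmit_zc() above sets the RS (report status) bit only on the final descriptor of the batch and writes the tail register once, so a single doorbell and a single writeback cover the whole burst. The batching skeleton in isolation; the type and flag value below are illustrative stand-ins, not driver API:

	/* Hedged sketch of the one-RS-per-batch idiom. */
	struct tx_desc { u64 cmd; };
	#define CMD_RS (1ULL << 5)	/* made-up flag position */

	static void xmit_batch(struct tx_desc *ring, int n)
	{
		struct tx_desc *last = NULL;
		int i;

		for (i = 0; i < n; i++) {
			ring[i].cmd = 0;	/* addr/len/EOP setup elided */
			last = &ring[i];
		}
		if (last) {
			last->cmd |= CMD_RS;	/* one writeback for the batch */
			/* tail doorbell write would go here */
		}
	}
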
+
+/**
+ * i40e_clean_xdp_tx_irq - Completes AF_XDP entries, and cleans XDP entries
+ * @vsi: Current VSI
+ * @tx_ring: XDP Tx ring
+ * @napi_budget: NAPI budget
+ *
+ * Returns true if cleanup/transmission is done.
+ **/
+bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi,
+			   struct i40e_ring *tx_ring, int napi_budget)
+{
+	unsigned int ntc, total_bytes = 0, budget = vsi->work_limit;
+	u32 i, completed_frames, frames_ready, xsk_frames = 0;
+	struct xdp_umem *umem = tx_ring->xsk_umem;
+	u32 head_idx = i40e_get_head(tx_ring);
+	bool work_done = true, xmit_done;
+	struct i40e_tx_buffer *tx_bi;
+
+	if (head_idx < tx_ring->next_to_clean)
+		head_idx += tx_ring->count;
+	frames_ready = head_idx - tx_ring->next_to_clean;
+
+	if (frames_ready == 0) {
+		goto out_xmit;
+	} else if (frames_ready > budget) {
+		completed_frames = budget;
+		work_done = false;
+	} else {
+		completed_frames = frames_ready;
+	}
+
+	ntc = tx_ring->next_to_clean;
+
+	for (i = 0; i < completed_frames; i++) {
+		tx_bi = &tx_ring->tx_bi[ntc];
+
+		if (tx_bi->xdpf)
+			i40e_clean_xdp_tx_buffer(tx_ring, tx_bi);
+		else
+			xsk_frames++;
+
+		tx_bi->xdpf = NULL;
+		total_bytes += tx_bi->bytecount;
+
+		if (++ntc >= tx_ring->count)
+			ntc = 0;
+	}
+
+	tx_ring->next_to_clean += completed_frames;
+	if (unlikely(tx_ring->next_to_clean >= tx_ring->count))
+		tx_ring->next_to_clean -= tx_ring->count;
+
+	if (xsk_frames)
+		xsk_umem_complete_tx(umem, xsk_frames);
+
+	i40e_arm_wb(tx_ring, vsi, budget);
+	i40e_update_tx_stats(tx_ring, completed_frames, total_bytes);
+
+out_xmit:
+	xmit_done = i40e_xmit_zc(tx_ring, budget);
+
+	return work_done && xmit_done;
+}
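
The completion logic above reads a head-writeback index rather than per-descriptor DD bits, and handles ring wrap by temporarily extending the index space before subtracting. With assumed numbers for a ring of 512 descriptors:

	/* Worked example of the wrap handling; values are illustrative. */
	u32 count = 512, next_to_clean = 500;
	u32 head_idx = 4;			/* hardware wrapped past the end */

	if (head_idx < next_to_clean)
		head_idx += count;		/* 4 -> 516 */
	/* frames_ready = 516 - 500 = 16 descriptors completed */
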
+
+/**
+ * i40e_xsk_async_xmit - Implements the ndo_xsk_async_xmit
+ * @dev: the netdevice
+ * @queue_id: queue id to wake up
+ *
+ * Returns <0 for errors, 0 otherwise.
+ **/
+int i40e_xsk_async_xmit(struct net_device *dev, u32 queue_id)
+{
+	struct i40e_netdev_priv *np = netdev_priv(dev);
+	struct i40e_vsi *vsi = np->vsi;
+	struct i40e_ring *ring;
+
+	if (test_bit(__I40E_VSI_DOWN, vsi->state))
+		return -ENETDOWN;
+
+	if (!i40e_enabled_xdp_vsi(vsi))
+		return -ENXIO;
+
+	if (queue_id >= vsi->num_queue_pairs)
+		return -ENXIO;
+
+	if (!vsi->xdp_rings[queue_id]->xsk_umem)
+		return -ENXIO;
+
+	ring = vsi->xdp_rings[queue_id];
+
+	/* The idea here is that if NAPI is running, mark a miss, so
+	 * it will run again. If not, trigger an interrupt and
+	 * schedule the NAPI from interrupt context. If NAPI would be
+	 * scheduled here, the interrupt affinity would not be
+	 * honored.
+	 */
+	if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi))
+		i40e_force_wb(vsi, ring->q_vector);
+
+	return 0;
+}
+
+/**
+ * i40e_xsk_clean_rx_ring - Clean the Rx ring on shutdown
+ * @rx_ring: Rx ring
+ **/
+void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring)
+{
+	u16 i;
+
+	for (i = 0; i < rx_ring->count; i++) {
+		struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
+
+		if (!rx_bi->addr)
+			continue;
+
+		xsk_umem_fq_reuse(rx_ring->xsk_umem, rx_bi->handle);
+		rx_bi->addr = NULL;
+	}
+}
+
+/**
+ * i40e_xsk_clean_tx_ring - Clean the XDP Tx ring on shutdown
+ * @tx_ring: XDP Tx ring
+ **/
+void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring)
+{
+	u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
+	struct xdp_umem *umem = tx_ring->xsk_umem;
+	struct i40e_tx_buffer *tx_bi;
+	u32 xsk_frames = 0;
+
+	while (ntc != ntu) {
+		tx_bi = &tx_ring->tx_bi[ntc];
+
+		if (tx_bi->xdpf)
+			i40e_clean_xdp_tx_buffer(tx_ring, tx_bi);
+		else
+			xsk_frames++;
+
+		tx_bi->xdpf = NULL;
+
+		ntc++;
+		if (ntc >= tx_ring->count)
+			ntc = 0;
+	}
+
+	if (xsk_frames)
+		xsk_umem_complete_tx(umem, xsk_frames);
+}
+
+/**
+ * i40e_xsk_any_rx_ring_enabled - Checks if Rx rings have AF_XDP UMEM attached
+ * @vsi: Current VSI
+ *
+ * Returns true if any of the Rx rings has an AF_XDP UMEM attached
+ **/
+bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi)
+{
+	int i;
+
+	if (!vsi->xsk_umems)
+		return false;
+
+	for (i = 0; i < vsi->num_queue_pairs; i++) {
+		if (vsi->xsk_umems[i])
+			return true;
+	}
+
+	return false;
+}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.h b/drivers/net/ethernet/intel/i40e/i40e_xsk.h
new file mode 100644
index 000000000000..9038c5d5cf08
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2018 Intel Corporation. */
+
+#ifndef _I40E_XSK_H_
+#define _I40E_XSK_H_
+
+struct i40e_vsi;
+struct xdp_umem;
+struct zero_copy_allocator;
+
+int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair);
+int i40e_queue_pair_enable(struct i40e_vsi *vsi, int queue_pair);
+int i40e_xsk_umem_query(struct i40e_vsi *vsi, struct xdp_umem **umem,
+			u16 qid);
+int i40e_xsk_umem_setup(struct i40e_vsi *vsi, struct xdp_umem *umem,
+			u16 qid);
+void i40e_zca_free(struct zero_copy_allocator *alloc, unsigned long handle);
+bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 cleaned_count);
+int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget);
+
+bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi,
+			   struct i40e_ring *tx_ring, int napi_budget);
+int i40e_xsk_async_xmit(struct net_device *dev, u32 queue_id);
+
+#endif /* _I40E_XSK_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/Makefile b/drivers/net/ethernet/intel/i40evf/Makefile
deleted file mode 100644
index 3c5c6e962280..000000000000
--- a/drivers/net/ethernet/intel/i40evf/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-# Copyright(c) 2013 - 2018 Intel Corporation.
-
-#
-## Makefile for the Intel(R) 40GbE VF driver
-#
-#
-
-ccflags-y += -I$(src)
-subdir-ccflags-y += -I$(src)
-
-obj-$(CONFIG_I40EVF) += i40evf.o
-
-i40evf-objs :=	i40evf_main.o i40evf_ethtool.o i40evf_virtchnl.o \
-		i40e_txrx.o i40e_common.o i40e_adminq.o i40evf_client.o
-
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
deleted file mode 100644
index 5fd8529465d4..000000000000
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ /dev/null
@@ -1,2717 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2013 - 2018 Intel Corporation.
*/ - -#ifndef _I40E_ADMINQ_CMD_H_ -#define _I40E_ADMINQ_CMD_H_ - -/* This header file defines the i40e Admin Queue commands and is shared between - * i40e Firmware and Software. - * - * This file needs to comply with the Linux Kernel coding style. - */ - -#define I40E_FW_API_VERSION_MAJOR 0x0001 -#define I40E_FW_API_VERSION_MINOR_X722 0x0005 -#define I40E_FW_API_VERSION_MINOR_X710 0x0007 - -#define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? \ - I40E_FW_API_VERSION_MINOR_X710 : \ - I40E_FW_API_VERSION_MINOR_X722) - -/* API version 1.7 implements additional link and PHY-specific APIs */ -#define I40E_MINOR_VER_GET_LINK_INFO_XL710 0x0007 - -struct i40e_aq_desc { - __le16 flags; - __le16 opcode; - __le16 datalen; - __le16 retval; - __le32 cookie_high; - __le32 cookie_low; - union { - struct { - __le32 param0; - __le32 param1; - __le32 param2; - __le32 param3; - } internal; - struct { - __le32 param0; - __le32 param1; - __le32 addr_high; - __le32 addr_low; - } external; - u8 raw[16]; - } params; -}; - -/* Flags sub-structure - * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 | - * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE | - */ - -/* command flags and offsets*/ -#define I40E_AQ_FLAG_DD_SHIFT 0 -#define I40E_AQ_FLAG_CMP_SHIFT 1 -#define I40E_AQ_FLAG_ERR_SHIFT 2 -#define I40E_AQ_FLAG_VFE_SHIFT 3 -#define I40E_AQ_FLAG_LB_SHIFT 9 -#define I40E_AQ_FLAG_RD_SHIFT 10 -#define I40E_AQ_FLAG_VFC_SHIFT 11 -#define I40E_AQ_FLAG_BUF_SHIFT 12 -#define I40E_AQ_FLAG_SI_SHIFT 13 -#define I40E_AQ_FLAG_EI_SHIFT 14 -#define I40E_AQ_FLAG_FE_SHIFT 15 - -#define I40E_AQ_FLAG_DD BIT(I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */ -#define I40E_AQ_FLAG_CMP BIT(I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */ -#define I40E_AQ_FLAG_ERR BIT(I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */ -#define I40E_AQ_FLAG_VFE BIT(I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */ -#define I40E_AQ_FLAG_LB BIT(I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */ -#define I40E_AQ_FLAG_RD BIT(I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */ -#define I40E_AQ_FLAG_VFC BIT(I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */ -#define I40E_AQ_FLAG_BUF BIT(I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */ -#define I40E_AQ_FLAG_SI BIT(I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */ -#define I40E_AQ_FLAG_EI BIT(I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */ -#define I40E_AQ_FLAG_FE BIT(I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */ - -/* error codes */ -enum i40e_admin_queue_err { - I40E_AQ_RC_OK = 0, /* success */ - I40E_AQ_RC_EPERM = 1, /* Operation not permitted */ - I40E_AQ_RC_ENOENT = 2, /* No such element */ - I40E_AQ_RC_ESRCH = 3, /* Bad opcode */ - I40E_AQ_RC_EINTR = 4, /* operation interrupted */ - I40E_AQ_RC_EIO = 5, /* I/O error */ - I40E_AQ_RC_ENXIO = 6, /* No such resource */ - I40E_AQ_RC_E2BIG = 7, /* Arg too long */ - I40E_AQ_RC_EAGAIN = 8, /* Try again */ - I40E_AQ_RC_ENOMEM = 9, /* Out of memory */ - I40E_AQ_RC_EACCES = 10, /* Permission denied */ - I40E_AQ_RC_EFAULT = 11, /* Bad address */ - I40E_AQ_RC_EBUSY = 12, /* Device or resource busy */ - I40E_AQ_RC_EEXIST = 13, /* object already exists */ - I40E_AQ_RC_EINVAL = 14, /* Invalid argument */ - I40E_AQ_RC_ENOTTY = 15, /* Not a typewriter */ - I40E_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */ - I40E_AQ_RC_ENOSYS = 17, /* Function not implemented */ - I40E_AQ_RC_ERANGE = 18, /* Parameter out of range */ - I40E_AQ_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */ - I40E_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */ - I40E_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */ - I40E_AQ_RC_EFBIG = 22, /* File too large */ -}; - -/* Admin Queue 
-/* Admin Queue command opcodes */
-enum i40e_admin_queue_opc {
- /* aq commands */
- i40e_aqc_opc_get_version = 0x0001,
- i40e_aqc_opc_driver_version = 0x0002,
- i40e_aqc_opc_queue_shutdown = 0x0003,
- i40e_aqc_opc_set_pf_context = 0x0004,
-
- /* resource ownership */
- i40e_aqc_opc_request_resource = 0x0008,
- i40e_aqc_opc_release_resource = 0x0009,
-
- i40e_aqc_opc_list_func_capabilities = 0x000A,
- i40e_aqc_opc_list_dev_capabilities = 0x000B,
-
- /* Proxy commands */
- i40e_aqc_opc_set_proxy_config = 0x0104,
- i40e_aqc_opc_set_ns_proxy_table_entry = 0x0105,
-
- /* LAA */
- i40e_aqc_opc_mac_address_read = 0x0107,
- i40e_aqc_opc_mac_address_write = 0x0108,
-
- /* PXE */
- i40e_aqc_opc_clear_pxe_mode = 0x0110,
-
- /* WoL commands */
- i40e_aqc_opc_set_wol_filter = 0x0120,
- i40e_aqc_opc_get_wake_reason = 0x0121,
-
- /* internal switch commands */
- i40e_aqc_opc_get_switch_config = 0x0200,
- i40e_aqc_opc_add_statistics = 0x0201,
- i40e_aqc_opc_remove_statistics = 0x0202,
- i40e_aqc_opc_set_port_parameters = 0x0203,
- i40e_aqc_opc_get_switch_resource_alloc = 0x0204,
- i40e_aqc_opc_set_switch_config = 0x0205,
- i40e_aqc_opc_rx_ctl_reg_read = 0x0206,
- i40e_aqc_opc_rx_ctl_reg_write = 0x0207,
-
- i40e_aqc_opc_add_vsi = 0x0210,
- i40e_aqc_opc_update_vsi_parameters = 0x0211,
- i40e_aqc_opc_get_vsi_parameters = 0x0212,
-
- i40e_aqc_opc_add_pv = 0x0220,
- i40e_aqc_opc_update_pv_parameters = 0x0221,
- i40e_aqc_opc_get_pv_parameters = 0x0222,
-
- i40e_aqc_opc_add_veb = 0x0230,
- i40e_aqc_opc_update_veb_parameters = 0x0231,
- i40e_aqc_opc_get_veb_parameters = 0x0232,
-
- i40e_aqc_opc_delete_element = 0x0243,
-
- i40e_aqc_opc_add_macvlan = 0x0250,
- i40e_aqc_opc_remove_macvlan = 0x0251,
- i40e_aqc_opc_add_vlan = 0x0252,
- i40e_aqc_opc_remove_vlan = 0x0253,
- i40e_aqc_opc_set_vsi_promiscuous_modes = 0x0254,
- i40e_aqc_opc_add_tag = 0x0255,
- i40e_aqc_opc_remove_tag = 0x0256,
- i40e_aqc_opc_add_multicast_etag = 0x0257,
- i40e_aqc_opc_remove_multicast_etag = 0x0258,
- i40e_aqc_opc_update_tag = 0x0259,
- i40e_aqc_opc_add_control_packet_filter = 0x025A,
- i40e_aqc_opc_remove_control_packet_filter = 0x025B,
- i40e_aqc_opc_add_cloud_filters = 0x025C,
- i40e_aqc_opc_remove_cloud_filters = 0x025D,
- i40e_aqc_opc_clear_wol_switch_filters = 0x025E,
-
- i40e_aqc_opc_add_mirror_rule = 0x0260,
- i40e_aqc_opc_delete_mirror_rule = 0x0261,
-
- /* Dynamic Device Personalization */
- i40e_aqc_opc_write_personalization_profile = 0x0270,
- i40e_aqc_opc_get_personalization_profile_list = 0x0271,
-
- /* DCB commands */
- i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
- i40e_aqc_opc_dcb_updated = 0x0302,
- i40e_aqc_opc_set_dcb_parameters = 0x0303,
-
- /* TX scheduler */
- i40e_aqc_opc_configure_vsi_bw_limit = 0x0400,
- i40e_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406,
- i40e_aqc_opc_configure_vsi_tc_bw = 0x0407,
- i40e_aqc_opc_query_vsi_bw_config = 0x0408,
- i40e_aqc_opc_query_vsi_ets_sla_config = 0x040A,
- i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410,
-
- i40e_aqc_opc_enable_switching_comp_ets = 0x0413,
- i40e_aqc_opc_modify_switching_comp_ets = 0x0414,
- i40e_aqc_opc_disable_switching_comp_ets = 0x0415,
- i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416,
- i40e_aqc_opc_configure_switching_comp_bw_config = 0x0417,
- i40e_aqc_opc_query_switching_comp_ets_config = 0x0418,
- i40e_aqc_opc_query_port_ets_config = 0x0419,
- i40e_aqc_opc_query_switching_comp_bw_config = 0x041A,
- i40e_aqc_opc_suspend_port_tx = 0x041B,
- i40e_aqc_opc_resume_port_tx = 0x041C,
- i40e_aqc_opc_configure_partition_bw = 0x041D,
- /* hmc */
- i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
- i40e_aqc_opc_set_hmc_resource_profile = 0x0501,
-
- /* phy commands*/
- i40e_aqc_opc_get_phy_abilities = 0x0600,
- i40e_aqc_opc_set_phy_config = 0x0601,
- i40e_aqc_opc_set_mac_config = 0x0603,
- i40e_aqc_opc_set_link_restart_an = 0x0605,
- i40e_aqc_opc_get_link_status = 0x0607,
- i40e_aqc_opc_set_phy_int_mask = 0x0613,
- i40e_aqc_opc_get_local_advt_reg = 0x0614,
- i40e_aqc_opc_set_local_advt_reg = 0x0615,
- i40e_aqc_opc_get_partner_advt = 0x0616,
- i40e_aqc_opc_set_lb_modes = 0x0618,
- i40e_aqc_opc_get_phy_wol_caps = 0x0621,
- i40e_aqc_opc_set_phy_debug = 0x0622,
- i40e_aqc_opc_upload_ext_phy_fm = 0x0625,
- i40e_aqc_opc_run_phy_activity = 0x0626,
- i40e_aqc_opc_set_phy_register = 0x0628,
- i40e_aqc_opc_get_phy_register = 0x0629,
-
- /* NVM commands */
- i40e_aqc_opc_nvm_read = 0x0701,
- i40e_aqc_opc_nvm_erase = 0x0702,
- i40e_aqc_opc_nvm_update = 0x0703,
- i40e_aqc_opc_nvm_config_read = 0x0704,
- i40e_aqc_opc_nvm_config_write = 0x0705,
- i40e_aqc_opc_oem_post_update = 0x0720,
- i40e_aqc_opc_thermal_sensor = 0x0721,
-
- /* virtualization commands */
- i40e_aqc_opc_send_msg_to_pf = 0x0801,
- i40e_aqc_opc_send_msg_to_vf = 0x0802,
- i40e_aqc_opc_send_msg_to_peer = 0x0803,
-
- /* alternate structure */
- i40e_aqc_opc_alternate_write = 0x0900,
- i40e_aqc_opc_alternate_write_indirect = 0x0901,
- i40e_aqc_opc_alternate_read = 0x0902,
- i40e_aqc_opc_alternate_read_indirect = 0x0903,
- i40e_aqc_opc_alternate_write_done = 0x0904,
- i40e_aqc_opc_alternate_set_mode = 0x0905,
- i40e_aqc_opc_alternate_clear_port = 0x0906,
-
- /* LLDP commands */
- i40e_aqc_opc_lldp_get_mib = 0x0A00,
- i40e_aqc_opc_lldp_update_mib = 0x0A01,
- i40e_aqc_opc_lldp_add_tlv = 0x0A02,
- i40e_aqc_opc_lldp_update_tlv = 0x0A03,
- i40e_aqc_opc_lldp_delete_tlv = 0x0A04,
- i40e_aqc_opc_lldp_stop = 0x0A05,
- i40e_aqc_opc_lldp_start = 0x0A06,
-
- /* Tunnel commands */
- i40e_aqc_opc_add_udp_tunnel = 0x0B00,
- i40e_aqc_opc_del_udp_tunnel = 0x0B01,
- i40e_aqc_opc_set_rss_key = 0x0B02,
- i40e_aqc_opc_set_rss_lut = 0x0B03,
- i40e_aqc_opc_get_rss_key = 0x0B04,
- i40e_aqc_opc_get_rss_lut = 0x0B05,
-
- /* Async Events */
- i40e_aqc_opc_event_lan_overflow = 0x1001,
-
- /* OEM commands */
- i40e_aqc_opc_oem_parameter_change = 0xFE00,
- i40e_aqc_opc_oem_device_status_change = 0xFE01,
- i40e_aqc_opc_oem_ocsd_initialize = 0xFE02,
- i40e_aqc_opc_oem_ocbb_initialize = 0xFE03,
-
- /* debug commands */
- i40e_aqc_opc_debug_read_reg = 0xFF03,
- i40e_aqc_opc_debug_write_reg = 0xFF04,
- i40e_aqc_opc_debug_modify_reg = 0xFF07,
- i40e_aqc_opc_debug_dump_internals = 0xFF08,
-};
-
-/* command structures and indirect data structures */
-
-/* Structure naming conventions:
- * - no suffix for direct command descriptor structures
- * - _data for indirect sent data
- * - _resp for indirect return data (data which is both will use _data)
- * - _completion for direct return data
- * - _element_ for repeated elements (may also be _data or _resp)
- *
- * Command structures are expected to overlay the params.raw member of the basic
- * descriptor, and as such cannot exceed 16 bytes in length.
- */
-
-/* This macro is used to generate a compilation error if a structure
- * is not exactly the correct length. It gives a divide by zero error if the
- * structure is not of the correct size, otherwise it creates an enum that is
- * never used.
- */
-#define I40E_CHECK_STRUCT_LEN(n, X) enum i40e_static_assert_enum_##X \
- { i40e_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
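The divide-by-zero trick above is worth spelling out: when the size matches, the macro expands to a harmless, never-used enum; when it does not, the initializer divides by zero in a constant expression and compilation fails. A hypothetical example (struct name invented):

    struct demo { __le32 val; }; /* hypothetical 4-byte structure */
    I40E_CHECK_STRUCT_LEN(4, demo); /* compiles: sizeof(struct demo) == 4 */
    /* I40E_CHECK_STRUCT_LEN(8, demo); would fail to compile: the (n)/(...)
     * initializer becomes a division by zero */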
-
-/* This macro is used extensively to ensure that command structures are 16
- * bytes in length as they have to map to the raw array of that size.
- */
-#define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X)
-
-/* internal (0x00XX) commands */
-
-/* Get version (direct 0x0001) */
-struct i40e_aqc_get_version {
- __le32 rom_ver;
- __le32 fw_build;
- __le16 fw_major;
- __le16 fw_minor;
- __le16 api_major;
- __le16 api_minor;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version);
-
-/* Send driver version (indirect 0x0002) */
-struct i40e_aqc_driver_version {
- u8 driver_major_ver;
- u8 driver_minor_ver;
- u8 driver_build_ver;
- u8 driver_subbuild_ver;
- u8 reserved[4];
- __le32 address_high;
- __le32 address_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_driver_version);
-
-/* Queue Shutdown (direct 0x0003) */
-struct i40e_aqc_queue_shutdown {
- __le32 driver_unloading;
-#define I40E_AQ_DRIVER_UNLOADING 0x1
- u8 reserved[12];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown);
-
-/* Set PF context (0x0004, direct) */
-struct i40e_aqc_set_pf_context {
- u8 pf_id;
- u8 reserved[15];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_set_pf_context);
-
-/* Request resource ownership (direct 0x0008)
- * Release resource ownership (direct 0x0009)
- */
-#define I40E_AQ_RESOURCE_NVM 1
-#define I40E_AQ_RESOURCE_SDP 2
-#define I40E_AQ_RESOURCE_ACCESS_READ 1
-#define I40E_AQ_RESOURCE_ACCESS_WRITE 2
-#define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT 3000
-#define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT 180000
-
-struct i40e_aqc_request_resource {
- __le16 resource_id;
- __le16 access_type;
- __le32 timeout;
- __le32 resource_number;
- u8 reserved[4];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource);
-
-/* Get function capabilities (indirect 0x000A)
- * Get device capabilities (indirect 0x000B)
- */
-struct i40e_aqc_list_capabilites {
- u8 command_flags;
-#define I40E_AQ_LIST_CAP_PF_INDEX_EN 1
- u8 pf_index;
- u8 reserved[2];
- __le32 count;
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_list_capabilites);
-
-struct i40e_aqc_list_capabilities_element_resp {
- __le16 id;
- u8 major_rev;
- u8 minor_rev;
- __le32 number;
- __le32 logical_id;
- __le32 phys_id;
- u8 reserved[16];
-};
-
-/* list of caps */
-
-#define I40E_AQ_CAP_ID_SWITCH_MODE 0x0001
-#define I40E_AQ_CAP_ID_MNG_MODE 0x0002
-#define I40E_AQ_CAP_ID_NPAR_ACTIVE 0x0003
-#define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004
-#define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005
-#define I40E_AQ_CAP_ID_ALTERNATE_RAM 0x0006
-#define I40E_AQ_CAP_ID_WOL_AND_PROXY 0x0008
-#define I40E_AQ_CAP_ID_SRIOV 0x0012
-#define I40E_AQ_CAP_ID_VF 0x0013
-#define I40E_AQ_CAP_ID_VMDQ 0x0014
-#define I40E_AQ_CAP_ID_8021QBG 0x0015
-#define I40E_AQ_CAP_ID_8021QBR 0x0016
-#define I40E_AQ_CAP_ID_VSI 0x0017
-#define I40E_AQ_CAP_ID_DCB 0x0018
-#define I40E_AQ_CAP_ID_FCOE 0x0021
-#define I40E_AQ_CAP_ID_ISCSI 0x0022
-#define I40E_AQ_CAP_ID_RSS 0x0040
-#define I40E_AQ_CAP_ID_RXQ 0x0041
-#define I40E_AQ_CAP_ID_TXQ 0x0042
-#define I40E_AQ_CAP_ID_MSIX 0x0043
-#define I40E_AQ_CAP_ID_VF_MSIX 0x0044
-#define I40E_AQ_CAP_ID_FLOW_DIRECTOR 0x0045
-#define I40E_AQ_CAP_ID_1588 0x0046
-#define I40E_AQ_CAP_ID_IWARP 0x0051
-#define I40E_AQ_CAP_ID_LED 0x0061
-#define I40E_AQ_CAP_ID_SDP 0x0062
-#define I40E_AQ_CAP_ID_MDIO 0x0063
-#define I40E_AQ_CAP_ID_WSR_PROT 0x0064
-#define I40E_AQ_CAP_ID_NVM_MGMT 0x0080
-#define I40E_AQ_CAP_ID_FLEX10 0x00F1
-#define I40E_AQ_CAP_ID_CEM 0x00F2
-
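A sketch (not from this patch) of how the indirect capability response might be consumed; 'buf' and 'count' are assumed to come from a completed list_dev_capabilities command:

    struct i40e_aqc_list_capabilities_element_resp *cap = buf;
    u32 i, num_msix = 0;

    for (i = 0; i < count; i++, cap++)
        if (le16_to_cpu(cap->id) == I40E_AQ_CAP_ID_MSIX)
            num_msix = le32_to_cpu(cap->number);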
-/* Set CPPM Configuration (direct 0x0103) */
-struct i40e_aqc_cppm_configuration {
- __le16 command_flags;
-#define I40E_AQ_CPPM_EN_LTRC 0x0800
-#define I40E_AQ_CPPM_EN_DMCTH 0x1000
-#define I40E_AQ_CPPM_EN_DMCTLX 0x2000
-#define I40E_AQ_CPPM_EN_HPTC 0x4000
-#define I40E_AQ_CPPM_EN_DMARC 0x8000
- __le16 ttlx;
- __le32 dmacr;
- __le16 dmcth;
- u8 hptc;
- u8 reserved;
- __le32 pfltrc;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration);
-
-/* Set ARP Proxy command / response (indirect 0x0104) */
-struct i40e_aqc_arp_proxy_data {
- __le16 command_flags;
-#define I40E_AQ_ARP_INIT_IPV4 0x0800
-#define I40E_AQ_ARP_UNSUP_CTL 0x1000
-#define I40E_AQ_ARP_ENA 0x2000
-#define I40E_AQ_ARP_ADD_IPV4 0x4000
-#define I40E_AQ_ARP_DEL_IPV4 0x8000
- __le16 table_id;
- __le32 enabled_offloads;
-#define I40E_AQ_ARP_DIRECTED_OFFLOAD_ENABLE 0x00000020
-#define I40E_AQ_ARP_OFFLOAD_ENABLE 0x00000800
- __le32 ip_addr;
- u8 mac_addr[6];
- u8 reserved[2];
-};
-
-I40E_CHECK_STRUCT_LEN(0x14, i40e_aqc_arp_proxy_data);
-
-/* Set NS Proxy Table Entry Command (indirect 0x0105) */
-struct i40e_aqc_ns_proxy_data {
- __le16 table_idx_mac_addr_0;
- __le16 table_idx_mac_addr_1;
- __le16 table_idx_ipv6_0;
- __le16 table_idx_ipv6_1;
- __le16 control;
-#define I40E_AQ_NS_PROXY_ADD_0 0x0001
-#define I40E_AQ_NS_PROXY_DEL_0 0x0002
-#define I40E_AQ_NS_PROXY_ADD_1 0x0004
-#define I40E_AQ_NS_PROXY_DEL_1 0x0008
-#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x0010
-#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x0020
-#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x0040
-#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x0080
-#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0100
-#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0200
-#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0400
-#define I40E_AQ_NS_PROXY_OFFLOAD_ENABLE 0x0800
-#define I40E_AQ_NS_PROXY_DIRECTED_OFFLOAD_ENABLE 0x1000
- u8 mac_addr_0[6];
- u8 mac_addr_1[6];
- u8 local_mac_addr[6];
- u8 ipv6_addr_0[16]; /* Warning! spec specifies BE byte order */
- u8 ipv6_addr_1[16];
-};
-
-I40E_CHECK_STRUCT_LEN(0x3c, i40e_aqc_ns_proxy_data);
-
-/* Manage LAA Command (0x0106) - obsolete */
-struct i40e_aqc_mng_laa {
- __le16 command_flags;
-#define I40E_AQ_LAA_FLAG_WR 0x8000
- u8 reserved[2];
- __le32 sal;
- __le16 sah;
- u8 reserved2[6];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_mng_laa);
-
-/* Manage MAC Address Read Command (indirect 0x0107) */
-struct i40e_aqc_mac_address_read {
- __le16 command_flags;
-#define I40E_AQC_LAN_ADDR_VALID 0x10
-#define I40E_AQC_SAN_ADDR_VALID 0x20
-#define I40E_AQC_PORT_ADDR_VALID 0x40
-#define I40E_AQC_WOL_ADDR_VALID 0x80
-#define I40E_AQC_MC_MAG_EN_VALID 0x100
-#define I40E_AQC_ADDR_VALID_MASK 0x3F0
- u8 reserved[6];
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_read);
-
-struct i40e_aqc_mac_address_read_data {
- u8 pf_lan_mac[6];
- u8 pf_san_mac[6];
- u8 port_mac[6];
- u8 pf_wol_mac[6];
-};
-
-I40E_CHECK_STRUCT_LEN(24, i40e_aqc_mac_address_read_data);
-
-/* Manage MAC Address Write Command (0x0108) */
-struct i40e_aqc_mac_address_write {
- __le16 command_flags;
-#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000
-#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000
-#define I40E_AQC_WRITE_TYPE_PORT 0x8000
-#define I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG 0xC000
-#define I40E_AQC_WRITE_TYPE_MASK 0xC000
-
- __le16 mac_sah;
- __le32 mac_sal;
- u8 reserved[8];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_write);
-
-/* PXE commands (0x011x) */
-
-/* Clear PXE Command and response (direct 0x0110) */
-struct i40e_aqc_clear_pxe {
- u8 rx_cnt;
- u8 reserved[15];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe);
-
-/* Set WoL Filter (0x0120) */
-
-struct i40e_aqc_set_wol_filter {
- __le16 filter_index;
-#define I40E_AQC_MAX_NUM_WOL_FILTERS 8
-#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT 15
-#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_MASK (0x1 << \
- I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT)
-
-#define I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT 0
-#define I40E_AQC_SET_WOL_FILTER_INDEX_MASK (0x7 << \
- I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT)
- __le16 cmd_flags;
-#define I40E_AQC_SET_WOL_FILTER 0x8000
-#define I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL 0x4000
-#define I40E_AQC_SET_WOL_FILTER_WOL_PRESERVE_ON_PFR 0x2000
-#define I40E_AQC_SET_WOL_FILTER_ACTION_CLEAR 0
-#define I40E_AQC_SET_WOL_FILTER_ACTION_SET 1
- __le16 valid_flags;
-#define I40E_AQC_SET_WOL_FILTER_ACTION_VALID 0x8000
-#define I40E_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID 0x4000
- u8 reserved[2];
- __le32 address_high;
- __le32 address_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_set_wol_filter);
-
-struct i40e_aqc_set_wol_filter_data {
- u8 filter[128];
- u8 mask[16];
-};
-
-I40E_CHECK_STRUCT_LEN(0x90, i40e_aqc_set_wol_filter_data);
-
-/* Get Wake Reason (0x0121) */
-
-struct i40e_aqc_get_wake_reason_completion {
- u8 reserved_1[2];
- __le16 wake_reason;
-#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT 0
-#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_MASK (0xFF << \
- I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT)
-#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT 8
-#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_MASK (0xFF << \
- I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT)
- u8 reserved_2[12];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_get_wake_reason_completion);
-
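A sketch of packing the filter_index word of the set_wol_filter command above; 'cmd' (a struct i40e_aqc_set_wol_filter *) is assumed:

    /* select slot 3 of the 8 WoL filters and mark it a magic-packet filter */
    u16 idx = (3 << I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT) &
              I40E_AQC_SET_WOL_FILTER_INDEX_MASK;

    idx |= I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_MASK;
    cmd->filter_index = cpu_to_le16(idx);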
-/* Switch configuration commands (0x02xx) */
-
-/* Used by many indirect commands that only pass an seid and a buffer in the
- * command
- */
-struct i40e_aqc_switch_seid {
- __le16 seid;
- u8 reserved[6];
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_switch_seid);
-
-/* Get Switch Configuration command (indirect 0x0200)
- * uses i40e_aqc_switch_seid for the descriptor
- */
-struct i40e_aqc_get_switch_config_header_resp {
- __le16 num_reported;
- __le16 num_total;
- u8 reserved[12];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_config_header_resp);
-
-struct i40e_aqc_switch_config_element_resp {
- u8 element_type;
-#define I40E_AQ_SW_ELEM_TYPE_MAC 1
-#define I40E_AQ_SW_ELEM_TYPE_PF 2
-#define I40E_AQ_SW_ELEM_TYPE_VF 3
-#define I40E_AQ_SW_ELEM_TYPE_EMP 4
-#define I40E_AQ_SW_ELEM_TYPE_BMC 5
-#define I40E_AQ_SW_ELEM_TYPE_PV 16
-#define I40E_AQ_SW_ELEM_TYPE_VEB 17
-#define I40E_AQ_SW_ELEM_TYPE_PA 18
-#define I40E_AQ_SW_ELEM_TYPE_VSI 19
- u8 revision;
-#define I40E_AQ_SW_ELEM_REV_1 1
- __le16 seid;
- __le16 uplink_seid;
- __le16 downlink_seid;
- u8 reserved[3];
- u8 connection_type;
-#define I40E_AQ_CONN_TYPE_REGULAR 0x1
-#define I40E_AQ_CONN_TYPE_DEFAULT 0x2
-#define I40E_AQ_CONN_TYPE_CASCADED 0x3
- __le16 scheduler_id;
- __le16 element_info;
-};
-
-I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_config_element_resp);
-
-/* Get Switch Configuration (indirect 0x0200)
- * an array of elements are returned in the response buffer
- * the first in the array is the header, remainder are elements
- */
-struct i40e_aqc_get_switch_config_resp {
- struct i40e_aqc_get_switch_config_header_resp header;
- struct i40e_aqc_switch_config_element_resp element[1];
-};
-
-I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_switch_config_resp);
-
-/* Add Statistics (direct 0x0201)
- * Remove Statistics (direct 0x0202)
- */
-struct i40e_aqc_add_remove_statistics {
- __le16 seid;
- __le16 vlan;
- __le16 stat_index;
- u8 reserved[10];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_statistics);
-
-/* Set Port Parameters command (direct 0x0203) */
-struct i40e_aqc_set_port_parameters {
- __le16 command_flags;
-#define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS 1
-#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */
-#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4
- __le16 bad_frame_vsi;
-#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_SHIFT 0x0
-#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_MASK 0x3FF
- __le16 default_seid; /* reserved for command */
- u8 reserved[10];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_set_port_parameters);
-
-/* Get Switch Resource Allocation (indirect 0x0204) */
-struct i40e_aqc_get_switch_resource_alloc {
- u8 num_entries; /* reserved for command */
- u8 reserved[7];
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_resource_alloc);
-
-/* expect an array of these structs in the response buffer */
-struct i40e_aqc_switch_resource_alloc_element_resp {
- u8 resource_type;
-#define I40E_AQ_RESOURCE_TYPE_VEB 0x0
-#define I40E_AQ_RESOURCE_TYPE_VSI 0x1
-#define I40E_AQ_RESOURCE_TYPE_MACADDR 0x2
-#define I40E_AQ_RESOURCE_TYPE_STAG 0x3
-#define I40E_AQ_RESOURCE_TYPE_ETAG 0x4
-#define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH 0x5
-#define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH 0x6
-#define I40E_AQ_RESOURCE_TYPE_VLAN 0x7
-#define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY 0x8
-#define I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY 0x9
-#define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL 0xA
-#define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE 0xB
-#define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS 0xC
-#define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS 0xD
-#define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS 0xF
-#define I40E_AQ_RESOURCE_TYPE_IP_FILTERS 0x10
-#define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS 0x11
-#define I40E_AQ_RESOURCE_TYPE_VN2_KEYS 0x12
-#define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS 0x13
- u8 reserved1;
- __le16 guaranteed;
- __le16 total;
- __le16 used;
- __le16 total_unalloced;
- u8 reserved2[6];
-};
-
-I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_resource_alloc_element_resp);
-
-/* Set Switch Configuration (direct 0x0205) */
-struct i40e_aqc_set_switch_config {
- __le16 flags;
-/* flags used for both fields below */
-#define I40E_AQ_SET_SWITCH_CFG_PROMISC 0x0001
-#define I40E_AQ_SET_SWITCH_CFG_L2_FILTER 0x0002
- __le16 valid_flags;
- /* The ethertype in switch_tag is dropped on ingress and used
- * internally by the switch. Set this to zero for the default
- * of 0x88a8 (802.1ad). Should be zero for firmware API
- * versions lower than 1.7.
- */
- __le16 switch_tag;
- /* The ethertypes in first_tag and second_tag are used to
- * match the outer and inner VLAN tags (respectively) when HW
- * double VLAN tagging is enabled via the set port parameters
- * AQ command. Otherwise these are both ignored. Set them to
- * zero for their defaults of 0x8100 (802.1Q). Should be zero
- * for firmware API versions lower than 1.7.
- */
- __le16 first_tag;
- __le16 second_tag;
- u8 reserved[6];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_set_switch_config);
-
-/* Read Receive control registers (direct 0x0206)
- * Write Receive control registers (direct 0x0207)
- * used for accessing Rx control registers that can be
- * slow and need special handling when under high Rx load
- */
-struct i40e_aqc_rx_ctl_reg_read_write {
- __le32 reserved1;
- __le32 address;
- __le32 reserved2;
- __le32 value;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_rx_ctl_reg_read_write);
-
-/* Add VSI (indirect 0x0210)
- * this indirect command uses struct i40e_aqc_vsi_properties_data
- * as the indirect buffer (128 bytes)
- *
- * Update VSI (indirect 0x211)
- * uses the same data structure as Add VSI
- *
- * Get VSI (indirect 0x0212)
- * uses the same completion and data structure as Add VSI
- */
-struct i40e_aqc_add_get_update_vsi {
- __le16 uplink_seid;
- u8 connection_type;
-#define I40E_AQ_VSI_CONN_TYPE_NORMAL 0x1
-#define I40E_AQ_VSI_CONN_TYPE_DEFAULT 0x2
-#define I40E_AQ_VSI_CONN_TYPE_CASCADED 0x3
- u8 reserved1;
- u8 vf_id;
- u8 reserved2;
- __le16 vsi_flags;
-#define I40E_AQ_VSI_TYPE_SHIFT 0x0
-#define I40E_AQ_VSI_TYPE_MASK (0x3 << I40E_AQ_VSI_TYPE_SHIFT)
-#define I40E_AQ_VSI_TYPE_VF 0x0
-#define I40E_AQ_VSI_TYPE_VMDQ2 0x1
-#define I40E_AQ_VSI_TYPE_PF 0x2
-#define I40E_AQ_VSI_TYPE_EMP_MNG 0x3
-#define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi);
-
-struct i40e_aqc_add_get_update_vsi_completion {
- __le16 seid;
- __le16 vsi_number;
- __le16 vsi_used;
- __le16 vsi_free;
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi_completion);
-
-struct i40e_aqc_vsi_properties_data {
- /* first 96 byte are written by SW */
- __le16 valid_sections;
-#define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001
-#define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002
-#define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004
-#define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008
-#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010
-#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020
-#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040
-#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080
-#define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100
-#define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200
- /* switch section */
- __le16 switch_id; /* 12bit id combined with flags below */
-#define I40E_AQ_VSI_SW_ID_SHIFT 0x0000
-#define I40E_AQ_VSI_SW_ID_MASK (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT)
-#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000
-#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000
-#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000
- u8 sw_reserved[2];
- /* security section */
- u8 sec_flags;
-#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01
-#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02
-#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04
- u8 sec_reserved;
- /* VLAN section */
- __le16 pvid; /* VLANS include priority bits */
- __le16 fcoe_pvid;
- u8 port_vlan_flags;
-#define I40E_AQ_VSI_PVLAN_MODE_SHIFT 0x00
-#define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \
- I40E_AQ_VSI_PVLAN_MODE_SHIFT)
-#define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01
-#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02
-#define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03
-#define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04
-#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03
-#define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \
- I40E_AQ_VSI_PVLAN_EMOD_SHIFT)
-#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0
-#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08
-#define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10
-#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18
- u8 pvlan_reserved[3];
- /* ingress egress up sections */
- __le32 ingress_table; /* bitmap, 3 bits per up */
-#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0
-#define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP0_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3
-#define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP1_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6
-#define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP2_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9
-#define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP3_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12
-#define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP4_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15
-#define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP5_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18
-#define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP6_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21
-#define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP7_SHIFT)
- __le32 egress_table; /* same defines as for ingress table */
- /* cascaded PV section */
- __le16 cas_pv_tag;
- u8 cas_pv_flags;
-#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00
-#define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \
- I40E_AQ_VSI_CAS_PV_TAGX_SHIFT)
-#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00
-#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01
-#define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02
-#define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10
-#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20
-#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40
- u8 cas_pv_reserved;
- /* queue mapping section */
- __le16 mapping_flags;
-#define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0
-#define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1
- __le16 queue_mapping[16];
-#define I40E_AQ_VSI_QUEUE_SHIFT 0x0
-#define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT)
- __le16 tc_mapping[8];
-#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0
-#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \
- I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
-#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9
-#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \
- I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
- /* queueing option section */
- u8 queueing_opt_flags;
-#define I40E_AQ_VSI_QUE_OPT_MULTICAST_UDP_ENA 0x04
-#define I40E_AQ_VSI_QUE_OPT_UNICAST_UDP_ENA 0x08
-#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10
-#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20
-#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_PF 0x00
-#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI 0x40
- u8 queueing_opt_reserved[3];
- /* scheduler section */
- u8 up_enable_bits;
- u8 sched_reserved;
- /* outer up section */
- __le32 outer_up_table; /* same structure and defines as ingress tbl */
- u8 cmd_reserved[8];
- /* last 32 bytes are written by FW */
- __le16 qs_handle[8];
-#define I40E_AQ_VSI_QS_HANDLE_INVALID 0xFFFF
- __le16 stat_counter_idx;
- __le16 sched_id;
- u8 resp_reserved[12];
-};
-
-I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data);
-
-/* Add Port Virtualizer (direct 0x0220)
- * also used for update PV (direct 0x0221) but only flags are used
- * (IS_CTRL_PORT only works on add PV)
- */
-struct i40e_aqc_add_update_pv {
- __le16 command_flags;
-#define I40E_AQC_PV_FLAG_PV_TYPE 0x1
-#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN 0x2
-#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN 0x4
-#define I40E_AQC_PV_FLAG_IS_CTRL_PORT 0x8
- __le16 uplink_seid;
- __le16 connected_seid;
- u8 reserved[10];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv);
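A sketch of filling one tc_mapping entry of the VSI properties buffer above, assuming (as the driver's queue-map setup does) that the QUE_NUMBER field holds a power-of-two exponent; 'info' is an assumed struct i40e_aqc_vsi_properties_data *:

    /* TC0: queues start at offset 0, 2^3 = 8 queues */
    u16 tc0 = (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) & I40E_AQ_VSI_TC_QUE_OFFSET_MASK;

    tc0 |= (3 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT) & I40E_AQ_VSI_TC_QUE_NUMBER_MASK;
    info->tc_mapping[0] = cpu_to_le16(tc0);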
-
-struct i40e_aqc_add_update_pv_completion {
- /* reserved for update; for add also encodes error if rc == ENOSPC */
- __le16 pv_seid;
-#define I40E_AQC_PV_ERR_FLAG_NO_PV 0x1
-#define I40E_AQC_PV_ERR_FLAG_NO_SCHED 0x2
-#define I40E_AQC_PV_ERR_FLAG_NO_COUNTER 0x4
-#define I40E_AQC_PV_ERR_FLAG_NO_ENTRY 0x8
- u8 reserved[14];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv_completion);
-
-/* Get PV Params (direct 0x0222)
- * uses i40e_aqc_switch_seid for the descriptor
- */
-
-struct i40e_aqc_get_pv_params_completion {
- __le16 seid;
- __le16 default_stag;
- __le16 pv_flags; /* same flags as add_pv */
-#define I40E_AQC_GET_PV_PV_TYPE 0x1
-#define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG 0x2
-#define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG 0x4
- u8 reserved[8];
- __le16 default_port_seid;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_get_pv_params_completion);
-
-/* Add VEB (direct 0x0230) */
-struct i40e_aqc_add_veb {
- __le16 uplink_seid;
- __le16 downlink_seid;
- __le16 veb_flags;
-#define I40E_AQC_ADD_VEB_FLOATING 0x1
-#define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT 1
-#define I40E_AQC_ADD_VEB_PORT_TYPE_MASK (0x3 << \
- I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT)
-#define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2
-#define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4
-#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8 /* deprecated */
-#define I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS 0x10
- u8 enable_tcs;
- u8 reserved[9];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb);
-
-struct i40e_aqc_add_veb_completion {
- u8 reserved[6];
- __le16 switch_seid;
- /* also encodes error if rc == ENOSPC; codes are the same as add_pv */
- __le16 veb_seid;
-#define I40E_AQC_VEB_ERR_FLAG_NO_VEB 0x1
-#define I40E_AQC_VEB_ERR_FLAG_NO_SCHED 0x2
-#define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER 0x4
-#define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY 0x8
- __le16 statistic_index;
- __le16 vebs_used;
- __le16 vebs_free;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb_completion);
-
-/* Get VEB Parameters (direct 0x0232)
- * uses i40e_aqc_switch_seid for the descriptor
- */
-struct i40e_aqc_get_veb_parameters_completion {
- __le16 seid;
- __le16 switch_id;
- __le16 veb_flags; /* only the first/last flags from 0x0230 is valid */
- __le16 statistic_index;
- __le16 vebs_used;
- __le16 vebs_free;
- u8 reserved[4];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion);
-
-/* Delete Element (direct 0x0243)
- * uses the generic i40e_aqc_switch_seid
- */
-
-/* Add MAC-VLAN (indirect 0x0250) */
-
-/* used for the command for most vlan commands */
-struct i40e_aqc_macvlan {
- __le16 num_addresses;
- __le16 seid[3];
-#define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT 0
-#define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK (0x3FF << \
- I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT)
-#define I40E_AQC_MACVLAN_CMD_SEID_VALID 0x8000
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_macvlan);
-
-/* indirect data for command and response */
-struct i40e_aqc_add_macvlan_element_data {
- u8 mac_addr[6];
- __le16 vlan_tag;
- __le16 flags;
-#define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH 0x0001
-#define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002
-#define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004
-#define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008
-#define I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC 0x0010
- __le16 queue_number;
-#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0
-#define I40E_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \
- I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT)
- /* response section */
- u8 match_method;
-#define I40E_AQC_MM_PERFECT_MATCH 0x01
-#define I40E_AQC_MM_HASH_MATCH 0x02
-#define I40E_AQC_MM_ERR_NO_RES 0xFF
- u8 reserved1[3];
-};
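A sketch of one element of the add_macvlan indirect buffer defined above; 'mac' is an assumed six-byte address and ether_addr_copy() the usual kernel helper:

    struct i40e_aqc_add_macvlan_element_data e = {};

    ether_addr_copy(e.mac_addr, mac);
    e.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
                          I40E_AQC_MACVLAN_ADD_IGNORE_VLAN); /* match any VLAN */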
-
-struct i40e_aqc_add_remove_macvlan_completion {
- __le16 perfect_mac_used;
- __le16 perfect_mac_free;
- __le16 unicast_hash_free;
- __le16 multicast_hash_free;
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_macvlan_completion);
-
-/* Remove MAC-VLAN (indirect 0x0251)
- * uses i40e_aqc_macvlan for the descriptor
- * data points to an array of num_addresses of elements
- */
-
-struct i40e_aqc_remove_macvlan_element_data {
- u8 mac_addr[6];
- __le16 vlan_tag;
- u8 flags;
-#define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH 0x01
-#define I40E_AQC_MACVLAN_DEL_HASH_MATCH 0x02
-#define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN 0x08
-#define I40E_AQC_MACVLAN_DEL_ALL_VSIS 0x10
- u8 reserved[3];
- /* reply section */
- u8 error_code;
-#define I40E_AQC_REMOVE_MACVLAN_SUCCESS 0x0
-#define I40E_AQC_REMOVE_MACVLAN_FAIL 0xFF
- u8 reply_reserved[3];
-};
-
-/* Add VLAN (indirect 0x0252)
- * Remove VLAN (indirect 0x0253)
- * use the generic i40e_aqc_macvlan for the command
- */
-struct i40e_aqc_add_remove_vlan_element_data {
- __le16 vlan_tag;
- u8 vlan_flags;
-/* flags for add VLAN */
-#define I40E_AQC_ADD_VLAN_LOCAL 0x1
-#define I40E_AQC_ADD_PVLAN_TYPE_SHIFT 1
-#define I40E_AQC_ADD_PVLAN_TYPE_MASK (0x3 << I40E_AQC_ADD_PVLAN_TYPE_SHIFT)
-#define I40E_AQC_ADD_PVLAN_TYPE_REGULAR 0x0
-#define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY 0x2
-#define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY 0x4
-#define I40E_AQC_VLAN_PTYPE_SHIFT 3
-#define I40E_AQC_VLAN_PTYPE_MASK (0x3 << I40E_AQC_VLAN_PTYPE_SHIFT)
-#define I40E_AQC_VLAN_PTYPE_REGULAR_VSI 0x0
-#define I40E_AQC_VLAN_PTYPE_PROMISC_VSI 0x8
-#define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI 0x10
-#define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI 0x18
-/* flags for remove VLAN */
-#define I40E_AQC_REMOVE_VLAN_ALL 0x1
- u8 reserved;
- u8 result;
-/* flags for add VLAN */
-#define I40E_AQC_ADD_VLAN_SUCCESS 0x0
-#define I40E_AQC_ADD_VLAN_FAIL_REQUEST 0xFE
-#define I40E_AQC_ADD_VLAN_FAIL_RESOURCE 0xFF
-/* flags for remove VLAN */
-#define I40E_AQC_REMOVE_VLAN_SUCCESS 0x0
-#define I40E_AQC_REMOVE_VLAN_FAIL 0xFF
- u8 reserved1[3];
-};
-
-struct i40e_aqc_add_remove_vlan_completion {
- u8 reserved[4];
- __le16 vlans_used;
- __le16 vlans_free;
- __le32 addr_high;
- __le32 addr_low;
-};
-
-/* Set VSI Promiscuous Modes (direct 0x0254) */
-struct i40e_aqc_set_vsi_promiscuous_modes {
- __le16 promiscuous_flags;
- __le16 valid_flags;
-/* flags used for both fields above */
-#define I40E_AQC_SET_VSI_PROMISC_UNICAST 0x01
-#define I40E_AQC_SET_VSI_PROMISC_MULTICAST 0x02
-#define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04
-#define I40E_AQC_SET_VSI_DEFAULT 0x08
-#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10
-#define I40E_AQC_SET_VSI_PROMISC_TX 0x8000
- __le16 seid;
-#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF
- __le16 vlan_tag;
-#define I40E_AQC_SET_VSI_VLAN_MASK 0x0FFF
-#define I40E_AQC_SET_VSI_VLAN_VALID 0x8000
- u8 reserved[8];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes);
-
-/* Add S/E-tag command (direct 0x0255)
- * Uses generic i40e_aqc_add_remove_tag_completion for completion
- */
-struct i40e_aqc_add_tag {
- __le16 flags;
-#define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE 0x0001
- __le16 seid;
-#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT 0
-#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK (0x3FF << \
- I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT)
- __le16 tag;
- __le16 queue_number;
- u8 reserved[8];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_tag);
-
-struct i40e_aqc_add_remove_tag_completion {
- u8 reserved[12];
- __le16 tags_used;
- __le16 tags_free;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion);
-
-/* Remove S/E-tag command (direct 0x0256)
- * Uses generic i40e_aqc_add_remove_tag_completion for completion
- */
-struct i40e_aqc_remove_tag {
- __le16 seid;
-#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT 0
-#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK (0x3FF << \
- I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT)
- __le16 tag;
- u8 reserved[12];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_tag);
-
-/* Add multicast E-Tag (direct 0x0257)
- * del multicast E-Tag (direct 0x0258) only uses pv_seid and etag fields
- * and no external data
- */
-struct i40e_aqc_add_remove_mcast_etag {
- __le16 pv_seid;
- __le16 etag;
- u8 num_unicast_etags;
- u8 reserved[3];
- __le32 addr_high; /* address of array of 2-byte s-tags */
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag);
-
-struct i40e_aqc_add_remove_mcast_etag_completion {
- u8 reserved[4];
- __le16 mcast_etags_used;
- __le16 mcast_etags_free;
- __le32 addr_high;
- __le32 addr_low;
-
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag_completion);
-
-/* Update S/E-Tag (direct 0x0259) */
-struct i40e_aqc_update_tag {
- __le16 seid;
-#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT 0
-#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK (0x3FF << \
- I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT)
- __le16 old_tag;
- __le16 new_tag;
- u8 reserved[10];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag);
-
-struct i40e_aqc_update_tag_completion {
- u8 reserved[12];
- __le16 tags_used;
- __le16 tags_free;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag_completion);
-
-/* Add Control Packet filter (direct 0x025A)
- * Remove Control Packet filter (direct 0x025B)
- * uses the i40e_aqc_add_oveb_cloud,
- * and the generic direct completion structure
- */
-struct i40e_aqc_add_remove_control_packet_filter {
- u8 mac[6];
- __le16 etype;
- __le16 flags;
-#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC 0x0001
-#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP 0x0002
-#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE 0x0004
-#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX 0x0008
-#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX 0x0000
- __le16 seid;
-#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT 0
-#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK (0x3FF << \
- I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT)
- __le16 queue;
- u8 reserved[2];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter);
-
-struct i40e_aqc_add_remove_control_packet_filter_completion {
- __le16 mac_etype_used;
- __le16 etype_used;
- __le16 mac_etype_free;
- __le16 etype_free;
- u8 reserved[8];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter_completion);
-
-/* Add Cloud filters (indirect 0x025C)
- * Remove Cloud filters (indirect 0x025D)
- * uses the i40e_aqc_add_remove_cloud_filters,
- * and the generic indirect completion structure
- */
-struct i40e_aqc_add_remove_cloud_filters {
- u8 num_filters;
- u8 reserved;
- __le16 seid;
-#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0
-#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \
- I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT)
- u8 big_buffer_flag;
-#define I40E_AQC_ADD_CLOUD_CMD_BB 1
- u8 reserved2[3];
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_cloud_filters);
-
-struct i40e_aqc_cloud_filters_element_data {
- u8 outer_mac[6];
- u8 inner_mac[6];
- __le16 inner_vlan;
- union {
- struct {
- u8 reserved[12];
- u8 data[4];
- } v4;
- struct {
- u8 data[16];
- } v6;
- struct {
- __le16 data[8];
- } raw_v6;
- } ipaddr;
- __le16 flags;
-#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0
-#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \
- I40E_AQC_ADD_CLOUD_FILTER_SHIFT)
-/* 0x0000 reserved */
-#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001
-/* 0x0002 reserved */
-#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003
-#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004
-/* 0x0005 reserved */
-#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID 0x0006
-/* 0x0007 reserved */
-/* 0x0008 reserved */
-#define I40E_AQC_ADD_CLOUD_FILTER_OMAC 0x0009
-#define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A
-#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B
-#define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C
-/* 0x0010 to 0x0017 is for custom filters */
-#define I40E_AQC_ADD_CLOUD_FILTER_IP_PORT 0x0010 /* Dest IP + L4 Port */
-#define I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT 0x0011 /* Dest MAC + L4 Port */
-#define I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT 0x0012 /* Dest MAC + VLAN + L4 Port */
-
-#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080
-#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6
-#define I40E_AQC_ADD_CLOUD_VNK_MASK 0x00C0
-#define I40E_AQC_ADD_CLOUD_FLAGS_IPV4 0
-#define I40E_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100
-
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN 0
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE 2
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_RESERVED 4
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN_GPE 5
-
-#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_OUTER_MAC 0x2000
-#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_INNER_MAC 0x4000
-#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_OUTER_IP 0x8000
-
- __le32 tenant_id;
- u8 reserved[4];
- __le16 queue_number;
-#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0
-#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x7FF << \
- I40E_AQC_ADD_CLOUD_QUEUE_SHIFT)
- u8 reserved2[14];
- /* response section */
- u8 allocation_result;
-#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS 0x0
-#define I40E_AQC_ADD_CLOUD_FILTER_FAIL 0xFF
- u8 response_reserved[7];
-};
-
-I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_cloud_filters_element_data);
-
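A sketch of composing the flags word of the cloud filter element above for an inner-MAC/inner-VLAN/tenant-id VXLAN filter over IPv4; 'filter' (a struct i40e_aqc_cloud_filters_element_data *) and 'vni' are assumed:

    u16 flags = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;

    flags |= I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
    flags |= I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
    filter->flags = cpu_to_le16(flags);
    filter->tenant_id = cpu_to_le32(vni);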
-/* i40e_aqc_cloud_filters_element_bb is used when
- * I40E_AQC_ADD_CLOUD_CMD_BB flag is set.
- */
-struct i40e_aqc_cloud_filters_element_bb {
- struct i40e_aqc_cloud_filters_element_data element;
- u16 general_fields[32];
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD0 0
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1 1
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD2 2
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0 3
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1 4
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2 5
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0 6
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1 7
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2 8
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0 9
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1 10
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2 11
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD0 12
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD1 13
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD2 14
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0 15
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD1 16
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD2 17
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD3 18
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD4 19
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD5 20
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD6 21
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD7 22
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD0 23
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD1 24
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD2 25
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD3 26
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD4 27
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD5 28
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD6 29
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD7 30
-};
-
-I40E_CHECK_STRUCT_LEN(0x80, i40e_aqc_cloud_filters_element_bb);
-
-struct i40e_aqc_remove_cloud_filters_completion {
- __le16 perfect_ovlan_used;
- __le16 perfect_ovlan_free;
- __le16 vlan_used;
- __le16 vlan_free;
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_cloud_filters_completion);
-
-/* Replace filter Command 0x025F
- * uses the i40e_aqc_replace_cloud_filters,
- * and the generic indirect completion structure
- */
-struct i40e_filter_data {
- u8 filter_type;
- u8 input[3];
-};
-
-I40E_CHECK_STRUCT_LEN(4, i40e_filter_data);
-
-struct i40e_aqc_replace_cloud_filters_cmd {
- u8 valid_flags;
-#define I40E_AQC_REPLACE_L1_FILTER 0x0
-#define I40E_AQC_REPLACE_CLOUD_FILTER 0x1
-#define I40E_AQC_GET_CLOUD_FILTERS 0x2
-#define I40E_AQC_MIRROR_CLOUD_FILTER 0x4
-#define I40E_AQC_HIGH_PRIORITY_CLOUD_FILTER 0x8
- u8 old_filter_type;
- u8 new_filter_type;
- u8 tr_bit;
- u8 reserved[4];
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_replace_cloud_filters_cmd);
-
-struct i40e_aqc_replace_cloud_filters_cmd_buf {
- u8 data[32];
-/* Filter type INPUT codes*/
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_ENTRIES_MAX 3
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED BIT(7)
-
-/* Field Vector offsets */
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_MAC_DA 0
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_ETH 6
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG 7
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN 8
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_OVLAN 9
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN 10
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY 11
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC 12
-/* big FLU */
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IP_DA 14
-/* big FLU */
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_OIP_DA 15
-
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN 37
- struct i40e_filter_data filters[8];
-};
-
-I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_replace_cloud_filters_cmd_buf);
-
-/* Add Mirror Rule (indirect or direct 0x0260)
- * Delete Mirror Rule (indirect or direct 0x0261)
- * note: some rule types (4,5) do not use an external buffer.
- * take care to set the flags correctly.
- */
-struct i40e_aqc_add_delete_mirror_rule {
- __le16 seid;
- __le16 rule_type;
-#define I40E_AQC_MIRROR_RULE_TYPE_SHIFT 0
-#define I40E_AQC_MIRROR_RULE_TYPE_MASK (0x7 << \
- I40E_AQC_MIRROR_RULE_TYPE_SHIFT)
-#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS 1
-#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS 2
-#define I40E_AQC_MIRROR_RULE_TYPE_VLAN 3
-#define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS 4
-#define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS 5
- __le16 num_entries;
- __le16 destination; /* VSI for add, rule id for delete */
- __le32 addr_high; /* address of array of 2-byte VSI or VLAN ids */
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule);
-
-struct i40e_aqc_add_delete_mirror_rule_completion {
- u8 reserved[2];
- __le16 rule_id; /* only used on add */
- __le16 mirror_rules_used;
- __le16 mirror_rules_free;
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
-
-/* Dynamic Device Personalization */
-struct i40e_aqc_write_personalization_profile {
- u8 flags;
- u8 reserved[3];
- __le32 profile_track_id;
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_write_personalization_profile);
-
-struct i40e_aqc_write_ddp_resp {
- __le32 error_offset;
- __le32 error_info;
- __le32 addr_high;
- __le32 addr_low;
-};
-
-struct i40e_aqc_get_applied_profiles {
- u8 flags;
-#define I40E_AQC_GET_DDP_GET_CONF 0x1
-#define I40E_AQC_GET_DDP_GET_RDPU_CONF 0x2
- u8 rsv[3];
- __le32 reserved;
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_get_applied_profiles);
-
-/* DCB 0x03xx*/
-
-/* PFC Ignore (direct 0x0301)
- * the command and response use the same descriptor structure
- */
-struct i40e_aqc_pfc_ignore {
- u8 tc_bitmap;
- u8 command_flags; /* unused on response */
-#define I40E_AQC_PFC_IGNORE_SET 0x80
-#define I40E_AQC_PFC_IGNORE_CLEAR 0x0
- u8 reserved[14];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_pfc_ignore);
-
-/* DCB Update (direct 0x0302) uses the i40e_aq_desc structure
- * with no parameters
- */
-
-/* TX scheduler 0x04xx */
-
-/* Almost all the indirect commands use
- * this generic struct to pass the SEID in param0
- */
-struct i40e_aqc_tx_sched_ind {
- __le16 vsi_seid;
- u8 reserved[6];
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_tx_sched_ind);
-
-/* Several commands respond with a set of queue set handles */
-struct i40e_aqc_qs_handles_resp {
- __le16 qs_handles[8];
-};
-
-/* Configure VSI BW limits (direct 0x0400) */
-struct i40e_aqc_configure_vsi_bw_limit {
- __le16 vsi_seid;
- u8 reserved[2];
- __le16 credit;
- u8 reserved1[2];
- u8 max_credit; /* 0-3, limit = 2^max */
- u8 reserved2[7];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_vsi_bw_limit);
-
-/* Configure VSI Bandwidth Limit per Traffic Type (indirect 0x0406)
- * responds with i40e_aqc_qs_handles_resp
- */
-struct i40e_aqc_configure_vsi_ets_sla_bw_data {
- u8 tc_valid_bits;
- u8 reserved[15];
- __le16 tc_bw_credits[8]; /* FW writesback QS handles here */
-
- /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
- __le16 tc_bw_max[2];
- u8 reserved1[28];
-};
-
-I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_configure_vsi_ets_sla_bw_data);
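Reading the "4 bits per tc" comment above as TC0-3 packed into tc_bw_max[0] and TC4-7 into tc_bw_max[1] (an assumption; the layout is not spelled out here), setting TC1's exponent to 3 (limit = 2^3 credits) would look roughly like this, with 'bw_data' assumed:

    u16 w = le16_to_cpu(bw_data->tc_bw_max[0]);

    w = (w & ~(0xF << 4)) | (3 << 4); /* nibble 1 = TC1 */
    bw_data->tc_bw_max[0] = cpu_to_le16(w);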
-
-/* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407)
- * responds with i40e_aqc_qs_handles_resp
- */
-struct i40e_aqc_configure_vsi_tc_bw_data {
- u8 tc_valid_bits;
- u8 reserved[3];
- u8 tc_bw_credits[8];
- u8 reserved1[4];
- __le16 qs_handles[8];
-};
-
-I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_configure_vsi_tc_bw_data);
-
-/* Query vsi bw configuration (indirect 0x0408) */
-struct i40e_aqc_query_vsi_bw_config_resp {
- u8 tc_valid_bits;
- u8 tc_suspended_bits;
- u8 reserved[14];
- __le16 qs_handles[8];
- u8 reserved1[4];
- __le16 port_bw_limit;
- u8 reserved2[2];
- u8 max_bw; /* 0-3, limit = 2^max */
- u8 reserved3[23];
-};
-
-I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_query_vsi_bw_config_resp);
-
-/* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */
-struct i40e_aqc_query_vsi_ets_sla_config_resp {
- u8 tc_valid_bits;
- u8 reserved[3];
- u8 share_credits[8];
- __le16 credits[8];
-
- /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
- __le16 tc_bw_max[2];
-};
-
-I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_query_vsi_ets_sla_config_resp);
-
-/* Configure Switching Component Bandwidth Limit (direct 0x0410) */
-struct i40e_aqc_configure_switching_comp_bw_limit {
- __le16 seid;
- u8 reserved[2];
- __le16 credit;
- u8 reserved1[2];
- u8 max_bw; /* 0-3, limit = 2^max */
- u8 reserved2[7];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit);
-
-/* Enable Physical Port ETS (indirect 0x0413)
- * Modify Physical Port ETS (indirect 0x0414)
- * Disable Physical Port ETS (indirect 0x0415)
- */
-struct i40e_aqc_configure_switching_comp_ets_data {
- u8 reserved[4];
- u8 tc_valid_bits;
- u8 seepage;
-#define I40E_AQ_ETS_SEEPAGE_EN_MASK 0x1
- u8 tc_strict_priority_flags;
- u8 reserved1[17];
- u8 tc_bw_share_credits[8];
- u8 reserved2[96];
-};
-
-I40E_CHECK_STRUCT_LEN(0x80, i40e_aqc_configure_switching_comp_ets_data);
-
-/* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */
-struct i40e_aqc_configure_switching_comp_ets_bw_limit_data {
- u8 tc_valid_bits;
- u8 reserved[15];
- __le16 tc_bw_credit[8];
-
- /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
- __le16 tc_bw_max[2];
- u8 reserved1[28];
-};
-
-I40E_CHECK_STRUCT_LEN(0x40,
- i40e_aqc_configure_switching_comp_ets_bw_limit_data);
-
-/* Configure Switching Component Bandwidth Allocation per Tc
- * (indirect 0x0417)
- */
-struct i40e_aqc_configure_switching_comp_bw_config_data {
- u8 tc_valid_bits;
- u8 reserved[2];
- u8 absolute_credits; /* bool */
- u8 tc_bw_share_credits[8];
- u8 reserved1[20];
-};
-
-I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_configure_switching_comp_bw_config_data);
-
-/* Query Switching Component Configuration (indirect 0x0418) */
-struct i40e_aqc_query_switching_comp_ets_config_resp {
- u8 tc_valid_bits;
- u8 reserved[35];
- __le16 port_bw_limit;
- u8 reserved1[2];
- u8 tc_bw_max; /* 0-3, limit = 2^max */
- u8 reserved2[23];
-};
-
-I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_query_switching_comp_ets_config_resp);
-
-/* Query PhysicalPort ETS Configuration (indirect 0x0419) */
-struct i40e_aqc_query_port_ets_config_resp {
- u8 reserved[4];
- u8 tc_valid_bits;
- u8 reserved1;
- u8 tc_strict_priority_bits;
- u8 reserved2;
- u8 tc_bw_share_credits[8];
- __le16 tc_bw_limits[8];
-
- /* 4 bits per tc 0-7, 4th bit reserved, limit = 2^max */
- __le16 tc_bw_max[2];
- u8 reserved3[32];
-};
-
-I40E_CHECK_STRUCT_LEN(0x44, i40e_aqc_query_port_ets_config_resp);
-
-/* Query Switching Component Bandwidth Allocation per Traffic Type
- * (indirect 0x041A)
- */
-struct i40e_aqc_query_switching_comp_bw_config_resp {
- u8 tc_valid_bits;
- u8 reserved[2];
- u8 absolute_credits_enable; /* bool */
- u8 tc_bw_share_credits[8];
- __le16 tc_bw_limits[8];
-
- /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
- __le16 tc_bw_max[2];
-};
-
-I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_query_switching_comp_bw_config_resp);
-
-/* Suspend/resume port TX traffic
- * (direct 0x041B and 0x041C) uses the generic SEID struct
- */
-
-/* Configure partition BW
- * (indirect 0x041D)
- */
-struct i40e_aqc_configure_partition_bw_data {
- __le16 pf_valid_bits;
- u8 min_bw[16]; /* guaranteed bandwidth */
- u8 max_bw[16]; /* bandwidth limit */
-};
-
-I40E_CHECK_STRUCT_LEN(0x22, i40e_aqc_configure_partition_bw_data);
-
-/* Get and set the active HMC resource profile and status.
- * (direct 0x0500) and (direct 0x0501)
- */
-struct i40e_aq_get_set_hmc_resource_profile {
- u8 pm_profile;
- u8 pe_vf_enabled;
- u8 reserved[14];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile);
-
-enum i40e_aq_hmc_profile {
- /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */
- I40E_HMC_PROFILE_DEFAULT = 1,
- I40E_HMC_PROFILE_FAVOR_VF = 2,
- I40E_HMC_PROFILE_EQUAL = 3,
-};
-
-/* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */
-
-/* set in param0 for get phy abilities to report qualified modules */
-#define I40E_AQ_PHY_REPORT_QUALIFIED_MODULES 0x0001
-#define I40E_AQ_PHY_REPORT_INITIAL_VALUES 0x0002
-
-enum i40e_aq_phy_type {
- I40E_PHY_TYPE_SGMII = 0x0,
- I40E_PHY_TYPE_1000BASE_KX = 0x1,
- I40E_PHY_TYPE_10GBASE_KX4 = 0x2,
- I40E_PHY_TYPE_10GBASE_KR = 0x3,
- I40E_PHY_TYPE_40GBASE_KR4 = 0x4,
- I40E_PHY_TYPE_XAUI = 0x5,
- I40E_PHY_TYPE_XFI = 0x6,
- I40E_PHY_TYPE_SFI = 0x7,
- I40E_PHY_TYPE_XLAUI = 0x8,
- I40E_PHY_TYPE_XLPPI = 0x9,
- I40E_PHY_TYPE_40GBASE_CR4_CU = 0xA,
- I40E_PHY_TYPE_10GBASE_CR1_CU = 0xB,
- I40E_PHY_TYPE_10GBASE_AOC = 0xC,
- I40E_PHY_TYPE_40GBASE_AOC = 0xD,
- I40E_PHY_TYPE_UNRECOGNIZED = 0xE,
- I40E_PHY_TYPE_UNSUPPORTED = 0xF,
- I40E_PHY_TYPE_100BASE_TX = 0x11,
- I40E_PHY_TYPE_1000BASE_T = 0x12,
- I40E_PHY_TYPE_10GBASE_T = 0x13,
- I40E_PHY_TYPE_10GBASE_SR = 0x14,
- I40E_PHY_TYPE_10GBASE_LR = 0x15,
- I40E_PHY_TYPE_10GBASE_SFPP_CU = 0x16,
- I40E_PHY_TYPE_10GBASE_CR1 = 0x17,
- I40E_PHY_TYPE_40GBASE_CR4 = 0x18,
- I40E_PHY_TYPE_40GBASE_SR4 = 0x19,
- I40E_PHY_TYPE_40GBASE_LR4 = 0x1A,
- I40E_PHY_TYPE_1000BASE_SX = 0x1B,
- I40E_PHY_TYPE_1000BASE_LX = 0x1C,
- I40E_PHY_TYPE_1000BASE_T_OPTICAL = 0x1D,
- I40E_PHY_TYPE_20GBASE_KR2 = 0x1E,
- I40E_PHY_TYPE_25GBASE_KR = 0x1F,
- I40E_PHY_TYPE_25GBASE_CR = 0x20,
- I40E_PHY_TYPE_25GBASE_SR = 0x21,
- I40E_PHY_TYPE_25GBASE_LR = 0x22,
- I40E_PHY_TYPE_25GBASE_AOC = 0x23,
- I40E_PHY_TYPE_25GBASE_ACC = 0x24,
- I40E_PHY_TYPE_MAX,
- I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP = 0xFD,
- I40E_PHY_TYPE_EMPTY = 0xFE,
- I40E_PHY_TYPE_DEFAULT = 0xFF,
-};
-
-#define I40E_LINK_SPEED_100MB_SHIFT 0x1
-#define I40E_LINK_SPEED_1000MB_SHIFT 0x2
-#define I40E_LINK_SPEED_10GB_SHIFT 0x3
-#define I40E_LINK_SPEED_40GB_SHIFT 0x4
-#define I40E_LINK_SPEED_20GB_SHIFT 0x5
-#define I40E_LINK_SPEED_25GB_SHIFT 0x6
-
-enum i40e_aq_link_speed {
- I40E_LINK_SPEED_UNKNOWN = 0,
- I40E_LINK_SPEED_100MB = BIT(I40E_LINK_SPEED_100MB_SHIFT),
- I40E_LINK_SPEED_1GB = BIT(I40E_LINK_SPEED_1000MB_SHIFT),
- I40E_LINK_SPEED_10GB = BIT(I40E_LINK_SPEED_10GB_SHIFT),
- I40E_LINK_SPEED_40GB = BIT(I40E_LINK_SPEED_40GB_SHIFT),
- I40E_LINK_SPEED_20GB = BIT(I40E_LINK_SPEED_20GB_SHIFT),
- I40E_LINK_SPEED_25GB = BIT(I40E_LINK_SPEED_25GB_SHIFT),
-};
-
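Since the link_speed byte reported by firmware is a one-hot bitmap rather than a plain integer, reporting code has to translate it; a minimal sketch (the function name is invented):

    static u32 aq_speed_to_mbps(u8 link_speed)
    {
        switch (link_speed) {
        case I40E_LINK_SPEED_100MB: return 100;
        case I40E_LINK_SPEED_1GB: return 1000;
        case I40E_LINK_SPEED_10GB: return 10000;
        case I40E_LINK_SPEED_20GB: return 20000;
        case I40E_LINK_SPEED_25GB: return 25000;
        case I40E_LINK_SPEED_40GB: return 40000;
        default: return 0; /* I40E_LINK_SPEED_UNKNOWN */
        }
    }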
-struct i40e_aqc_module_desc {
- u8 oui[3];
- u8 reserved1;
- u8 part_number[16];
- u8 revision[4];
- u8 reserved2[8];
-};
-
-I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_module_desc);
-
-struct i40e_aq_get_phy_abilities_resp {
- __le32 phy_type; /* bitmap using the above enum for offsets */
- u8 link_speed; /* bitmap using the above enum bit patterns */
- u8 abilities;
-#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01
-#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02
-#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04
-#define I40E_AQ_PHY_LINK_ENABLED 0x08
-#define I40E_AQ_PHY_AN_ENABLED 0x10
-#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20
-#define I40E_AQ_PHY_FEC_ABILITY_KR 0x40
-#define I40E_AQ_PHY_FEC_ABILITY_RS 0x80
- __le16 eee_capability;
-#define I40E_AQ_EEE_100BASE_TX 0x0002
-#define I40E_AQ_EEE_1000BASE_T 0x0004
-#define I40E_AQ_EEE_10GBASE_T 0x0008
-#define I40E_AQ_EEE_1000BASE_KX 0x0010
-#define I40E_AQ_EEE_10GBASE_KX4 0x0020
-#define I40E_AQ_EEE_10GBASE_KR 0x0040
- __le32 eeer_val;
- u8 d3_lpan;
-#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01
- u8 phy_type_ext;
-#define I40E_AQ_PHY_TYPE_EXT_25G_KR 0X01
-#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0X02
-#define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04
-#define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08
-#define I40E_AQ_PHY_TYPE_EXT_25G_AOC 0x10
-#define I40E_AQ_PHY_TYPE_EXT_25G_ACC 0x20
- u8 fec_cfg_curr_mod_ext_info;
-#define I40E_AQ_ENABLE_FEC_KR 0x01
-#define I40E_AQ_ENABLE_FEC_RS 0x02
-#define I40E_AQ_REQUEST_FEC_KR 0x04
-#define I40E_AQ_REQUEST_FEC_RS 0x08
-#define I40E_AQ_ENABLE_FEC_AUTO 0x10
-#define I40E_AQ_FEC
-#define I40E_AQ_MODULE_TYPE_EXT_MASK 0xE0
-#define I40E_AQ_MODULE_TYPE_EXT_SHIFT 5
-
- u8 ext_comp_code;
- u8 phy_id[4];
- u8 module_type[3];
- u8 qualified_module_count;
-#define I40E_AQ_PHY_MAX_QMS 16
- struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS];
-};
-
-I40E_CHECK_STRUCT_LEN(0x218, i40e_aq_get_phy_abilities_resp);
-
-/* Set PHY Config (direct 0x0601) */
-struct i40e_aq_set_phy_config { /* same bits as above in all */
- __le32 phy_type;
- u8 link_speed;
- u8 abilities;
-/* bits 0-2 use the values from get_phy_abilities_resp */
-#define I40E_AQ_PHY_ENABLE_LINK 0x08
-#define I40E_AQ_PHY_ENABLE_AN 0x10
-#define I40E_AQ_PHY_ENABLE_ATOMIC_LINK 0x20
- __le16 eee_capability;
- __le32 eeer;
- u8 low_power_ctrl;
- u8 phy_type_ext;
-#define I40E_AQ_PHY_TYPE_EXT_25G_KR 0X01
-#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0X02
-#define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04
-#define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08
- u8 fec_config;
-#define I40E_AQ_SET_FEC_ABILITY_KR BIT(0)
-#define I40E_AQ_SET_FEC_ABILITY_RS BIT(1)
-#define I40E_AQ_SET_FEC_REQUEST_KR BIT(2)
-#define I40E_AQ_SET_FEC_REQUEST_RS BIT(3)
-#define I40E_AQ_SET_FEC_AUTO BIT(4)
-#define I40E_AQ_PHY_FEC_CONFIG_SHIFT 0x0
-#define I40E_AQ_PHY_FEC_CONFIG_MASK (0x1F << I40E_AQ_PHY_FEC_CONFIG_SHIFT)
- u8 reserved;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config);
-
-/* Set MAC Config command data structure (direct 0x0603) */
-struct i40e_aq_set_mac_config {
- __le16 max_frame_size;
- u8 params;
-#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04
-#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78
-#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3
-#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8
-#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7
-#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5
-#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4
-#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3
-#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2
-#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1
- u8 tx_timer_priority; /* bitmap */
- __le16 tx_timer_value;
- __le16 fc_refresh_threshold;
- u8 reserved[8];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aq_set_mac_config);
-
-/* Restart Auto-Negotiation (direct 0x605) */
-struct i40e_aqc_set_link_restart_an {
- u8 command;
-#define I40E_AQ_PHY_RESTART_AN 0x02
-#define I40E_AQ_PHY_LINK_ENABLE 0x04
- u8 reserved[15];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_set_link_restart_an);
-
-/* Get Link Status cmd & response data structure (direct 0x0607) */
-struct i40e_aqc_get_link_status {
- __le16 command_flags; /* only field set on command */
-#define I40E_AQ_LSE_MASK 0x3
-#define I40E_AQ_LSE_NOP 0x0
-#define I40E_AQ_LSE_DISABLE 0x2
-#define I40E_AQ_LSE_ENABLE 0x3
-/* only response uses this flag */
-#define I40E_AQ_LSE_IS_ENABLED 0x1
- u8 phy_type; /* i40e_aq_phy_type */
- u8 link_speed; /* i40e_aq_link_speed */
- u8 link_info;
-#define I40E_AQ_LINK_UP 0x01 /* obsolete */
-#define I40E_AQ_LINK_UP_FUNCTION 0x01
-#define I40E_AQ_LINK_FAULT 0x02
-#define I40E_AQ_LINK_FAULT_TX 0x04
-#define I40E_AQ_LINK_FAULT_RX 0x08
-#define I40E_AQ_LINK_FAULT_REMOTE 0x10
-#define I40E_AQ_LINK_UP_PORT 0x20
-#define I40E_AQ_MEDIA_AVAILABLE 0x40
-#define I40E_AQ_SIGNAL_DETECT 0x80
- u8 an_info;
-#define I40E_AQ_AN_COMPLETED 0x01
-#define I40E_AQ_LP_AN_ABILITY 0x02
-#define I40E_AQ_PD_FAULT 0x04
-#define I40E_AQ_FEC_EN 0x08
-#define I40E_AQ_PHY_LOW_POWER 0x10
-#define I40E_AQ_LINK_PAUSE_TX 0x20
-#define I40E_AQ_LINK_PAUSE_RX 0x40
-#define I40E_AQ_QUALIFIED_MODULE 0x80
- u8 ext_info;
-#define I40E_AQ_LINK_PHY_TEMP_ALARM 0x01
-#define I40E_AQ_LINK_XCESSIVE_ERRORS 0x02
-#define I40E_AQ_LINK_TX_SHIFT 0x02
-#define I40E_AQ_LINK_TX_MASK (0x03 << I40E_AQ_LINK_TX_SHIFT)
-#define I40E_AQ_LINK_TX_ACTIVE 0x00
-#define I40E_AQ_LINK_TX_DRAINED 0x01
-#define I40E_AQ_LINK_TX_FLUSHED 0x03
-#define I40E_AQ_LINK_FORCED_40G 0x10
-/* 25G Error Codes */
-#define I40E_AQ_25G_NO_ERR 0X00
-#define I40E_AQ_25G_NOT_PRESENT 0X01
-#define I40E_AQ_25G_NVM_CRC_ERR 0X02
-#define I40E_AQ_25G_SBUS_UCODE_ERR 0X03
-#define I40E_AQ_25G_SERDES_UCODE_ERR 0X04
-#define I40E_AQ_25G_NIMB_UCODE_ERR 0X05
- u8 loopback; /* use defines from i40e_aqc_set_lb_mode */
-/* Since firmware API 1.7 loopback field keeps power class info as well */
-#define I40E_AQ_LOOPBACK_MASK 0x07
-#define I40E_AQ_PWR_CLASS_SHIFT_LB 6
-#define I40E_AQ_PWR_CLASS_MASK_LB (0x03 << I40E_AQ_PWR_CLASS_SHIFT_LB)
- __le16 max_frame_size;
- u8 config;
-#define I40E_AQ_CONFIG_FEC_KR_ENA 0x01
-#define I40E_AQ_CONFIG_FEC_RS_ENA 0x02
-#define I40E_AQ_CONFIG_CRC_ENA 0x04
-#define I40E_AQ_CONFIG_PACING_MASK 0x78
- union {
- struct {
- u8 power_desc;
-#define I40E_AQ_LINK_POWER_CLASS_1 0x00
-#define I40E_AQ_LINK_POWER_CLASS_2 0x01
-#define I40E_AQ_LINK_POWER_CLASS_3 0x02
-#define I40E_AQ_LINK_POWER_CLASS_4 0x03
-#define I40E_AQ_PWR_CLASS_MASK 0x03
- u8 reserved[4];
- };
- struct {
- u8 link_type[4];
- u8 link_type_ext;
- };
- };
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status);
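Per the comment in the structure above, from firmware API 1.7 the loopback byte also carries the module power class; a sketch of unpacking it, with 'resp' an assumed struct i40e_aqc_get_link_status *:

    u8 lb_mode = resp->loopback & I40E_AQ_LOOPBACK_MASK;
    u8 pwr_class = (resp->loopback & I40E_AQ_PWR_CLASS_MASK_LB) >>
                   I40E_AQ_PWR_CLASS_SHIFT_LB; /* I40E_AQ_LINK_POWER_CLASS_1..4 */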
I40E_AQ_EVENT_MODULE_QUAL_FAIL 0x0100 -#define I40E_AQ_EVENT_PORT_TX_SUSPENDED 0x0200 - u8 reserved1[6]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_int_mask); - -/* Get Local AN advt register (direct 0x0614) - * Set Local AN advt register (direct 0x0615) - * Get Link Partner AN advt register (direct 0x0616) - */ -struct i40e_aqc_an_advt_reg { - __le32 local_an_reg0; - __le16 local_an_reg1; - u8 reserved[10]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_an_advt_reg); - -/* Set Loopback mode (0x0618) */ -struct i40e_aqc_set_lb_mode { - __le16 lb_mode; -#define I40E_AQ_LB_PHY_LOCAL 0x01 -#define I40E_AQ_LB_PHY_REMOTE 0x02 -#define I40E_AQ_LB_MAC_LOCAL 0x04 - u8 reserved[14]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode); - -/* Set PHY Debug command (0x0622) */ -struct i40e_aqc_set_phy_debug { - u8 command_flags; -#define I40E_AQ_PHY_DEBUG_RESET_INTERNAL 0x02 -#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT 2 -#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_MASK (0x03 << \ - I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT) -#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_NONE 0x00 -#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD 0x01 -#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02 -#define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10 - u8 reserved[15]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_debug); - -enum i40e_aq_phy_reg_type { - I40E_AQC_PHY_REG_INTERNAL = 0x1, - I40E_AQC_PHY_REG_EXERNAL_BASET = 0x2, - I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3 -}; - -/* Run PHY Activity (0x0626) */ -struct i40e_aqc_run_phy_activity { - __le16 activity_id; - u8 flags; - u8 reserved1; - __le32 control; - __le32 data; - u8 reserved2[4]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_run_phy_activity); - -/* Set PHY Register command (0x0628) */ -/* Get PHY Register command (0x0629) */ -struct i40e_aqc_phy_register_access { - u8 phy_interface; -#define I40E_AQ_PHY_REG_ACCESS_INTERNAL 0 -#define I40E_AQ_PHY_REG_ACCESS_EXTERNAL 1 -#define I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE 2 - u8 dev_address; - u8 reserved1[2]; - __le32 reg_address; - __le32 reg_value; - u8 reserved2[4]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_phy_register_access); - -/* NVM Read command (indirect 0x0701) - * NVM Erase commands (direct 0x0702) - * NVM Update commands (indirect 0x0703) - */ -struct i40e_aqc_nvm_update { - u8 command_flags; -#define I40E_AQ_NVM_LAST_CMD 0x01 -#define I40E_AQ_NVM_REARRANGE_TO_FLAT 0x20 -#define I40E_AQ_NVM_REARRANGE_TO_STRUCT 0x40 -#define I40E_AQ_NVM_FLASH_ONLY 0x80 -#define I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT 1 -#define I40E_AQ_NVM_PRESERVATION_FLAGS_MASK 0x03 -#define I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED 0x03 -#define I40E_AQ_NVM_PRESERVATION_FLAGS_ALL 0x01 - u8 module_pointer; - __le16 length; - __le32 offset; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update); - -/* NVM Config Read (indirect 0x0704) */ -struct i40e_aqc_nvm_config_read { - __le16 cmd_flags; -#define I40E_AQ_ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1 -#define I40E_AQ_ANVM_READ_SINGLE_FEATURE 0 -#define I40E_AQ_ANVM_READ_MULTIPLE_FEATURES 1 - __le16 element_count; - __le16 element_id; /* Feature/field ID */ - __le16 element_id_msw; /* MSWord of field ID */ - __le32 address_high; - __le32 address_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_read); - -/* NVM Config Write (indirect 0x0705) */ -struct i40e_aqc_nvm_config_write { - __le16 cmd_flags; - __le16 element_count; - u8 reserved[4]; - __le32 address_high; - __le32 address_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write); - -/* Used for 0x0704 as well as for 0x0705 
commands */ -#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT 1 -#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK \ - BIT(I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT) -#define I40E_AQ_ANVM_FEATURE 0 -#define I40E_AQ_ANVM_IMMEDIATE_FIELD BIT(FEATURE_OR_IMMEDIATE_SHIFT) -struct i40e_aqc_nvm_config_data_feature { - __le16 feature_id; -#define I40E_AQ_ANVM_FEATURE_OPTION_OEM_ONLY 0x01 -#define I40E_AQ_ANVM_FEATURE_OPTION_DWORD_MAP 0x08 -#define I40E_AQ_ANVM_FEATURE_OPTION_POR_CSR 0x10 - __le16 feature_options; - __le16 feature_selection; -}; - -I40E_CHECK_STRUCT_LEN(0x6, i40e_aqc_nvm_config_data_feature); - -struct i40e_aqc_nvm_config_data_immediate_field { - __le32 field_id; - __le32 field_value; - __le16 field_options; - __le16 reserved; -}; - -I40E_CHECK_STRUCT_LEN(0xc, i40e_aqc_nvm_config_data_immediate_field); - -/* OEM Post Update (indirect 0x0720) - * no command data struct used - */ - struct i40e_aqc_nvm_oem_post_update { -#define I40E_AQ_NVM_OEM_POST_UPDATE_EXTERNAL_DATA 0x01 - u8 sel_data; - u8 reserved[7]; -}; - -I40E_CHECK_STRUCT_LEN(0x8, i40e_aqc_nvm_oem_post_update); - -struct i40e_aqc_nvm_oem_post_update_buffer { - u8 str_len; - u8 dev_addr; - __le16 eeprom_addr; - u8 data[36]; -}; - -I40E_CHECK_STRUCT_LEN(0x28, i40e_aqc_nvm_oem_post_update_buffer); - -/* Thermal Sensor (indirect 0x0721) - * read or set thermal sensor configs and values - * takes a sensor and command specific data buffer, not detailed here - */ -struct i40e_aqc_thermal_sensor { - u8 sensor_action; -#define I40E_AQ_THERMAL_SENSOR_READ_CONFIG 0 -#define I40E_AQ_THERMAL_SENSOR_SET_CONFIG 1 -#define I40E_AQ_THERMAL_SENSOR_READ_TEMP 2 - u8 reserved[7]; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_thermal_sensor); - -/* Send to PF command (indirect 0x0801) id is only used by PF - * Send to VF command (indirect 0x0802) id is only used by PF - * Send to Peer PF command (indirect 0x0803) - */ -struct i40e_aqc_pf_vf_message { - __le32 id; - u8 reserved[4]; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_pf_vf_message); - -/* Alternate structure */ - -/* Direct write (direct 0x0900) - * Direct read (direct 0x0902) - */ -struct i40e_aqc_alternate_write { - __le32 address0; - __le32 data0; - __le32 address1; - __le32 data1; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write); - -/* Indirect write (indirect 0x0901) - * Indirect read (indirect 0x0903) - */ - -struct i40e_aqc_alternate_ind_write { - __le32 address; - __le32 length; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_ind_write); - -/* Done alternate write (direct 0x0904) - * uses i40e_aq_desc - */ -struct i40e_aqc_alternate_write_done { - __le16 cmd_flags; -#define I40E_AQ_ALTERNATE_MODE_BIOS_MASK 1 -#define I40E_AQ_ALTERNATE_MODE_BIOS_LEGACY 0 -#define I40E_AQ_ALTERNATE_MODE_BIOS_UEFI 1 -#define I40E_AQ_ALTERNATE_RESET_NEEDED 2 - u8 reserved[14]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write_done); - -/* Set OEM mode (direct 0x0905) */ -struct i40e_aqc_alternate_set_mode { - __le32 mode; -#define I40E_AQ_ALTERNATE_MODE_NONE 0 -#define I40E_AQ_ALTERNATE_MODE_OEM 1 - u8 reserved[12]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_set_mode); - -/* Clear port Alternate RAM (direct 0x0906) uses i40e_aq_desc */ - -/* async events 0x10xx */ - -/* Lan Queue Overflow Event (direct, 0x1001) */ -struct i40e_aqc_lan_overflow { - __le32 prtdcb_rupto; - __le32 otx_ctl; - u8 reserved[8]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_lan_overflow); - -/* Get LLDP MIB (indirect 0x0A00) 
*/ -struct i40e_aqc_lldp_get_mib { - u8 type; - u8 reserved1; -#define I40E_AQ_LLDP_MIB_TYPE_MASK 0x3 -#define I40E_AQ_LLDP_MIB_LOCAL 0x0 -#define I40E_AQ_LLDP_MIB_REMOTE 0x1 -#define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE 0x2 -#define I40E_AQ_LLDP_BRIDGE_TYPE_MASK 0xC -#define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT 0x2 -#define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE 0x0 -#define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR 0x1 -#define I40E_AQ_LLDP_TX_SHIFT 0x4 -#define I40E_AQ_LLDP_TX_MASK (0x03 << I40E_AQ_LLDP_TX_SHIFT) -/* TX pause flags use I40E_AQ_LINK_TX_* above */ - __le16 local_len; - __le16 remote_len; - u8 reserved2[2]; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib); - -/* Configure LLDP MIB Change Event (direct 0x0A01) - * also used for the event (with type in the command field) - */ -struct i40e_aqc_lldp_update_mib { - u8 command; -#define I40E_AQ_LLDP_MIB_UPDATE_ENABLE 0x0 -#define I40E_AQ_LLDP_MIB_UPDATE_DISABLE 0x1 - u8 reserved[7]; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_mib); - -/* Add LLDP TLV (indirect 0x0A02) - * Delete LLDP TLV (indirect 0x0A04) - */ -struct i40e_aqc_lldp_add_tlv { - u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */ - u8 reserved1[1]; - __le16 len; - u8 reserved2[4]; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_add_tlv); - -/* Update LLDP TLV (indirect 0x0A03) */ -struct i40e_aqc_lldp_update_tlv { - u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */ - u8 reserved; - __le16 old_len; - __le16 new_offset; - __le16 new_len; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_tlv); - -/* Stop LLDP (direct 0x0A05) */ -struct i40e_aqc_lldp_stop { - u8 command; -#define I40E_AQ_LLDP_AGENT_STOP 0x0 -#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1 - u8 reserved[15]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop); - -/* Start LLDP (direct 0x0A06) */ - -struct i40e_aqc_lldp_start { - u8 command; -#define I40E_AQ_LLDP_AGENT_START 0x1 - u8 reserved[15]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start); - -/* Set DCB (direct 0x0303) */ -struct i40e_aqc_set_dcb_parameters { - u8 command; -#define I40E_AQ_DCB_SET_AGENT 0x1 -#define I40E_DCB_VALID 0x1 - u8 valid_flags; - u8 reserved[14]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_set_dcb_parameters); - -/* Apply MIB changes (0x0A07) - * uses the generic struc as it contains no data - */ - -/* Add Udp Tunnel command and completion (direct 0x0B00) */ -struct i40e_aqc_add_udp_tunnel { - __le16 udp_port; - u8 reserved0[3]; - u8 protocol_type; -#define I40E_AQC_TUNNEL_TYPE_VXLAN 0x00 -#define I40E_AQC_TUNNEL_TYPE_NGE 0x01 -#define I40E_AQC_TUNNEL_TYPE_TEREDO 0x10 -#define I40E_AQC_TUNNEL_TYPE_VXLAN_GPE 0x11 - u8 reserved1[10]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel); - -struct i40e_aqc_add_udp_tunnel_completion { - __le16 udp_port; - u8 filter_entry_index; - u8 multiple_pfs; -#define I40E_AQC_SINGLE_PF 0x0 -#define I40E_AQC_MULTIPLE_PFS 0x1 - u8 total_filters; - u8 reserved[11]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel_completion); - -/* remove UDP Tunnel command (0x0B01) */ -struct i40e_aqc_remove_udp_tunnel { - u8 reserved[2]; - u8 index; /* 0 to 15 */ - u8 reserved2[13]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel); - -struct i40e_aqc_del_udp_tunnel_completion { - __le16 udp_port; - u8 index; /* 0 to 15 */ - u8 multiple_pfs; - u8 total_filters_used; - u8 reserved1[11]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion); 
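For context: the add/remove UDP tunnel commands above (0x0B00/0x0B01) are direct admin queue commands, so the UDP port, the tunnel type and, on completion, the allocated filter index all travel in the 16-byte descriptor parameters rather than in an attached DMA buffer. Below is a minimal sketch of how a caller could drive the add command. It is illustrative only, not code from this patch: xxx_aq_add_vxlan_port() is a hypothetical name, and the sketch assumes the i40evf_fill_default_direct_cmd_desc()/i40evf_asq_send_command() helpers that appear later in this patch, an i40e_aqc_opc_add_udp_tunnel opcode declared alongside these structures, and an AQ layer that copies the completion descriptor back into the caller's copy.

/*
 * Hypothetical wrapper: install a VXLAN port filter via AQ command 0x0B00.
 * Command and completion share the descriptor, so the same raw parameter
 * bytes are viewed as the command struct before the send and as the
 * completion struct after the firmware responds.
 */
static i40e_status xxx_aq_add_vxlan_port(struct i40e_hw *hw, u16 udp_port,
					 u8 *filter_index)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_add_udp_tunnel *cmd =
		(struct i40e_aqc_add_udp_tunnel *)&desc.params.raw;
	struct i40e_aqc_add_udp_tunnel_completion *resp =
		(struct i40e_aqc_add_udp_tunnel_completion *)&desc.params.raw;
	i40e_status status;

	i40evf_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel);

	/* Direct command: port and tunnel type ride in the descriptor */
	cmd->udp_port = cpu_to_le16(udp_port);
	cmd->protocol_type = I40E_AQC_TUNNEL_TYPE_VXLAN;

	status = i40evf_asq_send_command(hw, &desc, NULL, 0, NULL);

	/* On success the firmware echoes back the allocated filter slot */
	if (!status && filter_index)
		*filter_index = resp->filter_entry_index;

	return status;
}

This descriptor-aliasing pattern (casting &desc.params.raw to a command struct on the way out and to a completion struct on the way back) is the standard idiom for direct commands throughout these files; the RSS key/LUT structures that follow are handled the same way, except that their key and table payloads are indirect and travel via addr_high/addr_low.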
- -struct i40e_aqc_get_set_rss_key { -#define I40E_AQC_SET_RSS_KEY_VSI_VALID BIT(15) -#define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0 -#define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \ - I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) - __le16 vsi_id; - u8 reserved[6]; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_key); - -struct i40e_aqc_get_set_rss_key_data { - u8 standard_rss_key[0x28]; - u8 extended_hash_key[0xc]; -}; - -I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data); - -struct i40e_aqc_get_set_rss_lut { -#define I40E_AQC_SET_RSS_LUT_VSI_VALID BIT(15) -#define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0 -#define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \ - I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) - __le16 vsi_id; -#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0 -#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK \ - BIT(I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) - -#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0 -#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1 - __le16 flags; - u8 reserved[4]; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_lut); - -/* tunnel key structure 0x0B10 */ - -struct i40e_aqc_tunnel_key_structure_A0 { - __le16 key1_off; - __le16 key1_len; - __le16 key2_off; - __le16 key2_len; - __le16 flags; -#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01 -/* response flags */ -#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01 -#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02 -#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03 - u8 resreved[6]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure_A0); - -struct i40e_aqc_tunnel_key_structure { - u8 key1_off; - u8 key2_off; - u8 key1_len; /* 0 to 15 */ - u8 key2_len; /* 0 to 15 */ - u8 flags; -#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01 -/* response flags */ -#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01 -#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02 -#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03 - u8 network_key_index; -#define I40E_AQC_NETWORK_KEY_INDEX_VXLAN 0x0 -#define I40E_AQC_NETWORK_KEY_INDEX_NGE 0x1 -#define I40E_AQC_NETWORK_KEY_INDEX_FLEX_MAC_IN_UDP 0x2 -#define I40E_AQC_NETWORK_KEY_INDEX_GRE 0x3 - u8 reserved[10]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure); - -/* OEM mode commands (direct 0xFE0x) */ -struct i40e_aqc_oem_param_change { - __le32 param_type; -#define I40E_AQ_OEM_PARAM_TYPE_PF_CTL 0 -#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1 -#define I40E_AQ_OEM_PARAM_MAC 2 - __le32 param_value1; - __le16 param_value2; - u8 reserved[6]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change); - -struct i40e_aqc_oem_state_change { - __le32 state; -#define I40E_AQ_OEM_STATE_LINK_DOWN 0x0 -#define I40E_AQ_OEM_STATE_LINK_UP 0x1 - u8 reserved[12]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change); - -/* Initialize OCSD (0xFE02, direct) */ -struct i40e_aqc_opc_oem_ocsd_initialize { - u8 type_status; - u8 reserved1[3]; - __le32 ocsd_memory_block_addr_high; - __le32 ocsd_memory_block_addr_low; - __le32 requested_update_interval; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocsd_initialize); - -/* Initialize OCBB (0xFE03, direct) */ -struct i40e_aqc_opc_oem_ocbb_initialize { - u8 type_status; - u8 reserved1[3]; - __le32 ocbb_memory_block_addr_high; - __le32 ocbb_memory_block_addr_low; - u8 reserved2[4]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocbb_initialize); - -/* debug commands */ - -/* get device id (0xFF00) uses the generic structure */ - -/* set test more (0xFF01, internal) */ - -struct i40e_acq_set_test_mode { - u8 mode; 
-#define I40E_AQ_TEST_PARTIAL 0 -#define I40E_AQ_TEST_FULL 1 -#define I40E_AQ_TEST_NVM 2 - u8 reserved[3]; - u8 command; -#define I40E_AQ_TEST_OPEN 0 -#define I40E_AQ_TEST_CLOSE 1 -#define I40E_AQ_TEST_INC 2 - u8 reserved2[3]; - __le32 address_high; - __le32 address_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_acq_set_test_mode); - -/* Debug Read Register command (0xFF03) - * Debug Write Register command (0xFF04) - */ -struct i40e_aqc_debug_reg_read_write { - __le32 reserved; - __le32 address; - __le32 value_high; - __le32 value_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_reg_read_write); - -/* Scatter/gather Reg Read (indirect 0xFF05) - * Scatter/gather Reg Write (indirect 0xFF06) - */ - -/* i40e_aq_desc is used for the command */ -struct i40e_aqc_debug_reg_sg_element_data { - __le32 address; - __le32 value; -}; - -/* Debug Modify register (direct 0xFF07) */ -struct i40e_aqc_debug_modify_reg { - __le32 address; - __le32 value; - __le32 clear_mask; - __le32 set_mask; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_reg); - -/* dump internal data (0xFF08, indirect) */ - -#define I40E_AQ_CLUSTER_ID_AUX 0 -#define I40E_AQ_CLUSTER_ID_SWITCH_FLU 1 -#define I40E_AQ_CLUSTER_ID_TXSCHED 2 -#define I40E_AQ_CLUSTER_ID_HMC 3 -#define I40E_AQ_CLUSTER_ID_MAC0 4 -#define I40E_AQ_CLUSTER_ID_MAC1 5 -#define I40E_AQ_CLUSTER_ID_MAC2 6 -#define I40E_AQ_CLUSTER_ID_MAC3 7 -#define I40E_AQ_CLUSTER_ID_DCB 8 -#define I40E_AQ_CLUSTER_ID_EMP_MEM 9 -#define I40E_AQ_CLUSTER_ID_PKT_BUF 10 -#define I40E_AQ_CLUSTER_ID_ALTRAM 11 - -struct i40e_aqc_debug_dump_internals { - u8 cluster_id; - u8 table_id; - __le16 data_size; - __le32 idx; - __le32 address_high; - __le32 address_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_dump_internals); - -struct i40e_aqc_debug_modify_internals { - u8 cluster_id; - u8 cluster_specific_params[7]; - __le32 address_high; - __le32 address_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_internals); - -#endif /* _I40E_ADMINQ_CMD_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_alloc.h b/drivers/net/ethernet/intel/i40evf/i40e_alloc.h deleted file mode 100644 index cb8689222c8b..000000000000 --- a/drivers/net/ethernet/intel/i40evf/i40e_alloc.h +++ /dev/null @@ -1,35 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. 
*/ - -#ifndef _I40E_ALLOC_H_ -#define _I40E_ALLOC_H_ - -struct i40e_hw; - -/* Memory allocation types */ -enum i40e_memory_type { - i40e_mem_arq_buf = 0, /* ARQ indirect command buffer */ - i40e_mem_asq_buf = 1, - i40e_mem_atq_buf = 2, /* ATQ indirect command buffer */ - i40e_mem_arq_ring = 3, /* ARQ descriptor ring */ - i40e_mem_atq_ring = 4, /* ATQ descriptor ring */ - i40e_mem_pd = 5, /* Page Descriptor */ - i40e_mem_bp = 6, /* Backing Page - 4KB */ - i40e_mem_bp_jumbo = 7, /* Backing Page - > 4KB */ - i40e_mem_reserved -}; - -/* prototype for functions used for dynamic memory allocation */ -i40e_status i40e_allocate_dma_mem(struct i40e_hw *hw, - struct i40e_dma_mem *mem, - enum i40e_memory_type type, - u64 size, u32 alignment); -i40e_status i40e_free_dma_mem(struct i40e_hw *hw, - struct i40e_dma_mem *mem); -i40e_status i40e_allocate_virt_mem(struct i40e_hw *hw, - struct i40e_virt_mem *mem, - u32 size); -i40e_status i40e_free_virt_mem(struct i40e_hw *hw, - struct i40e_virt_mem *mem); - -#endif /* _I40E_ALLOC_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c deleted file mode 100644 index eea280ba411e..000000000000 --- a/drivers/net/ethernet/intel/i40evf/i40e_common.c +++ /dev/null @@ -1,1320 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2013 - 2018 Intel Corporation. */ - -#include "i40e_type.h" -#include "i40e_adminq.h" -#include "i40e_prototype.h" -#include <linux/avf/virtchnl.h> - -/** - * i40e_set_mac_type - Sets MAC type - * @hw: pointer to the HW structure - * - * This function sets the mac type of the adapter based on the - * vendor ID and device ID stored in the hw structure. - **/ -i40e_status i40e_set_mac_type(struct i40e_hw *hw) -{ - i40e_status status = 0; - - if (hw->vendor_id == PCI_VENDOR_ID_INTEL) { - switch (hw->device_id) { - case I40E_DEV_ID_SFP_XL710: - case I40E_DEV_ID_QEMU: - case I40E_DEV_ID_KX_B: - case I40E_DEV_ID_KX_C: - case I40E_DEV_ID_QSFP_A: - case I40E_DEV_ID_QSFP_B: - case I40E_DEV_ID_QSFP_C: - case I40E_DEV_ID_10G_BASE_T: - case I40E_DEV_ID_10G_BASE_T4: - case I40E_DEV_ID_20G_KR2: - case I40E_DEV_ID_20G_KR2_A: - case I40E_DEV_ID_25G_B: - case I40E_DEV_ID_25G_SFP28: - hw->mac.type = I40E_MAC_XL710; - break; - case I40E_DEV_ID_SFP_X722: - case I40E_DEV_ID_1G_BASE_T_X722: - case I40E_DEV_ID_10G_BASE_T_X722: - case I40E_DEV_ID_SFP_I_X722: - hw->mac.type = I40E_MAC_X722; - break; - case I40E_DEV_ID_X722_VF: - hw->mac.type = I40E_MAC_X722_VF; - break; - case I40E_DEV_ID_VF: - case I40E_DEV_ID_VF_HV: - case I40E_DEV_ID_ADAPTIVE_VF: - hw->mac.type = I40E_MAC_VF; - break; - default: - hw->mac.type = I40E_MAC_GENERIC; - break; - } - } else { - status = I40E_ERR_DEVICE_NOT_SUPPORTED; - } - - hw_dbg(hw, "i40e_set_mac_type found mac: %d, returns: %d\n", - hw->mac.type, status); - return status; -} - -/** - * i40evf_aq_str - convert AQ err code to a string - * @hw: pointer to the HW structure - * @aq_err: the AQ error code to convert - **/ -const char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err) -{ - switch (aq_err) { - case I40E_AQ_RC_OK: - return "OK"; - case I40E_AQ_RC_EPERM: - return "I40E_AQ_RC_EPERM"; - case I40E_AQ_RC_ENOENT: - return "I40E_AQ_RC_ENOENT"; - case I40E_AQ_RC_ESRCH: - return "I40E_AQ_RC_ESRCH"; - case I40E_AQ_RC_EINTR: - return "I40E_AQ_RC_EINTR"; - case I40E_AQ_RC_EIO: - return "I40E_AQ_RC_EIO"; - case I40E_AQ_RC_ENXIO: - return "I40E_AQ_RC_ENXIO"; - case I40E_AQ_RC_E2BIG: - return "I40E_AQ_RC_E2BIG"; - case I40E_AQ_RC_EAGAIN: - return 
"I40E_AQ_RC_EAGAIN"; - case I40E_AQ_RC_ENOMEM: - return "I40E_AQ_RC_ENOMEM"; - case I40E_AQ_RC_EACCES: - return "I40E_AQ_RC_EACCES"; - case I40E_AQ_RC_EFAULT: - return "I40E_AQ_RC_EFAULT"; - case I40E_AQ_RC_EBUSY: - return "I40E_AQ_RC_EBUSY"; - case I40E_AQ_RC_EEXIST: - return "I40E_AQ_RC_EEXIST"; - case I40E_AQ_RC_EINVAL: - return "I40E_AQ_RC_EINVAL"; - case I40E_AQ_RC_ENOTTY: - return "I40E_AQ_RC_ENOTTY"; - case I40E_AQ_RC_ENOSPC: - return "I40E_AQ_RC_ENOSPC"; - case I40E_AQ_RC_ENOSYS: - return "I40E_AQ_RC_ENOSYS"; - case I40E_AQ_RC_ERANGE: - return "I40E_AQ_RC_ERANGE"; - case I40E_AQ_RC_EFLUSHED: - return "I40E_AQ_RC_EFLUSHED"; - case I40E_AQ_RC_BAD_ADDR: - return "I40E_AQ_RC_BAD_ADDR"; - case I40E_AQ_RC_EMODE: - return "I40E_AQ_RC_EMODE"; - case I40E_AQ_RC_EFBIG: - return "I40E_AQ_RC_EFBIG"; - } - - snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err); - return hw->err_str; -} - -/** - * i40evf_stat_str - convert status err code to a string - * @hw: pointer to the HW structure - * @stat_err: the status error code to convert - **/ -const char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err) -{ - switch (stat_err) { - case 0: - return "OK"; - case I40E_ERR_NVM: - return "I40E_ERR_NVM"; - case I40E_ERR_NVM_CHECKSUM: - return "I40E_ERR_NVM_CHECKSUM"; - case I40E_ERR_PHY: - return "I40E_ERR_PHY"; - case I40E_ERR_CONFIG: - return "I40E_ERR_CONFIG"; - case I40E_ERR_PARAM: - return "I40E_ERR_PARAM"; - case I40E_ERR_MAC_TYPE: - return "I40E_ERR_MAC_TYPE"; - case I40E_ERR_UNKNOWN_PHY: - return "I40E_ERR_UNKNOWN_PHY"; - case I40E_ERR_LINK_SETUP: - return "I40E_ERR_LINK_SETUP"; - case I40E_ERR_ADAPTER_STOPPED: - return "I40E_ERR_ADAPTER_STOPPED"; - case I40E_ERR_INVALID_MAC_ADDR: - return "I40E_ERR_INVALID_MAC_ADDR"; - case I40E_ERR_DEVICE_NOT_SUPPORTED: - return "I40E_ERR_DEVICE_NOT_SUPPORTED"; - case I40E_ERR_MASTER_REQUESTS_PENDING: - return "I40E_ERR_MASTER_REQUESTS_PENDING"; - case I40E_ERR_INVALID_LINK_SETTINGS: - return "I40E_ERR_INVALID_LINK_SETTINGS"; - case I40E_ERR_AUTONEG_NOT_COMPLETE: - return "I40E_ERR_AUTONEG_NOT_COMPLETE"; - case I40E_ERR_RESET_FAILED: - return "I40E_ERR_RESET_FAILED"; - case I40E_ERR_SWFW_SYNC: - return "I40E_ERR_SWFW_SYNC"; - case I40E_ERR_NO_AVAILABLE_VSI: - return "I40E_ERR_NO_AVAILABLE_VSI"; - case I40E_ERR_NO_MEMORY: - return "I40E_ERR_NO_MEMORY"; - case I40E_ERR_BAD_PTR: - return "I40E_ERR_BAD_PTR"; - case I40E_ERR_RING_FULL: - return "I40E_ERR_RING_FULL"; - case I40E_ERR_INVALID_PD_ID: - return "I40E_ERR_INVALID_PD_ID"; - case I40E_ERR_INVALID_QP_ID: - return "I40E_ERR_INVALID_QP_ID"; - case I40E_ERR_INVALID_CQ_ID: - return "I40E_ERR_INVALID_CQ_ID"; - case I40E_ERR_INVALID_CEQ_ID: - return "I40E_ERR_INVALID_CEQ_ID"; - case I40E_ERR_INVALID_AEQ_ID: - return "I40E_ERR_INVALID_AEQ_ID"; - case I40E_ERR_INVALID_SIZE: - return "I40E_ERR_INVALID_SIZE"; - case I40E_ERR_INVALID_ARP_INDEX: - return "I40E_ERR_INVALID_ARP_INDEX"; - case I40E_ERR_INVALID_FPM_FUNC_ID: - return "I40E_ERR_INVALID_FPM_FUNC_ID"; - case I40E_ERR_QP_INVALID_MSG_SIZE: - return "I40E_ERR_QP_INVALID_MSG_SIZE"; - case I40E_ERR_QP_TOOMANY_WRS_POSTED: - return "I40E_ERR_QP_TOOMANY_WRS_POSTED"; - case I40E_ERR_INVALID_FRAG_COUNT: - return "I40E_ERR_INVALID_FRAG_COUNT"; - case I40E_ERR_QUEUE_EMPTY: - return "I40E_ERR_QUEUE_EMPTY"; - case I40E_ERR_INVALID_ALIGNMENT: - return "I40E_ERR_INVALID_ALIGNMENT"; - case I40E_ERR_FLUSHED_QUEUE: - return "I40E_ERR_FLUSHED_QUEUE"; - case I40E_ERR_INVALID_PUSH_PAGE_INDEX: - return "I40E_ERR_INVALID_PUSH_PAGE_INDEX"; - case 
I40E_ERR_INVALID_IMM_DATA_SIZE: - return "I40E_ERR_INVALID_IMM_DATA_SIZE"; - case I40E_ERR_TIMEOUT: - return "I40E_ERR_TIMEOUT"; - case I40E_ERR_OPCODE_MISMATCH: - return "I40E_ERR_OPCODE_MISMATCH"; - case I40E_ERR_CQP_COMPL_ERROR: - return "I40E_ERR_CQP_COMPL_ERROR"; - case I40E_ERR_INVALID_VF_ID: - return "I40E_ERR_INVALID_VF_ID"; - case I40E_ERR_INVALID_HMCFN_ID: - return "I40E_ERR_INVALID_HMCFN_ID"; - case I40E_ERR_BACKING_PAGE_ERROR: - return "I40E_ERR_BACKING_PAGE_ERROR"; - case I40E_ERR_NO_PBLCHUNKS_AVAILABLE: - return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE"; - case I40E_ERR_INVALID_PBLE_INDEX: - return "I40E_ERR_INVALID_PBLE_INDEX"; - case I40E_ERR_INVALID_SD_INDEX: - return "I40E_ERR_INVALID_SD_INDEX"; - case I40E_ERR_INVALID_PAGE_DESC_INDEX: - return "I40E_ERR_INVALID_PAGE_DESC_INDEX"; - case I40E_ERR_INVALID_SD_TYPE: - return "I40E_ERR_INVALID_SD_TYPE"; - case I40E_ERR_MEMCPY_FAILED: - return "I40E_ERR_MEMCPY_FAILED"; - case I40E_ERR_INVALID_HMC_OBJ_INDEX: - return "I40E_ERR_INVALID_HMC_OBJ_INDEX"; - case I40E_ERR_INVALID_HMC_OBJ_COUNT: - return "I40E_ERR_INVALID_HMC_OBJ_COUNT"; - case I40E_ERR_INVALID_SRQ_ARM_LIMIT: - return "I40E_ERR_INVALID_SRQ_ARM_LIMIT"; - case I40E_ERR_SRQ_ENABLED: - return "I40E_ERR_SRQ_ENABLED"; - case I40E_ERR_ADMIN_QUEUE_ERROR: - return "I40E_ERR_ADMIN_QUEUE_ERROR"; - case I40E_ERR_ADMIN_QUEUE_TIMEOUT: - return "I40E_ERR_ADMIN_QUEUE_TIMEOUT"; - case I40E_ERR_BUF_TOO_SHORT: - return "I40E_ERR_BUF_TOO_SHORT"; - case I40E_ERR_ADMIN_QUEUE_FULL: - return "I40E_ERR_ADMIN_QUEUE_FULL"; - case I40E_ERR_ADMIN_QUEUE_NO_WORK: - return "I40E_ERR_ADMIN_QUEUE_NO_WORK"; - case I40E_ERR_BAD_IWARP_CQE: - return "I40E_ERR_BAD_IWARP_CQE"; - case I40E_ERR_NVM_BLANK_MODE: - return "I40E_ERR_NVM_BLANK_MODE"; - case I40E_ERR_NOT_IMPLEMENTED: - return "I40E_ERR_NOT_IMPLEMENTED"; - case I40E_ERR_PE_DOORBELL_NOT_ENABLED: - return "I40E_ERR_PE_DOORBELL_NOT_ENABLED"; - case I40E_ERR_DIAG_TEST_FAILED: - return "I40E_ERR_DIAG_TEST_FAILED"; - case I40E_ERR_NOT_READY: - return "I40E_ERR_NOT_READY"; - case I40E_NOT_SUPPORTED: - return "I40E_NOT_SUPPORTED"; - case I40E_ERR_FIRMWARE_API_VERSION: - return "I40E_ERR_FIRMWARE_API_VERSION"; - case I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR: - return "I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR"; - } - - snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err); - return hw->err_str; -} - -/** - * i40evf_debug_aq - * @hw: debug mask related to admin queue - * @mask: debug mask - * @desc: pointer to admin queue descriptor - * @buffer: pointer to command buffer - * @buf_len: max length of buffer - * - * Dumps debug log about adminq command with descriptor contents. 
- **/ -void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, - void *buffer, u16 buf_len) -{ - struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc; - u8 *buf = (u8 *)buffer; - - if ((!(mask & hw->debug_mask)) || (desc == NULL)) - return; - - i40e_debug(hw, mask, - "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n", - le16_to_cpu(aq_desc->opcode), - le16_to_cpu(aq_desc->flags), - le16_to_cpu(aq_desc->datalen), - le16_to_cpu(aq_desc->retval)); - i40e_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n", - le32_to_cpu(aq_desc->cookie_high), - le32_to_cpu(aq_desc->cookie_low)); - i40e_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n", - le32_to_cpu(aq_desc->params.internal.param0), - le32_to_cpu(aq_desc->params.internal.param1)); - i40e_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n", - le32_to_cpu(aq_desc->params.external.addr_high), - le32_to_cpu(aq_desc->params.external.addr_low)); - - if ((buffer != NULL) && (aq_desc->datalen != 0)) { - u16 len = le16_to_cpu(aq_desc->datalen); - - i40e_debug(hw, mask, "AQ CMD Buffer:\n"); - if (buf_len < len) - len = buf_len; - /* write the full 16-byte chunks */ - if (hw->debug_mask & mask) { - char prefix[27]; - - snprintf(prefix, sizeof(prefix), - "i40evf %02x:%02x.%x: \t0x", - hw->bus.bus_id, - hw->bus.device, - hw->bus.func); - - print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET, - 16, 1, buf, len, false); - } - } -} - -/** - * i40evf_check_asq_alive - * @hw: pointer to the hw struct - * - * Returns true if Queue is enabled else false. - **/ -bool i40evf_check_asq_alive(struct i40e_hw *hw) -{ - if (hw->aq.asq.len) - return !!(rd32(hw, hw->aq.asq.len) & - I40E_VF_ATQLEN1_ATQENABLE_MASK); - else - return false; -} - -/** - * i40evf_aq_queue_shutdown - * @hw: pointer to the hw struct - * @unloading: is the driver unloading itself - * - * Tell the Firmware that we're shutting down the AdminQ and whether - * or not the driver is unloading as well. 
- **/ -i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw, - bool unloading) -{ - struct i40e_aq_desc desc; - struct i40e_aqc_queue_shutdown *cmd = - (struct i40e_aqc_queue_shutdown *)&desc.params.raw; - i40e_status status; - - i40evf_fill_default_direct_cmd_desc(&desc, - i40e_aqc_opc_queue_shutdown); - - if (unloading) - cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING); - status = i40evf_asq_send_command(hw, &desc, NULL, 0, NULL); - - return status; -} - -/** - * i40e_aq_get_set_rss_lut - * @hw: pointer to the hardware structure - * @vsi_id: vsi fw index - * @pf_lut: for PF table set true, for VSI table set false - * @lut: pointer to the lut buffer provided by the caller - * @lut_size: size of the lut buffer - * @set: set true to set the table, false to get the table - * - * Internal function to get or set RSS look up table - **/ -static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw, - u16 vsi_id, bool pf_lut, - u8 *lut, u16 lut_size, - bool set) -{ - i40e_status status; - struct i40e_aq_desc desc; - struct i40e_aqc_get_set_rss_lut *cmd_resp = - (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw; - - if (set) - i40evf_fill_default_direct_cmd_desc(&desc, - i40e_aqc_opc_set_rss_lut); - else - i40evf_fill_default_direct_cmd_desc(&desc, - i40e_aqc_opc_get_rss_lut); - - /* Indirect command */ - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); - - cmd_resp->vsi_id = - cpu_to_le16((u16)((vsi_id << - I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) & - I40E_AQC_SET_RSS_LUT_VSI_ID_MASK)); - cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID); - - if (pf_lut) - cmd_resp->flags |= cpu_to_le16((u16) - ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF << - I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) & - I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK)); - else - cmd_resp->flags |= cpu_to_le16((u16) - ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI << - I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) & - I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK)); - - status = i40evf_asq_send_command(hw, &desc, lut, lut_size, NULL); - - return status; -} - -/** - * i40evf_aq_get_rss_lut - * @hw: pointer to the hardware structure - * @vsi_id: vsi fw index - * @pf_lut: for PF table set true, for VSI table set false - * @lut: pointer to the lut buffer provided by the caller - * @lut_size: size of the lut buffer - * - * get the RSS lookup table, PF or VSI type - **/ -i40e_status i40evf_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id, - bool pf_lut, u8 *lut, u16 lut_size) -{ - return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, - false); -} - -/** - * i40evf_aq_set_rss_lut - * @hw: pointer to the hardware structure - * @vsi_id: vsi fw index - * @pf_lut: for PF table set true, for VSI table set false - * @lut: pointer to the lut buffer provided by the caller - * @lut_size: size of the lut buffer - * - * set the RSS lookup table, PF or VSI type - **/ -i40e_status i40evf_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id, - bool pf_lut, u8 *lut, u16 lut_size) -{ - return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true); -} - -/** - * i40e_aq_get_set_rss_key - * @hw: pointer to the hw struct - * @vsi_id: vsi fw index - * @key: pointer to key info struct - * @set: set true to set the key, false to get the key - * - * get the RSS key per VSI - **/ -static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw, - u16 vsi_id, - struct i40e_aqc_get_set_rss_key_data *key, - bool set) -{ - i40e_status status; - struct i40e_aq_desc desc; - struct i40e_aqc_get_set_rss_key *cmd_resp = 
- (struct i40e_aqc_get_set_rss_key *)&desc.params.raw; - u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data); - - if (set) - i40evf_fill_default_direct_cmd_desc(&desc, - i40e_aqc_opc_set_rss_key); - else - i40evf_fill_default_direct_cmd_desc(&desc, - i40e_aqc_opc_get_rss_key); - - /* Indirect command */ - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); - - cmd_resp->vsi_id = - cpu_to_le16((u16)((vsi_id << - I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) & - I40E_AQC_SET_RSS_KEY_VSI_ID_MASK)); - cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID); - - status = i40evf_asq_send_command(hw, &desc, key, key_size, NULL); - - return status; -} - -/** - * i40evf_aq_get_rss_key - * @hw: pointer to the hw struct - * @vsi_id: vsi fw index - * @key: pointer to key info struct - * - **/ -i40e_status i40evf_aq_get_rss_key(struct i40e_hw *hw, - u16 vsi_id, - struct i40e_aqc_get_set_rss_key_data *key) -{ - return i40e_aq_get_set_rss_key(hw, vsi_id, key, false); -} - -/** - * i40evf_aq_set_rss_key - * @hw: pointer to the hw struct - * @vsi_id: vsi fw index - * @key: pointer to key info struct - * - * set the RSS key per VSI - **/ -i40e_status i40evf_aq_set_rss_key(struct i40e_hw *hw, - u16 vsi_id, - struct i40e_aqc_get_set_rss_key_data *key) -{ - return i40e_aq_get_set_rss_key(hw, vsi_id, key, true); -} - - -/* The i40evf_ptype_lookup table is used to convert from the 8-bit ptype in the - * hardware to a bit-field that can be used by SW to more easily determine the - * packet type. - * - * Macros are used to shorten the table lines and make this table human - * readable. - * - * We store the PTYPE in the top byte of the bit field - this is just so that - * we can check that the table doesn't have a row missing, as the index into - * the table should be the PTYPE. 
- * - * Typical work flow: - * - * IF NOT i40evf_ptype_lookup[ptype].known - * THEN - * Packet is unknown - * ELSE IF i40evf_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP - * Use the rest of the fields to look at the tunnels, inner protocols, etc - * ELSE - * Use the enum i40e_rx_l2_ptype to decode the packet type - * ENDIF - */ - -/* macro to make the table lines short */ -#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\ - { PTYPE, \ - 1, \ - I40E_RX_PTYPE_OUTER_##OUTER_IP, \ - I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \ - I40E_RX_PTYPE_##OUTER_FRAG, \ - I40E_RX_PTYPE_TUNNEL_##T, \ - I40E_RX_PTYPE_TUNNEL_END_##TE, \ - I40E_RX_PTYPE_##TEF, \ - I40E_RX_PTYPE_INNER_PROT_##I, \ - I40E_RX_PTYPE_PAYLOAD_LAYER_##PL } - -#define I40E_PTT_UNUSED_ENTRY(PTYPE) \ - { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 } - -/* shorter macros makes the table fit but are terse */ -#define I40E_RX_PTYPE_NOF I40E_RX_PTYPE_NOT_FRAG -#define I40E_RX_PTYPE_FRG I40E_RX_PTYPE_FRAG -#define I40E_RX_PTYPE_INNER_PROT_TS I40E_RX_PTYPE_INNER_PROT_TIMESYNC - -/* Lookup table mapping the HW PTYPE to the bit field for decoding */ -struct i40e_rx_ptype_decoded i40evf_ptype_lookup[] = { - /* L2 Packet types */ - I40E_PTT_UNUSED_ENTRY(0), - I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), - I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2), - I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), - I40E_PTT_UNUSED_ENTRY(4), - I40E_PTT_UNUSED_ENTRY(5), - I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), - I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), - I40E_PTT_UNUSED_ENTRY(8), - I40E_PTT_UNUSED_ENTRY(9), - I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), - I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), - I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - - /* Non Tunneled IPv4 */ - I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(25), - I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4), - I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4), - I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4), - - /* IPv4 --> IPv4 */ - I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3), - I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3), - I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(32), - I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4), - I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), - I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), - - /* IPv4 --> IPv6 */ - I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3), - I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3), - I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(39), - I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4), - I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), 
- I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), - - /* IPv4 --> GRE/NAT */ - I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), - - /* IPv4 --> GRE/NAT --> IPv4 */ - I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), - I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), - I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(47), - I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), - I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), - I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), - - /* IPv4 --> GRE/NAT --> IPv6 */ - I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), - I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), - I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(54), - I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), - I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), - I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), - - /* IPv4 --> GRE/NAT --> MAC */ - I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), - - /* IPv4 --> GRE/NAT --> MAC --> IPv4 */ - I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), - I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), - I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(62), - I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), - I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), - I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), - - /* IPv4 --> GRE/NAT -> MAC --> IPv6 */ - I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), - I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), - I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(69), - I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), - I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), - I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), - - /* IPv4 --> GRE/NAT --> MAC/VLAN */ - I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), - - /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */ - I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), - I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), - I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(77), - I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), - I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), - I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), - - /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */ - I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), - I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), - I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(84), - I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), - I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), - I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), - - /* Non Tunneled IPv6 */ - I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3), - I40E_PTT_UNUSED_ENTRY(91), - I40E_PTT(92, IP, IPV6, NOF, 
NONE, NONE, NOF, TCP, PAY4), - I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4), - I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4), - - /* IPv6 --> IPv4 */ - I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3), - I40E_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3), - I40E_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(98), - I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4), - I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), - I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), - - /* IPv6 --> IPv6 */ - I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3), - I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3), - I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(105), - I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4), - I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), - I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), - - /* IPv6 --> GRE/NAT */ - I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), - - /* IPv6 --> GRE/NAT -> IPv4 */ - I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), - I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), - I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(113), - I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), - I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), - I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), - - /* IPv6 --> GRE/NAT -> IPv6 */ - I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), - I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), - I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(120), - I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), - I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), - I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), - - /* IPv6 --> GRE/NAT -> MAC */ - I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), - - /* IPv6 --> GRE/NAT -> MAC -> IPv4 */ - I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), - I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), - I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(128), - I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), - I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), - I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), - - /* IPv6 --> GRE/NAT -> MAC -> IPv6 */ - I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), - I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), - I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(135), - I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), - I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), - I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), - - /* IPv6 --> GRE/NAT -> MAC/VLAN */ - I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), - - /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */ - I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), - I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), - I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(143), - I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, 
IPV4, NOF, TCP, PAY4), - I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), - I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), - - /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */ - I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), - I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), - I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(150), - I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), - I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), - I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), - - /* unused entries */ - I40E_PTT_UNUSED_ENTRY(154), - I40E_PTT_UNUSED_ENTRY(155), - I40E_PTT_UNUSED_ENTRY(156), - I40E_PTT_UNUSED_ENTRY(157), - I40E_PTT_UNUSED_ENTRY(158), - I40E_PTT_UNUSED_ENTRY(159), - - I40E_PTT_UNUSED_ENTRY(160), - I40E_PTT_UNUSED_ENTRY(161), - I40E_PTT_UNUSED_ENTRY(162), - I40E_PTT_UNUSED_ENTRY(163), - I40E_PTT_UNUSED_ENTRY(164), - I40E_PTT_UNUSED_ENTRY(165), - I40E_PTT_UNUSED_ENTRY(166), - I40E_PTT_UNUSED_ENTRY(167), - I40E_PTT_UNUSED_ENTRY(168), - I40E_PTT_UNUSED_ENTRY(169), - - I40E_PTT_UNUSED_ENTRY(170), - I40E_PTT_UNUSED_ENTRY(171), - I40E_PTT_UNUSED_ENTRY(172), - I40E_PTT_UNUSED_ENTRY(173), - I40E_PTT_UNUSED_ENTRY(174), - I40E_PTT_UNUSED_ENTRY(175), - I40E_PTT_UNUSED_ENTRY(176), - I40E_PTT_UNUSED_ENTRY(177), - I40E_PTT_UNUSED_ENTRY(178), - I40E_PTT_UNUSED_ENTRY(179), - - I40E_PTT_UNUSED_ENTRY(180), - I40E_PTT_UNUSED_ENTRY(181), - I40E_PTT_UNUSED_ENTRY(182), - I40E_PTT_UNUSED_ENTRY(183), - I40E_PTT_UNUSED_ENTRY(184), - I40E_PTT_UNUSED_ENTRY(185), - I40E_PTT_UNUSED_ENTRY(186), - I40E_PTT_UNUSED_ENTRY(187), - I40E_PTT_UNUSED_ENTRY(188), - I40E_PTT_UNUSED_ENTRY(189), - - I40E_PTT_UNUSED_ENTRY(190), - I40E_PTT_UNUSED_ENTRY(191), - I40E_PTT_UNUSED_ENTRY(192), - I40E_PTT_UNUSED_ENTRY(193), - I40E_PTT_UNUSED_ENTRY(194), - I40E_PTT_UNUSED_ENTRY(195), - I40E_PTT_UNUSED_ENTRY(196), - I40E_PTT_UNUSED_ENTRY(197), - I40E_PTT_UNUSED_ENTRY(198), - I40E_PTT_UNUSED_ENTRY(199), - - I40E_PTT_UNUSED_ENTRY(200), - I40E_PTT_UNUSED_ENTRY(201), - I40E_PTT_UNUSED_ENTRY(202), - I40E_PTT_UNUSED_ENTRY(203), - I40E_PTT_UNUSED_ENTRY(204), - I40E_PTT_UNUSED_ENTRY(205), - I40E_PTT_UNUSED_ENTRY(206), - I40E_PTT_UNUSED_ENTRY(207), - I40E_PTT_UNUSED_ENTRY(208), - I40E_PTT_UNUSED_ENTRY(209), - - I40E_PTT_UNUSED_ENTRY(210), - I40E_PTT_UNUSED_ENTRY(211), - I40E_PTT_UNUSED_ENTRY(212), - I40E_PTT_UNUSED_ENTRY(213), - I40E_PTT_UNUSED_ENTRY(214), - I40E_PTT_UNUSED_ENTRY(215), - I40E_PTT_UNUSED_ENTRY(216), - I40E_PTT_UNUSED_ENTRY(217), - I40E_PTT_UNUSED_ENTRY(218), - I40E_PTT_UNUSED_ENTRY(219), - - I40E_PTT_UNUSED_ENTRY(220), - I40E_PTT_UNUSED_ENTRY(221), - I40E_PTT_UNUSED_ENTRY(222), - I40E_PTT_UNUSED_ENTRY(223), - I40E_PTT_UNUSED_ENTRY(224), - I40E_PTT_UNUSED_ENTRY(225), - I40E_PTT_UNUSED_ENTRY(226), - I40E_PTT_UNUSED_ENTRY(227), - I40E_PTT_UNUSED_ENTRY(228), - I40E_PTT_UNUSED_ENTRY(229), - - I40E_PTT_UNUSED_ENTRY(230), - I40E_PTT_UNUSED_ENTRY(231), - I40E_PTT_UNUSED_ENTRY(232), - I40E_PTT_UNUSED_ENTRY(233), - I40E_PTT_UNUSED_ENTRY(234), - I40E_PTT_UNUSED_ENTRY(235), - I40E_PTT_UNUSED_ENTRY(236), - I40E_PTT_UNUSED_ENTRY(237), - I40E_PTT_UNUSED_ENTRY(238), - I40E_PTT_UNUSED_ENTRY(239), - - I40E_PTT_UNUSED_ENTRY(240), - I40E_PTT_UNUSED_ENTRY(241), - I40E_PTT_UNUSED_ENTRY(242), - I40E_PTT_UNUSED_ENTRY(243), - I40E_PTT_UNUSED_ENTRY(244), - I40E_PTT_UNUSED_ENTRY(245), - I40E_PTT_UNUSED_ENTRY(246), - 
I40E_PTT_UNUSED_ENTRY(247), - I40E_PTT_UNUSED_ENTRY(248), - I40E_PTT_UNUSED_ENTRY(249), - - I40E_PTT_UNUSED_ENTRY(250), - I40E_PTT_UNUSED_ENTRY(251), - I40E_PTT_UNUSED_ENTRY(252), - I40E_PTT_UNUSED_ENTRY(253), - I40E_PTT_UNUSED_ENTRY(254), - I40E_PTT_UNUSED_ENTRY(255) -}; - -/** - * i40evf_aq_rx_ctl_read_register - use FW to read from an Rx control register - * @hw: pointer to the hw struct - * @reg_addr: register address - * @reg_val: ptr to register value - * @cmd_details: pointer to command details structure or NULL - * - * Use the firmware to read the Rx control register, - * especially useful if the Rx unit is under heavy pressure - **/ -i40e_status i40evf_aq_rx_ctl_read_register(struct i40e_hw *hw, - u32 reg_addr, u32 *reg_val, - struct i40e_asq_cmd_details *cmd_details) -{ - struct i40e_aq_desc desc; - struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp = - (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw; - i40e_status status; - - if (!reg_val) - return I40E_ERR_PARAM; - - i40evf_fill_default_direct_cmd_desc(&desc, - i40e_aqc_opc_rx_ctl_reg_read); - - cmd_resp->address = cpu_to_le32(reg_addr); - - status = i40evf_asq_send_command(hw, &desc, NULL, 0, cmd_details); - - if (status == 0) - *reg_val = le32_to_cpu(cmd_resp->value); - - return status; -} - -/** - * i40evf_read_rx_ctl - read from an Rx control register - * @hw: pointer to the hw struct - * @reg_addr: register address - **/ -u32 i40evf_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr) -{ - i40e_status status = 0; - bool use_register; - int retry = 5; - u32 val = 0; - - use_register = (((hw->aq.api_maj_ver == 1) && - (hw->aq.api_min_ver < 5)) || - (hw->mac.type == I40E_MAC_X722)); - if (!use_register) { -do_retry: - status = i40evf_aq_rx_ctl_read_register(hw, reg_addr, - &val, NULL); - if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) { - usleep_range(1000, 2000); - retry--; - goto do_retry; - } - } - - /* if the AQ access failed, try the old-fashioned way */ - if (status || use_register) - val = rd32(hw, reg_addr); - - return val; -} - -/** - * i40evf_aq_rx_ctl_write_register - * @hw: pointer to the hw struct - * @reg_addr: register address - * @reg_val: register value - * @cmd_details: pointer to command details structure or NULL - * - * Use the firmware to write to an Rx control register, - * especially useful if the Rx unit is under heavy pressure - **/ -i40e_status i40evf_aq_rx_ctl_write_register(struct i40e_hw *hw, - u32 reg_addr, u32 reg_val, - struct i40e_asq_cmd_details *cmd_details) -{ - struct i40e_aq_desc desc; - struct i40e_aqc_rx_ctl_reg_read_write *cmd = - (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw; - i40e_status status; - - i40evf_fill_default_direct_cmd_desc(&desc, - i40e_aqc_opc_rx_ctl_reg_write); - - cmd->address = cpu_to_le32(reg_addr); - cmd->value = cpu_to_le32(reg_val); - - status = i40evf_asq_send_command(hw, &desc, NULL, 0, cmd_details); - - return status; -} - -/** - * i40evf_write_rx_ctl - write to an Rx control register - * @hw: pointer to the hw struct - * @reg_addr: register address - * @reg_val: register value - **/ -void i40evf_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val) -{ - i40e_status status = 0; - bool use_register; - int retry = 5; - - use_register = (((hw->aq.api_maj_ver == 1) && - (hw->aq.api_min_ver < 5)) || - (hw->mac.type == I40E_MAC_X722)); - if (!use_register) { -do_retry: - status = i40evf_aq_rx_ctl_write_register(hw, reg_addr, - reg_val, NULL); - if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) { - usleep_range(1000, 2000); - 
retry--; - goto do_retry; - } - } - - /* if the AQ access failed, try the old-fashioned way */ - if (status || use_register) - wr32(hw, reg_addr, reg_val); -} - -/** - * i40e_aq_send_msg_to_pf - * @hw: pointer to the hardware structure - * @v_opcode: opcodes for VF-PF communication - * @v_retval: return error code - * @msg: pointer to the msg buffer - * @msglen: msg length - * @cmd_details: pointer to command details - * - * Send message to PF driver using admin queue. By default, this message - * is sent asynchronously, i.e. i40evf_asq_send_command() does not wait for - * completion before returning. - **/ -i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw, - enum virtchnl_ops v_opcode, - i40e_status v_retval, - u8 *msg, u16 msglen, - struct i40e_asq_cmd_details *cmd_details) -{ - struct i40e_aq_desc desc; - struct i40e_asq_cmd_details details; - i40e_status status; - - i40evf_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_pf); - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI); - desc.cookie_high = cpu_to_le32(v_opcode); - desc.cookie_low = cpu_to_le32(v_retval); - if (msglen) { - desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF - | I40E_AQ_FLAG_RD)); - if (msglen > I40E_AQ_LARGE_BUF) - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); - desc.datalen = cpu_to_le16(msglen); - } - if (!cmd_details) { - memset(&details, 0, sizeof(details)); - details.async = true; - cmd_details = &details; - } - status = i40evf_asq_send_command(hw, &desc, msg, msglen, cmd_details); - return status; -} - -/** - * i40e_vf_parse_hw_config - * @hw: pointer to the hardware structure - * @msg: pointer to the virtual channel VF resource structure - * - * Given a VF resource message from the PF, populate the hw struct - * with appropriate information. - **/ -void i40e_vf_parse_hw_config(struct i40e_hw *hw, - struct virtchnl_vf_resource *msg) -{ - struct virtchnl_vsi_resource *vsi_res; - int i; - - vsi_res = &msg->vsi_res[0]; - - hw->dev_caps.num_vsis = msg->num_vsis; - hw->dev_caps.num_rx_qp = msg->num_queue_pairs; - hw->dev_caps.num_tx_qp = msg->num_queue_pairs; - hw->dev_caps.num_msix_vectors_vf = msg->max_vectors; - hw->dev_caps.dcb = msg->vf_cap_flags & - VIRTCHNL_VF_OFFLOAD_L2; - hw->dev_caps.fcoe = 0; - for (i = 0; i < msg->num_vsis; i++) { - if (vsi_res->vsi_type == VIRTCHNL_VSI_SRIOV) { - ether_addr_copy(hw->mac.perm_addr, - vsi_res->default_mac_addr); - ether_addr_copy(hw->mac.addr, - vsi_res->default_mac_addr); - } - vsi_res++; - } -} - -/** - * i40e_vf_reset - * @hw: pointer to the hardware structure - * - * Send a VF_RESET message to the PF. Does not wait for response from PF - * as none will be forthcoming. Immediately after calling this function, - * the admin queue should be shut down and (optionally) reinitialized. 
- **/ -i40e_status i40e_vf_reset(struct i40e_hw *hw) -{ - return i40e_aq_send_msg_to_pf(hw, VIRTCHNL_OP_RESET_VF, - 0, NULL, 0, NULL); -} - -/** - * i40evf_aq_write_ddp - Write dynamic device personalization (ddp) - * @hw: pointer to the hw struct - * @buff: command buffer (size in bytes = buff_size) - * @buff_size: buffer size in bytes - * @track_id: package tracking id - * @error_offset: returns error offset - * @error_info: returns error information - * @cmd_details: pointer to command details structure or NULL - **/ -enum -i40e_status_code i40evf_aq_write_ddp(struct i40e_hw *hw, void *buff, - u16 buff_size, u32 track_id, - u32 *error_offset, u32 *error_info, - struct i40e_asq_cmd_details *cmd_details) -{ - struct i40e_aq_desc desc; - struct i40e_aqc_write_personalization_profile *cmd = - (struct i40e_aqc_write_personalization_profile *) - &desc.params.raw; - struct i40e_aqc_write_ddp_resp *resp; - i40e_status status; - - i40evf_fill_default_direct_cmd_desc(&desc, - i40e_aqc_opc_write_personalization_profile); - - desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD); - if (buff_size > I40E_AQ_LARGE_BUF) - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); - - desc.datalen = cpu_to_le16(buff_size); - - cmd->profile_track_id = cpu_to_le32(track_id); - - status = i40evf_asq_send_command(hw, &desc, buff, buff_size, cmd_details); - if (!status) { - resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw; - if (error_offset) - *error_offset = le32_to_cpu(resp->error_offset); - if (error_info) - *error_info = le32_to_cpu(resp->error_info); - } - - return status; -} - -/** - * i40evf_aq_get_ddp_list - Read dynamic device personalization (ddp) - * @hw: pointer to the hw struct - * @buff: command buffer (size in bytes = buff_size) - * @buff_size: buffer size in bytes - * @flags: AdminQ command flags - * @cmd_details: pointer to command details structure or NULL - **/ -enum -i40e_status_code i40evf_aq_get_ddp_list(struct i40e_hw *hw, void *buff, - u16 buff_size, u8 flags, - struct i40e_asq_cmd_details *cmd_details) -{ - struct i40e_aq_desc desc; - struct i40e_aqc_get_applied_profiles *cmd = - (struct i40e_aqc_get_applied_profiles *)&desc.params.raw; - i40e_status status; - - i40evf_fill_default_direct_cmd_desc(&desc, - i40e_aqc_opc_get_personalization_profile_list); - - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); - if (buff_size > I40E_AQ_LARGE_BUF) - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); - desc.datalen = cpu_to_le16(buff_size); - - cmd->flags = flags; - - status = i40evf_asq_send_command(hw, &desc, buff, buff_size, cmd_details); - - return status; -} - -/** - * i40evf_find_segment_in_package - * @segment_type: the segment type to search for (i.e., SEGMENT_TYPE_I40E) - * @pkg_hdr: pointer to the package header to be searched - * - * This function searches a package file for a particular segment type. On - * success it returns a pointer to the segment header, otherwise it will - * return NULL. 
- **/ -struct i40e_generic_seg_header * -i40evf_find_segment_in_package(u32 segment_type, - struct i40e_package_header *pkg_hdr) -{ - struct i40e_generic_seg_header *segment; - u32 i; - - /* Search all package segments for the requested segment type */ - for (i = 0; i < pkg_hdr->segment_count; i++) { - segment = - (struct i40e_generic_seg_header *)((u8 *)pkg_hdr + - pkg_hdr->segment_offset[i]); - - if (segment->type == segment_type) - return segment; - } - - return NULL; -} - -/** - * i40evf_write_profile - * @hw: pointer to the hardware structure - * @profile: pointer to the profile segment of the package to be downloaded - * @track_id: package tracking id - * - * Handles the download of a complete package. - */ -enum i40e_status_code -i40evf_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile, - u32 track_id) -{ - i40e_status status = 0; - struct i40e_section_table *sec_tbl; - struct i40e_profile_section_header *sec = NULL; - u32 dev_cnt; - u32 vendor_dev_id; - u32 *nvm; - u32 section_size = 0; - u32 offset = 0, info = 0; - u32 i; - - dev_cnt = profile->device_table_count; - - for (i = 0; i < dev_cnt; i++) { - vendor_dev_id = profile->device_table[i].vendor_dev_id; - if ((vendor_dev_id >> 16) == PCI_VENDOR_ID_INTEL) - if (hw->device_id == (vendor_dev_id & 0xFFFF)) - break; - } - if (i == dev_cnt) { - i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support DDP"); - return I40E_ERR_DEVICE_NOT_SUPPORTED; - } - - nvm = (u32 *)&profile->device_table[dev_cnt]; - sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1]; - - for (i = 0; i < sec_tbl->section_count; i++) { - sec = (struct i40e_profile_section_header *)((u8 *)profile + - sec_tbl->section_offset[i]); - - /* Skip 'AQ', 'note' and 'name' sections */ - if (sec->section.type != SECTION_TYPE_MMIO) - continue; - - section_size = sec->section.size + - sizeof(struct i40e_profile_section_header); - - /* Write profile */ - status = i40evf_aq_write_ddp(hw, (void *)sec, (u16)section_size, - track_id, &offset, &info, NULL); - if (status) { - i40e_debug(hw, I40E_DEBUG_PACKAGE, - "Failed to write profile: offset %d, info %d", - offset, info); - break; - } - } - return status; -} - -/** - * i40evf_add_pinfo_to_list - * @hw: pointer to the hardware structure - * @profile: pointer to the profile segment of the package - * @profile_info_sec: buffer for information section - * @track_id: package tracking id - * - * Register a profile to the list of loaded profiles. 
- */ -enum i40e_status_code -i40evf_add_pinfo_to_list(struct i40e_hw *hw, - struct i40e_profile_segment *profile, - u8 *profile_info_sec, u32 track_id) -{ - i40e_status status = 0; - struct i40e_profile_section_header *sec = NULL; - struct i40e_profile_info *pinfo; - u32 offset = 0, info = 0; - - sec = (struct i40e_profile_section_header *)profile_info_sec; - sec->tbl_size = 1; - sec->data_end = sizeof(struct i40e_profile_section_header) + - sizeof(struct i40e_profile_info); - sec->section.type = SECTION_TYPE_INFO; - sec->section.offset = sizeof(struct i40e_profile_section_header); - sec->section.size = sizeof(struct i40e_profile_info); - pinfo = (struct i40e_profile_info *)(profile_info_sec + - sec->section.offset); - pinfo->track_id = track_id; - pinfo->version = profile->version; - pinfo->op = I40E_DDP_ADD_TRACKID; - memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE); - - status = i40evf_aq_write_ddp(hw, (void *)sec, sec->data_end, - track_id, &offset, &info, NULL); - return status; -} diff --git a/drivers/net/ethernet/intel/i40evf/i40e_devids.h b/drivers/net/ethernet/intel/i40evf/i40e_devids.h deleted file mode 100644 index f300bf271824..000000000000 --- a/drivers/net/ethernet/intel/i40evf/i40e_devids.h +++ /dev/null @@ -1,34 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ - -#ifndef _I40E_DEVIDS_H_ -#define _I40E_DEVIDS_H_ - -/* Device IDs */ -#define I40E_DEV_ID_SFP_XL710 0x1572 -#define I40E_DEV_ID_QEMU 0x1574 -#define I40E_DEV_ID_KX_B 0x1580 -#define I40E_DEV_ID_KX_C 0x1581 -#define I40E_DEV_ID_QSFP_A 0x1583 -#define I40E_DEV_ID_QSFP_B 0x1584 -#define I40E_DEV_ID_QSFP_C 0x1585 -#define I40E_DEV_ID_10G_BASE_T 0x1586 -#define I40E_DEV_ID_20G_KR2 0x1587 -#define I40E_DEV_ID_20G_KR2_A 0x1588 -#define I40E_DEV_ID_10G_BASE_T4 0x1589 -#define I40E_DEV_ID_25G_B 0x158A -#define I40E_DEV_ID_25G_SFP28 0x158B -#define I40E_DEV_ID_VF 0x154C -#define I40E_DEV_ID_VF_HV 0x1571 -#define I40E_DEV_ID_ADAPTIVE_VF 0x1889 -#define I40E_DEV_ID_SFP_X722 0x37D0 -#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1 -#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2 -#define I40E_DEV_ID_SFP_I_X722 0x37D3 -#define I40E_DEV_ID_X722_VF 0x37CD - -#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \ - (d) == I40E_DEV_ID_QSFP_B || \ - (d) == I40E_DEV_ID_QSFP_C) - -#endif /* _I40E_DEVIDS_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_hmc.h b/drivers/net/ethernet/intel/i40evf/i40e_hmc.h deleted file mode 100644 index 1c78de838857..000000000000 --- a/drivers/net/ethernet/intel/i40evf/i40e_hmc.h +++ /dev/null @@ -1,215 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. 
*/ - -#ifndef _I40E_HMC_H_ -#define _I40E_HMC_H_ - -#define I40E_HMC_MAX_BP_COUNT 512 - -/* forward-declare the HW struct for the compiler */ -struct i40e_hw; - -#define I40E_HMC_INFO_SIGNATURE 0x484D5347 /* HMSG */ -#define I40E_HMC_PD_CNT_IN_SD 512 -#define I40E_HMC_DIRECT_BP_SIZE 0x200000 /* 2M */ -#define I40E_HMC_PAGED_BP_SIZE 4096 -#define I40E_HMC_PD_BP_BUF_ALIGNMENT 4096 -#define I40E_FIRST_VF_FPM_ID 16 - -struct i40e_hmc_obj_info { - u64 base; /* base addr in FPM */ - u32 max_cnt; /* max count available for this hmc func */ - u32 cnt; /* count of objects driver actually wants to create */ - u64 size; /* size in bytes of one object */ -}; - -enum i40e_sd_entry_type { - I40E_SD_TYPE_INVALID = 0, - I40E_SD_TYPE_PAGED = 1, - I40E_SD_TYPE_DIRECT = 2 -}; - -struct i40e_hmc_bp { - enum i40e_sd_entry_type entry_type; - struct i40e_dma_mem addr; /* populate to be used by hw */ - u32 sd_pd_index; - u32 ref_cnt; -}; - -struct i40e_hmc_pd_entry { - struct i40e_hmc_bp bp; - u32 sd_index; - bool rsrc_pg; - bool valid; -}; - -struct i40e_hmc_pd_table { - struct i40e_dma_mem pd_page_addr; /* populate to be used by hw */ - struct i40e_hmc_pd_entry *pd_entry; /* [512] for sw book keeping */ - struct i40e_virt_mem pd_entry_virt_mem; /* virt mem for pd_entry */ - - u32 ref_cnt; - u32 sd_index; -}; - -struct i40e_hmc_sd_entry { - enum i40e_sd_entry_type entry_type; - bool valid; - - union { - struct i40e_hmc_pd_table pd_table; - struct i40e_hmc_bp bp; - } u; -}; - -struct i40e_hmc_sd_table { - struct i40e_virt_mem addr; /* used to track sd_entry allocations */ - u32 sd_cnt; - u32 ref_cnt; - struct i40e_hmc_sd_entry *sd_entry; /* (sd_cnt*512) entries max */ -}; - -struct i40e_hmc_info { - u32 signature; - /* equals to pci func num for PF and dynamically allocated for VFs */ - u8 hmc_fn_id; - u16 first_sd_index; /* index of the first available SD */ - - /* hmc objects */ - struct i40e_hmc_obj_info *hmc_obj; - struct i40e_virt_mem hmc_obj_virt_mem; - struct i40e_hmc_sd_table sd_table; -}; - -#define I40E_INC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt++) -#define I40E_INC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt++) -#define I40E_INC_BP_REFCNT(bp) ((bp)->ref_cnt++) - -#define I40E_DEC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt--) -#define I40E_DEC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt--) -#define I40E_DEC_BP_REFCNT(bp) ((bp)->ref_cnt--) - -/** - * I40E_SET_PF_SD_ENTRY - marks the sd entry as valid in the hardware - * @hw: pointer to our hw struct - * @pa: pointer to physical address - * @sd_index: segment descriptor index - * @type: if sd entry is direct or paged - **/ -#define I40E_SET_PF_SD_ENTRY(hw, pa, sd_index, type) \ -{ \ - u32 val1, val2, val3; \ - val1 = (u32)(upper_32_bits(pa)); \ - val2 = (u32)(pa) | (I40E_HMC_MAX_BP_COUNT << \ - I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \ - ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \ - I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) | \ - BIT(I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \ - val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \ - wr32((hw), I40E_PFHMC_SDDATAHIGH, val1); \ - wr32((hw), I40E_PFHMC_SDDATALOW, val2); \ - wr32((hw), I40E_PFHMC_SDCMD, val3); \ -} - -/** - * I40E_CLEAR_PF_SD_ENTRY - marks the sd entry as invalid in the hardware - * @hw: pointer to our hw struct - * @sd_index: segment descriptor index - * @type: if sd entry is direct or paged - **/ -#define I40E_CLEAR_PF_SD_ENTRY(hw, sd_index, type) \ -{ \ - u32 val2, val3; \ - val2 = (I40E_HMC_MAX_BP_COUNT << \ - I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \ - ((((type) == I40E_SD_TYPE_PAGED) ? 
0 : 1) << \ - I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT); \ - val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \ - wr32((hw), I40E_PFHMC_SDDATAHIGH, 0); \ - wr32((hw), I40E_PFHMC_SDDATALOW, val2); \ - wr32((hw), I40E_PFHMC_SDCMD, val3); \ -} - -/** - * I40E_INVALIDATE_PF_HMC_PD - Invalidates the pd cache in the hardware - * @hw: pointer to our hw struct - * @sd_idx: segment descriptor index - * @pd_idx: page descriptor index - **/ -#define I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, pd_idx) \ - wr32((hw), I40E_PFHMC_PDINV, \ - (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \ - ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT))) - -/** - * I40E_FIND_SD_INDEX_LIMIT - finds segment descriptor index limit - * @hmc_info: pointer to the HMC configuration information structure - * @type: type of HMC resources we're searching - * @index: starting index for the object - * @cnt: number of objects we're trying to create - * @sd_idx: pointer to return index of the segment descriptor in question - * @sd_limit: pointer to return the maximum number of segment descriptors - * - * This function calculates the segment descriptor index and index limit - * for the resource defined by i40e_hmc_rsrc_type. - **/ -#define I40E_FIND_SD_INDEX_LIMIT(hmc_info, type, index, cnt, sd_idx, sd_limit)\ -{ \ - u64 fpm_addr, fpm_limit; \ - fpm_addr = (hmc_info)->hmc_obj[(type)].base + \ - (hmc_info)->hmc_obj[(type)].size * (index); \ - fpm_limit = fpm_addr + (hmc_info)->hmc_obj[(type)].size * (cnt);\ - *(sd_idx) = (u32)(fpm_addr / I40E_HMC_DIRECT_BP_SIZE); \ - *(sd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_DIRECT_BP_SIZE); \ - /* add one more to the limit to correct our range */ \ - *(sd_limit) += 1; \ -} - -/** - * I40E_FIND_PD_INDEX_LIMIT - finds page descriptor index limit - * @hmc_info: pointer to the HMC configuration information struct - * @type: HMC resource type we're examining - * @idx: starting index for the object - * @cnt: number of objects we're trying to create - * @pd_index: pointer to return page descriptor index - * @pd_limit: pointer to return page descriptor index limit - * - * Calculates the page descriptor index and index limit for the resource - * defined by i40e_hmc_rsrc_type. 
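A worked example makes the index arithmetic concrete (all numbers illustrative): with base = 0x200000, an object size of 128 bytes, idx = 0, cnt = 96, and I40E_HMC_PAGED_BP_SIZE = 4096:

	fpm_adr   = 0x200000 + 128 * 0  = 0x200000
	fpm_limit = 0x200000 + 128 * 96 = 0x203000
	pd_index  = 0x200000 / 4096     = 512
	pd_limit  = (0x203000 - 1) / 4096 + 1 = 515

so the 96 objects span page descriptors 512..514, and the trailing "+ 1" keeps pd_limit exclusive, matching the macro below.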
- **/ -#define I40E_FIND_PD_INDEX_LIMIT(hmc_info, type, idx, cnt, pd_index, pd_limit)\ -{ \ - u64 fpm_adr, fpm_limit; \ - fpm_adr = (hmc_info)->hmc_obj[(type)].base + \ - (hmc_info)->hmc_obj[(type)].size * (idx); \ - fpm_limit = fpm_adr + (hmc_info)->hmc_obj[(type)].size * (cnt); \ - *(pd_index) = (u32)(fpm_adr / I40E_HMC_PAGED_BP_SIZE); \ - *(pd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_PAGED_BP_SIZE); \ - /* add one more to the limit to correct our range */ \ - *(pd_limit) += 1; \ -} -i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw, - struct i40e_hmc_info *hmc_info, - u32 sd_index, - enum i40e_sd_entry_type type, - u64 direct_mode_sz); - -i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw, - struct i40e_hmc_info *hmc_info, - u32 pd_index, - struct i40e_dma_mem *rsrc_pg); -i40e_status i40e_remove_pd_bp(struct i40e_hw *hw, - struct i40e_hmc_info *hmc_info, - u32 idx); -i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info, - u32 idx); -i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw, - struct i40e_hmc_info *hmc_info, - u32 idx, bool is_pf); -i40e_status i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info, - u32 idx); -i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw, - struct i40e_hmc_info *hmc_info, - u32 idx, bool is_pf); - -#endif /* _I40E_HMC_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h deleted file mode 100644 index 82b00f70a632..000000000000 --- a/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h +++ /dev/null @@ -1,158 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ - -#ifndef _I40E_LAN_HMC_H_ -#define _I40E_LAN_HMC_H_ - -/* forward-declare the HW struct for the compiler */ -struct i40e_hw; - -/* HMC element context information */ - -/* Rx queue context data - * - * The sizes of the variables may be larger than needed due to crossing byte - * boundaries. If we do not have the width of the variable set to the correct - * size then we could end up shifting bits off the top of the variable when the - * variable is at the top of a byte and crosses over into the next byte. - */ -struct i40e_hmc_obj_rxq { - u16 head; - u16 cpuid; /* bigger than needed, see above for reason */ - u64 base; - u16 qlen; -#define I40E_RXQ_CTX_DBUFF_SHIFT 7 - u16 dbuff; /* bigger than needed, see above for reason */ -#define I40E_RXQ_CTX_HBUFF_SHIFT 6 - u16 hbuff; /* bigger than needed, see above for reason */ - u8 dtype; - u8 dsize; - u8 crcstrip; - u8 fc_ena; - u8 l2tsel; - u8 hsplit_0; - u8 hsplit_1; - u8 showiv; - u32 rxmax; /* bigger than needed, see above for reason */ - u8 tphrdesc_ena; - u8 tphwdesc_ena; - u8 tphdata_ena; - u8 tphhead_ena; - u16 lrxqthresh; /* bigger than needed, see above for reason */ - u8 prefena; /* NOTE: normally must be set to 1 at init */ -}; - -/* Tx queue context data -* -* The sizes of the variables may be larger than needed due to crossing byte -* boundaries. If we do not have the width of the variable set to the correct -* size then we could end up shifting bits off the top of the variable when the -* variable is at the top of a byte and crosses over into the next byte. 
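A two-line illustration of the pitfall this comment (and the matching Rx comment above) guards against; the values are illustrative only, not from hardware:

static u16 example_width_pitfall(void)
{
	u8 narrow = 0x7f;	/* a 7-bit hardware field kept in 8 bits */
	u16 wide = narrow;	/* the same value with headroom */

	/* (u8)(narrow << 2) == 0xfc: the top bit is shifted away,
	 * while (wide << 2) == 0x1fc keeps every bit -- hence the
	 * "bigger than needed" field widths in these context structs.
	 */
	return (u8)(narrow << 2) ^ (wide << 2);	/* 0x100, the lost bit */
}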
-*/ -struct i40e_hmc_obj_txq { - u16 head; - u8 new_context; - u64 base; - u8 fc_ena; - u8 timesync_ena; - u8 fd_ena; - u8 alt_vlan_ena; - u16 thead_wb; - u8 cpuid; - u8 head_wb_ena; - u16 qlen; - u8 tphrdesc_ena; - u8 tphrpacket_ena; - u8 tphwdesc_ena; - u64 head_wb_addr; - u32 crc; - u16 rdylist; - u8 rdylist_act; -}; - -/* for hsplit_0 field of Rx HMC context */ -enum i40e_hmc_obj_rx_hsplit_0 { - I40E_HMC_OBJ_RX_HSPLIT_0_NO_SPLIT = 0, - I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 = 1, - I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP = 2, - I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP = 4, - I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP = 8, -}; - -/* fcoe_cntx and fcoe_filt are for debugging purpose only */ -struct i40e_hmc_obj_fcoe_cntx { - u32 rsv[32]; -}; - -struct i40e_hmc_obj_fcoe_filt { - u32 rsv[8]; -}; - -/* Context sizes for LAN objects */ -enum i40e_hmc_lan_object_size { - I40E_HMC_LAN_OBJ_SZ_8 = 0x3, - I40E_HMC_LAN_OBJ_SZ_16 = 0x4, - I40E_HMC_LAN_OBJ_SZ_32 = 0x5, - I40E_HMC_LAN_OBJ_SZ_64 = 0x6, - I40E_HMC_LAN_OBJ_SZ_128 = 0x7, - I40E_HMC_LAN_OBJ_SZ_256 = 0x8, - I40E_HMC_LAN_OBJ_SZ_512 = 0x9, -}; - -#define I40E_HMC_L2OBJ_BASE_ALIGNMENT 512 -#define I40E_HMC_OBJ_SIZE_TXQ 128 -#define I40E_HMC_OBJ_SIZE_RXQ 32 -#define I40E_HMC_OBJ_SIZE_FCOE_CNTX 128 -#define I40E_HMC_OBJ_SIZE_FCOE_FILT 64 - -enum i40e_hmc_lan_rsrc_type { - I40E_HMC_LAN_FULL = 0, - I40E_HMC_LAN_TX = 1, - I40E_HMC_LAN_RX = 2, - I40E_HMC_FCOE_CTX = 3, - I40E_HMC_FCOE_FILT = 4, - I40E_HMC_LAN_MAX = 5 -}; - -enum i40e_hmc_model { - I40E_HMC_MODEL_DIRECT_PREFERRED = 0, - I40E_HMC_MODEL_DIRECT_ONLY = 1, - I40E_HMC_MODEL_PAGED_ONLY = 2, - I40E_HMC_MODEL_UNKNOWN, -}; - -struct i40e_hmc_lan_create_obj_info { - struct i40e_hmc_info *hmc_info; - u32 rsrc_type; - u32 start_idx; - u32 count; - enum i40e_sd_entry_type entry_type; - u64 direct_mode_sz; -}; - -struct i40e_hmc_lan_delete_obj_info { - struct i40e_hmc_info *hmc_info; - u32 rsrc_type; - u32 start_idx; - u32 count; -}; - -i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num, - u32 rxq_num, u32 fcoe_cntx_num, - u32 fcoe_filt_num); -i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw, - enum i40e_hmc_model model); -i40e_status i40e_shutdown_lan_hmc(struct i40e_hw *hw); - -i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw, - u16 queue); -i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw, - u16 queue, - struct i40e_hmc_obj_txq *s); -i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw, - u16 queue); -i40e_status i40e_set_lan_rx_queue_context(struct i40e_hw *hw, - u16 queue, - struct i40e_hmc_obj_rxq *s); - -#endif /* _I40E_LAN_HMC_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h deleted file mode 100644 index a358f4b9d5aa..000000000000 --- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h +++ /dev/null @@ -1,130 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ - -#ifndef _I40E_PROTOTYPE_H_ -#define _I40E_PROTOTYPE_H_ - -#include "i40e_type.h" -#include "i40e_alloc.h" -#include <linux/avf/virtchnl.h> - -/* Prototypes for shared code functions that are not in - * the standard function pointer structures. These are - * mostly because they are needed even before the init - * has happened and will assist in the early SW and FW - * setup. 
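For orientation, the i40e_init_lan_hmc / i40e_configure_lan_hmc / i40e_shutdown_lan_hmc prototypes above describe a simple lifecycle. A hedged sketch of the intended call order (the queue counts are illustrative, and the FCoE object counts are zero because this VF code never uses them):

static i40e_status example_hmc_bringup(struct i40e_hw *hw)
{
	i40e_status status;

	status = i40e_init_lan_hmc(hw, 64 /* txq */, 64 /* rxq */, 0, 0);
	if (status)
		return status;

	/* lay the objects out in FPM and program the hardware */
	status = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_PREFERRED);
	if (status)
		i40e_shutdown_lan_hmc(hw);

	return status;
}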
- */ - -/* adminq functions */ -i40e_status i40evf_init_adminq(struct i40e_hw *hw); -i40e_status i40evf_shutdown_adminq(struct i40e_hw *hw); -void i40e_adminq_init_ring_data(struct i40e_hw *hw); -i40e_status i40evf_clean_arq_element(struct i40e_hw *hw, - struct i40e_arq_event_info *e, - u16 *events_pending); -i40e_status i40evf_asq_send_command(struct i40e_hw *hw, - struct i40e_aq_desc *desc, - void *buff, /* can be NULL */ - u16 buff_size, - struct i40e_asq_cmd_details *cmd_details); -bool i40evf_asq_done(struct i40e_hw *hw); - -/* debug function for adminq */ -void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, - void *desc, void *buffer, u16 buf_len); - -void i40e_idle_aq(struct i40e_hw *hw); -void i40evf_resume_aq(struct i40e_hw *hw); -bool i40evf_check_asq_alive(struct i40e_hw *hw); -i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw, bool unloading); -const char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err); -const char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err); - -i40e_status i40evf_aq_get_rss_lut(struct i40e_hw *hw, u16 seid, - bool pf_lut, u8 *lut, u16 lut_size); -i40e_status i40evf_aq_set_rss_lut(struct i40e_hw *hw, u16 seid, - bool pf_lut, u8 *lut, u16 lut_size); -i40e_status i40evf_aq_get_rss_key(struct i40e_hw *hw, - u16 seid, - struct i40e_aqc_get_set_rss_key_data *key); -i40e_status i40evf_aq_set_rss_key(struct i40e_hw *hw, - u16 seid, - struct i40e_aqc_get_set_rss_key_data *key); - -i40e_status i40e_set_mac_type(struct i40e_hw *hw); - -extern struct i40e_rx_ptype_decoded i40evf_ptype_lookup[]; - -static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype) -{ - return i40evf_ptype_lookup[ptype]; -} - -/* prototype for functions used for SW locks */ - -/* i40e_common for VF drivers*/ -void i40e_vf_parse_hw_config(struct i40e_hw *hw, - struct virtchnl_vf_resource *msg); -i40e_status i40e_vf_reset(struct i40e_hw *hw); -i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw, - enum virtchnl_ops v_opcode, - i40e_status v_retval, - u8 *msg, u16 msglen, - struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_set_filter_control(struct i40e_hw *hw, - struct i40e_filter_control_settings *settings); -i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, - u8 *mac_addr, u16 ethtype, u16 flags, - u16 vsi_seid, u16 queue, bool is_add, - struct i40e_control_filter_stats *stats, - struct i40e_asq_cmd_details *cmd_details); -void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw, - u16 vsi_seid); -i40e_status i40evf_aq_rx_ctl_read_register(struct i40e_hw *hw, - u32 reg_addr, u32 *reg_val, - struct i40e_asq_cmd_details *cmd_details); -u32 i40evf_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr); -i40e_status i40evf_aq_rx_ctl_write_register(struct i40e_hw *hw, - u32 reg_addr, u32 reg_val, - struct i40e_asq_cmd_details *cmd_details); -void i40evf_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val); -i40e_status i40e_aq_set_phy_register(struct i40e_hw *hw, - u8 phy_select, u8 dev_addr, - u32 reg_addr, u32 reg_val, - struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_get_phy_register(struct i40e_hw *hw, - u8 phy_select, u8 dev_addr, - u32 reg_addr, u32 *reg_val, - struct i40e_asq_cmd_details *cmd_details); - -i40e_status i40e_read_phy_register(struct i40e_hw *hw, u8 page, - u16 reg, u8 phy_addr, u16 *value); -i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page, - u16 reg, u8 phy_addr, u16 value); -i40e_status i40e_read_phy_register(struct i40e_hw *hw, u8 
page, u16 reg, - u8 phy_addr, u16 *value); -i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg, - u8 phy_addr, u16 value); -u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num); -i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw, - u32 time, u32 interval); -i40e_status i40evf_aq_write_ddp(struct i40e_hw *hw, void *buff, - u16 buff_size, u32 track_id, - u32 *error_offset, u32 *error_info, - struct i40e_asq_cmd_details * - cmd_details); -i40e_status i40evf_aq_get_ddp_list(struct i40e_hw *hw, void *buff, - u16 buff_size, u8 flags, - struct i40e_asq_cmd_details * - cmd_details); -struct i40e_generic_seg_header * -i40evf_find_segment_in_package(u32 segment_type, - struct i40e_package_header *pkg_header); -enum i40e_status_code -i40evf_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg, - u32 track_id); -enum i40e_status_code -i40evf_add_pinfo_to_list(struct i40e_hw *hw, - struct i40e_profile_segment *profile, - u8 *profile_info_sec, u32 track_id); -#endif /* _I40E_PROTOTYPE_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_register.h b/drivers/net/ethernet/intel/i40evf/i40e_register.h deleted file mode 100644 index 49e1f57d99cc..000000000000 --- a/drivers/net/ethernet/intel/i40evf/i40e_register.h +++ /dev/null @@ -1,313 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ - -#ifndef _I40E_REGISTER_H_ -#define _I40E_REGISTER_H_ - -#define I40E_VFMSIX_PBA1(_i) (0x00002000 + ((_i) * 4)) /* _i=0...19 */ /* Reset: VFLR */ -#define I40E_VFMSIX_PBA1_MAX_INDEX 19 -#define I40E_VFMSIX_PBA1_PENBIT_SHIFT 0 -#define I40E_VFMSIX_PBA1_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_PBA1_PENBIT_SHIFT) -#define I40E_VFMSIX_TADD1(_i) (0x00002100 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */ -#define I40E_VFMSIX_TADD1_MAX_INDEX 639 -#define I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT 0 -#define I40E_VFMSIX_TADD1_MSIXTADD10_MASK I40E_MASK(0x3, I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT) -#define I40E_VFMSIX_TADD1_MSIXTADD_SHIFT 2 -#define I40E_VFMSIX_TADD1_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_VFMSIX_TADD1_MSIXTADD_SHIFT) -#define I40E_VFMSIX_TMSG1(_i) (0x00002108 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */ -#define I40E_VFMSIX_TMSG1_MAX_INDEX 639 -#define I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT 0 -#define I40E_VFMSIX_TMSG1_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT) -#define I40E_VFMSIX_TUADD1(_i) (0x00002104 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */ -#define I40E_VFMSIX_TUADD1_MAX_INDEX 639 -#define I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT 0 -#define I40E_VFMSIX_TUADD1_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT) -#define I40E_VFMSIX_TVCTRL1(_i) (0x0000210C + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */ -#define I40E_VFMSIX_TVCTRL1_MAX_INDEX 639 -#define I40E_VFMSIX_TVCTRL1_MASK_SHIFT 0 -#define I40E_VFMSIX_TVCTRL1_MASK_MASK I40E_MASK(0x1, I40E_VFMSIX_TVCTRL1_MASK_SHIFT) -#define I40E_VF_ARQBAH1 0x00006000 /* Reset: EMPR */ -#define I40E_VF_ARQBAH1_ARQBAH_SHIFT 0 -#define I40E_VF_ARQBAH1_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAH1_ARQBAH_SHIFT) -#define I40E_VF_ARQBAL1 0x00006C00 /* Reset: EMPR */ -#define I40E_VF_ARQBAL1_ARQBAL_SHIFT 0 -#define I40E_VF_ARQBAL1_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAL1_ARQBAL_SHIFT) -#define I40E_VF_ARQH1 0x00007400 /* Reset: EMPR */ -#define I40E_VF_ARQH1_ARQH_SHIFT 0 -#define I40E_VF_ARQH1_ARQH_MASK I40E_MASK(0x3FF, I40E_VF_ARQH1_ARQH_SHIFT) -#define I40E_VF_ARQLEN1 0x00008000 /* Reset: EMPR */ -#define 
I40E_VF_ARQLEN1_ARQLEN_SHIFT 0 -#define I40E_VF_ARQLEN1_ARQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ARQLEN1_ARQLEN_SHIFT) -#define I40E_VF_ARQLEN1_ARQVFE_SHIFT 28 -#define I40E_VF_ARQLEN1_ARQVFE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQVFE_SHIFT) -#define I40E_VF_ARQLEN1_ARQOVFL_SHIFT 29 -#define I40E_VF_ARQLEN1_ARQOVFL_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQOVFL_SHIFT) -#define I40E_VF_ARQLEN1_ARQCRIT_SHIFT 30 -#define I40E_VF_ARQLEN1_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQCRIT_SHIFT) -#define I40E_VF_ARQLEN1_ARQENABLE_SHIFT 31 -#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQENABLE_SHIFT) -#define I40E_VF_ARQT1 0x00007000 /* Reset: EMPR */ -#define I40E_VF_ARQT1_ARQT_SHIFT 0 -#define I40E_VF_ARQT1_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT1_ARQT_SHIFT) -#define I40E_VF_ATQBAH1 0x00007800 /* Reset: EMPR */ -#define I40E_VF_ATQBAH1_ATQBAH_SHIFT 0 -#define I40E_VF_ATQBAH1_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAH1_ATQBAH_SHIFT) -#define I40E_VF_ATQBAL1 0x00007C00 /* Reset: EMPR */ -#define I40E_VF_ATQBAL1_ATQBAL_SHIFT 0 -#define I40E_VF_ATQBAL1_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAL1_ATQBAL_SHIFT) -#define I40E_VF_ATQH1 0x00006400 /* Reset: EMPR */ -#define I40E_VF_ATQH1_ATQH_SHIFT 0 -#define I40E_VF_ATQH1_ATQH_MASK I40E_MASK(0x3FF, I40E_VF_ATQH1_ATQH_SHIFT) -#define I40E_VF_ATQLEN1 0x00006800 /* Reset: EMPR */ -#define I40E_VF_ATQLEN1_ATQLEN_SHIFT 0 -#define I40E_VF_ATQLEN1_ATQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ATQLEN1_ATQLEN_SHIFT) -#define I40E_VF_ATQLEN1_ATQVFE_SHIFT 28 -#define I40E_VF_ATQLEN1_ATQVFE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQVFE_SHIFT) -#define I40E_VF_ATQLEN1_ATQOVFL_SHIFT 29 -#define I40E_VF_ATQLEN1_ATQOVFL_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQOVFL_SHIFT) -#define I40E_VF_ATQLEN1_ATQCRIT_SHIFT 30 -#define I40E_VF_ATQLEN1_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQCRIT_SHIFT) -#define I40E_VF_ATQLEN1_ATQENABLE_SHIFT 31 -#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQENABLE_SHIFT) -#define I40E_VF_ATQT1 0x00008400 /* Reset: EMPR */ -#define I40E_VF_ATQT1_ATQT_SHIFT 0 -#define I40E_VF_ATQT1_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT1_ATQT_SHIFT) -#define I40E_VFGEN_RSTAT 0x00008800 /* Reset: VFR */ -#define I40E_VFGEN_RSTAT_VFR_STATE_SHIFT 0 -#define I40E_VFGEN_RSTAT_VFR_STATE_MASK I40E_MASK(0x3, I40E_VFGEN_RSTAT_VFR_STATE_SHIFT) -#define I40E_VFINT_DYN_CTL01 0x00005C00 /* Reset: VFR */ -#define I40E_VFINT_DYN_CTL01_INTENA_SHIFT 0 -#define I40E_VFINT_DYN_CTL01_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_INTENA_SHIFT) -#define I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT 1 -#define I40E_VFINT_DYN_CTL01_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT) -#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT 2 -#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT) -#define I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT 3 -#define I40E_VFINT_DYN_CTL01_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT) -#define I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT 5 -#define I40E_VFINT_DYN_CTL01_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT) -#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT 24 -#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT) -#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT 25 -#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT) -#define I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT 31 -#define I40E_VFINT_DYN_CTL01_INTENA_MSK_MASK 
I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT) -#define I40E_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...15 */ /* Reset: VFR */ -#define I40E_VFINT_DYN_CTLN1_MAX_INDEX 15 -#define I40E_VFINT_DYN_CTLN1_INTENA_SHIFT 0 -#define I40E_VFINT_DYN_CTLN1_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_INTENA_SHIFT) -#define I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT 1 -#define I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT) -#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2 -#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT) -#define I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT 3 -#define I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) -#define I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT 5 -#define I40E_VFINT_DYN_CTLN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT) -#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT 24 -#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT) -#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT 25 -#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT) -#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT 31 -#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT) -#define I40E_VFINT_ICR0_ENA1 0x00005000 /* Reset: CORER */ -#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT 25 -#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT) -#define I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT 30 -#define I40E_VFINT_ICR0_ENA1_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT) -#define I40E_VFINT_ICR0_ENA1_RSVD_SHIFT 31 -#define I40E_VFINT_ICR0_ENA1_RSVD_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_RSVD_SHIFT) -#define I40E_VFINT_ICR01 0x00004800 /* Reset: CORER */ -#define I40E_VFINT_ICR01_INTEVENT_SHIFT 0 -#define I40E_VFINT_ICR01_INTEVENT_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_INTEVENT_SHIFT) -#define I40E_VFINT_ICR01_QUEUE_0_SHIFT 1 -#define I40E_VFINT_ICR01_QUEUE_0_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_0_SHIFT) -#define I40E_VFINT_ICR01_QUEUE_1_SHIFT 2 -#define I40E_VFINT_ICR01_QUEUE_1_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_1_SHIFT) -#define I40E_VFINT_ICR01_QUEUE_2_SHIFT 3 -#define I40E_VFINT_ICR01_QUEUE_2_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_2_SHIFT) -#define I40E_VFINT_ICR01_QUEUE_3_SHIFT 4 -#define I40E_VFINT_ICR01_QUEUE_3_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_3_SHIFT) -#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT 25 -#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT) -#define I40E_VFINT_ICR01_ADMINQ_SHIFT 30 -#define I40E_VFINT_ICR01_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_ADMINQ_SHIFT) -#define I40E_VFINT_ICR01_SWINT_SHIFT 31 -#define I40E_VFINT_ICR01_SWINT_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_SWINT_SHIFT) -#define I40E_VFINT_ITR01(_i) (0x00004C00 + ((_i) * 4)) /* _i=0...2 */ /* Reset: VFR */ -#define I40E_VFINT_ITR01_MAX_INDEX 2 -#define I40E_VFINT_ITR01_INTERVAL_SHIFT 0 -#define I40E_VFINT_ITR01_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITR01_INTERVAL_SHIFT) -#define I40E_VFINT_ITRN1(_i, _INTVF) (0x00002800 + ((_i) * 64 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...15 */ /* Reset: VFR */ -#define I40E_VFINT_ITRN1_MAX_INDEX 2 -#define I40E_VFINT_ITRN1_INTERVAL_SHIFT 0 -#define I40E_VFINT_ITRN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN1_INTERVAL_SHIFT) -#define 
I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: CORER */ -#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2 -#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT) -#define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_QRX_TAIL1_MAX_INDEX 15 -#define I40E_QRX_TAIL1_TAIL_SHIFT 0 -#define I40E_QRX_TAIL1_TAIL_MASK I40E_MASK(0x1FFF, I40E_QRX_TAIL1_TAIL_SHIFT) -#define I40E_QTX_TAIL1(_Q) (0x00000000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: PFR */ -#define I40E_QTX_TAIL1_MAX_INDEX 15 -#define I40E_QTX_TAIL1_TAIL_SHIFT 0 -#define I40E_QTX_TAIL1_TAIL_MASK I40E_MASK(0x1FFF, I40E_QTX_TAIL1_TAIL_SHIFT) -#define I40E_VFMSIX_PBA 0x00002000 /* Reset: VFLR */ -#define I40E_VFMSIX_PBA_PENBIT_SHIFT 0 -#define I40E_VFMSIX_PBA_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_PBA_PENBIT_SHIFT) -#define I40E_VFMSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */ -#define I40E_VFMSIX_TADD_MAX_INDEX 16 -#define I40E_VFMSIX_TADD_MSIXTADD10_SHIFT 0 -#define I40E_VFMSIX_TADD_MSIXTADD10_MASK I40E_MASK(0x3, I40E_VFMSIX_TADD_MSIXTADD10_SHIFT) -#define I40E_VFMSIX_TADD_MSIXTADD_SHIFT 2 -#define I40E_VFMSIX_TADD_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_VFMSIX_TADD_MSIXTADD_SHIFT) -#define I40E_VFMSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */ -#define I40E_VFMSIX_TMSG_MAX_INDEX 16 -#define I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT 0 -#define I40E_VFMSIX_TMSG_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT) -#define I40E_VFMSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */ -#define I40E_VFMSIX_TUADD_MAX_INDEX 16 -#define I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT 0 -#define I40E_VFMSIX_TUADD_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT) -#define I40E_VFMSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */ -#define I40E_VFMSIX_TVCTRL_MAX_INDEX 16 -#define I40E_VFMSIX_TVCTRL_MASK_SHIFT 0 -#define I40E_VFMSIX_TVCTRL_MASK_MASK I40E_MASK(0x1, I40E_VFMSIX_TVCTRL_MASK_SHIFT) -#define I40E_VFCM_PE_ERRDATA 0x0000DC00 /* Reset: VFR */ -#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0 -#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT) -#define I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT 4 -#define I40E_VFCM_PE_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT) -#define I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT 8 -#define I40E_VFCM_PE_ERRDATA_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT) -#define I40E_VFCM_PE_ERRINFO 0x0000D800 /* Reset: VFR */ -#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0 -#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT) -#define I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT 4 -#define I40E_VFCM_PE_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT) -#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8 -#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT) -#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16 -#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT) -#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24 -#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT) -#define I40E_VFQF_HENA(_i) (0x0000C400 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */ -#define I40E_VFQF_HENA_MAX_INDEX 1 -#define 
I40E_VFQF_HENA_PTYPE_ENA_SHIFT 0 -#define I40E_VFQF_HENA_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_VFQF_HENA_PTYPE_ENA_SHIFT) -#define I40E_VFQF_HKEY(_i) (0x0000CC00 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */ -#define I40E_VFQF_HKEY_MAX_INDEX 12 -#define I40E_VFQF_HKEY_KEY_0_SHIFT 0 -#define I40E_VFQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_0_SHIFT) -#define I40E_VFQF_HKEY_KEY_1_SHIFT 8 -#define I40E_VFQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_1_SHIFT) -#define I40E_VFQF_HKEY_KEY_2_SHIFT 16 -#define I40E_VFQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_2_SHIFT) -#define I40E_VFQF_HKEY_KEY_3_SHIFT 24 -#define I40E_VFQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_3_SHIFT) -#define I40E_VFQF_HLUT(_i) (0x0000D000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_VFQF_HLUT_MAX_INDEX 15 -#define I40E_VFQF_HLUT_LUT0_SHIFT 0 -#define I40E_VFQF_HLUT_LUT0_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT0_SHIFT) -#define I40E_VFQF_HLUT_LUT1_SHIFT 8 -#define I40E_VFQF_HLUT_LUT1_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT1_SHIFT) -#define I40E_VFQF_HLUT_LUT2_SHIFT 16 -#define I40E_VFQF_HLUT_LUT2_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT2_SHIFT) -#define I40E_VFQF_HLUT_LUT3_SHIFT 24 -#define I40E_VFQF_HLUT_LUT3_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT3_SHIFT) -#define I40E_VFQF_HREGION(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...7 */ /* Reset: CORER */ -#define I40E_VFQF_HREGION_MAX_INDEX 7 -#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0 -#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT) -#define I40E_VFQF_HREGION_REGION_0_SHIFT 1 -#define I40E_VFQF_HREGION_REGION_0_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_0_SHIFT) -#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4 -#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT) -#define I40E_VFQF_HREGION_REGION_1_SHIFT 5 -#define I40E_VFQF_HREGION_REGION_1_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_1_SHIFT) -#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8 -#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT) -#define I40E_VFQF_HREGION_REGION_2_SHIFT 9 -#define I40E_VFQF_HREGION_REGION_2_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_2_SHIFT) -#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12 -#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT) -#define I40E_VFQF_HREGION_REGION_3_SHIFT 13 -#define I40E_VFQF_HREGION_REGION_3_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_3_SHIFT) -#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16 -#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT) -#define I40E_VFQF_HREGION_REGION_4_SHIFT 17 -#define I40E_VFQF_HREGION_REGION_4_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_4_SHIFT) -#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20 -#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT) -#define I40E_VFQF_HREGION_REGION_5_SHIFT 21 -#define I40E_VFQF_HREGION_REGION_5_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_5_SHIFT) -#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24 -#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT) -#define I40E_VFQF_HREGION_REGION_6_SHIFT 25 -#define I40E_VFQF_HREGION_REGION_6_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_6_SHIFT) -#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28 -#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, 
I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT) -#define I40E_VFQF_HREGION_REGION_7_SHIFT 29 -#define I40E_VFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_7_SHIFT) -#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT 30 -#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT) -#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT 30 -#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT) -#define I40E_VFPE_AEQALLOC1 0x0000A400 /* Reset: VFR */ -#define I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT 0 -#define I40E_VFPE_AEQALLOC1_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT) -#define I40E_VFPE_CCQPHIGH1 0x00009800 /* Reset: VFR */ -#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0 -#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT) -#define I40E_VFPE_CCQPLOW1 0x0000AC00 /* Reset: VFR */ -#define I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT 0 -#define I40E_VFPE_CCQPLOW1_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT) -#define I40E_VFPE_CCQPSTATUS1 0x0000B800 /* Reset: VFR */ -#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0 -#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT) -#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT 4 -#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT) -#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT 16 -#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT) -#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31 -#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT) -#define I40E_VFPE_CQACK1 0x0000B000 /* Reset: VFR */ -#define I40E_VFPE_CQACK1_PECQID_SHIFT 0 -#define I40E_VFPE_CQACK1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK1_PECQID_SHIFT) -#define I40E_VFPE_CQARM1 0x0000B400 /* Reset: VFR */ -#define I40E_VFPE_CQARM1_PECQID_SHIFT 0 -#define I40E_VFPE_CQARM1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM1_PECQID_SHIFT) -#define I40E_VFPE_CQPDB1 0x0000BC00 /* Reset: VFR */ -#define I40E_VFPE_CQPDB1_WQHEAD_SHIFT 0 -#define I40E_VFPE_CQPDB1_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB1_WQHEAD_SHIFT) -#define I40E_VFPE_CQPERRCODES1 0x00009C00 /* Reset: VFR */ -#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0 -#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT) -#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16 -#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT) -#define I40E_VFPE_CQPTAIL1 0x0000A000 /* Reset: VFR */ -#define I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT 0 -#define I40E_VFPE_CQPTAIL1_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT) -#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31 -#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT) -#define I40E_VFPE_IPCONFIG01 0x00008C00 /* Reset: VFR */ -#define I40E_VFPE_IPCONFIG01_PEIPID_SHIFT 0 -#define I40E_VFPE_IPCONFIG01_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG01_PEIPID_SHIFT) -#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16 -#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT) -#define I40E_VFPE_MRTEIDXMASK1 0x00009000 /* Reset: VFR */ -#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0 -#define 
I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT) -#define I40E_VFPE_RCVUNEXPECTEDERROR1 0x00009400 /* Reset: VFR */ -#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0 -#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT) -#define I40E_VFPE_TCPNOWTIMER1 0x0000A800 /* Reset: VFR */ -#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0 -#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT) -#define I40E_VFPE_WQEALLOC1 0x0000C000 /* Reset: VFR */ -#define I40E_VFPE_WQEALLOC1_PEQPID_SHIFT 0 -#define I40E_VFPE_WQEALLOC1_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC1_PEQPID_SHIFT) -#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20 -#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT) -#endif /* _I40E_REGISTER_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h deleted file mode 100644 index 094387db3c11..000000000000 --- a/drivers/net/ethernet/intel/i40evf/i40e_type.h +++ /dev/null @@ -1,1496 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ - -#ifndef _I40E_TYPE_H_ -#define _I40E_TYPE_H_ - -#include "i40e_status.h" -#include "i40e_osdep.h" -#include "i40e_register.h" -#include "i40e_adminq.h" -#include "i40e_hmc.h" -#include "i40e_lan_hmc.h" -#include "i40e_devids.h" - -/* I40E_MASK is a macro used on 32 bit registers */ -#define I40E_MASK(mask, shift) ((u32)(mask) << (shift)) - -#define I40E_MAX_VSI_QP 16 -#define I40E_MAX_VF_VSI 3 -#define I40E_MAX_CHAINED_RX_BUFFERS 5 -#define I40E_MAX_PF_UDP_OFFLOAD_PORTS 16 - -/* Max default timeout in ms, */ -#define I40E_MAX_NVM_TIMEOUT 18000 - -/* Max timeout in ms for the phy to respond */ -#define I40E_MAX_PHY_TIMEOUT 500 - -/* Switch from ms to the 1usec global time (this is the GTIME resolution) */ -#define I40E_MS_TO_GTIME(time) ((time) * 1000) - -/* forward declaration */ -struct i40e_hw; -typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *); - -/* Data type manipulation macros. */ - -#define I40E_DESC_UNUSED(R) \ - ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \ - (R)->next_to_clean - (R)->next_to_use - 1) - -/* bitfields for Tx queue mapping in QTX_CTL */ -#define I40E_QTX_CTL_VF_QUEUE 0x0 -#define I40E_QTX_CTL_VM_QUEUE 0x1 -#define I40E_QTX_CTL_PF_QUEUE 0x2 - -/* debug masks - set these bits in hw->debug_mask to control output */ -enum i40e_debug_mask { - I40E_DEBUG_INIT = 0x00000001, - I40E_DEBUG_RELEASE = 0x00000002, - - I40E_DEBUG_LINK = 0x00000010, - I40E_DEBUG_PHY = 0x00000020, - I40E_DEBUG_HMC = 0x00000040, - I40E_DEBUG_NVM = 0x00000080, - I40E_DEBUG_LAN = 0x00000100, - I40E_DEBUG_FLOW = 0x00000200, - I40E_DEBUG_DCB = 0x00000400, - I40E_DEBUG_DIAG = 0x00000800, - I40E_DEBUG_FD = 0x00001000, - I40E_DEBUG_PACKAGE = 0x00002000, - - I40E_DEBUG_AQ_MESSAGE = 0x01000000, - I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000, - I40E_DEBUG_AQ_DESC_BUFFER = 0x04000000, - I40E_DEBUG_AQ_COMMAND = 0x06000000, - I40E_DEBUG_AQ = 0x0F000000, - - I40E_DEBUG_USER = 0xF0000000, - - I40E_DEBUG_ALL = 0xFFFFFFFF -}; - -/* These are structs for managing the hardware information and the operations. - * The structures of function pointers are filled out at init time when we - * know for sure exactly which hardware we're working with. 
This gives us the - * flexibility of using the same main driver code but adapting to slightly - * different hardware needs as new parts are developed. For this architecture, - * the Firmware and AdminQ are intended to insulate the driver from most of the - * future changes, but these structures will also do part of the job. - */ -enum i40e_mac_type { - I40E_MAC_UNKNOWN = 0, - I40E_MAC_XL710, - I40E_MAC_VF, - I40E_MAC_X722, - I40E_MAC_X722_VF, - I40E_MAC_GENERIC, -}; - -enum i40e_media_type { - I40E_MEDIA_TYPE_UNKNOWN = 0, - I40E_MEDIA_TYPE_FIBER, - I40E_MEDIA_TYPE_BASET, - I40E_MEDIA_TYPE_BACKPLANE, - I40E_MEDIA_TYPE_CX4, - I40E_MEDIA_TYPE_DA, - I40E_MEDIA_TYPE_VIRTUAL -}; - -enum i40e_fc_mode { - I40E_FC_NONE = 0, - I40E_FC_RX_PAUSE, - I40E_FC_TX_PAUSE, - I40E_FC_FULL, - I40E_FC_PFC, - I40E_FC_DEFAULT -}; - -enum i40e_set_fc_aq_failures { - I40E_SET_FC_AQ_FAIL_NONE = 0, - I40E_SET_FC_AQ_FAIL_GET = 1, - I40E_SET_FC_AQ_FAIL_SET = 2, - I40E_SET_FC_AQ_FAIL_UPDATE = 4, - I40E_SET_FC_AQ_FAIL_SET_UPDATE = 6 -}; - -enum i40e_vsi_type { - I40E_VSI_MAIN = 0, - I40E_VSI_VMDQ1 = 1, - I40E_VSI_VMDQ2 = 2, - I40E_VSI_CTRL = 3, - I40E_VSI_FCOE = 4, - I40E_VSI_MIRROR = 5, - I40E_VSI_SRIOV = 6, - I40E_VSI_FDIR = 7, - I40E_VSI_TYPE_UNKNOWN -}; - -enum i40e_queue_type { - I40E_QUEUE_TYPE_RX = 0, - I40E_QUEUE_TYPE_TX, - I40E_QUEUE_TYPE_PE_CEQ, - I40E_QUEUE_TYPE_UNKNOWN -}; - -struct i40e_link_status { - enum i40e_aq_phy_type phy_type; - enum i40e_aq_link_speed link_speed; - u8 link_info; - u8 an_info; - u8 req_fec_info; - u8 fec_info; - u8 ext_info; - u8 loopback; - /* is Link Status Event notification to SW enabled */ - bool lse_enable; - u16 max_frame_size; - bool crc_enable; - u8 pacing; - u8 requested_speeds; - u8 module_type[3]; - /* 1st byte: module identifier */ -#define I40E_MODULE_TYPE_SFP 0x03 -#define I40E_MODULE_TYPE_QSFP 0x0D - /* 2nd byte: ethernet compliance codes for 10/40G */ -#define I40E_MODULE_TYPE_40G_ACTIVE 0x01 -#define I40E_MODULE_TYPE_40G_LR4 0x02 -#define I40E_MODULE_TYPE_40G_SR4 0x04 -#define I40E_MODULE_TYPE_40G_CR4 0x08 -#define I40E_MODULE_TYPE_10G_BASE_SR 0x10 -#define I40E_MODULE_TYPE_10G_BASE_LR 0x20 -#define I40E_MODULE_TYPE_10G_BASE_LRM 0x40 -#define I40E_MODULE_TYPE_10G_BASE_ER 0x80 - /* 3rd byte: ethernet compliance codes for 1G */ -#define I40E_MODULE_TYPE_1000BASE_SX 0x01 -#define I40E_MODULE_TYPE_1000BASE_LX 0x02 -#define I40E_MODULE_TYPE_1000BASE_CX 0x04 -#define I40E_MODULE_TYPE_1000BASE_T 0x08 -}; - -struct i40e_phy_info { - struct i40e_link_status link_info; - struct i40e_link_status link_info_old; - bool get_link_info; - enum i40e_media_type media_type; - /* all the phy types the NVM is capable of */ - u64 phy_types; -}; - -#define I40E_CAP_PHY_TYPE_SGMII BIT_ULL(I40E_PHY_TYPE_SGMII) -#define I40E_CAP_PHY_TYPE_1000BASE_KX BIT_ULL(I40E_PHY_TYPE_1000BASE_KX) -#define I40E_CAP_PHY_TYPE_10GBASE_KX4 BIT_ULL(I40E_PHY_TYPE_10GBASE_KX4) -#define I40E_CAP_PHY_TYPE_10GBASE_KR BIT_ULL(I40E_PHY_TYPE_10GBASE_KR) -#define I40E_CAP_PHY_TYPE_40GBASE_KR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_KR4) -#define I40E_CAP_PHY_TYPE_XAUI BIT_ULL(I40E_PHY_TYPE_XAUI) -#define I40E_CAP_PHY_TYPE_XFI BIT_ULL(I40E_PHY_TYPE_XFI) -#define I40E_CAP_PHY_TYPE_SFI BIT_ULL(I40E_PHY_TYPE_SFI) -#define I40E_CAP_PHY_TYPE_XLAUI BIT_ULL(I40E_PHY_TYPE_XLAUI) -#define I40E_CAP_PHY_TYPE_XLPPI BIT_ULL(I40E_PHY_TYPE_XLPPI) -#define I40E_CAP_PHY_TYPE_40GBASE_CR4_CU BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4_CU) -#define I40E_CAP_PHY_TYPE_10GBASE_CR1_CU BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1_CU) -#define I40E_CAP_PHY_TYPE_10GBASE_AOC 
BIT_ULL(I40E_PHY_TYPE_10GBASE_AOC) -#define I40E_CAP_PHY_TYPE_40GBASE_AOC BIT_ULL(I40E_PHY_TYPE_40GBASE_AOC) -#define I40E_CAP_PHY_TYPE_100BASE_TX BIT_ULL(I40E_PHY_TYPE_100BASE_TX) -#define I40E_CAP_PHY_TYPE_1000BASE_T BIT_ULL(I40E_PHY_TYPE_1000BASE_T) -#define I40E_CAP_PHY_TYPE_10GBASE_T BIT_ULL(I40E_PHY_TYPE_10GBASE_T) -#define I40E_CAP_PHY_TYPE_10GBASE_SR BIT_ULL(I40E_PHY_TYPE_10GBASE_SR) -#define I40E_CAP_PHY_TYPE_10GBASE_LR BIT_ULL(I40E_PHY_TYPE_10GBASE_LR) -#define I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU BIT_ULL(I40E_PHY_TYPE_10GBASE_SFPP_CU) -#define I40E_CAP_PHY_TYPE_10GBASE_CR1 BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1) -#define I40E_CAP_PHY_TYPE_40GBASE_CR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4) -#define I40E_CAP_PHY_TYPE_40GBASE_SR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_SR4) -#define I40E_CAP_PHY_TYPE_40GBASE_LR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_LR4) -#define I40E_CAP_PHY_TYPE_1000BASE_SX BIT_ULL(I40E_PHY_TYPE_1000BASE_SX) -#define I40E_CAP_PHY_TYPE_1000BASE_LX BIT_ULL(I40E_PHY_TYPE_1000BASE_LX) -#define I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL \ - BIT_ULL(I40E_PHY_TYPE_1000BASE_T_OPTICAL) -#define I40E_CAP_PHY_TYPE_20GBASE_KR2 BIT_ULL(I40E_PHY_TYPE_20GBASE_KR2) -/* Defining the macro I40E_TYPE_OFFSET to implement a bit shift for some - * PHY types. There is an unused bit (31) in the I40E_CAP_PHY_TYPE_* bit - * fields but no corresponding gap in the i40e_aq_phy_type enumeration. So, - * a shift is needed to adjust for this with values larger than 31. The - * only affected values are I40E_PHY_TYPE_25GBASE_*. - */ -#define I40E_PHY_TYPE_OFFSET 1 -#define I40E_CAP_PHY_TYPE_25GBASE_KR BIT_ULL(I40E_PHY_TYPE_25GBASE_KR + \ - I40E_PHY_TYPE_OFFSET) -#define I40E_CAP_PHY_TYPE_25GBASE_CR BIT_ULL(I40E_PHY_TYPE_25GBASE_CR + \ - I40E_PHY_TYPE_OFFSET) -#define I40E_CAP_PHY_TYPE_25GBASE_SR BIT_ULL(I40E_PHY_TYPE_25GBASE_SR + \ - I40E_PHY_TYPE_OFFSET) -#define I40E_CAP_PHY_TYPE_25GBASE_LR BIT_ULL(I40E_PHY_TYPE_25GBASE_LR + \ - I40E_PHY_TYPE_OFFSET) -#define I40E_HW_CAP_MAX_GPIO 30 -/* Capabilities of a PF or a VF or the whole device */ -struct i40e_hw_capabilities { - u32 switch_mode; -#define I40E_NVM_IMAGE_TYPE_EVB 0x0 -#define I40E_NVM_IMAGE_TYPE_CLOUD 0x2 -#define I40E_NVM_IMAGE_TYPE_UDP_CLOUD 0x3 - - u32 management_mode; - u32 mng_protocols_over_mctp; -#define I40E_MNG_PROTOCOL_PLDM 0x2 -#define I40E_MNG_PROTOCOL_OEM_COMMANDS 0x4 -#define I40E_MNG_PROTOCOL_NCSI 0x8 - u32 npar_enable; - u32 os2bmc; - u32 valid_functions; - bool sr_iov_1_1; - bool vmdq; - bool evb_802_1_qbg; /* Edge Virtual Bridging */ - bool evb_802_1_qbh; /* Bridge Port Extension */ - bool dcb; - bool fcoe; - bool iscsi; /* Indicates iSCSI enabled */ - bool flex10_enable; - bool flex10_capable; - u32 flex10_mode; -#define I40E_FLEX10_MODE_UNKNOWN 0x0 -#define I40E_FLEX10_MODE_DCC 0x1 -#define I40E_FLEX10_MODE_DCI 0x2 - - u32 flex10_status; -#define I40E_FLEX10_STATUS_DCC_ERROR 0x1 -#define I40E_FLEX10_STATUS_VC_MODE 0x2 - - bool sec_rev_disabled; - bool update_disabled; -#define I40E_NVM_MGMT_SEC_REV_DISABLED 0x1 -#define I40E_NVM_MGMT_UPDATE_DISABLED 0x2 - - bool mgmt_cem; - bool ieee_1588; - bool iwarp; - bool fd; - u32 fd_filters_guaranteed; - u32 fd_filters_best_effort; - bool rss; - u32 rss_table_size; - u32 rss_table_entry_width; - bool led[I40E_HW_CAP_MAX_GPIO]; - bool sdp[I40E_HW_CAP_MAX_GPIO]; - u32 nvm_image_type; - u32 num_flow_director_filters; - u32 num_vfs; - u32 vf_base_id; - u32 num_vsis; - u32 num_rx_qp; - u32 num_tx_qp; - u32 base_queue; - u32 num_msix_vectors; - u32 num_msix_vectors_vf; - u32 led_pin_num; - u32 sdp_pin_num; - u32 
mdio_port_num; - u32 mdio_port_mode; - u8 rx_buf_chain_len; - u32 enabled_tcmap; - u32 maxtc; - u64 wr_csr_prot; -}; - -struct i40e_mac_info { - enum i40e_mac_type type; - u8 addr[ETH_ALEN]; - u8 perm_addr[ETH_ALEN]; - u8 san_addr[ETH_ALEN]; - u16 max_fcoeq; -}; - -enum i40e_aq_resources_ids { - I40E_NVM_RESOURCE_ID = 1 -}; - -enum i40e_aq_resource_access_type { - I40E_RESOURCE_READ = 1, - I40E_RESOURCE_WRITE -}; - -struct i40e_nvm_info { - u64 hw_semaphore_timeout; /* usec global time (GTIME resolution) */ - u32 timeout; /* [ms] */ - u16 sr_size; /* Shadow RAM size in words */ - bool blank_nvm_mode; /* is NVM empty (no FW present)*/ - u16 version; /* NVM package version */ - u32 eetrack; /* NVM data version */ - u32 oem_ver; /* OEM version info */ -}; - -/* definitions used in NVM update support */ - -enum i40e_nvmupd_cmd { - I40E_NVMUPD_INVALID, - I40E_NVMUPD_READ_CON, - I40E_NVMUPD_READ_SNT, - I40E_NVMUPD_READ_LCB, - I40E_NVMUPD_READ_SA, - I40E_NVMUPD_WRITE_ERA, - I40E_NVMUPD_WRITE_CON, - I40E_NVMUPD_WRITE_SNT, - I40E_NVMUPD_WRITE_LCB, - I40E_NVMUPD_WRITE_SA, - I40E_NVMUPD_CSUM_CON, - I40E_NVMUPD_CSUM_SA, - I40E_NVMUPD_CSUM_LCB, - I40E_NVMUPD_STATUS, - I40E_NVMUPD_EXEC_AQ, - I40E_NVMUPD_GET_AQ_RESULT, - I40E_NVMUPD_GET_AQ_EVENT, -}; - -enum i40e_nvmupd_state { - I40E_NVMUPD_STATE_INIT, - I40E_NVMUPD_STATE_READING, - I40E_NVMUPD_STATE_WRITING, - I40E_NVMUPD_STATE_INIT_WAIT, - I40E_NVMUPD_STATE_WRITE_WAIT, - I40E_NVMUPD_STATE_ERROR -}; - -/* nvm_access definition and its masks/shifts need to be accessible to - * application, core driver, and shared code. Where is the right file? - */ -#define I40E_NVM_READ 0xB -#define I40E_NVM_WRITE 0xC - -#define I40E_NVM_MOD_PNT_MASK 0xFF - -#define I40E_NVM_TRANS_SHIFT 8 -#define I40E_NVM_TRANS_MASK (0xf << I40E_NVM_TRANS_SHIFT) -#define I40E_NVM_PRESERVATION_FLAGS_SHIFT 12 -#define I40E_NVM_PRESERVATION_FLAGS_MASK \ - (0x3 << I40E_NVM_PRESERVATION_FLAGS_SHIFT) -#define I40E_NVM_PRESERVATION_FLAGS_SELECTED 0x01 -#define I40E_NVM_PRESERVATION_FLAGS_ALL 0x02 -#define I40E_NVM_CON 0x0 -#define I40E_NVM_SNT 0x1 -#define I40E_NVM_LCB 0x2 -#define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB) -#define I40E_NVM_ERA 0x4 -#define I40E_NVM_CSUM 0x8 -#define I40E_NVM_AQE 0xe -#define I40E_NVM_EXEC 0xf - -#define I40E_NVM_ADAPT_SHIFT 16 -#define I40E_NVM_ADAPT_MASK (0xffff << I40E_NVM_ADAPT_SHIFT) - -#define I40E_NVMUPD_MAX_DATA 4096 -#define I40E_NVMUPD_IFACE_TIMEOUT 2 /* seconds */ - -struct i40e_nvm_access { - u32 command; - u32 config; - u32 offset; /* in bytes */ - u32 data_size; /* in bytes */ - u8 data[1]; -}; - -/* (Q)SFP module access definitions */ -#define I40E_I2C_EEPROM_DEV_ADDR 0xA0 -#define I40E_I2C_EEPROM_DEV_ADDR2 0xA2 -#define I40E_MODULE_TYPE_ADDR 0x00 -#define I40E_MODULE_REVISION_ADDR 0x01 -#define I40E_MODULE_SFF_8472_COMP 0x5E -#define I40E_MODULE_SFF_8472_SWAP 0x5C -#define I40E_MODULE_SFF_ADDR_MODE 0x04 -#define I40E_MODULE_TYPE_QSFP_PLUS 0x0D -#define I40E_MODULE_TYPE_QSFP28 0x11 -#define I40E_MODULE_QSFP_MAX_LEN 640 - -/* PCI bus types */ -enum i40e_bus_type { - i40e_bus_type_unknown = 0, - i40e_bus_type_pci, - i40e_bus_type_pcix, - i40e_bus_type_pci_express, - i40e_bus_type_reserved -}; - -/* PCI bus speeds */ -enum i40e_bus_speed { - i40e_bus_speed_unknown = 0, - i40e_bus_speed_33 = 33, - i40e_bus_speed_66 = 66, - i40e_bus_speed_100 = 100, - i40e_bus_speed_120 = 120, - i40e_bus_speed_133 = 133, - i40e_bus_speed_2500 = 2500, - i40e_bus_speed_5000 = 5000, - i40e_bus_speed_8000 = 8000, - i40e_bus_speed_reserved -}; - -/* PCI bus widths */ 
-enum i40e_bus_width { - i40e_bus_width_unknown = 0, - i40e_bus_width_pcie_x1 = 1, - i40e_bus_width_pcie_x2 = 2, - i40e_bus_width_pcie_x4 = 4, - i40e_bus_width_pcie_x8 = 8, - i40e_bus_width_32 = 32, - i40e_bus_width_64 = 64, - i40e_bus_width_reserved -}; - -/* Bus parameters */ -struct i40e_bus_info { - enum i40e_bus_speed speed; - enum i40e_bus_width width; - enum i40e_bus_type type; - - u16 func; - u16 device; - u16 lan_id; - u16 bus_id; -}; - -/* Flow control (FC) parameters */ -struct i40e_fc_info { - enum i40e_fc_mode current_mode; /* FC mode in effect */ - enum i40e_fc_mode requested_mode; /* FC mode requested by caller */ -}; - -#define I40E_MAX_TRAFFIC_CLASS 8 -#define I40E_MAX_USER_PRIORITY 8 -#define I40E_DCBX_MAX_APPS 32 -#define I40E_LLDPDU_SIZE 1500 - -/* IEEE 802.1Qaz ETS Configuration data */ -struct i40e_ieee_ets_config { - u8 willing; - u8 cbs; - u8 maxtcs; - u8 prioritytable[I40E_MAX_TRAFFIC_CLASS]; - u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS]; - u8 tsatable[I40E_MAX_TRAFFIC_CLASS]; -}; - -/* IEEE 802.1Qaz ETS Recommendation data */ -struct i40e_ieee_ets_recommend { - u8 prioritytable[I40E_MAX_TRAFFIC_CLASS]; - u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS]; - u8 tsatable[I40E_MAX_TRAFFIC_CLASS]; -}; - -/* IEEE 802.1Qaz PFC Configuration data */ -struct i40e_ieee_pfc_config { - u8 willing; - u8 mbc; - u8 pfccap; - u8 pfcenable; -}; - -/* IEEE 802.1Qaz Application Priority data */ -struct i40e_ieee_app_priority_table { - u8 priority; - u8 selector; - u16 protocolid; -}; - -struct i40e_dcbx_config { - u32 numapps; - u32 tlv_status; /* CEE mode TLV status */ - struct i40e_ieee_ets_config etscfg; - struct i40e_ieee_ets_recommend etsrec; - struct i40e_ieee_pfc_config pfc; - struct i40e_ieee_app_priority_table app[I40E_DCBX_MAX_APPS]; -}; - -/* Port hardware description */ -struct i40e_hw { - u8 __iomem *hw_addr; - void *back; - - /* subsystem structs */ - struct i40e_phy_info phy; - struct i40e_mac_info mac; - struct i40e_bus_info bus; - struct i40e_nvm_info nvm; - struct i40e_fc_info fc; - - /* pci info */ - u16 device_id; - u16 vendor_id; - u16 subsystem_device_id; - u16 subsystem_vendor_id; - u8 revision_id; - u8 port; - bool adapter_stopped; - - /* capabilities for entire device and PCI func */ - struct i40e_hw_capabilities dev_caps; - struct i40e_hw_capabilities func_caps; - - /* Flow Director shared filter space */ - u16 fdir_shared_filter_count; - - /* device profile info */ - u8 pf_id; - u16 main_vsi_seid; - - /* for multi-function MACs */ - u16 partition_id; - u16 num_partitions; - u16 num_ports; - - /* Closest numa node to the device */ - u16 numa_node; - - /* Admin Queue info */ - struct i40e_adminq_info aq; - - /* state of nvm update process */ - enum i40e_nvmupd_state nvmupd_state; - struct i40e_aq_desc nvm_wb_desc; - struct i40e_aq_desc nvm_aq_event_desc; - struct i40e_virt_mem nvm_buff; - bool nvm_release_on_done; - u16 nvm_wait_opcode; - - /* HMC info */ - struct i40e_hmc_info hmc; /* HMC info struct */ - - /* LLDP/DCBX Status */ - u16 dcbx_status; - -#define I40E_HW_FLAG_802_1AD_CAPABLE BIT_ULL(1) -#define I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE BIT_ULL(2) - - /* DCBX info */ - struct i40e_dcbx_config local_dcbx_config; /* Oper/Local Cfg */ - struct i40e_dcbx_config remote_dcbx_config; /* Peer Cfg */ - struct i40e_dcbx_config desired_dcbx_config; /* CEE Desired Cfg */ - - /* Used in set switch config AQ command */ - u16 switch_tag; - u16 first_tag; - u16 second_tag; - - /* debug mask */ - u32 debug_mask; - char err_str[16]; -}; - -static inline bool i40e_is_vf(struct i40e_hw *hw) 
-{ - return (hw->mac.type == I40E_MAC_VF || - hw->mac.type == I40E_MAC_X722_VF); -} - -struct i40e_driver_version { - u8 major_version; - u8 minor_version; - u8 build_version; - u8 subbuild_version; - u8 driver_string[32]; -}; - -/* RX Descriptors */ -union i40e_16byte_rx_desc { - struct { - __le64 pkt_addr; /* Packet buffer address */ - __le64 hdr_addr; /* Header buffer address */ - } read; - struct { - struct { - struct { - union { - __le16 mirroring_status; - __le16 fcoe_ctx_id; - } mirr_fcoe; - __le16 l2tag1; - } lo_dword; - union { - __le32 rss; /* RSS Hash */ - __le32 fd_id; /* Flow director filter id */ - __le32 fcoe_param; /* FCoE DDP Context id */ - } hi_dword; - } qword0; - struct { - /* ext status/error/pktype/length */ - __le64 status_error_len; - } qword1; - } wb; /* writeback */ -}; - -union i40e_32byte_rx_desc { - struct { - __le64 pkt_addr; /* Packet buffer address */ - __le64 hdr_addr; /* Header buffer address */ - /* bit 0 of hdr_buffer_addr is DD bit */ - __le64 rsvd1; - __le64 rsvd2; - } read; - struct { - struct { - struct { - union { - __le16 mirroring_status; - __le16 fcoe_ctx_id; - } mirr_fcoe; - __le16 l2tag1; - } lo_dword; - union { - __le32 rss; /* RSS Hash */ - __le32 fcoe_param; /* FCoE DDP Context id */ - /* Flow director filter id in case of - * Programming status desc WB - */ - __le32 fd_id; - } hi_dword; - } qword0; - struct { - /* status/error/pktype/length */ - __le64 status_error_len; - } qword1; - struct { - __le16 ext_status; /* extended status */ - __le16 rsvd; - __le16 l2tag2_1; - __le16 l2tag2_2; - } qword2; - struct { - union { - __le32 flex_bytes_lo; - __le32 pe_status; - } lo_dword; - union { - __le32 flex_bytes_hi; - __le32 fd_id; - } hi_dword; - } qword3; - } wb; /* writeback */ -}; - -enum i40e_rx_desc_status_bits { - /* Note: These are predefined bit offsets */ - I40E_RX_DESC_STATUS_DD_SHIFT = 0, - I40E_RX_DESC_STATUS_EOF_SHIFT = 1, - I40E_RX_DESC_STATUS_L2TAG1P_SHIFT = 2, - I40E_RX_DESC_STATUS_L3L4P_SHIFT = 3, - I40E_RX_DESC_STATUS_CRCP_SHIFT = 4, - I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */ - I40E_RX_DESC_STATUS_TSYNVALID_SHIFT = 7, - /* Note: Bit 8 is reserved in X710 and XL710 */ - I40E_RX_DESC_STATUS_EXT_UDP_0_SHIFT = 8, - I40E_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */ - I40E_RX_DESC_STATUS_FLM_SHIFT = 11, - I40E_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */ - I40E_RX_DESC_STATUS_LPBK_SHIFT = 14, - I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15, - I40E_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */ - /* Note: For non-tunnel packets INT_UDP_0 is the right status for - * UDP header - */ - I40E_RX_DESC_STATUS_INT_UDP_0_SHIFT = 18, - I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */ -}; - -#define I40E_RXD_QW1_STATUS_SHIFT 0 -#define I40E_RXD_QW1_STATUS_MASK ((BIT(I40E_RX_DESC_STATUS_LAST) - 1) \ - << I40E_RXD_QW1_STATUS_SHIFT) - -#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT -#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \ - I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT) - -#define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT I40E_RX_DESC_STATUS_TSYNVALID_SHIFT -#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK \ - BIT_ULL(I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT) - -enum i40e_rx_desc_fltstat_values { - I40E_RX_DESC_FLTSTAT_NO_DATA = 0, - I40E_RX_DESC_FLTSTAT_RSV_FD_ID = 1, /* 16byte desc? 
FD_ID : RSV */ - I40E_RX_DESC_FLTSTAT_RSV = 2, - I40E_RX_DESC_FLTSTAT_RSS_HASH = 3, -}; - -#define I40E_RXD_QW1_ERROR_SHIFT 19 -#define I40E_RXD_QW1_ERROR_MASK (0xFFUL << I40E_RXD_QW1_ERROR_SHIFT) - -enum i40e_rx_desc_error_bits { - /* Note: These are predefined bit offsets */ - I40E_RX_DESC_ERROR_RXE_SHIFT = 0, - I40E_RX_DESC_ERROR_RECIPE_SHIFT = 1, - I40E_RX_DESC_ERROR_HBO_SHIFT = 2, - I40E_RX_DESC_ERROR_L3L4E_SHIFT = 3, /* 3 BITS */ - I40E_RX_DESC_ERROR_IPE_SHIFT = 3, - I40E_RX_DESC_ERROR_L4E_SHIFT = 4, - I40E_RX_DESC_ERROR_EIPE_SHIFT = 5, - I40E_RX_DESC_ERROR_OVERSIZE_SHIFT = 6, - I40E_RX_DESC_ERROR_PPRS_SHIFT = 7 -}; - -enum i40e_rx_desc_error_l3l4e_fcoe_masks { - I40E_RX_DESC_ERROR_L3L4E_NONE = 0, - I40E_RX_DESC_ERROR_L3L4E_PROT = 1, - I40E_RX_DESC_ERROR_L3L4E_FC = 2, - I40E_RX_DESC_ERROR_L3L4E_DMAC_ERR = 3, - I40E_RX_DESC_ERROR_L3L4E_DMAC_WARN = 4 -}; - -#define I40E_RXD_QW1_PTYPE_SHIFT 30 -#define I40E_RXD_QW1_PTYPE_MASK (0xFFULL << I40E_RXD_QW1_PTYPE_SHIFT) - -/* Packet type non-ip values */ -enum i40e_rx_l2_ptype { - I40E_RX_PTYPE_L2_RESERVED = 0, - I40E_RX_PTYPE_L2_MAC_PAY2 = 1, - I40E_RX_PTYPE_L2_TIMESYNC_PAY2 = 2, - I40E_RX_PTYPE_L2_FIP_PAY2 = 3, - I40E_RX_PTYPE_L2_OUI_PAY2 = 4, - I40E_RX_PTYPE_L2_MACCNTRL_PAY2 = 5, - I40E_RX_PTYPE_L2_LLDP_PAY2 = 6, - I40E_RX_PTYPE_L2_ECP_PAY2 = 7, - I40E_RX_PTYPE_L2_EVB_PAY2 = 8, - I40E_RX_PTYPE_L2_QCN_PAY2 = 9, - I40E_RX_PTYPE_L2_EAPOL_PAY2 = 10, - I40E_RX_PTYPE_L2_ARP = 11, - I40E_RX_PTYPE_L2_FCOE_PAY3 = 12, - I40E_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13, - I40E_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14, - I40E_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15, - I40E_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16, - I40E_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17, - I40E_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18, - I40E_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19, - I40E_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20, - I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21, - I40E_RX_PTYPE_GRENAT4_MAC_PAY3 = 58, - I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4 = 87, - I40E_RX_PTYPE_GRENAT6_MAC_PAY3 = 124, - I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4 = 153 -}; - -struct i40e_rx_ptype_decoded { - u32 ptype:8; - u32 known:1; - u32 outer_ip:1; - u32 outer_ip_ver:1; - u32 outer_frag:1; - u32 tunnel_type:3; - u32 tunnel_end_prot:2; - u32 tunnel_end_frag:1; - u32 inner_prot:4; - u32 payload_layer:3; -}; - -enum i40e_rx_ptype_outer_ip { - I40E_RX_PTYPE_OUTER_L2 = 0, - I40E_RX_PTYPE_OUTER_IP = 1 -}; - -enum i40e_rx_ptype_outer_ip_ver { - I40E_RX_PTYPE_OUTER_NONE = 0, - I40E_RX_PTYPE_OUTER_IPV4 = 0, - I40E_RX_PTYPE_OUTER_IPV6 = 1 -}; - -enum i40e_rx_ptype_outer_fragmented { - I40E_RX_PTYPE_NOT_FRAG = 0, - I40E_RX_PTYPE_FRAG = 1 -}; - -enum i40e_rx_ptype_tunnel_type { - I40E_RX_PTYPE_TUNNEL_NONE = 0, - I40E_RX_PTYPE_TUNNEL_IP_IP = 1, - I40E_RX_PTYPE_TUNNEL_IP_GRENAT = 2, - I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3, - I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4, -}; - -enum i40e_rx_ptype_tunnel_end_prot { - I40E_RX_PTYPE_TUNNEL_END_NONE = 0, - I40E_RX_PTYPE_TUNNEL_END_IPV4 = 1, - I40E_RX_PTYPE_TUNNEL_END_IPV6 = 2, -}; - -enum i40e_rx_ptype_inner_prot { - I40E_RX_PTYPE_INNER_PROT_NONE = 0, - I40E_RX_PTYPE_INNER_PROT_UDP = 1, - I40E_RX_PTYPE_INNER_PROT_TCP = 2, - I40E_RX_PTYPE_INNER_PROT_SCTP = 3, - I40E_RX_PTYPE_INNER_PROT_ICMP = 4, - I40E_RX_PTYPE_INNER_PROT_TIMESYNC = 5 -}; - -enum i40e_rx_ptype_payload_layer { - I40E_RX_PTYPE_PAYLOAD_LAYER_NONE = 0, - I40E_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1, - I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2, - I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3, -}; - -#define I40E_RXD_QW1_LENGTH_PBUF_SHIFT 38 -#define I40E_RXD_QW1_LENGTH_PBUF_MASK 
(0x3FFFULL << \ - I40E_RXD_QW1_LENGTH_PBUF_SHIFT) - -#define I40E_RXD_QW1_LENGTH_HBUF_SHIFT 52 -#define I40E_RXD_QW1_LENGTH_HBUF_MASK (0x7FFULL << \ - I40E_RXD_QW1_LENGTH_HBUF_SHIFT) - -#define I40E_RXD_QW1_LENGTH_SPH_SHIFT 63 -#define I40E_RXD_QW1_LENGTH_SPH_MASK BIT_ULL(I40E_RXD_QW1_LENGTH_SPH_SHIFT) - -enum i40e_rx_desc_ext_status_bits { - /* Note: These are predefined bit offsets */ - I40E_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT = 0, - I40E_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT = 1, - I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT = 2, /* 2 BITS */ - I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT = 4, /* 2 BITS */ - I40E_RX_DESC_EXT_STATUS_FDLONGB_SHIFT = 9, - I40E_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT = 10, - I40E_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11, -}; - -enum i40e_rx_desc_pe_status_bits { - /* Note: These are predefined bit offsets */ - I40E_RX_DESC_PE_STATUS_QPID_SHIFT = 0, /* 18 BITS */ - I40E_RX_DESC_PE_STATUS_L4PORT_SHIFT = 0, /* 16 BITS */ - I40E_RX_DESC_PE_STATUS_IPINDEX_SHIFT = 16, /* 8 BITS */ - I40E_RX_DESC_PE_STATUS_QPIDHIT_SHIFT = 24, - I40E_RX_DESC_PE_STATUS_APBVTHIT_SHIFT = 25, - I40E_RX_DESC_PE_STATUS_PORTV_SHIFT = 26, - I40E_RX_DESC_PE_STATUS_URG_SHIFT = 27, - I40E_RX_DESC_PE_STATUS_IPFRAG_SHIFT = 28, - I40E_RX_DESC_PE_STATUS_IPOPT_SHIFT = 29 -}; - -#define I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT 38 -#define I40E_RX_PROG_STATUS_DESC_LENGTH 0x2000000 - -#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT 2 -#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK (0x7UL << \ - I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT) - -#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT 19 -#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK (0x3FUL << \ - I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT) - -enum i40e_rx_prog_status_desc_status_bits { - /* Note: These are predefined bit offsets */ - I40E_RX_PROG_STATUS_DESC_DD_SHIFT = 0, - I40E_RX_PROG_STATUS_DESC_PROG_ID_SHIFT = 2 /* 3 BITS */ -}; - -enum i40e_rx_prog_status_desc_prog_id_masks { - I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS = 1, - I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS = 2, - I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS = 4, -}; - -enum i40e_rx_prog_status_desc_error_bits { - /* Note: These are predefined bit offsets */ - I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT = 0, - I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT = 1, - I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT = 2, - I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT = 3 -}; - -/* TX Descriptor */ -struct i40e_tx_desc { - __le64 buffer_addr; /* Address of descriptor's data buf */ - __le64 cmd_type_offset_bsz; -}; - -#define I40E_TXD_QW1_DTYPE_SHIFT 0 -#define I40E_TXD_QW1_DTYPE_MASK (0xFUL << I40E_TXD_QW1_DTYPE_SHIFT) - -enum i40e_tx_desc_dtype_value { - I40E_TX_DESC_DTYPE_DATA = 0x0, - I40E_TX_DESC_DTYPE_NOP = 0x1, /* same as Context desc */ - I40E_TX_DESC_DTYPE_CONTEXT = 0x1, - I40E_TX_DESC_DTYPE_FCOE_CTX = 0x2, - I40E_TX_DESC_DTYPE_FILTER_PROG = 0x8, - I40E_TX_DESC_DTYPE_DDP_CTX = 0x9, - I40E_TX_DESC_DTYPE_FLEX_DATA = 0xB, - I40E_TX_DESC_DTYPE_FLEX_CTX_1 = 0xC, - I40E_TX_DESC_DTYPE_FLEX_CTX_2 = 0xD, - I40E_TX_DESC_DTYPE_DESC_DONE = 0xF -}; - -#define I40E_TXD_QW1_CMD_SHIFT 4 -#define I40E_TXD_QW1_CMD_MASK (0x3FFUL << I40E_TXD_QW1_CMD_SHIFT) - -enum i40e_tx_desc_cmd_bits { - I40E_TX_DESC_CMD_EOP = 0x0001, - I40E_TX_DESC_CMD_RS = 0x0002, - I40E_TX_DESC_CMD_ICRC = 0x0004, - I40E_TX_DESC_CMD_IL2TAG1 = 0x0008, - I40E_TX_DESC_CMD_DUMMY = 0x0010, - I40E_TX_DESC_CMD_IIPT_NONIP = 0x0000, /* 2 BITS */ - I40E_TX_DESC_CMD_IIPT_IPV6 = 0x0020, /* 2 BITS */ - I40E_TX_DESC_CMD_IIPT_IPV4 = 0x0040, /* 2 BITS */ - 
I40E_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060, /* 2 BITS */ - I40E_TX_DESC_CMD_FCOET = 0x0080, - I40E_TX_DESC_CMD_L4T_EOFT_UNK = 0x0000, /* 2 BITS */ - I40E_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100, /* 2 BITS */ - I40E_TX_DESC_CMD_L4T_EOFT_SCTP = 0x0200, /* 2 BITS */ - I40E_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300, /* 2 BITS */ - I40E_TX_DESC_CMD_L4T_EOFT_EOF_N = 0x0000, /* 2 BITS */ - I40E_TX_DESC_CMD_L4T_EOFT_EOF_T = 0x0100, /* 2 BITS */ - I40E_TX_DESC_CMD_L4T_EOFT_EOF_NI = 0x0200, /* 2 BITS */ - I40E_TX_DESC_CMD_L4T_EOFT_EOF_A = 0x0300, /* 2 BITS */ -}; - -#define I40E_TXD_QW1_OFFSET_SHIFT 16 -#define I40E_TXD_QW1_OFFSET_MASK (0x3FFFFULL << \ - I40E_TXD_QW1_OFFSET_SHIFT) - -enum i40e_tx_desc_length_fields { - /* Note: These are predefined bit offsets */ - I40E_TX_DESC_LENGTH_MACLEN_SHIFT = 0, /* 7 BITS */ - I40E_TX_DESC_LENGTH_IPLEN_SHIFT = 7, /* 7 BITS */ - I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT = 14 /* 4 BITS */ -}; - -#define I40E_TXD_QW1_TX_BUF_SZ_SHIFT 34 -#define I40E_TXD_QW1_TX_BUF_SZ_MASK (0x3FFFULL << \ - I40E_TXD_QW1_TX_BUF_SZ_SHIFT) - -#define I40E_TXD_QW1_L2TAG1_SHIFT 48 -#define I40E_TXD_QW1_L2TAG1_MASK (0xFFFFULL << I40E_TXD_QW1_L2TAG1_SHIFT) - -/* Context descriptors */ -struct i40e_tx_context_desc { - __le32 tunneling_params; - __le16 l2tag2; - __le16 rsvd; - __le64 type_cmd_tso_mss; -}; - -#define I40E_TXD_CTX_QW1_DTYPE_SHIFT 0 -#define I40E_TXD_CTX_QW1_DTYPE_MASK (0xFUL << I40E_TXD_CTX_QW1_DTYPE_SHIFT) - -#define I40E_TXD_CTX_QW1_CMD_SHIFT 4 -#define I40E_TXD_CTX_QW1_CMD_MASK (0xFFFFUL << I40E_TXD_CTX_QW1_CMD_SHIFT) - -enum i40e_tx_ctx_desc_cmd_bits { - I40E_TX_CTX_DESC_TSO = 0x01, - I40E_TX_CTX_DESC_TSYN = 0x02, - I40E_TX_CTX_DESC_IL2TAG2 = 0x04, - I40E_TX_CTX_DESC_IL2TAG2_IL2H = 0x08, - I40E_TX_CTX_DESC_SWTCH_NOTAG = 0x00, - I40E_TX_CTX_DESC_SWTCH_UPLINK = 0x10, - I40E_TX_CTX_DESC_SWTCH_LOCAL = 0x20, - I40E_TX_CTX_DESC_SWTCH_VSI = 0x30, - I40E_TX_CTX_DESC_SWPE = 0x40 -}; - -#define I40E_TXD_CTX_QW1_TSO_LEN_SHIFT 30 -#define I40E_TXD_CTX_QW1_TSO_LEN_MASK (0x3FFFFULL << \ - I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) - -#define I40E_TXD_CTX_QW1_MSS_SHIFT 50 -#define I40E_TXD_CTX_QW1_MSS_MASK (0x3FFFULL << \ - I40E_TXD_CTX_QW1_MSS_SHIFT) - -#define I40E_TXD_CTX_QW1_VSI_SHIFT 50 -#define I40E_TXD_CTX_QW1_VSI_MASK (0x1FFULL << I40E_TXD_CTX_QW1_VSI_SHIFT) - -#define I40E_TXD_CTX_QW0_EXT_IP_SHIFT 0 -#define I40E_TXD_CTX_QW0_EXT_IP_MASK (0x3ULL << \ - I40E_TXD_CTX_QW0_EXT_IP_SHIFT) - -enum i40e_tx_ctx_desc_eipt_offload { - I40E_TX_CTX_EXT_IP_NONE = 0x0, - I40E_TX_CTX_EXT_IP_IPV6 = 0x1, - I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM = 0x2, - I40E_TX_CTX_EXT_IP_IPV4 = 0x3 -}; - -#define I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT 2 -#define I40E_TXD_CTX_QW0_EXT_IPLEN_MASK (0x3FULL << \ - I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT) - -#define I40E_TXD_CTX_QW0_NATT_SHIFT 9 -#define I40E_TXD_CTX_QW0_NATT_MASK (0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT) - -#define I40E_TXD_CTX_UDP_TUNNELING BIT_ULL(I40E_TXD_CTX_QW0_NATT_SHIFT) -#define I40E_TXD_CTX_GRE_TUNNELING (0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT) - -#define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT 11 -#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK \ - BIT_ULL(I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT) - -#define I40E_TXD_CTX_EIP_NOINC_IPID_CONST I40E_TXD_CTX_QW0_EIP_NOINC_MASK - -#define I40E_TXD_CTX_QW0_NATLEN_SHIFT 12 -#define I40E_TXD_CTX_QW0_NATLEN_MASK (0X7FULL << \ - I40E_TXD_CTX_QW0_NATLEN_SHIFT) - -#define I40E_TXD_CTX_QW0_DECTTL_SHIFT 19 -#define I40E_TXD_CTX_QW0_DECTTL_MASK (0xFULL << \ - I40E_TXD_CTX_QW0_DECTTL_SHIFT) - -#define I40E_TXD_CTX_QW0_L4T_CS_SHIFT 23 -#define I40E_TXD_CTX_QW0_L4T_CS_MASK 
BIT_ULL(I40E_TXD_CTX_QW0_L4T_CS_SHIFT) -struct i40e_filter_program_desc { - __le32 qindex_flex_ptype_vsi; - __le32 rsvd; - __le32 dtype_cmd_cntindex; - __le32 fd_id; -}; -#define I40E_TXD_FLTR_QW0_QINDEX_SHIFT 0 -#define I40E_TXD_FLTR_QW0_QINDEX_MASK (0x7FFUL << \ - I40E_TXD_FLTR_QW0_QINDEX_SHIFT) -#define I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT 11 -#define I40E_TXD_FLTR_QW0_FLEXOFF_MASK (0x7UL << \ - I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) -#define I40E_TXD_FLTR_QW0_PCTYPE_SHIFT 17 -#define I40E_TXD_FLTR_QW0_PCTYPE_MASK (0x3FUL << \ - I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) - -/* Packet Classifier Types for filters */ -enum i40e_filter_pctype { - /* Note: Values 0-28 are reserved for future use. - * Value 29, 30, 32 are not supported on XL710 and X710. - */ - I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29, - I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30, - I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31, - I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK = 32, - I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33, - I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34, - I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35, - I40E_FILTER_PCTYPE_FRAG_IPV4 = 36, - /* Note: Values 37-38 are reserved for future use. - * Value 39, 40, 42 are not supported on XL710 and X710. - */ - I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39, - I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40, - I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41, - I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK = 42, - I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43, - I40E_FILTER_PCTYPE_NONF_IPV6_SCTP = 44, - I40E_FILTER_PCTYPE_NONF_IPV6_OTHER = 45, - I40E_FILTER_PCTYPE_FRAG_IPV6 = 46, - /* Note: Value 47 is reserved for future use */ - I40E_FILTER_PCTYPE_FCOE_OX = 48, - I40E_FILTER_PCTYPE_FCOE_RX = 49, - I40E_FILTER_PCTYPE_FCOE_OTHER = 50, - /* Note: Values 51-62 are reserved for future use */ - I40E_FILTER_PCTYPE_L2_PAYLOAD = 63, -}; - -enum i40e_filter_program_desc_dest { - I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET = 0x0, - I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX = 0x1, - I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER = 0x2, -}; - -enum i40e_filter_program_desc_fd_status { - I40E_FILTER_PROGRAM_DESC_FD_STATUS_NONE = 0x0, - I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID = 0x1, - I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID_4FLEX_BYTES = 0x2, - I40E_FILTER_PROGRAM_DESC_FD_STATUS_8FLEX_BYTES = 0x3, -}; - -#define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT 23 -#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK (0x1FFUL << \ - I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) - -#define I40E_TXD_FLTR_QW1_CMD_SHIFT 4 -#define I40E_TXD_FLTR_QW1_CMD_MASK (0xFFFFULL << \ - I40E_TXD_FLTR_QW1_CMD_SHIFT) - -#define I40E_TXD_FLTR_QW1_PCMD_SHIFT (0x0ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT) -#define I40E_TXD_FLTR_QW1_PCMD_MASK (0x7ULL << I40E_TXD_FLTR_QW1_PCMD_SHIFT) - -enum i40e_filter_program_desc_pcmd { - I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE = 0x1, - I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE = 0x2, -}; - -#define I40E_TXD_FLTR_QW1_DEST_SHIFT (0x3ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT) -#define I40E_TXD_FLTR_QW1_DEST_MASK (0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT) - -#define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT (0x7ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT) -#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK BIT_ULL(I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT) - -#define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT (0x9ULL + \ - I40E_TXD_FLTR_QW1_CMD_SHIFT) -#define I40E_TXD_FLTR_QW1_FD_STATUS_MASK (0x3ULL << \ - I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) - -#define I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT 20 -#define I40E_TXD_FLTR_QW1_CNTINDEX_MASK (0x1FFUL << \ - I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) - -enum i40e_filter_type { - 
I40E_FLOW_DIRECTOR_FLTR = 0, - I40E_PE_QUAD_HASH_FLTR = 1, - I40E_ETHERTYPE_FLTR, - I40E_FCOE_CTX_FLTR, - I40E_MAC_VLAN_FLTR, - I40E_HASH_FLTR -}; - -struct i40e_vsi_context { - u16 seid; - u16 uplink_seid; - u16 vsi_number; - u16 vsis_allocated; - u16 vsis_unallocated; - u16 flags; - u8 pf_num; - u8 vf_num; - u8 connection_type; - struct i40e_aqc_vsi_properties_data info; -}; - -struct i40e_veb_context { - u16 seid; - u16 uplink_seid; - u16 veb_number; - u16 vebs_allocated; - u16 vebs_unallocated; - u16 flags; - struct i40e_aqc_get_veb_parameters_completion info; -}; - -/* Statistics collected by each port, VSI, VEB, and S-channel */ -struct i40e_eth_stats { - u64 rx_bytes; /* gorc */ - u64 rx_unicast; /* uprc */ - u64 rx_multicast; /* mprc */ - u64 rx_broadcast; /* bprc */ - u64 rx_discards; /* rdpc */ - u64 rx_unknown_protocol; /* rupp */ - u64 tx_bytes; /* gotc */ - u64 tx_unicast; /* uptc */ - u64 tx_multicast; /* mptc */ - u64 tx_broadcast; /* bptc */ - u64 tx_discards; /* tdpc */ - u64 tx_errors; /* tepc */ -}; - -/* Statistics collected per VEB per TC */ -struct i40e_veb_tc_stats { - u64 tc_rx_packets[I40E_MAX_TRAFFIC_CLASS]; - u64 tc_rx_bytes[I40E_MAX_TRAFFIC_CLASS]; - u64 tc_tx_packets[I40E_MAX_TRAFFIC_CLASS]; - u64 tc_tx_bytes[I40E_MAX_TRAFFIC_CLASS]; -}; - -/* Statistics collected by the MAC */ -struct i40e_hw_port_stats { - /* eth stats collected by the port */ - struct i40e_eth_stats eth; - - /* additional port specific stats */ - u64 tx_dropped_link_down; /* tdold */ - u64 crc_errors; /* crcerrs */ - u64 illegal_bytes; /* illerrc */ - u64 error_bytes; /* errbc */ - u64 mac_local_faults; /* mlfc */ - u64 mac_remote_faults; /* mrfc */ - u64 rx_length_errors; /* rlec */ - u64 link_xon_rx; /* lxonrxc */ - u64 link_xoff_rx; /* lxoffrxc */ - u64 priority_xon_rx[8]; /* pxonrxc[8] */ - u64 priority_xoff_rx[8]; /* pxoffrxc[8] */ - u64 link_xon_tx; /* lxontxc */ - u64 link_xoff_tx; /* lxofftxc */ - u64 priority_xon_tx[8]; /* pxontxc[8] */ - u64 priority_xoff_tx[8]; /* pxofftxc[8] */ - u64 priority_xon_2_xoff[8]; /* pxon2offc[8] */ - u64 rx_size_64; /* prc64 */ - u64 rx_size_127; /* prc127 */ - u64 rx_size_255; /* prc255 */ - u64 rx_size_511; /* prc511 */ - u64 rx_size_1023; /* prc1023 */ - u64 rx_size_1522; /* prc1522 */ - u64 rx_size_big; /* prc9522 */ - u64 rx_undersize; /* ruc */ - u64 rx_fragments; /* rfc */ - u64 rx_oversize; /* roc */ - u64 rx_jabber; /* rjc */ - u64 tx_size_64; /* ptc64 */ - u64 tx_size_127; /* ptc127 */ - u64 tx_size_255; /* ptc255 */ - u64 tx_size_511; /* ptc511 */ - u64 tx_size_1023; /* ptc1023 */ - u64 tx_size_1522; /* ptc1522 */ - u64 tx_size_big; /* ptc9522 */ - u64 mac_short_packet_dropped; /* mspdc */ - u64 checksum_error; /* xec */ - /* flow director stats */ - u64 fd_atr_match; - u64 fd_sb_match; - u64 fd_atr_tunnel_match; - u32 fd_atr_status; - u32 fd_sb_status; - /* EEE LPI */ - u32 tx_lpi_status; - u32 rx_lpi_status; - u64 tx_lpi_count; /* etlpic */ - u64 rx_lpi_count; /* erlpic */ -}; - -/* Checksum and Shadow RAM pointers */ -#define I40E_SR_NVM_CONTROL_WORD 0x00 -#define I40E_EMP_MODULE_PTR 0x0F -#define I40E_SR_EMP_MODULE_PTR 0x48 -#define I40E_NVM_OEM_VER_OFF 0x83 -#define I40E_SR_NVM_DEV_STARTER_VERSION 0x18 -#define I40E_SR_NVM_WAKE_ON_LAN 0x19 -#define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27 -#define I40E_SR_NVM_EETRACK_LO 0x2D -#define I40E_SR_NVM_EETRACK_HI 0x2E -#define I40E_SR_VPD_PTR 0x2F -#define I40E_SR_PCIE_ALT_AUTO_LOAD_PTR 0x3E -#define I40E_SR_SW_CHECKSUM_WORD 0x3F - -/* Auxiliary field, mask and shift definition for Shadow 
RAM and NVM Flash */ -#define I40E_SR_VPD_MODULE_MAX_SIZE 1024 -#define I40E_SR_PCIE_ALT_MODULE_MAX_SIZE 1024 -#define I40E_SR_CONTROL_WORD_1_SHIFT 0x06 -#define I40E_SR_CONTROL_WORD_1_MASK (0x03 << I40E_SR_CONTROL_WORD_1_SHIFT) -#define I40E_SR_CONTROL_WORD_1_NVM_BANK_VALID BIT(5) -#define I40E_SR_NVM_MAP_STRUCTURE_TYPE BIT(12) -#define I40E_PTR_TYPE BIT(15) - -/* Shadow RAM related */ -#define I40E_SR_SECTOR_SIZE_IN_WORDS 0x800 -#define I40E_SR_WORDS_IN_1KB 512 -/* Checksum should be calculated such that after adding all the words, - * including the checksum word itself, the sum should be 0xBABA. - */ -#define I40E_SR_SW_CHECKSUM_BASE 0xBABA - -#define I40E_SRRD_SRCTL_ATTEMPTS 100000 - -enum i40e_switch_element_types { - I40E_SWITCH_ELEMENT_TYPE_MAC = 1, - I40E_SWITCH_ELEMENT_TYPE_PF = 2, - I40E_SWITCH_ELEMENT_TYPE_VF = 3, - I40E_SWITCH_ELEMENT_TYPE_EMP = 4, - I40E_SWITCH_ELEMENT_TYPE_BMC = 6, - I40E_SWITCH_ELEMENT_TYPE_PE = 16, - I40E_SWITCH_ELEMENT_TYPE_VEB = 17, - I40E_SWITCH_ELEMENT_TYPE_PA = 18, - I40E_SWITCH_ELEMENT_TYPE_VSI = 19, -}; - -/* Supported EtherType filters */ -enum i40e_ether_type_index { - I40E_ETHER_TYPE_1588 = 0, - I40E_ETHER_TYPE_FIP = 1, - I40E_ETHER_TYPE_OUI_EXTENDED = 2, - I40E_ETHER_TYPE_MAC_CONTROL = 3, - I40E_ETHER_TYPE_LLDP = 4, - I40E_ETHER_TYPE_EVB_PROTOCOL1 = 5, - I40E_ETHER_TYPE_EVB_PROTOCOL2 = 6, - I40E_ETHER_TYPE_QCN_CNM = 7, - I40E_ETHER_TYPE_8021X = 8, - I40E_ETHER_TYPE_ARP = 9, - I40E_ETHER_TYPE_RSV1 = 10, - I40E_ETHER_TYPE_RSV2 = 11, -}; - -/* Filter context base size is 1K */ -#define I40E_HASH_FILTER_BASE_SIZE 1024 -/* Supported Hash filter values */ -enum i40e_hash_filter_size { - I40E_HASH_FILTER_SIZE_1K = 0, - I40E_HASH_FILTER_SIZE_2K = 1, - I40E_HASH_FILTER_SIZE_4K = 2, - I40E_HASH_FILTER_SIZE_8K = 3, - I40E_HASH_FILTER_SIZE_16K = 4, - I40E_HASH_FILTER_SIZE_32K = 5, - I40E_HASH_FILTER_SIZE_64K = 6, - I40E_HASH_FILTER_SIZE_128K = 7, - I40E_HASH_FILTER_SIZE_256K = 8, - I40E_HASH_FILTER_SIZE_512K = 9, - I40E_HASH_FILTER_SIZE_1M = 10, -}; - -/* DMA context base size is 0.5K */ -#define I40E_DMA_CNTX_BASE_SIZE 512 -/* Supported DMA context values */ -enum i40e_dma_cntx_size { - I40E_DMA_CNTX_SIZE_512 = 0, - I40E_DMA_CNTX_SIZE_1K = 1, - I40E_DMA_CNTX_SIZE_2K = 2, - I40E_DMA_CNTX_SIZE_4K = 3, - I40E_DMA_CNTX_SIZE_8K = 4, - I40E_DMA_CNTX_SIZE_16K = 5, - I40E_DMA_CNTX_SIZE_32K = 6, - I40E_DMA_CNTX_SIZE_64K = 7, - I40E_DMA_CNTX_SIZE_128K = 8, - I40E_DMA_CNTX_SIZE_256K = 9, -}; - -/* Supported Hash look up table (LUT) sizes */ -enum i40e_hash_lut_size { - I40E_HASH_LUT_SIZE_128 = 0, - I40E_HASH_LUT_SIZE_512 = 1, -}; - -/* Structure to hold a per PF filter control settings */ -struct i40e_filter_control_settings { - /* number of PE Quad Hash filter buckets */ - enum i40e_hash_filter_size pe_filt_num; - /* number of PE Quad Hash contexts */ - enum i40e_dma_cntx_size pe_cntx_num; - /* number of FCoE filter buckets */ - enum i40e_hash_filter_size fcoe_filt_num; - /* number of FCoE DDP contexts */ - enum i40e_dma_cntx_size fcoe_cntx_num; - /* size of the Hash LUT */ - enum i40e_hash_lut_size hash_lut_size; - /* enable FDIR filters for PF and its VFs */ - bool enable_fdir; - /* enable Ethertype filters for PF and its VFs */ - bool enable_ethtype; - /* enable MAC/VLAN filters for PF and its VFs */ - bool enable_macvlan; -}; - -/* Structure to hold device level control filter counts */ -struct i40e_control_filter_stats { - u16 mac_etype_used; /* Used perfect match MAC/EtherType filters */ - u16 etype_used; /* Used perfect EtherType filters */ - u16 
mac_etype_free; /* Un-used perfect match MAC/EtherType filters */ - u16 etype_free; /* Un-used perfect EtherType filters */ -}; - -enum i40e_reset_type { - I40E_RESET_POR = 0, - I40E_RESET_CORER = 1, - I40E_RESET_GLOBR = 2, - I40E_RESET_EMPR = 3, -}; - -/* IEEE 802.1AB LLDP Agent Variables from NVM */ -#define I40E_NVM_LLDP_CFG_PTR 0x06 -#define I40E_SR_LLDP_CFG_PTR 0x31 - -/* RSS Hash Table Size */ -#define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000 - -/* INPUT SET MASK for RSS, flow director and flexible payload */ -#define I40E_FD_INSET_L3_SRC_SHIFT 47 -#define I40E_FD_INSET_L3_SRC_WORD_MASK (0x3ULL << \ - I40E_FD_INSET_L3_SRC_SHIFT) -#define I40E_FD_INSET_L3_DST_SHIFT 35 -#define I40E_FD_INSET_L3_DST_WORD_MASK (0x3ULL << \ - I40E_FD_INSET_L3_DST_SHIFT) -#define I40E_FD_INSET_L4_SRC_SHIFT 34 -#define I40E_FD_INSET_L4_SRC_WORD_MASK (0x1ULL << \ - I40E_FD_INSET_L4_SRC_SHIFT) -#define I40E_FD_INSET_L4_DST_SHIFT 33 -#define I40E_FD_INSET_L4_DST_WORD_MASK (0x1ULL << \ - I40E_FD_INSET_L4_DST_SHIFT) -#define I40E_FD_INSET_VERIFY_TAG_SHIFT 31 -#define I40E_FD_INSET_VERIFY_TAG_WORD_MASK (0x3ULL << \ - I40E_FD_INSET_VERIFY_TAG_SHIFT) - -#define I40E_FD_INSET_FLEX_WORD50_SHIFT 17 -#define I40E_FD_INSET_FLEX_WORD50_MASK (0x1ULL << \ - I40E_FD_INSET_FLEX_WORD50_SHIFT) -#define I40E_FD_INSET_FLEX_WORD51_SHIFT 16 -#define I40E_FD_INSET_FLEX_WORD51_MASK (0x1ULL << \ - I40E_FD_INSET_FLEX_WORD51_SHIFT) -#define I40E_FD_INSET_FLEX_WORD52_SHIFT 15 -#define I40E_FD_INSET_FLEX_WORD52_MASK (0x1ULL << \ - I40E_FD_INSET_FLEX_WORD52_SHIFT) -#define I40E_FD_INSET_FLEX_WORD53_SHIFT 14 -#define I40E_FD_INSET_FLEX_WORD53_MASK (0x1ULL << \ - I40E_FD_INSET_FLEX_WORD53_SHIFT) -#define I40E_FD_INSET_FLEX_WORD54_SHIFT 13 -#define I40E_FD_INSET_FLEX_WORD54_MASK (0x1ULL << \ - I40E_FD_INSET_FLEX_WORD54_SHIFT) -#define I40E_FD_INSET_FLEX_WORD55_SHIFT 12 -#define I40E_FD_INSET_FLEX_WORD55_MASK (0x1ULL << \ - I40E_FD_INSET_FLEX_WORD55_SHIFT) -#define I40E_FD_INSET_FLEX_WORD56_SHIFT 11 -#define I40E_FD_INSET_FLEX_WORD56_MASK (0x1ULL << \ - I40E_FD_INSET_FLEX_WORD56_SHIFT) -#define I40E_FD_INSET_FLEX_WORD57_SHIFT 10 -#define I40E_FD_INSET_FLEX_WORD57_MASK (0x1ULL << \ - I40E_FD_INSET_FLEX_WORD57_SHIFT) - -/* Version format for Dynamic Device Personalization(DDP) */ -struct i40e_ddp_version { - u8 major; - u8 minor; - u8 update; - u8 draft; -}; - -#define I40E_DDP_NAME_SIZE 32 - -/* Package header */ -struct i40e_package_header { - struct i40e_ddp_version version; - u32 segment_count; - u32 segment_offset[1]; -}; - -/* Generic segment header */ -struct i40e_generic_seg_header { -#define SEGMENT_TYPE_METADATA 0x00000001 -#define SEGMENT_TYPE_NOTES 0x00000002 -#define SEGMENT_TYPE_I40E 0x00000011 -#define SEGMENT_TYPE_X722 0x00000012 - u32 type; - struct i40e_ddp_version version; - u32 size; - char name[I40E_DDP_NAME_SIZE]; -}; - -struct i40e_metadata_segment { - struct i40e_generic_seg_header header; - struct i40e_ddp_version version; - u32 track_id; - char name[I40E_DDP_NAME_SIZE]; -}; - -struct i40e_device_id_entry { - u32 vendor_dev_id; - u32 sub_vendor_dev_id; -}; - -struct i40e_profile_segment { - struct i40e_generic_seg_header header; - struct i40e_ddp_version version; - char name[I40E_DDP_NAME_SIZE]; - u32 device_table_count; - struct i40e_device_id_entry device_table[1]; -}; - -struct i40e_section_table { - u32 section_count; - u32 section_offset[1]; -}; - -struct i40e_profile_section_header { - u16 tbl_size; - u16 data_end; - struct { -#define SECTION_TYPE_INFO 0x00000010 -#define SECTION_TYPE_MMIO 0x00000800 -#define 
SECTION_TYPE_AQ 0x00000801 -#define SECTION_TYPE_NOTE 0x80000000 -#define SECTION_TYPE_NAME 0x80000001 - u32 type; - u32 offset; - u32 size; - } section; -}; - -struct i40e_profile_info { - u32 track_id; - struct i40e_ddp_version version; - u8 op; -#define I40E_DDP_ADD_TRACKID 0x01 -#define I40E_DDP_REMOVE_TRACKID 0x02 - u8 reserved[7]; - u8 name[I40E_DDP_NAME_SIZE]; -}; -#endif /* _I40E_TYPE_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h deleted file mode 100644 index 96e537a35000..000000000000 --- a/drivers/net/ethernet/intel/i40evf/i40evf.h +++ /dev/null @@ -1,427 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ - -#ifndef _I40EVF_H_ -#define _I40EVF_H_ - -#include <linux/module.h> -#include <linux/pci.h> -#include <linux/aer.h> -#include <linux/netdevice.h> -#include <linux/vmalloc.h> -#include <linux/interrupt.h> -#include <linux/ethtool.h> -#include <linux/if_vlan.h> -#include <linux/ip.h> -#include <linux/tcp.h> -#include <linux/sctp.h> -#include <linux/ipv6.h> -#include <linux/kernel.h> -#include <linux/bitops.h> -#include <linux/timer.h> -#include <linux/workqueue.h> -#include <linux/wait.h> -#include <linux/delay.h> -#include <linux/gfp.h> -#include <linux/skbuff.h> -#include <linux/dma-mapping.h> -#include <linux/etherdevice.h> -#include <linux/socket.h> -#include <linux/jiffies.h> -#include <net/ip6_checksum.h> -#include <net/pkt_cls.h> -#include <net/udp.h> -#include <net/tc_act/tc_gact.h> -#include <net/tc_act/tc_mirred.h> - -#include "i40e_type.h" -#include <linux/avf/virtchnl.h> -#include "i40e_txrx.h" - -#define DEFAULT_DEBUG_LEVEL_SHIFT 3 -#define PFX "i40evf: " - -/* VSI state flags shared with common code */ -enum i40evf_vsi_state_t { - __I40E_VSI_DOWN, - /* This must be last as it determines the size of the BITMAP */ - __I40E_VSI_STATE_SIZE__, -}; - -/* dummy struct to make common code less painful */ -struct i40e_vsi { - struct i40evf_adapter *back; - struct net_device *netdev; - unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; - u16 seid; - u16 id; - DECLARE_BITMAP(state, __I40E_VSI_STATE_SIZE__); - int base_vector; - u16 work_limit; - u16 qs_handle; - void *priv; /* client driver data reference. */ -}; - -/* How many Rx Buffers do we bundle into one write to the hardware ? */ -#define I40EVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */ -#define I40EVF_DEFAULT_TXD 512 -#define I40EVF_DEFAULT_RXD 512 -#define I40EVF_MAX_TXD 4096 -#define I40EVF_MIN_TXD 64 -#define I40EVF_MAX_RXD 4096 -#define I40EVF_MIN_RXD 64 -#define I40EVF_REQ_DESCRIPTOR_MULTIPLE 32 -#define I40EVF_MAX_AQ_BUF_SIZE 4096 -#define I40EVF_AQ_LEN 32 -#define I40EVF_AQ_MAX_ERR 20 /* times to try before resetting AQ */ - -#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) - -#define I40E_RX_DESC(R, i) (&(((union i40e_32byte_rx_desc *)((R)->desc))[i])) -#define I40E_TX_DESC(R, i) (&(((struct i40e_tx_desc *)((R)->desc))[i])) -#define I40E_TX_CTXTDESC(R, i) \ - (&(((struct i40e_tx_context_desc *)((R)->desc))[i])) -#define I40EVF_MAX_REQ_QUEUES 4 - -#define I40EVF_HKEY_ARRAY_SIZE ((I40E_VFQF_HKEY_MAX_INDEX + 1) * 4) -#define I40EVF_HLUT_ARRAY_SIZE ((I40E_VFQF_HLUT_MAX_INDEX + 1) * 4) -#define I40EVF_MBPS_DIVISOR 125000 /* divisor to convert to Mbps */ - -/* MAX_MSIX_Q_VECTORS of these are allocated, - * but we only use one per queue-specific vector. 
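 * (Editor's note, illustrative and not from the original source: when the VF
 *  is granted fewer MSI-X vectors than it has queue pairs, several Tx/Rx ring
 *  pairs share a single q_vector; the num_ringpairs and ring_mask fields
 *  below record that mapping for the interrupt path.)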
- */ -struct i40e_q_vector { - struct i40evf_adapter *adapter; - struct i40e_vsi *vsi; - struct napi_struct napi; - struct i40e_ring_container rx; - struct i40e_ring_container tx; - u32 ring_mask; - u8 itr_countdown; /* when 0 should adjust adaptive ITR */ - u8 num_ringpairs; /* total number of ring pairs in vector */ - u16 v_idx; /* index in the vsi->q_vector array. */ - u16 reg_idx; /* register index of the interrupt */ - char name[IFNAMSIZ + 15]; - bool arm_wb_state; - cpumask_t affinity_mask; - struct irq_affinity_notify affinity_notify; -}; - -/* Helper macros to switch between ints/sec and what the register uses. - * And yes, it's the same math going both ways. The lowest value - * supported by all of the i40e hardware is 8. - */ -#define EITR_INTS_PER_SEC_TO_REG(_eitr) \ - ((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8) -#define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG - -#define I40EVF_DESC_UNUSED(R) \ - ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \ - (R)->next_to_clean - (R)->next_to_use - 1) - -#define I40EVF_RX_DESC_ADV(R, i) \ - (&(((union i40e_adv_rx_desc *)((R).desc))[i])) -#define I40EVF_TX_DESC_ADV(R, i) \ - (&(((union i40e_adv_tx_desc *)((R).desc))[i])) -#define I40EVF_TX_CTXTDESC_ADV(R, i) \ - (&(((struct i40e_adv_tx_context_desc *)((R).desc))[i])) - -#define OTHER_VECTOR 1 -#define NONQ_VECS (OTHER_VECTOR) - -#define MIN_MSIX_Q_VECTORS 1 -#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NONQ_VECS) - -#define I40EVF_QUEUE_END_OF_LIST 0x7FF -#define I40EVF_FREE_VECTOR 0x7FFF -struct i40evf_mac_filter { - struct list_head list; - u8 macaddr[ETH_ALEN]; - bool remove; /* filter needs to be removed */ - bool add; /* filter needs to be added */ -}; - -struct i40evf_vlan_filter { - struct list_head list; - u16 vlan; - bool remove; /* filter needs to be removed */ - bool add; /* filter needs to be added */ -}; - -#define I40EVF_MAX_TRAFFIC_CLASS 4 -/* State of traffic class creation */ -enum i40evf_tc_state_t { - __I40EVF_TC_INVALID, /* no traffic class, default state */ - __I40EVF_TC_RUNNING, /* traffic classes have been created */ -}; - -/* channel info */ -struct i40evf_channel_config { - struct virtchnl_channel_info ch_info[I40EVF_MAX_TRAFFIC_CLASS]; - enum i40evf_tc_state_t state; - u8 total_qps; -}; - -/* State of cloud filter */ -enum i40evf_cloud_filter_state_t { - __I40EVF_CF_INVALID, /* cloud filter not added */ - __I40EVF_CF_ADD_PENDING, /* cloud filter pending add by the PF */ - __I40EVF_CF_DEL_PENDING, /* cloud filter pending del by the PF */ - __I40EVF_CF_ACTIVE, /* cloud filter is active */ -}; - -/* Driver state. The order of these is important! 
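 * (Editor's note, an assumption drawn from the "Below here, watchdog is
 *  running" marker inside the enum: the driver appears to rely on the
 *  numeric order of these states, so entries should not be reordered.)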
*/ -enum i40evf_state_t { - __I40EVF_STARTUP, /* driver loaded, probe complete */ - __I40EVF_REMOVE, /* driver is being unloaded */ - __I40EVF_INIT_VERSION_CHECK, /* aq msg sent, awaiting reply */ - __I40EVF_INIT_GET_RESOURCES, /* aq msg sent, awaiting reply */ - __I40EVF_INIT_SW, /* got resources, setting up structs */ - __I40EVF_RESETTING, /* in reset */ - /* Below here, watchdog is running */ - __I40EVF_DOWN, /* ready, can be opened */ - __I40EVF_DOWN_PENDING, /* descending, waiting for watchdog */ - __I40EVF_TESTING, /* in ethtool self-test */ - __I40EVF_RUNNING, /* opened, working */ -}; - -enum i40evf_critical_section_t { - __I40EVF_IN_CRITICAL_TASK, /* cannot be interrupted */ - __I40EVF_IN_CLIENT_TASK, - __I40EVF_IN_REMOVE_TASK, /* device being removed */ -}; - -#define I40EVF_CLOUD_FIELD_OMAC 0x01 -#define I40EVF_CLOUD_FIELD_IMAC 0x02 -#define I40EVF_CLOUD_FIELD_IVLAN 0x04 -#define I40EVF_CLOUD_FIELD_TEN_ID 0x08 -#define I40EVF_CLOUD_FIELD_IIP 0x10 - -#define I40EVF_CF_FLAGS_OMAC I40EVF_CLOUD_FIELD_OMAC -#define I40EVF_CF_FLAGS_IMAC I40EVF_CLOUD_FIELD_IMAC -#define I40EVF_CF_FLAGS_IMAC_IVLAN (I40EVF_CLOUD_FIELD_IMAC |\ - I40EVF_CLOUD_FIELD_IVLAN) -#define I40EVF_CF_FLAGS_IMAC_TEN_ID (I40EVF_CLOUD_FIELD_IMAC |\ - I40EVF_CLOUD_FIELD_TEN_ID) -#define I40EVF_CF_FLAGS_OMAC_TEN_ID_IMAC (I40EVF_CLOUD_FIELD_OMAC |\ - I40EVF_CLOUD_FIELD_IMAC |\ - I40EVF_CLOUD_FIELD_TEN_ID) -#define I40EVF_CF_FLAGS_IMAC_IVLAN_TEN_ID (I40EVF_CLOUD_FIELD_IMAC |\ - I40EVF_CLOUD_FIELD_IVLAN |\ - I40EVF_CLOUD_FIELD_TEN_ID) -#define I40EVF_CF_FLAGS_IIP I40E_CLOUD_FIELD_IIP - -/* bookkeeping of cloud filters */ -struct i40evf_cloud_filter { - enum i40evf_cloud_filter_state_t state; - struct list_head list; - struct virtchnl_filter f; - unsigned long cookie; - bool del; /* filter needs to be deleted */ - bool add; /* filter needs to be added */ -}; - -/* board specific private data structure */ -struct i40evf_adapter { - struct timer_list watchdog_timer; - struct work_struct reset_task; - struct work_struct adminq_task; - struct delayed_work client_task; - struct delayed_work init_task; - wait_queue_head_t down_waitqueue; - struct i40e_q_vector *q_vectors; - struct list_head vlan_filter_list; - struct list_head mac_filter_list; - /* Lock to protect accesses to MAC and VLAN lists */ - spinlock_t mac_vlan_list_lock; - char misc_vector_name[IFNAMSIZ + 9]; - int num_active_queues; - int num_req_queues; - - /* TX */ - struct i40e_ring *tx_rings; - u32 tx_timeout_count; - u32 tx_desc_count; - - /* RX */ - struct i40e_ring *rx_rings; - u64 hw_csum_rx_error; - u32 rx_desc_count; - int num_msix_vectors; - int num_iwarp_msix; - int iwarp_base_vector; - u32 client_pending; - struct i40e_client_instance *cinst; - struct msix_entry *msix_entries; - - u32 flags; -#define I40EVF_FLAG_RX_CSUM_ENABLED BIT(0) -#define I40EVF_FLAG_PF_COMMS_FAILED BIT(3) -#define I40EVF_FLAG_RESET_PENDING BIT(4) -#define I40EVF_FLAG_RESET_NEEDED BIT(5) -#define I40EVF_FLAG_WB_ON_ITR_CAPABLE BIT(6) -#define I40EVF_FLAG_ADDR_SET_BY_PF BIT(8) -#define I40EVF_FLAG_SERVICE_CLIENT_REQUESTED BIT(9) -#define I40EVF_FLAG_CLIENT_NEEDS_OPEN BIT(10) -#define I40EVF_FLAG_CLIENT_NEEDS_CLOSE BIT(11) -#define I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS BIT(12) -#define I40EVF_FLAG_PROMISC_ON BIT(13) -#define I40EVF_FLAG_ALLMULTI_ON BIT(14) -#define I40EVF_FLAG_LEGACY_RX BIT(15) -#define I40EVF_FLAG_REINIT_ITR_NEEDED BIT(16) -#define I40EVF_FLAG_QUEUES_DISABLED BIT(17) -/* duplicates for common code */ -#define I40E_FLAG_DCB_ENABLED 0 -#define I40E_FLAG_RX_CSUM_ENABLED 
I40EVF_FLAG_RX_CSUM_ENABLED -#define I40E_FLAG_LEGACY_RX I40EVF_FLAG_LEGACY_RX - /* flags for admin queue service task */ - u32 aq_required; -#define I40EVF_FLAG_AQ_ENABLE_QUEUES BIT(0) -#define I40EVF_FLAG_AQ_DISABLE_QUEUES BIT(1) -#define I40EVF_FLAG_AQ_ADD_MAC_FILTER BIT(2) -#define I40EVF_FLAG_AQ_ADD_VLAN_FILTER BIT(3) -#define I40EVF_FLAG_AQ_DEL_MAC_FILTER BIT(4) -#define I40EVF_FLAG_AQ_DEL_VLAN_FILTER BIT(5) -#define I40EVF_FLAG_AQ_CONFIGURE_QUEUES BIT(6) -#define I40EVF_FLAG_AQ_MAP_VECTORS BIT(7) -#define I40EVF_FLAG_AQ_HANDLE_RESET BIT(8) -#define I40EVF_FLAG_AQ_CONFIGURE_RSS BIT(9) /* direct AQ config */ -#define I40EVF_FLAG_AQ_GET_CONFIG BIT(10) -/* Newer style, RSS done by the PF so we can ignore hardware vagaries. */ -#define I40EVF_FLAG_AQ_GET_HENA BIT(11) -#define I40EVF_FLAG_AQ_SET_HENA BIT(12) -#define I40EVF_FLAG_AQ_SET_RSS_KEY BIT(13) -#define I40EVF_FLAG_AQ_SET_RSS_LUT BIT(14) -#define I40EVF_FLAG_AQ_REQUEST_PROMISC BIT(15) -#define I40EVF_FLAG_AQ_RELEASE_PROMISC BIT(16) -#define I40EVF_FLAG_AQ_REQUEST_ALLMULTI BIT(17) -#define I40EVF_FLAG_AQ_RELEASE_ALLMULTI BIT(18) -#define I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING BIT(19) -#define I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING BIT(20) -#define I40EVF_FLAG_AQ_ENABLE_CHANNELS BIT(21) -#define I40EVF_FLAG_AQ_DISABLE_CHANNELS BIT(22) -#define I40EVF_FLAG_AQ_ADD_CLOUD_FILTER BIT(23) -#define I40EVF_FLAG_AQ_DEL_CLOUD_FILTER BIT(24) - - /* OS defined structs */ - struct net_device *netdev; - struct pci_dev *pdev; - - struct i40e_hw hw; /* defined in i40e_type.h */ - - enum i40evf_state_t state; - unsigned long crit_section; - - struct work_struct watchdog_task; - bool netdev_registered; - bool link_up; - enum virtchnl_link_speed link_speed; - enum virtchnl_ops current_op; -#define CLIENT_ALLOWED(_a) ((_a)->vf_res ? \ - (_a)->vf_res->vf_cap_flags & \ - VIRTCHNL_VF_OFFLOAD_IWARP : \ - 0) -#define CLIENT_ENABLED(_a) ((_a)->cinst) -/* RSS by the PF should be preferred over RSS via other methods. */ -#define RSS_PF(_a) ((_a)->vf_res->vf_cap_flags & \ - VIRTCHNL_VF_OFFLOAD_RSS_PF) -#define RSS_AQ(_a) ((_a)->vf_res->vf_cap_flags & \ - VIRTCHNL_VF_OFFLOAD_RSS_AQ) -#define RSS_REG(_a) (!((_a)->vf_res->vf_cap_flags & \ - (VIRTCHNL_VF_OFFLOAD_RSS_AQ | \ - VIRTCHNL_VF_OFFLOAD_RSS_PF))) -#define VLAN_ALLOWED(_a) ((_a)->vf_res->vf_cap_flags & \ - VIRTCHNL_VF_OFFLOAD_VLAN) - struct virtchnl_vf_resource *vf_res; /* incl. 
all VSIs */ - struct virtchnl_vsi_resource *vsi_res; /* our LAN VSI */ - struct virtchnl_version_info pf_version; -#define PF_IS_V11(_a) (((_a)->pf_version.major == 1) && \ - ((_a)->pf_version.minor == 1)) - u16 msg_enable; - struct i40e_eth_stats current_stats; - struct i40e_vsi vsi; - u32 aq_wait_count; - /* RSS stuff */ - u64 hena; - u16 rss_key_size; - u16 rss_lut_size; - u8 *rss_key; - u8 *rss_lut; - /* ADQ related members */ - struct i40evf_channel_config ch_config; - u8 num_tc; - struct list_head cloud_filter_list; - /* lock to protect access to the cloud filter list */ - spinlock_t cloud_filter_list_lock; - u16 num_cloud_filters; -}; - - -/* Ethtool Private Flags */ - -/* lan device */ -struct i40e_device { - struct list_head list; - struct i40evf_adapter *vf; -}; - -/* needed by i40evf_ethtool.c */ -extern char i40evf_driver_name[]; -extern const char i40evf_driver_version[]; - -int i40evf_up(struct i40evf_adapter *adapter); -void i40evf_down(struct i40evf_adapter *adapter); -int i40evf_process_config(struct i40evf_adapter *adapter); -void i40evf_schedule_reset(struct i40evf_adapter *adapter); -void i40evf_reset(struct i40evf_adapter *adapter); -void i40evf_set_ethtool_ops(struct net_device *netdev); -void i40evf_update_stats(struct i40evf_adapter *adapter); -void i40evf_reset_interrupt_capability(struct i40evf_adapter *adapter); -int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter); -void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask); -void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter); -void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter); - -void i40e_napi_add_all(struct i40evf_adapter *adapter); -void i40e_napi_del_all(struct i40evf_adapter *adapter); - -int i40evf_send_api_ver(struct i40evf_adapter *adapter); -int i40evf_verify_api_ver(struct i40evf_adapter *adapter); -int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter); -int i40evf_get_vf_config(struct i40evf_adapter *adapter); -void i40evf_irq_enable(struct i40evf_adapter *adapter, bool flush); -void i40evf_configure_queues(struct i40evf_adapter *adapter); -void i40evf_deconfigure_queues(struct i40evf_adapter *adapter); -void i40evf_enable_queues(struct i40evf_adapter *adapter); -void i40evf_disable_queues(struct i40evf_adapter *adapter); -void i40evf_map_queues(struct i40evf_adapter *adapter); -int i40evf_request_queues(struct i40evf_adapter *adapter, int num); -void i40evf_add_ether_addrs(struct i40evf_adapter *adapter); -void i40evf_del_ether_addrs(struct i40evf_adapter *adapter); -void i40evf_add_vlans(struct i40evf_adapter *adapter); -void i40evf_del_vlans(struct i40evf_adapter *adapter); -void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags); -void i40evf_request_stats(struct i40evf_adapter *adapter); -void i40evf_request_reset(struct i40evf_adapter *adapter); -void i40evf_get_hena(struct i40evf_adapter *adapter); -void i40evf_set_hena(struct i40evf_adapter *adapter); -void i40evf_set_rss_key(struct i40evf_adapter *adapter); -void i40evf_set_rss_lut(struct i40evf_adapter *adapter); -void i40evf_enable_vlan_stripping(struct i40evf_adapter *adapter); -void i40evf_disable_vlan_stripping(struct i40evf_adapter *adapter); -void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, - enum virtchnl_ops v_opcode, - i40e_status v_retval, u8 *msg, u16 msglen); -int i40evf_config_rss(struct i40evf_adapter *adapter); -int i40evf_lan_add_device(struct i40evf_adapter *adapter); -int i40evf_lan_del_device(struct i40evf_adapter *adapter);
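/* Editor's illustrative sketch, not part of the original header: most of the
 * filter and queue operations declared above are not invoked synchronously.
 * Callers instead set one of the I40EVF_FLAG_AQ_ bits in aq_required and let
 * the watchdog task send the corresponding virtchnl message later. The helper
 * below is hypothetical; only the field and flag names are taken from the
 * declarations above.
 */
static inline void i40evf_example_request_vlan_add(struct i40evf_adapter *adapter)
{
	/* record the request; the watchdog eventually calls i40evf_add_vlans() */
	adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
}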
-void i40evf_client_subtask(struct i40evf_adapter *adapter); -void i40evf_notify_client_message(struct i40e_vsi *vsi, u8 *msg, u16 len); -void i40evf_notify_client_l2_params(struct i40e_vsi *vsi); -void i40evf_notify_client_open(struct i40e_vsi *vsi); -void i40evf_notify_client_close(struct i40e_vsi *vsi, bool reset); -void i40evf_enable_channels(struct i40evf_adapter *adapter); -void i40evf_disable_channels(struct i40evf_adapter *adapter); -void i40evf_add_cloud_filter(struct i40evf_adapter *adapter); -void i40evf_del_cloud_filter(struct i40evf_adapter *adapter); -#endif /* _I40EVF_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c deleted file mode 100644 index 69efe0aec76a..000000000000 --- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c +++ /dev/null @@ -1,820 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2013 - 2018 Intel Corporation. */ - -/* ethtool support for i40evf */ -#include "i40evf.h" - -#include <linux/uaccess.h> - -struct i40evf_stats { - char stat_string[ETH_GSTRING_LEN]; - int stat_offset; -}; - -#define I40EVF_STAT(_name, _stat) { \ - .stat_string = _name, \ - .stat_offset = offsetof(struct i40evf_adapter, _stat) \ -} - -/* All stats are u64, so we don't need to track the size of the field. */ -static const struct i40evf_stats i40evf_gstrings_stats[] = { - I40EVF_STAT("rx_bytes", current_stats.rx_bytes), - I40EVF_STAT("rx_unicast", current_stats.rx_unicast), - I40EVF_STAT("rx_multicast", current_stats.rx_multicast), - I40EVF_STAT("rx_broadcast", current_stats.rx_broadcast), - I40EVF_STAT("rx_discards", current_stats.rx_discards), - I40EVF_STAT("rx_unknown_protocol", current_stats.rx_unknown_protocol), - I40EVF_STAT("tx_bytes", current_stats.tx_bytes), - I40EVF_STAT("tx_unicast", current_stats.tx_unicast), - I40EVF_STAT("tx_multicast", current_stats.tx_multicast), - I40EVF_STAT("tx_broadcast", current_stats.tx_broadcast), - I40EVF_STAT("tx_discards", current_stats.tx_discards), - I40EVF_STAT("tx_errors", current_stats.tx_errors), -}; - -#define I40EVF_GLOBAL_STATS_LEN ARRAY_SIZE(i40evf_gstrings_stats) -#define I40EVF_QUEUE_STATS_LEN(_dev) \ - (((struct i40evf_adapter *)\ - netdev_priv(_dev))->num_active_queues \ - * 2 * (sizeof(struct i40e_queue_stats) / sizeof(u64))) -#define I40EVF_STATS_LEN(_dev) \ - (I40EVF_GLOBAL_STATS_LEN + I40EVF_QUEUE_STATS_LEN(_dev)) - -/* For now we have one and only one private flag and it is only defined - * when we have support for the SKIP_CPU_SYNC DMA attribute. Instead - * of leaving all this code sitting around empty we will strip it unless - * our one private flag is actually available. - */ -struct i40evf_priv_flags { - char flag_string[ETH_GSTRING_LEN]; - u32 flag; - bool read_only; -}; - -#define I40EVF_PRIV_FLAG(_name, _flag, _read_only) { \ - .flag_string = _name, \ - .flag = _flag, \ - .read_only = _read_only, \ -} - -static const struct i40evf_priv_flags i40evf_gstrings_priv_flags[] = { - I40EVF_PRIV_FLAG("legacy-rx", I40EVF_FLAG_LEGACY_RX, 0), -}; - -#define I40EVF_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40evf_gstrings_priv_flags) - -/** - * i40evf_get_link_ksettings - Get Link Speed and Duplex settings - * @netdev: network interface device structure - * @cmd: ethtool command - * - * Reports speed/duplex settings. Because this is a VF, we don't know what - * kind of link we really have, so we fake it. 
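 * (Editor's illustration: if the PF reported I40E_LINK_SPEED_40GB, the code
 *  below returns SPEED_40000 with DUPLEX_FULL, autoneg disabled and
 *  PORT_NONE, which is exactly what `ethtool <iface>` then displays.)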
- **/ -static int i40evf_get_link_ksettings(struct net_device *netdev, - struct ethtool_link_ksettings *cmd) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - - ethtool_link_ksettings_zero_link_mode(cmd, supported); - cmd->base.autoneg = AUTONEG_DISABLE; - cmd->base.port = PORT_NONE; - /* Set speed and duplex */ - switch (adapter->link_speed) { - case I40E_LINK_SPEED_40GB: - cmd->base.speed = SPEED_40000; - break; - case I40E_LINK_SPEED_25GB: -#ifdef SPEED_25000 - cmd->base.speed = SPEED_25000; -#else - netdev_info(netdev, - "Speed is 25G, display not supported by this version of ethtool.\n"); -#endif - break; - case I40E_LINK_SPEED_20GB: - cmd->base.speed = SPEED_20000; - break; - case I40E_LINK_SPEED_10GB: - cmd->base.speed = SPEED_10000; - break; - case I40E_LINK_SPEED_1GB: - cmd->base.speed = SPEED_1000; - break; - case I40E_LINK_SPEED_100MB: - cmd->base.speed = SPEED_100; - break; - default: - break; - } - cmd->base.duplex = DUPLEX_FULL; - - return 0; -} - -/** - * i40evf_get_sset_count - Get length of string set - * @netdev: network interface device structure - * @sset: id of string set - * - * Reports size of string table. This driver only supports - * strings for statistics. - **/ -static int i40evf_get_sset_count(struct net_device *netdev, int sset) -{ - if (sset == ETH_SS_STATS) - return I40EVF_STATS_LEN(netdev); - else if (sset == ETH_SS_PRIV_FLAGS) - return I40EVF_PRIV_FLAGS_STR_LEN; - else - return -EINVAL; -} - -/** - * i40evf_get_ethtool_stats - report device statistics - * @netdev: network interface device structure - * @stats: ethtool statistics structure - * @data: pointer to data buffer - * - * All statistics are added to the data buffer as an array of u64. - **/ -static void i40evf_get_ethtool_stats(struct net_device *netdev, - struct ethtool_stats *stats, u64 *data) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - unsigned int i, j; - char *p; - - for (i = 0; i < I40EVF_GLOBAL_STATS_LEN; i++) { - p = (char *)adapter + i40evf_gstrings_stats[i].stat_offset; - data[i] = *(u64 *)p; - } - for (j = 0; j < adapter->num_active_queues; j++) { - data[i++] = adapter->tx_rings[j].stats.packets; - data[i++] = adapter->tx_rings[j].stats.bytes; - } - for (j = 0; j < adapter->num_active_queues; j++) { - data[i++] = adapter->rx_rings[j].stats.packets; - data[i++] = adapter->rx_rings[j].stats.bytes; - } -} - -/** - * i40evf_get_strings - Get string set - * @netdev: network interface device structure - * @sset: id of string set - * @data: buffer for string data - * - * Builds stats string table. 
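 * (Editor's illustration: with two active queues the table is the twelve
 *  global stat strings followed by tx-0.packets, tx-0.bytes, tx-1.packets,
 *  tx-1.bytes and then the matching four rx-N entries, mirroring the order
 *  in which i40evf_get_ethtool_stats() fills the data buffer.)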
- **/ -static void i40evf_get_strings(struct net_device *netdev, u32 sset, u8 *data) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - u8 *p = data; - int i; - - if (sset == ETH_SS_STATS) { - for (i = 0; i < (int)I40EVF_GLOBAL_STATS_LEN; i++) { - memcpy(p, i40evf_gstrings_stats[i].stat_string, - ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - } - for (i = 0; i < adapter->num_active_queues; i++) { - snprintf(p, ETH_GSTRING_LEN, "tx-%u.packets", i); - p += ETH_GSTRING_LEN; - snprintf(p, ETH_GSTRING_LEN, "tx-%u.bytes", i); - p += ETH_GSTRING_LEN; - } - for (i = 0; i < adapter->num_active_queues; i++) { - snprintf(p, ETH_GSTRING_LEN, "rx-%u.packets", i); - p += ETH_GSTRING_LEN; - snprintf(p, ETH_GSTRING_LEN, "rx-%u.bytes", i); - p += ETH_GSTRING_LEN; - } - } else if (sset == ETH_SS_PRIV_FLAGS) { - for (i = 0; i < I40EVF_PRIV_FLAGS_STR_LEN; i++) { - snprintf(p, ETH_GSTRING_LEN, "%s", - i40evf_gstrings_priv_flags[i].flag_string); - p += ETH_GSTRING_LEN; - } - } -} - -/** - * i40evf_get_priv_flags - report device private flags - * @netdev: network interface device structure - * - * The get string set count and the string set should be matched for each - * flag returned. Add new strings for each flag to the i40e_gstrings_priv_flags - * array. - * - * Returns a u32 bitmap of flags. - **/ -static u32 i40evf_get_priv_flags(struct net_device *netdev) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - u32 i, ret_flags = 0; - - for (i = 0; i < I40EVF_PRIV_FLAGS_STR_LEN; i++) { - const struct i40evf_priv_flags *priv_flags; - - priv_flags = &i40evf_gstrings_priv_flags[i]; - - if (priv_flags->flag & adapter->flags) - ret_flags |= BIT(i); - } - - return ret_flags; -} - -/** - * i40evf_set_priv_flags - set private flags - * @netdev: network interface device structure - * @flags: bit flags to be set - **/ -static int i40evf_set_priv_flags(struct net_device *netdev, u32 flags) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - u32 orig_flags, new_flags, changed_flags; - u32 i; - - orig_flags = READ_ONCE(adapter->flags); - new_flags = orig_flags; - - for (i = 0; i < I40EVF_PRIV_FLAGS_STR_LEN; i++) { - const struct i40evf_priv_flags *priv_flags; - - priv_flags = &i40evf_gstrings_priv_flags[i]; - - if (flags & BIT(i)) - new_flags |= priv_flags->flag; - else - new_flags &= ~(priv_flags->flag); - - if (priv_flags->read_only && - ((orig_flags ^ new_flags) & ~BIT(i))) - return -EOPNOTSUPP; - } - - /* Before we finalize any flag changes, any checks which we need to - * perform to determine if the new flags will be supported should go - * here... - */ - - /* Compare and exchange the new flags into place. If we failed, that - * is if cmpxchg returns anything but the old value, this means - * something else must have modified the flags variable since we - * copied it. We'll just punt with an error and log something in the - * message buffer. - */ - if (cmpxchg(&adapter->flags, orig_flags, new_flags) != orig_flags) { - dev_warn(&adapter->pdev->dev, - "Unable to update adapter->flags as it was modified by another thread...\n"); - return -EAGAIN; - } - - changed_flags = orig_flags ^ new_flags; - - /* Process any additional changes needed as a result of flag changes. - * The changed_flags value reflects the list of bits that were changed - * in the code above. 
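 * (Editor's illustration: if userspace toggled only legacy-rx, changed_flags
 *  equals I40EVF_FLAG_LEGACY_RX and the block below schedules a reset so the
 *  new Rx buffer layout can take effect.)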
- */ - - /* issue a reset to force legacy-rx change to take effect */ - if (changed_flags & I40EVF_FLAG_LEGACY_RX) { - if (netif_running(netdev)) { - adapter->flags |= I40EVF_FLAG_RESET_NEEDED; - schedule_work(&adapter->reset_task); - } - } - - return 0; -} - -/** - * i40evf_get_msglevel - Get debug message level - * @netdev: network interface device structure - * - * Returns current debug message level. - **/ -static u32 i40evf_get_msglevel(struct net_device *netdev) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - - return adapter->msg_enable; -} - -/** - * i40evf_set_msglevel - Set debug message level - * @netdev: network interface device structure - * @data: message level - * - * Set current debug message level. Higher values cause the driver to - * be noisier. - **/ -static void i40evf_set_msglevel(struct net_device *netdev, u32 data) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - - if (I40E_DEBUG_USER & data) - adapter->hw.debug_mask = data; - adapter->msg_enable = data; -} - -/** - * i40evf_get_drvinfo - Get driver info - * @netdev: network interface device structure - * @drvinfo: ethtool driver info structure - * - * Returns information about the driver and device for display to the user. - **/ -static void i40evf_get_drvinfo(struct net_device *netdev, - struct ethtool_drvinfo *drvinfo) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - - strlcpy(drvinfo->driver, i40evf_driver_name, 32); - strlcpy(drvinfo->version, i40evf_driver_version, 32); - strlcpy(drvinfo->fw_version, "N/A", 4); - strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); - drvinfo->n_priv_flags = I40EVF_PRIV_FLAGS_STR_LEN; -} - -/** - * i40evf_get_ringparam - Get ring parameters - * @netdev: network interface device structure - * @ring: ethtool ringparam structure - * - * Returns current ring parameters. TX and RX rings are reported separately, - * but the number of rings is not reported. - **/ -static void i40evf_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - - ring->rx_max_pending = I40EVF_MAX_RXD; - ring->tx_max_pending = I40EVF_MAX_TXD; - ring->rx_pending = adapter->rx_desc_count; - ring->tx_pending = adapter->tx_desc_count; -} - -/** - * i40evf_set_ringparam - Set ring parameters - * @netdev: network interface device structure - * @ring: ethtool ringparam structure - * - * Sets ring parameters. TX and RX rings are controlled separately, but the - * number of rings is not specified, so all rings get the same settings.
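 * (Editor's illustration: a request of tx_pending == 500 is clamped to the
 *  64..4096 range and then rounded up to a multiple of 32 by ALIGN(),
 *  yielding 512 descriptors; a request of 8192 is simply clamped to 4096.)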
- **/ -static int i40evf_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - u32 new_rx_count, new_tx_count; - - if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) - return -EINVAL; - - new_tx_count = clamp_t(u32, ring->tx_pending, - I40EVF_MIN_TXD, - I40EVF_MAX_TXD); - new_tx_count = ALIGN(new_tx_count, I40EVF_REQ_DESCRIPTOR_MULTIPLE); - - new_rx_count = clamp_t(u32, ring->rx_pending, - I40EVF_MIN_RXD, - I40EVF_MAX_RXD); - new_rx_count = ALIGN(new_rx_count, I40EVF_REQ_DESCRIPTOR_MULTIPLE); - - /* if nothing to do return success */ - if ((new_tx_count == adapter->tx_desc_count) && - (new_rx_count == adapter->rx_desc_count)) - return 0; - - adapter->tx_desc_count = new_tx_count; - adapter->rx_desc_count = new_rx_count; - - if (netif_running(netdev)) { - adapter->flags |= I40EVF_FLAG_RESET_NEEDED; - schedule_work(&adapter->reset_task); - } - - return 0; -} - -/** - * __i40evf_get_coalesce - get per-queue coalesce settings - * @netdev: the netdev to check - * @ec: ethtool coalesce data structure - * @queue: which queue to pick - * - * Gets the per-queue settings for coalescence. Specifically Rx and Tx usecs - * are per queue. If queue is <0 then we default to queue 0 as the - * representative value. - **/ -static int __i40evf_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec, - int queue) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - struct i40e_vsi *vsi = &adapter->vsi; - struct i40e_ring *rx_ring, *tx_ring; - - ec->tx_max_coalesced_frames = vsi->work_limit; - ec->rx_max_coalesced_frames = vsi->work_limit; - - /* Rx and Tx usecs per queue value. If user doesn't specify the - * queue, return queue 0's value to represent. - */ - if (queue < 0) - queue = 0; - else if (queue >= adapter->num_active_queues) - return -EINVAL; - - rx_ring = &adapter->rx_rings[queue]; - tx_ring = &adapter->tx_rings[queue]; - - if (ITR_IS_DYNAMIC(rx_ring->itr_setting)) - ec->use_adaptive_rx_coalesce = 1; - - if (ITR_IS_DYNAMIC(tx_ring->itr_setting)) - ec->use_adaptive_tx_coalesce = 1; - - ec->rx_coalesce_usecs = rx_ring->itr_setting & ~I40E_ITR_DYNAMIC; - ec->tx_coalesce_usecs = tx_ring->itr_setting & ~I40E_ITR_DYNAMIC; - - return 0; -} - -/** - * i40evf_get_coalesce - Get interrupt coalescing settings - * @netdev: network interface device structure - * @ec: ethtool coalesce structure - * - * Returns current coalescing settings. This is referred to elsewhere in the - * driver as Interrupt Throttle Rate, as this is how the hardware describes - * this functionality. Note that if per-queue settings have been modified this - * only represents the settings of queue 0. - **/ -static int i40evf_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) -{ - return __i40evf_get_coalesce(netdev, ec, -1); -} - -/** - * i40evf_get_per_queue_coalesce - get coalesce values for specific queue - * @netdev: netdev to read - * @ec: coalesce settings from ethtool - * @queue: the queue to read - * - * Read specific queue's coalesce settings. - **/ -static int i40evf_get_per_queue_coalesce(struct net_device *netdev, - u32 queue, - struct ethtool_coalesce *ec) -{ - return __i40evf_get_coalesce(netdev, ec, queue); -} - -/** - * i40evf_set_itr_per_queue - set ITR values for specific queue - * @adapter: the VF adapter struct to set values for - * @ec: coalesce settings from ethtool - * @queue: the queue to modify - * - * Change the ITR settings for a specific queue. 
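 * (Editor's note on the code below: I40E_ITR_DYNAMIC is first set
 *  unconditionally and then cleared again with ^= when adaptive coalescing
 *  was not requested - an idiom equivalent to setting the bit only when
 *  use_adaptive_rx_coalesce / use_adaptive_tx_coalesce is nonzero.)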
**/ -static void i40evf_set_itr_per_queue(struct i40evf_adapter *adapter, - struct ethtool_coalesce *ec, - int queue) -{ - struct i40e_ring *rx_ring = &adapter->rx_rings[queue]; - struct i40e_ring *tx_ring = &adapter->tx_rings[queue]; - struct i40e_q_vector *q_vector; - - rx_ring->itr_setting = ITR_REG_ALIGN(ec->rx_coalesce_usecs); - tx_ring->itr_setting = ITR_REG_ALIGN(ec->tx_coalesce_usecs); - - rx_ring->itr_setting |= I40E_ITR_DYNAMIC; - if (!ec->use_adaptive_rx_coalesce) - rx_ring->itr_setting ^= I40E_ITR_DYNAMIC; - - tx_ring->itr_setting |= I40E_ITR_DYNAMIC; - if (!ec->use_adaptive_tx_coalesce) - tx_ring->itr_setting ^= I40E_ITR_DYNAMIC; - - q_vector = rx_ring->q_vector; - q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting); - - q_vector = tx_ring->q_vector; - q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting); - - /* The interrupt handler itself will take care of programming - * the Tx and Rx ITR values based on the values we have entered - * into the q_vector, no need to write the values now. - */ -} - -/** - * __i40evf_set_coalesce - set coalesce settings for particular queue - * @netdev: the netdev to change - * @ec: ethtool coalesce settings - * @queue: the queue to change - * - * Sets the coalesce settings for a particular queue. - **/ -static int __i40evf_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec, - int queue) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - struct i40e_vsi *vsi = &adapter->vsi; - int i; - - if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq) - vsi->work_limit = ec->tx_max_coalesced_frames_irq; - - if (ec->rx_coalesce_usecs == 0) { - if (ec->use_adaptive_rx_coalesce) - netif_info(adapter, drv, netdev, "rx-usecs=0, need to disable adaptive-rx for a complete disable\n"); - } else if ((ec->rx_coalesce_usecs < I40E_MIN_ITR) || - (ec->rx_coalesce_usecs > I40E_MAX_ITR)) { - netif_info(adapter, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n"); - return -EINVAL; - } - - else - if (ec->tx_coalesce_usecs == 0) { - if (ec->use_adaptive_tx_coalesce) - netif_info(adapter, drv, netdev, "tx-usecs=0, need to disable adaptive-tx for a complete disable\n"); - } else if ((ec->tx_coalesce_usecs < I40E_MIN_ITR) || - (ec->tx_coalesce_usecs > I40E_MAX_ITR)) { - netif_info(adapter, drv, netdev, "Invalid value, tx-usecs range is 0-8160\n"); - return -EINVAL; - } - - /* Rx and Tx usecs have a per-queue value. If user doesn't specify the - * queue, apply to all queues. - */ - if (queue < 0) { - for (i = 0; i < adapter->num_active_queues; i++) - i40evf_set_itr_per_queue(adapter, ec, i); - } else if (queue < adapter->num_active_queues) { - i40evf_set_itr_per_queue(adapter, ec, queue); - } else { - netif_info(adapter, drv, netdev, "Invalid queue value, queue range is 0 - %d\n", - adapter->num_active_queues - 1); - return -EINVAL; - } - - return 0; -} - -/** - * i40evf_set_coalesce - Set interrupt coalescing settings - * @netdev: network interface device structure - * @ec: ethtool coalesce structure - * - * Change current coalescing settings for every queue. - **/ -static int i40evf_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) -{ - return __i40evf_set_coalesce(netdev, ec, -1); -} - -/** - * i40evf_set_per_queue_coalesce - set specific queue's coalesce settings - * @netdev: the netdev to change - * @ec: ethtool's coalesce settings - * @queue: the queue to modify - * - * Modifies a specific queue's coalesce settings. 
- */ -static int i40evf_set_per_queue_coalesce(struct net_device *netdev, - u32 queue, - struct ethtool_coalesce *ec) -{ - return __i40evf_set_coalesce(netdev, ec, queue); -} - -/** - * i40evf_get_rxnfc - command to get RX flow classification rules - * @netdev: network interface device structure - * @cmd: ethtool rxnfc command - * @rule_locs: pointer to store rule locations - * - * Returns Success if the command is supported. - **/ -static int i40evf_get_rxnfc(struct net_device *netdev, - struct ethtool_rxnfc *cmd, - u32 *rule_locs) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - int ret = -EOPNOTSUPP; - - switch (cmd->cmd) { - case ETHTOOL_GRXRINGS: - cmd->data = adapter->num_active_queues; - ret = 0; - break; - case ETHTOOL_GRXFH: - netdev_info(netdev, - "RSS hash info is not available to vf, use pf.\n"); - break; - default: - break; - } - - return ret; -} -/** - * i40evf_get_channels: get the number of channels supported by the device - * @netdev: network interface device structure - * @ch: channel information structure - * - * For the purposes of our device, we only use combined channels, i.e. a tx/rx - * queue pair. Report one extra channel to match our "other" MSI-X vector. - **/ -static void i40evf_get_channels(struct net_device *netdev, - struct ethtool_channels *ch) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - - /* Report maximum channels */ - ch->max_combined = I40EVF_MAX_REQ_QUEUES; - - ch->max_other = NONQ_VECS; - ch->other_count = NONQ_VECS; - - ch->combined_count = adapter->num_active_queues; -} - -/** - * i40evf_set_channels: set the new channel count - * @netdev: network interface device structure - * @ch: channel information structure - * - * Negotiate a new number of channels with the PF then do a reset. During - * reset we'll realloc queues and fix the RSS table. Returns 0 on success, - * negative on failure. - **/ -static int i40evf_set_channels(struct net_device *netdev, - struct ethtool_channels *ch) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - int num_req = ch->combined_count; - - if (num_req != adapter->num_active_queues && - !(adapter->vf_res->vf_cap_flags & - VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)) { - dev_info(&adapter->pdev->dev, "PF is not capable of queue negotiation.\n"); - return -EINVAL; - } - - if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) && - adapter->num_tc) { - dev_info(&adapter->pdev->dev, "Cannot set channels since ADq is enabled.\n"); - return -EINVAL; - } - - /* All of these should have already been checked by ethtool before this - * even gets to us, but just to be sure. - */ - if (num_req <= 0 || num_req > I40EVF_MAX_REQ_QUEUES) - return -EINVAL; - - if (ch->rx_count || ch->tx_count || ch->other_count != NONQ_VECS) - return -EINVAL; - - adapter->num_req_queues = num_req; - return i40evf_request_queues(adapter, num_req); -} - -/** - * i40evf_get_rxfh_key_size - get the RSS hash key size - * @netdev: network interface device structure - * - * Returns the table size. - **/ -static u32 i40evf_get_rxfh_key_size(struct net_device *netdev) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - - return adapter->rss_key_size; -} - -/** - * i40evf_get_rxfh_indir_size - get the rx flow hash indirection table size - * @netdev: network interface device structure - * - * Returns the table size. 
**/ -static u32 i40evf_get_rxfh_indir_size(struct net_device *netdev) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - - return adapter->rss_lut_size; -} - -/** - * i40evf_get_rxfh - get the rx flow hash indirection table - * @netdev: network interface device structure - * @indir: indirection table - * @key: hash key - * @hfunc: hash function in use - * - * Reads the indirection table directly from the hardware. Always returns 0. - **/ -static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, - u8 *hfunc) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - u16 i; - - if (hfunc) - *hfunc = ETH_RSS_HASH_TOP; - if (!indir) - return 0; - - memcpy(key, adapter->rss_key, adapter->rss_key_size); - - /* Each 32 bits pointed by 'indir' is stored with a lut entry */ - for (i = 0; i < adapter->rss_lut_size; i++) - indir[i] = (u32)adapter->rss_lut[i]; - - return 0; -} - -/** - * i40evf_set_rxfh - set the rx flow hash indirection table - * @netdev: network interface device structure - * @indir: indirection table - * @key: hash key - * @hfunc: hash function to use - * - * Returns -EINVAL if the table specifies an invalid queue id, otherwise - * returns 0 after programming the table. - **/ -static int i40evf_set_rxfh(struct net_device *netdev, const u32 *indir, - const u8 *key, const u8 hfunc) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - u16 i; - - /* We do not allow change in unsupported parameters */ - if (key || - (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) - return -EOPNOTSUPP; - if (!indir) - return 0; - - if (key) { - memcpy(adapter->rss_key, key, adapter->rss_key_size); - } - - /* Each 32 bits pointed by 'indir' is stored with a lut entry */ - for (i = 0; i < adapter->rss_lut_size; i++) - adapter->rss_lut[i] = (u8)(indir[i]); - - return i40evf_config_rss(adapter); -} - -static const struct ethtool_ops i40evf_ethtool_ops = { - .get_drvinfo = i40evf_get_drvinfo, - .get_link = ethtool_op_get_link, - .get_ringparam = i40evf_get_ringparam, - .set_ringparam = i40evf_set_ringparam, - .get_strings = i40evf_get_strings, - .get_ethtool_stats = i40evf_get_ethtool_stats, - .get_sset_count = i40evf_get_sset_count, - .get_priv_flags = i40evf_get_priv_flags, - .set_priv_flags = i40evf_set_priv_flags, - .get_msglevel = i40evf_get_msglevel, - .set_msglevel = i40evf_set_msglevel, - .get_coalesce = i40evf_get_coalesce, - .set_coalesce = i40evf_set_coalesce, - .get_per_queue_coalesce = i40evf_get_per_queue_coalesce, - .set_per_queue_coalesce = i40evf_set_per_queue_coalesce, - .get_rxnfc = i40evf_get_rxnfc, - .get_rxfh_indir_size = i40evf_get_rxfh_indir_size, - .get_rxfh = i40evf_get_rxfh, - .set_rxfh = i40evf_set_rxfh, - .get_channels = i40evf_get_channels, - .set_channels = i40evf_set_channels, - .get_rxfh_key_size = i40evf_get_rxfh_key_size, - .get_link_ksettings = i40evf_get_link_ksettings, -}; - -/** - * i40evf_set_ethtool_ops - Initialize ethtool ops struct - * @netdev: network interface device structure - * - * Sets ethtool ops struct in our netdev so that ethtool can call - * our functions. - **/ -void i40evf_set_ethtool_ops(struct net_device *netdev) -{ - netdev->ethtool_ops = &i40evf_ethtool_ops; -} diff --git a/drivers/net/ethernet/intel/iavf/Makefile b/drivers/net/ethernet/intel/iavf/Makefile new file mode 100644 index 000000000000..9cbb5743ed12 --- /dev/null +++ b/drivers/net/ethernet/intel/iavf/Makefile @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright(c) 2013 - 2018 Intel Corporation. 
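+# +# The module builds as iavf.ko and is selected by the CONFIG_IAVF Kconfig +# symbol used in the obj- rule below. 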
+# +# Makefile for the Intel(R) Ethernet Adaptive Virtual Function (iavf) +# driver +# +# + +ccflags-y += -I$(src) +subdir-ccflags-y += -I$(src) + +obj-$(CONFIG_IAVF) += iavf.o + +iavf-objs := iavf_main.o iavf_ethtool.o iavf_virtchnl.o \ + iavf_txrx.o iavf_common.o i40e_adminq.o iavf_client.o diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/iavf/i40e_adminq.c index 21a0dbf6ccf6..fca1ecfd9f71 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c +++ b/drivers/net/ethernet/intel/iavf/i40e_adminq.c @@ -1,21 +1,11 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2018 Intel Corporation. */ -#include "i40e_status.h" -#include "i40e_type.h" -#include "i40e_register.h" +#include "iavf_status.h" +#include "iavf_type.h" +#include "iavf_register.h" #include "i40e_adminq.h" -#include "i40e_prototype.h" - -/** - * i40e_is_nvm_update_op - return true if this is an NVM update operation - * @desc: API request descriptor - **/ -static inline bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc) -{ - return (desc->opcode == i40e_aqc_opc_nvm_erase) || - (desc->opcode == i40e_aqc_opc_nvm_update); -} +#include "iavf_prototype.h" /** * i40e_adminq_init_regs - Initialize AdminQ registers @@ -23,44 +13,42 @@ static inline bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc) * * This assumes the alloc_asq and alloc_arq functions have already been called **/ -static void i40e_adminq_init_regs(struct i40e_hw *hw) +static void i40e_adminq_init_regs(struct iavf_hw *hw) { /* set head and tail registers in our local struct */ - if (i40e_is_vf(hw)) { - hw->aq.asq.tail = I40E_VF_ATQT1; - hw->aq.asq.head = I40E_VF_ATQH1; - hw->aq.asq.len = I40E_VF_ATQLEN1; - hw->aq.asq.bal = I40E_VF_ATQBAL1; - hw->aq.asq.bah = I40E_VF_ATQBAH1; - hw->aq.arq.tail = I40E_VF_ARQT1; - hw->aq.arq.head = I40E_VF_ARQH1; - hw->aq.arq.len = I40E_VF_ARQLEN1; - hw->aq.arq.bal = I40E_VF_ARQBAL1; - hw->aq.arq.bah = I40E_VF_ARQBAH1; - } + hw->aq.asq.tail = IAVF_VF_ATQT1; + hw->aq.asq.head = IAVF_VF_ATQH1; + hw->aq.asq.len = IAVF_VF_ATQLEN1; + hw->aq.asq.bal = IAVF_VF_ATQBAL1; + hw->aq.asq.bah = IAVF_VF_ATQBAH1; + hw->aq.arq.tail = IAVF_VF_ARQT1; + hw->aq.arq.head = IAVF_VF_ARQH1; + hw->aq.arq.len = IAVF_VF_ARQLEN1; + hw->aq.arq.bal = IAVF_VF_ARQBAL1; + hw->aq.arq.bah = IAVF_VF_ARQBAH1; } /** * i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings * @hw: pointer to the hardware structure **/ -static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw) +static iavf_status i40e_alloc_adminq_asq_ring(struct iavf_hw *hw) { - i40e_status ret_code; + iavf_status ret_code; - ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf, + ret_code = iavf_allocate_dma_mem(hw, &hw->aq.asq.desc_buf, i40e_mem_atq_ring, (hw->aq.num_asq_entries * sizeof(struct i40e_aq_desc)), - I40E_ADMINQ_DESC_ALIGNMENT); + IAVF_ADMINQ_DESC_ALIGNMENT); if (ret_code) return ret_code; - ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf, + ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf, (hw->aq.num_asq_entries * sizeof(struct i40e_asq_cmd_details))); if (ret_code) { - i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf); + iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf); return ret_code; } @@ -71,15 +59,15 @@ static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw) * i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings * @hw: pointer to the hardware structure **/ -static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw) +static iavf_status i40e_alloc_adminq_arq_ring(struct iavf_hw 
*hw) { - i40e_status ret_code; + iavf_status ret_code; - ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf, + ret_code = iavf_allocate_dma_mem(hw, &hw->aq.arq.desc_buf, i40e_mem_arq_ring, (hw->aq.num_arq_entries * sizeof(struct i40e_aq_desc)), - I40E_ADMINQ_DESC_ALIGNMENT); + IAVF_ADMINQ_DESC_ALIGNMENT); return ret_code; } @@ -91,9 +79,9 @@ static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw) * This assumes the posted send buffers have already been cleaned * and de-allocated **/ -static void i40e_free_adminq_asq(struct i40e_hw *hw) +static void i40e_free_adminq_asq(struct iavf_hw *hw) { - i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf); + iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf); } /** @@ -103,20 +91,20 @@ static void i40e_free_adminq_asq(struct i40e_hw *hw) * This assumes the posted receive buffers have already been cleaned * and de-allocated **/ -static void i40e_free_adminq_arq(struct i40e_hw *hw) +static void i40e_free_adminq_arq(struct iavf_hw *hw) { - i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf); + iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf); } /** * i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue * @hw: pointer to the hardware structure **/ -static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw) +static iavf_status i40e_alloc_arq_bufs(struct iavf_hw *hw) { - i40e_status ret_code; struct i40e_aq_desc *desc; - struct i40e_dma_mem *bi; + struct iavf_dma_mem *bi; + iavf_status ret_code; int i; /* We'll be allocating the buffer info memory first, then we can @@ -124,24 +112,25 @@ static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw) */ /* buffer_info structures do not need alignment */ - ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head, - (hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem))); + ret_code = iavf_allocate_virt_mem(hw, &hw->aq.arq.dma_head, + (hw->aq.num_arq_entries * + sizeof(struct iavf_dma_mem))); if (ret_code) goto alloc_arq_bufs; - hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va; + hw->aq.arq.r.arq_bi = (struct iavf_dma_mem *)hw->aq.arq.dma_head.va; /* allocate the mapped buffers */ for (i = 0; i < hw->aq.num_arq_entries; i++) { bi = &hw->aq.arq.r.arq_bi[i]; - ret_code = i40e_allocate_dma_mem(hw, bi, + ret_code = iavf_allocate_dma_mem(hw, bi, i40e_mem_arq_buf, hw->aq.arq_buf_size, - I40E_ADMINQ_DESC_ALIGNMENT); + IAVF_ADMINQ_DESC_ALIGNMENT); if (ret_code) goto unwind_alloc_arq_bufs; /* now configure the descriptors for use */ - desc = I40E_ADMINQ_DESC(hw->aq.arq, i); + desc = IAVF_ADMINQ_DESC(hw->aq.arq, i); desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF); if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF) @@ -169,8 +158,8 @@ unwind_alloc_arq_bufs: /* don't try to free the one that failed... 
*/ i--; for (; i >= 0; i--) - i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]); - i40e_free_virt_mem(hw, &hw->aq.arq.dma_head); + iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]); + iavf_free_virt_mem(hw, &hw->aq.arq.dma_head); return ret_code; } @@ -179,26 +168,27 @@ unwind_alloc_arq_bufs: * i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue * @hw: pointer to the hardware structure **/ -static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw) +static iavf_status i40e_alloc_asq_bufs(struct iavf_hw *hw) { - i40e_status ret_code; - struct i40e_dma_mem *bi; + struct iavf_dma_mem *bi; + iavf_status ret_code; int i; /* No mapped memory needed yet, just the buffer info structures */ - ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head, - (hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem))); + ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.dma_head, + (hw->aq.num_asq_entries * + sizeof(struct iavf_dma_mem))); if (ret_code) goto alloc_asq_bufs; - hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va; + hw->aq.asq.r.asq_bi = (struct iavf_dma_mem *)hw->aq.asq.dma_head.va; /* allocate the mapped buffers */ for (i = 0; i < hw->aq.num_asq_entries; i++) { bi = &hw->aq.asq.r.asq_bi[i]; - ret_code = i40e_allocate_dma_mem(hw, bi, + ret_code = iavf_allocate_dma_mem(hw, bi, i40e_mem_asq_buf, hw->aq.asq_buf_size, - I40E_ADMINQ_DESC_ALIGNMENT); + IAVF_ADMINQ_DESC_ALIGNMENT); if (ret_code) goto unwind_alloc_asq_bufs; } @@ -209,8 +199,8 @@ unwind_alloc_asq_bufs: /* don't try to free the one that failed... */ i--; for (; i >= 0; i--) - i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]); - i40e_free_virt_mem(hw, &hw->aq.asq.dma_head); + iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]); + iavf_free_virt_mem(hw, &hw->aq.asq.dma_head); return ret_code; } @@ -219,42 +209,42 @@ unwind_alloc_asq_bufs: * i40e_free_arq_bufs - Free receive queue buffer info elements * @hw: pointer to the hardware structure **/ -static void i40e_free_arq_bufs(struct i40e_hw *hw) +static void i40e_free_arq_bufs(struct iavf_hw *hw) { int i; /* free descriptors */ for (i = 0; i < hw->aq.num_arq_entries; i++) - i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]); + iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]); /* free the descriptor memory */ - i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf); + iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf); /* free the dma header */ - i40e_free_virt_mem(hw, &hw->aq.arq.dma_head); + iavf_free_virt_mem(hw, &hw->aq.arq.dma_head); } /** * i40e_free_asq_bufs - Free send queue buffer info elements * @hw: pointer to the hardware structure **/ -static void i40e_free_asq_bufs(struct i40e_hw *hw) +static void i40e_free_asq_bufs(struct iavf_hw *hw) { int i; /* only unmap if the address is non-NULL */ for (i = 0; i < hw->aq.num_asq_entries; i++) if (hw->aq.asq.r.asq_bi[i].pa) - i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]); + iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]); /* free the buffer info list */ - i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf); + iavf_free_virt_mem(hw, &hw->aq.asq.cmd_buf); /* free the descriptor memory */ - i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf); + iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf); /* free the dma header */ - i40e_free_virt_mem(hw, &hw->aq.asq.dma_head); + iavf_free_virt_mem(hw, &hw->aq.asq.dma_head); } /** @@ -263,9 +253,9 @@ static void i40e_free_asq_bufs(struct i40e_hw *hw) * * Configure base address and length registers for the transmit queue **/ -static i40e_status i40e_config_asq_regs(struct i40e_hw *hw) +static iavf_status 
i40e_config_asq_regs(struct iavf_hw *hw) { - i40e_status ret_code = 0; + iavf_status ret_code = 0; u32 reg = 0; /* Clear Head and Tail */ @@ -274,7 +264,7 @@ static i40e_status i40e_config_asq_regs(struct i40e_hw *hw) /* set starting point */ wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries | - I40E_VF_ATQLEN1_ATQENABLE_MASK)); + IAVF_VF_ATQLEN1_ATQENABLE_MASK)); wr32(hw, hw->aq.asq.bal, lower_32_bits(hw->aq.asq.desc_buf.pa)); wr32(hw, hw->aq.asq.bah, upper_32_bits(hw->aq.asq.desc_buf.pa)); @@ -292,9 +282,9 @@ static i40e_status i40e_config_asq_regs(struct i40e_hw *hw) * * Configure base address and length registers for the receive (event queue) **/ -static i40e_status i40e_config_arq_regs(struct i40e_hw *hw) +static iavf_status i40e_config_arq_regs(struct iavf_hw *hw) { - i40e_status ret_code = 0; + iavf_status ret_code = 0; u32 reg = 0; /* Clear Head and Tail */ @@ -303,7 +293,7 @@ static i40e_status i40e_config_arq_regs(struct i40e_hw *hw) /* set starting point */ wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries | - I40E_VF_ARQLEN1_ARQENABLE_MASK)); + IAVF_VF_ARQLEN1_ARQENABLE_MASK)); wr32(hw, hw->aq.arq.bal, lower_32_bits(hw->aq.arq.desc_buf.pa)); wr32(hw, hw->aq.arq.bah, upper_32_bits(hw->aq.arq.desc_buf.pa)); @@ -331,9 +321,9 @@ static i40e_status i40e_config_arq_regs(struct i40e_hw *hw) * Do *NOT* hold the lock when calling this as the memory allocation routines * called are not going to be atomic context safe **/ -static i40e_status i40e_init_asq(struct i40e_hw *hw) +static iavf_status i40e_init_asq(struct iavf_hw *hw) { - i40e_status ret_code = 0; + iavf_status ret_code = 0; if (hw->aq.asq.count > 0) { /* queue already initialized */ @@ -390,9 +380,9 @@ init_adminq_exit: * Do *NOT* hold the lock when calling this as the memory allocation routines * called are not going to be atomic context safe **/ -static i40e_status i40e_init_arq(struct i40e_hw *hw) +static iavf_status i40e_init_arq(struct iavf_hw *hw) { - i40e_status ret_code = 0; + iavf_status ret_code = 0; if (hw->aq.arq.count > 0) { /* queue already initialized */ @@ -442,9 +432,9 @@ init_adminq_exit: * * The main shutdown routine for the Admin Send Queue **/ -static i40e_status i40e_shutdown_asq(struct i40e_hw *hw) +static iavf_status i40e_shutdown_asq(struct iavf_hw *hw) { - i40e_status ret_code = 0; + iavf_status ret_code = 0; mutex_lock(&hw->aq.asq_mutex); @@ -476,9 +466,9 @@ shutdown_asq_out: * * The main shutdown routine for the Admin Receive Queue **/ -static i40e_status i40e_shutdown_arq(struct i40e_hw *hw) +static iavf_status i40e_shutdown_arq(struct iavf_hw *hw) { - i40e_status ret_code = 0; + iavf_status ret_code = 0; mutex_lock(&hw->aq.arq_mutex); @@ -505,7 +495,7 @@ shutdown_arq_out: } /** - * i40evf_init_adminq - main initialization routine for Admin Queue + * iavf_init_adminq - main initialization routine for Admin Queue * @hw: pointer to the hardware structure * * Prior to calling this function, drivers *MUST* set the following fields @@ -515,9 +505,9 @@ shutdown_arq_out: * - hw->aq.arq_buf_size * - hw->aq.asq_buf_size **/ -i40e_status i40evf_init_adminq(struct i40e_hw *hw) +iavf_status iavf_init_adminq(struct iavf_hw *hw) { - i40e_status ret_code; + iavf_status ret_code; /* verify input for valid configuration */ if ((hw->aq.num_arq_entries == 0) || @@ -556,22 +546,19 @@ init_adminq_exit: } /** - * i40evf_shutdown_adminq - shutdown routine for the Admin Queue + * iavf_shutdown_adminq - shutdown routine for the Admin Queue * @hw: pointer to the hardware structure **/ -i40e_status i40evf_shutdown_adminq(struct 
i40e_hw *hw) +iavf_status iavf_shutdown_adminq(struct iavf_hw *hw) { - i40e_status ret_code = 0; + iavf_status ret_code = 0; - if (i40evf_check_asq_alive(hw)) - i40evf_aq_queue_shutdown(hw, true); + if (iavf_check_asq_alive(hw)) + iavf_aq_queue_shutdown(hw, true); i40e_shutdown_asq(hw); i40e_shutdown_arq(hw); - if (hw->nvm_buff.va) - i40e_free_virt_mem(hw, &hw->nvm_buff); - return ret_code; } @@ -581,18 +568,18 @@ i40e_status i40evf_shutdown_adminq(struct i40e_hw *hw) * * returns the number of free desc **/ -static u16 i40e_clean_asq(struct i40e_hw *hw) +static u16 i40e_clean_asq(struct iavf_hw *hw) { - struct i40e_adminq_ring *asq = &(hw->aq.asq); + struct iavf_adminq_ring *asq = &hw->aq.asq; struct i40e_asq_cmd_details *details; u16 ntc = asq->next_to_clean; struct i40e_aq_desc desc_cb; struct i40e_aq_desc *desc; - desc = I40E_ADMINQ_DESC(*asq, ntc); + desc = IAVF_ADMINQ_DESC(*asq, ntc); details = I40E_ADMINQ_DETAILS(*asq, ntc); while (rd32(hw, hw->aq.asq.head) != ntc) { - i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, + iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head)); if (details->callback) { @@ -607,33 +594,32 @@ static u16 i40e_clean_asq(struct i40e_hw *hw) ntc++; if (ntc == asq->count) ntc = 0; - desc = I40E_ADMINQ_DESC(*asq, ntc); + desc = IAVF_ADMINQ_DESC(*asq, ntc); details = I40E_ADMINQ_DETAILS(*asq, ntc); } asq->next_to_clean = ntc; - return I40E_DESC_UNUSED(asq); + return IAVF_DESC_UNUSED(asq); } /** - * i40evf_asq_done - check if FW has processed the Admin Send Queue + * iavf_asq_done - check if FW has processed the Admin Send Queue * @hw: pointer to the hw struct * * Returns true if the firmware has processed all descriptors on the * admin send queue. Returns false if there are still requests pending. **/ -bool i40evf_asq_done(struct i40e_hw *hw) +bool iavf_asq_done(struct iavf_hw *hw) { /* AQ designers suggest use of head for better * timing reliability than DD bit */ return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use; - } /** - * i40evf_asq_send_command - send command to Admin Queue + * iavf_asq_send_command - send command to Admin Queue * @hw: pointer to the hw struct * @desc: prefilled descriptor describing the command (non DMA mem) * @buff: buffer to use for indirect commands @@ -643,24 +629,23 @@ bool i40evf_asq_done(struct i40e_hw *hw) * This is the main send command driver routine for the Admin Queue send * queue. 
It runs the queue, cleans the queue, etc **/ -i40e_status i40evf_asq_send_command(struct i40e_hw *hw, - struct i40e_aq_desc *desc, - void *buff, /* can be NULL */ - u16 buff_size, - struct i40e_asq_cmd_details *cmd_details) +iavf_status iavf_asq_send_command(struct iavf_hw *hw, struct i40e_aq_desc *desc, + void *buff, /* can be NULL */ + u16 buff_size, + struct i40e_asq_cmd_details *cmd_details) { - i40e_status status = 0; - struct i40e_dma_mem *dma_buff = NULL; + struct iavf_dma_mem *dma_buff = NULL; struct i40e_asq_cmd_details *details; struct i40e_aq_desc *desc_on_ring; bool cmd_completed = false; + iavf_status status = 0; u16 retval = 0; u32 val = 0; mutex_lock(&hw->aq.asq_mutex); if (hw->aq.asq.count == 0) { - i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, + iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQTX: Admin queue not initialized.\n"); status = I40E_ERR_QUEUE_EMPTY; goto asq_send_command_error; @@ -670,7 +655,7 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw, val = rd32(hw, hw->aq.asq.head); if (val >= hw->aq.num_asq_entries) { - i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, + iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQTX: head overrun at %d\n", val); status = I40E_ERR_QUEUE_EMPTY; goto asq_send_command_error; @@ -699,8 +684,8 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw, desc->flags |= cpu_to_le16(details->flags_ena); if (buff_size > hw->aq.asq_buf_size) { - i40e_debug(hw, - I40E_DEBUG_AQ_MESSAGE, + iavf_debug(hw, + IAVF_DEBUG_AQ_MESSAGE, "AQTX: Invalid buffer size: %d.\n", buff_size); status = I40E_ERR_INVALID_SIZE; @@ -708,8 +693,8 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw, } if (details->postpone && !details->async) { - i40e_debug(hw, - I40E_DEBUG_AQ_MESSAGE, + iavf_debug(hw, + IAVF_DEBUG_AQ_MESSAGE, "AQTX: Async flag not set along with postpone flag"); status = I40E_ERR_PARAM; goto asq_send_command_error; @@ -723,22 +708,22 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw, * in case of asynchronous completions */ if (i40e_clean_asq(hw) == 0) { - i40e_debug(hw, - I40E_DEBUG_AQ_MESSAGE, + iavf_debug(hw, + IAVF_DEBUG_AQ_MESSAGE, "AQTX: Error queue is full.\n"); status = I40E_ERR_ADMIN_QUEUE_FULL; goto asq_send_command_error; } /* initialize the temp desc pointer with the right desc */ - desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use); + desc_on_ring = IAVF_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use); /* if the desc is available copy the temp desc to the right place */ *desc_on_ring = *desc; /* if buff is not NULL assume indirect command */ - if (buff != NULL) { - dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]); + if (buff) { + dma_buff = &hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]; /* copy the user buff into the respective DMA buff */ memcpy(dma_buff->va, buff, buff_size); desc_on_ring->datalen = cpu_to_le16(buff_size); @@ -753,9 +738,9 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw, } /* bump the tail */ - i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n"); - i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring, - buff, buff_size); + iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n"); + iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc_on_ring, + buff, buff_size); (hw->aq.asq.next_to_use)++; if (hw->aq.asq.next_to_use == hw->aq.asq.count) hw->aq.asq.next_to_use = 0; @@ -772,7 +757,7 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw, /* AQ designers suggest use of head for better * timing reliability than DD bit */ - if (i40evf_asq_done(hw)) + if 
(iavf_asq_done(hw)) break; udelay(50); total_delay += 50; @@ -780,14 +765,14 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw, } /* if ready, copy the desc back to temp */ - if (i40evf_asq_done(hw)) { + if (iavf_asq_done(hw)) { *desc = *desc_on_ring; - if (buff != NULL) + if (buff) memcpy(buff, dma_buff->va, buff_size); retval = le16_to_cpu(desc->retval); if (retval != 0) { - i40e_debug(hw, - I40E_DEBUG_AQ_MESSAGE, + iavf_debug(hw, + IAVF_DEBUG_AQ_MESSAGE, "AQTX: Command completed with error 0x%X.\n", retval); @@ -804,10 +789,9 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw, hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval; } - i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, + iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer writeback:\n"); - i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, - buff_size); + iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size); /* save writeback aq if requested */ if (details->wb_desc) @@ -816,12 +800,12 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw, /* update the error if time out occurred */ if ((!cmd_completed) && (!details->async && !details->postpone)) { - if (rd32(hw, hw->aq.asq.len) & I40E_VF_ATQLEN1_ATQCRIT_MASK) { - i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, + if (rd32(hw, hw->aq.asq.len) & IAVF_VF_ATQLEN1_ATQCRIT_MASK) { + iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQTX: AQ Critical error.\n"); status = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR; } else { - i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, + iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQTX: Writeback timeout.\n"); status = I40E_ERR_ADMIN_QUEUE_TIMEOUT; } @@ -833,14 +817,13 @@ asq_send_command_error: } /** - * i40evf_fill_default_direct_cmd_desc - AQ descriptor helper function + * iavf_fill_default_direct_cmd_desc - AQ descriptor helper function * @desc: pointer to the temp descriptor (non DMA mem) * @opcode: the opcode can be used to decide which flags to turn off or on * * Fill the desc with default values **/ -void i40evf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, - u16 opcode) +void iavf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, u16 opcode) { /* zero out the desc */ memset((void *)desc, 0, sizeof(struct i40e_aq_desc)); @@ -849,7 +832,7 @@ void i40evf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, } /** - * i40evf_clean_arq_element + * iavf_clean_arq_element * @hw: pointer to the hw struct * @e: event info from the receive descriptor, includes any buffers * @pending: number of events that could be left to process @@ -858,14 +841,14 @@ void i40evf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, * the contents through e. 
It can also return how many events are * left to process through 'pending' **/ -i40e_status i40evf_clean_arq_element(struct i40e_hw *hw, - struct i40e_arq_event_info *e, - u16 *pending) +iavf_status iavf_clean_arq_element(struct iavf_hw *hw, + struct i40e_arq_event_info *e, + u16 *pending) { - i40e_status ret_code = 0; u16 ntc = hw->aq.arq.next_to_clean; struct i40e_aq_desc *desc; - struct i40e_dma_mem *bi; + iavf_status ret_code = 0; + struct iavf_dma_mem *bi; u16 desc_idx; u16 datalen; u16 flags; @@ -878,14 +861,14 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw, mutex_lock(&hw->aq.arq_mutex); if (hw->aq.arq.count == 0) { - i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, + iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQRX: Admin queue not initialized.\n"); ret_code = I40E_ERR_QUEUE_EMPTY; goto clean_arq_element_err; } /* set next_to_use to head */ - ntu = rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK; + ntu = rd32(hw, hw->aq.arq.head) & IAVF_VF_ARQH1_ARQH_MASK; if (ntu == ntc) { /* nothing to do - shouldn't need to update ring's values */ ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK; @@ -893,7 +876,7 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw, } /* now clean the next descriptor */ - desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc); + desc = IAVF_ADMINQ_DESC(hw->aq.arq, ntc); desc_idx = ntc; hw->aq.arq_last_status = @@ -901,8 +884,8 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw, flags = le16_to_cpu(desc->flags); if (flags & I40E_AQ_FLAG_ERR) { ret_code = I40E_ERR_ADMIN_QUEUE_ERROR; - i40e_debug(hw, - I40E_DEBUG_AQ_MESSAGE, + iavf_debug(hw, + IAVF_DEBUG_AQ_MESSAGE, "AQRX: Event received with error 0x%X.\n", hw->aq.arq_last_status); } @@ -910,13 +893,13 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw, e->desc = *desc; datalen = le16_to_cpu(desc->datalen); e->msg_len = min(datalen, e->buf_len); - if (e->msg_buf != NULL && (e->msg_len != 0)) + if (e->msg_buf && (e->msg_len != 0)) memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va, e->msg_len); - i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n"); - i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf, - hw->aq.arq_buf_size); + iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n"); + iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf, + hw->aq.arq_buf_size); /* Restore the original datalen and buffer address in the desc, * FW updates datalen to indicate the event message @@ -943,7 +926,7 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw, clean_arq_element_out: /* Set pending if needed, unlock and return */ - if (pending != NULL) + if (pending) *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc); clean_arq_element_err: @@ -951,17 +934,3 @@ clean_arq_element_err: return ret_code; } - -void i40evf_resume_aq(struct i40e_hw *hw) -{ - /* Registers are reset after PF reset */ - hw->aq.asq.next_to_use = 0; - hw->aq.asq.next_to_clean = 0; - - i40e_config_asq_regs(hw); - - hw->aq.arq.next_to_use = 0; - hw->aq.arq.next_to_clean = 0; - - i40e_config_arq_regs(hw); -} diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h b/drivers/net/ethernet/intel/iavf/i40e_adminq.h index 1f264b9b6805..ee983889eab0 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h +++ b/drivers/net/ethernet/intel/iavf/i40e_adminq.h @@ -1,26 +1,26 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright(c) 2013 - 2018 Intel Corporation. 
*/ -#ifndef _I40E_ADMINQ_H_ -#define _I40E_ADMINQ_H_ +#ifndef _IAVF_ADMINQ_H_ +#define _IAVF_ADMINQ_H_ -#include "i40e_osdep.h" -#include "i40e_status.h" +#include "iavf_osdep.h" +#include "iavf_status.h" #include "i40e_adminq_cmd.h" -#define I40E_ADMINQ_DESC(R, i) \ +#define IAVF_ADMINQ_DESC(R, i) \ (&(((struct i40e_aq_desc *)((R).desc_buf.va))[i])) -#define I40E_ADMINQ_DESC_ALIGNMENT 4096 +#define IAVF_ADMINQ_DESC_ALIGNMENT 4096 -struct i40e_adminq_ring { - struct i40e_virt_mem dma_head; /* space for dma structures */ - struct i40e_dma_mem desc_buf; /* descriptor ring memory */ - struct i40e_virt_mem cmd_buf; /* command buffer memory */ +struct iavf_adminq_ring { + struct iavf_virt_mem dma_head; /* space for dma structures */ + struct iavf_dma_mem desc_buf; /* descriptor ring memory */ + struct iavf_virt_mem cmd_buf; /* command buffer memory */ union { - struct i40e_dma_mem *asq_bi; - struct i40e_dma_mem *arq_bi; + struct iavf_dma_mem *asq_bi; + struct iavf_dma_mem *arq_bi; } r; u16 count; /* Number of descriptors */ @@ -61,9 +61,9 @@ struct i40e_arq_event_info { }; /* Admin Queue information */ -struct i40e_adminq_info { - struct i40e_adminq_ring arq; /* receive queue */ - struct i40e_adminq_ring asq; /* send queue */ +struct iavf_adminq_info { + struct iavf_adminq_ring arq; /* receive queue */ + struct iavf_adminq_ring asq; /* send queue */ u32 asq_cmd_timeout; /* send queue cmd write back timeout*/ u16 num_arq_entries; /* receive queue depth */ u16 num_asq_entries; /* send queue depth */ @@ -130,7 +130,6 @@ static inline int i40e_aq_rc_to_posix(int aq_ret, int aq_rc) #define I40E_AQ_LARGE_BUF 512 #define I40E_ASQ_CMD_TIMEOUT 250000 /* usecs */ -void i40evf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, - u16 opcode); +void iavf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, u16 opcode); -#endif /* _I40E_ADMINQ_H_ */ +#endif /* _IAVF_ADMINQ_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/iavf/i40e_adminq_cmd.h new file mode 100644 index 000000000000..af4f94a6541e --- /dev/null +++ b/drivers/net/ethernet/intel/iavf/i40e_adminq_cmd.h @@ -0,0 +1,530 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ + +#ifndef _I40E_ADMINQ_CMD_H_ +#define _I40E_ADMINQ_CMD_H_ + +/* This header file defines the i40e Admin Queue commands and is shared between + * i40e Firmware and Software. Do not change the names in this file to IAVF + * because this file should be diff-able against the i40e version, even + * though many parts have been removed in this VF version. + * + * This file needs to comply with the Linux Kernel coding style. + */ + +#define I40E_FW_API_VERSION_MAJOR 0x0001 +#define I40E_FW_API_VERSION_MINOR_X722 0x0005 +#define I40E_FW_API_VERSION_MINOR_X710 0x0007 + +#define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? 
\ + I40E_FW_API_VERSION_MINOR_X710 : \ + I40E_FW_API_VERSION_MINOR_X722) + +/* API version 1.7 implements additional link and PHY-specific APIs */ +#define I40E_MINOR_VER_GET_LINK_INFO_XL710 0x0007 + +struct i40e_aq_desc { + __le16 flags; + __le16 opcode; + __le16 datalen; + __le16 retval; + __le32 cookie_high; + __le32 cookie_low; + union { + struct { + __le32 param0; + __le32 param1; + __le32 param2; + __le32 param3; + } internal; + struct { + __le32 param0; + __le32 param1; + __le32 addr_high; + __le32 addr_low; + } external; + u8 raw[16]; + } params; +}; + +/* Flags sub-structure + * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 | + * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE | + */ + +/* command flags and offsets*/ +#define I40E_AQ_FLAG_DD_SHIFT 0 +#define I40E_AQ_FLAG_CMP_SHIFT 1 +#define I40E_AQ_FLAG_ERR_SHIFT 2 +#define I40E_AQ_FLAG_VFE_SHIFT 3 +#define I40E_AQ_FLAG_LB_SHIFT 9 +#define I40E_AQ_FLAG_RD_SHIFT 10 +#define I40E_AQ_FLAG_VFC_SHIFT 11 +#define I40E_AQ_FLAG_BUF_SHIFT 12 +#define I40E_AQ_FLAG_SI_SHIFT 13 +#define I40E_AQ_FLAG_EI_SHIFT 14 +#define I40E_AQ_FLAG_FE_SHIFT 15 + +#define I40E_AQ_FLAG_DD BIT(I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */ +#define I40E_AQ_FLAG_CMP BIT(I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */ +#define I40E_AQ_FLAG_ERR BIT(I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */ +#define I40E_AQ_FLAG_VFE BIT(I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */ +#define I40E_AQ_FLAG_LB BIT(I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */ +#define I40E_AQ_FLAG_RD BIT(I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */ +#define I40E_AQ_FLAG_VFC BIT(I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */ +#define I40E_AQ_FLAG_BUF BIT(I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */ +#define I40E_AQ_FLAG_SI BIT(I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */ +#define I40E_AQ_FLAG_EI BIT(I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */ +#define I40E_AQ_FLAG_FE BIT(I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */ + +/* error codes */ +enum i40e_admin_queue_err { + I40E_AQ_RC_OK = 0, /* success */ + I40E_AQ_RC_EPERM = 1, /* Operation not permitted */ + I40E_AQ_RC_ENOENT = 2, /* No such element */ + I40E_AQ_RC_ESRCH = 3, /* Bad opcode */ + I40E_AQ_RC_EINTR = 4, /* operation interrupted */ + I40E_AQ_RC_EIO = 5, /* I/O error */ + I40E_AQ_RC_ENXIO = 6, /* No such resource */ + I40E_AQ_RC_E2BIG = 7, /* Arg too long */ + I40E_AQ_RC_EAGAIN = 8, /* Try again */ + I40E_AQ_RC_ENOMEM = 9, /* Out of memory */ + I40E_AQ_RC_EACCES = 10, /* Permission denied */ + I40E_AQ_RC_EFAULT = 11, /* Bad address */ + I40E_AQ_RC_EBUSY = 12, /* Device or resource busy */ + I40E_AQ_RC_EEXIST = 13, /* object already exists */ + I40E_AQ_RC_EINVAL = 14, /* Invalid argument */ + I40E_AQ_RC_ENOTTY = 15, /* Not a typewriter */ + I40E_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */ + I40E_AQ_RC_ENOSYS = 17, /* Function not implemented */ + I40E_AQ_RC_ERANGE = 18, /* Parameter out of range */ + I40E_AQ_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */ + I40E_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */ + I40E_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */ + I40E_AQ_RC_EFBIG = 22, /* File too large */ +}; + +/* Admin Queue command opcodes */ +enum i40e_admin_queue_opc { + /* aq commands */ + i40e_aqc_opc_get_version = 0x0001, + i40e_aqc_opc_driver_version = 0x0002, + i40e_aqc_opc_queue_shutdown = 0x0003, + i40e_aqc_opc_set_pf_context = 0x0004, + + /* resource ownership */ + i40e_aqc_opc_request_resource = 0x0008, + i40e_aqc_opc_release_resource = 0x0009, + + i40e_aqc_opc_list_func_capabilities = 0x000A, + i40e_aqc_opc_list_dev_capabilities = 0x000B, + + /* Proxy commands */ + 
i40e_aqc_opc_set_proxy_config = 0x0104, + i40e_aqc_opc_set_ns_proxy_table_entry = 0x0105, + + /* LAA */ + i40e_aqc_opc_mac_address_read = 0x0107, + i40e_aqc_opc_mac_address_write = 0x0108, + + /* PXE */ + i40e_aqc_opc_clear_pxe_mode = 0x0110, + + /* WoL commands */ + i40e_aqc_opc_set_wol_filter = 0x0120, + i40e_aqc_opc_get_wake_reason = 0x0121, + + /* internal switch commands */ + i40e_aqc_opc_get_switch_config = 0x0200, + i40e_aqc_opc_add_statistics = 0x0201, + i40e_aqc_opc_remove_statistics = 0x0202, + i40e_aqc_opc_set_port_parameters = 0x0203, + i40e_aqc_opc_get_switch_resource_alloc = 0x0204, + i40e_aqc_opc_set_switch_config = 0x0205, + i40e_aqc_opc_rx_ctl_reg_read = 0x0206, + i40e_aqc_opc_rx_ctl_reg_write = 0x0207, + + i40e_aqc_opc_add_vsi = 0x0210, + i40e_aqc_opc_update_vsi_parameters = 0x0211, + i40e_aqc_opc_get_vsi_parameters = 0x0212, + + i40e_aqc_opc_add_pv = 0x0220, + i40e_aqc_opc_update_pv_parameters = 0x0221, + i40e_aqc_opc_get_pv_parameters = 0x0222, + + i40e_aqc_opc_add_veb = 0x0230, + i40e_aqc_opc_update_veb_parameters = 0x0231, + i40e_aqc_opc_get_veb_parameters = 0x0232, + + i40e_aqc_opc_delete_element = 0x0243, + + i40e_aqc_opc_add_macvlan = 0x0250, + i40e_aqc_opc_remove_macvlan = 0x0251, + i40e_aqc_opc_add_vlan = 0x0252, + i40e_aqc_opc_remove_vlan = 0x0253, + i40e_aqc_opc_set_vsi_promiscuous_modes = 0x0254, + i40e_aqc_opc_add_tag = 0x0255, + i40e_aqc_opc_remove_tag = 0x0256, + i40e_aqc_opc_add_multicast_etag = 0x0257, + i40e_aqc_opc_remove_multicast_etag = 0x0258, + i40e_aqc_opc_update_tag = 0x0259, + i40e_aqc_opc_add_control_packet_filter = 0x025A, + i40e_aqc_opc_remove_control_packet_filter = 0x025B, + i40e_aqc_opc_add_cloud_filters = 0x025C, + i40e_aqc_opc_remove_cloud_filters = 0x025D, + i40e_aqc_opc_clear_wol_switch_filters = 0x025E, + + i40e_aqc_opc_add_mirror_rule = 0x0260, + i40e_aqc_opc_delete_mirror_rule = 0x0261, + + /* Dynamic Device Personalization */ + i40e_aqc_opc_write_personalization_profile = 0x0270, + i40e_aqc_opc_get_personalization_profile_list = 0x0271, + + /* DCB commands */ + i40e_aqc_opc_dcb_ignore_pfc = 0x0301, + i40e_aqc_opc_dcb_updated = 0x0302, + i40e_aqc_opc_set_dcb_parameters = 0x0303, + + /* TX scheduler */ + i40e_aqc_opc_configure_vsi_bw_limit = 0x0400, + i40e_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406, + i40e_aqc_opc_configure_vsi_tc_bw = 0x0407, + i40e_aqc_opc_query_vsi_bw_config = 0x0408, + i40e_aqc_opc_query_vsi_ets_sla_config = 0x040A, + i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410, + + i40e_aqc_opc_enable_switching_comp_ets = 0x0413, + i40e_aqc_opc_modify_switching_comp_ets = 0x0414, + i40e_aqc_opc_disable_switching_comp_ets = 0x0415, + i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416, + i40e_aqc_opc_configure_switching_comp_bw_config = 0x0417, + i40e_aqc_opc_query_switching_comp_ets_config = 0x0418, + i40e_aqc_opc_query_port_ets_config = 0x0419, + i40e_aqc_opc_query_switching_comp_bw_config = 0x041A, + i40e_aqc_opc_suspend_port_tx = 0x041B, + i40e_aqc_opc_resume_port_tx = 0x041C, + i40e_aqc_opc_configure_partition_bw = 0x041D, + /* hmc */ + i40e_aqc_opc_query_hmc_resource_profile = 0x0500, + i40e_aqc_opc_set_hmc_resource_profile = 0x0501, + + /* phy commands*/ + i40e_aqc_opc_get_phy_abilities = 0x0600, + i40e_aqc_opc_set_phy_config = 0x0601, + i40e_aqc_opc_set_mac_config = 0x0603, + i40e_aqc_opc_set_link_restart_an = 0x0605, + i40e_aqc_opc_get_link_status = 0x0607, + i40e_aqc_opc_set_phy_int_mask = 0x0613, + i40e_aqc_opc_get_local_advt_reg = 0x0614, + i40e_aqc_opc_set_local_advt_reg = 0x0615, + 
i40e_aqc_opc_get_partner_advt = 0x0616, + i40e_aqc_opc_set_lb_modes = 0x0618, + i40e_aqc_opc_get_phy_wol_caps = 0x0621, + i40e_aqc_opc_set_phy_debug = 0x0622, + i40e_aqc_opc_upload_ext_phy_fm = 0x0625, + i40e_aqc_opc_run_phy_activity = 0x0626, + i40e_aqc_opc_set_phy_register = 0x0628, + i40e_aqc_opc_get_phy_register = 0x0629, + + /* NVM commands */ + i40e_aqc_opc_nvm_read = 0x0701, + i40e_aqc_opc_nvm_erase = 0x0702, + i40e_aqc_opc_nvm_update = 0x0703, + i40e_aqc_opc_nvm_config_read = 0x0704, + i40e_aqc_opc_nvm_config_write = 0x0705, + i40e_aqc_opc_oem_post_update = 0x0720, + i40e_aqc_opc_thermal_sensor = 0x0721, + + /* virtualization commands */ + i40e_aqc_opc_send_msg_to_pf = 0x0801, + i40e_aqc_opc_send_msg_to_vf = 0x0802, + i40e_aqc_opc_send_msg_to_peer = 0x0803, + + /* alternate structure */ + i40e_aqc_opc_alternate_write = 0x0900, + i40e_aqc_opc_alternate_write_indirect = 0x0901, + i40e_aqc_opc_alternate_read = 0x0902, + i40e_aqc_opc_alternate_read_indirect = 0x0903, + i40e_aqc_opc_alternate_write_done = 0x0904, + i40e_aqc_opc_alternate_set_mode = 0x0905, + i40e_aqc_opc_alternate_clear_port = 0x0906, + + /* LLDP commands */ + i40e_aqc_opc_lldp_get_mib = 0x0A00, + i40e_aqc_opc_lldp_update_mib = 0x0A01, + i40e_aqc_opc_lldp_add_tlv = 0x0A02, + i40e_aqc_opc_lldp_update_tlv = 0x0A03, + i40e_aqc_opc_lldp_delete_tlv = 0x0A04, + i40e_aqc_opc_lldp_stop = 0x0A05, + i40e_aqc_opc_lldp_start = 0x0A06, + + /* Tunnel commands */ + i40e_aqc_opc_add_udp_tunnel = 0x0B00, + i40e_aqc_opc_del_udp_tunnel = 0x0B01, + i40e_aqc_opc_set_rss_key = 0x0B02, + i40e_aqc_opc_set_rss_lut = 0x0B03, + i40e_aqc_opc_get_rss_key = 0x0B04, + i40e_aqc_opc_get_rss_lut = 0x0B05, + + /* Async Events */ + i40e_aqc_opc_event_lan_overflow = 0x1001, + + /* OEM commands */ + i40e_aqc_opc_oem_parameter_change = 0xFE00, + i40e_aqc_opc_oem_device_status_change = 0xFE01, + i40e_aqc_opc_oem_ocsd_initialize = 0xFE02, + i40e_aqc_opc_oem_ocbb_initialize = 0xFE03, + + /* debug commands */ + i40e_aqc_opc_debug_read_reg = 0xFF03, + i40e_aqc_opc_debug_write_reg = 0xFF04, + i40e_aqc_opc_debug_modify_reg = 0xFF07, + i40e_aqc_opc_debug_dump_internals = 0xFF08, +}; + +/* command structures and indirect data structures */ + +/* Structure naming conventions: + * - no suffix for direct command descriptor structures + * - _data for indirect sent data + * - _resp for indirect return data (data which is both will use _data) + * - _completion for direct return data + * - _element_ for repeated elements (may also be _data or _resp) + * + * Command structures are expected to overlay the params.raw member of the basic + * descriptor, and as such cannot exceed 16 bytes in length. + */ + +/* This macro is used to generate a compilation error if a structure + * is not exactly the correct length. It gives a divide by zero error if the + * structure is not of the correct size, otherwise it creates an enum that is + * never used. + */ +#define I40E_CHECK_STRUCT_LEN(n, X) enum i40e_static_assert_enum_##X \ + { i40e_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) } + +/* This macro is used extensively to ensure that command structures are 16 + * bytes in length as they have to map to the raw array of that size. 
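+ * For example, the I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown) line below + * expands to an enum whose initializer divides by zero at compile time if + * that structure ever drifts from exactly 16 bytes. 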
+ */ +#define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X) + +/* Queue Shutdown (direct 0x0003) */ +struct i40e_aqc_queue_shutdown { + __le32 driver_unloading; +#define I40E_AQ_DRIVER_UNLOADING 0x1 + u8 reserved[12]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown); + +struct i40e_aqc_vsi_properties_data { + /* first 96 byte are written by SW */ + __le16 valid_sections; +#define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001 +#define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002 +#define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004 +#define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008 +#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010 +#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020 +#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040 +#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080 +#define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100 +#define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200 + /* switch section */ + __le16 switch_id; /* 12bit id combined with flags below */ +#define I40E_AQ_VSI_SW_ID_SHIFT 0x0000 +#define I40E_AQ_VSI_SW_ID_MASK (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT) +#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000 +#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000 +#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000 + u8 sw_reserved[2]; + /* security section */ + u8 sec_flags; +#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01 +#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02 +#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04 + u8 sec_reserved; + /* VLAN section */ + __le16 pvid; /* VLANS include priority bits */ + __le16 fcoe_pvid; + u8 port_vlan_flags; +#define I40E_AQ_VSI_PVLAN_MODE_SHIFT 0x00 +#define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \ + I40E_AQ_VSI_PVLAN_MODE_SHIFT) +#define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01 +#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02 +#define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03 +#define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04 +#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03 +#define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \ + I40E_AQ_VSI_PVLAN_EMOD_SHIFT) +#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0 +#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08 +#define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10 +#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18 + u8 pvlan_reserved[3]; + /* ingress egress up sections */ + __le32 ingress_table; /* bitmap, 3 bits per up */ +#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0 +#define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP0_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3 +#define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP1_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6 +#define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP2_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9 +#define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP3_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12 +#define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP4_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15 +#define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP5_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18 +#define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP6_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21 +#define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP7_SHIFT) + __le32 egress_table; /* same defines as for ingress table */ + /* cascaded PV section */ + __le16 cas_pv_tag; + u8 cas_pv_flags; +#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00 +#define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \ + I40E_AQ_VSI_CAS_PV_TAGX_SHIFT) +#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00 
+#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01 +#define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02 +#define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10 +#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20 +#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40 + u8 cas_pv_reserved; + /* queue mapping section */ + __le16 mapping_flags; +#define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0 +#define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1 + __le16 queue_mapping[16]; +#define I40E_AQ_VSI_QUEUE_SHIFT 0x0 +#define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT) + __le16 tc_mapping[8]; +#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0 +#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \ + I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) +#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9 +#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \ + I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT) + /* queueing option section */ + u8 queueing_opt_flags; +#define I40E_AQ_VSI_QUE_OPT_MULTICAST_UDP_ENA 0x04 +#define I40E_AQ_VSI_QUE_OPT_UNICAST_UDP_ENA 0x08 +#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10 +#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20 +#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_PF 0x00 +#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI 0x40 + u8 queueing_opt_reserved[3]; + /* scheduler section */ + u8 up_enable_bits; + u8 sched_reserved; + /* outer up section */ + __le32 outer_up_table; /* same structure and defines as ingress tbl */ + u8 cmd_reserved[8]; + /* last 32 bytes are written by FW */ + __le16 qs_handle[8]; +#define I40E_AQ_VSI_QS_HANDLE_INVALID 0xFFFF + __le16 stat_counter_idx; + __le16 sched_id; + u8 resp_reserved[12]; +}; + +I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data); + +/* Get VEB Parameters (direct 0x0232) + * uses i40e_aqc_switch_seid for the descriptor + */ +struct i40e_aqc_get_veb_parameters_completion { + __le16 seid; + __le16 switch_id; + __le16 veb_flags; /* only the first/last flags from 0x0230 are valid */ + __le16 statistic_index; + __le16 vebs_used; + __le16 vebs_free; + u8 reserved[4]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion); + +#define I40E_LINK_SPEED_100MB_SHIFT 0x1 +#define I40E_LINK_SPEED_1000MB_SHIFT 0x2 +#define I40E_LINK_SPEED_10GB_SHIFT 0x3 +#define I40E_LINK_SPEED_40GB_SHIFT 0x4 +#define I40E_LINK_SPEED_20GB_SHIFT 0x5 +#define I40E_LINK_SPEED_25GB_SHIFT 0x6 + +enum i40e_aq_link_speed { + I40E_LINK_SPEED_UNKNOWN = 0, + I40E_LINK_SPEED_100MB = BIT(I40E_LINK_SPEED_100MB_SHIFT), + I40E_LINK_SPEED_1GB = BIT(I40E_LINK_SPEED_1000MB_SHIFT), + I40E_LINK_SPEED_10GB = BIT(I40E_LINK_SPEED_10GB_SHIFT), + I40E_LINK_SPEED_40GB = BIT(I40E_LINK_SPEED_40GB_SHIFT), + I40E_LINK_SPEED_20GB = BIT(I40E_LINK_SPEED_20GB_SHIFT), + I40E_LINK_SPEED_25GB = BIT(I40E_LINK_SPEED_25GB_SHIFT), +}; + +/* Send to PF command (indirect 0x0801) id is only used by PF + * Send to VF command (indirect 0x0802) id is only used by PF + * Send to Peer PF command (indirect 0x0803) + */ +struct i40e_aqc_pf_vf_message { + __le32 id; + u8 reserved[4]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_pf_vf_message); + +struct i40e_aqc_get_set_rss_key { +#define I40E_AQC_SET_RSS_KEY_VSI_VALID BIT(15) +#define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0 +#define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \ + I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) + __le16 vsi_id; + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_key); + +struct i40e_aqc_get_set_rss_key_data { + u8 standard_rss_key[0x28]; + u8 extended_hash_key[0xc]; +}; + +I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data); + +struct i40e_aqc_get_set_rss_lut { 
+#define I40E_AQC_SET_RSS_LUT_VSI_VALID BIT(15) +#define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0 +#define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \ + I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) + __le16 vsi_id; +#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0 +#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK \ + BIT(I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) + +#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0 +#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1 + __le16 flags; + u8 reserved[4]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_lut); +#endif /* _I40E_ADMINQ_CMD_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h new file mode 100644 index 000000000000..272d76b733aa --- /dev/null +++ b/drivers/net/ethernet/intel/iavf/iavf.h @@ -0,0 +1,418 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ + +#ifndef _IAVF_H_ +#define _IAVF_H_ + +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/aer.h> +#include <linux/netdevice.h> +#include <linux/vmalloc.h> +#include <linux/interrupt.h> +#include <linux/ethtool.h> +#include <linux/if_vlan.h> +#include <linux/ip.h> +#include <linux/tcp.h> +#include <linux/sctp.h> +#include <linux/ipv6.h> +#include <linux/kernel.h> +#include <linux/bitops.h> +#include <linux/timer.h> +#include <linux/workqueue.h> +#include <linux/wait.h> +#include <linux/delay.h> +#include <linux/gfp.h> +#include <linux/skbuff.h> +#include <linux/dma-mapping.h> +#include <linux/etherdevice.h> +#include <linux/socket.h> +#include <linux/jiffies.h> +#include <net/ip6_checksum.h> +#include <net/pkt_cls.h> +#include <net/udp.h> +#include <net/tc_act/tc_gact.h> +#include <net/tc_act/tc_mirred.h> + +#include "iavf_type.h" +#include <linux/avf/virtchnl.h> +#include "iavf_txrx.h" + +#define DEFAULT_DEBUG_LEVEL_SHIFT 3 +#define PFX "iavf: " + +/* VSI state flags shared with common code */ +enum iavf_vsi_state_t { + __IAVF_VSI_DOWN, + /* This must be last as it determines the size of the BITMAP */ + __IAVF_VSI_STATE_SIZE__, +}; + +/* dummy struct to make common code less painful */ +struct iavf_vsi { + struct iavf_adapter *back; + struct net_device *netdev; + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + u16 seid; + u16 id; + DECLARE_BITMAP(state, __IAVF_VSI_STATE_SIZE__); + int base_vector; + u16 work_limit; + u16 qs_handle; + void *priv; /* client driver data reference. */ +}; + +/* How many Rx Buffers do we bundle into one write to the hardware ? 
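+ * (the Rx clean-up path refills descriptors in batches of this size and + * only then hands them back to the hardware, rather than one at a time) 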
*/ +#define IAVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */ +#define IAVF_DEFAULT_TXD 512 +#define IAVF_DEFAULT_RXD 512 +#define IAVF_MAX_TXD 4096 +#define IAVF_MIN_TXD 64 +#define IAVF_MAX_RXD 4096 +#define IAVF_MIN_RXD 64 +#define IAVF_REQ_DESCRIPTOR_MULTIPLE 32 +#define IAVF_MAX_AQ_BUF_SIZE 4096 +#define IAVF_AQ_LEN 32 +#define IAVF_AQ_MAX_ERR 20 /* times to try before resetting AQ */ + +#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) + +#define IAVF_RX_DESC(R, i) (&(((union iavf_32byte_rx_desc *)((R)->desc))[i])) +#define IAVF_TX_DESC(R, i) (&(((struct iavf_tx_desc *)((R)->desc))[i])) +#define IAVF_TX_CTXTDESC(R, i) \ + (&(((struct iavf_tx_context_desc *)((R)->desc))[i])) +#define IAVF_MAX_REQ_QUEUES 4 + +#define IAVF_HKEY_ARRAY_SIZE ((IAVF_VFQF_HKEY_MAX_INDEX + 1) * 4) +#define IAVF_HLUT_ARRAY_SIZE ((IAVF_VFQF_HLUT_MAX_INDEX + 1) * 4) +#define IAVF_MBPS_DIVISOR 125000 /* divisor to convert to Mbps */ + +/* MAX_MSIX_Q_VECTORS of these are allocated, + * but we only use one per queue-specific vector. + */ +struct iavf_q_vector { + struct iavf_adapter *adapter; + struct iavf_vsi *vsi; + struct napi_struct napi; + struct iavf_ring_container rx; + struct iavf_ring_container tx; + u32 ring_mask; + u8 itr_countdown; /* when 0 should adjust adaptive ITR */ + u8 num_ringpairs; /* total number of ring pairs in vector */ + u16 v_idx; /* index in the vsi->q_vector array. */ + u16 reg_idx; /* register index of the interrupt */ + char name[IFNAMSIZ + 15]; + bool arm_wb_state; + cpumask_t affinity_mask; + struct irq_affinity_notify affinity_notify; +}; + +/* Helper macros to switch between ints/sec and what the register uses. + * And yes, it's the same math going both ways. The lowest value + * supported by all of the i40e hardware is 8. + */ +#define EITR_INTS_PER_SEC_TO_REG(_eitr) \ + ((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8) +#define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG + +#define IAVF_DESC_UNUSED(R) \ + ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \ + (R)->next_to_clean - (R)->next_to_use - 1) + +#define OTHER_VECTOR 1 +#define NONQ_VECS (OTHER_VECTOR) + +#define MIN_MSIX_Q_VECTORS 1 +#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NONQ_VECS) + +#define IAVF_QUEUE_END_OF_LIST 0x7FF +#define IAVF_FREE_VECTOR 0x7FFF +struct iavf_mac_filter { + struct list_head list; + u8 macaddr[ETH_ALEN]; + bool remove; /* filter needs to be removed */ + bool add; /* filter needs to be added */ +}; + +struct iavf_vlan_filter { + struct list_head list; + u16 vlan; + bool remove; /* filter needs to be removed */ + bool add; /* filter needs to be added */ +}; + +#define IAVF_MAX_TRAFFIC_CLASS 4 +/* State of traffic class creation */ +enum iavf_tc_state_t { + __IAVF_TC_INVALID, /* no traffic class, default state */ + __IAVF_TC_RUNNING, /* traffic classes have been created */ +}; + +/* channel info */ +struct iavf_channel_config { + struct virtchnl_channel_info ch_info[IAVF_MAX_TRAFFIC_CLASS]; + enum iavf_tc_state_t state; + u8 total_qps; +}; + +/* State of cloud filter */ +enum iavf_cloud_filter_state_t { + __IAVF_CF_INVALID, /* cloud filter not added */ + __IAVF_CF_ADD_PENDING, /* cloud filter pending add by the PF */ + __IAVF_CF_DEL_PENDING, /* cloud filter pending del by the PF */ + __IAVF_CF_ACTIVE, /* cloud filter is active */ +}; + +/* Driver state. The order of these is important! 
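+ * Keeping the values ordered lets the driver gate work with plain
+ * comparisons, as iavf_client_subtask() does later in this diff:
+ *
+ *	if (adapter->state < __IAVF_DOWN)
+ *		return;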
*/ +enum iavf_state_t { + __IAVF_STARTUP, /* driver loaded, probe complete */ + __IAVF_REMOVE, /* driver is being unloaded */ + __IAVF_INIT_VERSION_CHECK, /* aq msg sent, awaiting reply */ + __IAVF_INIT_GET_RESOURCES, /* aq msg sent, awaiting reply */ + __IAVF_INIT_SW, /* got resources, setting up structs */ + __IAVF_RESETTING, /* in reset */ + /* Below here, watchdog is running */ + __IAVF_DOWN, /* ready, can be opened */ + __IAVF_DOWN_PENDING, /* descending, waiting for watchdog */ + __IAVF_TESTING, /* in ethtool self-test */ + __IAVF_RUNNING, /* opened, working */ +}; + +enum iavf_critical_section_t { + __IAVF_IN_CRITICAL_TASK, /* cannot be interrupted */ + __IAVF_IN_CLIENT_TASK, + __IAVF_IN_REMOVE_TASK, /* device being removed */ +}; + +#define IAVF_CLOUD_FIELD_OMAC 0x01 +#define IAVF_CLOUD_FIELD_IMAC 0x02 +#define IAVF_CLOUD_FIELD_IVLAN 0x04 +#define IAVF_CLOUD_FIELD_TEN_ID 0x08 +#define IAVF_CLOUD_FIELD_IIP 0x10 + +#define IAVF_CF_FLAGS_OMAC IAVF_CLOUD_FIELD_OMAC +#define IAVF_CF_FLAGS_IMAC IAVF_CLOUD_FIELD_IMAC +#define IAVF_CF_FLAGS_IMAC_IVLAN (IAVF_CLOUD_FIELD_IMAC |\ + IAVF_CLOUD_FIELD_IVLAN) +#define IAVF_CF_FLAGS_IMAC_TEN_ID (IAVF_CLOUD_FIELD_IMAC |\ + IAVF_CLOUD_FIELD_TEN_ID) +#define IAVF_CF_FLAGS_OMAC_TEN_ID_IMAC (IAVF_CLOUD_FIELD_OMAC |\ + IAVF_CLOUD_FIELD_IMAC |\ + IAVF_CLOUD_FIELD_TEN_ID) +#define IAVF_CF_FLAGS_IMAC_IVLAN_TEN_ID (IAVF_CLOUD_FIELD_IMAC |\ + IAVF_CLOUD_FIELD_IVLAN |\ + IAVF_CLOUD_FIELD_TEN_ID) +#define IAVF_CF_FLAGS_IIP IAVF_CLOUD_FIELD_IIP + +/* bookkeeping of cloud filters */ +struct iavf_cloud_filter { + enum iavf_cloud_filter_state_t state; + struct list_head list; + struct virtchnl_filter f; + unsigned long cookie; + bool del; /* filter needs to be deleted */ + bool add; /* filter needs to be added */ +}; + +/* board specific private data structure */ +struct iavf_adapter { + struct timer_list watchdog_timer; + struct work_struct reset_task; + struct work_struct adminq_task; + struct delayed_work client_task; + struct delayed_work init_task; + wait_queue_head_t down_waitqueue; + struct iavf_q_vector *q_vectors; + struct list_head vlan_filter_list; + struct list_head mac_filter_list; + /* Lock to protect accesses to MAC and VLAN lists */ + spinlock_t mac_vlan_list_lock; + char misc_vector_name[IFNAMSIZ + 9]; + int num_active_queues; + int num_req_queues; + + /* TX */ + struct iavf_ring *tx_rings; + u32 tx_timeout_count; + u32 tx_desc_count; + + /* RX */ + struct iavf_ring *rx_rings; + u64 hw_csum_rx_error; + u32 rx_desc_count; + int num_msix_vectors; + int num_iwarp_msix; + int iwarp_base_vector; + u32 client_pending; + struct i40e_client_instance *cinst; + struct msix_entry *msix_entries; + + u32 flags; +#define IAVF_FLAG_RX_CSUM_ENABLED BIT(0) +#define IAVF_FLAG_PF_COMMS_FAILED BIT(3) +#define IAVF_FLAG_RESET_PENDING BIT(4) +#define IAVF_FLAG_RESET_NEEDED BIT(5) +#define IAVF_FLAG_WB_ON_ITR_CAPABLE BIT(6) +#define IAVF_FLAG_ADDR_SET_BY_PF BIT(8) +#define IAVF_FLAG_SERVICE_CLIENT_REQUESTED BIT(9) +#define IAVF_FLAG_CLIENT_NEEDS_OPEN BIT(10) +#define IAVF_FLAG_CLIENT_NEEDS_CLOSE BIT(11) +#define IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS BIT(12) +#define IAVF_FLAG_PROMISC_ON BIT(13) +#define IAVF_FLAG_ALLMULTI_ON BIT(14) +#define IAVF_FLAG_LEGACY_RX BIT(15) +#define IAVF_FLAG_REINIT_ITR_NEEDED BIT(16) +#define IAVF_FLAG_QUEUES_DISABLED BIT(17) +/* duplicates for common code */ +#define IAVF_FLAG_DCB_ENABLED 0 + /* flags for admin queue service task */ + u32 aq_required; +#define IAVF_FLAG_AQ_ENABLE_QUEUES BIT(0) +#define IAVF_FLAG_AQ_DISABLE_QUEUES BIT(1) 
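+/* Each bit set in aq_required asks the admin queue task to issue one
+ * virtchnl request; a consumer sketch (assumed shape, not code from
+ * this patch):
+ *
+ *	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_QUEUES)
+ *		iavf_enable_queues(adapter);
+ */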
+#define IAVF_FLAG_AQ_ADD_MAC_FILTER BIT(2) +#define IAVF_FLAG_AQ_ADD_VLAN_FILTER BIT(3) +#define IAVF_FLAG_AQ_DEL_MAC_FILTER BIT(4) +#define IAVF_FLAG_AQ_DEL_VLAN_FILTER BIT(5) +#define IAVF_FLAG_AQ_CONFIGURE_QUEUES BIT(6) +#define IAVF_FLAG_AQ_MAP_VECTORS BIT(7) +#define IAVF_FLAG_AQ_HANDLE_RESET BIT(8) +#define IAVF_FLAG_AQ_CONFIGURE_RSS BIT(9) /* direct AQ config */ +#define IAVF_FLAG_AQ_GET_CONFIG BIT(10) +/* Newer style, RSS done by the PF so we can ignore hardware vagaries. */ +#define IAVF_FLAG_AQ_GET_HENA BIT(11) +#define IAVF_FLAG_AQ_SET_HENA BIT(12) +#define IAVF_FLAG_AQ_SET_RSS_KEY BIT(13) +#define IAVF_FLAG_AQ_SET_RSS_LUT BIT(14) +#define IAVF_FLAG_AQ_REQUEST_PROMISC BIT(15) +#define IAVF_FLAG_AQ_RELEASE_PROMISC BIT(16) +#define IAVF_FLAG_AQ_REQUEST_ALLMULTI BIT(17) +#define IAVF_FLAG_AQ_RELEASE_ALLMULTI BIT(18) +#define IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING BIT(19) +#define IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING BIT(20) +#define IAVF_FLAG_AQ_ENABLE_CHANNELS BIT(21) +#define IAVF_FLAG_AQ_DISABLE_CHANNELS BIT(22) +#define IAVF_FLAG_AQ_ADD_CLOUD_FILTER BIT(23) +#define IAVF_FLAG_AQ_DEL_CLOUD_FILTER BIT(24) + + /* OS defined structs */ + struct net_device *netdev; + struct pci_dev *pdev; + + struct iavf_hw hw; /* defined in iavf_type.h */ + + enum iavf_state_t state; + unsigned long crit_section; + + struct work_struct watchdog_task; + bool netdev_registered; + bool link_up; + enum virtchnl_link_speed link_speed; + enum virtchnl_ops current_op; +#define CLIENT_ALLOWED(_a) ((_a)->vf_res ? \ + (_a)->vf_res->vf_cap_flags & \ + VIRTCHNL_VF_OFFLOAD_IWARP : \ + 0) +#define CLIENT_ENABLED(_a) ((_a)->cinst) +/* RSS by the PF should be preferred over RSS via other methods. */ +#define RSS_PF(_a) ((_a)->vf_res->vf_cap_flags & \ + VIRTCHNL_VF_OFFLOAD_RSS_PF) +#define RSS_AQ(_a) ((_a)->vf_res->vf_cap_flags & \ + VIRTCHNL_VF_OFFLOAD_RSS_AQ) +#define RSS_REG(_a) (!((_a)->vf_res->vf_cap_flags & \ + (VIRTCHNL_VF_OFFLOAD_RSS_AQ | \ + VIRTCHNL_VF_OFFLOAD_RSS_PF))) +#define VLAN_ALLOWED(_a) ((_a)->vf_res->vf_cap_flags & \ + VIRTCHNL_VF_OFFLOAD_VLAN) + struct virtchnl_vf_resource *vf_res; /* incl. 
all VSIs */ + struct virtchnl_vsi_resource *vsi_res; /* our LAN VSI */ + struct virtchnl_version_info pf_version; +#define PF_IS_V11(_a) (((_a)->pf_version.major == 1) && \ + ((_a)->pf_version.minor == 1)) + u16 msg_enable; + struct iavf_eth_stats current_stats; + struct iavf_vsi vsi; + u32 aq_wait_count; + /* RSS stuff */ + u64 hena; + u16 rss_key_size; + u16 rss_lut_size; + u8 *rss_key; + u8 *rss_lut; + /* ADQ related members */ + struct iavf_channel_config ch_config; + u8 num_tc; + struct list_head cloud_filter_list; + /* lock to protect access to the cloud filter list */ + spinlock_t cloud_filter_list_lock; + u16 num_cloud_filters; +}; + + +/* Ethtool Private Flags */ + +/* lan device, used by client interface */ +struct i40e_device { + struct list_head list; + struct iavf_adapter *vf; +}; + +/* needed by iavf_ethtool.c */ +extern char iavf_driver_name[]; +extern const char iavf_driver_version[]; + +int iavf_up(struct iavf_adapter *adapter); +void iavf_down(struct iavf_adapter *adapter); +int iavf_process_config(struct iavf_adapter *adapter); +void iavf_schedule_reset(struct iavf_adapter *adapter); +void iavf_reset(struct iavf_adapter *adapter); +void iavf_set_ethtool_ops(struct net_device *netdev); +void iavf_update_stats(struct iavf_adapter *adapter); +void iavf_reset_interrupt_capability(struct iavf_adapter *adapter); +int iavf_init_interrupt_scheme(struct iavf_adapter *adapter); +void iavf_irq_enable_queues(struct iavf_adapter *adapter, u32 mask); +void iavf_free_all_tx_resources(struct iavf_adapter *adapter); +void iavf_free_all_rx_resources(struct iavf_adapter *adapter); + +void iavf_napi_add_all(struct iavf_adapter *adapter); +void iavf_napi_del_all(struct iavf_adapter *adapter); + +int iavf_send_api_ver(struct iavf_adapter *adapter); +int iavf_verify_api_ver(struct iavf_adapter *adapter); +int iavf_send_vf_config_msg(struct iavf_adapter *adapter); +int iavf_get_vf_config(struct iavf_adapter *adapter); +void iavf_irq_enable(struct iavf_adapter *adapter, bool flush); +void iavf_configure_queues(struct iavf_adapter *adapter); +void iavf_deconfigure_queues(struct iavf_adapter *adapter); +void iavf_enable_queues(struct iavf_adapter *adapter); +void iavf_disable_queues(struct iavf_adapter *adapter); +void iavf_map_queues(struct iavf_adapter *adapter); +int iavf_request_queues(struct iavf_adapter *adapter, int num); +void iavf_add_ether_addrs(struct iavf_adapter *adapter); +void iavf_del_ether_addrs(struct iavf_adapter *adapter); +void iavf_add_vlans(struct iavf_adapter *adapter); +void iavf_del_vlans(struct iavf_adapter *adapter); +void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags); +void iavf_request_stats(struct iavf_adapter *adapter); +void iavf_request_reset(struct iavf_adapter *adapter); +void iavf_get_hena(struct iavf_adapter *adapter); +void iavf_set_hena(struct iavf_adapter *adapter); +void iavf_set_rss_key(struct iavf_adapter *adapter); +void iavf_set_rss_lut(struct iavf_adapter *adapter); +void iavf_enable_vlan_stripping(struct iavf_adapter *adapter); +void iavf_disable_vlan_stripping(struct iavf_adapter *adapter); +void iavf_virtchnl_completion(struct iavf_adapter *adapter, + enum virtchnl_ops v_opcode, + iavf_status v_retval, u8 *msg, u16 msglen); +int iavf_config_rss(struct iavf_adapter *adapter); +int iavf_lan_add_device(struct iavf_adapter *adapter); +int iavf_lan_del_device(struct iavf_adapter *adapter); +void iavf_client_subtask(struct iavf_adapter *adapter); +void iavf_notify_client_message(struct iavf_vsi *vsi, u8 *msg, u16 len); +void 
iavf_notify_client_l2_params(struct iavf_vsi *vsi); +void iavf_notify_client_open(struct iavf_vsi *vsi); +void iavf_notify_client_close(struct iavf_vsi *vsi, bool reset); +void iavf_enable_channels(struct iavf_adapter *adapter); +void iavf_disable_channels(struct iavf_adapter *adapter); +void iavf_add_cloud_filter(struct iavf_adapter *adapter); +void iavf_del_cloud_filter(struct iavf_adapter *adapter); +#endif /* _IAVF_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_alloc.h b/drivers/net/ethernet/intel/iavf/iavf_alloc.h new file mode 100644 index 000000000000..bf2753146f30 --- /dev/null +++ b/drivers/net/ethernet/intel/iavf/iavf_alloc.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ + +#ifndef _IAVF_ALLOC_H_ +#define _IAVF_ALLOC_H_ + +struct iavf_hw; + +/* Memory allocation types */ +enum iavf_memory_type { + iavf_mem_arq_buf = 0, /* ARQ indirect command buffer */ + iavf_mem_asq_buf = 1, + iavf_mem_atq_buf = 2, /* ATQ indirect command buffer */ + iavf_mem_arq_ring = 3, /* ARQ descriptor ring */ + iavf_mem_atq_ring = 4, /* ATQ descriptor ring */ + iavf_mem_pd = 5, /* Page Descriptor */ + iavf_mem_bp = 6, /* Backing Page - 4KB */ + iavf_mem_bp_jumbo = 7, /* Backing Page - > 4KB */ + iavf_mem_reserved +}; + +/* prototype for functions used for dynamic memory allocation */ +iavf_status iavf_allocate_dma_mem(struct iavf_hw *hw, struct iavf_dma_mem *mem, + enum iavf_memory_type type, + u64 size, u32 alignment); +iavf_status iavf_free_dma_mem(struct iavf_hw *hw, struct iavf_dma_mem *mem); +iavf_status iavf_allocate_virt_mem(struct iavf_hw *hw, + struct iavf_virt_mem *mem, u32 size); +iavf_status iavf_free_virt_mem(struct iavf_hw *hw, struct iavf_virt_mem *mem); + +#endif /* _IAVF_ALLOC_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_client.c b/drivers/net/ethernet/intel/iavf/iavf_client.c index 3cc9d60d0d72..aea45364fd1c 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_client.c +++ b/drivers/net/ethernet/intel/iavf/iavf_client.c @@ -4,36 +4,36 @@ #include <linux/list.h> #include <linux/errno.h> -#include "i40evf.h" -#include "i40e_prototype.h" -#include "i40evf_client.h" +#include "iavf.h" +#include "iavf_prototype.h" +#include "iavf_client.h" static -const char i40evf_client_interface_version_str[] = I40EVF_CLIENT_VERSION_STR; +const char iavf_client_interface_version_str[] = IAVF_CLIENT_VERSION_STR; static struct i40e_client *vf_registered_client; -static LIST_HEAD(i40evf_devices); -static DEFINE_MUTEX(i40evf_device_mutex); +static LIST_HEAD(i40e_devices); +static DEFINE_MUTEX(iavf_device_mutex); -static u32 i40evf_client_virtchnl_send(struct i40e_info *ldev, - struct i40e_client *client, - u8 *msg, u16 len); +static u32 iavf_client_virtchnl_send(struct i40e_info *ldev, + struct i40e_client *client, + u8 *msg, u16 len); -static int i40evf_client_setup_qvlist(struct i40e_info *ldev, - struct i40e_client *client, - struct i40e_qvlist_info *qvlist_info); +static int iavf_client_setup_qvlist(struct i40e_info *ldev, + struct i40e_client *client, + struct i40e_qvlist_info *qvlist_info); -static struct i40e_ops i40evf_lan_ops = { - .virtchnl_send = i40evf_client_virtchnl_send, - .setup_qvlist = i40evf_client_setup_qvlist, +static struct i40e_ops iavf_lan_ops = { + .virtchnl_send = iavf_client_virtchnl_send, + .setup_qvlist = iavf_client_setup_qvlist, }; /** - * i40evf_client_get_params - retrieve relevant client parameters + * iavf_client_get_params - retrieve relevant client parameters * @vsi: VSI with parameters * 
@params: client param struct **/ static -void i40evf_client_get_params(struct i40e_vsi *vsi, struct i40e_params *params) +void iavf_client_get_params(struct iavf_vsi *vsi, struct i40e_params *params) { int i; @@ -41,21 +41,21 @@ void i40evf_client_get_params(struct i40e_vsi *vsi, struct i40e_params *params) params->mtu = vsi->netdev->mtu; params->link_up = vsi->back->link_up; - for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { + for (i = 0; i < IAVF_MAX_USER_PRIORITY; i++) { params->qos.prio_qos[i].tc = 0; params->qos.prio_qos[i].qs_handle = vsi->qs_handle; } } /** - * i40evf_notify_client_message - call the client message receive callback + * iavf_notify_client_message - call the client message receive callback * @vsi: the VSI associated with this client * @msg: message buffer * @len: length of message * * If there is a client to this VSI, call the client **/ -void i40evf_notify_client_message(struct i40e_vsi *vsi, u8 *msg, u16 len) +void iavf_notify_client_message(struct iavf_vsi *vsi, u8 *msg, u16 len) { struct i40e_client_instance *cinst; @@ -74,12 +74,12 @@ void i40evf_notify_client_message(struct i40e_vsi *vsi, u8 *msg, u16 len) } /** - * i40evf_notify_client_l2_params - call the client notify callback + * iavf_notify_client_l2_params - call the client notify callback * @vsi: the VSI with l2 param changes * * If there is a client to this VSI, call the client **/ -void i40evf_notify_client_l2_params(struct i40e_vsi *vsi) +void iavf_notify_client_l2_params(struct iavf_vsi *vsi) { struct i40e_client_instance *cinst; struct i40e_params params; @@ -95,21 +95,21 @@ void i40evf_notify_client_l2_params(struct i40e_vsi *vsi) "Cannot locate client instance l2_param_change function\n"); return; } - i40evf_client_get_params(vsi, ¶ms); + iavf_client_get_params(vsi, ¶ms); cinst->lan_info.params = params; cinst->client->ops->l2_param_change(&cinst->lan_info, cinst->client, ¶ms); } /** - * i40evf_notify_client_open - call the client open callback + * iavf_notify_client_open - call the client open callback * @vsi: the VSI with netdev opened * * If there is a client to this netdev, call the client with open **/ -void i40evf_notify_client_open(struct i40e_vsi *vsi) +void iavf_notify_client_open(struct iavf_vsi *vsi) { - struct i40evf_adapter *adapter = vsi->back; + struct iavf_adapter *adapter = vsi->back; struct i40e_client_instance *cinst = adapter->cinst; int ret; @@ -127,22 +127,22 @@ void i40evf_notify_client_open(struct i40e_vsi *vsi) } /** - * i40evf_client_release_qvlist - send a message to the PF to release iwarp qv map + * iavf_client_release_qvlist - send a message to the PF to release iwarp qv map * @ldev: pointer to L2 context. 
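 *
 * Sends VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP to the PF so it can tear down
 * the iwarp queue-to-vector map; bails out with -EAGAIN while other
 * admin queue requests are still pending.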
* * Return 0 on success or < 0 on error **/ -static int i40evf_client_release_qvlist(struct i40e_info *ldev) +static int iavf_client_release_qvlist(struct i40e_info *ldev) { - struct i40evf_adapter *adapter = ldev->vf; - i40e_status err; + struct iavf_adapter *adapter = ldev->vf; + iavf_status err; if (adapter->aq_required) return -EAGAIN; - err = i40e_aq_send_msg_to_pf(&adapter->hw, - VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP, - I40E_SUCCESS, NULL, 0, NULL); + err = iavf_aq_send_msg_to_pf(&adapter->hw, + VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP, + I40E_SUCCESS, NULL, 0, NULL); if (err) dev_err(&adapter->pdev->dev, @@ -153,15 +153,15 @@ static int i40evf_client_release_qvlist(struct i40e_info *ldev) } /** - * i40evf_notify_client_close - call the client close callback + * iavf_notify_client_close - call the client close callback * @vsi: the VSI with netdev closed * @reset: true when close called due to reset pending * * If there is a client to this netdev, call the client with close **/ -void i40evf_notify_client_close(struct i40e_vsi *vsi, bool reset) +void iavf_notify_client_close(struct iavf_vsi *vsi, bool reset) { - struct i40evf_adapter *adapter = vsi->back; + struct iavf_adapter *adapter = vsi->back; struct i40e_client_instance *cinst = adapter->cinst; if (!cinst || !cinst->client || !cinst->client->ops || @@ -171,21 +171,21 @@ void i40evf_notify_client_close(struct i40e_vsi *vsi, bool reset) return; } cinst->client->ops->close(&cinst->lan_info, cinst->client, reset); - i40evf_client_release_qvlist(&cinst->lan_info); + iavf_client_release_qvlist(&cinst->lan_info); clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state); } /** - * i40evf_client_add_instance - add a client instance to the instance list + * iavf_client_add_instance - add a client instance to the instance list * @adapter: pointer to the board struct * * Returns cinst ptr on success, NULL on failure **/ static struct i40e_client_instance * -i40evf_client_add_instance(struct i40evf_adapter *adapter) +iavf_client_add_instance(struct iavf_adapter *adapter) { struct i40e_client_instance *cinst = NULL; - struct i40e_vsi *vsi = &adapter->vsi; + struct iavf_vsi *vsi = &adapter->vsi; struct netdev_hw_addr *mac = NULL; struct i40e_params params; @@ -207,11 +207,11 @@ i40evf_client_add_instance(struct i40evf_adapter *adapter) cinst->lan_info.fid = 0; cinst->lan_info.ftype = I40E_CLIENT_FTYPE_VF; cinst->lan_info.hw_addr = adapter->hw.hw_addr; - cinst->lan_info.ops = &i40evf_lan_ops; - cinst->lan_info.version.major = I40EVF_CLIENT_VERSION_MAJOR; - cinst->lan_info.version.minor = I40EVF_CLIENT_VERSION_MINOR; - cinst->lan_info.version.build = I40EVF_CLIENT_VERSION_BUILD; - i40evf_client_get_params(vsi, ¶ms); + cinst->lan_info.ops = &iavf_lan_ops; + cinst->lan_info.version.major = IAVF_CLIENT_VERSION_MAJOR; + cinst->lan_info.version.minor = IAVF_CLIENT_VERSION_MINOR; + cinst->lan_info.version.build = IAVF_CLIENT_VERSION_BUILD; + iavf_client_get_params(vsi, ¶ms); cinst->lan_info.params = params; set_bit(__I40E_CLIENT_INSTANCE_NONE, &cinst->state); @@ -233,28 +233,28 @@ out: } /** - * i40evf_client_del_instance - removes a client instance from the list + * iavf_client_del_instance - removes a client instance from the list * @adapter: pointer to the board struct * **/ static -void i40evf_client_del_instance(struct i40evf_adapter *adapter) +void iavf_client_del_instance(struct iavf_adapter *adapter) { kfree(adapter->cinst); adapter->cinst = NULL; } /** - * i40evf_client_subtask - client maintenance work + * iavf_client_subtask - client maintenance work * 
@adapter: board private structure **/ -void i40evf_client_subtask(struct i40evf_adapter *adapter) +void iavf_client_subtask(struct iavf_adapter *adapter) { struct i40e_client *client = vf_registered_client; struct i40e_client_instance *cinst; int ret = 0; - if (adapter->state < __I40EVF_DOWN) + if (adapter->state < __IAVF_DOWN) return; /* first check client is registered */ @@ -262,7 +262,7 @@ void i40evf_client_subtask(struct i40evf_adapter *adapter) return; /* Add the client instance to the instance list */ - cinst = i40evf_client_add_instance(adapter); + cinst = iavf_client_add_instance(adapter); if (!cinst) return; @@ -279,23 +279,23 @@ void i40evf_client_subtask(struct i40evf_adapter *adapter) &cinst->state); else /* remove client instance */ - i40evf_client_del_instance(adapter); + iavf_client_del_instance(adapter); } } /** - * i40evf_lan_add_device - add a lan device struct to the list of lan devices + * iavf_lan_add_device - add a lan device struct to the list of lan devices * @adapter: pointer to the board struct * * Returns 0 on success or non-0 on error **/ -int i40evf_lan_add_device(struct i40evf_adapter *adapter) +int iavf_lan_add_device(struct iavf_adapter *adapter) { struct i40e_device *ldev; int ret = 0; - mutex_lock(&i40evf_device_mutex); - list_for_each_entry(ldev, &i40evf_devices, list) { + mutex_lock(&iavf_device_mutex); + list_for_each_entry(ldev, &i40e_devices, list) { if (ldev->vf == adapter) { ret = -EEXIST; goto out; @@ -308,7 +308,7 @@ int i40evf_lan_add_device(struct i40evf_adapter *adapter) } ldev->vf = adapter; INIT_LIST_HEAD(&ldev->list); - list_add(&ldev->list, &i40evf_devices); + list_add(&ldev->list, &i40e_devices); dev_info(&adapter->pdev->dev, "Added LAN device bus=0x%02x dev=0x%02x func=0x%02x\n", adapter->hw.bus.bus_id, adapter->hw.bus.device, adapter->hw.bus.func); @@ -316,26 +316,26 @@ int i40evf_lan_add_device(struct i40evf_adapter *adapter) /* Since in some cases register may have happened before a device gets * added, we can schedule a subtask to go initiate the clients.
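 * The watchdog notices IAVF_FLAG_SERVICE_CLIENT_REQUESTED and runs
 * iavf_client_subtask(), which adds and opens the client instance once
 * a client has actually registered.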
*/ - adapter->flags |= I40EVF_FLAG_SERVICE_CLIENT_REQUESTED; + adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED; out: - mutex_unlock(&i40evf_device_mutex); + mutex_unlock(&iavf_device_mutex); return ret; } /** - * i40evf_lan_del_device - removes a lan device from the device list + * iavf_lan_del_device - removes a lan device from the device list * @adapter: pointer to the board struct * * Returns 0 on success or non-0 on error **/ -int i40evf_lan_del_device(struct i40evf_adapter *adapter) +int iavf_lan_del_device(struct iavf_adapter *adapter) { struct i40e_device *ldev, *tmp; int ret = -ENODEV; - mutex_lock(&i40evf_device_mutex); - list_for_each_entry_safe(ldev, tmp, &i40evf_devices, list) { + mutex_lock(&iavf_device_mutex); + list_for_each_entry_safe(ldev, tmp, &i40e_devices, list) { if (ldev->vf == adapter) { dev_info(&adapter->pdev->dev, "Deleted LAN device bus=0x%02x dev=0x%02x func=0x%02x\n", @@ -348,23 +348,23 @@ int i40evf_lan_del_device(struct i40evf_adapter *adapter) } } - mutex_unlock(&i40evf_device_mutex); + mutex_unlock(&iavf_device_mutex); return ret; } /** - * i40evf_client_release - release client specific resources + * iavf_client_release - release client specific resources * @client: pointer to the registered client * **/ -static void i40evf_client_release(struct i40e_client *client) +static void iavf_client_release(struct i40e_client *client) { struct i40e_client_instance *cinst; struct i40e_device *ldev; - struct i40evf_adapter *adapter; + struct iavf_adapter *adapter; - mutex_lock(&i40evf_device_mutex); - list_for_each_entry(ldev, &i40evf_devices, list) { + mutex_lock(&iavf_device_mutex); + list_for_each_entry(ldev, &i40e_devices, list) { adapter = ldev->vf; cinst = adapter->cinst; if (!cinst) @@ -373,41 +373,41 @@ static void i40evf_client_release(struct i40e_client *client) if (client->ops && client->ops->close) client->ops->close(&cinst->lan_info, client, false); - i40evf_client_release_qvlist(&cinst->lan_info); + iavf_client_release_qvlist(&cinst->lan_info); clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state); dev_warn(&adapter->pdev->dev, "Client %s instance closed\n", client->name); } /* delete the client instance */ - i40evf_client_del_instance(adapter); + iavf_client_del_instance(adapter); dev_info(&adapter->pdev->dev, "Deleted client instance of Client %s\n", client->name); } - mutex_unlock(&i40evf_device_mutex); + mutex_unlock(&iavf_device_mutex); } /** - * i40evf_client_prepare - prepare client specific resources + * iavf_client_prepare - prepare client specific resources * @client: pointer to the registered client * **/ -static void i40evf_client_prepare(struct i40e_client *client) +static void iavf_client_prepare(struct i40e_client *client) { struct i40e_device *ldev; - struct i40evf_adapter *adapter; + struct iavf_adapter *adapter; - mutex_lock(&i40evf_device_mutex); - list_for_each_entry(ldev, &i40evf_devices, list) { + mutex_lock(&iavf_device_mutex); + list_for_each_entry(ldev, &i40e_devices, list) { adapter = ldev->vf; /* Signal the watchdog to service the client */ - adapter->flags |= I40EVF_FLAG_SERVICE_CLIENT_REQUESTED; + adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED; } - mutex_unlock(&i40evf_device_mutex); + mutex_unlock(&iavf_device_mutex); } /** - * i40evf_client_virtchnl_send - send a message to the PF instance + * iavf_client_virtchnl_send - send a message to the PF instance * @ldev: pointer to L2 context. * @client: Client pointer. 
* @msg: pointer to message buffer @@ -415,17 +415,17 @@ static void i40evf_client_prepare(struct i40e_client *client) * * Return 0 on success or < 0 on error **/ -static u32 i40evf_client_virtchnl_send(struct i40e_info *ldev, - struct i40e_client *client, - u8 *msg, u16 len) +static u32 iavf_client_virtchnl_send(struct i40e_info *ldev, + struct i40e_client *client, + u8 *msg, u16 len) { - struct i40evf_adapter *adapter = ldev->vf; - i40e_status err; + struct iavf_adapter *adapter = ldev->vf; + iavf_status err; if (adapter->aq_required) return -EAGAIN; - err = i40e_aq_send_msg_to_pf(&adapter->hw, VIRTCHNL_OP_IWARP, + err = iavf_aq_send_msg_to_pf(&adapter->hw, VIRTCHNL_OP_IWARP, I40E_SUCCESS, msg, len, NULL); if (err) dev_err(&adapter->pdev->dev, "Unable to send iWarp message to PF, error %d, aq status %d\n", @@ -435,21 +435,21 @@ static u32 i40evf_client_virtchnl_send(struct i40e_info *ldev, } /** - * i40evf_client_setup_qvlist - send a message to the PF to setup iwarp qv map + * iavf_client_setup_qvlist - send a message to the PF to setup iwarp qv map * @ldev: pointer to L2 context. * @client: Client pointer. * @qvlist_info: queue and vector list * * Return 0 on success or < 0 on error **/ -static int i40evf_client_setup_qvlist(struct i40e_info *ldev, - struct i40e_client *client, - struct i40e_qvlist_info *qvlist_info) +static int iavf_client_setup_qvlist(struct i40e_info *ldev, + struct i40e_client *client, + struct i40e_qvlist_info *qvlist_info) { struct virtchnl_iwarp_qvlist_info *v_qvlist_info; - struct i40evf_adapter *adapter = ldev->vf; + struct iavf_adapter *adapter = ldev->vf; struct i40e_qv_info *qv_info; - i40e_status err; + iavf_status err; u32 v_idx, i; u32 msg_size; @@ -474,9 +474,9 @@ static int i40evf_client_setup_qvlist(struct i40e_info *ldev, (v_qvlist_info->num_vectors - 1)); adapter->client_pending |= BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP); - err = i40e_aq_send_msg_to_pf(&adapter->hw, - VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP, - I40E_SUCCESS, (u8 *)v_qvlist_info, msg_size, NULL); + err = iavf_aq_send_msg_to_pf(&adapter->hw, + VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP, I40E_SUCCESS, + (u8 *)v_qvlist_info, msg_size, NULL); if (err) { dev_err(&adapter->pdev->dev, @@ -499,12 +499,12 @@ out: } /** - * i40evf_register_client - Register a i40e client driver with the L2 driver + * iavf_register_client - Register a i40e client driver with the L2 driver * @client: pointer to the i40e_client struct * * Returns 0 on success or non-0 on error **/ -int i40evf_register_client(struct i40e_client *client) +int iavf_register_client(struct i40e_client *client) { int ret = 0; @@ -514,48 +514,48 @@ int i40evf_register_client(struct i40e_client *client) } if (strlen(client->name) == 0) { - pr_info("i40evf: Failed to register client with no name\n"); + pr_info("iavf: Failed to register client with no name\n"); ret = -EIO; goto out; } if (vf_registered_client) { - pr_info("i40evf: Client %s has already been registered!\n", + pr_info("iavf: Client %s has already been registered!\n", client->name); ret = -EEXIST; goto out; } - if ((client->version.major != I40EVF_CLIENT_VERSION_MAJOR) || - (client->version.minor != I40EVF_CLIENT_VERSION_MINOR)) { - pr_info("i40evf: Failed to register client %s due to mismatched client interface version\n", + if ((client->version.major != IAVF_CLIENT_VERSION_MAJOR) || + (client->version.minor != IAVF_CLIENT_VERSION_MINOR)) { + pr_info("iavf: Failed to register client %s due to mismatched client interface version\n", client->name); pr_info("Client is using version: %02d.%02d.%02d 
while LAN driver supports %s\n", client->version.major, client->version.minor, client->version.build, - i40evf_client_interface_version_str); + iavf_client_interface_version_str); ret = -EIO; goto out; } vf_registered_client = client; - i40evf_client_prepare(client); + iavf_client_prepare(client); - pr_info("i40evf: Registered client %s with return code %d\n", + pr_info("iavf: Registered client %s with return code %d\n", client->name, ret); out: return ret; } -EXPORT_SYMBOL(i40evf_register_client); +EXPORT_SYMBOL(iavf_register_client); /** - * i40evf_unregister_client - Unregister a i40e client driver with the L2 driver + * iavf_unregister_client - Unregister a i40e client driver with the L2 driver * @client: pointer to the i40e_client struct * * Returns 0 on success or non-0 on error **/ -int i40evf_unregister_client(struct i40e_client *client) +int iavf_unregister_client(struct i40e_client *client) { int ret = 0; @@ -563,17 +563,17 @@ int i40evf_unregister_client(struct i40e_client *client) * a close for each of the client instances that were opened. * client_release function is called to handle this. */ - i40evf_client_release(client); + iavf_client_release(client); if (vf_registered_client != client) { - pr_info("i40evf: Client %s has not been registered\n", + pr_info("iavf: Client %s has not been registered\n", client->name); ret = -ENODEV; goto out; } vf_registered_client = NULL; - pr_info("i40evf: Unregistered client %s\n", client->name); + pr_info("iavf: Unregistered client %s\n", client->name); out: return ret; } -EXPORT_SYMBOL(i40evf_unregister_client); +EXPORT_SYMBOL(iavf_unregister_client); diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_client.h b/drivers/net/ethernet/intel/iavf/iavf_client.h index 5585f362048a..e216fc9dfd81 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_client.h +++ b/drivers/net/ethernet/intel/iavf/iavf_client.h @@ -1,21 +1,21 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright(c) 2013 - 2018 Intel Corporation. */ -#ifndef _I40EVF_CLIENT_H_ -#define _I40EVF_CLIENT_H_ +#ifndef _IAVF_CLIENT_H_ +#define _IAVF_CLIENT_H_ -#define I40EVF_CLIENT_STR_LENGTH 10 +#define IAVF_CLIENT_STR_LENGTH 10 /* Client interface version should be updated anytime there is a change in the * existing APIs or data structures. */ -#define I40EVF_CLIENT_VERSION_MAJOR 0 -#define I40EVF_CLIENT_VERSION_MINOR 01 -#define I40EVF_CLIENT_VERSION_BUILD 00 -#define I40EVF_CLIENT_VERSION_STR \ - __stringify(I40EVF_CLIENT_VERSION_MAJOR) "." \ - __stringify(I40EVF_CLIENT_VERSION_MINOR) "." \ - __stringify(I40EVF_CLIENT_VERSION_BUILD) +#define IAVF_CLIENT_VERSION_MAJOR 0 +#define IAVF_CLIENT_VERSION_MINOR 01 +#define IAVF_CLIENT_VERSION_BUILD 00 +#define IAVF_CLIENT_VERSION_STR \ + __stringify(IAVF_CLIENT_VERSION_MAJOR) "." \ + __stringify(IAVF_CLIENT_VERSION_MINOR) "." 
\ + __stringify(IAVF_CLIENT_VERSION_BUILD) struct i40e_client_version { u8 major; @@ -90,7 +90,7 @@ struct i40e_info { #define I40E_CLIENT_FTYPE_PF 0 #define I40E_CLIENT_FTYPE_VF 1 u8 ftype; /* function type, PF or VF */ - void *vf; /* cast to i40evf_adapter */ + void *vf; /* cast to iavf_adapter */ /* All L2 params that could change during the life span of the device * and needs to be communicated to the client when they change @@ -151,7 +151,7 @@ struct i40e_client_instance { struct i40e_client { struct list_head list; /* list of registered clients */ - char name[I40EVF_CLIENT_STR_LENGTH]; + char name[IAVF_CLIENT_STR_LENGTH]; struct i40e_client_version version; unsigned long state; /* client state */ atomic_t ref_cnt; /* Count of all the client devices of this kind */ @@ -164,6 +164,6 @@ struct i40e_client { }; /* used by clients */ -int i40evf_register_client(struct i40e_client *client); -int i40evf_unregister_client(struct i40e_client *client); -#endif /* _I40EVF_CLIENT_H_ */ +int iavf_register_client(struct i40e_client *client); +int iavf_unregister_client(struct i40e_client *client); +#endif /* _IAVF_CLIENT_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_common.c b/drivers/net/ethernet/intel/iavf/iavf_common.c new file mode 100644 index 000000000000..768369c89e77 --- /dev/null +++ b/drivers/net/ethernet/intel/iavf/iavf_common.c @@ -0,0 +1,955 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2013 - 2018 Intel Corporation. */ + +#include "iavf_type.h" +#include "i40e_adminq.h" +#include "iavf_prototype.h" +#include <linux/avf/virtchnl.h> + +/** + * iavf_set_mac_type - Sets MAC type + * @hw: pointer to the HW structure + * + * This function sets the mac type of the adapter based on the + * vendor ID and device ID stored in the hw structure. 
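+ *
+ * A probe-time caller sketch (assumed usage, not code from this patch):
+ *
+ *	hw->vendor_id = pdev->vendor;
+ *	hw->device_id = pdev->device;
+ *	if (iavf_set_mac_type(hw))
+ *		dev_err(&pdev->dev, "unsupported VF device\n");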
+ **/ +iavf_status iavf_set_mac_type(struct iavf_hw *hw) +{ + iavf_status status = 0; + + if (hw->vendor_id == PCI_VENDOR_ID_INTEL) { + switch (hw->device_id) { + case IAVF_DEV_ID_X722_VF: + hw->mac.type = IAVF_MAC_X722_VF; + break; + case IAVF_DEV_ID_VF: + case IAVF_DEV_ID_VF_HV: + case IAVF_DEV_ID_ADAPTIVE_VF: + hw->mac.type = IAVF_MAC_VF; + break; + default: + hw->mac.type = IAVF_MAC_GENERIC; + break; + } + } else { + status = I40E_ERR_DEVICE_NOT_SUPPORTED; + } + + hw_dbg(hw, "found mac: %d, returns: %d\n", hw->mac.type, status); + return status; +} + +/** + * iavf_aq_str - convert AQ err code to a string + * @hw: pointer to the HW structure + * @aq_err: the AQ error code to convert + **/ +const char *iavf_aq_str(struct iavf_hw *hw, enum i40e_admin_queue_err aq_err) +{ + switch (aq_err) { + case I40E_AQ_RC_OK: + return "OK"; + case I40E_AQ_RC_EPERM: + return "I40E_AQ_RC_EPERM"; + case I40E_AQ_RC_ENOENT: + return "I40E_AQ_RC_ENOENT"; + case I40E_AQ_RC_ESRCH: + return "I40E_AQ_RC_ESRCH"; + case I40E_AQ_RC_EINTR: + return "I40E_AQ_RC_EINTR"; + case I40E_AQ_RC_EIO: + return "I40E_AQ_RC_EIO"; + case I40E_AQ_RC_ENXIO: + return "I40E_AQ_RC_ENXIO"; + case I40E_AQ_RC_E2BIG: + return "I40E_AQ_RC_E2BIG"; + case I40E_AQ_RC_EAGAIN: + return "I40E_AQ_RC_EAGAIN"; + case I40E_AQ_RC_ENOMEM: + return "I40E_AQ_RC_ENOMEM"; + case I40E_AQ_RC_EACCES: + return "I40E_AQ_RC_EACCES"; + case I40E_AQ_RC_EFAULT: + return "I40E_AQ_RC_EFAULT"; + case I40E_AQ_RC_EBUSY: + return "I40E_AQ_RC_EBUSY"; + case I40E_AQ_RC_EEXIST: + return "I40E_AQ_RC_EEXIST"; + case I40E_AQ_RC_EINVAL: + return "I40E_AQ_RC_EINVAL"; + case I40E_AQ_RC_ENOTTY: + return "I40E_AQ_RC_ENOTTY"; + case I40E_AQ_RC_ENOSPC: + return "I40E_AQ_RC_ENOSPC"; + case I40E_AQ_RC_ENOSYS: + return "I40E_AQ_RC_ENOSYS"; + case I40E_AQ_RC_ERANGE: + return "I40E_AQ_RC_ERANGE"; + case I40E_AQ_RC_EFLUSHED: + return "I40E_AQ_RC_EFLUSHED"; + case I40E_AQ_RC_BAD_ADDR: + return "I40E_AQ_RC_BAD_ADDR"; + case I40E_AQ_RC_EMODE: + return "I40E_AQ_RC_EMODE"; + case I40E_AQ_RC_EFBIG: + return "I40E_AQ_RC_EFBIG"; + } + + snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err); + return hw->err_str; +} + +/** + * iavf_stat_str - convert status err code to a string + * @hw: pointer to the HW structure + * @stat_err: the status error code to convert + **/ +const char *iavf_stat_str(struct iavf_hw *hw, iavf_status stat_err) +{ + switch (stat_err) { + case 0: + return "OK"; + case I40E_ERR_NVM: + return "I40E_ERR_NVM"; + case I40E_ERR_NVM_CHECKSUM: + return "I40E_ERR_NVM_CHECKSUM"; + case I40E_ERR_PHY: + return "I40E_ERR_PHY"; + case I40E_ERR_CONFIG: + return "I40E_ERR_CONFIG"; + case I40E_ERR_PARAM: + return "I40E_ERR_PARAM"; + case I40E_ERR_MAC_TYPE: + return "I40E_ERR_MAC_TYPE"; + case I40E_ERR_UNKNOWN_PHY: + return "I40E_ERR_UNKNOWN_PHY"; + case I40E_ERR_LINK_SETUP: + return "I40E_ERR_LINK_SETUP"; + case I40E_ERR_ADAPTER_STOPPED: + return "I40E_ERR_ADAPTER_STOPPED"; + case I40E_ERR_INVALID_MAC_ADDR: + return "I40E_ERR_INVALID_MAC_ADDR"; + case I40E_ERR_DEVICE_NOT_SUPPORTED: + return "I40E_ERR_DEVICE_NOT_SUPPORTED"; + case I40E_ERR_MASTER_REQUESTS_PENDING: + return "I40E_ERR_MASTER_REQUESTS_PENDING"; + case I40E_ERR_INVALID_LINK_SETTINGS: + return "I40E_ERR_INVALID_LINK_SETTINGS"; + case I40E_ERR_AUTONEG_NOT_COMPLETE: + return "I40E_ERR_AUTONEG_NOT_COMPLETE"; + case I40E_ERR_RESET_FAILED: + return "I40E_ERR_RESET_FAILED"; + case I40E_ERR_SWFW_SYNC: + return "I40E_ERR_SWFW_SYNC"; + case I40E_ERR_NO_AVAILABLE_VSI: + return "I40E_ERR_NO_AVAILABLE_VSI"; + case I40E_ERR_NO_MEMORY: + 
return "I40E_ERR_NO_MEMORY"; + case I40E_ERR_BAD_PTR: + return "I40E_ERR_BAD_PTR"; + case I40E_ERR_RING_FULL: + return "I40E_ERR_RING_FULL"; + case I40E_ERR_INVALID_PD_ID: + return "I40E_ERR_INVALID_PD_ID"; + case I40E_ERR_INVALID_QP_ID: + return "I40E_ERR_INVALID_QP_ID"; + case I40E_ERR_INVALID_CQ_ID: + return "I40E_ERR_INVALID_CQ_ID"; + case I40E_ERR_INVALID_CEQ_ID: + return "I40E_ERR_INVALID_CEQ_ID"; + case I40E_ERR_INVALID_AEQ_ID: + return "I40E_ERR_INVALID_AEQ_ID"; + case I40E_ERR_INVALID_SIZE: + return "I40E_ERR_INVALID_SIZE"; + case I40E_ERR_INVALID_ARP_INDEX: + return "I40E_ERR_INVALID_ARP_INDEX"; + case I40E_ERR_INVALID_FPM_FUNC_ID: + return "I40E_ERR_INVALID_FPM_FUNC_ID"; + case I40E_ERR_QP_INVALID_MSG_SIZE: + return "I40E_ERR_QP_INVALID_MSG_SIZE"; + case I40E_ERR_QP_TOOMANY_WRS_POSTED: + return "I40E_ERR_QP_TOOMANY_WRS_POSTED"; + case I40E_ERR_INVALID_FRAG_COUNT: + return "I40E_ERR_INVALID_FRAG_COUNT"; + case I40E_ERR_QUEUE_EMPTY: + return "I40E_ERR_QUEUE_EMPTY"; + case I40E_ERR_INVALID_ALIGNMENT: + return "I40E_ERR_INVALID_ALIGNMENT"; + case I40E_ERR_FLUSHED_QUEUE: + return "I40E_ERR_FLUSHED_QUEUE"; + case I40E_ERR_INVALID_PUSH_PAGE_INDEX: + return "I40E_ERR_INVALID_PUSH_PAGE_INDEX"; + case I40E_ERR_INVALID_IMM_DATA_SIZE: + return "I40E_ERR_INVALID_IMM_DATA_SIZE"; + case I40E_ERR_TIMEOUT: + return "I40E_ERR_TIMEOUT"; + case I40E_ERR_OPCODE_MISMATCH: + return "I40E_ERR_OPCODE_MISMATCH"; + case I40E_ERR_CQP_COMPL_ERROR: + return "I40E_ERR_CQP_COMPL_ERROR"; + case I40E_ERR_INVALID_VF_ID: + return "I40E_ERR_INVALID_VF_ID"; + case I40E_ERR_INVALID_HMCFN_ID: + return "I40E_ERR_INVALID_HMCFN_ID"; + case I40E_ERR_BACKING_PAGE_ERROR: + return "I40E_ERR_BACKING_PAGE_ERROR"; + case I40E_ERR_NO_PBLCHUNKS_AVAILABLE: + return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE"; + case I40E_ERR_INVALID_PBLE_INDEX: + return "I40E_ERR_INVALID_PBLE_INDEX"; + case I40E_ERR_INVALID_SD_INDEX: + return "I40E_ERR_INVALID_SD_INDEX"; + case I40E_ERR_INVALID_PAGE_DESC_INDEX: + return "I40E_ERR_INVALID_PAGE_DESC_INDEX"; + case I40E_ERR_INVALID_SD_TYPE: + return "I40E_ERR_INVALID_SD_TYPE"; + case I40E_ERR_MEMCPY_FAILED: + return "I40E_ERR_MEMCPY_FAILED"; + case I40E_ERR_INVALID_HMC_OBJ_INDEX: + return "I40E_ERR_INVALID_HMC_OBJ_INDEX"; + case I40E_ERR_INVALID_HMC_OBJ_COUNT: + return "I40E_ERR_INVALID_HMC_OBJ_COUNT"; + case I40E_ERR_INVALID_SRQ_ARM_LIMIT: + return "I40E_ERR_INVALID_SRQ_ARM_LIMIT"; + case I40E_ERR_SRQ_ENABLED: + return "I40E_ERR_SRQ_ENABLED"; + case I40E_ERR_ADMIN_QUEUE_ERROR: + return "I40E_ERR_ADMIN_QUEUE_ERROR"; + case I40E_ERR_ADMIN_QUEUE_TIMEOUT: + return "I40E_ERR_ADMIN_QUEUE_TIMEOUT"; + case I40E_ERR_BUF_TOO_SHORT: + return "I40E_ERR_BUF_TOO_SHORT"; + case I40E_ERR_ADMIN_QUEUE_FULL: + return "I40E_ERR_ADMIN_QUEUE_FULL"; + case I40E_ERR_ADMIN_QUEUE_NO_WORK: + return "I40E_ERR_ADMIN_QUEUE_NO_WORK"; + case I40E_ERR_BAD_IWARP_CQE: + return "I40E_ERR_BAD_IWARP_CQE"; + case I40E_ERR_NVM_BLANK_MODE: + return "I40E_ERR_NVM_BLANK_MODE"; + case I40E_ERR_NOT_IMPLEMENTED: + return "I40E_ERR_NOT_IMPLEMENTED"; + case I40E_ERR_PE_DOORBELL_NOT_ENABLED: + return "I40E_ERR_PE_DOORBELL_NOT_ENABLED"; + case I40E_ERR_DIAG_TEST_FAILED: + return "I40E_ERR_DIAG_TEST_FAILED"; + case I40E_ERR_NOT_READY: + return "I40E_ERR_NOT_READY"; + case I40E_NOT_SUPPORTED: + return "I40E_NOT_SUPPORTED"; + case I40E_ERR_FIRMWARE_API_VERSION: + return "I40E_ERR_FIRMWARE_API_VERSION"; + case I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR: + return "I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR"; + } + + snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err); + 
return hw->err_str; +} + +/** + * iavf_debug_aq + * @hw: debug mask related to admin queue + * @mask: debug mask + * @desc: pointer to admin queue descriptor + * @buffer: pointer to command buffer + * @buf_len: max length of buffer + * + * Dumps debug log about adminq command with descriptor contents. + **/ +void iavf_debug_aq(struct iavf_hw *hw, enum iavf_debug_mask mask, void *desc, + void *buffer, u16 buf_len) +{ + struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc; + u8 *buf = (u8 *)buffer; + + if ((!(mask & hw->debug_mask)) || !desc) + return; + + iavf_debug(hw, mask, + "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n", + le16_to_cpu(aq_desc->opcode), + le16_to_cpu(aq_desc->flags), + le16_to_cpu(aq_desc->datalen), + le16_to_cpu(aq_desc->retval)); + iavf_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n", + le32_to_cpu(aq_desc->cookie_high), + le32_to_cpu(aq_desc->cookie_low)); + iavf_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n", + le32_to_cpu(aq_desc->params.internal.param0), + le32_to_cpu(aq_desc->params.internal.param1)); + iavf_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n", + le32_to_cpu(aq_desc->params.external.addr_high), + le32_to_cpu(aq_desc->params.external.addr_low)); + + if (buffer && aq_desc->datalen) { + u16 len = le16_to_cpu(aq_desc->datalen); + + iavf_debug(hw, mask, "AQ CMD Buffer:\n"); + if (buf_len < len) + len = buf_len; + /* write the full 16-byte chunks */ + if (hw->debug_mask & mask) { + char prefix[27]; + + snprintf(prefix, sizeof(prefix), + "iavf %02x:%02x.%x: \t0x", + hw->bus.bus_id, + hw->bus.device, + hw->bus.func); + + print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET, + 16, 1, buf, len, false); + } + } +} + +/** + * iavf_check_asq_alive + * @hw: pointer to the hw struct + * + * Returns true if Queue is enabled else false. + **/ +bool iavf_check_asq_alive(struct iavf_hw *hw) +{ + if (hw->aq.asq.len) + return !!(rd32(hw, hw->aq.asq.len) & + IAVF_VF_ATQLEN1_ATQENABLE_MASK); + else + return false; +} + +/** + * iavf_aq_queue_shutdown + * @hw: pointer to the hw struct + * @unloading: is the driver unloading itself + * + * Tell the Firmware that we're shutting down the AdminQ and whether + * or not the driver is unloading as well. 
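+ *
+ * A teardown sketch (assumed caller, not code from this patch):
+ *
+ *	if (iavf_check_asq_alive(hw))
+ *		iavf_aq_queue_shutdown(hw, true);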
+ **/ +iavf_status iavf_aq_queue_shutdown(struct iavf_hw *hw, bool unloading) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_queue_shutdown *cmd = + (struct i40e_aqc_queue_shutdown *)&desc.params.raw; + iavf_status status; + + iavf_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_queue_shutdown); + + if (unloading) + cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING); + status = iavf_asq_send_command(hw, &desc, NULL, 0, NULL); + + return status; +} + +/** + * iavf_aq_get_set_rss_lut + * @hw: pointer to the hardware structure + * @vsi_id: vsi fw index + * @pf_lut: for PF table set true, for VSI table set false + * @lut: pointer to the lut buffer provided by the caller + * @lut_size: size of the lut buffer + * @set: set true to set the table, false to get the table + * + * Internal function to get or set RSS look up table + **/ +static iavf_status iavf_aq_get_set_rss_lut(struct iavf_hw *hw, + u16 vsi_id, bool pf_lut, + u8 *lut, u16 lut_size, + bool set) +{ + iavf_status status; + struct i40e_aq_desc desc; + struct i40e_aqc_get_set_rss_lut *cmd_resp = + (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw; + + if (set) + iavf_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_rss_lut); + else + iavf_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_get_rss_lut); + + /* Indirect command */ + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); + + cmd_resp->vsi_id = + cpu_to_le16((u16)((vsi_id << + I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) & + I40E_AQC_SET_RSS_LUT_VSI_ID_MASK)); + cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID); + + if (pf_lut) + cmd_resp->flags |= cpu_to_le16((u16) + ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF << + I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) & + I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK)); + else + cmd_resp->flags |= cpu_to_le16((u16) + ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI << + I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) & + I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK)); + + status = iavf_asq_send_command(hw, &desc, lut, lut_size, NULL); + + return status; +} + +/** + * iavf_aq_get_rss_lut + * @hw: pointer to the hardware structure + * @vsi_id: vsi fw index + * @pf_lut: for PF table set true, for VSI table set false + * @lut: pointer to the lut buffer provided by the caller + * @lut_size: size of the lut buffer + * + * get the RSS lookup table, PF or VSI type + **/ +iavf_status iavf_aq_get_rss_lut(struct iavf_hw *hw, u16 vsi_id, + bool pf_lut, u8 *lut, u16 lut_size) +{ + return iavf_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, + false); +} + +/** + * iavf_aq_set_rss_lut + * @hw: pointer to the hardware structure + * @vsi_id: vsi fw index + * @pf_lut: for PF table set true, for VSI table set false + * @lut: pointer to the lut buffer provided by the caller + * @lut_size: size of the lut buffer + * + * set the RSS lookup table, PF or VSI type + **/ +iavf_status iavf_aq_set_rss_lut(struct iavf_hw *hw, u16 vsi_id, + bool pf_lut, u8 *lut, u16 lut_size) +{ + return iavf_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true); +} + +/** + * iavf_aq_get_set_rss_key + * @hw: pointer to the hw struct + * @vsi_id: vsi fw index + * @key: pointer to key info struct + * @set: set true to set the key, false to get the key + * + * get the RSS key per VSI + **/ +static +iavf_status iavf_aq_get_set_rss_key(struct iavf_hw *hw, u16 vsi_id, + struct i40e_aqc_get_set_rss_key_data *key, + bool set) +{ + iavf_status status; + struct i40e_aq_desc desc; + struct i40e_aqc_get_set_rss_key *cmd_resp = + (struct 
i40e_aqc_get_set_rss_key *)&desc.params.raw; + u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data); + + if (set) + iavf_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_rss_key); + else + iavf_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_get_rss_key); + + /* Indirect command */ + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); + + cmd_resp->vsi_id = + cpu_to_le16((u16)((vsi_id << + I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) & + I40E_AQC_SET_RSS_KEY_VSI_ID_MASK)); + cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID); + + status = iavf_asq_send_command(hw, &desc, key, key_size, NULL); + + return status; +} + +/** + * iavf_aq_get_rss_key + * @hw: pointer to the hw struct + * @vsi_id: vsi fw index + * @key: pointer to key info struct + * + **/ +iavf_status iavf_aq_get_rss_key(struct iavf_hw *hw, u16 vsi_id, + struct i40e_aqc_get_set_rss_key_data *key) +{ + return iavf_aq_get_set_rss_key(hw, vsi_id, key, false); +} + +/** + * iavf_aq_set_rss_key + * @hw: pointer to the hw struct + * @vsi_id: vsi fw index + * @key: pointer to key info struct + * + * set the RSS key per VSI + **/ +iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw, u16 vsi_id, + struct i40e_aqc_get_set_rss_key_data *key) +{ + return iavf_aq_get_set_rss_key(hw, vsi_id, key, true); +} + +/* The iavf_ptype_lookup table is used to convert from the 8-bit ptype in the + * hardware to a bit-field that can be used by SW to more easily determine the + * packet type. + * + * Macros are used to shorten the table lines and make this table human + * readable. + * + * We store the PTYPE in the top byte of the bit field - this is just so that + * we can check that the table doesn't have a row missing, as the index into + * the table should be the PTYPE. 
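+ * That invariant makes a cheap self-check possible; a sketch (not code
+ * from this patch):
+ *
+ *	for (i = 0; i < ARRAY_SIZE(iavf_ptype_lookup); i++)
+ *		WARN_ON(iavf_ptype_lookup[i].ptype != i);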
+ * + * Typical work flow: + * + * IF NOT iavf_ptype_lookup[ptype].known + * THEN + * Packet is unknown + * ELSE IF iavf_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP + * Use the rest of the fields to look at the tunnels, inner protocols, etc + * ELSE + * Use the enum iavf_rx_l2_ptype to decode the packet type + * ENDIF + */ + +/* macro to make the table lines short */ +#define IAVF_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\ + { PTYPE, \ + 1, \ + IAVF_RX_PTYPE_OUTER_##OUTER_IP, \ + IAVF_RX_PTYPE_OUTER_##OUTER_IP_VER, \ + IAVF_RX_PTYPE_##OUTER_FRAG, \ + IAVF_RX_PTYPE_TUNNEL_##T, \ + IAVF_RX_PTYPE_TUNNEL_END_##TE, \ + IAVF_RX_PTYPE_##TEF, \ + IAVF_RX_PTYPE_INNER_PROT_##I, \ + IAVF_RX_PTYPE_PAYLOAD_LAYER_##PL } + +#define IAVF_PTT_UNUSED_ENTRY(PTYPE) \ + { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 } + +/* shorter macros makes the table fit but are terse */ +#define IAVF_RX_PTYPE_NOF IAVF_RX_PTYPE_NOT_FRAG +#define IAVF_RX_PTYPE_FRG IAVF_RX_PTYPE_FRAG +#define IAVF_RX_PTYPE_INNER_PROT_TS IAVF_RX_PTYPE_INNER_PROT_TIMESYNC + +/* Lookup table mapping the HW PTYPE to the bit field for decoding */ +struct iavf_rx_ptype_decoded iavf_ptype_lookup[] = { + /* L2 Packet types */ + IAVF_PTT_UNUSED_ENTRY(0), + IAVF_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), + IAVF_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2), + IAVF_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), + IAVF_PTT_UNUSED_ENTRY(4), + IAVF_PTT_UNUSED_ENTRY(5), + IAVF_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), + IAVF_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), + IAVF_PTT_UNUSED_ENTRY(8), + IAVF_PTT_UNUSED_ENTRY(9), + IAVF_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), + IAVF_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), + IAVF_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + IAVF_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + IAVF_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + IAVF_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + IAVF_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + IAVF_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + IAVF_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + IAVF_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + IAVF_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + IAVF_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + + /* Non Tunneled IPv4 */ + IAVF_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3), + IAVF_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3), + IAVF_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4), + IAVF_PTT_UNUSED_ENTRY(25), + IAVF_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4), + IAVF_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4), + IAVF_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4), + + /* IPv4 --> IPv4 */ + IAVF_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3), + IAVF_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3), + IAVF_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4), + IAVF_PTT_UNUSED_ENTRY(32), + IAVF_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4), + IAVF_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), + IAVF_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), + + /* IPv4 --> IPv6 */ + IAVF_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3), + IAVF_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3), + IAVF_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4), + IAVF_PTT_UNUSED_ENTRY(39), + IAVF_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4), + IAVF_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), + 
IAVF_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT */ + IAVF_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), + + /* IPv4 --> GRE/NAT --> IPv4 */ + IAVF_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), + IAVF_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), + IAVF_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), + IAVF_PTT_UNUSED_ENTRY(47), + IAVF_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), + IAVF_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), + IAVF_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT --> IPv6 */ + IAVF_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), + IAVF_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), + IAVF_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), + IAVF_PTT_UNUSED_ENTRY(54), + IAVF_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), + IAVF_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), + IAVF_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT --> MAC */ + IAVF_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), + + /* IPv4 --> GRE/NAT --> MAC --> IPv4 */ + IAVF_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), + IAVF_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), + IAVF_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), + IAVF_PTT_UNUSED_ENTRY(62), + IAVF_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), + IAVF_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), + IAVF_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT -> MAC --> IPv6 */ + IAVF_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), + IAVF_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), + IAVF_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), + IAVF_PTT_UNUSED_ENTRY(69), + IAVF_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), + IAVF_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), + IAVF_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT --> MAC/VLAN */ + IAVF_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), + + /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */ + IAVF_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), + IAVF_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), + IAVF_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), + IAVF_PTT_UNUSED_ENTRY(77), + IAVF_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), + IAVF_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), + IAVF_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), + + /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */ + IAVF_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), + IAVF_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), + IAVF_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), + IAVF_PTT_UNUSED_ENTRY(84), + IAVF_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), + IAVF_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), + IAVF_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), + + /* Non Tunneled IPv6 */ + IAVF_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3), + IAVF_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3), + IAVF_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3), + IAVF_PTT_UNUSED_ENTRY(91), + IAVF_PTT(92, IP, IPV6, NOF, NONE, 
NONE, NOF, TCP, PAY4), + IAVF_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4), + IAVF_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4), + + /* IPv6 --> IPv4 */ + IAVF_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3), + IAVF_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3), + IAVF_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4), + IAVF_PTT_UNUSED_ENTRY(98), + IAVF_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4), + IAVF_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), + IAVF_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), + + /* IPv6 --> IPv6 */ + IAVF_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3), + IAVF_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3), + IAVF_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4), + IAVF_PTT_UNUSED_ENTRY(105), + IAVF_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4), + IAVF_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), + IAVF_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT */ + IAVF_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), + + /* IPv6 --> GRE/NAT -> IPv4 */ + IAVF_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), + IAVF_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), + IAVF_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), + IAVF_PTT_UNUSED_ENTRY(113), + IAVF_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), + IAVF_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), + IAVF_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> IPv6 */ + IAVF_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), + IAVF_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), + IAVF_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), + IAVF_PTT_UNUSED_ENTRY(120), + IAVF_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), + IAVF_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), + IAVF_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> MAC */ + IAVF_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), + + /* IPv6 --> GRE/NAT -> MAC -> IPv4 */ + IAVF_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), + IAVF_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), + IAVF_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), + IAVF_PTT_UNUSED_ENTRY(128), + IAVF_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), + IAVF_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), + IAVF_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> MAC -> IPv6 */ + IAVF_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), + IAVF_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), + IAVF_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), + IAVF_PTT_UNUSED_ENTRY(135), + IAVF_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), + IAVF_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), + IAVF_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> MAC/VLAN */ + IAVF_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), + + /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */ + IAVF_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), + IAVF_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), + IAVF_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), + IAVF_PTT_UNUSED_ENTRY(143), + IAVF_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, 
NOF, TCP, PAY4), + IAVF_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), + IAVF_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */ + IAVF_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), + IAVF_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), + IAVF_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), + IAVF_PTT_UNUSED_ENTRY(150), + IAVF_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), + IAVF_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), + IAVF_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), + + /* unused entries */ + IAVF_PTT_UNUSED_ENTRY(154), + IAVF_PTT_UNUSED_ENTRY(155), + IAVF_PTT_UNUSED_ENTRY(156), + IAVF_PTT_UNUSED_ENTRY(157), + IAVF_PTT_UNUSED_ENTRY(158), + IAVF_PTT_UNUSED_ENTRY(159), + + IAVF_PTT_UNUSED_ENTRY(160), + IAVF_PTT_UNUSED_ENTRY(161), + IAVF_PTT_UNUSED_ENTRY(162), + IAVF_PTT_UNUSED_ENTRY(163), + IAVF_PTT_UNUSED_ENTRY(164), + IAVF_PTT_UNUSED_ENTRY(165), + IAVF_PTT_UNUSED_ENTRY(166), + IAVF_PTT_UNUSED_ENTRY(167), + IAVF_PTT_UNUSED_ENTRY(168), + IAVF_PTT_UNUSED_ENTRY(169), + + IAVF_PTT_UNUSED_ENTRY(170), + IAVF_PTT_UNUSED_ENTRY(171), + IAVF_PTT_UNUSED_ENTRY(172), + IAVF_PTT_UNUSED_ENTRY(173), + IAVF_PTT_UNUSED_ENTRY(174), + IAVF_PTT_UNUSED_ENTRY(175), + IAVF_PTT_UNUSED_ENTRY(176), + IAVF_PTT_UNUSED_ENTRY(177), + IAVF_PTT_UNUSED_ENTRY(178), + IAVF_PTT_UNUSED_ENTRY(179), + + IAVF_PTT_UNUSED_ENTRY(180), + IAVF_PTT_UNUSED_ENTRY(181), + IAVF_PTT_UNUSED_ENTRY(182), + IAVF_PTT_UNUSED_ENTRY(183), + IAVF_PTT_UNUSED_ENTRY(184), + IAVF_PTT_UNUSED_ENTRY(185), + IAVF_PTT_UNUSED_ENTRY(186), + IAVF_PTT_UNUSED_ENTRY(187), + IAVF_PTT_UNUSED_ENTRY(188), + IAVF_PTT_UNUSED_ENTRY(189), + + IAVF_PTT_UNUSED_ENTRY(190), + IAVF_PTT_UNUSED_ENTRY(191), + IAVF_PTT_UNUSED_ENTRY(192), + IAVF_PTT_UNUSED_ENTRY(193), + IAVF_PTT_UNUSED_ENTRY(194), + IAVF_PTT_UNUSED_ENTRY(195), + IAVF_PTT_UNUSED_ENTRY(196), + IAVF_PTT_UNUSED_ENTRY(197), + IAVF_PTT_UNUSED_ENTRY(198), + IAVF_PTT_UNUSED_ENTRY(199), + + IAVF_PTT_UNUSED_ENTRY(200), + IAVF_PTT_UNUSED_ENTRY(201), + IAVF_PTT_UNUSED_ENTRY(202), + IAVF_PTT_UNUSED_ENTRY(203), + IAVF_PTT_UNUSED_ENTRY(204), + IAVF_PTT_UNUSED_ENTRY(205), + IAVF_PTT_UNUSED_ENTRY(206), + IAVF_PTT_UNUSED_ENTRY(207), + IAVF_PTT_UNUSED_ENTRY(208), + IAVF_PTT_UNUSED_ENTRY(209), + + IAVF_PTT_UNUSED_ENTRY(210), + IAVF_PTT_UNUSED_ENTRY(211), + IAVF_PTT_UNUSED_ENTRY(212), + IAVF_PTT_UNUSED_ENTRY(213), + IAVF_PTT_UNUSED_ENTRY(214), + IAVF_PTT_UNUSED_ENTRY(215), + IAVF_PTT_UNUSED_ENTRY(216), + IAVF_PTT_UNUSED_ENTRY(217), + IAVF_PTT_UNUSED_ENTRY(218), + IAVF_PTT_UNUSED_ENTRY(219), + + IAVF_PTT_UNUSED_ENTRY(220), + IAVF_PTT_UNUSED_ENTRY(221), + IAVF_PTT_UNUSED_ENTRY(222), + IAVF_PTT_UNUSED_ENTRY(223), + IAVF_PTT_UNUSED_ENTRY(224), + IAVF_PTT_UNUSED_ENTRY(225), + IAVF_PTT_UNUSED_ENTRY(226), + IAVF_PTT_UNUSED_ENTRY(227), + IAVF_PTT_UNUSED_ENTRY(228), + IAVF_PTT_UNUSED_ENTRY(229), + + IAVF_PTT_UNUSED_ENTRY(230), + IAVF_PTT_UNUSED_ENTRY(231), + IAVF_PTT_UNUSED_ENTRY(232), + IAVF_PTT_UNUSED_ENTRY(233), + IAVF_PTT_UNUSED_ENTRY(234), + IAVF_PTT_UNUSED_ENTRY(235), + IAVF_PTT_UNUSED_ENTRY(236), + IAVF_PTT_UNUSED_ENTRY(237), + IAVF_PTT_UNUSED_ENTRY(238), + IAVF_PTT_UNUSED_ENTRY(239), + + IAVF_PTT_UNUSED_ENTRY(240), + IAVF_PTT_UNUSED_ENTRY(241), + IAVF_PTT_UNUSED_ENTRY(242), + IAVF_PTT_UNUSED_ENTRY(243), + IAVF_PTT_UNUSED_ENTRY(244), + IAVF_PTT_UNUSED_ENTRY(245), + IAVF_PTT_UNUSED_ENTRY(246), + 
IAVF_PTT_UNUSED_ENTRY(247), + IAVF_PTT_UNUSED_ENTRY(248), + IAVF_PTT_UNUSED_ENTRY(249), + + IAVF_PTT_UNUSED_ENTRY(250), + IAVF_PTT_UNUSED_ENTRY(251), + IAVF_PTT_UNUSED_ENTRY(252), + IAVF_PTT_UNUSED_ENTRY(253), + IAVF_PTT_UNUSED_ENTRY(254), + IAVF_PTT_UNUSED_ENTRY(255) +}; + +/** + * iavf_aq_send_msg_to_pf + * @hw: pointer to the hardware structure + * @v_opcode: opcodes for VF-PF communication + * @v_retval: return error code + * @msg: pointer to the msg buffer + * @msglen: msg length + * @cmd_details: pointer to command details + * + * Send message to PF driver using admin queue. By default, this message + * is sent asynchronously, i.e. iavf_asq_send_command() does not wait for + * completion before returning. + **/ +iavf_status iavf_aq_send_msg_to_pf(struct iavf_hw *hw, + enum virtchnl_ops v_opcode, + iavf_status v_retval, u8 *msg, u16 msglen, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_asq_cmd_details details; + struct i40e_aq_desc desc; + iavf_status status; + + iavf_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_pf); + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI); + desc.cookie_high = cpu_to_le32(v_opcode); + desc.cookie_low = cpu_to_le32(v_retval); + if (msglen) { + desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF + | I40E_AQ_FLAG_RD)); + if (msglen > I40E_AQ_LARGE_BUF) + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); + desc.datalen = cpu_to_le16(msglen); + } + if (!cmd_details) { + memset(&details, 0, sizeof(details)); + details.async = true; + cmd_details = &details; + } + status = iavf_asq_send_command(hw, &desc, msg, msglen, cmd_details); + return status; +} + +/** + * iavf_vf_parse_hw_config + * @hw: pointer to the hardware structure + * @msg: pointer to the virtual channel VF resource structure + * + * Given a VF resource message from the PF, populate the hw struct + * with appropriate information. + **/ +void iavf_vf_parse_hw_config(struct iavf_hw *hw, + struct virtchnl_vf_resource *msg) +{ + struct virtchnl_vsi_resource *vsi_res; + int i; + + vsi_res = &msg->vsi_res[0]; + + hw->dev_caps.num_vsis = msg->num_vsis; + hw->dev_caps.num_rx_qp = msg->num_queue_pairs; + hw->dev_caps.num_tx_qp = msg->num_queue_pairs; + hw->dev_caps.num_msix_vectors_vf = msg->max_vectors; + hw->dev_caps.dcb = msg->vf_cap_flags & + VIRTCHNL_VF_OFFLOAD_L2; + hw->dev_caps.fcoe = 0; + for (i = 0; i < msg->num_vsis; i++) { + if (vsi_res->vsi_type == VIRTCHNL_VSI_SRIOV) { + ether_addr_copy(hw->mac.perm_addr, + vsi_res->default_mac_addr); + ether_addr_copy(hw->mac.addr, + vsi_res->default_mac_addr); + } + vsi_res++; + } +} + +/** + * iavf_vf_reset + * @hw: pointer to the hardware structure + * + * Send a VF_RESET message to the PF. Does not wait for response from PF + * as none will be forthcoming. Immediately after calling this function, + * the admin queue should be shut down and (optionally) reinitialized. + **/ +iavf_status iavf_vf_reset(struct iavf_hw *hw) +{ + return iavf_aq_send_msg_to_pf(hw, VIRTCHNL_OP_RESET_VF, + 0, NULL, 0, NULL); +} diff --git a/drivers/net/ethernet/intel/iavf/iavf_devids.h b/drivers/net/ethernet/intel/iavf/iavf_devids.h new file mode 100644 index 000000000000..8eb7b697e96c --- /dev/null +++ b/drivers/net/ethernet/intel/iavf/iavf_devids.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2013 - 2018 Intel Corporation. 
*/ + +#ifndef _IAVF_DEVIDS_H_ +#define _IAVF_DEVIDS_H_ + +/* Device IDs for the VF driver */ +#define IAVF_DEV_ID_VF 0x154C +#define IAVF_DEV_ID_VF_HV 0x1571 +#define IAVF_DEV_ID_ADAPTIVE_VF 0x1889 +#define IAVF_DEV_ID_X722_VF 0x37CD +#endif /* _IAVF_DEVIDS_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c new file mode 100644 index 000000000000..9f87304109fe --- /dev/null +++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c @@ -0,0 +1,1036 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2013 - 2018 Intel Corporation. */ + +/* ethtool support for iavf */ +#include "iavf.h" + +#include <linux/uaccess.h> + +/* ethtool statistics helpers */ + +/** + * struct iavf_stats - definition for an ethtool statistic + * @stat_string: statistic name to display in ethtool -S output + * @sizeof_stat: the sizeof() the stat, must be no greater than sizeof(u64) + * @stat_offset: offsetof() the stat from a base pointer + * + * This structure defines a statistic to be added to the ethtool stats buffer. + * It defines a statistic as offset from a common base pointer. Stats should + * be defined in constant arrays using the IAVF_STAT macro, with every element + * of the array using the same _type for calculating the sizeof_stat and + * stat_offset. + * + * The @sizeof_stat is expected to be sizeof(u8), sizeof(u16), sizeof(u32) or + * sizeof(u64). Other sizes are not expected and will produce a WARN_ONCE from + * the iavf_add_ethtool_stat() helper function. + * + * The @stat_string is interpreted as a format string, allowing formatted + * values to be inserted while looping over multiple structures for a given + * statistics array. Thus, every statistic string in an array should have the + * same type and number of format specifiers, to be formatted by variadic + * arguments to the iavf_add_stat_string() helper function. + **/ +struct iavf_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +/* Helper macro to define an iavf_stat structure with proper size and type. + * Use this when defining constant statistics arrays. Note that @_type expects + * only a type name and is used multiple times. + */ +#define IAVF_STAT(_type, _name, _stat) { \ + .stat_string = _name, \ + .sizeof_stat = FIELD_SIZEOF(_type, _stat), \ + .stat_offset = offsetof(_type, _stat) \ +} + +/* Helper macro for defining some statistics related to queues */ +#define IAVF_QUEUE_STAT(_name, _stat) \ + IAVF_STAT(struct iavf_ring, _name, _stat) + +/* Stats associated with a Tx or Rx ring */ +static const struct iavf_stats iavf_gstrings_queue_stats[] = { + IAVF_QUEUE_STAT("%s-%u.packets", stats.packets), + IAVF_QUEUE_STAT("%s-%u.bytes", stats.bytes), +}; + +/** + * iavf_add_one_ethtool_stat - copy the stat into the supplied buffer + * @data: location to store the stat value + * @pointer: basis for where to copy from + * @stat: the stat definition + * + * Copies the stat data defined by the pointer and stat structure pair into + * the memory supplied as data. Used to implement iavf_add_ethtool_stats and + * iavf_add_queue_stats. If the pointer is null, data will be zero'd. + */ +static void +iavf_add_one_ethtool_stat(u64 *data, void *pointer, + const struct iavf_stats *stat) +{ + char *p; + + if (!pointer) { + /* ensure that the ethtool data buffer is zero'd for any stats + * which don't have a valid pointer. 
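For illustration of how the IAVF_STAT convention above is meant to be used, here is a minimal sketch; the structure and both fields are hypothetical and not part of this patch, only the macro itself is:

struct example_priv {
	u64 tx_restart;
	u32 rx_alloc_failed;
};

/* every element uses the same _type, as the kernel-doc above requires */
static const struct iavf_stats example_gstrings[] = {
	IAVF_STAT(struct example_priv, "tx_restart", tx_restart),
	IAVF_STAT(struct example_priv, "rx_alloc_failed", rx_alloc_failed),
};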
+ */ + *data = 0; + return; + } + + p = (char *)pointer + stat->stat_offset; + switch (stat->sizeof_stat) { + case sizeof(u64): + *data = *((u64 *)p); + break; + case sizeof(u32): + *data = *((u32 *)p); + break; + case sizeof(u16): + *data = *((u16 *)p); + break; + case sizeof(u8): + *data = *((u8 *)p); + break; + default: + WARN_ONCE(1, "unexpected stat size for %s", + stat->stat_string); + *data = 0; + } +} + +/** + * __iavf_add_ethtool_stats - copy stats into the ethtool supplied buffer + * @data: ethtool stats buffer + * @pointer: location to copy stats from + * @stats: array of stats to copy + * @size: the size of the stats definition + * + * Copy the stats defined by the stats array using the pointer as a base into + * the data buffer supplied by ethtool. Updates the data pointer to point to + * the next empty location for successive calls to __iavf_add_ethtool_stats. + * If pointer is null, set the data values to zero and update the pointer to + * skip these stats. + **/ +static void +__iavf_add_ethtool_stats(u64 **data, void *pointer, + const struct iavf_stats stats[], + const unsigned int size) +{ + unsigned int i; + + for (i = 0; i < size; i++) + iavf_add_one_ethtool_stat((*data)++, pointer, &stats[i]); +} + +/** + * iavf_add_ethtool_stats - copy stats into ethtool supplied buffer + * @data: ethtool stats buffer + * @pointer: location where stats are stored + * @stats: static const array of stat definitions + * + * Macro to ease the use of __iavf_add_ethtool_stats by taking a static + * constant stats array and passing the ARRAY_SIZE(). This avoids typos by + * ensuring that we pass the size associated with the given stats array. + * + * The parameter @stats is evaluated twice, so parameters with side effects + * should be avoided. + **/ +#define iavf_add_ethtool_stats(data, pointer, stats) \ + __iavf_add_ethtool_stats(data, pointer, stats, ARRAY_SIZE(stats)) + +/** + * iavf_add_queue_stats - copy queue statistics into supplied buffer + * @data: ethtool stats buffer + * @ring: the ring to copy + * + * Queue statistics must be copied while protected by + * u64_stats_fetch_begin_irq, so we can't directly use iavf_add_ethtool_stats. + * Assumes that queue stats are defined in iavf_gstrings_queue_stats. If the + * ring pointer is null, zero out the queue stat values and update the data + * pointer. Otherwise safely copy the stats from the ring into the supplied + * buffer and update the data pointer when finished. + * + * This function expects to be called while under rcu_read_lock(). + **/ +static void +iavf_add_queue_stats(u64 **data, struct iavf_ring *ring) +{ + const unsigned int size = ARRAY_SIZE(iavf_gstrings_queue_stats); + const struct iavf_stats *stats = iavf_gstrings_queue_stats; + unsigned int start; + unsigned int i; + + /* To avoid invalid statistics values, ensure that we keep retrying + * the copy until we get a consistent value according to + * u64_stats_fetch_retry_irq. But first, make sure our ring is + * non-null before attempting to access its syncp. + */ + do { + start = !ring ? 
0 : u64_stats_fetch_begin_irq(&ring->syncp); + for (i = 0; i < size; i++) + iavf_add_one_ethtool_stat(&(*data)[i], ring, &stats[i]); + } while (ring && u64_stats_fetch_retry_irq(&ring->syncp, start)); + + /* Once we successfully copy the stats in, update the data pointer */ + *data += size; +} + +/** + * __iavf_add_stat_strings - copy stat strings into ethtool buffer + * @p: ethtool supplied buffer + * @stats: stat definitions array + * @size: size of the stats array + * + * Format and copy the strings described by stats into the buffer pointed at + * by p. + **/ +static void __iavf_add_stat_strings(u8 **p, const struct iavf_stats stats[], + const unsigned int size, ...) +{ + unsigned int i; + + for (i = 0; i < size; i++) { + va_list args; + + va_start(args, size); + vsnprintf(*p, ETH_GSTRING_LEN, stats[i].stat_string, args); + *p += ETH_GSTRING_LEN; + va_end(args); + } +} + +/** + * iavf_add_stat_strings - copy stat strings into ethtool buffer + * @p: ethtool supplied buffer + * @stats: stat definitions array + * + * Format and copy the strings described by the const static stats value into + * the buffer pointed at by p. + * + * The parameter @stats is evaluated twice, so parameters with side effects + * should be avoided. Additionally, stats must be an array such that + * ARRAY_SIZE can be called on it. + **/ +#define iavf_add_stat_strings(p, stats, ...) \ + __iavf_add_stat_strings(p, stats, ARRAY_SIZE(stats), ## __VA_ARGS__) + +#define VF_STAT(_name, _stat) \ + IAVF_STAT(struct iavf_adapter, _name, _stat) + +static const struct iavf_stats iavf_gstrings_stats[] = { + VF_STAT("rx_bytes", current_stats.rx_bytes), + VF_STAT("rx_unicast", current_stats.rx_unicast), + VF_STAT("rx_multicast", current_stats.rx_multicast), + VF_STAT("rx_broadcast", current_stats.rx_broadcast), + VF_STAT("rx_discards", current_stats.rx_discards), + VF_STAT("rx_unknown_protocol", current_stats.rx_unknown_protocol), + VF_STAT("tx_bytes", current_stats.tx_bytes), + VF_STAT("tx_unicast", current_stats.tx_unicast), + VF_STAT("tx_multicast", current_stats.tx_multicast), + VF_STAT("tx_broadcast", current_stats.tx_broadcast), + VF_STAT("tx_discards", current_stats.tx_discards), + VF_STAT("tx_errors", current_stats.tx_errors), +}; + +#define IAVF_STATS_LEN ARRAY_SIZE(iavf_gstrings_stats) + +#define IAVF_QUEUE_STATS_LEN ARRAY_SIZE(iavf_gstrings_queue_stats) + +/* For now we have one and only one private flag and it is only defined + * when we have support for the SKIP_CPU_SYNC DMA attribute. Instead + * of leaving all this code sitting around empty we will strip it unless + * our one private flag is actually available. + */ +struct iavf_priv_flags { + char flag_string[ETH_GSTRING_LEN]; + u32 flag; + bool read_only; +}; + +#define IAVF_PRIV_FLAG(_name, _flag, _read_only) { \ + .flag_string = _name, \ + .flag = _flag, \ + .read_only = _read_only, \ +} + +static const struct iavf_priv_flags iavf_gstrings_priv_flags[] = { + IAVF_PRIV_FLAG("legacy-rx", IAVF_FLAG_LEGACY_RX, 0), +}; + +#define IAVF_PRIV_FLAGS_STR_LEN ARRAY_SIZE(iavf_gstrings_priv_flags) + +/** + * iavf_get_link_ksettings - Get Link Speed and Duplex settings + * @netdev: network interface device structure + * @cmd: ethtool command + * + * Reports speed/duplex settings. Because this is a VF, we don't know what + * kind of link we really have, so we fake it. 
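Returning briefly to the queue statistics above: the retry loop in iavf_add_queue_stats() is an instance of the standard u64_stats snapshot pattern, which on its own reduces to roughly this sketch (reader side only, with ring being an iavf_ring as in this driver):

unsigned int start;
u64 packets, bytes;

do {
	start = u64_stats_fetch_begin_irq(&ring->syncp);
	packets = ring->stats.packets;	/* both values are re-read ... */
	bytes = ring->stats.bytes;	/* ... until they form a consistent pair */
} while (u64_stats_fetch_retry_irq(&ring->syncp, start));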
+ **/ +static int iavf_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct iavf_adapter *adapter = netdev_priv(netdev); + + ethtool_link_ksettings_zero_link_mode(cmd, supported); + cmd->base.autoneg = AUTONEG_DISABLE; + cmd->base.port = PORT_NONE; + /* Set speed and duplex */ + switch (adapter->link_speed) { + case I40E_LINK_SPEED_40GB: + cmd->base.speed = SPEED_40000; + break; + case I40E_LINK_SPEED_25GB: +#ifdef SPEED_25000 + cmd->base.speed = SPEED_25000; +#else + netdev_info(netdev, + "Speed is 25G, display not supported by this version of ethtool.\n"); +#endif + break; + case I40E_LINK_SPEED_20GB: + cmd->base.speed = SPEED_20000; + break; + case I40E_LINK_SPEED_10GB: + cmd->base.speed = SPEED_10000; + break; + case I40E_LINK_SPEED_1GB: + cmd->base.speed = SPEED_1000; + break; + case I40E_LINK_SPEED_100MB: + cmd->base.speed = SPEED_100; + break; + default: + break; + } + cmd->base.duplex = DUPLEX_FULL; + + return 0; +} + +/** + * iavf_get_sset_count - Get length of string set + * @netdev: network interface device structure + * @sset: id of string set + * + * Reports size of various string tables. + **/ +static int iavf_get_sset_count(struct net_device *netdev, int sset) +{ + if (sset == ETH_SS_STATS) + return IAVF_STATS_LEN + + (IAVF_QUEUE_STATS_LEN * 2 * IAVF_MAX_REQ_QUEUES); + else if (sset == ETH_SS_PRIV_FLAGS) + return IAVF_PRIV_FLAGS_STR_LEN; + else + return -EINVAL; +} + +/** + * iavf_get_ethtool_stats - report device statistics + * @netdev: network interface device structure + * @stats: ethtool statistics structure + * @data: pointer to data buffer + * + * All statistics are added to the data buffer as an array of u64. + **/ +static void iavf_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct iavf_adapter *adapter = netdev_priv(netdev); + unsigned int i; + + iavf_add_ethtool_stats(&data, adapter, iavf_gstrings_stats); + + rcu_read_lock(); + for (i = 0; i < IAVF_MAX_REQ_QUEUES; i++) { + struct iavf_ring *ring; + + /* Avoid accessing un-allocated queues */ + ring = (i < adapter->num_active_queues ? + &adapter->tx_rings[i] : NULL); + iavf_add_queue_stats(&data, ring); + + /* Avoid accessing un-allocated queues */ + ring = (i < adapter->num_active_queues ? + &adapter->rx_rings[i] : NULL); + iavf_add_queue_stats(&data, ring); + } + rcu_read_unlock(); +} + +/** + * iavf_get_priv_flag_strings - Get private flag strings + * @netdev: network interface device structure + * @data: buffer for string data + * + * Builds the private flags string table + **/ +static void iavf_get_priv_flag_strings(struct net_device *netdev, u8 *data) +{ + unsigned int i; + + for (i = 0; i < IAVF_PRIV_FLAGS_STR_LEN; i++) { + snprintf(data, ETH_GSTRING_LEN, "%s", + iavf_gstrings_priv_flags[i].flag_string); + data += ETH_GSTRING_LEN; + } +} + +/** + * iavf_get_stat_strings - Get stat strings + * @netdev: network interface device structure + * @data: buffer for string data + * + * Builds the statistics string table + **/ +static void iavf_get_stat_strings(struct net_device *netdev, u8 *data) +{ + unsigned int i; + + iavf_add_stat_strings(&data, iavf_gstrings_stats); + + /* Queues are always allocated in pairs, so we just use num_tx_queues + * for both Tx and Rx queues. 
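As a sketch of what the "%s-%u" patterns above produce, the per-queue names reported through ethtool -S come out as "tx-0.packets", "tx-0.bytes", "rx-0.packets" and so on; the expansion iavf_add_stat_strings() performs via vsnprintf() is equivalent to:

char name[ETH_GSTRING_LEN];

snprintf(name, ETH_GSTRING_LEN, "%s-%u.packets", "tx", 0);	/* "tx-0.packets" */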
+ */ + for (i = 0; i < netdev->num_tx_queues; i++) { + iavf_add_stat_strings(&data, iavf_gstrings_queue_stats, + "tx", i); + iavf_add_stat_strings(&data, iavf_gstrings_queue_stats, + "rx", i); + } +} + +/** + * iavf_get_strings - Get string set + * @netdev: network interface device structure + * @sset: id of string set + * @data: buffer for string data + * + * Builds string tables for various string sets + **/ +static void iavf_get_strings(struct net_device *netdev, u32 sset, u8 *data) +{ + switch (sset) { + case ETH_SS_STATS: + iavf_get_stat_strings(netdev, data); + break; + case ETH_SS_PRIV_FLAGS: + iavf_get_priv_flag_strings(netdev, data); + break; + default: + break; + } +} + +/** + * iavf_get_priv_flags - report device private flags + * @netdev: network interface device structure + * + * The get string set count and the string set should be matched for each + * flag returned. Add new strings for each flag to the iavf_gstrings_priv_flags + * array. + * + * Returns a u32 bitmap of flags. + **/ +static u32 iavf_get_priv_flags(struct net_device *netdev) +{ + struct iavf_adapter *adapter = netdev_priv(netdev); + u32 i, ret_flags = 0; + + for (i = 0; i < IAVF_PRIV_FLAGS_STR_LEN; i++) { + const struct iavf_priv_flags *priv_flags; + + priv_flags = &iavf_gstrings_priv_flags[i]; + + if (priv_flags->flag & adapter->flags) + ret_flags |= BIT(i); + } + + return ret_flags; +} + +/** + * iavf_set_priv_flags - set private flags + * @netdev: network interface device structure + * @flags: bit flags to be set + **/ +static int iavf_set_priv_flags(struct net_device *netdev, u32 flags) +{ + struct iavf_adapter *adapter = netdev_priv(netdev); + u32 orig_flags, new_flags, changed_flags; + u32 i; + + orig_flags = READ_ONCE(adapter->flags); + new_flags = orig_flags; + + for (i = 0; i < IAVF_PRIV_FLAGS_STR_LEN; i++) { + const struct iavf_priv_flags *priv_flags; + + priv_flags = &iavf_gstrings_priv_flags[i]; + + if (flags & BIT(i)) + new_flags |= priv_flags->flag; + else + new_flags &= ~(priv_flags->flag); + + if (priv_flags->read_only && + ((orig_flags ^ new_flags) & ~BIT(i))) + return -EOPNOTSUPP; + } + + /* Before we finalize any flag changes, any checks which we need to + * perform to determine if the new flags will be supported should go + * here... + */ + + /* Compare and exchange the new flags into place. If we failed, that + * is if cmpxchg returns anything but the old value, this means + * something else must have modified the flags variable since we + * copied it. We'll just punt with an error and log something in the + * message buffer. + */ + if (cmpxchg(&adapter->flags, orig_flags, new_flags) != orig_flags) { + dev_warn(&adapter->pdev->dev, + "Unable to update adapter->flags as it was modified by another thread...\n"); + return -EAGAIN; + } + + changed_flags = orig_flags ^ new_flags; + + /* Process any additional changes needed as a result of flag changes. + * The changed_flags value reflects the list of bits that were changed + * in the code above. + */ + + /* issue a reset to force legacy-rx change to take effect */ + if (changed_flags & IAVF_FLAG_LEGACY_RX) { + if (netif_running(netdev)) { + adapter->flags |= IAVF_FLAG_RESET_NEEDED; + schedule_work(&adapter->reset_task); + } + } + + return 0; +} + +/** + * iavf_get_msglevel - Get debug message level + * @netdev: network interface device structure + * + * Returns current debug message level. 
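The flag update in iavf_set_priv_flags() above relies on a single compare-and-exchange rather than a lock; stripped down to one flag, the pattern is:

u32 orig_flags = READ_ONCE(adapter->flags);
u32 new_flags = orig_flags | IAVF_FLAG_LEGACY_RX;

/* bail out instead of retrying if another thread changed the flags */
if (cmpxchg(&adapter->flags, orig_flags, new_flags) != orig_flags)
	return -EAGAIN;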
+ **/
+static u32 iavf_get_msglevel(struct net_device *netdev)
+{
+	struct iavf_adapter *adapter = netdev_priv(netdev);
+
+	return adapter->msg_enable;
+}
+
+/**
+ * iavf_set_msglevel - Set debug message level
+ * @netdev: network interface device structure
+ * @data: message level
+ *
+ * Set current debug message level. Higher values cause the driver to
+ * be noisier.
+ **/
+static void iavf_set_msglevel(struct net_device *netdev, u32 data)
+{
+	struct iavf_adapter *adapter = netdev_priv(netdev);
+
+	if (IAVF_DEBUG_USER & data)
+		adapter->hw.debug_mask = data;
+	adapter->msg_enable = data;
+}
+
+/**
+ * iavf_get_drvinfo - Get driver info
+ * @netdev: network interface device structure
+ * @drvinfo: ethtool driver info structure
+ *
+ * Returns information about the driver and device for display to the user.
+ **/
+static void iavf_get_drvinfo(struct net_device *netdev,
+			     struct ethtool_drvinfo *drvinfo)
+{
+	struct iavf_adapter *adapter = netdev_priv(netdev);
+
+	strlcpy(drvinfo->driver, iavf_driver_name, 32);
+	strlcpy(drvinfo->version, iavf_driver_version, 32);
+	strlcpy(drvinfo->fw_version, "N/A", 4);
+	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+	drvinfo->n_priv_flags = IAVF_PRIV_FLAGS_STR_LEN;
+}
+
+/**
+ * iavf_get_ringparam - Get ring parameters
+ * @netdev: network interface device structure
+ * @ring: ethtool ringparam structure
+ *
+ * Returns current ring parameters. TX and RX rings are reported separately,
+ * but the number of rings is not reported.
+ **/
+static void iavf_get_ringparam(struct net_device *netdev,
+			       struct ethtool_ringparam *ring)
+{
+	struct iavf_adapter *adapter = netdev_priv(netdev);
+
+	ring->rx_max_pending = IAVF_MAX_RXD;
+	ring->tx_max_pending = IAVF_MAX_TXD;
+	ring->rx_pending = adapter->rx_desc_count;
+	ring->tx_pending = adapter->tx_desc_count;
+}
+
+/**
+ * iavf_set_ringparam - Set ring parameters
+ * @netdev: network interface device structure
+ * @ring: ethtool ringparam structure
+ *
+ * Sets ring parameters. TX and RX rings are controlled separately, but the
+ * number of rings is not specified, so all rings get the same settings.
+ **/
+static int iavf_set_ringparam(struct net_device *netdev,
+			      struct ethtool_ringparam *ring)
+{
+	struct iavf_adapter *adapter = netdev_priv(netdev);
+	u32 new_rx_count, new_tx_count;
+
+	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+		return -EINVAL;
+
+	new_tx_count = clamp_t(u32, ring->tx_pending,
+			       IAVF_MIN_TXD,
+			       IAVF_MAX_TXD);
+	new_tx_count = ALIGN(new_tx_count, IAVF_REQ_DESCRIPTOR_MULTIPLE);
+
+	new_rx_count = clamp_t(u32, ring->rx_pending,
+			       IAVF_MIN_RXD,
+			       IAVF_MAX_RXD);
+	new_rx_count = ALIGN(new_rx_count, IAVF_REQ_DESCRIPTOR_MULTIPLE);
+
+	/* if nothing to do return success */
+	if ((new_tx_count == adapter->tx_desc_count) &&
+	    (new_rx_count == adapter->rx_desc_count))
+		return 0;
+
+	adapter->tx_desc_count = new_tx_count;
+	adapter->rx_desc_count = new_rx_count;
+
+	if (netif_running(netdev)) {
+		adapter->flags |= IAVF_FLAG_RESET_NEEDED;
+		schedule_work(&adapter->reset_task);
+	}
+
+	return 0;
+}
+
+/**
+ * __iavf_get_coalesce - get per-queue coalesce settings
+ * @netdev: the netdev to check
+ * @ec: ethtool coalesce data structure
+ * @queue: which queue to pick
+ *
+ * Gets the per-queue settings for coalescence. Specifically Rx and Tx usecs
+ * are per queue. If queue is <0 then we default to queue 0 as the
+ * representative value.
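A worked example of the descriptor-count normalization in iavf_set_ringparam() above, assuming IAVF_REQ_DESCRIPTOR_MULTIPLE is 32 as elsewhere in this driver family:

u32 new_tx_count;

/* "ethtool -G <if> tx 1000": clamp to the valid range, then round up */
new_tx_count = clamp_t(u32, 1000, IAVF_MIN_TXD, IAVF_MAX_TXD);
new_tx_count = ALIGN(new_tx_count, IAVF_REQ_DESCRIPTOR_MULTIPLE);
/* new_tx_count is now 1024 */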
+ **/ +static int __iavf_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, int queue) +{ + struct iavf_adapter *adapter = netdev_priv(netdev); + struct iavf_vsi *vsi = &adapter->vsi; + struct iavf_ring *rx_ring, *tx_ring; + + ec->tx_max_coalesced_frames = vsi->work_limit; + ec->rx_max_coalesced_frames = vsi->work_limit; + + /* Rx and Tx usecs per queue value. If user doesn't specify the + * queue, return queue 0's value to represent. + */ + if (queue < 0) + queue = 0; + else if (queue >= adapter->num_active_queues) + return -EINVAL; + + rx_ring = &adapter->rx_rings[queue]; + tx_ring = &adapter->tx_rings[queue]; + + if (ITR_IS_DYNAMIC(rx_ring->itr_setting)) + ec->use_adaptive_rx_coalesce = 1; + + if (ITR_IS_DYNAMIC(tx_ring->itr_setting)) + ec->use_adaptive_tx_coalesce = 1; + + ec->rx_coalesce_usecs = rx_ring->itr_setting & ~IAVF_ITR_DYNAMIC; + ec->tx_coalesce_usecs = tx_ring->itr_setting & ~IAVF_ITR_DYNAMIC; + + return 0; +} + +/** + * iavf_get_coalesce - Get interrupt coalescing settings + * @netdev: network interface device structure + * @ec: ethtool coalesce structure + * + * Returns current coalescing settings. This is referred to elsewhere in the + * driver as Interrupt Throttle Rate, as this is how the hardware describes + * this functionality. Note that if per-queue settings have been modified this + * only represents the settings of queue 0. + **/ +static int iavf_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + return __iavf_get_coalesce(netdev, ec, -1); +} + +/** + * iavf_get_per_queue_coalesce - get coalesce values for specific queue + * @netdev: netdev to read + * @ec: coalesce settings from ethtool + * @queue: the queue to read + * + * Read specific queue's coalesce settings. + **/ +static int iavf_get_per_queue_coalesce(struct net_device *netdev, u32 queue, + struct ethtool_coalesce *ec) +{ + return __iavf_get_coalesce(netdev, ec, queue); +} + +/** + * iavf_set_itr_per_queue - set ITR values for specific queue + * @adapter: the VF adapter struct to set values for + * @ec: coalesce settings from ethtool + * @queue: the queue to modify + * + * Change the ITR settings for a specific queue. + **/ +static void iavf_set_itr_per_queue(struct iavf_adapter *adapter, + struct ethtool_coalesce *ec, int queue) +{ + struct iavf_ring *rx_ring = &adapter->rx_rings[queue]; + struct iavf_ring *tx_ring = &adapter->tx_rings[queue]; + struct iavf_q_vector *q_vector; + + rx_ring->itr_setting = ITR_REG_ALIGN(ec->rx_coalesce_usecs); + tx_ring->itr_setting = ITR_REG_ALIGN(ec->tx_coalesce_usecs); + + rx_ring->itr_setting |= IAVF_ITR_DYNAMIC; + if (!ec->use_adaptive_rx_coalesce) + rx_ring->itr_setting ^= IAVF_ITR_DYNAMIC; + + tx_ring->itr_setting |= IAVF_ITR_DYNAMIC; + if (!ec->use_adaptive_tx_coalesce) + tx_ring->itr_setting ^= IAVF_ITR_DYNAMIC; + + q_vector = rx_ring->q_vector; + q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting); + + q_vector = tx_ring->q_vector; + q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting); + + /* The interrupt handler itself will take care of programming + * the Tx and Rx ITR values based on the values we have entered + * into the q_vector, no need to write the values now. + */ +} + +/** + * __iavf_set_coalesce - set coalesce settings for particular queue + * @netdev: the netdev to change + * @ec: ethtool coalesce settings + * @queue: the queue to change + * + * Sets the coalesce settings for a particular queue. 
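The itr_setting manipulation in iavf_set_itr_per_queue() above packs the interval and the adaptive-mode flag into a single field; a rough sketch:

u16 itr_setting;

itr_setting = ITR_REG_ALIGN(ec->rx_coalesce_usecs);	/* fixed interval */
if (ec->use_adaptive_rx_coalesce)
	itr_setting |= IAVF_ITR_DYNAMIC;	/* adaptive mode on top */
/* the plain interval is recovered as itr_setting & ~IAVF_ITR_DYNAMIC */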
+ **/
+static int __iavf_set_coalesce(struct net_device *netdev,
+			       struct ethtool_coalesce *ec, int queue)
+{
+	struct iavf_adapter *adapter = netdev_priv(netdev);
+	struct iavf_vsi *vsi = &adapter->vsi;
+	int i;
+
+	if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
+		vsi->work_limit = ec->tx_max_coalesced_frames_irq;
+
+	if (ec->rx_coalesce_usecs == 0) {
+		if (ec->use_adaptive_rx_coalesce)
+			netif_info(adapter, drv, netdev, "rx-usecs=0, need to disable adaptive-rx for a complete disable\n");
+	} else if ((ec->rx_coalesce_usecs < IAVF_MIN_ITR) ||
+		   (ec->rx_coalesce_usecs > IAVF_MAX_ITR)) {
+		netif_info(adapter, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n");
+		return -EINVAL;
+	} else if (ec->tx_coalesce_usecs == 0) {
+		if (ec->use_adaptive_tx_coalesce)
+			netif_info(adapter, drv, netdev, "tx-usecs=0, need to disable adaptive-tx for a complete disable\n");
+	} else if ((ec->tx_coalesce_usecs < IAVF_MIN_ITR) ||
+		   (ec->tx_coalesce_usecs > IAVF_MAX_ITR)) {
+		netif_info(adapter, drv, netdev, "Invalid value, tx-usecs range is 0-8160\n");
+		return -EINVAL;
+	}
+
+	/* Rx and Tx usecs have per-queue values. If the user doesn't specify
+	 * the queue, apply to all queues.
+	 */
+	if (queue < 0) {
+		for (i = 0; i < adapter->num_active_queues; i++)
+			iavf_set_itr_per_queue(adapter, ec, i);
+	} else if (queue < adapter->num_active_queues) {
+		iavf_set_itr_per_queue(adapter, ec, queue);
+	} else {
+		netif_info(adapter, drv, netdev, "Invalid queue value, queue range is 0 - %d\n",
+			   adapter->num_active_queues - 1);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * iavf_set_coalesce - Set interrupt coalescing settings
+ * @netdev: network interface device structure
+ * @ec: ethtool coalesce structure
+ *
+ * Change current coalescing settings for every queue.
+ **/
+static int iavf_set_coalesce(struct net_device *netdev,
+			     struct ethtool_coalesce *ec)
+{
+	return __iavf_set_coalesce(netdev, ec, -1);
+}
+
+/**
+ * iavf_set_per_queue_coalesce - set specific queue's coalesce settings
+ * @netdev: the netdev to change
+ * @ec: ethtool's coalesce settings
+ * @queue: the queue to modify
+ *
+ * Modifies a specific queue's coalesce settings.
+ */
+static int iavf_set_per_queue_coalesce(struct net_device *netdev, u32 queue,
+				       struct ethtool_coalesce *ec)
+{
+	return __iavf_set_coalesce(netdev, ec, queue);
+}
+
+/**
+ * iavf_get_rxnfc - command to get RX flow classification rules
+ * @netdev: network interface device structure
+ * @cmd: ethtool rxnfc command
+ * @rule_locs: pointer to store rule locations
+ *
+ * Returns success if the command is supported.
+ **/
+static int iavf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
+			  u32 *rule_locs)
+{
+	struct iavf_adapter *adapter = netdev_priv(netdev);
+	int ret = -EOPNOTSUPP;
+
+	switch (cmd->cmd) {
+	case ETHTOOL_GRXRINGS:
+		cmd->data = adapter->num_active_queues;
+		ret = 0;
+		break;
+	case ETHTOOL_GRXFH:
+		netdev_info(netdev,
+			    "RSS hash info is not available to vf, use pf.\n");
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+/**
+ * iavf_get_channels: get the number of channels supported by the device
+ * @netdev: network interface device structure
+ * @ch: channel information structure
+ *
+ * For the purposes of our device, we only use combined channels, i.e. a tx/rx
+ * queue pair. Report one extra channel to match our "other" MSI-X vector.
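Given the validation in iavf_set_channels() below, only the combined count may change; a sketch of a valid request as the ethtool core would deliver it (the count of 8 is hypothetical):

struct ethtool_channels req = {
	.combined_count = 8,		/* new number of Tx/Rx queue pairs */
	.other_count = NONQ_VECS,	/* must stay at the mailbox vector */
	/* .rx_count and .tx_count must remain zero */
};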
+ **/
+static void iavf_get_channels(struct net_device *netdev,
+			      struct ethtool_channels *ch)
+{
+	struct iavf_adapter *adapter = netdev_priv(netdev);
+
+	/* Report maximum channels */
+	ch->max_combined = IAVF_MAX_REQ_QUEUES;
+
+	ch->max_other = NONQ_VECS;
+	ch->other_count = NONQ_VECS;
+
+	ch->combined_count = adapter->num_active_queues;
+}
+
+/**
+ * iavf_set_channels: set the new channel count
+ * @netdev: network interface device structure
+ * @ch: channel information structure
+ *
+ * Negotiate a new number of channels with the PF then do a reset. During
+ * reset we'll realloc queues and fix the RSS table. Returns 0 on success,
+ * negative on failure.
+ **/
+static int iavf_set_channels(struct net_device *netdev,
+			     struct ethtool_channels *ch)
+{
+	struct iavf_adapter *adapter = netdev_priv(netdev);
+	int num_req = ch->combined_count;
+
+	if (num_req != adapter->num_active_queues &&
+	    !(adapter->vf_res->vf_cap_flags &
+	      VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)) {
+		dev_info(&adapter->pdev->dev, "PF is not capable of queue negotiation.\n");
+		return -EINVAL;
+	}
+
+	if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
+	    adapter->num_tc) {
+		dev_info(&adapter->pdev->dev, "Cannot set channels since ADq is enabled.\n");
+		return -EINVAL;
+	}
+
+	/* All of these should have already been checked by ethtool before this
+	 * even gets to us, but just to be sure.
+	 */
+	if (num_req <= 0 || num_req > IAVF_MAX_REQ_QUEUES)
+		return -EINVAL;
+
+	if (ch->rx_count || ch->tx_count || ch->other_count != NONQ_VECS)
+		return -EINVAL;
+
+	adapter->num_req_queues = num_req;
+	return iavf_request_queues(adapter, num_req);
+}
+
+/**
+ * iavf_get_rxfh_key_size - get the RSS hash key size
+ * @netdev: network interface device structure
+ *
+ * Returns the RSS hash key size.
+ **/
+static u32 iavf_get_rxfh_key_size(struct net_device *netdev)
+{
+	struct iavf_adapter *adapter = netdev_priv(netdev);
+
+	return adapter->rss_key_size;
+}
+
+/**
+ * iavf_get_rxfh_indir_size - get the rx flow hash indirection table size
+ * @netdev: network interface device structure
+ *
+ * Returns the indirection table size.
+ **/
+static u32 iavf_get_rxfh_indir_size(struct net_device *netdev)
+{
+	struct iavf_adapter *adapter = netdev_priv(netdev);
+
+	return adapter->rss_lut_size;
+}
+
+/**
+ * iavf_get_rxfh - get the rx flow hash indirection table
+ * @netdev: network interface device structure
+ * @indir: indirection table
+ * @key: hash key
+ * @hfunc: hash function in use
+ *
+ * Reads the indirection table directly from the hardware. Always returns 0.
+ **/
+static int iavf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
+			 u8 *hfunc)
+{
+	struct iavf_adapter *adapter = netdev_priv(netdev);
+	u16 i;
+
+	if (hfunc)
+		*hfunc = ETH_RSS_HASH_TOP;
+	if (!indir)
+		return 0;
+
+	if (key)
+		memcpy(key, adapter->rss_key, adapter->rss_key_size);
+
+	/* Each 32 bits pointed by 'indir' is stored with a lut entry */
+	for (i = 0; i < adapter->rss_lut_size; i++)
+		indir[i] = (u32)adapter->rss_lut[i];
+
+	return 0;
+}
+
+/**
+ * iavf_set_rxfh - set the rx flow hash indirection table
+ * @netdev: network interface device structure
+ * @indir: indirection table
+ * @key: hash key
+ * @hfunc: hash function to use
+ *
+ * Returns -EINVAL if the table specifies an invalid queue id, otherwise
+ * returns 0 after programming the table.
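A consumer building the indirection table that iavf_set_rxfh() below programs would typically spread entries round-robin across the active queues; a sketch with hypothetical sizes:

u32 indir[64];	/* rss_lut_size entries in practice */
unsigned int i, num_queues = 4;

for (i = 0; i < ARRAY_SIZE(indir); i++)
	indir[i] = i % num_queues;	/* each LUT entry names a queue */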
+ **/ +static int iavf_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct iavf_adapter *adapter = netdev_priv(netdev); + u16 i; + + /* We do not allow change in unsupported parameters */ + if (key || + (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) + return -EOPNOTSUPP; + if (!indir) + return 0; + + if (key) + memcpy(adapter->rss_key, key, adapter->rss_key_size); + + /* Each 32 bits pointed by 'indir' is stored with a lut entry */ + for (i = 0; i < adapter->rss_lut_size; i++) + adapter->rss_lut[i] = (u8)(indir[i]); + + return iavf_config_rss(adapter); +} + +static const struct ethtool_ops iavf_ethtool_ops = { + .get_drvinfo = iavf_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_ringparam = iavf_get_ringparam, + .set_ringparam = iavf_set_ringparam, + .get_strings = iavf_get_strings, + .get_ethtool_stats = iavf_get_ethtool_stats, + .get_sset_count = iavf_get_sset_count, + .get_priv_flags = iavf_get_priv_flags, + .set_priv_flags = iavf_set_priv_flags, + .get_msglevel = iavf_get_msglevel, + .set_msglevel = iavf_set_msglevel, + .get_coalesce = iavf_get_coalesce, + .set_coalesce = iavf_set_coalesce, + .get_per_queue_coalesce = iavf_get_per_queue_coalesce, + .set_per_queue_coalesce = iavf_set_per_queue_coalesce, + .get_rxnfc = iavf_get_rxnfc, + .get_rxfh_indir_size = iavf_get_rxfh_indir_size, + .get_rxfh = iavf_get_rxfh, + .set_rxfh = iavf_set_rxfh, + .get_channels = iavf_get_channels, + .set_channels = iavf_set_channels, + .get_rxfh_key_size = iavf_get_rxfh_key_size, + .get_link_ksettings = iavf_get_link_ksettings, +}; + +/** + * iavf_set_ethtool_ops - Initialize ethtool ops struct + * @netdev: network interface device structure + * + * Sets ethtool ops struct in our netdev so that ethtool can call + * our functions. + **/ +void iavf_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &iavf_ethtool_ops; +} diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index fef6d892ed4c..9f2b7b7adf6b 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -1,38 +1,38 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2018 Intel Corporation. 
*/ -#include "i40evf.h" -#include "i40e_prototype.h" -#include "i40evf_client.h" -/* All i40evf tracepoints are defined by the include below, which must +#include "iavf.h" +#include "iavf_prototype.h" +#include "iavf_client.h" +/* All iavf tracepoints are defined by the include below, which must * be included exactly once across the whole kernel with * CREATE_TRACE_POINTS defined */ #define CREATE_TRACE_POINTS -#include "i40e_trace.h" +#include "iavf_trace.h" -static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter); -static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter); -static int i40evf_close(struct net_device *netdev); +static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter); +static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter); +static int iavf_close(struct net_device *netdev); -char i40evf_driver_name[] = "i40evf"; -static const char i40evf_driver_string[] = - "Intel(R) 40-10 Gigabit Virtual Function Network Driver"; +char iavf_driver_name[] = "iavf"; +static const char iavf_driver_string[] = + "Intel(R) Ethernet Adaptive Virtual Function Network Driver"; #define DRV_KERN "-k" #define DRV_VERSION_MAJOR 3 #define DRV_VERSION_MINOR 2 -#define DRV_VERSION_BUILD 2 +#define DRV_VERSION_BUILD 3 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_BUILD) \ DRV_KERN -const char i40evf_driver_version[] = DRV_VERSION; -static const char i40evf_copyright[] = - "Copyright (c) 2013 - 2015 Intel Corporation."; +const char iavf_driver_version[] = DRV_VERSION; +static const char iavf_copyright[] = + "Copyright (c) 2013 - 2018 Intel Corporation."; -/* i40evf_pci_tbl - PCI Device ID Table +/* iavf_pci_tbl - PCI Device ID Table * * Wildcard entries (PCI_ANY_ID) should come last * Last entry must be all 0s @@ -40,36 +40,37 @@ static const char i40evf_copyright[] = * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, * Class, Class Mask, private data (not used) } */ -static const struct pci_device_id i40evf_pci_tbl[] = { - {PCI_VDEVICE(INTEL, I40E_DEV_ID_VF), 0}, - {PCI_VDEVICE(INTEL, I40E_DEV_ID_VF_HV), 0}, - {PCI_VDEVICE(INTEL, I40E_DEV_ID_X722_VF), 0}, - {PCI_VDEVICE(INTEL, I40E_DEV_ID_ADAPTIVE_VF), 0}, +static const struct pci_device_id iavf_pci_tbl[] = { + {PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF), 0}, + {PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF_HV), 0}, + {PCI_VDEVICE(INTEL, IAVF_DEV_ID_X722_VF), 0}, + {PCI_VDEVICE(INTEL, IAVF_DEV_ID_ADAPTIVE_VF), 0}, /* required last entry */ {0, } }; -MODULE_DEVICE_TABLE(pci, i40evf_pci_tbl); +MODULE_DEVICE_TABLE(pci, iavf_pci_tbl); +MODULE_ALIAS("i40evf"); MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); -MODULE_DESCRIPTION("Intel(R) XL710 X710 Virtual Function Network Driver"); -MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver"); +MODULE_LICENSE("GPL v2"); MODULE_VERSION(DRV_VERSION); -static struct workqueue_struct *i40evf_wq; +static struct workqueue_struct *iavf_wq; /** - * i40evf_allocate_dma_mem_d - OS specific memory alloc for shared code + * iavf_allocate_dma_mem_d - OS specific memory alloc for shared code * @hw: pointer to the HW structure * @mem: ptr to mem struct to fill out * @size: size of memory requested * @alignment: what to align the allocation to **/ -i40e_status i40evf_allocate_dma_mem_d(struct i40e_hw *hw, - struct i40e_dma_mem *mem, - u64 size, u32 alignment) +iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw, + struct iavf_dma_mem *mem, + u64 size, u32 
alignment) { - struct i40evf_adapter *adapter = (struct i40evf_adapter *)hw->back; + struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back; if (!mem) return I40E_ERR_PARAM; @@ -84,13 +85,13 @@ i40e_status i40evf_allocate_dma_mem_d(struct i40e_hw *hw, } /** - * i40evf_free_dma_mem_d - OS specific memory free for shared code + * iavf_free_dma_mem_d - OS specific memory free for shared code * @hw: pointer to the HW structure * @mem: ptr to mem struct to free **/ -i40e_status i40evf_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem) +iavf_status iavf_free_dma_mem_d(struct iavf_hw *hw, struct iavf_dma_mem *mem) { - struct i40evf_adapter *adapter = (struct i40evf_adapter *)hw->back; + struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back; if (!mem || !mem->va) return I40E_ERR_PARAM; @@ -100,13 +101,13 @@ i40e_status i40evf_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem) } /** - * i40evf_allocate_virt_mem_d - OS specific memory alloc for shared code + * iavf_allocate_virt_mem_d - OS specific memory alloc for shared code * @hw: pointer to the HW structure * @mem: ptr to mem struct to fill out * @size: size of memory requested **/ -i40e_status i40evf_allocate_virt_mem_d(struct i40e_hw *hw, - struct i40e_virt_mem *mem, u32 size) +iavf_status iavf_allocate_virt_mem_d(struct iavf_hw *hw, + struct iavf_virt_mem *mem, u32 size) { if (!mem) return I40E_ERR_PARAM; @@ -121,12 +122,11 @@ i40e_status i40evf_allocate_virt_mem_d(struct i40e_hw *hw, } /** - * i40evf_free_virt_mem_d - OS specific memory free for shared code + * iavf_free_virt_mem_d - OS specific memory free for shared code * @hw: pointer to the HW structure * @mem: ptr to mem struct to free **/ -i40e_status i40evf_free_virt_mem_d(struct i40e_hw *hw, - struct i40e_virt_mem *mem) +iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw, struct iavf_virt_mem *mem) { if (!mem) return I40E_ERR_PARAM; @@ -138,17 +138,17 @@ i40e_status i40evf_free_virt_mem_d(struct i40e_hw *hw, } /** - * i40evf_debug_d - OS dependent version of debug printing + * iavf_debug_d - OS dependent version of debug printing * @hw: pointer to the HW structure * @mask: debug level mask * @fmt_str: printf-type format description **/ -void i40evf_debug_d(void *hw, u32 mask, char *fmt_str, ...) +void iavf_debug_d(void *hw, u32 mask, char *fmt_str, ...) { char buf[512]; va_list argptr; - if (!(mask & ((struct i40e_hw *)hw)->debug_mask)) + if (!(mask & ((struct iavf_hw *)hw)->debug_mask)) return; va_start(argptr, fmt_str); @@ -160,134 +160,131 @@ void i40evf_debug_d(void *hw, u32 mask, char *fmt_str, ...) 
} /** - * i40evf_schedule_reset - Set the flags and schedule a reset event + * iavf_schedule_reset - Set the flags and schedule a reset event * @adapter: board private structure **/ -void i40evf_schedule_reset(struct i40evf_adapter *adapter) +void iavf_schedule_reset(struct iavf_adapter *adapter) { if (!(adapter->flags & - (I40EVF_FLAG_RESET_PENDING | I40EVF_FLAG_RESET_NEEDED))) { - adapter->flags |= I40EVF_FLAG_RESET_NEEDED; + (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) { + adapter->flags |= IAVF_FLAG_RESET_NEEDED; schedule_work(&adapter->reset_task); } } /** - * i40evf_tx_timeout - Respond to a Tx Hang + * iavf_tx_timeout - Respond to a Tx Hang * @netdev: network interface device structure **/ -static void i40evf_tx_timeout(struct net_device *netdev) +static void iavf_tx_timeout(struct net_device *netdev) { - struct i40evf_adapter *adapter = netdev_priv(netdev); + struct iavf_adapter *adapter = netdev_priv(netdev); adapter->tx_timeout_count++; - i40evf_schedule_reset(adapter); + iavf_schedule_reset(adapter); } /** - * i40evf_misc_irq_disable - Mask off interrupt generation on the NIC + * iavf_misc_irq_disable - Mask off interrupt generation on the NIC * @adapter: board private structure **/ -static void i40evf_misc_irq_disable(struct i40evf_adapter *adapter) +static void iavf_misc_irq_disable(struct iavf_adapter *adapter) { - struct i40e_hw *hw = &adapter->hw; + struct iavf_hw *hw = &adapter->hw; if (!adapter->msix_entries) return; - wr32(hw, I40E_VFINT_DYN_CTL01, 0); + wr32(hw, IAVF_VFINT_DYN_CTL01, 0); - /* read flush */ - rd32(hw, I40E_VFGEN_RSTAT); + iavf_flush(hw); synchronize_irq(adapter->msix_entries[0].vector); } /** - * i40evf_misc_irq_enable - Enable default interrupt generation settings + * iavf_misc_irq_enable - Enable default interrupt generation settings * @adapter: board private structure **/ -static void i40evf_misc_irq_enable(struct i40evf_adapter *adapter) +static void iavf_misc_irq_enable(struct iavf_adapter *adapter) { - struct i40e_hw *hw = &adapter->hw; + struct iavf_hw *hw = &adapter->hw; - wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK | - I40E_VFINT_DYN_CTL01_ITR_INDX_MASK); - wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK); + wr32(hw, IAVF_VFINT_DYN_CTL01, IAVF_VFINT_DYN_CTL01_INTENA_MASK | + IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK); + wr32(hw, IAVF_VFINT_ICR0_ENA1, IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK); - /* read flush */ - rd32(hw, I40E_VFGEN_RSTAT); + iavf_flush(hw); } /** - * i40evf_irq_disable - Mask off interrupt generation on the NIC + * iavf_irq_disable - Mask off interrupt generation on the NIC * @adapter: board private structure **/ -static void i40evf_irq_disable(struct i40evf_adapter *adapter) +static void iavf_irq_disable(struct iavf_adapter *adapter) { int i; - struct i40e_hw *hw = &adapter->hw; + struct iavf_hw *hw = &adapter->hw; if (!adapter->msix_entries) return; for (i = 1; i < adapter->num_msix_vectors; i++) { - wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), 0); + wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1), 0); synchronize_irq(adapter->msix_entries[i].vector); } - /* read flush */ - rd32(hw, I40E_VFGEN_RSTAT); + iavf_flush(hw); } /** - * i40evf_irq_enable_queues - Enable interrupt for specified queues + * iavf_irq_enable_queues - Enable interrupt for specified queues * @adapter: board private structure * @mask: bitmap of queues to enable **/ -void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask) +void iavf_irq_enable_queues(struct iavf_adapter *adapter, u32 mask) { - struct i40e_hw *hw = &adapter->hw; + 
struct iavf_hw *hw = &adapter->hw; int i; for (i = 1; i < adapter->num_msix_vectors; i++) { if (mask & BIT(i - 1)) { - wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), - I40E_VFINT_DYN_CTLN1_INTENA_MASK | - I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK); + wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1), + IAVF_VFINT_DYN_CTLN1_INTENA_MASK | + IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK); } } } /** - * i40evf_irq_enable - Enable default interrupt generation settings + * iavf_irq_enable - Enable default interrupt generation settings * @adapter: board private structure * @flush: boolean value whether to run rd32() **/ -void i40evf_irq_enable(struct i40evf_adapter *adapter, bool flush) +void iavf_irq_enable(struct iavf_adapter *adapter, bool flush) { - struct i40e_hw *hw = &adapter->hw; + struct iavf_hw *hw = &adapter->hw; - i40evf_misc_irq_enable(adapter); - i40evf_irq_enable_queues(adapter, ~0); + iavf_misc_irq_enable(adapter); + iavf_irq_enable_queues(adapter, ~0); if (flush) - rd32(hw, I40E_VFGEN_RSTAT); + iavf_flush(hw); } /** - * i40evf_msix_aq - Interrupt handler for vector 0 + * iavf_msix_aq - Interrupt handler for vector 0 * @irq: interrupt number * @data: pointer to netdev **/ -static irqreturn_t i40evf_msix_aq(int irq, void *data) +static irqreturn_t iavf_msix_aq(int irq, void *data) { struct net_device *netdev = data; - struct i40evf_adapter *adapter = netdev_priv(netdev); - struct i40e_hw *hw = &adapter->hw; + struct iavf_adapter *adapter = netdev_priv(netdev); + struct iavf_hw *hw = &adapter->hw; /* handle non-queue interrupts, these reads clear the registers */ - rd32(hw, I40E_VFINT_ICR01); - rd32(hw, I40E_VFINT_ICR0_ENA1); + rd32(hw, IAVF_VFINT_ICR01); + rd32(hw, IAVF_VFINT_ICR0_ENA1); /* schedule work on the private workqueue */ schedule_work(&adapter->adminq_task); @@ -296,13 +293,13 @@ static irqreturn_t i40evf_msix_aq(int irq, void *data) } /** - * i40evf_msix_clean_rings - MSIX mode Interrupt Handler + * iavf_msix_clean_rings - MSIX mode Interrupt Handler * @irq: interrupt number * @data: pointer to a q_vector **/ -static irqreturn_t i40evf_msix_clean_rings(int irq, void *data) +static irqreturn_t iavf_msix_clean_rings(int irq, void *data) { - struct i40e_q_vector *q_vector = data; + struct iavf_q_vector *q_vector = data; if (!q_vector->tx.ring && !q_vector->rx.ring) return IRQ_HANDLED; @@ -313,17 +310,17 @@ static irqreturn_t i40evf_msix_clean_rings(int irq, void *data) } /** - * i40evf_map_vector_to_rxq - associate irqs with rx queues + * iavf_map_vector_to_rxq - associate irqs with rx queues * @adapter: board private structure * @v_idx: interrupt number * @r_idx: queue number **/ static void -i40evf_map_vector_to_rxq(struct i40evf_adapter *adapter, int v_idx, int r_idx) +iavf_map_vector_to_rxq(struct iavf_adapter *adapter, int v_idx, int r_idx) { - struct i40e_q_vector *q_vector = &adapter->q_vectors[v_idx]; - struct i40e_ring *rx_ring = &adapter->rx_rings[r_idx]; - struct i40e_hw *hw = &adapter->hw; + struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx]; + struct iavf_ring *rx_ring = &adapter->rx_rings[r_idx]; + struct iavf_hw *hw = &adapter->hw; rx_ring->q_vector = q_vector; rx_ring->next = q_vector->rx.ring; @@ -333,23 +330,23 @@ i40evf_map_vector_to_rxq(struct i40evf_adapter *adapter, int v_idx, int r_idx) q_vector->rx.next_update = jiffies + 1; q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting); q_vector->ring_mask |= BIT(r_idx); - wr32(hw, I40E_VFINT_ITRN1(I40E_RX_ITR, q_vector->reg_idx), + wr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR, q_vector->reg_idx), q_vector->rx.current_itr); 
q_vector->rx.current_itr = q_vector->rx.target_itr; } /** - * i40evf_map_vector_to_txq - associate irqs with tx queues + * iavf_map_vector_to_txq - associate irqs with tx queues * @adapter: board private structure * @v_idx: interrupt number * @t_idx: queue number **/ static void -i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx) +iavf_map_vector_to_txq(struct iavf_adapter *adapter, int v_idx, int t_idx) { - struct i40e_q_vector *q_vector = &adapter->q_vectors[v_idx]; - struct i40e_ring *tx_ring = &adapter->tx_rings[t_idx]; - struct i40e_hw *hw = &adapter->hw; + struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx]; + struct iavf_ring *tx_ring = &adapter->tx_rings[t_idx]; + struct iavf_hw *hw = &adapter->hw; tx_ring->q_vector = q_vector; tx_ring->next = q_vector->tx.ring; @@ -359,13 +356,13 @@ i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx) q_vector->tx.next_update = jiffies + 1; q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting); q_vector->num_ringpairs++; - wr32(hw, I40E_VFINT_ITRN1(I40E_TX_ITR, q_vector->reg_idx), + wr32(hw, IAVF_VFINT_ITRN1(IAVF_TX_ITR, q_vector->reg_idx), q_vector->tx.target_itr); q_vector->tx.current_itr = q_vector->tx.target_itr; } /** - * i40evf_map_rings_to_vectors - Maps descriptor rings to vectors + * iavf_map_rings_to_vectors - Maps descriptor rings to vectors * @adapter: board private structure to initialize * * This function maps descriptor rings to the queue-specific vectors @@ -374,7 +371,7 @@ i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx) * group the rings as "efficiently" as possible. You would add new * mapping configurations in here. **/ -static void i40evf_map_rings_to_vectors(struct i40evf_adapter *adapter) +static void iavf_map_rings_to_vectors(struct iavf_adapter *adapter) { int rings_remaining = adapter->num_active_queues; int ridx = 0, vidx = 0; @@ -383,8 +380,8 @@ static void i40evf_map_rings_to_vectors(struct i40evf_adapter *adapter) q_vectors = adapter->num_msix_vectors - NONQ_VECS; for (; ridx < rings_remaining; ridx++) { - i40evf_map_vector_to_rxq(adapter, vidx, ridx); - i40evf_map_vector_to_txq(adapter, vidx, ridx); + iavf_map_vector_to_rxq(adapter, vidx, ridx); + iavf_map_vector_to_txq(adapter, vidx, ridx); /* In the case where we have more queues than vectors, continue * round-robin on vectors until all queues are mapped. @@ -393,38 +390,38 @@ static void i40evf_map_rings_to_vectors(struct i40evf_adapter *adapter) vidx = 0; } - adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS; + adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS; } /** - * i40evf_irq_affinity_notify - Callback for affinity changes + * iavf_irq_affinity_notify - Callback for affinity changes * @notify: context as to what irq was changed * @mask: the new affinity mask * * This is a callback function used by the irq_set_affinity_notifier function * so that we may register to receive changes to the irq affinity masks. 
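Looking back at iavf_irq_enable_queues() above, the mask argument is a plain queue bitmap, so for example:

/* enable interrupts for queue vectors 0 and 2 only */
iavf_irq_enable_queues(adapter, BIT(0) | BIT(2));

/* iavf_irq_enable() passes ~0 to cover every queue vector */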
**/ -static void i40evf_irq_affinity_notify(struct irq_affinity_notify *notify, - const cpumask_t *mask) +static void iavf_irq_affinity_notify(struct irq_affinity_notify *notify, + const cpumask_t *mask) { - struct i40e_q_vector *q_vector = - container_of(notify, struct i40e_q_vector, affinity_notify); + struct iavf_q_vector *q_vector = + container_of(notify, struct iavf_q_vector, affinity_notify); cpumask_copy(&q_vector->affinity_mask, mask); } /** - * i40evf_irq_affinity_release - Callback for affinity notifier release + * iavf_irq_affinity_release - Callback for affinity notifier release * @ref: internal core kernel usage * * This is a callback function used by the irq_set_affinity_notifier function * to inform the current notification subscriber that they will no longer * receive notifications. **/ -static void i40evf_irq_affinity_release(struct kref *ref) {} +static void iavf_irq_affinity_release(struct kref *ref) {} /** - * i40evf_request_traffic_irqs - Initialize MSI-X interrupts + * iavf_request_traffic_irqs - Initialize MSI-X interrupts * @adapter: board private structure * @basename: device basename * @@ -432,37 +429,38 @@ static void i40evf_irq_affinity_release(struct kref *ref) {} * interrupts from the kernel. **/ static int -i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename) +iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename) { unsigned int vector, q_vectors; unsigned int rx_int_idx = 0, tx_int_idx = 0; int irq_num, err; int cpu; - i40evf_irq_disable(adapter); + iavf_irq_disable(adapter); /* Decrement for Other and TCP Timer vectors */ q_vectors = adapter->num_msix_vectors - NONQ_VECS; for (vector = 0; vector < q_vectors; vector++) { - struct i40e_q_vector *q_vector = &adapter->q_vectors[vector]; + struct iavf_q_vector *q_vector = &adapter->q_vectors[vector]; + irq_num = adapter->msix_entries[vector + NONQ_VECS].vector; if (q_vector->tx.ring && q_vector->rx.ring) { snprintf(q_vector->name, sizeof(q_vector->name), - "i40evf-%s-TxRx-%d", basename, rx_int_idx++); + "iavf-%s-TxRx-%d", basename, rx_int_idx++); tx_int_idx++; } else if (q_vector->rx.ring) { snprintf(q_vector->name, sizeof(q_vector->name), - "i40evf-%s-rx-%d", basename, rx_int_idx++); + "iavf-%s-rx-%d", basename, rx_int_idx++); } else if (q_vector->tx.ring) { snprintf(q_vector->name, sizeof(q_vector->name), - "i40evf-%s-tx-%d", basename, tx_int_idx++); + "iavf-%s-tx-%d", basename, tx_int_idx++); } else { /* skip this unused q_vector */ continue; } err = request_irq(irq_num, - i40evf_msix_clean_rings, + iavf_msix_clean_rings, 0, q_vector->name, q_vector); @@ -472,9 +470,9 @@ i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename) goto free_queue_irqs; } /* register for affinity change notifications */ - q_vector->affinity_notify.notify = i40evf_irq_affinity_notify; + q_vector->affinity_notify.notify = iavf_irq_affinity_notify; q_vector->affinity_notify.release = - i40evf_irq_affinity_release; + iavf_irq_affinity_release; irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify); /* Spread the IRQ affinity hints across online CPUs. Note that * get_cpu_mask returns a mask with a permanent lifetime so @@ -498,23 +496,23 @@ free_queue_irqs: } /** - * i40evf_request_misc_irq - Initialize MSI-X interrupts + * iavf_request_misc_irq - Initialize MSI-X interrupts * @adapter: board private structure * * Allocates MSI-X vector 0 and requests interrupts from the kernel. 
This * vector is only for the admin queue, and stays active even when the netdev * is closed. **/ -static int i40evf_request_misc_irq(struct i40evf_adapter *adapter) +static int iavf_request_misc_irq(struct iavf_adapter *adapter) { struct net_device *netdev = adapter->netdev; int err; snprintf(adapter->misc_vector_name, - sizeof(adapter->misc_vector_name) - 1, "i40evf-%s:mbx", + sizeof(adapter->misc_vector_name) - 1, "iavf-%s:mbx", dev_name(&adapter->pdev->dev)); err = request_irq(adapter->msix_entries[0].vector, - &i40evf_msix_aq, 0, + &iavf_msix_aq, 0, adapter->misc_vector_name, netdev); if (err) { dev_err(&adapter->pdev->dev, @@ -526,12 +524,12 @@ static int i40evf_request_misc_irq(struct i40evf_adapter *adapter) } /** - * i40evf_free_traffic_irqs - Free MSI-X interrupts + * iavf_free_traffic_irqs - Free MSI-X interrupts * @adapter: board private structure * * Frees all MSI-X vectors other than 0. **/ -static void i40evf_free_traffic_irqs(struct i40evf_adapter *adapter) +static void iavf_free_traffic_irqs(struct iavf_adapter *adapter) { int vector, irq_num, q_vectors; @@ -549,12 +547,12 @@ static void i40evf_free_traffic_irqs(struct i40evf_adapter *adapter) } /** - * i40evf_free_misc_irq - Free MSI-X miscellaneous vector + * iavf_free_misc_irq - Free MSI-X miscellaneous vector * @adapter: board private structure * * Frees MSI-X vector 0. **/ -static void i40evf_free_misc_irq(struct i40evf_adapter *adapter) +static void iavf_free_misc_irq(struct iavf_adapter *adapter) { struct net_device *netdev = adapter->netdev; @@ -565,58 +563,58 @@ static void i40evf_free_misc_irq(struct i40evf_adapter *adapter) } /** - * i40evf_configure_tx - Configure Transmit Unit after Reset + * iavf_configure_tx - Configure Transmit Unit after Reset * @adapter: board private structure * * Configure the Tx unit of the MAC after a reset. **/ -static void i40evf_configure_tx(struct i40evf_adapter *adapter) +static void iavf_configure_tx(struct iavf_adapter *adapter) { - struct i40e_hw *hw = &adapter->hw; + struct iavf_hw *hw = &adapter->hw; int i; for (i = 0; i < adapter->num_active_queues; i++) - adapter->tx_rings[i].tail = hw->hw_addr + I40E_QTX_TAIL1(i); + adapter->tx_rings[i].tail = hw->hw_addr + IAVF_QTX_TAIL1(i); } /** - * i40evf_configure_rx - Configure Receive Unit after Reset + * iavf_configure_rx - Configure Receive Unit after Reset * @adapter: board private structure * * Configure the Rx unit of the MAC after a reset. **/ -static void i40evf_configure_rx(struct i40evf_adapter *adapter) +static void iavf_configure_rx(struct iavf_adapter *adapter) { - unsigned int rx_buf_len = I40E_RXBUFFER_2048; - struct i40e_hw *hw = &adapter->hw; + unsigned int rx_buf_len = IAVF_RXBUFFER_2048; + struct iavf_hw *hw = &adapter->hw; int i; /* Legacy Rx will always default to a 2048 buffer size. */ #if (PAGE_SIZE < 8192) - if (!(adapter->flags & I40EVF_FLAG_LEGACY_RX)) { + if (!(adapter->flags & IAVF_FLAG_LEGACY_RX)) { struct net_device *netdev = adapter->netdev; /* For jumbo frames on systems with 4K pages we have to use * an order 1 page, so we might as well increase the size * of our Rx buffer to make better use of the available space */ - rx_buf_len = I40E_RXBUFFER_3072; + rx_buf_len = IAVF_RXBUFFER_3072; /* We use a 1536 buffer size for configurations with * standard Ethernet mtu. On x86 this gives us enough room * for shared info and 192 bytes of padding. 
*/ - if (!I40E_2K_TOO_SMALL_WITH_PADDING && + if (!IAVF_2K_TOO_SMALL_WITH_PADDING && (netdev->mtu <= ETH_DATA_LEN)) - rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN; + rx_buf_len = IAVF_RXBUFFER_1536 - NET_IP_ALIGN; } #endif for (i = 0; i < adapter->num_active_queues; i++) { - adapter->rx_rings[i].tail = hw->hw_addr + I40E_QRX_TAIL1(i); + adapter->rx_rings[i].tail = hw->hw_addr + IAVF_QRX_TAIL1(i); adapter->rx_rings[i].rx_buf_len = rx_buf_len; - if (adapter->flags & I40EVF_FLAG_LEGACY_RX) + if (adapter->flags & IAVF_FLAG_LEGACY_RX) clear_ring_build_skb_enabled(&adapter->rx_rings[i]); else set_ring_build_skb_enabled(&adapter->rx_rings[i]); @@ -624,7 +622,7 @@ static void i40evf_configure_rx(struct i40evf_adapter *adapter) } /** - * i40evf_find_vlan - Search filter list for specific vlan filter + * iavf_find_vlan - Search filter list for specific vlan filter * @adapter: board private structure * @vlan: vlan tag * @@ -632,9 +630,9 @@ static void i40evf_configure_rx(struct i40evf_adapter *adapter) * mac_vlan_list_lock. **/ static struct -i40evf_vlan_filter *i40evf_find_vlan(struct i40evf_adapter *adapter, u16 vlan) +iavf_vlan_filter *iavf_find_vlan(struct iavf_adapter *adapter, u16 vlan) { - struct i40evf_vlan_filter *f; + struct iavf_vlan_filter *f; list_for_each_entry(f, &adapter->vlan_filter_list, list) { if (vlan == f->vlan) @@ -644,20 +642,20 @@ i40evf_vlan_filter *i40evf_find_vlan(struct i40evf_adapter *adapter, u16 vlan) } /** - * i40evf_add_vlan - Add a vlan filter to the list + * iavf_add_vlan - Add a vlan filter to the list * @adapter: board private structure * @vlan: VLAN tag * * Returns ptr to the filter object or NULL when no memory available. **/ static struct -i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan) +iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter, u16 vlan) { - struct i40evf_vlan_filter *f = NULL; + struct iavf_vlan_filter *f = NULL; spin_lock_bh(&adapter->mac_vlan_list_lock); - f = i40evf_find_vlan(adapter, vlan); + f = iavf_find_vlan(adapter, vlan); if (!f) { f = kzalloc(sizeof(*f), GFP_KERNEL); if (!f) @@ -668,7 +666,7 @@ i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan) INIT_LIST_HEAD(&f->list); list_add(&f->list, &adapter->vlan_filter_list); f->add = true; - adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER; + adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER; } clearout: @@ -677,63 +675,63 @@ clearout: } /** - * i40evf_del_vlan - Remove a vlan filter from the list + * iavf_del_vlan - Remove a vlan filter from the list * @adapter: board private structure * @vlan: VLAN tag **/ -static void i40evf_del_vlan(struct i40evf_adapter *adapter, u16 vlan) +static void iavf_del_vlan(struct iavf_adapter *adapter, u16 vlan) { - struct i40evf_vlan_filter *f; + struct iavf_vlan_filter *f; spin_lock_bh(&adapter->mac_vlan_list_lock); - f = i40evf_find_vlan(adapter, vlan); + f = iavf_find_vlan(adapter, vlan); if (f) { f->remove = true; - adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER; + adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER; } spin_unlock_bh(&adapter->mac_vlan_list_lock); } /** - * i40evf_vlan_rx_add_vid - Add a VLAN filter to a device + * iavf_vlan_rx_add_vid - Add a VLAN filter to a device * @netdev: network device struct * @proto: unused protocol data * @vid: VLAN tag **/ -static int i40evf_vlan_rx_add_vid(struct net_device *netdev, - __always_unused __be16 proto, u16 vid) +static int iavf_vlan_rx_add_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vid) { - 
struct i40evf_adapter *adapter = netdev_priv(netdev); + struct iavf_adapter *adapter = netdev_priv(netdev); if (!VLAN_ALLOWED(adapter)) return -EIO; - if (i40evf_add_vlan(adapter, vid) == NULL) + if (iavf_add_vlan(adapter, vid) == NULL) return -ENOMEM; return 0; } /** - * i40evf_vlan_rx_kill_vid - Remove a VLAN filter from a device + * iavf_vlan_rx_kill_vid - Remove a VLAN filter from a device * @netdev: network device struct * @proto: unused protocol data * @vid: VLAN tag **/ -static int i40evf_vlan_rx_kill_vid(struct net_device *netdev, - __always_unused __be16 proto, u16 vid) +static int iavf_vlan_rx_kill_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vid) { - struct i40evf_adapter *adapter = netdev_priv(netdev); + struct iavf_adapter *adapter = netdev_priv(netdev); if (VLAN_ALLOWED(adapter)) { - i40evf_del_vlan(adapter, vid); + iavf_del_vlan(adapter, vid); return 0; } return -EIO; } /** - * i40evf_find_filter - Search filter list for specific mac filter + * iavf_find_filter - Search filter list for specific mac filter * @adapter: board private structure * @macaddr: the MAC address * @@ -741,10 +739,10 @@ static int i40evf_vlan_rx_kill_vid(struct net_device *netdev, * mac_vlan_list_lock. **/ static struct -i40evf_mac_filter *i40evf_find_filter(struct i40evf_adapter *adapter, - const u8 *macaddr) +iavf_mac_filter *iavf_find_filter(struct iavf_adapter *adapter, + const u8 *macaddr) { - struct i40evf_mac_filter *f; + struct iavf_mac_filter *f; if (!macaddr) return NULL; @@ -757,22 +755,22 @@ i40evf_mac_filter *i40evf_find_filter(struct i40evf_adapter *adapter, } /** - * i40e_add_filter - Add a mac filter to the filter list + * iavf_add_filter - Add a mac filter to the filter list * @adapter: board private structure * @macaddr: the MAC address * * Returns ptr to the filter object or NULL when no memory available. 
**/
 static struct
-i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter,
-				     const u8 *macaddr)
+iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
+				 const u8 *macaddr)
 {
-	struct i40evf_mac_filter *f;
+	struct iavf_mac_filter *f;
 
 	if (!macaddr)
 		return NULL;
 
-	f = i40evf_find_filter(adapter, macaddr);
+	f = iavf_find_filter(adapter, macaddr);
 	if (!f) {
 		f = kzalloc(sizeof(*f), GFP_ATOMIC);
 		if (!f)
@@ -782,7 +780,7 @@ i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter,
 
 		list_add_tail(&f->list, &adapter->mac_filter_list);
 		f->add = true;
-		adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+		adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
 	} else {
 		f->remove = false;
 	}
@@ -791,17 +789,17 @@ i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter,
 }
 
 /**
- * i40evf_set_mac - NDO callback to set port mac address
+ * iavf_set_mac - NDO callback to set port mac address
  * @netdev: network interface device structure
  * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
-static int i40evf_set_mac(struct net_device *netdev, void *p)
+static int iavf_set_mac(struct net_device *netdev, void *p)
 {
-	struct i40evf_adapter *adapter = netdev_priv(netdev);
-	struct i40e_hw *hw = &adapter->hw;
-	struct i40evf_mac_filter *f;
+	struct iavf_adapter *adapter = netdev_priv(netdev);
+	struct iavf_hw *hw = &adapter->hw;
+	struct iavf_mac_filter *f;
 	struct sockaddr *addr = p;
 
 	if (!is_valid_ether_addr(addr->sa_data))
@@ -810,18 +808,18 @@ static int i40evf_set_mac(struct net_device *netdev, void *p)
 	if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
 		return 0;
 
-	if (adapter->flags & I40EVF_FLAG_ADDR_SET_BY_PF)
+	if (adapter->flags & IAVF_FLAG_ADDR_SET_BY_PF)
 		return -EPERM;
 
 	spin_lock_bh(&adapter->mac_vlan_list_lock);
 
-	f = i40evf_find_filter(adapter, hw->mac.addr);
+	f = iavf_find_filter(adapter, hw->mac.addr);
 	if (f) {
 		f->remove = true;
-		adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+		adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
 	}
 
-	f = i40evf_add_filter(adapter, addr->sa_data);
+	f = iavf_add_filter(adapter, addr->sa_data);
 
 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
 
@@ -834,35 +832,35 @@ static int i40evf_set_mac(struct net_device *netdev, void *p)
 }
 
 /**
- * i40evf_addr_sync - Callback for dev_(mc|uc)_sync to add address
+ * iavf_addr_sync - Callback for dev_(mc|uc)_sync to add address
  * @netdev: the netdevice
  * @addr: address to add
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 */
-static int i40evf_addr_sync(struct net_device *netdev, const u8 *addr)
+static int iavf_addr_sync(struct net_device *netdev, const u8 *addr)
 {
-	struct i40evf_adapter *adapter = netdev_priv(netdev);
+	struct iavf_adapter *adapter = netdev_priv(netdev);
 
-	if (i40evf_add_filter(adapter, addr))
+	if (iavf_add_filter(adapter, addr))
 		return 0;
 	else
 		return -ENOMEM;
 }
 
 /**
- * i40evf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
+ * iavf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
  * @netdev: the netdevice
  * @addr: address to remove
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
*/ -static int i40evf_addr_unsync(struct net_device *netdev, const u8 *addr) +static int iavf_addr_unsync(struct net_device *netdev, const u8 *addr) { - struct i40evf_adapter *adapter = netdev_priv(netdev); - struct i40evf_mac_filter *f; + struct iavf_adapter *adapter = netdev_priv(netdev); + struct iavf_mac_filter *f; /* Under some circumstances, we might receive a request to delete * our own device address from our uc list. Because we store the @@ -872,50 +870,50 @@ static int i40evf_addr_unsync(struct net_device *netdev, const u8 *addr) if (ether_addr_equal(addr, netdev->dev_addr)) return 0; - f = i40evf_find_filter(adapter, addr); + f = iavf_find_filter(adapter, addr); if (f) { f->remove = true; - adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER; + adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER; } return 0; } /** - * i40evf_set_rx_mode - NDO callback to set the netdev filters + * iavf_set_rx_mode - NDO callback to set the netdev filters * @netdev: network interface device structure **/ -static void i40evf_set_rx_mode(struct net_device *netdev) +static void iavf_set_rx_mode(struct net_device *netdev) { - struct i40evf_adapter *adapter = netdev_priv(netdev); + struct iavf_adapter *adapter = netdev_priv(netdev); spin_lock_bh(&adapter->mac_vlan_list_lock); - __dev_uc_sync(netdev, i40evf_addr_sync, i40evf_addr_unsync); - __dev_mc_sync(netdev, i40evf_addr_sync, i40evf_addr_unsync); + __dev_uc_sync(netdev, iavf_addr_sync, iavf_addr_unsync); + __dev_mc_sync(netdev, iavf_addr_sync, iavf_addr_unsync); spin_unlock_bh(&adapter->mac_vlan_list_lock); if (netdev->flags & IFF_PROMISC && - !(adapter->flags & I40EVF_FLAG_PROMISC_ON)) - adapter->aq_required |= I40EVF_FLAG_AQ_REQUEST_PROMISC; + !(adapter->flags & IAVF_FLAG_PROMISC_ON)) + adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_PROMISC; else if (!(netdev->flags & IFF_PROMISC) && - adapter->flags & I40EVF_FLAG_PROMISC_ON) - adapter->aq_required |= I40EVF_FLAG_AQ_RELEASE_PROMISC; + adapter->flags & IAVF_FLAG_PROMISC_ON) + adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_PROMISC; if (netdev->flags & IFF_ALLMULTI && - !(adapter->flags & I40EVF_FLAG_ALLMULTI_ON)) - adapter->aq_required |= I40EVF_FLAG_AQ_REQUEST_ALLMULTI; + !(adapter->flags & IAVF_FLAG_ALLMULTI_ON)) + adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_ALLMULTI; else if (!(netdev->flags & IFF_ALLMULTI) && - adapter->flags & I40EVF_FLAG_ALLMULTI_ON) - adapter->aq_required |= I40EVF_FLAG_AQ_RELEASE_ALLMULTI; + adapter->flags & IAVF_FLAG_ALLMULTI_ON) + adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_ALLMULTI; } /** - * i40evf_napi_enable_all - enable NAPI on all queue vectors + * iavf_napi_enable_all - enable NAPI on all queue vectors * @adapter: board private structure **/ -static void i40evf_napi_enable_all(struct i40evf_adapter *adapter) +static void iavf_napi_enable_all(struct iavf_adapter *adapter) { int q_idx; - struct i40e_q_vector *q_vector; + struct iavf_q_vector *q_vector; int q_vectors = adapter->num_msix_vectors - NONQ_VECS; for (q_idx = 0; q_idx < q_vectors; q_idx++) { @@ -928,13 +926,13 @@ static void i40evf_napi_enable_all(struct i40evf_adapter *adapter) } /** - * i40evf_napi_disable_all - disable NAPI on all queue vectors + * iavf_napi_disable_all - disable NAPI on all queue vectors * @adapter: board private structure **/ -static void i40evf_napi_disable_all(struct i40evf_adapter *adapter) +static void iavf_napi_disable_all(struct iavf_adapter *adapter) { int q_idx; - struct i40e_q_vector *q_vector; + struct iavf_q_vector *q_vector; int q_vectors = adapter->num_msix_vectors - 
NONQ_VECS; for (q_idx = 0; q_idx < q_vectors; q_idx++) { @@ -944,67 +942,67 @@ static void i40evf_napi_disable_all(struct i40evf_adapter *adapter) } /** - * i40evf_configure - set up transmit and receive data structures + * iavf_configure - set up transmit and receive data structures * @adapter: board private structure **/ -static void i40evf_configure(struct i40evf_adapter *adapter) +static void iavf_configure(struct iavf_adapter *adapter) { struct net_device *netdev = adapter->netdev; int i; - i40evf_set_rx_mode(netdev); + iavf_set_rx_mode(netdev); - i40evf_configure_tx(adapter); - i40evf_configure_rx(adapter); - adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_QUEUES; + iavf_configure_tx(adapter); + iavf_configure_rx(adapter); + adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES; for (i = 0; i < adapter->num_active_queues; i++) { - struct i40e_ring *ring = &adapter->rx_rings[i]; + struct iavf_ring *ring = &adapter->rx_rings[i]; - i40evf_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring)); + iavf_alloc_rx_buffers(ring, IAVF_DESC_UNUSED(ring)); } } /** - * i40evf_up_complete - Finish the last steps of bringing up a connection + * iavf_up_complete - Finish the last steps of bringing up a connection * @adapter: board private structure * - * Expects to be called while holding the __I40EVF_IN_CRITICAL_TASK bit lock. + * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock. **/ -static void i40evf_up_complete(struct i40evf_adapter *adapter) +static void iavf_up_complete(struct iavf_adapter *adapter) { - adapter->state = __I40EVF_RUNNING; - clear_bit(__I40E_VSI_DOWN, adapter->vsi.state); + adapter->state = __IAVF_RUNNING; + clear_bit(__IAVF_VSI_DOWN, adapter->vsi.state); - i40evf_napi_enable_all(adapter); + iavf_napi_enable_all(adapter); - adapter->aq_required |= I40EVF_FLAG_AQ_ENABLE_QUEUES; + adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_QUEUES; if (CLIENT_ENABLED(adapter)) - adapter->flags |= I40EVF_FLAG_CLIENT_NEEDS_OPEN; + adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_OPEN; mod_timer_pending(&adapter->watchdog_timer, jiffies + 1); } /** - * i40e_down - Shutdown the connection processing + * iavf_down - Shutdown the connection processing * @adapter: board private structure * - * Expects to be called while holding the __I40EVF_IN_CRITICAL_TASK bit lock. + * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock. **/ -void i40evf_down(struct i40evf_adapter *adapter) +void iavf_down(struct iavf_adapter *adapter) { struct net_device *netdev = adapter->netdev; - struct i40evf_vlan_filter *vlf; - struct i40evf_mac_filter *f; - struct i40evf_cloud_filter *cf; + struct iavf_vlan_filter *vlf; + struct iavf_mac_filter *f; + struct iavf_cloud_filter *cf; - if (adapter->state <= __I40EVF_DOWN_PENDING) + if (adapter->state <= __IAVF_DOWN_PENDING) return; netif_carrier_off(netdev); netif_tx_disable(netdev); adapter->link_up = false; - i40evf_napi_disable_all(adapter); - i40evf_irq_disable(adapter); + iavf_napi_disable_all(adapter); + iavf_irq_disable(adapter); spin_lock_bh(&adapter->mac_vlan_list_lock); @@ -1031,25 +1029,25 @@ void i40evf_down(struct i40evf_adapter *adapter) } spin_unlock_bh(&adapter->cloud_filter_list_lock); - if (!(adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) && - adapter->state != __I40EVF_RESETTING) { + if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) && + adapter->state != __IAVF_RESETTING) { /* cancel any current operation */ adapter->current_op = VIRTCHNL_OP_UNKNOWN; /* Schedule operations to close down the HW. Don't wait * here for this to complete. 
The watchdog is still running
		 * and it will take care of this.
		 */
-	adapter->aq_required = I40EVF_FLAG_AQ_DEL_MAC_FILTER;
-	adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
-	adapter->aq_required |= I40EVF_FLAG_AQ_DEL_CLOUD_FILTER;
-	adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
+	adapter->aq_required = IAVF_FLAG_AQ_DEL_MAC_FILTER;
+	adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
+	adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
+	adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
 	}
 
 	mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
 }
 
 /**
- * i40evf_acquire_msix_vectors - Setup the MSIX capability
+ * iavf_acquire_msix_vectors - Setup the MSIX capability
  * @adapter: board private structure
  * @vectors: number of vectors to request
 *
@@ -1058,7 +1056,7 @@ void i40evf_down(struct i40evf_adapter *adapter)
 * Returns 0 on success, negative on failure
 **/
 static int
-i40evf_acquire_msix_vectors(struct i40evf_adapter *adapter, int vectors)
+iavf_acquire_msix_vectors(struct iavf_adapter *adapter, int vectors)
 {
 	int err, vector_threshold;
@@ -1092,12 +1090,12 @@ i40evf_acquire_msix_vectors(struct i40evf_adapter *adapter, int vectors)
 }
 
 /**
- * i40evf_free_queues - Free memory for all rings
+ * iavf_free_queues - Free memory for all rings
  * @adapter: board private structure to initialize
 *
 * Free all of the memory associated with queue pairs.
 **/
-static void i40evf_free_queues(struct i40evf_adapter *adapter)
+static void iavf_free_queues(struct iavf_adapter *adapter)
 {
 	if (!adapter->vsi_res)
 		return;
@@ -1109,14 +1107,14 @@ static void i40evf_free_queues(struct i40evf_adapter *adapter)
 }
 
 /**
- * i40evf_alloc_queues - Allocate memory for all rings
+ * iavf_alloc_queues - Allocate memory for all rings
  * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
**/ -static int i40evf_alloc_queues(struct i40evf_adapter *adapter) +static int iavf_alloc_queues(struct iavf_adapter *adapter) { int i, num_active_queues; @@ -1137,17 +1135,17 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter) adapter->tx_rings = kcalloc(num_active_queues, - sizeof(struct i40e_ring), GFP_KERNEL); + sizeof(struct iavf_ring), GFP_KERNEL); if (!adapter->tx_rings) goto err_out; adapter->rx_rings = kcalloc(num_active_queues, - sizeof(struct i40e_ring), GFP_KERNEL); + sizeof(struct iavf_ring), GFP_KERNEL); if (!adapter->rx_rings) goto err_out; for (i = 0; i < num_active_queues; i++) { - struct i40e_ring *tx_ring; - struct i40e_ring *rx_ring; + struct iavf_ring *tx_ring; + struct iavf_ring *rx_ring; tx_ring = &adapter->tx_rings[i]; @@ -1155,16 +1153,16 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter) tx_ring->netdev = adapter->netdev; tx_ring->dev = &adapter->pdev->dev; tx_ring->count = adapter->tx_desc_count; - tx_ring->itr_setting = I40E_ITR_TX_DEF; - if (adapter->flags & I40EVF_FLAG_WB_ON_ITR_CAPABLE) - tx_ring->flags |= I40E_TXR_FLAGS_WB_ON_ITR; + tx_ring->itr_setting = IAVF_ITR_TX_DEF; + if (adapter->flags & IAVF_FLAG_WB_ON_ITR_CAPABLE) + tx_ring->flags |= IAVF_TXR_FLAGS_WB_ON_ITR; rx_ring = &adapter->rx_rings[i]; rx_ring->queue_index = i; rx_ring->netdev = adapter->netdev; rx_ring->dev = &adapter->pdev->dev; rx_ring->count = adapter->rx_desc_count; - rx_ring->itr_setting = I40E_ITR_RX_DEF; + rx_ring->itr_setting = IAVF_ITR_RX_DEF; } adapter->num_active_queues = num_active_queues; @@ -1172,18 +1170,18 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter) return 0; err_out: - i40evf_free_queues(adapter); + iavf_free_queues(adapter); return -ENOMEM; } /** - * i40evf_set_interrupt_capability - set MSI-X or FAIL if not supported + * iavf_set_interrupt_capability - set MSI-X or FAIL if not supported * @adapter: board private structure to initialize * * Attempt to configure the interrupts using the best available * capabilities of the hardware and the kernel. 
**/ -static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter) +static int iavf_set_interrupt_capability(struct iavf_adapter *adapter) { int vector, v_budget; int pairs = 0; @@ -1213,7 +1211,7 @@ static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter) for (vector = 0; vector < v_budget; vector++) adapter->msix_entries[vector].entry = vector; - err = i40evf_acquire_msix_vectors(adapter, v_budget); + err = iavf_acquire_msix_vectors(adapter, v_budget); out: netif_set_real_num_rx_queues(adapter->netdev, pairs); @@ -1222,16 +1220,16 @@ out: } /** - * i40e_config_rss_aq - Configure RSS keys and lut by using AQ commands + * iavf_config_rss_aq - Configure RSS keys and lut by using AQ commands * @adapter: board private structure * * Return 0 on success, negative on failure **/ -static int i40evf_config_rss_aq(struct i40evf_adapter *adapter) +static int iavf_config_rss_aq(struct iavf_adapter *adapter) { struct i40e_aqc_get_set_rss_key_data *rss_key = (struct i40e_aqc_get_set_rss_key_data *)adapter->rss_key; - struct i40e_hw *hw = &adapter->hw; + struct iavf_hw *hw = &adapter->hw; int ret = 0; if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { @@ -1241,21 +1239,21 @@ static int i40evf_config_rss_aq(struct i40evf_adapter *adapter) return -EBUSY; } - ret = i40evf_aq_set_rss_key(hw, adapter->vsi.id, rss_key); + ret = iavf_aq_set_rss_key(hw, adapter->vsi.id, rss_key); if (ret) { dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n", - i40evf_stat_str(hw, ret), - i40evf_aq_str(hw, hw->aq.asq_last_status)); + iavf_stat_str(hw, ret), + iavf_aq_str(hw, hw->aq.asq_last_status)); return ret; } - ret = i40evf_aq_set_rss_lut(hw, adapter->vsi.id, false, - adapter->rss_lut, adapter->rss_lut_size); + ret = iavf_aq_set_rss_lut(hw, adapter->vsi.id, false, + adapter->rss_lut, adapter->rss_lut_size); if (ret) { dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n", - i40evf_stat_str(hw, ret), - i40evf_aq_str(hw, hw->aq.asq_last_status)); + iavf_stat_str(hw, ret), + iavf_aq_str(hw, hw->aq.asq_last_status)); } return ret; @@ -1263,55 +1261,55 @@ static int i40evf_config_rss_aq(struct i40evf_adapter *adapter) } /** - * i40evf_config_rss_reg - Configure RSS keys and lut by writing registers + * iavf_config_rss_reg - Configure RSS keys and lut by writing registers * @adapter: board private structure * * Returns 0 on success, negative on failure **/ -static int i40evf_config_rss_reg(struct i40evf_adapter *adapter) +static int iavf_config_rss_reg(struct iavf_adapter *adapter) { - struct i40e_hw *hw = &adapter->hw; + struct iavf_hw *hw = &adapter->hw; u32 *dw; u16 i; dw = (u32 *)adapter->rss_key; for (i = 0; i <= adapter->rss_key_size / 4; i++) - wr32(hw, I40E_VFQF_HKEY(i), dw[i]); + wr32(hw, IAVF_VFQF_HKEY(i), dw[i]); dw = (u32 *)adapter->rss_lut; for (i = 0; i <= adapter->rss_lut_size / 4; i++) - wr32(hw, I40E_VFQF_HLUT(i), dw[i]); + wr32(hw, IAVF_VFQF_HLUT(i), dw[i]); - i40e_flush(hw); + iavf_flush(hw); return 0; } /** - * i40evf_config_rss - Configure RSS keys and lut + * iavf_config_rss - Configure RSS keys and lut * @adapter: board private structure * * Returns 0 on success, negative on failure **/ -int i40evf_config_rss(struct i40evf_adapter *adapter) +int iavf_config_rss(struct iavf_adapter *adapter) { if (RSS_PF(adapter)) { - adapter->aq_required |= I40EVF_FLAG_AQ_SET_RSS_LUT | - I40EVF_FLAG_AQ_SET_RSS_KEY; + adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_LUT | + IAVF_FLAG_AQ_SET_RSS_KEY; return 0; } else if (RSS_AQ(adapter)) { - return 
i40evf_config_rss_aq(adapter); + return iavf_config_rss_aq(adapter); } else { - return i40evf_config_rss_reg(adapter); + return iavf_config_rss_reg(adapter); } } /** - * i40evf_fill_rss_lut - Fill the lut with default values + * iavf_fill_rss_lut - Fill the lut with default values * @adapter: board private structure **/ -static void i40evf_fill_rss_lut(struct i40evf_adapter *adapter) +static void iavf_fill_rss_lut(struct iavf_adapter *adapter) { u16 i; @@ -1320,47 +1318,46 @@ static void i40evf_fill_rss_lut(struct i40evf_adapter *adapter) } /** - * i40evf_init_rss - Prepare for RSS + * iavf_init_rss - Prepare for RSS * @adapter: board private structure * * Return 0 on success, negative on failure **/ -static int i40evf_init_rss(struct i40evf_adapter *adapter) +static int iavf_init_rss(struct iavf_adapter *adapter) { - struct i40e_hw *hw = &adapter->hw; + struct iavf_hw *hw = &adapter->hw; int ret; if (!RSS_PF(adapter)) { /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */ if (adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) - adapter->hena = I40E_DEFAULT_RSS_HENA_EXPANDED; + adapter->hena = IAVF_DEFAULT_RSS_HENA_EXPANDED; else - adapter->hena = I40E_DEFAULT_RSS_HENA; + adapter->hena = IAVF_DEFAULT_RSS_HENA; - wr32(hw, I40E_VFQF_HENA(0), (u32)adapter->hena); - wr32(hw, I40E_VFQF_HENA(1), (u32)(adapter->hena >> 32)); + wr32(hw, IAVF_VFQF_HENA(0), (u32)adapter->hena); + wr32(hw, IAVF_VFQF_HENA(1), (u32)(adapter->hena >> 32)); } - i40evf_fill_rss_lut(adapter); - + iavf_fill_rss_lut(adapter); netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size); - ret = i40evf_config_rss(adapter); + ret = iavf_config_rss(adapter); return ret; } /** - * i40evf_alloc_q_vectors - Allocate memory for interrupt vectors + * iavf_alloc_q_vectors - Allocate memory for interrupt vectors * @adapter: board private structure to initialize * * We allocate one q_vector per queue interrupt. If allocation fails we * return -ENOMEM. **/ -static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter) +static int iavf_alloc_q_vectors(struct iavf_adapter *adapter) { int q_idx = 0, num_q_vectors; - struct i40e_q_vector *q_vector; + struct iavf_q_vector *q_vector; num_q_vectors = adapter->num_msix_vectors - NONQ_VECS; adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector), @@ -1376,21 +1373,21 @@ static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter) q_vector->reg_idx = q_idx; cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask); netif_napi_add(adapter->netdev, &q_vector->napi, - i40evf_napi_poll, NAPI_POLL_WEIGHT); + iavf_napi_poll, NAPI_POLL_WEIGHT); } return 0; } /** - * i40evf_free_q_vectors - Free memory allocated for interrupt vectors + * iavf_free_q_vectors - Free memory allocated for interrupt vectors * @adapter: board private structure to initialize * * This function frees the memory allocated to the q_vectors. In addition if * NAPI is enabled it will delete any references to the NAPI struct prior * to freeing the q_vector. 
**/ -static void i40evf_free_q_vectors(struct i40evf_adapter *adapter) +static void iavf_free_q_vectors(struct iavf_adapter *adapter) { int q_idx, num_q_vectors; int napi_vectors; @@ -1402,7 +1399,8 @@ static void i40evf_free_q_vectors(struct i40evf_adapter *adapter) napi_vectors = adapter->num_active_queues; for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { - struct i40e_q_vector *q_vector = &adapter->q_vectors[q_idx]; + struct iavf_q_vector *q_vector = &adapter->q_vectors[q_idx]; + if (q_idx < napi_vectors) netif_napi_del(&q_vector->napi); } @@ -1411,11 +1409,11 @@ static void i40evf_free_q_vectors(struct i40evf_adapter *adapter) } /** - * i40evf_reset_interrupt_capability - Reset MSIX setup + * iavf_reset_interrupt_capability - Reset MSIX setup * @adapter: board private structure * **/ -void i40evf_reset_interrupt_capability(struct i40evf_adapter *adapter) +void iavf_reset_interrupt_capability(struct iavf_adapter *adapter) { if (!adapter->msix_entries) return; @@ -1426,15 +1424,15 @@ void i40evf_reset_interrupt_capability(struct i40evf_adapter *adapter) } /** - * i40evf_init_interrupt_scheme - Determine if MSIX is supported and init + * iavf_init_interrupt_scheme - Determine if MSIX is supported and init * @adapter: board private structure to initialize * **/ -int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter) +int iavf_init_interrupt_scheme(struct iavf_adapter *adapter) { int err; - err = i40evf_alloc_queues(adapter); + err = iavf_alloc_queues(adapter); if (err) { dev_err(&adapter->pdev->dev, "Unable to allocate memory for queues\n"); @@ -1442,7 +1440,7 @@ int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter) } rtnl_lock(); - err = i40evf_set_interrupt_capability(adapter); + err = iavf_set_interrupt_capability(adapter); rtnl_unlock(); if (err) { dev_err(&adapter->pdev->dev, @@ -1450,7 +1448,7 @@ int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter) goto err_set_interrupt; } - err = i40evf_alloc_q_vectors(adapter); + err = iavf_alloc_q_vectors(adapter); if (err) { dev_err(&adapter->pdev->dev, "Unable to allocate memory for queue vectors\n"); @@ -1473,18 +1471,18 @@ int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter) return 0; err_alloc_q_vectors: - i40evf_reset_interrupt_capability(adapter); + iavf_reset_interrupt_capability(adapter); err_set_interrupt: - i40evf_free_queues(adapter); + iavf_free_queues(adapter); err_alloc_queues: return err; } /** - * i40evf_free_rss - Free memory used by RSS structs + * iavf_free_rss - Free memory used by RSS structs * @adapter: board private structure **/ -static void i40evf_free_rss(struct i40evf_adapter *adapter) +static void iavf_free_rss(struct iavf_adapter *adapter) { kfree(adapter->rss_key); adapter->rss_key = NULL; @@ -1494,52 +1492,52 @@ static void i40evf_free_rss(struct i40evf_adapter *adapter) } /** - * i40evf_reinit_interrupt_scheme - Reallocate queues and vectors + * iavf_reinit_interrupt_scheme - Reallocate queues and vectors * @adapter: board private structure * * Returns 0 on success, negative on failure **/ -static int i40evf_reinit_interrupt_scheme(struct i40evf_adapter *adapter) +static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter) { struct net_device *netdev = adapter->netdev; int err; if (netif_running(netdev)) - i40evf_free_traffic_irqs(adapter); - i40evf_free_misc_irq(adapter); - i40evf_reset_interrupt_capability(adapter); - i40evf_free_q_vectors(adapter); - i40evf_free_queues(adapter); + iavf_free_traffic_irqs(adapter); + iavf_free_misc_irq(adapter); + 
iavf_reset_interrupt_capability(adapter);
+	iavf_free_q_vectors(adapter);
+	iavf_free_queues(adapter);
 
-	err = i40evf_init_interrupt_scheme(adapter);
+	err = iavf_init_interrupt_scheme(adapter);
 	if (err)
 		goto err;
 
 	netif_tx_stop_all_queues(netdev);
 
-	err = i40evf_request_misc_irq(adapter);
+	err = iavf_request_misc_irq(adapter);
 	if (err)
 		goto err;
 
-	set_bit(__I40E_VSI_DOWN, adapter->vsi.state);
+	set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
 
-	i40evf_map_rings_to_vectors(adapter);
+	iavf_map_rings_to_vectors(adapter);
 
 	if (RSS_AQ(adapter))
-		adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_RSS;
+		adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
 	else
-		err = i40evf_init_rss(adapter);
+		err = iavf_init_rss(adapter);
 err:
 	return err;
 }
 
 /**
- * i40evf_watchdog_timer - Periodic call-back timer
+ * iavf_watchdog_timer - Periodic call-back timer
  * @t: pointer to the timer_list from which our adapter is recovered
  *     via from_timer()
 **/
-static void i40evf_watchdog_timer(struct timer_list *t)
+static void iavf_watchdog_timer(struct timer_list *t)
 {
-	struct i40evf_adapter *adapter = from_timer(adapter, t,
+	struct iavf_adapter *adapter = from_timer(adapter, t,
 						    watchdog_timer);
 
 	schedule_work(&adapter->watchdog_task);
@@ -1547,31 +1545,31 @@
 }
 
 /**
- * i40evf_watchdog_task - Periodic call-back task
+ * iavf_watchdog_task - Periodic call-back task
  * @work: pointer to work_struct
 **/
-static void i40evf_watchdog_task(struct work_struct *work)
+static void iavf_watchdog_task(struct work_struct *work)
 {
-	struct i40evf_adapter *adapter = container_of(work,
-						      struct i40evf_adapter,
+	struct iavf_adapter *adapter = container_of(work,
+						    struct iavf_adapter,
 						      watchdog_task);
-	struct i40e_hw *hw = &adapter->hw;
+	struct iavf_hw *hw = &adapter->hw;
 	u32 reg_val;
 
-	if (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section))
+	if (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section))
 		goto restart_watchdog;
 
-	if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
-		reg_val = rd32(hw, I40E_VFGEN_RSTAT) &
-			  I40E_VFGEN_RSTAT_VFR_STATE_MASK;
+	if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) {
+		reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
+			  IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
 		if ((reg_val == VIRTCHNL_VFR_VFACTIVE) ||
 		    (reg_val == VIRTCHNL_VFR_COMPLETED)) {
 			/* A chance for redemption! */
 			dev_err(&adapter->pdev->dev,
 				"Hardware came out of reset. Attempting reinit.\n");
-			adapter->state = __I40EVF_STARTUP;
-			adapter->flags &= ~I40EVF_FLAG_PF_COMMS_FAILED;
+			adapter->state = __IAVF_STARTUP;
+			adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
 			schedule_delayed_work(&adapter->init_task, 10);
-			clear_bit(__I40EVF_IN_CRITICAL_TASK,
+			clear_bit(__IAVF_IN_CRITICAL_TASK,
 				  &adapter->crit_section);
 			/* Don't reschedule the watchdog, since we've restarted
			 * the init task.
When init_task contacts the PF and @@ -1585,15 +1583,15 @@ static void i40evf_watchdog_task(struct work_struct *work) goto watchdog_done; } - if ((adapter->state < __I40EVF_DOWN) || - (adapter->flags & I40EVF_FLAG_RESET_PENDING)) + if ((adapter->state < __IAVF_DOWN) || + (adapter->flags & IAVF_FLAG_RESET_PENDING)) goto watchdog_done; /* check for reset */ - reg_val = rd32(hw, I40E_VF_ARQLEN1) & I40E_VF_ARQLEN1_ARQENABLE_MASK; - if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING) && !reg_val) { - adapter->state = __I40EVF_RESETTING; - adapter->flags |= I40EVF_FLAG_RESET_PENDING; + reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK; + if (!(adapter->flags & IAVF_FLAG_RESET_PENDING) && !reg_val) { + adapter->state = __IAVF_RESETTING; + adapter->flags |= IAVF_FLAG_RESET_PENDING; dev_err(&adapter->pdev->dev, "Hardware reset detected\n"); schedule_work(&adapter->reset_task); adapter->aq_required = 0; @@ -1605,140 +1603,140 @@ static void i40evf_watchdog_task(struct work_struct *work) * here so we don't race on the admin queue. */ if (adapter->current_op) { - if (!i40evf_asq_done(hw)) { + if (!iavf_asq_done(hw)) { dev_dbg(&adapter->pdev->dev, "Admin queue timeout\n"); - i40evf_send_api_ver(adapter); + iavf_send_api_ver(adapter); } goto watchdog_done; } - if (adapter->aq_required & I40EVF_FLAG_AQ_GET_CONFIG) { - i40evf_send_vf_config_msg(adapter); + if (adapter->aq_required & IAVF_FLAG_AQ_GET_CONFIG) { + iavf_send_vf_config_msg(adapter); goto watchdog_done; } - if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_QUEUES) { - i40evf_disable_queues(adapter); + if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_QUEUES) { + iavf_disable_queues(adapter); goto watchdog_done; } - if (adapter->aq_required & I40EVF_FLAG_AQ_MAP_VECTORS) { - i40evf_map_queues(adapter); + if (adapter->aq_required & IAVF_FLAG_AQ_MAP_VECTORS) { + iavf_map_queues(adapter); goto watchdog_done; } - if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_MAC_FILTER) { - i40evf_add_ether_addrs(adapter); + if (adapter->aq_required & IAVF_FLAG_AQ_ADD_MAC_FILTER) { + iavf_add_ether_addrs(adapter); goto watchdog_done; } - if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_VLAN_FILTER) { - i40evf_add_vlans(adapter); + if (adapter->aq_required & IAVF_FLAG_AQ_ADD_VLAN_FILTER) { + iavf_add_vlans(adapter); goto watchdog_done; } - if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_MAC_FILTER) { - i40evf_del_ether_addrs(adapter); + if (adapter->aq_required & IAVF_FLAG_AQ_DEL_MAC_FILTER) { + iavf_del_ether_addrs(adapter); goto watchdog_done; } - if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_VLAN_FILTER) { - i40evf_del_vlans(adapter); + if (adapter->aq_required & IAVF_FLAG_AQ_DEL_VLAN_FILTER) { + iavf_del_vlans(adapter); goto watchdog_done; } - if (adapter->aq_required & I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING) { - i40evf_enable_vlan_stripping(adapter); + if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING) { + iavf_enable_vlan_stripping(adapter); goto watchdog_done; } - if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING) { - i40evf_disable_vlan_stripping(adapter); + if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING) { + iavf_disable_vlan_stripping(adapter); goto watchdog_done; } - if (adapter->aq_required & I40EVF_FLAG_AQ_CONFIGURE_QUEUES) { - i40evf_configure_queues(adapter); + if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_QUEUES) { + iavf_configure_queues(adapter); goto watchdog_done; } - if (adapter->aq_required & I40EVF_FLAG_AQ_ENABLE_QUEUES) { - i40evf_enable_queues(adapter); + if (adapter->aq_required & 
IAVF_FLAG_AQ_ENABLE_QUEUES) { + iavf_enable_queues(adapter); goto watchdog_done; } - if (adapter->aq_required & I40EVF_FLAG_AQ_CONFIGURE_RSS) { + if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_RSS) { /* This message goes straight to the firmware, not the * PF, so we don't have to set current_op as we will * not get a response through the ARQ. */ - i40evf_init_rss(adapter); - adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_RSS; + iavf_init_rss(adapter); + adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_RSS; goto watchdog_done; } - if (adapter->aq_required & I40EVF_FLAG_AQ_GET_HENA) { - i40evf_get_hena(adapter); + if (adapter->aq_required & IAVF_FLAG_AQ_GET_HENA) { + iavf_get_hena(adapter); goto watchdog_done; } - if (adapter->aq_required & I40EVF_FLAG_AQ_SET_HENA) { - i40evf_set_hena(adapter); + if (adapter->aq_required & IAVF_FLAG_AQ_SET_HENA) { + iavf_set_hena(adapter); goto watchdog_done; } - if (adapter->aq_required & I40EVF_FLAG_AQ_SET_RSS_KEY) { - i40evf_set_rss_key(adapter); + if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_KEY) { + iavf_set_rss_key(adapter); goto watchdog_done; } - if (adapter->aq_required & I40EVF_FLAG_AQ_SET_RSS_LUT) { - i40evf_set_rss_lut(adapter); + if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_LUT) { + iavf_set_rss_lut(adapter); goto watchdog_done; } - if (adapter->aq_required & I40EVF_FLAG_AQ_REQUEST_PROMISC) { - i40evf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC | + if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_PROMISC) { + iavf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC | FLAG_VF_MULTICAST_PROMISC); goto watchdog_done; } - if (adapter->aq_required & I40EVF_FLAG_AQ_REQUEST_ALLMULTI) { - i40evf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC); + if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_ALLMULTI) { + iavf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC); goto watchdog_done; } - if ((adapter->aq_required & I40EVF_FLAG_AQ_RELEASE_PROMISC) && - (adapter->aq_required & I40EVF_FLAG_AQ_RELEASE_ALLMULTI)) { - i40evf_set_promiscuous(adapter, 0); + if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) && + (adapter->aq_required & IAVF_FLAG_AQ_RELEASE_ALLMULTI)) { + iavf_set_promiscuous(adapter, 0); goto watchdog_done; } - if (adapter->aq_required & I40EVF_FLAG_AQ_ENABLE_CHANNELS) { - i40evf_enable_channels(adapter); + if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CHANNELS) { + iavf_enable_channels(adapter); goto watchdog_done; } - if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_CHANNELS) { - i40evf_disable_channels(adapter); + if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CHANNELS) { + iavf_disable_channels(adapter); goto watchdog_done; } - if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_CLOUD_FILTER) { - i40evf_add_cloud_filter(adapter); + if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) { + iavf_add_cloud_filter(adapter); goto watchdog_done; } - if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_CLOUD_FILTER) { - i40evf_del_cloud_filter(adapter); + if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) { + iavf_del_cloud_filter(adapter); goto watchdog_done; } schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5)); - if (adapter->state == __I40EVF_RUNNING) - i40evf_request_stats(adapter); + if (adapter->state == __IAVF_RUNNING) + iavf_request_stats(adapter); watchdog_done: - if (adapter->state == __I40EVF_RUNNING) - i40evf_detect_recover_hung(&adapter->vsi); - clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); + if (adapter->state == __IAVF_RUNNING) + iavf_detect_recover_hung(&adapter->vsi); + 
clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); restart_watchdog: - if (adapter->state == __I40EVF_REMOVE) + if (adapter->state == __IAVF_REMOVE) return; if (adapter->aq_required) mod_timer(&adapter->watchdog_timer, @@ -1748,28 +1746,28 @@ restart_watchdog: schedule_work(&adapter->adminq_task); } -static void i40evf_disable_vf(struct i40evf_adapter *adapter) +static void iavf_disable_vf(struct iavf_adapter *adapter) { - struct i40evf_mac_filter *f, *ftmp; - struct i40evf_vlan_filter *fv, *fvtmp; - struct i40evf_cloud_filter *cf, *cftmp; + struct iavf_mac_filter *f, *ftmp; + struct iavf_vlan_filter *fv, *fvtmp; + struct iavf_cloud_filter *cf, *cftmp; - adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED; + adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED; /* We don't use netif_running() because it may be true prior to * ndo_open() returning, so we can't assume it means all our open * tasks have finished, since we're not holding the rtnl_lock here. */ - if (adapter->state == __I40EVF_RUNNING) { - set_bit(__I40E_VSI_DOWN, adapter->vsi.state); + if (adapter->state == __IAVF_RUNNING) { + set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); netif_carrier_off(adapter->netdev); netif_tx_disable(adapter->netdev); adapter->link_up = false; - i40evf_napi_disable_all(adapter); - i40evf_irq_disable(adapter); - i40evf_free_traffic_irqs(adapter); - i40evf_free_all_tx_resources(adapter); - i40evf_free_all_rx_resources(adapter); + iavf_napi_disable_all(adapter); + iavf_irq_disable(adapter); + iavf_free_traffic_irqs(adapter); + iavf_free_all_tx_resources(adapter); + iavf_free_all_rx_resources(adapter); } spin_lock_bh(&adapter->mac_vlan_list_lock); @@ -1795,41 +1793,41 @@ static void i40evf_disable_vf(struct i40evf_adapter *adapter) } spin_unlock_bh(&adapter->cloud_filter_list_lock); - i40evf_free_misc_irq(adapter); - i40evf_reset_interrupt_capability(adapter); - i40evf_free_queues(adapter); - i40evf_free_q_vectors(adapter); + iavf_free_misc_irq(adapter); + iavf_reset_interrupt_capability(adapter); + iavf_free_queues(adapter); + iavf_free_q_vectors(adapter); kfree(adapter->vf_res); - i40evf_shutdown_adminq(&adapter->hw); + iavf_shutdown_adminq(&adapter->hw); adapter->netdev->flags &= ~IFF_UP; - clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); - adapter->flags &= ~I40EVF_FLAG_RESET_PENDING; - adapter->state = __I40EVF_DOWN; + clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); + adapter->flags &= ~IAVF_FLAG_RESET_PENDING; + adapter->state = __IAVF_DOWN; wake_up(&adapter->down_waitqueue); dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n"); } -#define I40EVF_RESET_WAIT_MS 10 -#define I40EVF_RESET_WAIT_COUNT 500 +#define IAVF_RESET_WAIT_MS 10 +#define IAVF_RESET_WAIT_COUNT 500 /** - * i40evf_reset_task - Call-back task to handle hardware reset + * iavf_reset_task - Call-back task to handle hardware reset * @work: pointer to work_struct * * During reset we need to shut down and reinitialize the admin queue * before we can use it to communicate with the PF again. We also clear * and reinit the rings because that context is lost as well. 
**/ -static void i40evf_reset_task(struct work_struct *work) +static void iavf_reset_task(struct work_struct *work) { - struct i40evf_adapter *adapter = container_of(work, - struct i40evf_adapter, + struct iavf_adapter *adapter = container_of(work, + struct iavf_adapter, reset_task); struct virtchnl_vf_resource *vfres = adapter->vf_res; struct net_device *netdev = adapter->netdev; - struct i40e_hw *hw = &adapter->hw; - struct i40evf_vlan_filter *vlf; - struct i40evf_cloud_filter *cf; - struct i40evf_mac_filter *f; + struct iavf_hw *hw = &adapter->hw; + struct iavf_vlan_filter *vlf; + struct iavf_cloud_filter *cf; + struct iavf_mac_filter *f; u32 reg_val; int i = 0, err; bool running; @@ -1837,63 +1835,63 @@ static void i40evf_reset_task(struct work_struct *work) /* When device is being removed it doesn't make sense to run the reset * task, just return in such a case. */ - if (test_bit(__I40EVF_IN_REMOVE_TASK, &adapter->crit_section)) + if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) return; - while (test_and_set_bit(__I40EVF_IN_CLIENT_TASK, + while (test_and_set_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section)) usleep_range(500, 1000); if (CLIENT_ENABLED(adapter)) { - adapter->flags &= ~(I40EVF_FLAG_CLIENT_NEEDS_OPEN | - I40EVF_FLAG_CLIENT_NEEDS_CLOSE | - I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS | - I40EVF_FLAG_SERVICE_CLIENT_REQUESTED); + adapter->flags &= ~(IAVF_FLAG_CLIENT_NEEDS_OPEN | + IAVF_FLAG_CLIENT_NEEDS_CLOSE | + IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS | + IAVF_FLAG_SERVICE_CLIENT_REQUESTED); cancel_delayed_work_sync(&adapter->client_task); - i40evf_notify_client_close(&adapter->vsi, true); + iavf_notify_client_close(&adapter->vsi, true); } - i40evf_misc_irq_disable(adapter); - if (adapter->flags & I40EVF_FLAG_RESET_NEEDED) { - adapter->flags &= ~I40EVF_FLAG_RESET_NEEDED; + iavf_misc_irq_disable(adapter); + if (adapter->flags & IAVF_FLAG_RESET_NEEDED) { + adapter->flags &= ~IAVF_FLAG_RESET_NEEDED; /* Restart the AQ here. If we have been reset but didn't * detect it, or if the PF had to reinit, our AQ will be hosed. 
*/ - i40evf_shutdown_adminq(hw); - i40evf_init_adminq(hw); - i40evf_request_reset(adapter); + iavf_shutdown_adminq(hw); + iavf_init_adminq(hw); + iavf_request_reset(adapter); } - adapter->flags |= I40EVF_FLAG_RESET_PENDING; + adapter->flags |= IAVF_FLAG_RESET_PENDING; /* poll until we see the reset actually happen */ - for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) { - reg_val = rd32(hw, I40E_VF_ARQLEN1) & - I40E_VF_ARQLEN1_ARQENABLE_MASK; + for (i = 0; i < IAVF_RESET_WAIT_COUNT; i++) { + reg_val = rd32(hw, IAVF_VF_ARQLEN1) & + IAVF_VF_ARQLEN1_ARQENABLE_MASK; if (!reg_val) break; usleep_range(5000, 10000); } - if (i == I40EVF_RESET_WAIT_COUNT) { + if (i == IAVF_RESET_WAIT_COUNT) { dev_info(&adapter->pdev->dev, "Never saw reset\n"); goto continue_reset; /* act like the reset happened */ } /* wait until the reset is complete and the PF is responding to us */ - for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) { + for (i = 0; i < IAVF_RESET_WAIT_COUNT; i++) { /* sleep first to make sure a minimum wait time is met */ - msleep(I40EVF_RESET_WAIT_MS); + msleep(IAVF_RESET_WAIT_MS); - reg_val = rd32(hw, I40E_VFGEN_RSTAT) & - I40E_VFGEN_RSTAT_VFR_STATE_MASK; + reg_val = rd32(hw, IAVF_VFGEN_RSTAT) & + IAVF_VFGEN_RSTAT_VFR_STATE_MASK; if (reg_val == VIRTCHNL_VFR_VFACTIVE) break; } pci_set_master(adapter->pdev); - if (i == I40EVF_RESET_WAIT_COUNT) { + if (i == IAVF_RESET_WAIT_COUNT) { dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n", reg_val); - i40evf_disable_vf(adapter); - clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section); + iavf_disable_vf(adapter); + clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section); return; /* Do not attempt to reinit. It's dead, Jim. */ } @@ -1902,44 +1900,44 @@ continue_reset: * ndo_open() returning, so we can't assume it means all our open * tasks have finished, since we're not holding the rtnl_lock here. 
*/ - running = ((adapter->state == __I40EVF_RUNNING) || - (adapter->state == __I40EVF_RESETTING)); + running = ((adapter->state == __IAVF_RUNNING) || + (adapter->state == __IAVF_RESETTING)); if (running) { netif_carrier_off(netdev); netif_tx_stop_all_queues(netdev); adapter->link_up = false; - i40evf_napi_disable_all(adapter); + iavf_napi_disable_all(adapter); } - i40evf_irq_disable(adapter); + iavf_irq_disable(adapter); - adapter->state = __I40EVF_RESETTING; - adapter->flags &= ~I40EVF_FLAG_RESET_PENDING; + adapter->state = __IAVF_RESETTING; + adapter->flags &= ~IAVF_FLAG_RESET_PENDING; /* free the Tx/Rx rings and descriptors, might be better to just * re-use them sometime in the future */ - i40evf_free_all_rx_resources(adapter); - i40evf_free_all_tx_resources(adapter); + iavf_free_all_rx_resources(adapter); + iavf_free_all_tx_resources(adapter); - adapter->flags |= I40EVF_FLAG_QUEUES_DISABLED; + adapter->flags |= IAVF_FLAG_QUEUES_DISABLED; /* kill and reinit the admin queue */ - i40evf_shutdown_adminq(hw); + iavf_shutdown_adminq(hw); adapter->current_op = VIRTCHNL_OP_UNKNOWN; - err = i40evf_init_adminq(hw); + err = iavf_init_adminq(hw); if (err) dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n", err); adapter->aq_required = 0; - if (adapter->flags & I40EVF_FLAG_REINIT_ITR_NEEDED) { - err = i40evf_reinit_interrupt_scheme(adapter); + if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) { + err = iavf_reinit_interrupt_scheme(adapter); if (err) goto reset_err; } - adapter->aq_required |= I40EVF_FLAG_AQ_GET_CONFIG; - adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS; + adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG; + adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS; spin_lock_bh(&adapter->mac_vlan_list_lock); @@ -1964,10 +1962,10 @@ continue_reset: } spin_unlock_bh(&adapter->cloud_filter_list_lock); - adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER; - adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER; - adapter->aq_required |= I40EVF_FLAG_AQ_ADD_CLOUD_FILTER; - i40evf_misc_irq_enable(adapter); + adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER; + adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER; + adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER; + iavf_misc_irq_enable(adapter); mod_timer(&adapter->watchdog_timer, jiffies + 2); @@ -1976,84 +1974,83 @@ continue_reset: */ if (running) { /* allocate transmit descriptors */ - err = i40evf_setup_all_tx_resources(adapter); + err = iavf_setup_all_tx_resources(adapter); if (err) goto reset_err; /* allocate receive descriptors */ - err = i40evf_setup_all_rx_resources(adapter); + err = iavf_setup_all_rx_resources(adapter); if (err) goto reset_err; - if (adapter->flags & I40EVF_FLAG_REINIT_ITR_NEEDED) { - err = i40evf_request_traffic_irqs(adapter, - netdev->name); + if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) { + err = iavf_request_traffic_irqs(adapter, netdev->name); if (err) goto reset_err; - adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED; + adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; } - i40evf_configure(adapter); + iavf_configure(adapter); - i40evf_up_complete(adapter); + iavf_up_complete(adapter); - i40evf_irq_enable(adapter, true); + iavf_irq_enable(adapter, true); } else { - adapter->state = __I40EVF_DOWN; + adapter->state = __IAVF_DOWN; wake_up(&adapter->down_waitqueue); } - clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section); - clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); + clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section); + clear_bit(__IAVF_IN_CRITICAL_TASK, 
&adapter->crit_section); return; reset_err: - clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section); - clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); + clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section); + clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n"); - i40evf_close(netdev); + iavf_close(netdev); } /** - * i40evf_adminq_task - worker thread to clean the admin queue + * iavf_adminq_task - worker thread to clean the admin queue * @work: pointer to work_struct containing our data **/ -static void i40evf_adminq_task(struct work_struct *work) +static void iavf_adminq_task(struct work_struct *work) { - struct i40evf_adapter *adapter = - container_of(work, struct i40evf_adapter, adminq_task); - struct i40e_hw *hw = &adapter->hw; + struct iavf_adapter *adapter = + container_of(work, struct iavf_adapter, adminq_task); + struct iavf_hw *hw = &adapter->hw; struct i40e_arq_event_info event; enum virtchnl_ops v_op; - i40e_status ret, v_ret; + iavf_status ret, v_ret; u32 val, oldval; u16 pending; - if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) + if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) goto out; - event.buf_len = I40EVF_MAX_AQ_BUF_SIZE; + event.buf_len = IAVF_MAX_AQ_BUF_SIZE; event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); if (!event.msg_buf) goto out; do { - ret = i40evf_clean_arq_element(hw, &event, &pending); + ret = iavf_clean_arq_element(hw, &event, &pending); v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high); - v_ret = (i40e_status)le32_to_cpu(event.desc.cookie_low); + v_ret = (iavf_status)le32_to_cpu(event.desc.cookie_low); if (ret || !v_op) break; /* No event to process or error cleaning ARQ */ - i40evf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf, - event.msg_len); + iavf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf, + event.msg_len); if (pending != 0) - memset(event.msg_buf, 0, I40EVF_MAX_AQ_BUF_SIZE); + memset(event.msg_buf, 0, IAVF_MAX_AQ_BUF_SIZE); } while (pending); if ((adapter->flags & - (I40EVF_FLAG_RESET_PENDING | I40EVF_FLAG_RESET_NEEDED)) || - adapter->state == __I40EVF_RESETTING) + (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) || + adapter->state == __IAVF_RESETTING) goto freedom; /* check for error indications */ @@ -2061,34 +2058,34 @@ static void i40evf_adminq_task(struct work_struct *work) if (val == 0xdeadbeef) /* indicates device in reset */ goto freedom; oldval = val; - if (val & I40E_VF_ARQLEN1_ARQVFE_MASK) { + if (val & IAVF_VF_ARQLEN1_ARQVFE_MASK) { dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n"); - val &= ~I40E_VF_ARQLEN1_ARQVFE_MASK; + val &= ~IAVF_VF_ARQLEN1_ARQVFE_MASK; } - if (val & I40E_VF_ARQLEN1_ARQOVFL_MASK) { + if (val & IAVF_VF_ARQLEN1_ARQOVFL_MASK) { dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n"); - val &= ~I40E_VF_ARQLEN1_ARQOVFL_MASK; + val &= ~IAVF_VF_ARQLEN1_ARQOVFL_MASK; } - if (val & I40E_VF_ARQLEN1_ARQCRIT_MASK) { + if (val & IAVF_VF_ARQLEN1_ARQCRIT_MASK) { dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n"); - val &= ~I40E_VF_ARQLEN1_ARQCRIT_MASK; + val &= ~IAVF_VF_ARQLEN1_ARQCRIT_MASK; } if (oldval != val) wr32(hw, hw->aq.arq.len, val); val = rd32(hw, hw->aq.asq.len); oldval = val; - if (val & I40E_VF_ATQLEN1_ATQVFE_MASK) { + if (val & IAVF_VF_ATQLEN1_ATQVFE_MASK) { dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n"); - val &= ~I40E_VF_ATQLEN1_ATQVFE_MASK; + val &= ~IAVF_VF_ATQLEN1_ATQVFE_MASK; } - if (val & I40E_VF_ATQLEN1_ATQOVFL_MASK) { + 
if (val & IAVF_VF_ATQLEN1_ATQOVFL_MASK) { dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n"); - val &= ~I40E_VF_ATQLEN1_ATQOVFL_MASK; + val &= ~IAVF_VF_ATQLEN1_ATQOVFL_MASK; } - if (val & I40E_VF_ATQLEN1_ATQCRIT_MASK) { + if (val & IAVF_VF_ATQLEN1_ATQCRIT_MASK) { dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n"); - val &= ~I40E_VF_ATQLEN1_ATQCRIT_MASK; + val &= ~IAVF_VF_ATQLEN1_ATQCRIT_MASK; } if (oldval != val) wr32(hw, hw->aq.asq.len, val); @@ -2097,58 +2094,58 @@ freedom: kfree(event.msg_buf); out: /* re-enable Admin queue interrupt cause */ - i40evf_misc_irq_enable(adapter); + iavf_misc_irq_enable(adapter); } /** - * i40evf_client_task - worker thread to perform client work + * iavf_client_task - worker thread to perform client work * @work: pointer to work_struct containing our data * * This task handles client interactions. Because client calls can be * reentrant, we can't handle them in the watchdog. **/ -static void i40evf_client_task(struct work_struct *work) +static void iavf_client_task(struct work_struct *work) { - struct i40evf_adapter *adapter = - container_of(work, struct i40evf_adapter, client_task.work); + struct iavf_adapter *adapter = + container_of(work, struct iavf_adapter, client_task.work); /* If we can't get the client bit, just give up. We'll be rescheduled * later. */ - if (test_and_set_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section)) + if (test_and_set_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section)) return; - if (adapter->flags & I40EVF_FLAG_SERVICE_CLIENT_REQUESTED) { - i40evf_client_subtask(adapter); - adapter->flags &= ~I40EVF_FLAG_SERVICE_CLIENT_REQUESTED; + if (adapter->flags & IAVF_FLAG_SERVICE_CLIENT_REQUESTED) { + iavf_client_subtask(adapter); + adapter->flags &= ~IAVF_FLAG_SERVICE_CLIENT_REQUESTED; goto out; } - if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS) { - i40evf_notify_client_l2_params(&adapter->vsi); - adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS; + if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS) { + iavf_notify_client_l2_params(&adapter->vsi); + adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS; goto out; } - if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_CLOSE) { - i40evf_notify_client_close(&adapter->vsi, false); - adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_CLOSE; + if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_CLOSE) { + iavf_notify_client_close(&adapter->vsi, false); + adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_CLOSE; goto out; } - if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_OPEN) { - i40evf_notify_client_open(&adapter->vsi); - adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_OPEN; + if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_OPEN) { + iavf_notify_client_open(&adapter->vsi); + adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_OPEN; } out: - clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section); + clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section); } /** - * i40evf_free_all_tx_resources - Free Tx Resources for All Queues + * iavf_free_all_tx_resources - Free Tx Resources for All Queues * @adapter: board private structure * * Free all transmit software resources **/ -void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter) +void iavf_free_all_tx_resources(struct iavf_adapter *adapter) { int i; @@ -2157,11 +2154,11 @@ void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter) for (i = 0; i < adapter->num_active_queues; i++) if (adapter->tx_rings[i].desc) - i40evf_free_tx_resources(&adapter->tx_rings[i]); + iavf_free_tx_resources(&adapter->tx_rings[i]); } /** - * 
i40evf_setup_all_tx_resources - allocate all queues Tx resources + * iavf_setup_all_tx_resources - allocate all queues Tx resources * @adapter: board private structure * * If this function returns with an error, then it's possible one or @@ -2170,13 +2167,13 @@ void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter) * * Return 0 on success, negative on failure **/ -static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter) +static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter) { int i, err = 0; for (i = 0; i < adapter->num_active_queues; i++) { adapter->tx_rings[i].count = adapter->tx_desc_count; - err = i40evf_setup_tx_descriptors(&adapter->tx_rings[i]); + err = iavf_setup_tx_descriptors(&adapter->tx_rings[i]); if (!err) continue; dev_err(&adapter->pdev->dev, @@ -2188,7 +2185,7 @@ static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter) } /** - * i40evf_setup_all_rx_resources - allocate all queues Rx resources + * iavf_setup_all_rx_resources - allocate all queues Rx resources * @adapter: board private structure * * If this function returns with an error, then it's possible one or @@ -2197,13 +2194,13 @@ static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter) * * Return 0 on success, negative on failure **/ -static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter) +static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter) { int i, err = 0; for (i = 0; i < adapter->num_active_queues; i++) { adapter->rx_rings[i].count = adapter->rx_desc_count; - err = i40evf_setup_rx_descriptors(&adapter->rx_rings[i]); + err = iavf_setup_rx_descriptors(&adapter->rx_rings[i]); if (!err) continue; dev_err(&adapter->pdev->dev, @@ -2214,12 +2211,12 @@ static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter) } /** - * i40evf_free_all_rx_resources - Free Rx Resources for All Queues + * iavf_free_all_rx_resources - Free Rx Resources for All Queues * @adapter: board private structure * * Free all receive software resources **/ -void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter) +void iavf_free_all_rx_resources(struct iavf_adapter *adapter) { int i; @@ -2228,16 +2225,16 @@ void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter) for (i = 0; i < adapter->num_active_queues; i++) if (adapter->rx_rings[i].desc) - i40evf_free_rx_resources(&adapter->rx_rings[i]); + iavf_free_rx_resources(&adapter->rx_rings[i]); } /** - * i40evf_validate_tx_bandwidth - validate the max Tx bandwidth + * iavf_validate_tx_bandwidth - validate the max Tx bandwidth * @adapter: board private structure * @max_tx_rate: max Tx bw for a tc **/ -static int i40evf_validate_tx_bandwidth(struct i40evf_adapter *adapter, - u64 max_tx_rate) +static int iavf_validate_tx_bandwidth(struct iavf_adapter *adapter, + u64 max_tx_rate) { int speed = 0, ret = 0; @@ -2274,7 +2271,7 @@ static int i40evf_validate_tx_bandwidth(struct i40evf_adapter *adapter, } /** - * i40evf_validate_channel_config - validate queue mapping info + * iavf_validate_ch_config - validate queue mapping info * @adapter: board private structure * @mqprio_qopt: queue parameters * @@ -2282,15 +2279,15 @@ static int i40evf_validate_tx_bandwidth(struct i40evf_adapter *adapter, * configure queue channels is valid or not. Returns 0 on a valid * config.
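 *
 * Note that the stack hands in max_rate[] in bytes per second while
 * the PF expects Mbps, hence the div_u64() by IAVF_MBPS_DIVISOR in
 * the body below (assuming the divisor keeps its i40evf value of
 * 125000, i.e. bytes/s per Mbit/s). Worked example:
 *
 *      max_rate[i] = 12500000;                 /* 12.5 MB/s from tc */
 *      tx_rate = div_u64(max_rate[i], 125000); /* == 100 Mbps */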
**/ -static int i40evf_validate_ch_config(struct i40evf_adapter *adapter, - struct tc_mqprio_qopt_offload *mqprio_qopt) +static int iavf_validate_ch_config(struct iavf_adapter *adapter, + struct tc_mqprio_qopt_offload *mqprio_qopt) { u64 total_max_rate = 0; int i, num_qps = 0; u64 tx_rate = 0; int ret = 0; - if (mqprio_qopt->qopt.num_tc > I40EVF_MAX_TRAFFIC_CLASS || + if (mqprio_qopt->qopt.num_tc > IAVF_MAX_TRAFFIC_CLASS || mqprio_qopt->qopt.num_tc < 1) return -EINVAL; @@ -2305,24 +2302,24 @@ static int i40evf_validate_ch_config(struct i40evf_adapter *adapter, } /* convert to Mbps */ tx_rate = div_u64(mqprio_qopt->max_rate[i], - I40EVF_MBPS_DIVISOR); + IAVF_MBPS_DIVISOR); total_max_rate += tx_rate; num_qps += mqprio_qopt->qopt.count[i]; } - if (num_qps > I40EVF_MAX_REQ_QUEUES) + if (num_qps > IAVF_MAX_REQ_QUEUES) return -EINVAL; - ret = i40evf_validate_tx_bandwidth(adapter, total_max_rate); + ret = iavf_validate_tx_bandwidth(adapter, total_max_rate); return ret; } /** - * i40evf_del_all_cloud_filters - delete all cloud filters + * iavf_del_all_cloud_filters - delete all cloud filters * on the traffic classes * @adapter: board private structure **/ -static void i40evf_del_all_cloud_filters(struct i40evf_adapter *adapter) +static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter) { - struct i40evf_cloud_filter *cf, *cftmp; + struct iavf_cloud_filter *cf, *cftmp; spin_lock_bh(&adapter->cloud_filter_list_lock); list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, @@ -2335,7 +2332,7 @@ static void i40evf_del_all_cloud_filters(struct i40evf_adapter *adapter) } /** - * __i40evf_setup_tc - configure multiple traffic classes + * __iavf_setup_tc - configure multiple traffic classes * @netdev: network interface device structure * @type_data: tc offload data * * @@ -2345,10 +2342,10 @@ static void i40evf_del_all_cloud_filters(struct i40evf_adapter *adapter) * * Returns 0 on success.
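 *
 * After validation the accepted mapping is reported up the stack; a
 * condensed sketch of that step (the full version, including the
 * per-class count/offset bookkeeping, is in the body below):
 *
 *      netdev_set_num_tc(netdev, num_tc);
 *      for (i = 0; i < num_tc; i++)
 *              netdev_set_tc_queue(netdev, netdev_tc++,
 *                                  mqprio_qopt->qopt.count[i],
 *                                  mqprio_qopt->qopt.offset[i]);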
**/ -static int __i40evf_setup_tc(struct net_device *netdev, void *type_data) +static int __iavf_setup_tc(struct net_device *netdev, void *type_data) { struct tc_mqprio_qopt_offload *mqprio_qopt = type_data; - struct i40evf_adapter *adapter = netdev_priv(netdev); + struct iavf_adapter *adapter = netdev_priv(netdev); struct virtchnl_vf_resource *vfres = adapter->vf_res; u8 num_tc = 0, total_qps = 0; int ret = 0, netdev_tc = 0; @@ -2361,14 +2358,14 @@ static int __i40evf_setup_tc(struct net_device *netdev, void *type_data) /* delete queue_channel */ if (!mqprio_qopt->qopt.hw) { - if (adapter->ch_config.state == __I40EVF_TC_RUNNING) { + if (adapter->ch_config.state == __IAVF_TC_RUNNING) { /* reset the tc configuration */ netdev_reset_tc(netdev); adapter->num_tc = 0; netif_tx_stop_all_queues(netdev); netif_tx_disable(netdev); - i40evf_del_all_cloud_filters(adapter); - adapter->aq_required = I40EVF_FLAG_AQ_DISABLE_CHANNELS; + iavf_del_all_cloud_filters(adapter); + adapter->aq_required = IAVF_FLAG_AQ_DISABLE_CHANNELS; goto exit; } else { return -EINVAL; @@ -2381,12 +2378,12 @@ static int __i40evf_setup_tc(struct net_device *netdev, void *type_data) dev_err(&adapter->pdev->dev, "ADq not supported\n"); return -EOPNOTSUPP; } - if (adapter->ch_config.state != __I40EVF_TC_INVALID) { + if (adapter->ch_config.state != __IAVF_TC_INVALID) { dev_err(&adapter->pdev->dev, "TC configuration already exists\n"); return -EINVAL; } - ret = i40evf_validate_ch_config(adapter, mqprio_qopt); + ret = iavf_validate_ch_config(adapter, mqprio_qopt); if (ret) return ret; /* Return if same TC config is requested */ @@ -2394,7 +2391,7 @@ static int __i40evf_setup_tc(struct net_device *netdev, void *type_data) return 0; adapter->num_tc = num_tc; - for (i = 0; i < I40EVF_MAX_TRAFFIC_CLASS; i++) { + for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) { if (i < num_tc) { adapter->ch_config.ch_info[i].count = mqprio_qopt->qopt.count[i]; @@ -2404,7 +2401,7 @@ static int __i40evf_setup_tc(struct net_device *netdev, void *type_data) max_tx_rate = mqprio_qopt->max_rate[i]; /* convert to Mbps */ max_tx_rate = div_u64(max_tx_rate, - I40EVF_MBPS_DIVISOR); + IAVF_MBPS_DIVISOR); adapter->ch_config.ch_info[i].max_tx_rate = max_tx_rate; } else { @@ -2415,11 +2412,11 @@ static int __i40evf_setup_tc(struct net_device *netdev, void *type_data) adapter->ch_config.total_qps = total_qps; netif_tx_stop_all_queues(netdev); netif_tx_disable(netdev); - adapter->aq_required |= I40EVF_FLAG_AQ_ENABLE_CHANNELS; + adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_CHANNELS; netdev_reset_tc(netdev); /* Report the tc mapping up the stack */ netdev_set_num_tc(adapter->netdev, num_tc); - for (i = 0; i < I40EVF_MAX_TRAFFIC_CLASS; i++) { + for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) { u16 qcount = mqprio_qopt->qopt.count[i]; u16 qoffset = mqprio_qopt->qopt.offset[i]; @@ -2433,14 +2430,14 @@ exit: } /** - * i40evf_parse_cls_flower - Parse tc flower filters provided by kernel + * iavf_parse_cls_flower - Parse tc flower filters provided by kernel * @adapter: board private structure * @cls_flower: pointer to struct tc_cls_flower_offload * @filter: pointer to cloud filter structure */ -static int i40evf_parse_cls_flower(struct i40evf_adapter *adapter, - struct tc_cls_flower_offload *f, - struct i40evf_cloud_filter *filter) +static int iavf_parse_cls_flower(struct iavf_adapter *adapter, + struct tc_cls_flower_offload *f, + struct iavf_cloud_filter *filter) { u16 n_proto_mask = 0; u16 n_proto_key = 0; @@ -2471,7 +2468,7 @@ static int i40evf_parse_cls_flower(struct i40evf_adapter 
*adapter, f->mask); if (mask->keyid != 0) - field_flags |= I40EVF_CLOUD_FIELD_TEN_ID; + field_flags |= IAVF_CLOUD_FIELD_TEN_ID; } if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) { @@ -2518,7 +2515,7 @@ static int i40evf_parse_cls_flower(struct i40evf_adapter *adapter, /* use is_broadcast and is_zero to check for all 0xf or 0 */ if (!is_zero_ether_addr(mask->dst)) { if (is_broadcast_ether_addr(mask->dst)) { - field_flags |= I40EVF_CLOUD_FIELD_OMAC; + field_flags |= IAVF_CLOUD_FIELD_OMAC; } else { dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n", mask->dst); @@ -2528,7 +2525,7 @@ static int i40evf_parse_cls_flower(struct i40evf_adapter *adapter, if (!is_zero_ether_addr(mask->src)) { if (is_broadcast_ether_addr(mask->src)) { - field_flags |= I40EVF_CLOUD_FIELD_IMAC; + field_flags |= IAVF_CLOUD_FIELD_IMAC; } else { dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n", mask->src); @@ -2569,7 +2566,7 @@ static int i40evf_parse_cls_flower(struct i40evf_adapter *adapter, if (mask->vlan_id) { if (mask->vlan_id == VLAN_VID_MASK) { - field_flags |= I40EVF_CLOUD_FIELD_IVLAN; + field_flags |= IAVF_CLOUD_FIELD_IVLAN; } else { dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n", mask->vlan_id); @@ -2601,7 +2598,7 @@ static int i40evf_parse_cls_flower(struct i40evf_adapter *adapter, if (mask->dst) { if (mask->dst == cpu_to_be32(0xffffffff)) { - field_flags |= I40EVF_CLOUD_FIELD_IIP; + field_flags |= IAVF_CLOUD_FIELD_IIP; } else { dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n", be32_to_cpu(mask->dst)); @@ -2611,7 +2608,7 @@ static int i40evf_parse_cls_flower(struct i40evf_adapter *adapter, if (mask->src) { if (mask->src == cpu_to_be32(0xffffffff)) { - field_flags |= I40EVF_CLOUD_FIELD_IIP; + field_flags |= IAVF_CLOUD_FIELD_IIP; } else { dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n", be32_to_cpu(mask->dst)); @@ -2619,7 +2616,7 @@ static int i40evf_parse_cls_flower(struct i40evf_adapter *adapter, } } - if (field_flags & I40EVF_CLOUD_FIELD_TEN_ID) { + if (field_flags & IAVF_CLOUD_FIELD_TEN_ID) { dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n"); return I40E_ERR_CONFIG; } @@ -2660,7 +2657,7 @@ static int i40evf_parse_cls_flower(struct i40evf_adapter *adapter, return I40E_ERR_CONFIG; } if (!ipv6_addr_any(&mask->dst) || !ipv6_addr_any(&mask->src)) - field_flags |= I40EVF_CLOUD_FIELD_IIP; + field_flags |= IAVF_CLOUD_FIELD_IIP; for (i = 0; i < 4; i++) vf->mask.tcp_spec.dst_ip[i] |= cpu_to_be32(0xffffffff); @@ -2683,7 +2680,7 @@ static int i40evf_parse_cls_flower(struct i40evf_adapter *adapter, if (mask->src) { if (mask->src == cpu_to_be16(0xffff)) { - field_flags |= I40EVF_CLOUD_FIELD_IIP; + field_flags |= IAVF_CLOUD_FIELD_IIP; } else { dev_err(&adapter->pdev->dev, "Bad src port mask %u\n", be16_to_cpu(mask->src)); @@ -2693,7 +2690,7 @@ static int i40evf_parse_cls_flower(struct i40evf_adapter *adapter, if (mask->dst) { if (mask->dst == cpu_to_be16(0xffff)) { - field_flags |= I40EVF_CLOUD_FIELD_IIP; + field_flags |= IAVF_CLOUD_FIELD_IIP; } else { dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n", be16_to_cpu(mask->dst)); @@ -2716,13 +2713,13 @@ static int i40evf_parse_cls_flower(struct i40evf_adapter *adapter, } /** - * i40evf_handle_tclass - Forward to a traffic class on the device + * iavf_handle_tclass - Forward to a traffic class on the device * @adapter: board private structure * @tc: traffic class index on the device * @filter: pointer to cloud filter structure */ -static int i40evf_handle_tclass(struct i40evf_adapter *adapter, u32 tc, - 
struct i40evf_cloud_filter *filter) +static int iavf_handle_tclass(struct iavf_adapter *adapter, u32 tc, + struct iavf_cloud_filter *filter) { if (tc == 0) return 0; @@ -2740,15 +2737,15 @@ static int i40evf_handle_tclass(struct i40evf_adapter *adapter, u32 tc, } /** - * i40evf_configure_clsflower - Add tc flower filters + * iavf_configure_clsflower - Add tc flower filters * @adapter: board private structure * @cls_flower: Pointer to struct tc_cls_flower_offload */ -static int i40evf_configure_clsflower(struct i40evf_adapter *adapter, - struct tc_cls_flower_offload *cls_flower) +static int iavf_configure_clsflower(struct iavf_adapter *adapter, + struct tc_cls_flower_offload *cls_flower) { int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid); - struct i40evf_cloud_filter *filter = NULL; + struct iavf_cloud_filter *filter = NULL; int err = -EINVAL, count = 50; if (tc < 0) { @@ -2760,7 +2757,7 @@ static int i40evf_configure_clsflower(struct i40evf_adapter *adapter, if (!filter) return -ENOMEM; - while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, + while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section)) { if (--count == 0) goto err; @@ -2773,11 +2770,11 @@ static int i40evf_configure_clsflower(struct i40evf_adapter *adapter, memset(&filter->f.mask.tcp_spec, 0, sizeof(struct virtchnl_l4_spec)); /* start out with flow type and eth type IPv4 to begin with */ filter->f.flow_type = VIRTCHNL_TCP_V4_FLOW; - err = i40evf_parse_cls_flower(adapter, cls_flower, filter); + err = iavf_parse_cls_flower(adapter, cls_flower, filter); if (err < 0) goto err; - err = i40evf_handle_tclass(adapter, tc, filter); + err = iavf_handle_tclass(adapter, tc, filter); if (err < 0) goto err; @@ -2786,27 +2783,27 @@ static int i40evf_configure_clsflower(struct i40evf_adapter *adapter, list_add_tail(&filter->list, &adapter->cloud_filter_list); adapter->num_cloud_filters++; filter->add = true; - adapter->aq_required |= I40EVF_FLAG_AQ_ADD_CLOUD_FILTER; + adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER; spin_unlock_bh(&adapter->cloud_filter_list_lock); err: if (err) kfree(filter); - clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); + clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); return err; } -/* i40evf_find_cf - Find the cloud filter in the list +/* iavf_find_cf - Find the cloud filter in the list * @adapter: Board private structure * @cookie: filter specific cookie * * Returns ptr to the filter object or NULL. Must be called while holding the * cloud_filter_list_lock. 
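 *
 * Typical usage, as in iavf_delete_clsflower() below:
 *
 *      spin_lock_bh(&adapter->cloud_filter_list_lock);
 *      filter = iavf_find_cf(adapter, &cls_flower->cookie);
 *      if (filter)
 *              filter->del = true;     /* removed on the next aq pass */
 *      spin_unlock_bh(&adapter->cloud_filter_list_lock);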
*/ -static struct i40evf_cloud_filter *i40evf_find_cf(struct i40evf_adapter *adapter, - unsigned long *cookie) +static struct iavf_cloud_filter *iavf_find_cf(struct iavf_adapter *adapter, + unsigned long *cookie) { - struct i40evf_cloud_filter *filter = NULL; + struct iavf_cloud_filter *filter = NULL; if (!cookie) return NULL; @@ -2819,21 +2816,21 @@ static struct i40evf_cloud_filter *i40evf_find_cf(struct i40evf_adapter *adapter } /** - * i40evf_delete_clsflower - Remove tc flower filters + * iavf_delete_clsflower - Remove tc flower filters * @adapter: board private structure * @cls_flower: Pointer to struct tc_cls_flower_offload */ -static int i40evf_delete_clsflower(struct i40evf_adapter *adapter, - struct tc_cls_flower_offload *cls_flower) +static int iavf_delete_clsflower(struct iavf_adapter *adapter, + struct tc_cls_flower_offload *cls_flower) { - struct i40evf_cloud_filter *filter = NULL; + struct iavf_cloud_filter *filter = NULL; int err = 0; spin_lock_bh(&adapter->cloud_filter_list_lock); - filter = i40evf_find_cf(adapter, &cls_flower->cookie); + filter = iavf_find_cf(adapter, &cls_flower->cookie); if (filter) { filter->del = true; - adapter->aq_required |= I40EVF_FLAG_AQ_DEL_CLOUD_FILTER; + adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER; } else { err = -EINVAL; } @@ -2843,21 +2840,21 @@ static int i40evf_delete_clsflower(struct i40evf_adapter *adapter, } /** - * i40evf_setup_tc_cls_flower - flower classifier offloads + * iavf_setup_tc_cls_flower - flower classifier offloads * @netdev: net device to configure * @type_data: offload data */ -static int i40evf_setup_tc_cls_flower(struct i40evf_adapter *adapter, - struct tc_cls_flower_offload *cls_flower) +static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter, + struct tc_cls_flower_offload *cls_flower) { if (cls_flower->common.chain_index) return -EOPNOTSUPP; switch (cls_flower->command) { case TC_CLSFLOWER_REPLACE: - return i40evf_configure_clsflower(adapter, cls_flower); + return iavf_configure_clsflower(adapter, cls_flower); case TC_CLSFLOWER_DESTROY: - return i40evf_delete_clsflower(adapter, cls_flower); + return iavf_delete_clsflower(adapter, cls_flower); case TC_CLSFLOWER_STATS: return -EOPNOTSUPP; default: @@ -2866,46 +2863,46 @@ static int i40evf_setup_tc_cls_flower(struct i40evf_adapter *adapter, } /** - * i40evf_setup_tc_block_cb - block callback for tc + * iavf_setup_tc_block_cb - block callback for tc * @type: type of offload * @type_data: offload data * @cb_priv: * * This function is the block callback for traffic classes **/ -static int i40evf_setup_tc_block_cb(enum tc_setup_type type, void *type_data, - void *cb_priv) +static int iavf_setup_tc_block_cb(enum tc_setup_type type, void *type_data, + void *cb_priv) { switch (type) { case TC_SETUP_CLSFLOWER: - return i40evf_setup_tc_cls_flower(cb_priv, type_data); + return iavf_setup_tc_cls_flower(cb_priv, type_data); default: return -EOPNOTSUPP; } } /** - * i40evf_setup_tc_block - register callbacks for tc + * iavf_setup_tc_block - register callbacks for tc * @netdev: network interface device structure * @f: tc offload data * * This function registers block callbacks for tc * offloads **/ -static int i40evf_setup_tc_block(struct net_device *dev, - struct tc_block_offload *f) +static int iavf_setup_tc_block(struct net_device *dev, + struct tc_block_offload *f) { - struct i40evf_adapter *adapter = netdev_priv(dev); + struct iavf_adapter *adapter = netdev_priv(dev); if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) return -EOPNOTSUPP; switch 
(f->command) { case TC_BLOCK_BIND: - return tcf_block_cb_register(f->block, i40evf_setup_tc_block_cb, + return tcf_block_cb_register(f->block, iavf_setup_tc_block_cb, adapter, adapter, f->extack); case TC_BLOCK_UNBIND: - tcf_block_cb_unregister(f->block, i40evf_setup_tc_block_cb, + tcf_block_cb_unregister(f->block, iavf_setup_tc_block_cb, adapter); return 0; default: @@ -2914,7 +2911,7 @@ static int i40evf_setup_tc_block(struct net_device *dev, } /** - * i40evf_setup_tc - configure multiple traffic classes + * iavf_setup_tc - configure multiple traffic classes * @netdev: network interface device structure * @type: type of offload * @type_data: tc offload data @@ -2924,21 +2921,21 @@ static int i40evf_setup_tc_block(struct net_device *dev, * * Returns 0 on success **/ -static int i40evf_setup_tc(struct net_device *netdev, enum tc_setup_type type, - void *type_data) +static int iavf_setup_tc(struct net_device *netdev, enum tc_setup_type type, + void *type_data) { switch (type) { case TC_SETUP_QDISC_MQPRIO: - return __i40evf_setup_tc(netdev, type_data); + return __iavf_setup_tc(netdev, type_data); case TC_SETUP_BLOCK: - return i40evf_setup_tc_block(netdev, type_data); + return iavf_setup_tc_block(netdev, type_data); default: return -EOPNOTSUPP; } } /** - * i40evf_open - Called when a network interface is made active + * iavf_open - Called when a network interface is made active * @netdev: network interface device structure * * Returns 0 on success, negative value on failure @@ -2949,71 +2946,71 @@ static int i40evf_setup_tc(struct net_device *netdev, enum tc_setup_type type, * handler is registered with the OS, the watchdog timer is started, * and the stack is notified that the interface is ready. **/ -static int i40evf_open(struct net_device *netdev) +static int iavf_open(struct net_device *netdev) { - struct i40evf_adapter *adapter = netdev_priv(netdev); + struct iavf_adapter *adapter = netdev_priv(netdev); int err; - if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) { + if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) { dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n"); return -EIO; } - while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, + while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section)) usleep_range(500, 1000); - if (adapter->state != __I40EVF_DOWN) { + if (adapter->state != __IAVF_DOWN) { err = -EBUSY; goto err_unlock; } /* allocate transmit descriptors */ - err = i40evf_setup_all_tx_resources(adapter); + err = iavf_setup_all_tx_resources(adapter); if (err) goto err_setup_tx; /* allocate receive descriptors */ - err = i40evf_setup_all_rx_resources(adapter); + err = iavf_setup_all_rx_resources(adapter); if (err) goto err_setup_rx; /* clear any pending interrupts, may auto mask */ - err = i40evf_request_traffic_irqs(adapter, netdev->name); + err = iavf_request_traffic_irqs(adapter, netdev->name); if (err) goto err_req_irq; spin_lock_bh(&adapter->mac_vlan_list_lock); - i40evf_add_filter(adapter, adapter->hw.mac.addr); + iavf_add_filter(adapter, adapter->hw.mac.addr); spin_unlock_bh(&adapter->mac_vlan_list_lock); - i40evf_configure(adapter); + iavf_configure(adapter); - i40evf_up_complete(adapter); + iavf_up_complete(adapter); - i40evf_irq_enable(adapter, true); + iavf_irq_enable(adapter, true); - clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); + clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); return 0; err_req_irq: - i40evf_down(adapter); - i40evf_free_traffic_irqs(adapter); +
iavf_free_traffic_irqs(adapter); err_setup_rx: - i40evf_free_all_rx_resources(adapter); + iavf_free_all_rx_resources(adapter); err_setup_tx: - i40evf_free_all_tx_resources(adapter); + iavf_free_all_tx_resources(adapter); err_unlock: - clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); + clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); return err; } /** - * i40evf_close - Disables a network interface + * iavf_close - Disables a network interface * @netdev: network interface device structure * * Returns 0, this is not allowed to fail @@ -3023,41 +3020,41 @@ err_unlock: * needs to be disabled. All IRQs except vector 0 (reserved for admin queue) * are freed, along with all transmit and receive resources. **/ -static int i40evf_close(struct net_device *netdev) +static int iavf_close(struct net_device *netdev) { - struct i40evf_adapter *adapter = netdev_priv(netdev); + struct iavf_adapter *adapter = netdev_priv(netdev); int status; - if (adapter->state <= __I40EVF_DOWN_PENDING) + if (adapter->state <= __IAVF_DOWN_PENDING) return 0; - while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, + while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section)) usleep_range(500, 1000); - set_bit(__I40E_VSI_DOWN, adapter->vsi.state); + set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); if (CLIENT_ENABLED(adapter)) - adapter->flags |= I40EVF_FLAG_CLIENT_NEEDS_CLOSE; + adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE; - i40evf_down(adapter); - adapter->state = __I40EVF_DOWN_PENDING; - i40evf_free_traffic_irqs(adapter); + iavf_down(adapter); + adapter->state = __IAVF_DOWN_PENDING; + iavf_free_traffic_irqs(adapter); - clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); + clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); /* We explicitly don't free resources here because the hardware is * still active and can DMA into memory. Resources are cleared in - * i40evf_virtchnl_completion() after we get confirmation from the PF + * iavf_virtchnl_completion() after we get confirmation from the PF * driver that the rings have been stopped. * - * Also, we wait for state to transition to __I40EVF_DOWN before - * returning. State change occurs in i40evf_virtchnl_completion() after + * Also, we wait for state to transition to __IAVF_DOWN before + * returning. State change occurs in iavf_virtchnl_completion() after * VF resources are released (which occurs after PF driver processes and * responds to admin queue commands). 
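 *
 * The wake-up side of the wait just below lives in
 * iavf_virtchnl_completion(): once the PF acknowledges
 * VIRTCHNL_OP_DISABLE_QUEUES it is expected to do, in essence:
 *
 *      adapter->state = __IAVF_DOWN;
 *      wake_up(&adapter->down_waitqueue);
 *
 * which is the standard pairing for the wait_event_timeout() here.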
*/ status = wait_event_timeout(adapter->down_waitqueue, - adapter->state == __I40EVF_DOWN, + adapter->state == __IAVF_DOWN, msecs_to_jiffies(200)); if (!status) netdev_warn(netdev, "Device resources not yet released\n"); @@ -3065,64 +3062,65 @@ static int i40evf_close(struct net_device *netdev) } /** - * i40evf_change_mtu - Change the Maximum Transfer Unit + * iavf_change_mtu - Change the Maximum Transfer Unit * @netdev: network interface device structure * @new_mtu: new value for maximum frame size * * Returns 0 on success, negative on failure **/ -static int i40evf_change_mtu(struct net_device *netdev, int new_mtu) +static int iavf_change_mtu(struct net_device *netdev, int new_mtu) { - struct i40evf_adapter *adapter = netdev_priv(netdev); + struct iavf_adapter *adapter = netdev_priv(netdev); netdev->mtu = new_mtu; if (CLIENT_ENABLED(adapter)) { - i40evf_notify_client_l2_params(&adapter->vsi); - adapter->flags |= I40EVF_FLAG_SERVICE_CLIENT_REQUESTED; + iavf_notify_client_l2_params(&adapter->vsi); + adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED; } - adapter->flags |= I40EVF_FLAG_RESET_NEEDED; + adapter->flags |= IAVF_FLAG_RESET_NEEDED; schedule_work(&adapter->reset_task); return 0; } /** - * i40e_set_features - set the netdev feature flags + * iavf_set_features - set the netdev feature flags * @netdev: ptr to the netdev being adjusted * @features: the feature set that the stack is suggesting * Note: expects to be called while under rtnl_lock() **/ -static int i40evf_set_features(struct net_device *netdev, - netdev_features_t features) +static int iavf_set_features(struct net_device *netdev, + netdev_features_t features) { - struct i40evf_adapter *adapter = netdev_priv(netdev); + struct iavf_adapter *adapter = netdev_priv(netdev); - /* Don't allow changing VLAN_RX flag when VLAN is set for VF - * and return an error in this case + /* Don't allow changing VLAN_RX flag when adapter is not capable + * of VLAN offload */ - if (VLAN_ALLOWED(adapter)) { + if (!VLAN_ALLOWED(adapter)) { + if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) + return -EINVAL; + } else if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) { if (features & NETIF_F_HW_VLAN_CTAG_RX) adapter->aq_required |= - I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING; + IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING; else adapter->aq_required |= - I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING; - } else if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) { - return -EINVAL; + IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING; } return 0; } /** - * i40evf_features_check - Validate encapsulated packet conforms to limits + * iavf_features_check - Validate encapsulated packet conforms to limits * @skb: skb buff * @dev: This physical port's netdev * @features: Offload features that the stack believes apply **/ -static netdev_features_t i40evf_features_check(struct sk_buff *skb, - struct net_device *dev, - netdev_features_t features) +static netdev_features_t iavf_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) { size_t len; @@ -3173,16 +3171,16 @@ out_err: } /** - * i40evf_fix_features - fix up the netdev feature bits + * iavf_fix_features - fix up the netdev feature bits * @netdev: our net device * @features: desired feature bits * * Returns fixed-up features bits **/ -static netdev_features_t i40evf_fix_features(struct net_device *netdev, - netdev_features_t features) +static netdev_features_t iavf_fix_features(struct net_device *netdev, + netdev_features_t features) { - struct i40evf_adapter *adapter = 
netdev_priv(netdev); + struct iavf_adapter *adapter = netdev_priv(netdev); if (!(adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)) features &= ~(NETIF_F_HW_VLAN_CTAG_TX | @@ -3192,37 +3190,37 @@ static netdev_features_t i40evf_fix_features(struct net_device *netdev, return features; } -static const struct net_device_ops i40evf_netdev_ops = { - .ndo_open = i40evf_open, - .ndo_stop = i40evf_close, - .ndo_start_xmit = i40evf_xmit_frame, - .ndo_set_rx_mode = i40evf_set_rx_mode, +static const struct net_device_ops iavf_netdev_ops = { + .ndo_open = iavf_open, + .ndo_stop = iavf_close, + .ndo_start_xmit = iavf_xmit_frame, + .ndo_set_rx_mode = iavf_set_rx_mode, .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = i40evf_set_mac, - .ndo_change_mtu = i40evf_change_mtu, - .ndo_tx_timeout = i40evf_tx_timeout, - .ndo_vlan_rx_add_vid = i40evf_vlan_rx_add_vid, - .ndo_vlan_rx_kill_vid = i40evf_vlan_rx_kill_vid, - .ndo_features_check = i40evf_features_check, - .ndo_fix_features = i40evf_fix_features, - .ndo_set_features = i40evf_set_features, - .ndo_setup_tc = i40evf_setup_tc, + .ndo_set_mac_address = iavf_set_mac, + .ndo_change_mtu = iavf_change_mtu, + .ndo_tx_timeout = iavf_tx_timeout, + .ndo_vlan_rx_add_vid = iavf_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = iavf_vlan_rx_kill_vid, + .ndo_features_check = iavf_features_check, + .ndo_fix_features = iavf_fix_features, + .ndo_set_features = iavf_set_features, + .ndo_setup_tc = iavf_setup_tc, }; /** - * i40evf_check_reset_complete - check that VF reset is complete + * iavf_check_reset_complete - check that VF reset is complete * @hw: pointer to hw struct * * Returns 0 if device is ready to use, or -EBUSY if it's in reset. **/ -static int i40evf_check_reset_complete(struct i40e_hw *hw) +static int iavf_check_reset_complete(struct iavf_hw *hw) { u32 rstat; int i; for (i = 0; i < 100; i++) { - rstat = rd32(hw, I40E_VFGEN_RSTAT) & - I40E_VFGEN_RSTAT_VFR_STATE_MASK; + rstat = rd32(hw, IAVF_VFGEN_RSTAT) & + IAVF_VFGEN_RSTAT_VFR_STATE_MASK; if ((rstat == VIRTCHNL_VFR_VFACTIVE) || (rstat == VIRTCHNL_VFR_COMPLETED)) return 0; @@ -3232,18 +3230,18 @@ static int i40evf_check_reset_complete(struct i40e_hw *hw) } /** - * i40evf_process_config - Process the config information we got from the PF + * iavf_process_config - Process the config information we got from the PF * @adapter: board private structure * * Verify that we have a valid config struct, and set up our netdev features * and our VSI struct. 
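 *
 * The general shape is a capability-bit to feature-flag translation,
 * for example (taken from the body below):
 *
 *      if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
 *              netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
 *
 * Likewise the RSS key/LUT sizes come from the PF when
 * VIRTCHNL_VF_OFFLOAD_RSS_PF is set, with IAVF_HKEY_ARRAY_SIZE and
 * IAVF_HLUT_ARRAY_SIZE as the local fallbacks.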
**/ -int i40evf_process_config(struct i40evf_adapter *adapter) +int iavf_process_config(struct iavf_adapter *adapter) { struct virtchnl_vf_resource *vfres = adapter->vf_res; int i, num_req_queues = adapter->num_req_queues; struct net_device *netdev = adapter->netdev; - struct i40e_vsi *vsi = &adapter->vsi; + struct iavf_vsi *vsi = &adapter->vsi; netdev_features_t hw_enc_features; netdev_features_t hw_features; @@ -3267,9 +3265,9 @@ int i40evf_process_config(struct i40evf_adapter *adapter) "Requested %d queues, but PF only gave us %d.\n", num_req_queues, adapter->vsi_res->num_queue_pairs); - adapter->flags |= I40EVF_FLAG_REINIT_ITR_NEEDED; + adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED; adapter->num_req_queues = adapter->vsi_res->num_queue_pairs; - i40evf_schedule_reset(adapter); + iavf_schedule_reset(adapter); return -ENODEV; } adapter->num_req_queues = 0; @@ -3332,6 +3330,8 @@ int i40evf_process_config(struct i40evf_adapter *adapter) if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; + netdev->priv_flags |= IFF_UNICAST_FLT; + /* Do not turn on offloads when they are requested to be turned off. * TSO needs minimum 576 bytes to work correctly. */ @@ -3354,22 +3354,22 @@ int i40evf_process_config(struct i40evf_adapter *adapter) adapter->vsi.back = adapter; adapter->vsi.base_vector = 1; - adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK; + adapter->vsi.work_limit = IAVF_DEFAULT_IRQ_WORK; vsi->netdev = adapter->netdev; vsi->qs_handle = adapter->vsi_res->qset_handle; if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) { adapter->rss_key_size = vfres->rss_key_size; adapter->rss_lut_size = vfres->rss_lut_size; } else { - adapter->rss_key_size = I40EVF_HKEY_ARRAY_SIZE; - adapter->rss_lut_size = I40EVF_HLUT_ARRAY_SIZE; + adapter->rss_key_size = IAVF_HKEY_ARRAY_SIZE; + adapter->rss_lut_size = IAVF_HLUT_ARRAY_SIZE; } return 0; } /** - * i40evf_init_task - worker thread to perform delayed initialization + * iavf_init_task - worker thread to perform delayed initialization * @work: pointer to work_struct containing our data * * This task completes the work that was begun in probe. Due to the nature @@ -3380,65 +3380,65 @@ int i40evf_process_config(struct i40evf_adapter *adapter) * communications with the PF driver and set up our netdev, the watchdog * takes over. 
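 *
 * The sequence is a small state machine, re-armed with
 * schedule_delayed_work() until each admin queue exchange completes:
 *
 *      __IAVF_STARTUP            - init adminq, send API version
 *      __IAVF_INIT_VERSION_CHECK - verify API, request VF config
 *      __IAVF_INIT_GET_RESOURCES - read VF config, then software init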
**/ -static void i40evf_init_task(struct work_struct *work) +static void iavf_init_task(struct work_struct *work) { - struct i40evf_adapter *adapter = container_of(work, - struct i40evf_adapter, + struct iavf_adapter *adapter = container_of(work, + struct iavf_adapter, init_task.work); struct net_device *netdev = adapter->netdev; - struct i40e_hw *hw = &adapter->hw; + struct iavf_hw *hw = &adapter->hw; struct pci_dev *pdev = adapter->pdev; int err, bufsz; switch (adapter->state) { - case __I40EVF_STARTUP: + case __IAVF_STARTUP: /* driver loaded, probe complete */ - adapter->flags &= ~I40EVF_FLAG_PF_COMMS_FAILED; - adapter->flags &= ~I40EVF_FLAG_RESET_PENDING; - err = i40e_set_mac_type(hw); + adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED; + adapter->flags &= ~IAVF_FLAG_RESET_PENDING; + err = iavf_set_mac_type(hw); if (err) { dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", err); goto err; } - err = i40evf_check_reset_complete(hw); + err = iavf_check_reset_complete(hw); if (err) { dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n", err); goto err; } - hw->aq.num_arq_entries = I40EVF_AQ_LEN; - hw->aq.num_asq_entries = I40EVF_AQ_LEN; - hw->aq.arq_buf_size = I40EVF_MAX_AQ_BUF_SIZE; - hw->aq.asq_buf_size = I40EVF_MAX_AQ_BUF_SIZE; + hw->aq.num_arq_entries = IAVF_AQ_LEN; + hw->aq.num_asq_entries = IAVF_AQ_LEN; + hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE; + hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE; - err = i40evf_init_adminq(hw); + err = iavf_init_adminq(hw); if (err) { dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n", err); goto err; } - err = i40evf_send_api_ver(adapter); + err = iavf_send_api_ver(adapter); if (err) { dev_err(&pdev->dev, "Unable to send to PF (%d)\n", err); - i40evf_shutdown_adminq(hw); + iavf_shutdown_adminq(hw); goto err; } - adapter->state = __I40EVF_INIT_VERSION_CHECK; + adapter->state = __IAVF_INIT_VERSION_CHECK; goto restart; - case __I40EVF_INIT_VERSION_CHECK: - if (!i40evf_asq_done(hw)) { + case __IAVF_INIT_VERSION_CHECK: + if (!iavf_asq_done(hw)) { dev_err(&pdev->dev, "Admin queue command never completed\n"); - i40evf_shutdown_adminq(hw); - adapter->state = __I40EVF_STARTUP; + iavf_shutdown_adminq(hw); + adapter->state = __IAVF_STARTUP; goto err; } /* aq msg sent, awaiting reply */ - err = i40evf_verify_api_ver(adapter); + err = iavf_verify_api_ver(adapter); if (err) { if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) - err = i40evf_send_api_ver(adapter); + err = iavf_send_api_ver(adapter); else dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n", adapter->pf_version.major, @@ -3447,34 +3447,34 @@ static void i40evf_init_task(struct work_struct *work) VIRTCHNL_VERSION_MINOR); goto err; } - err = i40evf_send_vf_config_msg(adapter); + err = iavf_send_vf_config_msg(adapter); if (err) { dev_err(&pdev->dev, "Unable to send config request (%d)\n", err); goto err; } - adapter->state = __I40EVF_INIT_GET_RESOURCES; + adapter->state = __IAVF_INIT_GET_RESOURCES; goto restart; - case __I40EVF_INIT_GET_RESOURCES: + case __IAVF_INIT_GET_RESOURCES: /* aq msg sent, awaiting reply */ if (!adapter->vf_res) { bufsz = sizeof(struct virtchnl_vf_resource) + - (I40E_MAX_VF_VSI * + (IAVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource)); adapter->vf_res = kzalloc(bufsz, GFP_KERNEL); if (!adapter->vf_res) goto err; } - err = i40evf_get_vf_config(adapter); + err = iavf_get_vf_config(adapter); if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) { - err = i40evf_send_vf_config_msg(adapter); + err = iavf_send_vf_config_msg(adapter); goto err; } else if (err == 
I40E_ERR_PARAM) { /* We only get ERR_PARAM if the device is in a very bad * state or if we've been disabled for previous bad * behavior. Either way, we're done now. */ - i40evf_shutdown_adminq(hw); + iavf_shutdown_adminq(hw); dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n"); return; } @@ -3483,25 +3483,25 @@ static void i40evf_init_task(struct work_struct *work) err); goto err_alloc; } - adapter->state = __I40EVF_INIT_SW; + adapter->state = __IAVF_INIT_SW; break; default: goto err_alloc; } - if (i40evf_process_config(adapter)) + if (iavf_process_config(adapter)) goto err_alloc; adapter->current_op = VIRTCHNL_OP_UNKNOWN; - adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED; + adapter->flags |= IAVF_FLAG_RX_CSUM_ENABLED; - netdev->netdev_ops = &i40evf_netdev_ops; - i40evf_set_ethtool_ops(netdev); + netdev->netdev_ops = &iavf_netdev_ops; + iavf_set_ethtool_ops(netdev); netdev->watchdog_timeo = 5 * HZ; /* MTU range: 68 - 9710 */ netdev->min_mtu = ETH_MIN_MTU; - netdev->max_mtu = I40E_MAX_RXBUFFER - I40E_PACKET_HDR_PAD; + netdev->max_mtu = IAVF_MAX_RXBUFFER - IAVF_PACKET_HDR_PAD; if (!is_valid_ether_addr(adapter->hw.mac.addr)) { dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n", @@ -3509,25 +3509,25 @@ static void i40evf_init_task(struct work_struct *work) eth_hw_addr_random(netdev); ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); } else { - adapter->flags |= I40EVF_FLAG_ADDR_SET_BY_PF; + adapter->flags |= IAVF_FLAG_ADDR_SET_BY_PF; ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr); ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); } - timer_setup(&adapter->watchdog_timer, i40evf_watchdog_timer, 0); + timer_setup(&adapter->watchdog_timer, iavf_watchdog_timer, 0); mod_timer(&adapter->watchdog_timer, jiffies + 1); - adapter->tx_desc_count = I40EVF_DEFAULT_TXD; - adapter->rx_desc_count = I40EVF_DEFAULT_RXD; - err = i40evf_init_interrupt_scheme(adapter); + adapter->tx_desc_count = IAVF_DEFAULT_TXD; + adapter->rx_desc_count = IAVF_DEFAULT_RXD; + err = iavf_init_interrupt_scheme(adapter); if (err) goto err_sw_init; - i40evf_map_rings_to_vectors(adapter); + iavf_map_rings_to_vectors(adapter); if (adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) - adapter->flags |= I40EVF_FLAG_WB_ON_ITR_CAPABLE; + adapter->flags |= IAVF_FLAG_WB_ON_ITR_CAPABLE; - err = i40evf_request_misc_irq(adapter); + err = iavf_request_misc_irq(adapter); if (err) goto err_sw_init; @@ -3544,7 +3544,7 @@ static void i40evf_init_task(struct work_struct *work) netif_tx_stop_all_queues(netdev); if (CLIENT_ALLOWED(adapter)) { - err = i40evf_lan_add_device(adapter); + err = iavf_lan_add_device(adapter); if (err) dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n", err); @@ -3554,9 +3554,9 @@ static void i40evf_init_task(struct work_struct *work) if (netdev->features & NETIF_F_GRO) dev_info(&pdev->dev, "GRO is enabled\n"); - adapter->state = __I40EVF_DOWN; - set_bit(__I40E_VSI_DOWN, adapter->vsi.state); - i40evf_misc_irq_enable(adapter); + adapter->state = __IAVF_DOWN; + set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); + iavf_misc_irq_enable(adapter); wake_up(&adapter->down_waitqueue); adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL); @@ -3565,31 +3565,31 @@ static void i40evf_init_task(struct work_struct *work) goto err_mem; if (RSS_AQ(adapter)) { - adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_RSS; + adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS; mod_timer_pending(&adapter->watchdog_timer, jiffies + 1); } else { - 
i40evf_init_rss(adapter); + iavf_init_rss(adapter); } return; restart: schedule_delayed_work(&adapter->init_task, msecs_to_jiffies(30)); return; err_mem: - i40evf_free_rss(adapter); + iavf_free_rss(adapter); err_register: - i40evf_free_misc_irq(adapter); + iavf_free_misc_irq(adapter); err_sw_init: - i40evf_reset_interrupt_capability(adapter); + iavf_reset_interrupt_capability(adapter); err_alloc: kfree(adapter->vf_res); adapter->vf_res = NULL; err: /* Things went into the weeds, so try again later */ - if (++adapter->aq_wait_count > I40EVF_AQ_MAX_ERR) { + if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) { dev_err(&pdev->dev, "Failed to communicate with PF; waiting before retry\n"); - adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED; - i40evf_shutdown_adminq(hw); - adapter->state = __I40EVF_STARTUP; + adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED; + iavf_shutdown_adminq(hw); + adapter->state = __IAVF_STARTUP; schedule_delayed_work(&adapter->init_task, HZ * 5); return; } @@ -3597,21 +3597,21 @@ err: } /** - * i40evf_shutdown - Shutdown the device in preparation for a reboot + * iavf_shutdown - Shutdown the device in preparation for a reboot * @pdev: pci device structure **/ -static void i40evf_shutdown(struct pci_dev *pdev) +static void iavf_shutdown(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); - struct i40evf_adapter *adapter = netdev_priv(netdev); + struct iavf_adapter *adapter = netdev_priv(netdev); netif_device_detach(netdev); if (netif_running(netdev)) - i40evf_close(netdev); + iavf_close(netdev); /* Prevent the watchdog from running. */ - adapter->state = __I40EVF_REMOVE; + adapter->state = __IAVF_REMOVE; adapter->aq_required = 0; #ifdef CONFIG_PM @@ -3622,21 +3622,21 @@ static void i40evf_shutdown(struct pci_dev *pdev) } /** - * i40evf_probe - Device Initialization Routine + * iavf_probe - Device Initialization Routine * @pdev: PCI device information struct - * @ent: entry in i40evf_pci_tbl + * @ent: entry in iavf_pci_tbl * * Returns 0 on success, negative on failure * - * i40evf_probe initializes an adapter identified by a pci_dev structure. + * iavf_probe initializes an adapter identified by a pci_dev structure. * The OS initialization, configuring of the adapter private structure, * and a hardware reset occur. 
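 *
 * Note the staggered kick-off at the end of the body below:
 *
 *      schedule_delayed_work(&adapter->init_task,
 *                            msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
 *
 * pdev->devfn & 0x07 is the PCI function number (0-7), so VFs that
 * share a device spread their first init attempt across 0-35 ms
 * rather than hitting the PF's admin queue at the same instant.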
**/ -static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct net_device *netdev; - struct i40evf_adapter *adapter = NULL; - struct i40e_hw *hw = NULL; + struct iavf_adapter *adapter = NULL; + struct iavf_hw *hw = NULL; int err; err = pci_enable_device(pdev); @@ -3653,7 +3653,7 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } } - err = pci_request_regions(pdev, i40evf_driver_name); + err = pci_request_regions(pdev, iavf_driver_name); if (err) { dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err); @@ -3664,8 +3664,8 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_master(pdev); - netdev = alloc_etherdev_mq(sizeof(struct i40evf_adapter), - I40EVF_MAX_REQ_QUEUES); + netdev = alloc_etherdev_mq(sizeof(struct iavf_adapter), + IAVF_MAX_REQ_QUEUES); if (!netdev) { err = -ENOMEM; goto err_alloc_etherdev; @@ -3683,7 +3683,7 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw->back = adapter; adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1; - adapter->state = __I40EVF_STARTUP; + adapter->state = __IAVF_STARTUP; /* Call save state here because it relies on the adapter struct. */ pci_save_state(pdev); @@ -3716,11 +3716,11 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) INIT_LIST_HEAD(&adapter->vlan_filter_list); INIT_LIST_HEAD(&adapter->cloud_filter_list); - INIT_WORK(&adapter->reset_task, i40evf_reset_task); - INIT_WORK(&adapter->adminq_task, i40evf_adminq_task); - INIT_WORK(&adapter->watchdog_task, i40evf_watchdog_task); - INIT_DELAYED_WORK(&adapter->client_task, i40evf_client_task); - INIT_DELAYED_WORK(&adapter->init_task, i40evf_init_task); + INIT_WORK(&adapter->reset_task, iavf_reset_task); + INIT_WORK(&adapter->adminq_task, iavf_adminq_task); + INIT_WORK(&adapter->watchdog_task, iavf_watchdog_task); + INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task); + INIT_DELAYED_WORK(&adapter->init_task, iavf_init_task); schedule_delayed_work(&adapter->init_task, msecs_to_jiffies(5 * (pdev->devfn & 0x07))); @@ -3741,33 +3741,33 @@ err_dma: #ifdef CONFIG_PM /** - * i40evf_suspend - Power management suspend routine + * iavf_suspend - Power management suspend routine * @pdev: PCI device information struct * @state: unused * * Called when the system (VM) is entering sleep/suspend. 
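 *
 * Like iavf_open()/iavf_close(), this path serializes against other
 * configuration work with the driver's open-coded critical section:
 *
 *      while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
 *                              &adapter->crit_section))
 *              usleep_range(500, 1000);
 *      ...
 *      clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);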
**/ -static int i40evf_suspend(struct pci_dev *pdev, pm_message_t state) +static int iavf_suspend(struct pci_dev *pdev, pm_message_t state) { struct net_device *netdev = pci_get_drvdata(pdev); - struct i40evf_adapter *adapter = netdev_priv(netdev); + struct iavf_adapter *adapter = netdev_priv(netdev); int retval = 0; netif_device_detach(netdev); - while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, + while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section)) usleep_range(500, 1000); if (netif_running(netdev)) { rtnl_lock(); - i40evf_down(adapter); + iavf_down(adapter); rtnl_unlock(); } - i40evf_free_misc_irq(adapter); - i40evf_reset_interrupt_capability(adapter); + iavf_free_misc_irq(adapter); + iavf_reset_interrupt_capability(adapter); - clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); + clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); retval = pci_save_state(pdev); if (retval) @@ -3779,14 +3779,14 @@ static int i40evf_suspend(struct pci_dev *pdev, pm_message_t state) } /** - * i40evf_resume - Power management resume routine + * iavf_resume - Power management resume routine * @pdev: PCI device information struct * * Called when the system (VM) is resumed from sleep/suspend. **/ -static int i40evf_resume(struct pci_dev *pdev) +static int iavf_resume(struct pci_dev *pdev) { - struct i40evf_adapter *adapter = pci_get_drvdata(pdev); + struct iavf_adapter *adapter = pci_get_drvdata(pdev); struct net_device *netdev = adapter->netdev; u32 err; @@ -3805,13 +3805,13 @@ static int i40evf_resume(struct pci_dev *pdev) pci_set_master(pdev); rtnl_lock(); - err = i40evf_set_interrupt_capability(adapter); + err = iavf_set_interrupt_capability(adapter); if (err) { rtnl_unlock(); dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n"); return err; } - err = i40evf_request_misc_irq(adapter); + err = iavf_request_misc_irq(adapter); rtnl_unlock(); if (err) { dev_err(&pdev->dev, "Cannot get interrupt vector.\n"); @@ -3827,25 +3827,25 @@ static int i40evf_resume(struct pci_dev *pdev) #endif /* CONFIG_PM */ /** - * i40evf_remove - Device Removal Routine + * iavf_remove - Device Removal Routine * @pdev: PCI device information struct * - * i40evf_remove is called by the PCI subsystem to alert the driver + * iavf_remove is called by the PCI subsystem to alert the driver * that it should release a PCI device. This could be caused by a * Hot-Plug event, or because the driver is going to be removed from * memory.
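 *
 * Teardown order matters: the body below first marks the remove and
 * cancels outstanding work items so no worker can touch state that is
 * about to be freed,
 *
 *      set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section);
 *      cancel_delayed_work_sync(&adapter->init_task);
 *      cancel_work_sync(&adapter->reset_task);
 *
 * then asks the PF for a reset, and only afterwards releases IRQs,
 * vectors, rings and filter lists.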
**/ -static void i40evf_remove(struct pci_dev *pdev) +static void iavf_remove(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); - struct i40evf_adapter *adapter = netdev_priv(netdev); - struct i40evf_vlan_filter *vlf, *vlftmp; - struct i40evf_mac_filter *f, *ftmp; - struct i40evf_cloud_filter *cf, *cftmp; - struct i40e_hw *hw = &adapter->hw; + struct iavf_adapter *adapter = netdev_priv(netdev); + struct iavf_vlan_filter *vlf, *vlftmp; + struct iavf_mac_filter *f, *ftmp; + struct iavf_cloud_filter *cf, *cftmp; + struct iavf_hw *hw = &adapter->hw; int err; /* Indicate we are in remove and not to run reset_task */ - set_bit(__I40EVF_IN_REMOVE_TASK, &adapter->crit_section); + set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section); cancel_delayed_work_sync(&adapter->init_task); cancel_work_sync(&adapter->reset_task); cancel_delayed_work_sync(&adapter->client_task); @@ -3854,37 +3854,39 @@ static void i40evf_remove(struct pci_dev *pdev) adapter->netdev_registered = false; } if (CLIENT_ALLOWED(adapter)) { - err = i40evf_lan_del_device(adapter); + err = iavf_lan_del_device(adapter); if (err) dev_warn(&pdev->dev, "Failed to delete client device: %d\n", err); } /* Shut down all the garbage mashers on the detention level */ - adapter->state = __I40EVF_REMOVE; + adapter->state = __IAVF_REMOVE; adapter->aq_required = 0; - adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED; - i40evf_request_reset(adapter); + adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; + iavf_request_reset(adapter); msleep(50); /* If the FW isn't responding, kick it once, but only once. */ - if (!i40evf_asq_done(hw)) { - i40evf_request_reset(adapter); + if (!iavf_asq_done(hw)) { + iavf_request_reset(adapter); msleep(50); } - i40evf_free_all_tx_resources(adapter); - i40evf_free_all_rx_resources(adapter); - i40evf_misc_irq_disable(adapter); - i40evf_free_misc_irq(adapter); - i40evf_reset_interrupt_capability(adapter); - i40evf_free_q_vectors(adapter); + iavf_free_all_tx_resources(adapter); + iavf_free_all_rx_resources(adapter); + iavf_misc_irq_disable(adapter); + iavf_free_misc_irq(adapter); + iavf_reset_interrupt_capability(adapter); + iavf_free_q_vectors(adapter); if (adapter->watchdog_timer.function) del_timer_sync(&adapter->watchdog_timer); - i40evf_free_rss(adapter); + cancel_work_sync(&adapter->adminq_task); + + iavf_free_rss(adapter); if (hw->aq.asq.count) - i40evf_shutdown_adminq(hw); + iavf_shutdown_adminq(hw); /* destroy the locks only once, here */ mutex_destroy(&hw->aq.arq_mutex); @@ -3892,9 +3894,9 @@ static void i40evf_remove(struct pci_dev *pdev) iounmap(hw->hw_addr); pci_release_regions(pdev); - i40evf_free_all_tx_resources(adapter); - i40evf_free_all_rx_resources(adapter); - i40evf_free_queues(adapter); + iavf_free_all_tx_resources(adapter); + iavf_free_all_rx_resources(adapter); + iavf_free_queues(adapter); kfree(adapter->vf_res); spin_lock_bh(&adapter->mac_vlan_list_lock); /* If we got removed before an up/down sequence, we've got a filter @@ -3926,57 +3928,57 @@ static void i40evf_remove(struct pci_dev *pdev) pci_disable_device(pdev); } -static struct pci_driver i40evf_driver = { - .name = i40evf_driver_name, - .id_table = i40evf_pci_tbl, - .probe = i40evf_probe, - .remove = i40evf_remove, +static struct pci_driver iavf_driver = { + .name = iavf_driver_name, + .id_table = iavf_pci_tbl, + .probe = iavf_probe, + .remove = iavf_remove, #ifdef CONFIG_PM - .suspend = i40evf_suspend, - .resume = i40evf_resume, + .suspend = iavf_suspend, + .resume = iavf_resume, #endif - .shutdown = i40evf_shutdown, 
+ .shutdown = iavf_shutdown, }; /** - * i40e_init_module - Driver Registration Routine + * iavf_init_module - Driver Registration Routine * - * i40e_init_module is the first routine called when the driver is + * iavf_init_module is the first routine called when the driver is * loaded. All it does is register with the PCI subsystem. **/ -static int __init i40evf_init_module(void) +static int __init iavf_init_module(void) { int ret; - pr_info("i40evf: %s - version %s\n", i40evf_driver_string, - i40evf_driver_version); + pr_info("iavf: %s - version %s\n", iavf_driver_string, + iavf_driver_version); - pr_info("%s\n", i40evf_copyright); + pr_info("%s\n", iavf_copyright); - i40evf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1, - i40evf_driver_name); - if (!i40evf_wq) { - pr_err("%s: Failed to create workqueue\n", i40evf_driver_name); + iavf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1, + iavf_driver_name); + if (!iavf_wq) { + pr_err("%s: Failed to create workqueue\n", iavf_driver_name); return -ENOMEM; } - ret = pci_register_driver(&i40evf_driver); + ret = pci_register_driver(&iavf_driver); return ret; } -module_init(i40evf_init_module); +module_init(iavf_init_module); /** - * i40e_exit_module - Driver Exit Cleanup Routine + * iavf_exit_module - Driver Exit Cleanup Routine * - * i40e_exit_module is called just before the driver is removed + * iavf_exit_module is called just before the driver is removed * from memory. **/ -static void __exit i40evf_exit_module(void) +static void __exit iavf_exit_module(void) { - pci_unregister_driver(&i40evf_driver); - destroy_workqueue(i40evf_wq); + pci_unregister_driver(&iavf_driver); + destroy_workqueue(iavf_wq); } -module_exit(i40evf_exit_module); +module_exit(iavf_exit_module); -/* i40evf_main.c */ +/* iavf_main.c */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_osdep.h b/drivers/net/ethernet/intel/iavf/iavf_osdep.h index 3ddddb46455b..e6e0b0328706 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_osdep.h +++ b/drivers/net/ethernet/intel/iavf/iavf_osdep.h @@ -1,8 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright(c) 2013 - 2018 Intel Corporation. */ -#ifndef _I40E_OSDEP_H_ -#define _I40E_OSDEP_H_ +#ifndef _IAVF_OSDEP_H_ +#define _IAVF_OSDEP_H_ #include <linux/types.h> #include <linux/if_ether.h> @@ -24,29 +24,29 @@ #define wr64(a, reg, value) writeq((value), ((a)->hw_addr + (reg))) #define rd64(a, reg) readq((a)->hw_addr + (reg)) -#define i40e_flush(a) readl((a)->hw_addr + I40E_VFGEN_RSTAT) +#define iavf_flush(a) readl((a)->hw_addr + IAVF_VFGEN_RSTAT) /* memory allocation tracking */ -struct i40e_dma_mem { +struct iavf_dma_mem { void *va; dma_addr_t pa; u32 size; }; -#define i40e_allocate_dma_mem(h, m, unused, s, a) \ - i40evf_allocate_dma_mem_d(h, m, s, a) -#define i40e_free_dma_mem(h, m) i40evf_free_dma_mem_d(h, m) +#define iavf_allocate_dma_mem(h, m, unused, s, a) \ + iavf_allocate_dma_mem_d(h, m, s, a) +#define iavf_free_dma_mem(h, m) iavf_free_dma_mem_d(h, m) -struct i40e_virt_mem { +struct iavf_virt_mem { void *va; u32 size; }; -#define i40e_allocate_virt_mem(h, m, s) i40evf_allocate_virt_mem_d(h, m, s) -#define i40e_free_virt_mem(h, m) i40evf_free_virt_mem_d(h, m) +#define iavf_allocate_virt_mem(h, m, s) iavf_allocate_virt_mem_d(h, m, s) +#define iavf_free_virt_mem(h, m) iavf_free_virt_mem_d(h, m) -#define i40e_debug(h, m, s, ...) i40evf_debug_d(h, m, s, ##__VA_ARGS__) -extern void i40evf_debug_d(void *hw, u32 mask, char *fmt_str, ...) +#define iavf_debug(h, m, s, ...) 
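iavf_init_module() above creates the driver workqueue before registering with the PCI core and returns the registration status as-is. A minimal sketch of the same init/exit pairing with an explicit unwind when registration fails; the helper names are illustrative, not the kernel's:

#include <stddef.h>

static void *wq_alloc(void)       { return (void *)1; }	/* alloc_workqueue() stand-in */
static void  wq_destroy(void *wq) { (void)wq; }
static int   drv_register(void)   { return 0; }		/* 0 means success */
static void  drv_unregister(void) { }

static void *g_wq;

static int init_sketch(void)
{
	int ret;

	g_wq = wq_alloc();
	if (!g_wq)
		return -1;		/* -ENOMEM in kernel terms */

	ret = drv_register();
	if (ret)
		wq_destroy(g_wq);	/* unwind so the queue is not leaked */
	return ret;
}

static void exit_sketch(void)
{
	drv_unregister();		/* tear down in reverse order of init */
	wq_destroy(g_wq);
}

int main(void)
{
	if (init_sketch())
		return 1;
	exit_sketch();
	return 0;
}

Tearing down in reverse order of construction keeps the exit path symmetric with iavf_exit_module() above.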
iavf_debug_d(h, m, s, ##__VA_ARGS__) +extern void iavf_debug_d(void *hw, u32 mask, char *fmt_str, ...) __attribute__ ((format(gnu_printf, 3, 4))); -typedef enum i40e_status_code i40e_status; -#endif /* _I40E_OSDEP_H_ */ +typedef enum iavf_status_code iavf_status; +#endif /* _IAVF_OSDEP_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_prototype.h b/drivers/net/ethernet/intel/iavf/iavf_prototype.h new file mode 100644 index 000000000000..d6685103af39 --- /dev/null +++ b/drivers/net/ethernet/intel/iavf/iavf_prototype.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ + +#ifndef _IAVF_PROTOTYPE_H_ +#define _IAVF_PROTOTYPE_H_ + +#include "iavf_type.h" +#include "iavf_alloc.h" +#include <linux/avf/virtchnl.h> + +/* Prototypes for shared code functions that are not in + * the standard function pointer structures. These are + * mostly because they are needed even before the init + * has happened and will assist in the early SW and FW + * setup. + */ + +/* adminq functions */ +iavf_status iavf_init_adminq(struct iavf_hw *hw); +iavf_status iavf_shutdown_adminq(struct iavf_hw *hw); +void i40e_adminq_init_ring_data(struct iavf_hw *hw); +iavf_status iavf_clean_arq_element(struct iavf_hw *hw, + struct i40e_arq_event_info *e, + u16 *events_pending); +iavf_status iavf_asq_send_command(struct iavf_hw *hw, struct i40e_aq_desc *desc, + void *buff, /* can be NULL */ + u16 buff_size, + struct i40e_asq_cmd_details *cmd_details); +bool iavf_asq_done(struct iavf_hw *hw); + +/* debug function for adminq */ +void iavf_debug_aq(struct iavf_hw *hw, enum iavf_debug_mask mask, + void *desc, void *buffer, u16 buf_len); + +void i40e_idle_aq(struct iavf_hw *hw); +void iavf_resume_aq(struct iavf_hw *hw); +bool iavf_check_asq_alive(struct iavf_hw *hw); +iavf_status iavf_aq_queue_shutdown(struct iavf_hw *hw, bool unloading); +const char *iavf_aq_str(struct iavf_hw *hw, enum i40e_admin_queue_err aq_err); +const char *iavf_stat_str(struct iavf_hw *hw, iavf_status stat_err); + +iavf_status iavf_aq_get_rss_lut(struct iavf_hw *hw, u16 seid, + bool pf_lut, u8 *lut, u16 lut_size); +iavf_status iavf_aq_set_rss_lut(struct iavf_hw *hw, u16 seid, + bool pf_lut, u8 *lut, u16 lut_size); +iavf_status iavf_aq_get_rss_key(struct iavf_hw *hw, u16 seid, + struct i40e_aqc_get_set_rss_key_data *key); +iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw, u16 seid, + struct i40e_aqc_get_set_rss_key_data *key); + +iavf_status iavf_set_mac_type(struct iavf_hw *hw); + +extern struct iavf_rx_ptype_decoded iavf_ptype_lookup[]; + +static inline struct iavf_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype) +{ + return iavf_ptype_lookup[ptype]; +} + +void iavf_vf_parse_hw_config(struct iavf_hw *hw, + struct virtchnl_vf_resource *msg); +iavf_status iavf_vf_reset(struct iavf_hw *hw); +iavf_status iavf_aq_send_msg_to_pf(struct iavf_hw *hw, + enum virtchnl_ops v_opcode, + iavf_status v_retval, u8 *msg, u16 msglen, + struct i40e_asq_cmd_details *cmd_details); +#endif /* _IAVF_PROTOTYPE_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_register.h b/drivers/net/ethernet/intel/iavf/iavf_register.h new file mode 100644 index 000000000000..bf793332fc9d --- /dev/null +++ b/drivers/net/ethernet/intel/iavf/iavf_register.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2013 - 2018 Intel Corporation. 
*/ + +#ifndef _IAVF_REGISTER_H_ +#define _IAVF_REGISTER_H_ + +#define IAVF_VF_ARQBAH1 0x00006000 /* Reset: EMPR */ +#define IAVF_VF_ARQBAL1 0x00006C00 /* Reset: EMPR */ +#define IAVF_VF_ARQH1 0x00007400 /* Reset: EMPR */ +#define IAVF_VF_ARQH1_ARQH_SHIFT 0 +#define IAVF_VF_ARQH1_ARQH_MASK IAVF_MASK(0x3FF, IAVF_VF_ARQH1_ARQH_SHIFT) +#define IAVF_VF_ARQLEN1 0x00008000 /* Reset: EMPR */ +#define IAVF_VF_ARQLEN1_ARQVFE_SHIFT 28 +#define IAVF_VF_ARQLEN1_ARQVFE_MASK IAVF_MASK(0x1, IAVF_VF_ARQLEN1_ARQVFE_SHIFT) +#define IAVF_VF_ARQLEN1_ARQOVFL_SHIFT 29 +#define IAVF_VF_ARQLEN1_ARQOVFL_MASK IAVF_MASK(0x1, IAVF_VF_ARQLEN1_ARQOVFL_SHIFT) +#define IAVF_VF_ARQLEN1_ARQCRIT_SHIFT 30 +#define IAVF_VF_ARQLEN1_ARQCRIT_MASK IAVF_MASK(0x1, IAVF_VF_ARQLEN1_ARQCRIT_SHIFT) +#define IAVF_VF_ARQLEN1_ARQENABLE_SHIFT 31 +#define IAVF_VF_ARQLEN1_ARQENABLE_MASK IAVF_MASK(0x1, IAVF_VF_ARQLEN1_ARQENABLE_SHIFT) +#define IAVF_VF_ARQT1 0x00007000 /* Reset: EMPR */ +#define IAVF_VF_ATQBAH1 0x00007800 /* Reset: EMPR */ +#define IAVF_VF_ATQBAL1 0x00007C00 /* Reset: EMPR */ +#define IAVF_VF_ATQH1 0x00006400 /* Reset: EMPR */ +#define IAVF_VF_ATQLEN1 0x00006800 /* Reset: EMPR */ +#define IAVF_VF_ATQLEN1_ATQVFE_SHIFT 28 +#define IAVF_VF_ATQLEN1_ATQVFE_MASK IAVF_MASK(0x1, IAVF_VF_ATQLEN1_ATQVFE_SHIFT) +#define IAVF_VF_ATQLEN1_ATQOVFL_SHIFT 29 +#define IAVF_VF_ATQLEN1_ATQOVFL_MASK IAVF_MASK(0x1, IAVF_VF_ATQLEN1_ATQOVFL_SHIFT) +#define IAVF_VF_ATQLEN1_ATQCRIT_SHIFT 30 +#define IAVF_VF_ATQLEN1_ATQCRIT_MASK IAVF_MASK(0x1, IAVF_VF_ATQLEN1_ATQCRIT_SHIFT) +#define IAVF_VF_ATQLEN1_ATQENABLE_SHIFT 31 +#define IAVF_VF_ATQLEN1_ATQENABLE_MASK IAVF_MASK(0x1, IAVF_VF_ATQLEN1_ATQENABLE_SHIFT) +#define IAVF_VF_ATQT1 0x00008400 /* Reset: EMPR */ +#define IAVF_VFGEN_RSTAT 0x00008800 /* Reset: VFR */ +#define IAVF_VFGEN_RSTAT_VFR_STATE_SHIFT 0 +#define IAVF_VFGEN_RSTAT_VFR_STATE_MASK IAVF_MASK(0x3, IAVF_VFGEN_RSTAT_VFR_STATE_SHIFT) +#define IAVF_VFINT_DYN_CTL01 0x00005C00 /* Reset: VFR */ +#define IAVF_VFINT_DYN_CTL01_INTENA_SHIFT 0 +#define IAVF_VFINT_DYN_CTL01_INTENA_MASK IAVF_MASK(0x1, IAVF_VFINT_DYN_CTL01_INTENA_SHIFT) +#define IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT 3 +#define IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK IAVF_MASK(0x3, IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT) +#define IAVF_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...15 */ /* Reset: VFR */ +#define IAVF_VFINT_DYN_CTLN1_INTENA_SHIFT 0 +#define IAVF_VFINT_DYN_CTLN1_INTENA_MASK IAVF_MASK(0x1, IAVF_VFINT_DYN_CTLN1_INTENA_SHIFT) +#define IAVF_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2 +#define IAVF_VFINT_DYN_CTLN1_SWINT_TRIG_MASK IAVF_MASK(0x1, IAVF_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT) +#define IAVF_VFINT_DYN_CTLN1_ITR_INDX_SHIFT 3 +#define IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK IAVF_MASK(0x3, IAVF_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) +#define IAVF_VFINT_DYN_CTLN1_INTERVAL_SHIFT 5 +#define IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT 24 +#define IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK IAVF_MASK(0x1, IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT) +#define IAVF_VFINT_ICR0_ENA1 0x00005000 /* Reset: CORER */ +#define IAVF_VFINT_ICR0_ENA1_ADMINQ_SHIFT 30 +#define IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK IAVF_MASK(0x1, IAVF_VFINT_ICR0_ENA1_ADMINQ_SHIFT) +#define IAVF_VFINT_ICR0_ENA1_RSVD_SHIFT 31 +#define IAVF_VFINT_ICR01 0x00004800 /* Reset: CORER */ +#define IAVF_VFINT_ITRN1(_i, _INTVF) (0x00002800 + ((_i) * 64 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...15 */ /* Reset: VFR */ +#define IAVF_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define IAVF_QTX_TAIL1(_Q) (0x00000000 + ((_Q) * 4)) 
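Every field in this register map follows the same SHIFT/MASK convention. A self-contained model of reading one back, assuming IAVF_MASK() expands to the usual ((u32)(mask) << (shift)) form this driver family uses:

#include <stdint.h>
#include <stdio.h>

#define IAVF_MASK(mask, shift)	((uint32_t)(mask) << (shift))

#define ARQH_SHIFT	0
#define ARQH_MASK	IAVF_MASK(0x3FF, ARQH_SHIFT)	/* 10-bit head index */

/* Extract the ARQ head field from a raw IAVF_VF_ARQH1 register value. */
static uint32_t arq_head(uint32_t reg)
{
	return (reg & ARQH_MASK) >> ARQH_SHIFT;
}

int main(void)
{
	/* Upper bits belong to other or reserved fields; the mask drops them. */
	printf("%u\n", arq_head(0xABC0012Au));	/* prints 298 (0x12A) */
	return 0;
}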
/* _i=0...15 */ /* Reset: PFR */ +#define IAVF_VFQF_HENA(_i) (0x0000C400 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */ +#define IAVF_VFQF_HKEY(_i) (0x0000CC00 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */ +#define IAVF_VFQF_HKEY_MAX_INDEX 12 +#define IAVF_VFQF_HLUT(_i) (0x0000D000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define IAVF_VFQF_HLUT_MAX_INDEX 15 +#define IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT 30 +#define IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK IAVF_MASK(0x1, IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT) +#endif /* _IAVF_REGISTER_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_status.h b/drivers/net/ethernet/intel/iavf/iavf_status.h index 77be0702d07c..46742fab7b8c 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_status.h +++ b/drivers/net/ethernet/intel/iavf/iavf_status.h @@ -1,11 +1,11 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright(c) 2013 - 2018 Intel Corporation. */ -#ifndef _I40E_STATUS_H_ -#define _I40E_STATUS_H_ +#ifndef _IAVF_STATUS_H_ +#define _IAVF_STATUS_H_ /* Error Codes */ -enum i40e_status_code { +enum iavf_status_code { I40E_SUCCESS = 0, I40E_ERR_NVM = -1, I40E_ERR_NVM_CHECKSUM = -2, @@ -75,4 +75,4 @@ enum i40e_status_code { I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR = -66, }; -#endif /* _I40E_STATUS_H_ */ +#endif /* _IAVF_STATUS_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_trace.h b/drivers/net/ethernet/intel/iavf/iavf_trace.h index d7a4e68820a8..1474f5539751 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_trace.h +++ b/drivers/net/ethernet/intel/iavf/iavf_trace.h @@ -3,16 +3,16 @@ /* Modeled on trace-events-sample.h */ -/* The trace subsystem name for i40evf will be "i40evf". +/* The trace subsystem name for iavf will be "iavf". * - * This file is named i40e_trace.h. + * This file is named iavf_trace.h. * * Since this include file's name is different from the trace * subsystem name, we'll have to define TRACE_INCLUDE_FILE at the end * of this file. */ #undef TRACE_SYSTEM -#define TRACE_SYSTEM i40evf +#define TRACE_SYSTEM iavf /* See trace-events-sample.h for a detailed description of why this * guard clause is different from most normal include files. @@ -23,14 +23,14 @@ #include <linux/tracepoint.h> /** - * i40e_trace() macro enables shared code to refer to trace points + * iavf_trace() macro enables shared code to refer to trace points * like: * - * trace_i40e{,vf}_example(args...) + * trace_iavf{,vf}_example(args...) * * ... as: * - * i40e_trace(example, args...) + * iavf_trace(example, args...) * * ... to resolve to the PF or VF version of the tracepoint without * ifdefs, and to allow tracepoints to be disabled entirely at build @@ -39,29 +39,29 @@ * Trace point should always be referred to in the driver via this * macro. * - * Similarly, i40e_trace_enabled(trace_name) wraps references to - * trace_i40e{,vf}_<trace_name>_enabled() functions. + * Similarly, iavf_trace_enabled(trace_name) wraps references to + * trace_iavf{,vf}_<trace_name>_enabled() functions. */ -#define _I40E_TRACE_NAME(trace_name) (trace_ ## i40evf ## _ ## trace_name) -#define I40E_TRACE_NAME(trace_name) _I40E_TRACE_NAME(trace_name) +#define _IAVF_TRACE_NAME(trace_name) (trace_ ## iavf ## _ ## trace_name) +#define IAVF_TRACE_NAME(trace_name) _IAVF_TRACE_NAME(trace_name) -#define i40e_trace(trace_name, args...) I40E_TRACE_NAME(trace_name)(args) +#define iavf_trace(trace_name, args...) 
IAVF_TRACE_NAME(trace_name)(args) -#define i40e_trace_enabled(trace_name) I40E_TRACE_NAME(trace_name##_enabled)() +#define iavf_trace_enabled(trace_name) IAVF_TRACE_NAME(trace_name##_enabled)() /* Events common to PF and VF. Corresponding versions will be defined - * for both, named trace_i40e_* and trace_i40evf_*. The i40e_trace() + * for both, named trace_iavf_* and trace_iavf_*. The iavf_trace() * macro above will select the right trace point name for the driver * being built from shared code. */ /* Events related to a vsi & ring */ DECLARE_EVENT_CLASS( - i40evf_tx_template, + iavf_tx_template, - TP_PROTO(struct i40e_ring *ring, - struct i40e_tx_desc *desc, - struct i40e_tx_buffer *buf), + TP_PROTO(struct iavf_ring *ring, + struct iavf_tx_desc *desc, + struct iavf_tx_buffer *buf), TP_ARGS(ring, desc, buf), @@ -93,26 +93,26 @@ DECLARE_EVENT_CLASS( ); DEFINE_EVENT( - i40evf_tx_template, i40evf_clean_tx_irq, - TP_PROTO(struct i40e_ring *ring, - struct i40e_tx_desc *desc, - struct i40e_tx_buffer *buf), + iavf_tx_template, iavf_clean_tx_irq, + TP_PROTO(struct iavf_ring *ring, + struct iavf_tx_desc *desc, + struct iavf_tx_buffer *buf), TP_ARGS(ring, desc, buf)); DEFINE_EVENT( - i40evf_tx_template, i40evf_clean_tx_irq_unmap, - TP_PROTO(struct i40e_ring *ring, - struct i40e_tx_desc *desc, - struct i40e_tx_buffer *buf), + iavf_tx_template, iavf_clean_tx_irq_unmap, + TP_PROTO(struct iavf_ring *ring, + struct iavf_tx_desc *desc, + struct iavf_tx_buffer *buf), TP_ARGS(ring, desc, buf)); DECLARE_EVENT_CLASS( - i40evf_rx_template, + iavf_rx_template, - TP_PROTO(struct i40e_ring *ring, - union i40e_32byte_rx_desc *desc, + TP_PROTO(struct iavf_ring *ring, + union iavf_32byte_rx_desc *desc, struct sk_buff *skb), TP_ARGS(ring, desc, skb), @@ -138,26 +138,26 @@ DECLARE_EVENT_CLASS( ); DEFINE_EVENT( - i40evf_rx_template, i40evf_clean_rx_irq, - TP_PROTO(struct i40e_ring *ring, - union i40e_32byte_rx_desc *desc, + iavf_rx_template, iavf_clean_rx_irq, + TP_PROTO(struct iavf_ring *ring, + union iavf_32byte_rx_desc *desc, struct sk_buff *skb), TP_ARGS(ring, desc, skb)); DEFINE_EVENT( - i40evf_rx_template, i40evf_clean_rx_irq_rx, - TP_PROTO(struct i40e_ring *ring, - union i40e_32byte_rx_desc *desc, + iavf_rx_template, iavf_clean_rx_irq_rx, + TP_PROTO(struct iavf_ring *ring, + union iavf_32byte_rx_desc *desc, struct sk_buff *skb), TP_ARGS(ring, desc, skb)); DECLARE_EVENT_CLASS( - i40evf_xmit_template, + iavf_xmit_template, TP_PROTO(struct sk_buff *skb, - struct i40e_ring *ring), + struct iavf_ring *ring), TP_ARGS(skb, ring), @@ -180,23 +180,23 @@ DECLARE_EVENT_CLASS( ); DEFINE_EVENT( - i40evf_xmit_template, i40evf_xmit_frame_ring, + iavf_xmit_template, iavf_xmit_frame_ring, TP_PROTO(struct sk_buff *skb, - struct i40e_ring *ring), + struct iavf_ring *ring), TP_ARGS(skb, ring)); DEFINE_EVENT( - i40evf_xmit_template, i40evf_xmit_frame_ring_drop, + iavf_xmit_template, iavf_xmit_frame_ring_drop, TP_PROTO(struct sk_buff *skb, - struct i40e_ring *ring), + struct iavf_ring *ring), TP_ARGS(skb, ring)); /* Events unique to the VF. */ -#endif /* _I40E_TRACE_H_ */ -/* This must be outside ifdef _I40E_TRACE_H */ +#endif /* _IAVF_TRACE_H_ */ +/* This must be outside ifdef _IAVF_TRACE_H */ /* This trace include file is not located in the .../include/trace * with the kernel tracepoint definitions, because we're a loadable @@ -205,5 +205,5 @@ DEFINE_EVENT( #undef TRACE_INCLUDE_PATH #define TRACE_INCLUDE_PATH . 
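The _IAVF_TRACE_NAME()/IAVF_TRACE_NAME() pair above is the standard two-step token paste: the outer macro expands its arguments before ## glues the final symbol, which is what lets shared code swap the driver prefix per build. A compiler-only sketch of the trick, with a hypothetical tracepoint standing in for the real ones:

#include <stdio.h>

#define DRIVER iavf	/* in shared code this token differs per driver build */

/* Two levels: TRACE_NAME() expands DRIVER first, then _TRACE_NAME()
 * pastes the expanded tokens. A single-level paste would produce the
 * literal symbol trace_DRIVER_xmit instead. */
#define _TRACE_NAME(drv, name)	trace_ ## drv ## _ ## name
#define TRACE_NAME(drv, name)	_TRACE_NAME(drv, name)
#define my_trace(name, ...)	TRACE_NAME(DRIVER, name)(__VA_ARGS__)

static void trace_iavf_xmit(int qid) { printf("xmit on queue %d\n", qid); }

int main(void)
{
	my_trace(xmit, 3);	/* resolves to trace_iavf_xmit(3) */
	return 0;
}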
#undef TRACE_INCLUDE_FILE -#define TRACE_INCLUDE_FILE i40e_trace +#define TRACE_INCLUDE_FILE iavf_trace #include <trace/define_trace.h> diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c index a9730711e257..edc349f49748 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c @@ -4,32 +4,32 @@ #include <linux/prefetch.h> #include <net/busy_poll.h> -#include "i40evf.h" -#include "i40e_trace.h" -#include "i40e_prototype.h" +#include "iavf.h" +#include "iavf_trace.h" +#include "iavf_prototype.h" static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size, u32 td_tag) { - return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA | - ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) | - ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) | - ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) | - ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT)); + return cpu_to_le64(IAVF_TX_DESC_DTYPE_DATA | + ((u64)td_cmd << IAVF_TXD_QW1_CMD_SHIFT) | + ((u64)td_offset << IAVF_TXD_QW1_OFFSET_SHIFT) | + ((u64)size << IAVF_TXD_QW1_TX_BUF_SZ_SHIFT) | + ((u64)td_tag << IAVF_TXD_QW1_L2TAG1_SHIFT)); } -#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS) +#define IAVF_TXD_CMD (IAVF_TX_DESC_CMD_EOP | IAVF_TX_DESC_CMD_RS) /** - * i40e_unmap_and_free_tx_resource - Release a Tx buffer + * iavf_unmap_and_free_tx_resource - Release a Tx buffer * @ring: the ring that owns the buffer * @tx_buffer: the buffer to free **/ -static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring, - struct i40e_tx_buffer *tx_buffer) +static void iavf_unmap_and_free_tx_resource(struct iavf_ring *ring, + struct iavf_tx_buffer *tx_buffer) { if (tx_buffer->skb) { - if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB) + if (tx_buffer->tx_flags & IAVF_TX_FLAGS_FD_SB) kfree(tx_buffer->raw_buf); else dev_kfree_skb_any(tx_buffer->skb); @@ -52,10 +52,10 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring, } /** - * i40evf_clean_tx_ring - Free any empty Tx buffers + * iavf_clean_tx_ring - Free any empty Tx buffers * @tx_ring: ring to be cleaned **/ -void i40evf_clean_tx_ring(struct i40e_ring *tx_ring) +void iavf_clean_tx_ring(struct iavf_ring *tx_ring) { unsigned long bi_size; u16 i; @@ -66,9 +66,9 @@ void i40evf_clean_tx_ring(struct i40e_ring *tx_ring) /* Free all the Tx ring sk_buffs */ for (i = 0; i < tx_ring->count; i++) - i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]); + iavf_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]); - bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count; + bi_size = sizeof(struct iavf_tx_buffer) * tx_ring->count; memset(tx_ring->tx_bi, 0, bi_size); /* Zero out the descriptor ring */ @@ -85,14 +85,14 @@ void i40evf_clean_tx_ring(struct i40e_ring *tx_ring) } /** - * i40evf_free_tx_resources - Free Tx resources per queue + * iavf_free_tx_resources - Free Tx resources per queue * @tx_ring: Tx descriptor ring for a specific queue * * Free all transmit software resources **/ -void i40evf_free_tx_resources(struct i40e_ring *tx_ring) +void iavf_free_tx_resources(struct iavf_ring *tx_ring) { - i40evf_clean_tx_ring(tx_ring); + iavf_clean_tx_ring(tx_ring); kfree(tx_ring->tx_bi); tx_ring->tx_bi = NULL; @@ -104,14 +104,14 @@ void i40evf_free_tx_resources(struct i40e_ring *tx_ring) } /** - * i40evf_get_tx_pending - how many Tx descriptors not processed + * iavf_get_tx_pending - how many Tx descriptors not processed * @ring: the ring of descriptors * @in_sw: is tx_pending being checked in SW or HW * * 
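build_ctob() above ORs four fields into the 64-bit cmd_type_offset_bsz descriptor word. A host-side model of the packing; the shift values here are illustrative placeholders, since the real ones live in type headers outside this hunk:

#include <stdint.h>
#include <stdio.h>

#define DTYPE_DATA	0ULL	/* assumed descriptor-type encoding */
#define CMD_SHIFT	4	/* placeholder field positions */
#define OFFSET_SHIFT	16
#define BUF_SZ_SHIFT	34
#define L2TAG1_SHIFT	48

/* Same shape as build_ctob(): widen to 64 bits, shift, OR together. */
static uint64_t pack_ctob(uint32_t cmd, uint32_t off,
			  unsigned int size, uint32_t tag)
{
	return DTYPE_DATA |
	       ((uint64_t)cmd << CMD_SHIFT) |
	       ((uint64_t)off << OFFSET_SHIFT) |
	       ((uint64_t)size << BUF_SZ_SHIFT) |
	       ((uint64_t)tag << L2TAG1_SHIFT);
}

int main(void)
{
	/* EOP|RS style command, no offsets, 1500-byte buffer, no VLAN tag */
	printf("0x%016llx\n", (unsigned long long)pack_ctob(0x3, 0, 1500, 0));
	return 0;
}

The (u64) casts before each shift are load-bearing: shifting a 32-bit value by 34 or 48 without widening it first is undefined behavior in C.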
Since there is no access to the ring head register * in XL710, we need to use our local copies **/ -u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw) +u32 iavf_get_tx_pending(struct iavf_ring *ring, bool in_sw) { u32 head, tail; @@ -126,15 +126,15 @@ u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw) } /** - * i40evf_detect_recover_hung - Function to detect and recover hung_queues + * iavf_detect_recover_hung - Function to detect and recover hung_queues * @vsi: pointer to vsi struct with tx queues * * VSI has netdev and netdev has TX queues. This function is to check each of * those TX queues if they are hung, trigger recovery by issuing SW interrupt. **/ -void i40evf_detect_recover_hung(struct i40e_vsi *vsi) +void iavf_detect_recover_hung(struct iavf_vsi *vsi) { - struct i40e_ring *tx_ring = NULL; + struct iavf_ring *tx_ring = NULL; struct net_device *netdev; unsigned int i; int packets; @@ -142,7 +142,7 @@ void i40evf_detect_recover_hung(struct i40e_vsi *vsi) if (!vsi) return; - if (test_bit(__I40E_VSI_DOWN, vsi->state)) + if (test_bit(__IAVF_VSI_DOWN, vsi->state)) return; netdev = vsi->netdev; @@ -164,16 +164,16 @@ void i40evf_detect_recover_hung(struct i40e_vsi *vsi) */ packets = tx_ring->stats.packets & INT_MAX; if (tx_ring->tx_stats.prev_pkt_ctr == packets) { - i40evf_force_wb(vsi, tx_ring->q_vector); + iavf_force_wb(vsi, tx_ring->q_vector); continue; } /* Memory barrier between read of packet count and call - * to i40evf_get_tx_pending() + * to iavf_get_tx_pending() */ smp_rmb(); tx_ring->tx_stats.prev_pkt_ctr = - i40evf_get_tx_pending(tx_ring, true) ? packets : -1; + iavf_get_tx_pending(tx_ring, true) ? packets : -1; } } } @@ -181,28 +181,28 @@ void i40evf_detect_recover_hung(struct i40e_vsi *vsi) #define WB_STRIDE 4 /** - * i40e_clean_tx_irq - Reclaim resources after transmit completes + * iavf_clean_tx_irq - Reclaim resources after transmit completes * @vsi: the VSI we care about * @tx_ring: Tx ring to clean * @napi_budget: Used to determine if we are in netpoll * * Returns true if there's any budget left (e.g. 
the clean is finished) **/ -static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, - struct i40e_ring *tx_ring, int napi_budget) +static bool iavf_clean_tx_irq(struct iavf_vsi *vsi, + struct iavf_ring *tx_ring, int napi_budget) { u16 i = tx_ring->next_to_clean; - struct i40e_tx_buffer *tx_buf; - struct i40e_tx_desc *tx_desc; + struct iavf_tx_buffer *tx_buf; + struct iavf_tx_desc *tx_desc; unsigned int total_bytes = 0, total_packets = 0; unsigned int budget = vsi->work_limit; tx_buf = &tx_ring->tx_bi[i]; - tx_desc = I40E_TX_DESC(tx_ring, i); + tx_desc = IAVF_TX_DESC(tx_ring, i); i -= tx_ring->count; do { - struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch; + struct iavf_tx_desc *eop_desc = tx_buf->next_to_watch; /* if next_to_watch is not set then there is no work pending */ if (!eop_desc) @@ -211,10 +211,10 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, /* prevent any other reads prior to eop_desc */ smp_rmb(); - i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf); + iavf_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf); /* if the descriptor isn't done, no work yet to do */ if (!(eop_desc->cmd_type_offset_bsz & - cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE))) + cpu_to_le64(IAVF_TX_DESC_DTYPE_DESC_DONE))) break; /* clear next_to_watch to prevent false hangs */ @@ -239,7 +239,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, /* unmap remaining buffers */ while (tx_desc != eop_desc) { - i40e_trace(clean_tx_irq_unmap, + iavf_trace(clean_tx_irq_unmap, tx_ring, tx_desc, tx_buf); tx_buf++; @@ -248,7 +248,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, if (unlikely(!i)) { i -= tx_ring->count; tx_buf = tx_ring->tx_bi; - tx_desc = I40E_TX_DESC(tx_ring, 0); + tx_desc = IAVF_TX_DESC(tx_ring, 0); } /* unmap any remaining paged data */ @@ -268,7 +268,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, if (unlikely(!i)) { i -= tx_ring->count; tx_buf = tx_ring->tx_bi; - tx_desc = I40E_TX_DESC(tx_ring, 0); + tx_desc = IAVF_TX_DESC(tx_ring, 0); } prefetch(tx_desc); @@ -286,18 +286,18 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, tx_ring->q_vector->tx.total_bytes += total_bytes; tx_ring->q_vector->tx.total_packets += total_packets; - if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) { + if (tx_ring->flags & IAVF_TXR_FLAGS_WB_ON_ITR) { /* check to see if there are < 4 descriptors * waiting to be written back, then kick the hardware to force * them to be written back in case we stay in NAPI. * In this mode on X722 we do not enable Interrupt. */ - unsigned int j = i40evf_get_tx_pending(tx_ring, false); + unsigned int j = iavf_get_tx_pending(tx_ring, false); if (budget && ((j / WB_STRIDE) == 0) && (j > 0) && - !test_bit(__I40E_VSI_DOWN, vsi->state) && - (I40E_DESC_UNUSED(tx_ring) != tx_ring->count)) + !test_bit(__IAVF_VSI_DOWN, vsi->state) && + (IAVF_DESC_UNUSED(tx_ring) != tx_ring->count)) tx_ring->arm_wb = true; } @@ -307,14 +307,14 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, #define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2)) if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && - (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { + (IAVF_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { /* Make sure that anybody stopping the queue after this * sees the new next_to_clean. 
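The cleaning loop above biases its index by the ring size (i -= tx_ring->count before the do/while) so the wrap test is a cheap zero check instead of a compare against count. A standalone model of the idiom:

#include <stdio.h>

#define RING_COUNT 8

int main(void)
{
	int hits[RING_COUNT] = { 0 };
	unsigned short next_to_clean = 5;
	int i = next_to_clean - RING_COUNT;	/* runs from -COUNT toward 0 */
	int steps = 6;				/* walk across the wrap once */

	while (steps--) {
		hits[i + RING_COUNT]++;		/* un-bias to index the ring */
		i++;
		if (!i)				/* zero means "wrapped" */
			i -= RING_COUNT;	/* re-bias to the ring start */
	}
	printf("stopped at slot %d\n", i + RING_COUNT);	/* slot 3 */
	return 0;
}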
*/ smp_mb(); if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index) && - !test_bit(__I40E_VSI_DOWN, vsi->state)) { + !test_bit(__IAVF_VSI_DOWN, vsi->state)) { netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index); ++tx_ring->tx_stats.restart_queue; @@ -325,75 +325,75 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, } /** - * i40evf_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled + * iavf_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled * @vsi: the VSI we care about * @q_vector: the vector on which to enable writeback * **/ -static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi, - struct i40e_q_vector *q_vector) +static void iavf_enable_wb_on_itr(struct iavf_vsi *vsi, + struct iavf_q_vector *q_vector) { u16 flags = q_vector->tx.ring[0].flags; u32 val; - if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR)) + if (!(flags & IAVF_TXR_FLAGS_WB_ON_ITR)) return; if (q_vector->arm_wb_state) return; - val = I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK | - I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK; /* set noitr */ + val = IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK | + IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK; /* set noitr */ wr32(&vsi->back->hw, - I40E_VFINT_DYN_CTLN1(q_vector->reg_idx), val); + IAVF_VFINT_DYN_CTLN1(q_vector->reg_idx), val); q_vector->arm_wb_state = true; } /** - * i40evf_force_wb - Issue SW Interrupt so HW does a wb + * iavf_force_wb - Issue SW Interrupt so HW does a wb * @vsi: the VSI we care about * @q_vector: the vector on which to force writeback * **/ -void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) +void iavf_force_wb(struct iavf_vsi *vsi, struct iavf_q_vector *q_vector) { - u32 val = I40E_VFINT_DYN_CTLN1_INTENA_MASK | - I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */ - I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK | - I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK + u32 val = IAVF_VFINT_DYN_CTLN1_INTENA_MASK | + IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */ + IAVF_VFINT_DYN_CTLN1_SWINT_TRIG_MASK | + IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK /* allow 00 to be written to the index */; wr32(&vsi->back->hw, - I40E_VFINT_DYN_CTLN1(q_vector->reg_idx), + IAVF_VFINT_DYN_CTLN1(q_vector->reg_idx), val); } -static inline bool i40e_container_is_rx(struct i40e_q_vector *q_vector, - struct i40e_ring_container *rc) +static inline bool iavf_container_is_rx(struct iavf_q_vector *q_vector, + struct iavf_ring_container *rc) { return &q_vector->rx == rc; } -static inline unsigned int i40e_itr_divisor(struct i40e_q_vector *q_vector) +static inline unsigned int iavf_itr_divisor(struct iavf_q_vector *q_vector) { unsigned int divisor; switch (q_vector->adapter->link_speed) { case I40E_LINK_SPEED_40GB: - divisor = I40E_ITR_ADAPTIVE_MIN_INC * 1024; + divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 1024; break; case I40E_LINK_SPEED_25GB: case I40E_LINK_SPEED_20GB: - divisor = I40E_ITR_ADAPTIVE_MIN_INC * 512; + divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 512; break; default: case I40E_LINK_SPEED_10GB: - divisor = I40E_ITR_ADAPTIVE_MIN_INC * 256; + divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 256; break; case I40E_LINK_SPEED_1GB: case I40E_LINK_SPEED_100MB: - divisor = I40E_ITR_ADAPTIVE_MIN_INC * 32; + divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 32; break; } @@ -401,7 +401,7 @@ static inline unsigned int i40e_itr_divisor(struct i40e_q_vector *q_vector) } /** - * i40e_update_itr - update the dynamic ITR value based on statistics + * iavf_update_itr - update the dynamic ITR value based on statistics * @q_vector: structure containing interrupt and ring information * @rc: structure 
containing ring performance data * @@ -413,8 +413,8 @@ static inline unsigned int i40e_itr_divisor(struct i40e_q_vector *q_vector) * on testing data as well as attempting to minimize response time * while increasing bulk throughput. **/ -static void i40e_update_itr(struct i40e_q_vector *q_vector, - struct i40e_ring_container *rc) +static void iavf_update_itr(struct iavf_q_vector *q_vector, + struct iavf_ring_container *rc) { unsigned int avg_wire_size, packets, bytes, itr; unsigned long next_update = jiffies; @@ -428,9 +428,9 @@ static void i40e_update_itr(struct i40e_q_vector *q_vector, /* For Rx we want to push the delay up and default to low latency. * for Tx we want to pull the delay down and default to high latency. */ - itr = i40e_container_is_rx(q_vector, rc) ? - I40E_ITR_ADAPTIVE_MIN_USECS | I40E_ITR_ADAPTIVE_LATENCY : - I40E_ITR_ADAPTIVE_MAX_USECS | I40E_ITR_ADAPTIVE_LATENCY; + itr = iavf_container_is_rx(q_vector, rc) ? + IAVF_ITR_ADAPTIVE_MIN_USECS | IAVF_ITR_ADAPTIVE_LATENCY : + IAVF_ITR_ADAPTIVE_MAX_USECS | IAVF_ITR_ADAPTIVE_LATENCY; /* If we didn't update within up to 1 - 2 jiffies we can assume * that either packets are coming in so slow there hasn't been @@ -454,15 +454,15 @@ static void i40e_update_itr(struct i40e_q_vector *q_vector, packets = rc->total_packets; bytes = rc->total_bytes; - if (i40e_container_is_rx(q_vector, rc)) { + if (iavf_container_is_rx(q_vector, rc)) { /* If Rx there are 1 to 4 packets and bytes are less than * 9000 assume insufficient data to use bulk rate limiting * approach unless Tx is already in bulk rate limiting. We * are likely latency driven. */ if (packets && packets < 4 && bytes < 9000 && - (q_vector->tx.target_itr & I40E_ITR_ADAPTIVE_LATENCY)) { - itr = I40E_ITR_ADAPTIVE_LATENCY; + (q_vector->tx.target_itr & IAVF_ITR_ADAPTIVE_LATENCY)) { + itr = IAVF_ITR_ADAPTIVE_LATENCY; goto adjust_by_size; } } else if (packets < 4) { @@ -471,15 +471,15 @@ static void i40e_update_itr(struct i40e_q_vector *q_vector, * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so * that the Rx can relax. */ - if (rc->target_itr == I40E_ITR_ADAPTIVE_MAX_USECS && - (q_vector->rx.target_itr & I40E_ITR_MASK) == - I40E_ITR_ADAPTIVE_MAX_USECS) + if (rc->target_itr == IAVF_ITR_ADAPTIVE_MAX_USECS && + (q_vector->rx.target_itr & IAVF_ITR_MASK) == + IAVF_ITR_ADAPTIVE_MAX_USECS) goto clear_counts; } else if (packets > 32) { /* If we have processed over 32 packets in a single interrupt * for Tx assume we need to switch over to "bulk" mode. */ - rc->target_itr &= ~I40E_ITR_ADAPTIVE_LATENCY; + rc->target_itr &= ~IAVF_ITR_ADAPTIVE_LATENCY; } /* We have no packets to actually measure against. This means @@ -491,17 +491,17 @@ static void i40e_update_itr(struct i40e_q_vector *q_vector, * fixed amount. */ if (packets < 56) { - itr = rc->target_itr + I40E_ITR_ADAPTIVE_MIN_INC; - if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) { - itr &= I40E_ITR_ADAPTIVE_LATENCY; - itr += I40E_ITR_ADAPTIVE_MAX_USECS; + itr = rc->target_itr + IAVF_ITR_ADAPTIVE_MIN_INC; + if ((itr & IAVF_ITR_MASK) > IAVF_ITR_ADAPTIVE_MAX_USECS) { + itr &= IAVF_ITR_ADAPTIVE_LATENCY; + itr += IAVF_ITR_ADAPTIVE_MAX_USECS; } goto clear_counts; } if (packets <= 256) { itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr); - itr &= I40E_ITR_MASK; + itr &= IAVF_ITR_MASK; /* Between 56 and 112 is our "goldilocks" zone where we are * working out "just right". Just report that our current @@ -516,9 +516,9 @@ static void i40e_update_itr(struct i40e_q_vector *q_vector, * in half per interrupt. 
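The packet-count branches above form a small decision tree. A compact standalone model of that control flow, with the thresholds as described in the comments; real ITR values carry latency flags in their high bits, simplified here to plain microseconds:

#include <stdio.h>

enum { ITR_MIN = 2, ITR_MAX = 126, ITR_INC = 2 };	/* assumed encodings */

static unsigned int next_itr(unsigned int cur, unsigned int packets)
{
	if (packets < 56)	/* light load: back the interrupt rate off */
		return cur + ITR_INC > ITR_MAX ? ITR_MAX : cur + ITR_INC;
	if (packets <= 112)	/* "goldilocks" zone: keep the current rate */
		return cur;
	/* heavier than that: halve the delay, doubling interrupts/sec */
	return cur / 2 < ITR_MIN ? ITR_MIN : cur / 2;
}

int main(void)
{
	printf("%u %u %u\n", next_itr(50, 8), next_itr(50, 100),
	       next_itr(50, 300));	/* 52 50 25 */
	return 0;
}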
*/ itr /= 2; - itr &= I40E_ITR_MASK; - if (itr < I40E_ITR_ADAPTIVE_MIN_USECS) - itr = I40E_ITR_ADAPTIVE_MIN_USECS; + itr &= IAVF_ITR_MASK; + if (itr < IAVF_ITR_ADAPTIVE_MIN_USECS) + itr = IAVF_ITR_ADAPTIVE_MIN_USECS; goto clear_counts; } @@ -529,7 +529,7 @@ static void i40e_update_itr(struct i40e_q_vector *q_vector, * though for smaller packet sizes there isn't much we can do as * NAPI polling will likely be kicking in sooner rather than later. */ - itr = I40E_ITR_ADAPTIVE_BULK; + itr = IAVF_ITR_ADAPTIVE_BULK; adjust_by_size: /* If packet counts are 256 or greater we can assume we have a gross @@ -577,7 +577,7 @@ adjust_by_size: /* If we are in low latency mode halve our delay which doubles the * rate to somewhere between 100K to 16K ints/sec */ - if (itr & I40E_ITR_ADAPTIVE_LATENCY) + if (itr & IAVF_ITR_ADAPTIVE_LATENCY) avg_wire_size /= 2; /* Resultant value is 256 times larger than it needs to be. This @@ -587,12 +587,12 @@ adjust_by_size: * Use addition as we have already recorded the new latency flag * for the ITR value. */ - itr += DIV_ROUND_UP(avg_wire_size, i40e_itr_divisor(q_vector)) * - I40E_ITR_ADAPTIVE_MIN_INC; + itr += DIV_ROUND_UP(avg_wire_size, iavf_itr_divisor(q_vector)) * + IAVF_ITR_ADAPTIVE_MIN_INC; - if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) { - itr &= I40E_ITR_ADAPTIVE_LATENCY; - itr += I40E_ITR_ADAPTIVE_MAX_USECS; + if ((itr & IAVF_ITR_MASK) > IAVF_ITR_ADAPTIVE_MAX_USECS) { + itr &= IAVF_ITR_ADAPTIVE_LATENCY; + itr += IAVF_ITR_ADAPTIVE_MAX_USECS; } clear_counts: @@ -607,12 +607,12 @@ clear_counts: } /** - * i40evf_setup_tx_descriptors - Allocate the Tx descriptors + * iavf_setup_tx_descriptors - Allocate the Tx descriptors * @tx_ring: the tx ring to set up * * Return 0 on success, negative on error **/ -int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring) +int iavf_setup_tx_descriptors(struct iavf_ring *tx_ring) { struct device *dev = tx_ring->dev; int bi_size; @@ -622,13 +622,13 @@ int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring) /* warn if we are about to overwrite the pointer */ WARN_ON(tx_ring->tx_bi); - bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count; + bi_size = sizeof(struct iavf_tx_buffer) * tx_ring->count; tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL); if (!tx_ring->tx_bi) goto err; /* round up to nearest 4K */ - tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc); + tx_ring->size = tx_ring->count * sizeof(struct iavf_tx_desc); tx_ring->size = ALIGN(tx_ring->size, 4096); tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, &tx_ring->dma, GFP_KERNEL); @@ -650,10 +650,10 @@ err: } /** - * i40evf_clean_rx_ring - Free Rx buffers + * iavf_clean_rx_ring - Free Rx buffers * @rx_ring: ring to be cleaned **/ -void i40evf_clean_rx_ring(struct i40e_ring *rx_ring) +void iavf_clean_rx_ring(struct iavf_ring *rx_ring) { unsigned long bi_size; u16 i; @@ -669,7 +669,7 @@ void i40evf_clean_rx_ring(struct i40e_ring *rx_ring) /* Free all the Rx ring sk_buffs */ for (i = 0; i < rx_ring->count; i++) { - struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i]; + struct iavf_rx_buffer *rx_bi = &rx_ring->rx_bi[i]; if (!rx_bi->page) continue; @@ -685,9 +685,9 @@ void i40evf_clean_rx_ring(struct i40e_ring *rx_ring) /* free resources associated with mapping */ dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma, - i40e_rx_pg_size(rx_ring), + iavf_rx_pg_size(rx_ring), DMA_FROM_DEVICE, - I40E_RX_DMA_ATTR); + IAVF_RX_DMA_ATTR); __page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias); @@ -695,7 +695,7 @@ void i40evf_clean_rx_ring(struct i40e_ring 
*rx_ring) rx_bi->page_offset = 0; } - bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count; + bi_size = sizeof(struct iavf_rx_buffer) * rx_ring->count; memset(rx_ring->rx_bi, 0, bi_size); /* Zero out the descriptor ring */ @@ -707,14 +707,14 @@ void i40evf_clean_rx_ring(struct i40e_ring *rx_ring) } /** - * i40evf_free_rx_resources - Free Rx resources + * iavf_free_rx_resources - Free Rx resources * @rx_ring: ring to clean the resources from * * Free all receive software resources **/ -void i40evf_free_rx_resources(struct i40e_ring *rx_ring) +void iavf_free_rx_resources(struct iavf_ring *rx_ring) { - i40evf_clean_rx_ring(rx_ring); + iavf_clean_rx_ring(rx_ring); kfree(rx_ring->rx_bi); rx_ring->rx_bi = NULL; @@ -726,19 +726,19 @@ void i40evf_free_rx_resources(struct i40e_ring *rx_ring) } /** - * i40evf_setup_rx_descriptors - Allocate Rx descriptors + * iavf_setup_rx_descriptors - Allocate Rx descriptors * @rx_ring: Rx descriptor ring (for a specific queue) to setup * * Returns 0 on success, negative on failure **/ -int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring) +int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring) { struct device *dev = rx_ring->dev; int bi_size; /* warn if we are about to overwrite the pointer */ WARN_ON(rx_ring->rx_bi); - bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count; + bi_size = sizeof(struct iavf_rx_buffer) * rx_ring->count; rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL); if (!rx_ring->rx_bi) goto err; @@ -746,7 +746,7 @@ int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring) u64_stats_init(&rx_ring->syncp); /* Round up to nearest 4K */ - rx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc); + rx_ring->size = rx_ring->count * sizeof(union iavf_32byte_rx_desc); rx_ring->size = ALIGN(rx_ring->size, 4096); rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, &rx_ring->dma, GFP_KERNEL); @@ -769,11 +769,11 @@ err: } /** - * i40e_release_rx_desc - Store the new tail and head values + * iavf_release_rx_desc - Store the new tail and head values * @rx_ring: ring to bump * @val: new head index **/ -static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) +static inline void iavf_release_rx_desc(struct iavf_ring *rx_ring, u32 val) { rx_ring->next_to_use = val; @@ -790,26 +790,26 @@ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) } /** - * i40e_rx_offset - Return expected offset into page to access data + * iavf_rx_offset - Return expected offset into page to access data * @rx_ring: Ring we are requesting offset of * * Returns the offset value for ring into the data buffer. */ -static inline unsigned int i40e_rx_offset(struct i40e_ring *rx_ring) +static inline unsigned int iavf_rx_offset(struct iavf_ring *rx_ring) { - return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0; + return ring_uses_build_skb(rx_ring) ? IAVF_SKB_PAD : 0; } /** - * i40e_alloc_mapped_page - recycle or make a new page + * iavf_alloc_mapped_page - recycle or make a new page * @rx_ring: ring to use * @bi: rx_buffer struct to modify * * Returns true if the page was successfully allocated or * reused. 
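Both descriptor arrays above are rounded up to a 4 KiB multiple before the coherent DMA allocation. A worked model of the sizing arithmetic for a 512-entry Rx ring of 32-byte descriptors (the counts are example inputs, not driver defaults):

#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))	/* a: power of two */

int main(void)
{
	unsigned int desc = 32;	/* sizeof(union iavf_32byte_rx_desc) */

	/* 512 * 32 = 16384, already 4 KiB aligned; 513 entries round up. */
	printf("%u -> %u\n", 512u * desc, ALIGN_UP(512u * desc, 4096u));
	printf("%u -> %u\n", 513u * desc, ALIGN_UP(513u * desc, 4096u));
	return 0;
}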
**/ -static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring, - struct i40e_rx_buffer *bi) +static bool iavf_alloc_mapped_page(struct iavf_ring *rx_ring, + struct iavf_rx_buffer *bi) { struct page *page = bi->page; dma_addr_t dma; @@ -821,7 +821,7 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring, } /* alloc new page for storage */ - page = dev_alloc_pages(i40e_rx_pg_order(rx_ring)); + page = dev_alloc_pages(iavf_rx_pg_order(rx_ring)); if (unlikely(!page)) { rx_ring->rx_stats.alloc_page_failed++; return false; @@ -829,22 +829,22 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring, /* map page for use */ dma = dma_map_page_attrs(rx_ring->dev, page, 0, - i40e_rx_pg_size(rx_ring), + iavf_rx_pg_size(rx_ring), DMA_FROM_DEVICE, - I40E_RX_DMA_ATTR); + IAVF_RX_DMA_ATTR); /* if mapping failed free memory back to system since * there isn't much point in holding memory we can't use */ if (dma_mapping_error(rx_ring->dev, dma)) { - __free_pages(page, i40e_rx_pg_order(rx_ring)); + __free_pages(page, iavf_rx_pg_order(rx_ring)); rx_ring->rx_stats.alloc_page_failed++; return false; } bi->dma = dma; bi->page = page; - bi->page_offset = i40e_rx_offset(rx_ring); + bi->page_offset = iavf_rx_offset(rx_ring); /* initialize pagecnt_bias to 1 representing we fully own page */ bi->pagecnt_bias = 1; @@ -853,15 +853,15 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring, } /** - * i40e_receive_skb - Send a completed packet up the stack + * iavf_receive_skb - Send a completed packet up the stack * @rx_ring: rx ring in play * @skb: packet to send up * @vlan_tag: vlan tag for packet **/ -static void i40e_receive_skb(struct i40e_ring *rx_ring, +static void iavf_receive_skb(struct iavf_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag) { - struct i40e_q_vector *q_vector = rx_ring->q_vector; + struct iavf_q_vector *q_vector = rx_ring->q_vector; if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && (vlan_tag & VLAN_VID_MASK)) @@ -871,27 +871,27 @@ static void i40e_receive_skb(struct i40e_ring *rx_ring, } /** - * i40evf_alloc_rx_buffers - Replace used receive buffers + * iavf_alloc_rx_buffers - Replace used receive buffers * @rx_ring: ring to place buffers on * @cleaned_count: number of buffers to replace * * Returns false if all allocations were successful, true if any fail **/ -bool i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count) +bool iavf_alloc_rx_buffers(struct iavf_ring *rx_ring, u16 cleaned_count) { u16 ntu = rx_ring->next_to_use; - union i40e_rx_desc *rx_desc; - struct i40e_rx_buffer *bi; + union iavf_rx_desc *rx_desc; + struct iavf_rx_buffer *bi; /* do nothing if no valid netdev defined */ if (!rx_ring->netdev || !cleaned_count) return false; - rx_desc = I40E_RX_DESC(rx_ring, ntu); + rx_desc = IAVF_RX_DESC(rx_ring, ntu); bi = &rx_ring->rx_bi[ntu]; do { - if (!i40e_alloc_mapped_page(rx_ring, bi)) + if (!iavf_alloc_mapped_page(rx_ring, bi)) goto no_buffers; /* sync the buffer for use by the device */ @@ -909,7 +909,7 @@ bool i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count) bi++; ntu++; if (unlikely(ntu == rx_ring->count)) { - rx_desc = I40E_RX_DESC(rx_ring, 0); + rx_desc = IAVF_RX_DESC(rx_ring, 0); bi = rx_ring->rx_bi; ntu = 0; } @@ -921,13 +921,13 @@ bool i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count) } while (cleaned_count); if (rx_ring->next_to_use != ntu) - i40e_release_rx_desc(rx_ring, ntu); + iavf_release_rx_desc(rx_ring, ntu); return false; no_buffers: if (rx_ring->next_to_use != ntu) - 
i40e_release_rx_desc(rx_ring, ntu); + iavf_release_rx_desc(rx_ring, ntu); /* make sure to come back via polling to try again after * allocation failure @@ -936,27 +936,27 @@ no_buffers: } /** - * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum + * iavf_rx_checksum - Indicate in skb if hw indicated a good cksum * @vsi: the VSI we care about * @skb: skb currently being received and modified * @rx_desc: the receive descriptor **/ -static inline void i40e_rx_checksum(struct i40e_vsi *vsi, +static inline void iavf_rx_checksum(struct iavf_vsi *vsi, struct sk_buff *skb, - union i40e_rx_desc *rx_desc) + union iavf_rx_desc *rx_desc) { - struct i40e_rx_ptype_decoded decoded; + struct iavf_rx_ptype_decoded decoded; u32 rx_error, rx_status; bool ipv4, ipv6; u8 ptype; u64 qword; qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); - ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT; - rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >> - I40E_RXD_QW1_ERROR_SHIFT; - rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> - I40E_RXD_QW1_STATUS_SHIFT; + ptype = (qword & IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT; + rx_error = (qword & IAVF_RXD_QW1_ERROR_MASK) >> + IAVF_RXD_QW1_ERROR_SHIFT; + rx_status = (qword & IAVF_RXD_QW1_STATUS_MASK) >> + IAVF_RXD_QW1_STATUS_SHIFT; decoded = decode_rx_desc_ptype(ptype); skb->ip_summed = CHECKSUM_NONE; @@ -968,45 +968,45 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, return; /* did the hardware decode the packet and checksum? */ - if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT))) + if (!(rx_status & BIT(IAVF_RX_DESC_STATUS_L3L4P_SHIFT))) return; /* both known and outer_ip must be set for the below code to work */ if (!(decoded.known && decoded.outer_ip)) return; - ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) && - (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4); - ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) && - (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6); + ipv4 = (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP) && + (decoded.outer_ip_ver == IAVF_RX_PTYPE_OUTER_IPV4); + ipv6 = (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP) && + (decoded.outer_ip_ver == IAVF_RX_PTYPE_OUTER_IPV6); if (ipv4 && - (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) | - BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT)))) + (rx_error & (BIT(IAVF_RX_DESC_ERROR_IPE_SHIFT) | + BIT(IAVF_RX_DESC_ERROR_EIPE_SHIFT)))) goto checksum_fail; /* likely incorrect csum if alternate IP extension headers found */ if (ipv6 && - rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT)) + rx_status & BIT(IAVF_RX_DESC_STATUS_IPV6EXADD_SHIFT)) /* don't increment checksum err here, non-fatal err */ return; /* there was some L4 error, count error and punt packet to the stack */ - if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT)) + if (rx_error & BIT(IAVF_RX_DESC_ERROR_L4E_SHIFT)) goto checksum_fail; /* handle packets that were not able to be checksummed due * to arrival speed, in this case the stack can compute * the csum. 
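iavf_rx_checksum() above carves the status, error, and ptype fields out of the descriptor's qword1 with the same mask-and-shift pattern each time. A standalone unpack of such a word; the field positions are illustrative, since the real layout constants live outside this hunk:

#include <stdint.h>
#include <stdio.h>

#define QW1_STATUS_SHIFT	0	/* illustrative positions */
#define QW1_STATUS_MASK		(0x7FFFFULL << QW1_STATUS_SHIFT)
#define QW1_ERROR_SHIFT		19
#define QW1_ERROR_MASK		(0xFFULL << QW1_ERROR_SHIFT)
#define QW1_PTYPE_SHIFT		30
#define QW1_PTYPE_MASK		(0xFFULL << QW1_PTYPE_SHIFT)

struct rx_fields { uint32_t status, error; uint8_t ptype; };

/* One 64-bit descriptor read yields all three fields. */
static struct rx_fields unpack_qword1(uint64_t qw)
{
	struct rx_fields f = {
		.status = (uint32_t)((qw & QW1_STATUS_MASK) >> QW1_STATUS_SHIFT),
		.error = (uint32_t)((qw & QW1_ERROR_MASK) >> QW1_ERROR_SHIFT),
		.ptype = (uint8_t)((qw & QW1_PTYPE_MASK) >> QW1_PTYPE_SHIFT),
	};
	return f;
}

int main(void)
{
	struct rx_fields f = unpack_qword1((4ULL << QW1_PTYPE_SHIFT) | 0x3ULL);

	printf("ptype=%u status=0x%x error=0x%x\n", f.ptype, f.status, f.error);
	return 0;
}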
 */
-	if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
+	if (rx_error & BIT(IAVF_RX_DESC_ERROR_PPRS_SHIFT))
 		return;
 
 	/* Only report checksum unnecessary for TCP, UDP, or SCTP */
 	switch (decoded.inner_prot) {
-	case I40E_RX_PTYPE_INNER_PROT_TCP:
-	case I40E_RX_PTYPE_INNER_PROT_UDP:
-	case I40E_RX_PTYPE_INNER_PROT_SCTP:
+	case IAVF_RX_PTYPE_INNER_PROT_TCP:
+	case IAVF_RX_PTYPE_INNER_PROT_UDP:
+	case IAVF_RX_PTYPE_INNER_PROT_SCTP:
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 		/* fall through */
 	default:
@@ -1020,56 +1020,56 @@ checksum_fail:
 }
 
 /**
- * i40e_ptype_to_htype - get a hash type
+ * iavf_ptype_to_htype - get a hash type
  * @ptype: the ptype value from the descriptor
  *
  * Returns a hash type to be used by skb_set_hash
  **/
-static inline int i40e_ptype_to_htype(u8 ptype)
+static inline int iavf_ptype_to_htype(u8 ptype)
 {
-	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
+	struct iavf_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
 
 	if (!decoded.known)
 		return PKT_HASH_TYPE_NONE;
 
-	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
-	    decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
+	if (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP &&
+	    decoded.payload_layer == IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY4)
 		return PKT_HASH_TYPE_L4;
-	else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
-		 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
+	else if (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP &&
+		 decoded.payload_layer == IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY3)
 		return PKT_HASH_TYPE_L3;
 	else
 		return PKT_HASH_TYPE_L2;
 }
 
 /**
- * i40e_rx_hash - set the hash value in the skb
+ * iavf_rx_hash - set the hash value in the skb
  * @ring: descriptor ring
  * @rx_desc: specific descriptor
  * @skb: skb currently being received and modified
  * @rx_ptype: Rx packet type
  **/
-static inline void i40e_rx_hash(struct i40e_ring *ring,
-				union i40e_rx_desc *rx_desc,
+static inline void iavf_rx_hash(struct iavf_ring *ring,
+				union iavf_rx_desc *rx_desc,
 				struct sk_buff *skb,
 				u8 rx_ptype)
 {
 	u32 hash;
 	const __le64 rss_mask =
-		cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
-			    I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
+		cpu_to_le64((u64)IAVF_RX_DESC_FLTSTAT_RSS_HASH <<
+			    IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT);
 
 	if (!(ring->netdev->features & NETIF_F_RXHASH))
 		return;
 
 	if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
 		hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
-		skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
+		skb_set_hash(skb, hash, iavf_ptype_to_htype(rx_ptype));
 	}
 }
 
 /**
- * i40evf_process_skb_fields - Populate skb header fields from Rx descriptor
+ * iavf_process_skb_fields - Populate skb header fields from Rx descriptor
  * @rx_ring: rx descriptor ring packet is being transacted on
  * @rx_desc: pointer to the EOP Rx descriptor
  * @skb: pointer to current skb being populated
@@ -1080,13 +1080,13 @@ static inline void i40e_rx_hash(struct i40e_ring *ring,
  * other fields within the skb.
**/ static inline -void i40evf_process_skb_fields(struct i40e_ring *rx_ring, - union i40e_rx_desc *rx_desc, struct sk_buff *skb, - u8 rx_ptype) +void iavf_process_skb_fields(struct iavf_ring *rx_ring, + union iavf_rx_desc *rx_desc, struct sk_buff *skb, + u8 rx_ptype) { - i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype); + iavf_rx_hash(rx_ring, rx_desc, skb, rx_ptype); - i40e_rx_checksum(rx_ring->vsi, skb, rx_desc); + iavf_rx_checksum(rx_ring->vsi, skb, rx_desc); skb_record_rx_queue(skb, rx_ring->queue_index); @@ -1095,7 +1095,7 @@ void i40evf_process_skb_fields(struct i40e_ring *rx_ring, } /** - * i40e_cleanup_headers - Correct empty headers + * iavf_cleanup_headers - Correct empty headers * @rx_ring: rx descriptor ring packet is being transacted on * @skb: pointer to current skb being fixed * @@ -1107,7 +1107,7 @@ void i40evf_process_skb_fields(struct i40e_ring *rx_ring, * * Returns true if an error was encountered and skb was freed. **/ -static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb) +static bool iavf_cleanup_headers(struct iavf_ring *rx_ring, struct sk_buff *skb) { /* if eth_skb_pad returns an error the skb was freed */ if (eth_skb_pad(skb)) @@ -1117,16 +1117,16 @@ static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb) } /** - * i40e_reuse_rx_page - page flip buffer and store it back on the ring + * iavf_reuse_rx_page - page flip buffer and store it back on the ring * @rx_ring: rx descriptor ring to store buffers on * @old_buff: donor buffer to have page reused * * Synchronizes page for reuse by the adapter **/ -static void i40e_reuse_rx_page(struct i40e_ring *rx_ring, - struct i40e_rx_buffer *old_buff) +static void iavf_reuse_rx_page(struct iavf_ring *rx_ring, + struct iavf_rx_buffer *old_buff) { - struct i40e_rx_buffer *new_buff; + struct iavf_rx_buffer *new_buff; u16 nta = rx_ring->next_to_alloc; new_buff = &rx_ring->rx_bi[nta]; @@ -1143,20 +1143,20 @@ static void i40e_reuse_rx_page(struct i40e_ring *rx_ring, } /** - * i40e_page_is_reusable - check if any reuse is possible + * iavf_page_is_reusable - check if any reuse is possible * @page: page struct to check * * A page is not reusable if it was allocated under low memory * conditions, or it's not in the same NUMA node as this CPU. */ -static inline bool i40e_page_is_reusable(struct page *page) +static inline bool iavf_page_is_reusable(struct page *page) { return (page_to_nid(page) == numa_mem_id()) && !page_is_pfmemalloc(page); } /** - * i40e_can_reuse_rx_page - Determine if this page can be reused by + * iavf_can_reuse_rx_page - Determine if this page can be reused by * the adapter for another receive * * @rx_buffer: buffer containing the page @@ -1182,13 +1182,13 @@ static inline bool i40e_page_is_reusable(struct page *page) * * In either case, if the page is reusable its refcount is increased. **/ -static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer) +static bool iavf_can_reuse_rx_page(struct iavf_rx_buffer *rx_buffer) { unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; struct page *page = rx_buffer->page; /* Is any reuse possible? 
*/ - if (unlikely(!i40e_page_is_reusable(page))) + if (unlikely(!iavf_page_is_reusable(page))) return false; #if (PAGE_SIZE < 8192) @@ -1196,9 +1196,9 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer) if (unlikely((page_count(page) - pagecnt_bias) > 1)) return false; #else -#define I40E_LAST_OFFSET \ - (SKB_WITH_OVERHEAD(PAGE_SIZE) - I40E_RXBUFFER_2048) - if (rx_buffer->page_offset > I40E_LAST_OFFSET) +#define IAVF_LAST_OFFSET \ + (SKB_WITH_OVERHEAD(PAGE_SIZE) - IAVF_RXBUFFER_2048) + if (rx_buffer->page_offset > IAVF_LAST_OFFSET) return false; #endif @@ -1215,7 +1215,7 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer) } /** - * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff + * iavf_add_rx_frag - Add contents of Rx buffer to sk_buff * @rx_ring: rx descriptor ring to transact packets on * @rx_buffer: buffer containing page to add * @skb: sk_buff to place the data into @@ -1226,15 +1226,15 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer) * * The function will then update the page offset. **/ -static void i40e_add_rx_frag(struct i40e_ring *rx_ring, - struct i40e_rx_buffer *rx_buffer, +static void iavf_add_rx_frag(struct iavf_ring *rx_ring, + struct iavf_rx_buffer *rx_buffer, struct sk_buff *skb, unsigned int size) { #if (PAGE_SIZE < 8192) - unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2; + unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2; #else - unsigned int truesize = SKB_DATA_ALIGN(size + i40e_rx_offset(rx_ring)); + unsigned int truesize = SKB_DATA_ALIGN(size + iavf_rx_offset(rx_ring)); #endif skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, @@ -1249,17 +1249,17 @@ static void i40e_add_rx_frag(struct i40e_ring *rx_ring, } /** - * i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use + * iavf_get_rx_buffer - Fetch Rx buffer and synchronize data for use * @rx_ring: rx descriptor ring to transact packets on * @size: size of buffer to add to skb * * This function will pull an Rx buffer from the ring and synchronize it * for use by the CPU. */ -static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring, +static struct iavf_rx_buffer *iavf_get_rx_buffer(struct iavf_ring *rx_ring, const unsigned int size) { - struct i40e_rx_buffer *rx_buffer; + struct iavf_rx_buffer *rx_buffer; rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean]; prefetchw(rx_buffer->page); @@ -1278,7 +1278,7 @@ static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring, } /** - * i40e_construct_skb - Allocate skb and populate it + * iavf_construct_skb - Allocate skb and populate it * @rx_ring: rx descriptor ring to transact packets on * @rx_buffer: rx buffer to pull data from * @size: size of buffer to add to skb @@ -1287,13 +1287,13 @@ static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring, * data from the current receive descriptor, taking care to set up the * skb correctly. 
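iavf_can_reuse_rx_page() above recycles a page only when at most one reference beyond the driver's own pagecnt_bias stake is outstanding. A standalone model of the small-page branch of that test; the driver hands a fragment to the stack by decrementing its bias rather than taking a fresh page reference, and the arithmetic below mirrors that:

#include <stdbool.h>
#include <stdio.h>

struct rx_buffer {
	int page_refs;		/* models page_count(page) */
	int pagecnt_bias;	/* share of page_refs the driver still owns */
};

/* Reusable while no more than one reference is out of driver hands. */
static bool can_reuse(const struct rx_buffer *b)
{
	return (b->page_refs - b->pagecnt_bias) <= 1;
}

int main(void)
{
	struct rx_buffer b = { .page_refs = 2, .pagecnt_bias = 2 };

	b.pagecnt_bias--;	/* first half-page handed to an skb */
	printf("one frag out: reuse=%d\n", can_reuse(&b));	/* 1 */

	b.pagecnt_bias--;	/* both halves in flight at once */
	printf("two frags out: reuse=%d\n", can_reuse(&b));	/* 0 */
	return 0;
}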
*/ -static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring, - struct i40e_rx_buffer *rx_buffer, +static struct sk_buff *iavf_construct_skb(struct iavf_ring *rx_ring, + struct iavf_rx_buffer *rx_buffer, unsigned int size) { void *va = page_address(rx_buffer->page) + rx_buffer->page_offset; #if (PAGE_SIZE < 8192) - unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2; + unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2; #else unsigned int truesize = SKB_DATA_ALIGN(size); #endif @@ -1308,15 +1308,15 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring, /* allocate a skb to store the frags */ skb = __napi_alloc_skb(&rx_ring->q_vector->napi, - I40E_RX_HDR_SIZE, + IAVF_RX_HDR_SIZE, GFP_ATOMIC | __GFP_NOWARN); if (unlikely(!skb)) return NULL; /* Determine available headroom for copy */ headlen = size; - if (headlen > I40E_RX_HDR_SIZE) - headlen = eth_get_headlen(va, I40E_RX_HDR_SIZE); + if (headlen > IAVF_RX_HDR_SIZE) + headlen = eth_get_headlen(va, IAVF_RX_HDR_SIZE); /* align pull length to size of long to optimize memcpy performance */ memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long))); @@ -1343,7 +1343,7 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring, } /** - * i40e_build_skb - Build skb around an existing buffer + * iavf_build_skb - Build skb around an existing buffer * @rx_ring: Rx descriptor ring to transact packets on * @rx_buffer: Rx buffer to pull data from * @size: size of buffer to add to skb @@ -1351,16 +1351,16 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring, * This function builds an skb around an existing Rx buffer, taking care * to set up the skb correctly and avoid any memcpy overhead. */ -static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring, - struct i40e_rx_buffer *rx_buffer, +static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring, + struct iavf_rx_buffer *rx_buffer, unsigned int size) { void *va = page_address(rx_buffer->page) + rx_buffer->page_offset; #if (PAGE_SIZE < 8192) - unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2; + unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2; #else unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + - SKB_DATA_ALIGN(I40E_SKB_PAD + size); + SKB_DATA_ALIGN(IAVF_SKB_PAD + size); #endif struct sk_buff *skb; @@ -1370,12 +1370,12 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring, prefetch(va + L1_CACHE_BYTES); #endif /* build an skb around the page buffer */ - skb = build_skb(va - I40E_SKB_PAD, truesize); + skb = build_skb(va - IAVF_SKB_PAD, truesize); if (unlikely(!skb)) return NULL; /* update pointers within the skb to store the data */ - skb_reserve(skb, I40E_SKB_PAD); + skb_reserve(skb, IAVF_SKB_PAD); __skb_put(skb, size); /* buffer is used by skb, update page_offset */ @@ -1389,25 +1389,25 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring, } /** - * i40e_put_rx_buffer - Clean up used buffer and either recycle or free + * iavf_put_rx_buffer - Clean up used buffer and either recycle or free * @rx_ring: rx descriptor ring to transact packets on * @rx_buffer: rx buffer to pull data from * * This function will clean up the contents of the rx_buffer. It will * either recycle the buffer or unmap it and free the associated resources. 
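iavf_build_skb() above charges truesize differently by page size: a fixed half page when pages are small enough to split in two, otherwise the aligned payload plus the shared-info overhead. A worked example of both branches for a 1500-byte frame; IAVF_SKB_PAD and the skb_shared_info size depend on kernel configuration, so the constants here are stand-ins:

#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))
#define CACHELINE	64	/* SKB_DATA_ALIGN granularity, assumed */
#define SKB_PAD		64	/* stands in for IAVF_SKB_PAD */
#define SHINFO		320	/* stands in for sizeof(struct skb_shared_info) */

int main(void)
{
	unsigned int size = 1500;

	/* PAGE_SIZE < 8192: the page is split into two fixed halves. */
	unsigned int truesize_small = 4096 / 2;

	/* Larger pages: charge exactly what this buffer consumed. */
	unsigned int truesize_large = ALIGN_UP(SHINFO, CACHELINE) +
				      ALIGN_UP(SKB_PAD + size, CACHELINE);

	printf("small pages: %u, large pages: %u\n",
	       truesize_small, truesize_large);	/* 2048, 1920 */
	return 0;
}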
*/ -static void i40e_put_rx_buffer(struct i40e_ring *rx_ring, - struct i40e_rx_buffer *rx_buffer) +static void iavf_put_rx_buffer(struct iavf_ring *rx_ring, + struct iavf_rx_buffer *rx_buffer) { - if (i40e_can_reuse_rx_page(rx_buffer)) { + if (iavf_can_reuse_rx_page(rx_buffer)) { /* hand second half of page back to the ring */ - i40e_reuse_rx_page(rx_ring, rx_buffer); + iavf_reuse_rx_page(rx_ring, rx_buffer); rx_ring->rx_stats.page_reuse_count++; } else { /* we are not reusing the buffer so unmap it */ dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, - i40e_rx_pg_size(rx_ring), - DMA_FROM_DEVICE, I40E_RX_DMA_ATTR); + iavf_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, IAVF_RX_DMA_ATTR); __page_frag_cache_drain(rx_buffer->page, rx_buffer->pagecnt_bias); } @@ -1417,7 +1417,7 @@ static void i40e_put_rx_buffer(struct i40e_ring *rx_ring, } /** - * i40e_is_non_eop - process handling of non-EOP buffers + * iavf_is_non_eop - process handling of non-EOP buffers * @rx_ring: Rx ring being processed * @rx_desc: Rx descriptor for current buffer * @skb: Current socket buffer containing buffer in progress @@ -1427,8 +1427,8 @@ static void i40e_put_rx_buffer(struct i40e_ring *rx_ring, * sk_buff in the next buffer to be chained and return true indicating * that this is in fact a non-EOP buffer. **/ -static bool i40e_is_non_eop(struct i40e_ring *rx_ring, - union i40e_rx_desc *rx_desc, +static bool iavf_is_non_eop(struct iavf_ring *rx_ring, + union iavf_rx_desc *rx_desc, struct sk_buff *skb) { u32 ntc = rx_ring->next_to_clean + 1; @@ -1437,11 +1437,11 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring, ntc = (ntc < rx_ring->count) ? ntc : 0; rx_ring->next_to_clean = ntc; - prefetch(I40E_RX_DESC(rx_ring, ntc)); + prefetch(IAVF_RX_DESC(rx_ring, ntc)); /* if we are the last buffer then there is nothing else to do */ -#define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT) - if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF))) +#define IAVF_RXD_EOF BIT(IAVF_RX_DESC_STATUS_EOF_SHIFT) + if (likely(iavf_test_staterr(rx_desc, IAVF_RXD_EOF))) return false; rx_ring->rx_stats.non_eop_descs++; @@ -1450,7 +1450,7 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring, } /** - * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf + * iavf_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf * @rx_ring: rx descriptor ring to transact packets on * @budget: Total limit on number of packets to process * @@ -1461,29 +1461,29 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring, * * Returns amount of work completed **/ -static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) +static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget) { unsigned int total_rx_bytes = 0, total_rx_packets = 0; struct sk_buff *skb = rx_ring->skb; - u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); + u16 cleaned_count = IAVF_DESC_UNUSED(rx_ring); bool failure = false; while (likely(total_rx_packets < (unsigned int)budget)) { - struct i40e_rx_buffer *rx_buffer; - union i40e_rx_desc *rx_desc; + struct iavf_rx_buffer *rx_buffer; + union iavf_rx_desc *rx_desc; unsigned int size; u16 vlan_tag; u8 rx_ptype; u64 qword; /* return some buffers to hardware, one at a time is too slow */ - if (cleaned_count >= I40E_RX_BUFFER_WRITE) { + if (cleaned_count >= IAVF_RX_BUFFER_WRITE) { failure = failure || - i40evf_alloc_rx_buffers(rx_ring, cleaned_count); + iavf_alloc_rx_buffers(rx_ring, cleaned_count); cleaned_count = 0; } - rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean); + rx_desc = 
IAVF_RX_DESC(rx_ring, rx_ring->next_to_clean); /* status_error_len will always be zero for unused descriptors * because it's cleared in cleanup, and overlaps with hdr_addr @@ -1498,21 +1498,21 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) */ dma_rmb(); - size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> - I40E_RXD_QW1_LENGTH_PBUF_SHIFT; + size = (qword & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >> + IAVF_RXD_QW1_LENGTH_PBUF_SHIFT; if (!size) break; - i40e_trace(clean_rx_irq, rx_ring, rx_desc, skb); - rx_buffer = i40e_get_rx_buffer(rx_ring, size); + iavf_trace(clean_rx_irq, rx_ring, rx_desc, skb); + rx_buffer = iavf_get_rx_buffer(rx_ring, size); /* retrieve a buffer from the ring */ if (skb) - i40e_add_rx_frag(rx_ring, rx_buffer, skb, size); + iavf_add_rx_frag(rx_ring, rx_buffer, skb, size); else if (ring_uses_build_skb(rx_ring)) - skb = i40e_build_skb(rx_ring, rx_buffer, size); + skb = iavf_build_skb(rx_ring, rx_buffer, size); else - skb = i40e_construct_skb(rx_ring, rx_buffer, size); + skb = iavf_construct_skb(rx_ring, rx_buffer, size); /* exit if we failed to retrieve a buffer */ if (!skb) { @@ -1521,24 +1521,24 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) break; } - i40e_put_rx_buffer(rx_ring, rx_buffer); + iavf_put_rx_buffer(rx_ring, rx_buffer); cleaned_count++; - if (i40e_is_non_eop(rx_ring, rx_desc, skb)) + if (iavf_is_non_eop(rx_ring, rx_desc, skb)) continue; /* ERR_MASK will only have valid bits if EOP set, and * what we are doing here is actually checking - * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in + * IAVF_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in * the error field */ - if (unlikely(i40e_test_staterr(rx_desc, BIT(I40E_RXD_QW1_ERROR_SHIFT)))) { + if (unlikely(iavf_test_staterr(rx_desc, BIT(IAVF_RXD_QW1_ERROR_SHIFT)))) { dev_kfree_skb_any(skb); skb = NULL; continue; } - if (i40e_cleanup_headers(rx_ring, skb)) { + if (iavf_cleanup_headers(rx_ring, skb)) { skb = NULL; continue; } @@ -1547,18 +1547,18 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) total_rx_bytes += skb->len; qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); - rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> - I40E_RXD_QW1_PTYPE_SHIFT; + rx_ptype = (qword & IAVF_RXD_QW1_PTYPE_MASK) >> + IAVF_RXD_QW1_PTYPE_SHIFT; /* populate checksum, VLAN, and protocol */ - i40evf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype); + iavf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype); - vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ? + vlan_tag = (qword & BIT(IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT)) ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0; - i40e_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb); - i40e_receive_skb(rx_ring, skb, vlan_tag); + iavf_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb); + iavf_receive_skb(rx_ring, skb, vlan_tag); skb = NULL; /* update budget accounting */ @@ -1578,7 +1578,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) return failure ? budget : (int)total_rx_packets; } -static inline u32 i40e_buildreg_itr(const int type, u16 itr) +static inline u32 iavf_buildreg_itr(const int type, u16 itr) { u32 val; @@ -1597,17 +1597,17 @@ static inline u32 i40e_buildreg_itr(const int type, u16 itr) * only need to shift by the interval shift - 1 instead of the * full value. 
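/*
 * Standalone sketch (not part of this patch) of the shift-folding trick
 * described in the comment above iavf_buildreg_itr(): the register stores
 * the interval in 2 usec units and the driver only uses even usec values,
 * so (itr / 2) << SHIFT can be written as itr << (SHIFT - 1). The shift
 * width below is an assumption for illustration, not the real
 * IAVF_VFINT_DYN_CTLN1_INTERVAL_SHIFT.
 */
#include <assert.h>
#include <stdint.h>

#define MODEL_INTERVAL_SHIFT	5	/* assumed width */

int main(void)
{
	uint32_t itr = 50;	/* IAVF_ITR_20K: 50 usecs, always even */

	assert(((itr / 2) << MODEL_INTERVAL_SHIFT) ==
	       (itr << (MODEL_INTERVAL_SHIFT - 1)));
	return 0;
}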
*/ - itr &= I40E_ITR_MASK; + itr &= IAVF_ITR_MASK; - val = I40E_VFINT_DYN_CTLN1_INTENA_MASK | - (type << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) | - (itr << (I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT - 1)); + val = IAVF_VFINT_DYN_CTLN1_INTENA_MASK | + (type << IAVF_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) | + (itr << (IAVF_VFINT_DYN_CTLN1_INTERVAL_SHIFT - 1)); return val; } /* a small macro to shorten up some long lines */ -#define INTREG I40E_VFINT_DYN_CTLN1 +#define INTREG IAVF_VFINT_DYN_CTLN1 /* The act of updating the ITR will cause it to immediately trigger. In order * to prevent this from throwing off adaptive update statistics we defer the @@ -1619,20 +1619,20 @@ static inline u32 i40e_buildreg_itr(const int type, u16 itr) #define ITR_COUNTDOWN_START 3 /** - * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt + * iavf_update_enable_itr - Update itr and re-enable MSIX interrupt * @vsi: the VSI we care about * @q_vector: q_vector for which itr is being updated and interrupt enabled * **/ -static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, - struct i40e_q_vector *q_vector) +static inline void iavf_update_enable_itr(struct iavf_vsi *vsi, + struct iavf_q_vector *q_vector) { - struct i40e_hw *hw = &vsi->back->hw; + struct iavf_hw *hw = &vsi->back->hw; u32 intval; /* These will do nothing if dynamic updates are not enabled */ - i40e_update_itr(q_vector, &q_vector->tx); - i40e_update_itr(q_vector, &q_vector->rx); + iavf_update_itr(q_vector, &q_vector->tx); + iavf_update_itr(q_vector, &q_vector->rx); /* This block of logic allows us to get away with only updating * one ITR value with each interrupt. The idea is to perform a @@ -1644,7 +1644,7 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, */ if (q_vector->rx.target_itr < q_vector->rx.current_itr) { /* Rx ITR needs to be reduced, this is highest priority */ - intval = i40e_buildreg_itr(I40E_RX_ITR, + intval = iavf_buildreg_itr(IAVF_RX_ITR, q_vector->rx.target_itr); q_vector->rx.current_itr = q_vector->rx.target_itr; q_vector->itr_countdown = ITR_COUNTDOWN_START; @@ -1654,29 +1654,29 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, /* Tx ITR needs to be reduced, this is second priority * Tx ITR needs to be increased more than Rx, fourth priority */ - intval = i40e_buildreg_itr(I40E_TX_ITR, + intval = iavf_buildreg_itr(IAVF_TX_ITR, q_vector->tx.target_itr); q_vector->tx.current_itr = q_vector->tx.target_itr; q_vector->itr_countdown = ITR_COUNTDOWN_START; } else if (q_vector->rx.current_itr != q_vector->rx.target_itr) { /* Rx ITR needs to be increased, third priority */ - intval = i40e_buildreg_itr(I40E_RX_ITR, + intval = iavf_buildreg_itr(IAVF_RX_ITR, q_vector->rx.target_itr); q_vector->rx.current_itr = q_vector->rx.target_itr; q_vector->itr_countdown = ITR_COUNTDOWN_START; } else { /* No ITR update, lowest priority */ - intval = i40e_buildreg_itr(I40E_ITR_NONE, 0); + intval = iavf_buildreg_itr(IAVF_ITR_NONE, 0); if (q_vector->itr_countdown) q_vector->itr_countdown--; } - if (!test_bit(__I40E_VSI_DOWN, vsi->state)) + if (!test_bit(__IAVF_VSI_DOWN, vsi->state)) wr32(hw, INTREG(q_vector->reg_idx), intval); } /** - * i40evf_napi_poll - NAPI polling Rx/Tx cleanup routine + * iavf_napi_poll - NAPI polling Rx/Tx cleanup routine * @napi: napi struct with our devices info in it * @budget: amount of work driver is allowed to do this pass, in packets * @@ -1684,18 +1684,18 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, * * Returns the amount of work done **/ -int i40evf_napi_poll(struct 
napi_struct *napi, int budget) +int iavf_napi_poll(struct napi_struct *napi, int budget) { - struct i40e_q_vector *q_vector = - container_of(napi, struct i40e_q_vector, napi); - struct i40e_vsi *vsi = q_vector->vsi; - struct i40e_ring *ring; + struct iavf_q_vector *q_vector = + container_of(napi, struct iavf_q_vector, napi); + struct iavf_vsi *vsi = q_vector->vsi; + struct iavf_ring *ring; bool clean_complete = true; bool arm_wb = false; int budget_per_ring; int work_done = 0; - if (test_bit(__I40E_VSI_DOWN, vsi->state)) { + if (test_bit(__IAVF_VSI_DOWN, vsi->state)) { napi_complete(napi); return 0; } @@ -1703,8 +1703,8 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget) /* Since the actual Tx work is minimal, we can give the Tx a larger * budget and be more aggressive about cleaning up the Tx descriptors. */ - i40e_for_each_ring(ring, q_vector->tx) { - if (!i40e_clean_tx_irq(vsi, ring, budget)) { + iavf_for_each_ring(ring, q_vector->tx) { + if (!iavf_clean_tx_irq(vsi, ring, budget)) { clean_complete = false; continue; } @@ -1721,8 +1721,8 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget) */ budget_per_ring = max(budget/q_vector->num_ringpairs, 1); - i40e_for_each_ring(ring, q_vector->rx) { - int cleaned = i40e_clean_rx_irq(ring, budget_per_ring); + iavf_for_each_ring(ring, q_vector->rx) { + int cleaned = iavf_clean_rx_irq(ring, budget_per_ring); work_done += cleaned; /* if we clean as many as budgeted, we must not be done */ @@ -1746,7 +1746,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget) napi_complete_done(napi, work_done); /* Force an interrupt */ - i40evf_force_wb(vsi, q_vector); + iavf_force_wb(vsi, q_vector); /* Return budget-1 so that polling stops */ return budget - 1; @@ -1754,24 +1754,24 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget) tx_only: if (arm_wb) { q_vector->tx.ring[0].tx_stats.tx_force_wb++; - i40e_enable_wb_on_itr(vsi, q_vector); + iavf_enable_wb_on_itr(vsi, q_vector); } return budget; } - if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR) + if (vsi->back->flags & IAVF_TXR_FLAGS_WB_ON_ITR) q_vector->arm_wb_state = false; /* Work is done so exit the polling mode and re-enable the interrupt */ napi_complete_done(napi, work_done); - i40e_update_enable_itr(vsi, q_vector); + iavf_update_enable_itr(vsi, q_vector); return min(work_done, budget - 1); } /** - * i40evf_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW + * iavf_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW * @skb: send buffer * @tx_ring: ring to send buffer on * @flags: the tx flags to be set @@ -1782,9 +1782,9 @@ tx_only: * Returns error code indicate the frame should be dropped upon error and the * otherwise returns 0 to indicate the flags has been set properly. 
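/*
 * Standalone sketch (not part of this patch) of the Rx budget split done
 * in iavf_napi_poll() above: the NAPI budget is divided across the
 * vector's ring pairs with a floor of 1, so every ring makes progress
 * even when budget < num_ringpairs.
 */
#include <assert.h>

static int model_budget_per_ring(int budget, int num_ringpairs)
{
	int per = budget / num_ringpairs;

	return per > 0 ? per : 1;	/* max(budget / num_ringpairs, 1) */
}

int main(void)
{
	assert(model_budget_per_ring(64, 4) == 16);
	assert(model_budget_per_ring(1, 4) == 1);	/* floor applies */
	return 0;
}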
**/ -static inline int i40evf_tx_prepare_vlan_flags(struct sk_buff *skb, - struct i40e_ring *tx_ring, - u32 *flags) +static inline int iavf_tx_prepare_vlan_flags(struct sk_buff *skb, + struct iavf_ring *tx_ring, + u32 *flags) { __be16 protocol = skb->protocol; u32 tx_flags = 0; @@ -1804,8 +1804,8 @@ static inline int i40evf_tx_prepare_vlan_flags(struct sk_buff *skb, /* if we have a HW VLAN tag being added, default to the HW one */ if (skb_vlan_tag_present(skb)) { - tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT; - tx_flags |= I40E_TX_FLAGS_HW_VLAN; + tx_flags |= skb_vlan_tag_get(skb) << IAVF_TX_FLAGS_VLAN_SHIFT; + tx_flags |= IAVF_TX_FLAGS_HW_VLAN; /* else if it is a SW VLAN, check the next protocol and store the tag */ } else if (protocol == htons(ETH_P_8021Q)) { struct vlan_hdr *vhdr, _vhdr; @@ -1815,8 +1815,8 @@ static inline int i40evf_tx_prepare_vlan_flags(struct sk_buff *skb, return -EINVAL; protocol = vhdr->h_vlan_encapsulated_proto; - tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT; - tx_flags |= I40E_TX_FLAGS_SW_VLAN; + tx_flags |= ntohs(vhdr->h_vlan_TCI) << IAVF_TX_FLAGS_VLAN_SHIFT; + tx_flags |= IAVF_TX_FLAGS_SW_VLAN; } out: @@ -1825,14 +1825,14 @@ out: } /** - * i40e_tso - set up the tso context descriptor + * iavf_tso - set up the tso context descriptor * @first: pointer to first Tx buffer for xmit * @hdr_len: ptr to the size of the packet header * @cd_type_cmd_tso_mss: Quad Word 1 * * Returns 0 if no TSO can happen, 1 if tso is going, or error **/ -static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len, +static int iavf_tso(struct iavf_tx_buffer *first, u8 *hdr_len, u64 *cd_type_cmd_tso_mss) { struct sk_buff *skb = first->skb; @@ -1923,17 +1923,17 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len, first->bytecount += (first->gso_segs - 1) * *hdr_len; /* find the field values */ - cd_cmd = I40E_TX_CTX_DESC_TSO; + cd_cmd = IAVF_TX_CTX_DESC_TSO; cd_tso_len = skb->len - *hdr_len; cd_mss = gso_size; - *cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) | - (cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) | - (cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT); + *cd_type_cmd_tso_mss |= (cd_cmd << IAVF_TXD_CTX_QW1_CMD_SHIFT) | + (cd_tso_len << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) | + (cd_mss << IAVF_TXD_CTX_QW1_MSS_SHIFT); return 1; } /** - * i40e_tx_enable_csum - Enable Tx checksum offloads + * iavf_tx_enable_csum - Enable Tx checksum offloads * @skb: send buffer * @tx_flags: pointer to Tx flags currently set * @td_cmd: Tx descriptor command bits to set @@ -1941,9 +1941,9 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len, * @tx_ring: Tx descriptor ring * @cd_tunneling: ptr to context desc bits **/ -static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, +static int iavf_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, u32 *td_cmd, u32 *td_offset, - struct i40e_ring *tx_ring, + struct iavf_ring *tx_ring, u32 *cd_tunneling) { union { @@ -1968,19 +1968,19 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, l4.hdr = skb_transport_header(skb); /* compute outer L2 header size */ - offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT; + offset = ((ip.hdr - skb->data) / 2) << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT; if (skb->encapsulation) { u32 tunnel = 0; /* define outer network header type */ - if (*tx_flags & I40E_TX_FLAGS_IPV4) { - tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ? 
- I40E_TX_CTX_EXT_IP_IPV4 : - I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; + if (*tx_flags & IAVF_TX_FLAGS_IPV4) { + tunnel |= (*tx_flags & IAVF_TX_FLAGS_TSO) ? + IAVF_TX_CTX_EXT_IP_IPV4 : + IAVF_TX_CTX_EXT_IP_IPV4_NO_CSUM; l4_proto = ip.v4->protocol; - } else if (*tx_flags & I40E_TX_FLAGS_IPV6) { - tunnel |= I40E_TX_CTX_EXT_IP_IPV6; + } else if (*tx_flags & IAVF_TX_FLAGS_IPV6) { + tunnel |= IAVF_TX_CTX_EXT_IP_IPV6; exthdr = ip.hdr + sizeof(*ip.v6); l4_proto = ip.v6->nexthdr; @@ -1992,20 +1992,20 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, /* define outer transport */ switch (l4_proto) { case IPPROTO_UDP: - tunnel |= I40E_TXD_CTX_UDP_TUNNELING; - *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL; + tunnel |= IAVF_TXD_CTX_UDP_TUNNELING; + *tx_flags |= IAVF_TX_FLAGS_VXLAN_TUNNEL; break; case IPPROTO_GRE: - tunnel |= I40E_TXD_CTX_GRE_TUNNELING; - *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL; + tunnel |= IAVF_TXD_CTX_GRE_TUNNELING; + *tx_flags |= IAVF_TX_FLAGS_VXLAN_TUNNEL; break; case IPPROTO_IPIP: case IPPROTO_IPV6: - *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL; + *tx_flags |= IAVF_TX_FLAGS_VXLAN_TUNNEL; l4.hdr = skb_inner_network_header(skb); break; default: - if (*tx_flags & I40E_TX_FLAGS_TSO) + if (*tx_flags & IAVF_TX_FLAGS_TSO) return -1; skb_checksum_help(skb); @@ -2014,20 +2014,20 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, /* compute outer L3 header size */ tunnel |= ((l4.hdr - ip.hdr) / 4) << - I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT; + IAVF_TXD_CTX_QW0_EXT_IPLEN_SHIFT; /* switch IP header pointer from outer to inner header */ ip.hdr = skb_inner_network_header(skb); /* compute tunnel header size */ tunnel |= ((ip.hdr - l4.hdr) / 2) << - I40E_TXD_CTX_QW0_NATLEN_SHIFT; + IAVF_TXD_CTX_QW0_NATLEN_SHIFT; /* indicate if we need to offload outer UDP header */ - if ((*tx_flags & I40E_TX_FLAGS_TSO) && + if ((*tx_flags & IAVF_TX_FLAGS_TSO) && !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) - tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK; + tunnel |= IAVF_TXD_CTX_QW0_L4T_CS_MASK; /* record tunnel offload values */ *cd_tunneling |= tunnel; @@ -2037,24 +2037,24 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, l4_proto = 0; /* reset type as we transition from outer to inner headers */ - *tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6); + *tx_flags &= ~(IAVF_TX_FLAGS_IPV4 | IAVF_TX_FLAGS_IPV6); if (ip.v4->version == 4) - *tx_flags |= I40E_TX_FLAGS_IPV4; + *tx_flags |= IAVF_TX_FLAGS_IPV4; if (ip.v6->version == 6) - *tx_flags |= I40E_TX_FLAGS_IPV6; + *tx_flags |= IAVF_TX_FLAGS_IPV6; } /* Enable IP checksum offloads */ - if (*tx_flags & I40E_TX_FLAGS_IPV4) { + if (*tx_flags & IAVF_TX_FLAGS_IPV4) { l4_proto = ip.v4->protocol; /* the stack computes the IP header already, the only time we * need the hardware to recompute it is in the case of TSO. */ - cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ? - I40E_TX_DESC_CMD_IIPT_IPV4_CSUM : - I40E_TX_DESC_CMD_IIPT_IPV4; - } else if (*tx_flags & I40E_TX_FLAGS_IPV6) { - cmd |= I40E_TX_DESC_CMD_IIPT_IPV6; + cmd |= (*tx_flags & IAVF_TX_FLAGS_TSO) ? 
+ IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM : + IAVF_TX_DESC_CMD_IIPT_IPV4; + } else if (*tx_flags & IAVF_TX_FLAGS_IPV6) { + cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6; exthdr = ip.hdr + sizeof(*ip.v6); l4_proto = ip.v6->nexthdr; @@ -2064,29 +2064,29 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, } /* compute inner L3 header size */ - offset |= ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT; + offset |= ((l4.hdr - ip.hdr) / 4) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT; /* Enable L4 checksum offloads */ switch (l4_proto) { case IPPROTO_TCP: /* enable checksum offloads */ - cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP; - offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; + cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP; + offset |= l4.tcp->doff << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; break; case IPPROTO_SCTP: /* enable SCTP checksum offload */ - cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP; + cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP; offset |= (sizeof(struct sctphdr) >> 2) << - I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; + IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; break; case IPPROTO_UDP: /* enable UDP checksum offload */ - cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP; + cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP; offset |= (sizeof(struct udphdr) >> 2) << - I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; + IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; break; default: - if (*tx_flags & I40E_TX_FLAGS_TSO) + if (*tx_flags & IAVF_TX_FLAGS_TSO) return -1; skb_checksum_help(skb); return 0; @@ -2099,25 +2099,25 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, } /** - * i40e_create_tx_ctx Build the Tx context descriptor + * iavf_create_tx_ctx Build the Tx context descriptor * @tx_ring: ring to create the descriptor on * @cd_type_cmd_tso_mss: Quad Word 1 * @cd_tunneling: Quad Word 0 - bits 0-31 * @cd_l2tag2: Quad Word 0 - bits 32-63 **/ -static void i40e_create_tx_ctx(struct i40e_ring *tx_ring, +static void iavf_create_tx_ctx(struct iavf_ring *tx_ring, const u64 cd_type_cmd_tso_mss, const u32 cd_tunneling, const u32 cd_l2tag2) { - struct i40e_tx_context_desc *context_desc; + struct iavf_tx_context_desc *context_desc; int i = tx_ring->next_to_use; - if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) && + if ((cd_type_cmd_tso_mss == IAVF_TX_DESC_DTYPE_CONTEXT) && !cd_tunneling && !cd_l2tag2) return; /* grab the next descriptor */ - context_desc = I40E_TX_CTXTDESC(tx_ring, i); + context_desc = IAVF_TX_CTXTDESC(tx_ring, i); i++; tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; @@ -2130,7 +2130,7 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring, } /** - * __i40evf_chk_linearize - Check if there are more than 8 buffers per packet + * __iavf_chk_linearize - Check if there are more than 8 buffers per packet * @skb: send buffer * * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire @@ -2142,20 +2142,20 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring, * the segment payload in the first descriptor, and another 7 for the * fragments. **/ -bool __i40evf_chk_linearize(struct sk_buff *skb) +bool __iavf_chk_linearize(struct sk_buff *skb) { const struct skb_frag_struct *frag, *stale; int nr_frags, sum; /* no need to check if number of frags is less than 7 */ nr_frags = skb_shinfo(skb)->nr_frags; - if (nr_frags < (I40E_MAX_BUFFER_TXD - 1)) + if (nr_frags < (IAVF_MAX_BUFFER_TXD - 1)) return false; /* We need to walk through the list and validate that each group * of 6 fragments totals at least gso_size. 
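/*
 * Standalone sketch (not part of this patch) of the fragment walk that
 * __iavf_chk_linearize() performs: with at most 8 buffers per wire packet
 * and TSO reserving one descriptor for the header, every window of 6
 * consecutive fragments must supply at least gso_size bytes. The real
 * code also compensates for 4K read-request alignment, which this model
 * omits.
 */
#include <assert.h>
#include <stdbool.h>

#define MODEL_MAX_BUFFER_TXD	8

static bool model_needs_linearize(const int *frag_len, int nr_frags,
				  int gso_size)
{
	int sum = 0, i;

	if (nr_frags < MODEL_MAX_BUFFER_TXD - 1)
		return false;	/* fewer than 7 frags always fits */

	/* seed with the first 5 frags, then slide a 6-frag window */
	for (i = 0; i < MODEL_MAX_BUFFER_TXD - 3; i++)
		sum += frag_len[i];

	for (; i < nr_frags; i++) {
		sum += frag_len[i];
		if (sum < gso_size)
			return true;
		sum -= frag_len[i - (MODEL_MAX_BUFFER_TXD - 3)];
	}
	return false;
}

int main(void)
{
	const int even[8] = { 512, 512, 512, 512, 512, 512, 512, 512 };
	const int tail[8] = { 512, 512, 512, 512, 512, 512, 512, 64 };

	assert(!model_needs_linearize(even, 8, 3000)); /* windows = 3072 */
	assert(model_needs_linearize(tail, 8, 3000));  /* last one = 2624 */
	return 0;
}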
*/ - nr_frags -= I40E_MAX_BUFFER_TXD - 2; + nr_frags -= IAVF_MAX_BUFFER_TXD - 2; frag = &skb_shinfo(skb)->frags[0]; /* Initialize size to the negative value of gso_size minus 1. We @@ -2187,17 +2187,17 @@ bool __i40evf_chk_linearize(struct sk_buff *skb) * figure out what the remainder would be in the last * descriptor associated with the fragment. */ - if (stale_size > I40E_MAX_DATA_PER_TXD) { + if (stale_size > IAVF_MAX_DATA_PER_TXD) { int align_pad = -(stale->page_offset) & - (I40E_MAX_READ_REQ_SIZE - 1); + (IAVF_MAX_READ_REQ_SIZE - 1); sum -= align_pad; stale_size -= align_pad; do { - sum -= I40E_MAX_DATA_PER_TXD_ALIGNED; - stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED; - } while (stale_size > I40E_MAX_DATA_PER_TXD); + sum -= IAVF_MAX_DATA_PER_TXD_ALIGNED; + stale_size -= IAVF_MAX_DATA_PER_TXD_ALIGNED; + } while (stale_size > IAVF_MAX_DATA_PER_TXD); } /* if sum is negative we failed to make sufficient progress */ @@ -2214,20 +2214,20 @@ bool __i40evf_chk_linearize(struct sk_buff *skb) } /** - * __i40evf_maybe_stop_tx - 2nd level check for tx stop conditions + * __iavf_maybe_stop_tx - 2nd level check for tx stop conditions * @tx_ring: the ring to be checked * @size: the size buffer we want to assure is available * * Returns -EBUSY if a stop is needed, else 0 **/ -int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size) +int __iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size) { netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); /* Memory barrier before checking head and tail */ smp_mb(); /* Check again in a case another CPU has just made room available. */ - if (likely(I40E_DESC_UNUSED(tx_ring) < size)) + if (likely(IAVF_DESC_UNUSED(tx_ring) < size)) return -EBUSY; /* A reprieve! - use start_queue because it doesn't call schedule */ @@ -2237,7 +2237,7 @@ int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size) } /** - * i40evf_tx_map - Build the Tx descriptor + * iavf_tx_map - Build the Tx descriptor * @tx_ring: ring to send buffer on * @skb: send buffer * @first: first buffer info buffer to use @@ -2246,34 +2246,34 @@ int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size) * @td_cmd: the command field in the descriptor * @td_offset: offset for checksum or crc **/ -static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, - struct i40e_tx_buffer *first, u32 tx_flags, - const u8 hdr_len, u32 td_cmd, u32 td_offset) +static inline void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb, + struct iavf_tx_buffer *first, u32 tx_flags, + const u8 hdr_len, u32 td_cmd, u32 td_offset) { unsigned int data_len = skb->data_len; unsigned int size = skb_headlen(skb); struct skb_frag_struct *frag; - struct i40e_tx_buffer *tx_bi; - struct i40e_tx_desc *tx_desc; + struct iavf_tx_buffer *tx_bi; + struct iavf_tx_desc *tx_desc; u16 i = tx_ring->next_to_use; u32 td_tag = 0; dma_addr_t dma; - if (tx_flags & I40E_TX_FLAGS_HW_VLAN) { - td_cmd |= I40E_TX_DESC_CMD_IL2TAG1; - td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >> - I40E_TX_FLAGS_VLAN_SHIFT; + if (tx_flags & IAVF_TX_FLAGS_HW_VLAN) { + td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1; + td_tag = (tx_flags & IAVF_TX_FLAGS_VLAN_MASK) >> + IAVF_TX_FLAGS_VLAN_SHIFT; } first->tx_flags = tx_flags; dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); - tx_desc = I40E_TX_DESC(tx_ring, i); + tx_desc = IAVF_TX_DESC(tx_ring, i); tx_bi = first; for (frag = &skb_shinfo(skb)->frags[0];; frag++) { - unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED; + unsigned int max_data = 
IAVF_MAX_DATA_PER_TXD_ALIGNED; if (dma_mapping_error(tx_ring->dev, dma)) goto dma_error; @@ -2283,10 +2283,10 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, dma_unmap_addr_set(tx_bi, dma, dma); /* align size to end of page */ - max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1); + max_data += -dma & (IAVF_MAX_READ_REQ_SIZE - 1); tx_desc->buffer_addr = cpu_to_le64(dma); - while (unlikely(size > I40E_MAX_DATA_PER_TXD)) { + while (unlikely(size > IAVF_MAX_DATA_PER_TXD)) { tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, max_data, td_tag); @@ -2295,14 +2295,14 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, i++; if (i == tx_ring->count) { - tx_desc = I40E_TX_DESC(tx_ring, 0); + tx_desc = IAVF_TX_DESC(tx_ring, 0); i = 0; } dma += max_data; size -= max_data; - max_data = I40E_MAX_DATA_PER_TXD_ALIGNED; + max_data = IAVF_MAX_DATA_PER_TXD_ALIGNED; tx_desc->buffer_addr = cpu_to_le64(dma); } @@ -2316,7 +2316,7 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, i++; if (i == tx_ring->count) { - tx_desc = I40E_TX_DESC(tx_ring, 0); + tx_desc = IAVF_TX_DESC(tx_ring, 0); i = 0; } @@ -2337,10 +2337,10 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, tx_ring->next_to_use = i; - i40e_maybe_stop_tx(tx_ring, DESC_NEEDED); + iavf_maybe_stop_tx(tx_ring, DESC_NEEDED); /* write last descriptor with RS and EOP bits */ - td_cmd |= I40E_TXD_CMD; + td_cmd |= IAVF_TXD_CMD; tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, size, td_tag); @@ -2373,7 +2373,7 @@ dma_error: /* clear dma mappings for failed tx_bi map */ for (;;) { tx_bi = &tx_ring->tx_bi[i]; - i40e_unmap_and_free_tx_resource(tx_ring, tx_bi); + iavf_unmap_and_free_tx_resource(tx_ring, tx_bi); if (tx_bi == first) break; if (i == 0) @@ -2385,18 +2385,18 @@ dma_error: } /** - * i40e_xmit_frame_ring - Sends buffer on Tx ring + * iavf_xmit_frame_ring - Sends buffer on Tx ring * @skb: send buffer * @tx_ring: ring to send buffer on * * Returns NETDEV_TX_OK if sent, else an error code **/ -static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, - struct i40e_ring *tx_ring) +static netdev_tx_t iavf_xmit_frame_ring(struct sk_buff *skb, + struct iavf_ring *tx_ring) { - u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT; + u64 cd_type_cmd_tso_mss = IAVF_TX_DESC_DTYPE_CONTEXT; u32 cd_tunneling = 0, cd_l2tag2 = 0; - struct i40e_tx_buffer *first; + struct iavf_tx_buffer *first; u32 td_offset = 0; u32 tx_flags = 0; __be16 protocol; @@ -2407,25 +2407,25 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, /* prefetch the data, we'll need it later */ prefetch(skb->data); - i40e_trace(xmit_frame_ring, skb, tx_ring); + iavf_trace(xmit_frame_ring, skb, tx_ring); - count = i40e_xmit_descriptor_count(skb); - if (i40e_chk_linearize(skb, count)) { + count = iavf_xmit_descriptor_count(skb); + if (iavf_chk_linearize(skb, count)) { if (__skb_linearize(skb)) { dev_kfree_skb_any(skb); return NETDEV_TX_OK; } - count = i40e_txd_use_count(skb->len); + count = iavf_txd_use_count(skb->len); tx_ring->tx_stats.tx_linearize++; } - /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD, - * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD, + /* need: 1 descriptor per page * PAGE_SIZE/IAVF_MAX_DATA_PER_TXD, + * + 1 desc for skb_head_len/IAVF_MAX_DATA_PER_TXD, * + 4 desc gap to avoid the cache line where head is, * + 1 desc for context descriptor, * otherwise try next time */ - if (i40e_maybe_stop_tx(tx_ring, count 
+ 4 + 1)) { + if (iavf_maybe_stop_tx(tx_ring, count + 4 + 1)) { tx_ring->tx_stats.tx_busy++; return NETDEV_TX_BUSY; } @@ -2437,7 +2437,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, first->gso_segs = 1; /* prepare the xmit flags */ - if (i40evf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags)) + if (iavf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags)) goto out_drop; /* obtain protocol of skb */ @@ -2445,19 +2445,19 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, /* setup IPv4/IPv6 offloads */ if (protocol == htons(ETH_P_IP)) - tx_flags |= I40E_TX_FLAGS_IPV4; + tx_flags |= IAVF_TX_FLAGS_IPV4; else if (protocol == htons(ETH_P_IPV6)) - tx_flags |= I40E_TX_FLAGS_IPV6; + tx_flags |= IAVF_TX_FLAGS_IPV6; - tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss); + tso = iavf_tso(first, &hdr_len, &cd_type_cmd_tso_mss); if (tso < 0) goto out_drop; else if (tso) - tx_flags |= I40E_TX_FLAGS_TSO; + tx_flags |= IAVF_TX_FLAGS_TSO; /* Always offload the checksum, since it's in the data descriptor */ - tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset, + tso = iavf_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset, tx_ring, &cd_tunneling); if (tso < 0) goto out_drop; @@ -2465,44 +2465,44 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, skb_tx_timestamp(skb); /* always enable CRC insertion offload */ - td_cmd |= I40E_TX_DESC_CMD_ICRC; + td_cmd |= IAVF_TX_DESC_CMD_ICRC; - i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss, + iavf_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss, cd_tunneling, cd_l2tag2); - i40evf_tx_map(tx_ring, skb, first, tx_flags, hdr_len, - td_cmd, td_offset); + iavf_tx_map(tx_ring, skb, first, tx_flags, hdr_len, + td_cmd, td_offset); return NETDEV_TX_OK; out_drop: - i40e_trace(xmit_frame_ring_drop, first->skb, tx_ring); + iavf_trace(xmit_frame_ring_drop, first->skb, tx_ring); dev_kfree_skb_any(first->skb); first->skb = NULL; return NETDEV_TX_OK; } /** - * i40evf_xmit_frame - Selects the correct VSI and Tx queue to send buffer + * iavf_xmit_frame - Selects the correct VSI and Tx queue to send buffer * @skb: send buffer * @netdev: network interface device structure * * Returns NETDEV_TX_OK if sent, else an error code **/ -netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) +netdev_tx_t iavf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) { - struct i40evf_adapter *adapter = netdev_priv(netdev); - struct i40e_ring *tx_ring = &adapter->tx_rings[skb->queue_mapping]; + struct iavf_adapter *adapter = netdev_priv(netdev); + struct iavf_ring *tx_ring = &adapter->tx_rings[skb->queue_mapping]; /* hardware can't handle really short frames, hardware padding works * beyond this point */ - if (unlikely(skb->len < I40E_MIN_TX_LEN)) { - if (skb_pad(skb, I40E_MIN_TX_LEN - skb->len)) + if (unlikely(skb->len < IAVF_MIN_TX_LEN)) { + if (skb_pad(skb, IAVF_MIN_TX_LEN - skb->len)) return NETDEV_TX_OK; - skb->len = I40E_MIN_TX_LEN; - skb_set_tail_pointer(skb, I40E_MIN_TX_LEN); + skb->len = IAVF_MIN_TX_LEN; + skb_set_tail_pointer(skb, IAVF_MIN_TX_LEN); } - return i40e_xmit_frame_ring(skb, tx_ring); + return iavf_xmit_frame_ring(skb, tx_ring); } diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/iavf/iavf_txrx.h index 3b5a63b3236e..71e7d090f8db 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h +++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.h @@ -1,11 +1,11 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright(c) 2013 - 2018 Intel Corporation. 
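/*
 * Standalone worked example (not part of this patch) of the descriptor
 * budgeting in iavf_xmit_frame_ring() above, using the iavf_txd_use_count()
 * approximation of DIV_ROUND_UP(size, 12K): a 256 byte linear head plus
 * three 16K fragments needs 7 data descriptors, and the stop check then
 * reserves 4 more for the gap plus 1 context descriptor.
 */
#include <assert.h>

static unsigned int model_txd_use_count(unsigned int size)
{
	return ((size * 85) >> 20) + 1;	/* ~= DIV_ROUND_UP(size, 12288) */
}

int main(void)
{
	unsigned int count = model_txd_use_count(256);	/* head: 1 desc */
	int i;

	for (i = 0; i < 3; i++)
		count += model_txd_use_count(16384);	/* 2 descs each */

	assert(count == 7);
	assert(count + 4 + 1 == 12);	/* gap + context descriptor */
	return 0;
}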
*/ -#ifndef _I40E_TXRX_H_ -#define _I40E_TXRX_H_ +#ifndef _IAVF_TXRX_H_ +#define _IAVF_TXRX_H_ /* Interrupt Throttling and Rate Limiting Goodies */ -#define I40E_DEFAULT_IRQ_WORK 256 +#define IAVF_DEFAULT_IRQ_WORK 256 /* The datasheet for the X710 and XL710 indicate that the maximum value for * the ITR is 8160usec which is then called out as 0xFF0 with a 2usec @@ -13,80 +13,80 @@ * the register value which is divided by 2 lets use the actual values and * avoid an excessive amount of translation. */ -#define I40E_ITR_DYNAMIC 0x8000 /* use top bit as a flag */ -#define I40E_ITR_MASK 0x1FFE /* mask for ITR register value */ -#define I40E_MIN_ITR 2 /* reg uses 2 usec resolution */ -#define I40E_ITR_100K 10 /* all values below must be even */ -#define I40E_ITR_50K 20 -#define I40E_ITR_20K 50 -#define I40E_ITR_18K 60 -#define I40E_ITR_8K 122 -#define I40E_MAX_ITR 8160 /* maximum value as per datasheet */ -#define ITR_TO_REG(setting) ((setting) & ~I40E_ITR_DYNAMIC) -#define ITR_REG_ALIGN(setting) __ALIGN_MASK(setting, ~I40E_ITR_MASK) -#define ITR_IS_DYNAMIC(setting) (!!((setting) & I40E_ITR_DYNAMIC)) - -#define I40E_ITR_RX_DEF (I40E_ITR_20K | I40E_ITR_DYNAMIC) -#define I40E_ITR_TX_DEF (I40E_ITR_20K | I40E_ITR_DYNAMIC) +#define IAVF_ITR_DYNAMIC 0x8000 /* use top bit as a flag */ +#define IAVF_ITR_MASK 0x1FFE /* mask for ITR register value */ +#define IAVF_MIN_ITR 2 /* reg uses 2 usec resolution */ +#define IAVF_ITR_100K 10 /* all values below must be even */ +#define IAVF_ITR_50K 20 +#define IAVF_ITR_20K 50 +#define IAVF_ITR_18K 60 +#define IAVF_ITR_8K 122 +#define IAVF_MAX_ITR 8160 /* maximum value as per datasheet */ +#define ITR_TO_REG(setting) ((setting) & ~IAVF_ITR_DYNAMIC) +#define ITR_REG_ALIGN(setting) __ALIGN_MASK(setting, ~IAVF_ITR_MASK) +#define ITR_IS_DYNAMIC(setting) (!!((setting) & IAVF_ITR_DYNAMIC)) + +#define IAVF_ITR_RX_DEF (IAVF_ITR_20K | IAVF_ITR_DYNAMIC) +#define IAVF_ITR_TX_DEF (IAVF_ITR_20K | IAVF_ITR_DYNAMIC) /* 0x40 is the enable bit for interrupt rate limiting, and must be set if * the value of the rate limit is non-zero */ #define INTRL_ENA BIT(6) -#define I40E_MAX_INTRL 0x3B /* reg uses 4 usec resolution */ +#define IAVF_MAX_INTRL 0x3B /* reg uses 4 usec resolution */ #define INTRL_REG_TO_USEC(intrl) ((intrl & ~INTRL_ENA) << 2) #define INTRL_USEC_TO_REG(set) ((set) ? ((set) >> 2) | INTRL_ENA : 0) -#define I40E_INTRL_8K 125 /* 8000 ints/sec */ -#define I40E_INTRL_62K 16 /* 62500 ints/sec */ -#define I40E_INTRL_83K 12 /* 83333 ints/sec */ +#define IAVF_INTRL_8K 125 /* 8000 ints/sec */ +#define IAVF_INTRL_62K 16 /* 62500 ints/sec */ +#define IAVF_INTRL_83K 12 /* 83333 ints/sec */ -#define I40E_QUEUE_END_OF_LIST 0x7FF +#define IAVF_QUEUE_END_OF_LIST 0x7FF /* this enum matches hardware bits and is meant to be used by DYN_CTLN * registers and QINT registers or more generally anywhere in the manual * mentioning ITR_INDX, ITR_NONE cannot be used as an index 'n' into any * register but instead is a special value meaning "don't update" ITR0/1/2. 
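/*
 * Standalone sketch (not part of this patch) of the ITR setting encoding
 * defined above: bit 15 (IAVF_ITR_DYNAMIC) flags adaptive mode and the
 * low bits carry an even usec value that fits under IAVF_ITR_MASK.
 */
#include <assert.h>
#include <stdint.h>

#define MODEL_ITR_DYNAMIC	0x8000
#define MODEL_ITR_MASK		0x1FFE
#define MODEL_ITR_20K		50

int main(void)
{
	uint16_t setting = MODEL_ITR_20K | MODEL_ITR_DYNAMIC;

	assert(setting & MODEL_ITR_DYNAMIC);		/* ITR_IS_DYNAMIC */
	assert((setting & ~MODEL_ITR_DYNAMIC) == 50);	/* ITR_TO_REG */
	assert((50 & MODEL_ITR_MASK) == 50);		/* register-safe */
	return 0;
}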
*/ -enum i40e_dyn_idx_t { - I40E_IDX_ITR0 = 0, - I40E_IDX_ITR1 = 1, - I40E_IDX_ITR2 = 2, - I40E_ITR_NONE = 3 /* ITR_NONE must not be used as an index */ +enum iavf_dyn_idx_t { + IAVF_IDX_ITR0 = 0, + IAVF_IDX_ITR1 = 1, + IAVF_IDX_ITR2 = 2, + IAVF_ITR_NONE = 3 /* ITR_NONE must not be used as an index */ }; /* these are indexes into ITRN registers */ -#define I40E_RX_ITR I40E_IDX_ITR0 -#define I40E_TX_ITR I40E_IDX_ITR1 -#define I40E_PE_ITR I40E_IDX_ITR2 +#define IAVF_RX_ITR IAVF_IDX_ITR0 +#define IAVF_TX_ITR IAVF_IDX_ITR1 +#define IAVF_PE_ITR IAVF_IDX_ITR2 /* Supported RSS offloads */ -#define I40E_DEFAULT_RSS_HENA ( \ - BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \ - BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \ - BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \ - BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \ - BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \ - BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \ - BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \ - BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \ - BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \ - BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \ - BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD)) - -#define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \ - BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \ - BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \ - BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \ - BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \ - BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \ - BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP)) +#define IAVF_DEFAULT_RSS_HENA ( \ + BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_UDP) | \ + BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_SCTP) | \ + BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_TCP) | \ + BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_OTHER) | \ + BIT_ULL(IAVF_FILTER_PCTYPE_FRAG_IPV4) | \ + BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_UDP) | \ + BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_TCP) | \ + BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_SCTP) | \ + BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_OTHER) | \ + BIT_ULL(IAVF_FILTER_PCTYPE_FRAG_IPV6) | \ + BIT_ULL(IAVF_FILTER_PCTYPE_L2_PAYLOAD)) + +#define IAVF_DEFAULT_RSS_HENA_EXPANDED (IAVF_DEFAULT_RSS_HENA | \ + BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \ + BIT_ULL(IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \ + BIT_ULL(IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \ + BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \ + BIT_ULL(IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \ + BIT_ULL(IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP)) /* Supported Rx Buffer Sizes (a multiple of 128) */ -#define I40E_RXBUFFER_256 256 -#define I40E_RXBUFFER_1536 1536 /* 128B aligned standard Ethernet frame */ -#define I40E_RXBUFFER_2048 2048 -#define I40E_RXBUFFER_3072 3072 /* Used for large frames w/ padding */ -#define I40E_MAX_RXBUFFER 9728 /* largest size for single descriptor */ +#define IAVF_RXBUFFER_256 256 +#define IAVF_RXBUFFER_1536 1536 /* 128B aligned standard Ethernet frame */ +#define IAVF_RXBUFFER_2048 2048 +#define IAVF_RXBUFFER_3072 3072 /* Used for large frames w/ padding */ +#define IAVF_MAX_RXBUFFER 9728 /* largest size for single descriptor */ /* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we * reserve 2 more, and skb_shared_info adds an additional 384 bytes more, @@ -95,11 +95,11 @@ enum i40e_dyn_idx_t { * i.e. RXBUFFER_256 --> 960 byte skb (size-1024 slab) * i.e. 
RXBUFFER_512 --> 1216 byte skb (size-2048 slab) */ -#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256 -#define I40E_PACKET_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2)) -#define i40e_rx_desc i40e_32byte_rx_desc +#define IAVF_RX_HDR_SIZE IAVF_RXBUFFER_256 +#define IAVF_PACKET_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2)) +#define iavf_rx_desc iavf_32byte_rx_desc -#define I40E_RX_DMA_ATTR \ +#define IAVF_RX_DMA_ATTR \ (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) /* Attempt to maximize the headroom available for incoming frames. We @@ -113,10 +113,10 @@ enum i40e_dyn_idx_t { * receive path. */ #if (PAGE_SIZE < 8192) -#define I40E_2K_TOO_SMALL_WITH_PADDING \ -((NET_SKB_PAD + I40E_RXBUFFER_1536) > SKB_WITH_OVERHEAD(I40E_RXBUFFER_2048)) +#define IAVF_2K_TOO_SMALL_WITH_PADDING \ +((NET_SKB_PAD + IAVF_RXBUFFER_1536) > SKB_WITH_OVERHEAD(IAVF_RXBUFFER_2048)) -static inline int i40e_compute_pad(int rx_buf_len) +static inline int iavf_compute_pad(int rx_buf_len) { int page_size, pad_size; @@ -126,7 +126,7 @@ static inline int i40e_compute_pad(int rx_buf_len) return pad_size; } -static inline int i40e_skb_pad(void) +static inline int iavf_skb_pad(void) { int rx_buf_len; @@ -137,25 +137,25 @@ static inline int i40e_skb_pad(void) * tailroom due to NET_IP_ALIGN possibly shifting us out of * cache-line alignment. */ - if (I40E_2K_TOO_SMALL_WITH_PADDING) - rx_buf_len = I40E_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN); + if (IAVF_2K_TOO_SMALL_WITH_PADDING) + rx_buf_len = IAVF_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN); else - rx_buf_len = I40E_RXBUFFER_1536; + rx_buf_len = IAVF_RXBUFFER_1536; /* if needed make room for NET_IP_ALIGN */ rx_buf_len -= NET_IP_ALIGN; - return i40e_compute_pad(rx_buf_len); + return iavf_compute_pad(rx_buf_len); } -#define I40E_SKB_PAD i40e_skb_pad() +#define IAVF_SKB_PAD iavf_skb_pad() #else -#define I40E_2K_TOO_SMALL_WITH_PADDING false -#define I40E_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN) +#define IAVF_2K_TOO_SMALL_WITH_PADDING false +#define IAVF_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN) #endif /** - * i40e_test_staterr - tests bits in Rx descriptor status and error fields + * iavf_test_staterr - tests bits in Rx descriptor status and error fields * @rx_desc: pointer to receive descriptor (in le64 format) * @stat_err_bits: value to mask * @@ -164,7 +164,7 @@ static inline int i40e_skb_pad(void) * The status_error_len doesn't need to be shifted because it begins * at offset zero. */ -static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc, +static inline bool iavf_test_staterr(union iavf_rx_desc *rx_desc, const u64 stat_err_bits) { return !!(rx_desc->wb.qword1.status_error_len & @@ -172,8 +172,7 @@ static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc, } /* How many Rx Buffers do we bundle into one write to the hardware ? 
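/*
 * Standalone sketch (not part of this patch) of why iavf_test_staterr()
 * needs no shifting: status, error and length share one little-endian
 * qword and the status bits start at offset zero, so a plain mask test
 * is enough. Host is assumed little-endian; le64_to_cpu() is omitted.
 */
#include <assert.h>
#include <stdint.h>

#define MODEL_EOF_BIT	(1ULL << 1)	/* IAVF_RX_DESC_STATUS_EOF_SHIFT */

static int model_test_staterr(uint64_t status_error_len, uint64_t bits)
{
	return !!(status_error_len & bits);
}

int main(void)
{
	assert(model_test_staterr(MODEL_EOF_BIT, MODEL_EOF_BIT));
	assert(!model_test_staterr(0, MODEL_EOF_BIT));
	return 0;
}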
*/ -#define I40E_RX_BUFFER_WRITE 32 /* Must be power of 2 */ -#define I40E_RX_INCREMENT(r, i) \ +#define IAVF_RX_INCREMENT(r, i) \ do { \ (i)++; \ if ((i) == (r)->count) \ @@ -181,34 +180,34 @@ static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc, r->next_to_clean = i; \ } while (0) -#define I40E_RX_NEXT_DESC(r, i, n) \ +#define IAVF_RX_NEXT_DESC(r, i, n) \ do { \ (i)++; \ if ((i) == (r)->count) \ i = 0; \ - (n) = I40E_RX_DESC((r), (i)); \ + (n) = IAVF_RX_DESC((r), (i)); \ } while (0) -#define I40E_RX_NEXT_DESC_PREFETCH(r, i, n) \ +#define IAVF_RX_NEXT_DESC_PREFETCH(r, i, n) \ do { \ - I40E_RX_NEXT_DESC((r), (i), (n)); \ + IAVF_RX_NEXT_DESC((r), (i), (n)); \ prefetch((n)); \ } while (0) -#define I40E_MAX_BUFFER_TXD 8 -#define I40E_MIN_TX_LEN 17 +#define IAVF_MAX_BUFFER_TXD 8 +#define IAVF_MIN_TX_LEN 17 /* The size limit for a transmit buffer in a descriptor is (16K - 1). * In order to align with the read requests we will align the value to * the nearest 4K which represents our maximum read request size. */ -#define I40E_MAX_READ_REQ_SIZE 4096 -#define I40E_MAX_DATA_PER_TXD (16 * 1024 - 1) -#define I40E_MAX_DATA_PER_TXD_ALIGNED \ - (I40E_MAX_DATA_PER_TXD & ~(I40E_MAX_READ_REQ_SIZE - 1)) +#define IAVF_MAX_READ_REQ_SIZE 4096 +#define IAVF_MAX_DATA_PER_TXD (16 * 1024 - 1) +#define IAVF_MAX_DATA_PER_TXD_ALIGNED \ + (IAVF_MAX_DATA_PER_TXD & ~(IAVF_MAX_READ_REQ_SIZE - 1)) /** - * i40e_txd_use_count - estimate the number of descriptors needed for Tx + * iavf_txd_use_count - estimate the number of descriptors needed for Tx * @size: transmit request size in bytes * * Due to hardware alignment restrictions (4K alignment), we need to @@ -235,31 +234,31 @@ static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc, * operations into: * return ((size * 85) >> 20) + 1; */ -static inline unsigned int i40e_txd_use_count(unsigned int size) +static inline unsigned int iavf_txd_use_count(unsigned int size) { return ((size * 85) >> 20) + 1; } /* Tx Descriptors needed, worst case */ #define DESC_NEEDED (MAX_SKB_FRAGS + 6) -#define I40E_MIN_DESC_PENDING 4 - -#define I40E_TX_FLAGS_HW_VLAN BIT(1) -#define I40E_TX_FLAGS_SW_VLAN BIT(2) -#define I40E_TX_FLAGS_TSO BIT(3) -#define I40E_TX_FLAGS_IPV4 BIT(4) -#define I40E_TX_FLAGS_IPV6 BIT(5) -#define I40E_TX_FLAGS_FCCRC BIT(6) -#define I40E_TX_FLAGS_FSO BIT(7) -#define I40E_TX_FLAGS_FD_SB BIT(9) -#define I40E_TX_FLAGS_VXLAN_TUNNEL BIT(10) -#define I40E_TX_FLAGS_VLAN_MASK 0xffff0000 -#define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000 -#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29 -#define I40E_TX_FLAGS_VLAN_SHIFT 16 - -struct i40e_tx_buffer { - struct i40e_tx_desc *next_to_watch; +#define IAVF_MIN_DESC_PENDING 4 + +#define IAVF_TX_FLAGS_HW_VLAN BIT(1) +#define IAVF_TX_FLAGS_SW_VLAN BIT(2) +#define IAVF_TX_FLAGS_TSO BIT(3) +#define IAVF_TX_FLAGS_IPV4 BIT(4) +#define IAVF_TX_FLAGS_IPV6 BIT(5) +#define IAVF_TX_FLAGS_FCCRC BIT(6) +#define IAVF_TX_FLAGS_FSO BIT(7) +#define IAVF_TX_FLAGS_FD_SB BIT(9) +#define IAVF_TX_FLAGS_VXLAN_TUNNEL BIT(10) +#define IAVF_TX_FLAGS_VLAN_MASK 0xffff0000 +#define IAVF_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000 +#define IAVF_TX_FLAGS_VLAN_PRIO_SHIFT 29 +#define IAVF_TX_FLAGS_VLAN_SHIFT 16 + +struct iavf_tx_buffer { + struct iavf_tx_desc *next_to_watch; union { struct sk_buff *skb; void *raw_buf; @@ -272,7 +271,7 @@ struct i40e_tx_buffer { u32 tx_flags; }; -struct i40e_rx_buffer { +struct iavf_rx_buffer { dma_addr_t dma; struct page *page; #if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) @@ -283,12 +282,12 @@ struct i40e_rx_buffer { __u16 
pagecnt_bias; }; -struct i40e_queue_stats { +struct iavf_queue_stats { u64 packets; u64 bytes; }; -struct i40e_tx_queue_stats { +struct iavf_tx_queue_stats { u64 restart_queue; u64 tx_busy; u64 tx_done_old; @@ -298,7 +297,7 @@ struct i40e_tx_queue_stats { u64 tx_lost_interrupt; }; -struct i40e_rx_queue_stats { +struct iavf_rx_queue_stats { u64 non_eop_descs; u64 alloc_page_failed; u64 alloc_buff_failed; @@ -306,34 +305,34 @@ struct i40e_rx_queue_stats { u64 realloc_count; }; -enum i40e_ring_state_t { - __I40E_TX_FDIR_INIT_DONE, - __I40E_TX_XPS_INIT_DONE, - __I40E_RING_STATE_NBITS /* must be last */ +enum iavf_ring_state_t { + __IAVF_TX_FDIR_INIT_DONE, + __IAVF_TX_XPS_INIT_DONE, + __IAVF_RING_STATE_NBITS /* must be last */ }; /* some useful defines for virtchannel interface, which * is the only remaining user of header split */ -#define I40E_RX_DTYPE_NO_SPLIT 0 -#define I40E_RX_DTYPE_HEADER_SPLIT 1 -#define I40E_RX_DTYPE_SPLIT_ALWAYS 2 -#define I40E_RX_SPLIT_L2 0x1 -#define I40E_RX_SPLIT_IP 0x2 -#define I40E_RX_SPLIT_TCP_UDP 0x4 -#define I40E_RX_SPLIT_SCTP 0x8 +#define IAVF_RX_DTYPE_NO_SPLIT 0 +#define IAVF_RX_DTYPE_HEADER_SPLIT 1 +#define IAVF_RX_DTYPE_SPLIT_ALWAYS 2 +#define IAVF_RX_SPLIT_L2 0x1 +#define IAVF_RX_SPLIT_IP 0x2 +#define IAVF_RX_SPLIT_TCP_UDP 0x4 +#define IAVF_RX_SPLIT_SCTP 0x8 /* struct that defines a descriptor ring, associated with a VSI */ -struct i40e_ring { - struct i40e_ring *next; /* pointer to next ring in q_vector */ +struct iavf_ring { + struct iavf_ring *next; /* pointer to next ring in q_vector */ void *desc; /* Descriptor ring memory */ struct device *dev; /* Used for DMA mapping */ struct net_device *netdev; /* netdev ring maps to */ union { - struct i40e_tx_buffer *tx_bi; - struct i40e_rx_buffer *rx_bi; + struct iavf_tx_buffer *tx_bi; + struct iavf_rx_buffer *rx_bi; }; - DECLARE_BITMAP(state, __I40E_RING_STATE_NBITS); + DECLARE_BITMAP(state, __IAVF_RING_STATE_NBITS); u16 queue_index; /* Queue number of ring */ u8 dcb_tc; /* Traffic class of ring */ u8 __iomem *tail; @@ -361,59 +360,59 @@ struct i40e_ring { u8 packet_stride; u16 flags; -#define I40E_TXR_FLAGS_WB_ON_ITR BIT(0) -#define I40E_RXR_FLAGS_BUILD_SKB_ENABLED BIT(1) +#define IAVF_TXR_FLAGS_WB_ON_ITR BIT(0) +#define IAVF_RXR_FLAGS_BUILD_SKB_ENABLED BIT(1) /* stats structs */ - struct i40e_queue_stats stats; + struct iavf_queue_stats stats; struct u64_stats_sync syncp; union { - struct i40e_tx_queue_stats tx_stats; - struct i40e_rx_queue_stats rx_stats; + struct iavf_tx_queue_stats tx_stats; + struct iavf_rx_queue_stats rx_stats; }; unsigned int size; /* length of descriptor ring in bytes */ dma_addr_t dma; /* physical address of ring */ - struct i40e_vsi *vsi; /* Backreference to associated VSI */ - struct i40e_q_vector *q_vector; /* Backreference to associated vector */ + struct iavf_vsi *vsi; /* Backreference to associated VSI */ + struct iavf_q_vector *q_vector; /* Backreference to associated vector */ struct rcu_head rcu; /* to avoid race on free */ u16 next_to_alloc; - struct sk_buff *skb; /* When i40evf_clean_rx_ring_irq() must + struct sk_buff *skb; /* When iavf_clean_rx_ring_irq() must * return before it sees the EOP for * the current packet, we save that skb * here and resume receiving this * packet the next time - * i40evf_clean_rx_ring_irq() is called + * iavf_clean_rx_ring_irq() is called * for this ring. 
*/ } ____cacheline_internodealigned_in_smp; -static inline bool ring_uses_build_skb(struct i40e_ring *ring) +static inline bool ring_uses_build_skb(struct iavf_ring *ring) { - return !!(ring->flags & I40E_RXR_FLAGS_BUILD_SKB_ENABLED); + return !!(ring->flags & IAVF_RXR_FLAGS_BUILD_SKB_ENABLED); } -static inline void set_ring_build_skb_enabled(struct i40e_ring *ring) +static inline void set_ring_build_skb_enabled(struct iavf_ring *ring) { - ring->flags |= I40E_RXR_FLAGS_BUILD_SKB_ENABLED; + ring->flags |= IAVF_RXR_FLAGS_BUILD_SKB_ENABLED; } -static inline void clear_ring_build_skb_enabled(struct i40e_ring *ring) +static inline void clear_ring_build_skb_enabled(struct iavf_ring *ring) { - ring->flags &= ~I40E_RXR_FLAGS_BUILD_SKB_ENABLED; + ring->flags &= ~IAVF_RXR_FLAGS_BUILD_SKB_ENABLED; } -#define I40E_ITR_ADAPTIVE_MIN_INC 0x0002 -#define I40E_ITR_ADAPTIVE_MIN_USECS 0x0002 -#define I40E_ITR_ADAPTIVE_MAX_USECS 0x007e -#define I40E_ITR_ADAPTIVE_LATENCY 0x8000 -#define I40E_ITR_ADAPTIVE_BULK 0x0000 -#define ITR_IS_BULK(x) (!((x) & I40E_ITR_ADAPTIVE_LATENCY)) +#define IAVF_ITR_ADAPTIVE_MIN_INC 0x0002 +#define IAVF_ITR_ADAPTIVE_MIN_USECS 0x0002 +#define IAVF_ITR_ADAPTIVE_MAX_USECS 0x007e +#define IAVF_ITR_ADAPTIVE_LATENCY 0x8000 +#define IAVF_ITR_ADAPTIVE_BULK 0x0000 +#define ITR_IS_BULK(x) (!((x) & IAVF_ITR_ADAPTIVE_LATENCY)) -struct i40e_ring_container { - struct i40e_ring *ring; /* pointer to linked list of ring(s) */ +struct iavf_ring_container { + struct iavf_ring *ring; /* pointer to linked list of ring(s) */ unsigned long next_update; /* jiffies value of next update */ unsigned int total_bytes; /* total bytes processed this int */ unsigned int total_packets; /* total packets processed this int */ @@ -423,10 +422,10 @@ struct i40e_ring_container { }; /* iterator for handling rings in ring container */ -#define i40e_for_each_ring(pos, head) \ +#define iavf_for_each_ring(pos, head) \ for (pos = (head).ring; pos != NULL; pos = pos->next) -static inline unsigned int i40e_rx_pg_order(struct i40e_ring *ring) +static inline unsigned int iavf_rx_pg_order(struct iavf_ring *ring) { #if (PAGE_SIZE < 8192) if (ring->rx_buf_len > (PAGE_SIZE / 2)) @@ -435,25 +434,25 @@ static inline unsigned int i40e_rx_pg_order(struct i40e_ring *ring) return 0; } -#define i40e_rx_pg_size(_ring) (PAGE_SIZE << i40e_rx_pg_order(_ring)) - -bool i40evf_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count); -netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev); -void i40evf_clean_tx_ring(struct i40e_ring *tx_ring); -void i40evf_clean_rx_ring(struct i40e_ring *rx_ring); -int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring); -int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring); -void i40evf_free_tx_resources(struct i40e_ring *tx_ring); -void i40evf_free_rx_resources(struct i40e_ring *rx_ring); -int i40evf_napi_poll(struct napi_struct *napi, int budget); -void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector); -u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw); -void i40evf_detect_recover_hung(struct i40e_vsi *vsi); -int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size); -bool __i40evf_chk_linearize(struct sk_buff *skb); +#define iavf_rx_pg_size(_ring) (PAGE_SIZE << iavf_rx_pg_order(_ring)) + +bool iavf_alloc_rx_buffers(struct iavf_ring *rxr, u16 cleaned_count); +netdev_tx_t iavf_xmit_frame(struct sk_buff *skb, struct net_device *netdev); +void iavf_clean_tx_ring(struct iavf_ring *tx_ring); +void iavf_clean_rx_ring(struct iavf_ring *rx_ring); 
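/*
 * Standalone sketch (not part of this patch) of the iavf_for_each_ring()
 * iterator declared above: a ring container holds a singly linked list
 * of rings, and the macro is a plain for loop over the ->next pointers.
 */
#include <assert.h>
#include <stddef.h>

struct model_ring {
	struct model_ring *next;
};

struct model_ring_container {
	struct model_ring *ring;
};

#define model_for_each_ring(pos, head) \
	for (pos = (head).ring; pos != NULL; pos = pos->next)

int main(void)
{
	struct model_ring r1 = { .next = NULL };
	struct model_ring r0 = { .next = &r1 };
	struct model_ring_container cont = { .ring = &r0 };
	struct model_ring *pos;
	int n = 0;

	model_for_each_ring(pos, cont)
		n++;
	assert(n == 2);
	return 0;
}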
+int iavf_setup_tx_descriptors(struct iavf_ring *tx_ring); +int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring); +void iavf_free_tx_resources(struct iavf_ring *tx_ring); +void iavf_free_rx_resources(struct iavf_ring *rx_ring); +int iavf_napi_poll(struct napi_struct *napi, int budget); +void iavf_force_wb(struct iavf_vsi *vsi, struct iavf_q_vector *q_vector); +u32 iavf_get_tx_pending(struct iavf_ring *ring, bool in_sw); +void iavf_detect_recover_hung(struct iavf_vsi *vsi); +int __iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size); +bool __iavf_chk_linearize(struct sk_buff *skb); /** - * i40e_xmit_descriptor_count - calculate number of Tx descriptors needed + * iavf_xmit_descriptor_count - calculate number of Tx descriptors needed * @skb: send buffer * @tx_ring: ring to send buffer on * @@ -461,14 +460,14 @@ bool __i40evf_chk_linearize(struct sk_buff *skb); * there is not enough descriptors available in this ring since we need at least * one descriptor. **/ -static inline int i40e_xmit_descriptor_count(struct sk_buff *skb) +static inline int iavf_xmit_descriptor_count(struct sk_buff *skb) { const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; unsigned int nr_frags = skb_shinfo(skb)->nr_frags; int count = 0, size = skb_headlen(skb); for (;;) { - count += i40e_txd_use_count(size); + count += iavf_txd_use_count(size); if (!nr_frags--) break; @@ -480,21 +479,21 @@ static inline int i40e_xmit_descriptor_count(struct sk_buff *skb) } /** - * i40e_maybe_stop_tx - 1st level check for Tx stop conditions + * iavf_maybe_stop_tx - 1st level check for Tx stop conditions * @tx_ring: the ring to be checked * @size: the size buffer we want to assure is available * * Returns 0 if stop is not needed **/ -static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) +static inline int iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size) { - if (likely(I40E_DESC_UNUSED(tx_ring) >= size)) + if (likely(IAVF_DESC_UNUSED(tx_ring) >= size)) return 0; - return __i40evf_maybe_stop_tx(tx_ring, size); + return __iavf_maybe_stop_tx(tx_ring, size); } /** - * i40e_chk_linearize - Check if there are more than 8 fragments per packet + * iavf_chk_linearize - Check if there are more than 8 fragments per packet * @skb: send buffer * @count: number of buffers used * @@ -502,23 +501,23 @@ static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) * a packet on the wire and so we need to figure out the cases where we * need to linearize the skb. 
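/*
 * Standalone decision-table sketch (not part of this patch) for
 * iavf_chk_linearize() below: fewer than 8 buffers always fits, a GSO
 * skb defers to the fragment walk, and a non-GSO skb fits only when it
 * uses exactly the 8-buffer maximum.
 */
#include <assert.h>
#include <stdbool.h>

#define MODEL_MAX_BUFFER_TXD	8

/* gso_walk stands in for the __iavf_chk_linearize() fragment walk */
static bool model_chk_linearize(int count, bool is_gso, bool gso_walk)
{
	if (count < MODEL_MAX_BUFFER_TXD)
		return false;
	if (is_gso)
		return gso_walk;
	return count != MODEL_MAX_BUFFER_TXD;
}

int main(void)
{
	assert(!model_chk_linearize(7, false, false));
	assert(!model_chk_linearize(8, false, false)); /* exactly 8 is OK */
	assert(model_chk_linearize(9, false, false));  /* must linearize */
	return 0;
}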
**/ -static inline bool i40e_chk_linearize(struct sk_buff *skb, int count) +static inline bool iavf_chk_linearize(struct sk_buff *skb, int count) { /* Both TSO and single send will work if count is less than 8 */ - if (likely(count < I40E_MAX_BUFFER_TXD)) + if (likely(count < IAVF_MAX_BUFFER_TXD)) return false; if (skb_is_gso(skb)) - return __i40evf_chk_linearize(skb); + return __iavf_chk_linearize(skb); /* we can support up to 8 data buffers for a single send */ - return count != I40E_MAX_BUFFER_TXD; + return count != IAVF_MAX_BUFFER_TXD; } /** * @ring: Tx ring to find the netdev equivalent of **/ -static inline struct netdev_queue *txring_txq(const struct i40e_ring *ring) +static inline struct netdev_queue *txring_txq(const struct iavf_ring *ring) { return netdev_get_tx_queue(ring->netdev, ring->queue_index); } -#endif /* _I40E_TXRX_H_ */ +#endif /* _IAVF_TXRX_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_type.h b/drivers/net/ethernet/intel/iavf/iavf_type.h new file mode 100644 index 000000000000..ca89583613fb --- /dev/null +++ b/drivers/net/ethernet/intel/iavf/iavf_type.h @@ -0,0 +1,688 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ + +#ifndef _IAVF_TYPE_H_ +#define _IAVF_TYPE_H_ + +#include "iavf_status.h" +#include "iavf_osdep.h" +#include "iavf_register.h" +#include "i40e_adminq.h" +#include "iavf_devids.h" + +#define IAVF_RXQ_CTX_DBUFF_SHIFT 7 + +/* IAVF_MASK is a macro used on 32 bit registers */ +#define IAVF_MASK(mask, shift) ((u32)(mask) << (shift)) + +#define IAVF_MAX_VSI_QP 16 +#define IAVF_MAX_VF_VSI 3 +#define IAVF_MAX_CHAINED_RX_BUFFERS 5 + +/* forward declaration */ +struct iavf_hw; +typedef void (*I40E_ADMINQ_CALLBACK)(struct iavf_hw *, struct i40e_aq_desc *); + +/* Data type manipulation macros. */ + +#define IAVF_DESC_UNUSED(R) \ + ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \ + (R)->next_to_clean - (R)->next_to_use - 1) + +/* bitfields for Tx queue mapping in QTX_CTL */ +#define IAVF_QTX_CTL_VF_QUEUE 0x0 +#define IAVF_QTX_CTL_VM_QUEUE 0x1 +#define IAVF_QTX_CTL_PF_QUEUE 0x2 + +/* debug masks - set these bits in hw->debug_mask to control output */ +enum iavf_debug_mask { + IAVF_DEBUG_INIT = 0x00000001, + IAVF_DEBUG_RELEASE = 0x00000002, + + IAVF_DEBUG_LINK = 0x00000010, + IAVF_DEBUG_PHY = 0x00000020, + IAVF_DEBUG_HMC = 0x00000040, + IAVF_DEBUG_NVM = 0x00000080, + IAVF_DEBUG_LAN = 0x00000100, + IAVF_DEBUG_FLOW = 0x00000200, + IAVF_DEBUG_DCB = 0x00000400, + IAVF_DEBUG_DIAG = 0x00000800, + IAVF_DEBUG_FD = 0x00001000, + IAVF_DEBUG_PACKAGE = 0x00002000, + + IAVF_DEBUG_AQ_MESSAGE = 0x01000000, + IAVF_DEBUG_AQ_DESCRIPTOR = 0x02000000, + IAVF_DEBUG_AQ_DESC_BUFFER = 0x04000000, + IAVF_DEBUG_AQ_COMMAND = 0x06000000, + IAVF_DEBUG_AQ = 0x0F000000, + + IAVF_DEBUG_USER = 0xF0000000, + + IAVF_DEBUG_ALL = 0xFFFFFFFF +}; + +/* These are structs for managing the hardware information and the operations. + * The structures of function pointers are filled out at init time when we + * know for sure exactly which hardware we're working with. This gives us the + * flexibility of using the same main driver code but adapting to slightly + * different hardware needs as new parts are developed. For this architecture, + * the Firmware and AdminQ are intended to insulate the driver from most of the + * future changes, but these structures will also do part of the job. 
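/*
 * Standalone worked example (not part of this patch) for the
 * IAVF_DESC_UNUSED() macro defined above. One slot is always left empty
 * (the trailing "- 1") so a full ring can be told apart from an empty
 * one.
 */
#include <assert.h>

struct model_ring {
	unsigned int count;
	unsigned int next_to_clean;
	unsigned int next_to_use;
};

#define MODEL_DESC_UNUSED(R) \
	((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
	 (R)->next_to_clean - (R)->next_to_use - 1)

int main(void)
{
	struct model_ring r = {
		.count = 512, .next_to_clean = 10, .next_to_use = 500,
	};

	assert(MODEL_DESC_UNUSED(&r) == 21);	/* use runs ahead of clean */
	r.next_to_clean = 500;
	r.next_to_use = 10;
	assert(MODEL_DESC_UNUSED(&r) == 489);	/* use has wrapped around */
	return 0;
}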
+ */ +enum iavf_mac_type { + IAVF_MAC_UNKNOWN = 0, + IAVF_MAC_XL710, + IAVF_MAC_VF, + IAVF_MAC_X722, + IAVF_MAC_X722_VF, + IAVF_MAC_GENERIC, +}; + +enum iavf_vsi_type { + IAVF_VSI_MAIN = 0, + IAVF_VSI_VMDQ1 = 1, + IAVF_VSI_VMDQ2 = 2, + IAVF_VSI_CTRL = 3, + IAVF_VSI_FCOE = 4, + IAVF_VSI_MIRROR = 5, + IAVF_VSI_SRIOV = 6, + IAVF_VSI_FDIR = 7, + IAVF_VSI_TYPE_UNKNOWN +}; + +enum iavf_queue_type { + IAVF_QUEUE_TYPE_RX = 0, + IAVF_QUEUE_TYPE_TX, + IAVF_QUEUE_TYPE_PE_CEQ, + IAVF_QUEUE_TYPE_UNKNOWN +}; + +#define IAVF_HW_CAP_MAX_GPIO 30 +/* Capabilities of a PF or a VF or the whole device */ +struct iavf_hw_capabilities { + bool dcb; + bool fcoe; + u32 num_vsis; + u32 num_rx_qp; + u32 num_tx_qp; + u32 base_queue; + u32 num_msix_vectors_vf; +}; + +struct iavf_mac_info { + enum iavf_mac_type type; + u8 addr[ETH_ALEN]; + u8 perm_addr[ETH_ALEN]; + u8 san_addr[ETH_ALEN]; + u16 max_fcoeq; +}; + +/* PCI bus types */ +enum iavf_bus_type { + iavf_bus_type_unknown = 0, + iavf_bus_type_pci, + iavf_bus_type_pcix, + iavf_bus_type_pci_express, + iavf_bus_type_reserved +}; + +/* PCI bus speeds */ +enum iavf_bus_speed { + iavf_bus_speed_unknown = 0, + iavf_bus_speed_33 = 33, + iavf_bus_speed_66 = 66, + iavf_bus_speed_100 = 100, + iavf_bus_speed_120 = 120, + iavf_bus_speed_133 = 133, + iavf_bus_speed_2500 = 2500, + iavf_bus_speed_5000 = 5000, + iavf_bus_speed_8000 = 8000, + iavf_bus_speed_reserved +}; + +/* PCI bus widths */ +enum iavf_bus_width { + iavf_bus_width_unknown = 0, + iavf_bus_width_pcie_x1 = 1, + iavf_bus_width_pcie_x2 = 2, + iavf_bus_width_pcie_x4 = 4, + iavf_bus_width_pcie_x8 = 8, + iavf_bus_width_32 = 32, + iavf_bus_width_64 = 64, + iavf_bus_width_reserved +}; + +/* Bus parameters */ +struct iavf_bus_info { + enum iavf_bus_speed speed; + enum iavf_bus_width width; + enum iavf_bus_type type; + + u16 func; + u16 device; + u16 lan_id; + u16 bus_id; +}; + +#define IAVF_MAX_USER_PRIORITY 8 +/* Port hardware description */ +struct iavf_hw { + u8 __iomem *hw_addr; + void *back; + + /* subsystem structs */ + struct iavf_mac_info mac; + struct iavf_bus_info bus; + + /* pci info */ + u16 device_id; + u16 vendor_id; + u16 subsystem_device_id; + u16 subsystem_vendor_id; + u8 revision_id; + + /* capabilities for entire device and PCI func */ + struct iavf_hw_capabilities dev_caps; + + /* Admin Queue info */ + struct iavf_adminq_info aq; + + /* debug mask */ + u32 debug_mask; + char err_str[16]; +}; + +struct iavf_driver_version { + u8 major_version; + u8 minor_version; + u8 build_version; + u8 subbuild_version; + u8 driver_string[32]; +}; + +/* RX Descriptors */ +union iavf_16byte_rx_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + } read; + struct { + struct { + struct { + union { + __le16 mirroring_status; + __le16 fcoe_ctx_id; + } mirr_fcoe; + __le16 l2tag1; + } lo_dword; + union { + __le32 rss; /* RSS Hash */ + __le32 fd_id; /* Flow director filter id */ + __le32 fcoe_param; /* FCoE DDP Context id */ + } hi_dword; + } qword0; + struct { + /* ext status/error/pktype/length */ + __le64 status_error_len; + } qword1; + } wb; /* writeback */ +}; + +union iavf_32byte_rx_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + /* bit 0 of hdr_buffer_addr is DD bit */ + __le64 rsvd1; + __le64 rsvd2; + } read; + struct { + struct { + struct { + union { + __le16 mirroring_status; + __le16 fcoe_ctx_id; + } mirr_fcoe; + __le16 l2tag1; + } lo_dword; + union { + __le32 rss; /* RSS Hash */ + __le32 
fcoe_param; /* FCoE DDP Context id */ + /* Flow director filter id in case of + * Programming status desc WB + */ + __le32 fd_id; + } hi_dword; + } qword0; + struct { + /* status/error/pktype/length */ + __le64 status_error_len; + } qword1; + struct { + __le16 ext_status; /* extended status */ + __le16 rsvd; + __le16 l2tag2_1; + __le16 l2tag2_2; + } qword2; + struct { + union { + __le32 flex_bytes_lo; + __le32 pe_status; + } lo_dword; + union { + __le32 flex_bytes_hi; + __le32 fd_id; + } hi_dword; + } qword3; + } wb; /* writeback */ +}; + +enum iavf_rx_desc_status_bits { + /* Note: These are predefined bit offsets */ + IAVF_RX_DESC_STATUS_DD_SHIFT = 0, + IAVF_RX_DESC_STATUS_EOF_SHIFT = 1, + IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT = 2, + IAVF_RX_DESC_STATUS_L3L4P_SHIFT = 3, + IAVF_RX_DESC_STATUS_CRCP_SHIFT = 4, + IAVF_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */ + IAVF_RX_DESC_STATUS_TSYNVALID_SHIFT = 7, + /* Note: Bit 8 is reserved in X710 and XL710 */ + IAVF_RX_DESC_STATUS_EXT_UDP_0_SHIFT = 8, + IAVF_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */ + IAVF_RX_DESC_STATUS_FLM_SHIFT = 11, + IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */ + IAVF_RX_DESC_STATUS_LPBK_SHIFT = 14, + IAVF_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15, + IAVF_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */ + /* Note: For non-tunnel packets INT_UDP_0 is the right status for + * UDP header + */ + IAVF_RX_DESC_STATUS_INT_UDP_0_SHIFT = 18, + IAVF_RX_DESC_STATUS_LAST /* this entry must be last!!! */ +}; + +#define IAVF_RXD_QW1_STATUS_SHIFT 0 +#define IAVF_RXD_QW1_STATUS_MASK ((BIT(IAVF_RX_DESC_STATUS_LAST) - 1) \ + << IAVF_RXD_QW1_STATUS_SHIFT) + +#define IAVF_RXD_QW1_STATUS_TSYNINDX_SHIFT IAVF_RX_DESC_STATUS_TSYNINDX_SHIFT +#define IAVF_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \ + IAVF_RXD_QW1_STATUS_TSYNINDX_SHIFT) + +#define IAVF_RXD_QW1_STATUS_TSYNVALID_SHIFT IAVF_RX_DESC_STATUS_TSYNVALID_SHIFT +#define IAVF_RXD_QW1_STATUS_TSYNVALID_MASK \ + BIT_ULL(IAVF_RXD_QW1_STATUS_TSYNVALID_SHIFT) + +enum iavf_rx_desc_fltstat_values { + IAVF_RX_DESC_FLTSTAT_NO_DATA = 0, + IAVF_RX_DESC_FLTSTAT_RSV_FD_ID = 1, /* 16byte desc? 
FD_ID : RSV */ + IAVF_RX_DESC_FLTSTAT_RSV = 2, + IAVF_RX_DESC_FLTSTAT_RSS_HASH = 3, +}; + +#define IAVF_RXD_QW1_ERROR_SHIFT 19 +#define IAVF_RXD_QW1_ERROR_MASK (0xFFUL << IAVF_RXD_QW1_ERROR_SHIFT) + +enum iavf_rx_desc_error_bits { + /* Note: These are predefined bit offsets */ + IAVF_RX_DESC_ERROR_RXE_SHIFT = 0, + IAVF_RX_DESC_ERROR_RECIPE_SHIFT = 1, + IAVF_RX_DESC_ERROR_HBO_SHIFT = 2, + IAVF_RX_DESC_ERROR_L3L4E_SHIFT = 3, /* 3 BITS */ + IAVF_RX_DESC_ERROR_IPE_SHIFT = 3, + IAVF_RX_DESC_ERROR_L4E_SHIFT = 4, + IAVF_RX_DESC_ERROR_EIPE_SHIFT = 5, + IAVF_RX_DESC_ERROR_OVERSIZE_SHIFT = 6, + IAVF_RX_DESC_ERROR_PPRS_SHIFT = 7 +}; + +enum iavf_rx_desc_error_l3l4e_fcoe_masks { + IAVF_RX_DESC_ERROR_L3L4E_NONE = 0, + IAVF_RX_DESC_ERROR_L3L4E_PROT = 1, + IAVF_RX_DESC_ERROR_L3L4E_FC = 2, + IAVF_RX_DESC_ERROR_L3L4E_DMAC_ERR = 3, + IAVF_RX_DESC_ERROR_L3L4E_DMAC_WARN = 4 +}; + +#define IAVF_RXD_QW1_PTYPE_SHIFT 30 +#define IAVF_RXD_QW1_PTYPE_MASK (0xFFULL << IAVF_RXD_QW1_PTYPE_SHIFT) + +/* Packet type non-ip values */ +enum iavf_rx_l2_ptype { + IAVF_RX_PTYPE_L2_RESERVED = 0, + IAVF_RX_PTYPE_L2_MAC_PAY2 = 1, + IAVF_RX_PTYPE_L2_TIMESYNC_PAY2 = 2, + IAVF_RX_PTYPE_L2_FIP_PAY2 = 3, + IAVF_RX_PTYPE_L2_OUI_PAY2 = 4, + IAVF_RX_PTYPE_L2_MACCNTRL_PAY2 = 5, + IAVF_RX_PTYPE_L2_LLDP_PAY2 = 6, + IAVF_RX_PTYPE_L2_ECP_PAY2 = 7, + IAVF_RX_PTYPE_L2_EVB_PAY2 = 8, + IAVF_RX_PTYPE_L2_QCN_PAY2 = 9, + IAVF_RX_PTYPE_L2_EAPOL_PAY2 = 10, + IAVF_RX_PTYPE_L2_ARP = 11, + IAVF_RX_PTYPE_L2_FCOE_PAY3 = 12, + IAVF_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13, + IAVF_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14, + IAVF_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15, + IAVF_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16, + IAVF_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17, + IAVF_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18, + IAVF_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19, + IAVF_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20, + IAVF_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21, + IAVF_RX_PTYPE_GRENAT4_MAC_PAY3 = 58, + IAVF_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4 = 87, + IAVF_RX_PTYPE_GRENAT6_MAC_PAY3 = 124, + IAVF_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4 = 153 +}; + +struct iavf_rx_ptype_decoded { + u32 ptype:8; + u32 known:1; + u32 outer_ip:1; + u32 outer_ip_ver:1; + u32 outer_frag:1; + u32 tunnel_type:3; + u32 tunnel_end_prot:2; + u32 tunnel_end_frag:1; + u32 inner_prot:4; + u32 payload_layer:3; +}; + +enum iavf_rx_ptype_outer_ip { + IAVF_RX_PTYPE_OUTER_L2 = 0, + IAVF_RX_PTYPE_OUTER_IP = 1 +}; + +enum iavf_rx_ptype_outer_ip_ver { + IAVF_RX_PTYPE_OUTER_NONE = 0, + IAVF_RX_PTYPE_OUTER_IPV4 = 0, + IAVF_RX_PTYPE_OUTER_IPV6 = 1 +}; + +enum iavf_rx_ptype_outer_fragmented { + IAVF_RX_PTYPE_NOT_FRAG = 0, + IAVF_RX_PTYPE_FRAG = 1 +}; + +enum iavf_rx_ptype_tunnel_type { + IAVF_RX_PTYPE_TUNNEL_NONE = 0, + IAVF_RX_PTYPE_TUNNEL_IP_IP = 1, + IAVF_RX_PTYPE_TUNNEL_IP_GRENAT = 2, + IAVF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3, + IAVF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4, +}; + +enum iavf_rx_ptype_tunnel_end_prot { + IAVF_RX_PTYPE_TUNNEL_END_NONE = 0, + IAVF_RX_PTYPE_TUNNEL_END_IPV4 = 1, + IAVF_RX_PTYPE_TUNNEL_END_IPV6 = 2, +}; + +enum iavf_rx_ptype_inner_prot { + IAVF_RX_PTYPE_INNER_PROT_NONE = 0, + IAVF_RX_PTYPE_INNER_PROT_UDP = 1, + IAVF_RX_PTYPE_INNER_PROT_TCP = 2, + IAVF_RX_PTYPE_INNER_PROT_SCTP = 3, + IAVF_RX_PTYPE_INNER_PROT_ICMP = 4, + IAVF_RX_PTYPE_INNER_PROT_TIMESYNC = 5 +}; + +enum iavf_rx_ptype_payload_layer { + IAVF_RX_PTYPE_PAYLOAD_LAYER_NONE = 0, + IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1, + IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2, + IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3, +}; + +#define IAVF_RXD_QW1_LENGTH_PBUF_SHIFT 38 +#define IAVF_RXD_QW1_LENGTH_PBUF_MASK 
(0x3FFFULL << \ + IAVF_RXD_QW1_LENGTH_PBUF_SHIFT) + +#define IAVF_RXD_QW1_LENGTH_HBUF_SHIFT 52 +#define IAVF_RXD_QW1_LENGTH_HBUF_MASK (0x7FFULL << \ + IAVF_RXD_QW1_LENGTH_HBUF_SHIFT) + +#define IAVF_RXD_QW1_LENGTH_SPH_SHIFT 63 +#define IAVF_RXD_QW1_LENGTH_SPH_MASK BIT_ULL(IAVF_RXD_QW1_LENGTH_SPH_SHIFT) + +enum iavf_rx_desc_ext_status_bits { + /* Note: These are predefined bit offsets */ + IAVF_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT = 0, + IAVF_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT = 1, + IAVF_RX_DESC_EXT_STATUS_FLEXBL_SHIFT = 2, /* 2 BITS */ + IAVF_RX_DESC_EXT_STATUS_FLEXBH_SHIFT = 4, /* 2 BITS */ + IAVF_RX_DESC_EXT_STATUS_FDLONGB_SHIFT = 9, + IAVF_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT = 10, + IAVF_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11, +}; + +enum iavf_rx_desc_pe_status_bits { + /* Note: These are predefined bit offsets */ + IAVF_RX_DESC_PE_STATUS_QPID_SHIFT = 0, /* 18 BITS */ + IAVF_RX_DESC_PE_STATUS_L4PORT_SHIFT = 0, /* 16 BITS */ + IAVF_RX_DESC_PE_STATUS_IPINDEX_SHIFT = 16, /* 8 BITS */ + IAVF_RX_DESC_PE_STATUS_QPIDHIT_SHIFT = 24, + IAVF_RX_DESC_PE_STATUS_APBVTHIT_SHIFT = 25, + IAVF_RX_DESC_PE_STATUS_PORTV_SHIFT = 26, + IAVF_RX_DESC_PE_STATUS_URG_SHIFT = 27, + IAVF_RX_DESC_PE_STATUS_IPFRAG_SHIFT = 28, + IAVF_RX_DESC_PE_STATUS_IPOPT_SHIFT = 29 +}; + +#define IAVF_RX_PROG_STATUS_DESC_LENGTH_SHIFT 38 +#define IAVF_RX_PROG_STATUS_DESC_LENGTH 0x2000000 + +#define IAVF_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT 2 +#define IAVF_RX_PROG_STATUS_DESC_QW1_PROGID_MASK (0x7UL << \ + IAVF_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT) + +#define IAVF_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT 19 +#define IAVF_RX_PROG_STATUS_DESC_QW1_ERROR_MASK (0x3FUL << \ + IAVF_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT) + +enum iavf_rx_prog_status_desc_status_bits { + /* Note: These are predefined bit offsets */ + IAVF_RX_PROG_STATUS_DESC_DD_SHIFT = 0, + IAVF_RX_PROG_STATUS_DESC_PROG_ID_SHIFT = 2 /* 3 BITS */ +}; + +enum iavf_rx_prog_status_desc_prog_id_masks { + IAVF_RX_PROG_STATUS_DESC_FD_FILTER_STATUS = 1, + IAVF_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS = 2, + IAVF_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS = 4, +}; + +enum iavf_rx_prog_status_desc_error_bits { + /* Note: These are predefined bit offsets */ + IAVF_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT = 0, + IAVF_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT = 1, + IAVF_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT = 2, + IAVF_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT = 3 +}; + +/* TX Descriptor */ +struct iavf_tx_desc { + __le64 buffer_addr; /* Address of descriptor's data buf */ + __le64 cmd_type_offset_bsz; +}; + +#define IAVF_TXD_QW1_DTYPE_SHIFT 0 +#define IAVF_TXD_QW1_DTYPE_MASK (0xFUL << IAVF_TXD_QW1_DTYPE_SHIFT) + +enum iavf_tx_desc_dtype_value { + IAVF_TX_DESC_DTYPE_DATA = 0x0, + IAVF_TX_DESC_DTYPE_NOP = 0x1, /* same as Context desc */ + IAVF_TX_DESC_DTYPE_CONTEXT = 0x1, + IAVF_TX_DESC_DTYPE_FCOE_CTX = 0x2, + IAVF_TX_DESC_DTYPE_FILTER_PROG = 0x8, + IAVF_TX_DESC_DTYPE_DDP_CTX = 0x9, + IAVF_TX_DESC_DTYPE_FLEX_DATA = 0xB, + IAVF_TX_DESC_DTYPE_FLEX_CTX_1 = 0xC, + IAVF_TX_DESC_DTYPE_FLEX_CTX_2 = 0xD, + IAVF_TX_DESC_DTYPE_DESC_DONE = 0xF +}; + +#define IAVF_TXD_QW1_CMD_SHIFT 4 +#define IAVF_TXD_QW1_CMD_MASK (0x3FFUL << IAVF_TXD_QW1_CMD_SHIFT) + +enum iavf_tx_desc_cmd_bits { + IAVF_TX_DESC_CMD_EOP = 0x0001, + IAVF_TX_DESC_CMD_RS = 0x0002, + IAVF_TX_DESC_CMD_ICRC = 0x0004, + IAVF_TX_DESC_CMD_IL2TAG1 = 0x0008, + IAVF_TX_DESC_CMD_DUMMY = 0x0010, + IAVF_TX_DESC_CMD_IIPT_NONIP = 0x0000, /* 2 BITS */ + IAVF_TX_DESC_CMD_IIPT_IPV6 = 0x0020, /* 2 BITS */ + IAVF_TX_DESC_CMD_IIPT_IPV4 = 0x0040, /* 2 BITS */ + 
IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060, /* 2 BITS */ + IAVF_TX_DESC_CMD_FCOET = 0x0080, + IAVF_TX_DESC_CMD_L4T_EOFT_UNK = 0x0000, /* 2 BITS */ + IAVF_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100, /* 2 BITS */ + IAVF_TX_DESC_CMD_L4T_EOFT_SCTP = 0x0200, /* 2 BITS */ + IAVF_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300, /* 2 BITS */ + IAVF_TX_DESC_CMD_L4T_EOFT_EOF_N = 0x0000, /* 2 BITS */ + IAVF_TX_DESC_CMD_L4T_EOFT_EOF_T = 0x0100, /* 2 BITS */ + IAVF_TX_DESC_CMD_L4T_EOFT_EOF_NI = 0x0200, /* 2 BITS */ + IAVF_TX_DESC_CMD_L4T_EOFT_EOF_A = 0x0300, /* 2 BITS */ +}; + +#define IAVF_TXD_QW1_OFFSET_SHIFT 16 +#define IAVF_TXD_QW1_OFFSET_MASK (0x3FFFFULL << \ + IAVF_TXD_QW1_OFFSET_SHIFT) + +enum iavf_tx_desc_length_fields { + /* Note: These are predefined bit offsets */ + IAVF_TX_DESC_LENGTH_MACLEN_SHIFT = 0, /* 7 BITS */ + IAVF_TX_DESC_LENGTH_IPLEN_SHIFT = 7, /* 7 BITS */ + IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT = 14 /* 4 BITS */ +}; + +#define IAVF_TXD_QW1_TX_BUF_SZ_SHIFT 34 +#define IAVF_TXD_QW1_TX_BUF_SZ_MASK (0x3FFFULL << \ + IAVF_TXD_QW1_TX_BUF_SZ_SHIFT) + +#define IAVF_TXD_QW1_L2TAG1_SHIFT 48 +#define IAVF_TXD_QW1_L2TAG1_MASK (0xFFFFULL << IAVF_TXD_QW1_L2TAG1_SHIFT) + +/* Context descriptors */ +struct iavf_tx_context_desc { + __le32 tunneling_params; + __le16 l2tag2; + __le16 rsvd; + __le64 type_cmd_tso_mss; +}; + +#define IAVF_TXD_CTX_QW1_CMD_SHIFT 4 +#define IAVF_TXD_CTX_QW1_CMD_MASK (0xFFFFUL << IAVF_TXD_CTX_QW1_CMD_SHIFT) + +enum iavf_tx_ctx_desc_cmd_bits { + IAVF_TX_CTX_DESC_TSO = 0x01, + IAVF_TX_CTX_DESC_TSYN = 0x02, + IAVF_TX_CTX_DESC_IL2TAG2 = 0x04, + IAVF_TX_CTX_DESC_IL2TAG2_IL2H = 0x08, + IAVF_TX_CTX_DESC_SWTCH_NOTAG = 0x00, + IAVF_TX_CTX_DESC_SWTCH_UPLINK = 0x10, + IAVF_TX_CTX_DESC_SWTCH_LOCAL = 0x20, + IAVF_TX_CTX_DESC_SWTCH_VSI = 0x30, + IAVF_TX_CTX_DESC_SWPE = 0x40 +}; + +/* Packet Classifier Types for filters */ +enum iavf_filter_pctype { + /* Note: Values 0-28 are reserved for future use. + * Value 29, 30, 32 are not supported on XL710 and X710. + */ + IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29, + IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30, + IAVF_FILTER_PCTYPE_NONF_IPV4_UDP = 31, + IAVF_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK = 32, + IAVF_FILTER_PCTYPE_NONF_IPV4_TCP = 33, + IAVF_FILTER_PCTYPE_NONF_IPV4_SCTP = 34, + IAVF_FILTER_PCTYPE_NONF_IPV4_OTHER = 35, + IAVF_FILTER_PCTYPE_FRAG_IPV4 = 36, + /* Note: Values 37-38 are reserved for future use. + * Value 39, 40, 42 are not supported on XL710 and X710. 
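The QW1 shift/mask pairs above all describe a single 64-bit word, cmd_type_offset_bsz, which the Tx path composes for every data descriptor. A minimal host-endian sketch of the packing (the driver writes the result as little-endian; names here are illustrative):

#include <stdio.h>
#include <stdint.h>

#define QW1_DTYPE_SHIFT      0	/* descriptor type */
#define QW1_CMD_SHIFT        4	/* EOP/RS/checksum command bits */
#define QW1_OFFSET_SHIFT     16	/* MAC/IP/L4 header lengths */
#define QW1_TX_BUF_SZ_SHIFT  34	/* buffer length */
#define QW1_L2TAG1_SHIFT     48	/* VLAN tag */

static uint64_t build_qw1(uint64_t dtype, uint64_t cmd, uint64_t offset,
			  uint64_t size, uint64_t l2tag1)
{
	return (dtype << QW1_DTYPE_SHIFT) | (cmd << QW1_CMD_SHIFT) |
	       (offset << QW1_OFFSET_SHIFT) | (size << QW1_TX_BUF_SZ_SHIFT) |
	       (l2tag1 << QW1_L2TAG1_SHIFT);
}

int main(void)
{
	/* data descriptor, EOP|RS (0x3), no offsets, 1514-byte buffer */
	printf("qw1=0x%016llx\n",
	       (unsigned long long)build_qw1(0x0, 0x3, 0, 1514, 0));
	return 0;
}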
+ */ + IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39, + IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40, + IAVF_FILTER_PCTYPE_NONF_IPV6_UDP = 41, + IAVF_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK = 42, + IAVF_FILTER_PCTYPE_NONF_IPV6_TCP = 43, + IAVF_FILTER_PCTYPE_NONF_IPV6_SCTP = 44, + IAVF_FILTER_PCTYPE_NONF_IPV6_OTHER = 45, + IAVF_FILTER_PCTYPE_FRAG_IPV6 = 46, + /* Note: Value 47 is reserved for future use */ + IAVF_FILTER_PCTYPE_FCOE_OX = 48, + IAVF_FILTER_PCTYPE_FCOE_RX = 49, + IAVF_FILTER_PCTYPE_FCOE_OTHER = 50, + /* Note: Values 51-62 are reserved for future use */ + IAVF_FILTER_PCTYPE_L2_PAYLOAD = 63, +}; + +#define IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT 30 +#define IAVF_TXD_CTX_QW1_TSO_LEN_MASK (0x3FFFFULL << \ + IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) + +#define IAVF_TXD_CTX_QW1_MSS_SHIFT 50 +#define IAVF_TXD_CTX_QW1_MSS_MASK (0x3FFFULL << \ + IAVF_TXD_CTX_QW1_MSS_SHIFT) + +#define IAVF_TXD_CTX_QW1_VSI_SHIFT 50 +#define IAVF_TXD_CTX_QW1_VSI_MASK (0x1FFULL << IAVF_TXD_CTX_QW1_VSI_SHIFT) + +#define IAVF_TXD_CTX_QW0_EXT_IP_SHIFT 0 +#define IAVF_TXD_CTX_QW0_EXT_IP_MASK (0x3ULL << \ + IAVF_TXD_CTX_QW0_EXT_IP_SHIFT) + +enum iavf_tx_ctx_desc_eipt_offload { + IAVF_TX_CTX_EXT_IP_NONE = 0x0, + IAVF_TX_CTX_EXT_IP_IPV6 = 0x1, + IAVF_TX_CTX_EXT_IP_IPV4_NO_CSUM = 0x2, + IAVF_TX_CTX_EXT_IP_IPV4 = 0x3 +}; + +#define IAVF_TXD_CTX_QW0_EXT_IPLEN_SHIFT 2 +#define IAVF_TXD_CTX_QW0_EXT_IPLEN_MASK (0x3FULL << \ + IAVF_TXD_CTX_QW0_EXT_IPLEN_SHIFT) + +#define IAVF_TXD_CTX_QW0_NATT_SHIFT 9 +#define IAVF_TXD_CTX_QW0_NATT_MASK (0x3ULL << IAVF_TXD_CTX_QW0_NATT_SHIFT) + +#define IAVF_TXD_CTX_UDP_TUNNELING BIT_ULL(IAVF_TXD_CTX_QW0_NATT_SHIFT) +#define IAVF_TXD_CTX_GRE_TUNNELING (0x2ULL << IAVF_TXD_CTX_QW0_NATT_SHIFT) + +#define IAVF_TXD_CTX_QW0_EIP_NOINC_SHIFT 11 +#define IAVF_TXD_CTX_QW0_EIP_NOINC_MASK \ + BIT_ULL(IAVF_TXD_CTX_QW0_EIP_NOINC_SHIFT) + +#define IAVF_TXD_CTX_EIP_NOINC_IPID_CONST IAVF_TXD_CTX_QW0_EIP_NOINC_MASK + +#define IAVF_TXD_CTX_QW0_NATLEN_SHIFT 12 +#define IAVF_TXD_CTX_QW0_NATLEN_MASK (0X7FULL << \ + IAVF_TXD_CTX_QW0_NATLEN_SHIFT) + +#define IAVF_TXD_CTX_QW0_DECTTL_SHIFT 19 +#define IAVF_TXD_CTX_QW0_DECTTL_MASK (0xFULL << \ + IAVF_TXD_CTX_QW0_DECTTL_SHIFT) + +#define IAVF_TXD_CTX_QW0_L4T_CS_SHIFT 23 +#define IAVF_TXD_CTX_QW0_L4T_CS_MASK BIT_ULL(IAVF_TXD_CTX_QW0_L4T_CS_SHIFT) + +/* Statistics collected by each port, VSI, VEB, and S-channel */ +struct iavf_eth_stats { + u64 rx_bytes; /* gorc */ + u64 rx_unicast; /* uprc */ + u64 rx_multicast; /* mprc */ + u64 rx_broadcast; /* bprc */ + u64 rx_discards; /* rdpc */ + u64 rx_unknown_protocol; /* rupp */ + u64 tx_bytes; /* gotc */ + u64 tx_unicast; /* uptc */ + u64 tx_multicast; /* mptc */ + u64 tx_broadcast; /* bptc */ + u64 tx_discards; /* tdpc */ + u64 tx_errors; /* tepc */ +}; +#endif /* _IAVF_TYPE_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c index 565677de5ba3..e64751da0921 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c +++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c @@ -1,16 +1,16 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2018 Intel Corporation. 
*/ -#include "i40evf.h" -#include "i40e_prototype.h" -#include "i40evf_client.h" +#include "iavf.h" +#include "iavf_prototype.h" +#include "iavf_client.h" /* busy wait delay in msec */ -#define I40EVF_BUSY_WAIT_DELAY 10 -#define I40EVF_BUSY_WAIT_COUNT 50 +#define IAVF_BUSY_WAIT_DELAY 10 +#define IAVF_BUSY_WAIT_COUNT 50 /** - * i40evf_send_pf_msg + * iavf_send_pf_msg * @adapter: adapter structure * @op: virtual channel opcode * @msg: pointer to message buffer @@ -18,44 +18,44 @@ * * Send message to PF and print status if failure. **/ -static int i40evf_send_pf_msg(struct i40evf_adapter *adapter, - enum virtchnl_ops op, u8 *msg, u16 len) +static int iavf_send_pf_msg(struct iavf_adapter *adapter, + enum virtchnl_ops op, u8 *msg, u16 len) { - struct i40e_hw *hw = &adapter->hw; - i40e_status err; + struct iavf_hw *hw = &adapter->hw; + iavf_status err; - if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) + if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) return 0; /* nothing to see here, move along */ - err = i40e_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL); + err = iavf_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL); if (err) dev_dbg(&adapter->pdev->dev, "Unable to send opcode %d to PF, err %s, aq_err %s\n", - op, i40evf_stat_str(hw, err), - i40evf_aq_str(hw, hw->aq.asq_last_status)); + op, iavf_stat_str(hw, err), + iavf_aq_str(hw, hw->aq.asq_last_status)); return err; } /** - * i40evf_send_api_ver + * iavf_send_api_ver * @adapter: adapter structure * * Send API version admin queue message to the PF. The reply is not checked * in this function. Returns 0 if the message was successfully * sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not. **/ -int i40evf_send_api_ver(struct i40evf_adapter *adapter) +int iavf_send_api_ver(struct iavf_adapter *adapter) { struct virtchnl_version_info vvi; vvi.major = VIRTCHNL_VERSION_MAJOR; vvi.minor = VIRTCHNL_VERSION_MINOR; - return i40evf_send_pf_msg(adapter, VIRTCHNL_OP_VERSION, (u8 *)&vvi, - sizeof(vvi)); + return iavf_send_pf_msg(adapter, VIRTCHNL_OP_VERSION, (u8 *)&vvi, + sizeof(vvi)); } /** - * i40evf_verify_api_ver + * iavf_verify_api_ver * @adapter: adapter structure * * Compare API versions with the PF. Must be called after admin queue is @@ -63,15 +63,15 @@ int i40evf_send_api_ver(struct i40evf_adapter *adapter) * I40E_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty, and any errors * from the firmware are propagated. **/ -int i40evf_verify_api_ver(struct i40evf_adapter *adapter) +int iavf_verify_api_ver(struct iavf_adapter *adapter) { struct virtchnl_version_info *pf_vvi; - struct i40e_hw *hw = &adapter->hw; + struct iavf_hw *hw = &adapter->hw; struct i40e_arq_event_info event; enum virtchnl_ops op; - i40e_status err; + iavf_status err; - event.buf_len = I40EVF_MAX_AQ_BUF_SIZE; + event.buf_len = IAVF_MAX_AQ_BUF_SIZE; event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); if (!event.msg_buf) { err = -ENOMEM; @@ -79,8 +79,8 @@ int i40evf_verify_api_ver(struct i40evf_adapter *adapter) } while (1) { - err = i40evf_clean_arq_element(hw, &event, NULL); - /* When the AQ is empty, i40evf_clean_arq_element will return + err = iavf_clean_arq_element(hw, &event, NULL); + /* When the AQ is empty, iavf_clean_arq_element will return * nonzero and this loop will terminate. 
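Version negotiation and resource discovery share one shape: send a virtchnl request, then drain admin-receive-queue events until the matching opcode appears or the queue is empty. A stripped-down sketch of that loop, with a stub standing in for the AQ primitive:

#include <stdio.h>
#include <stdbool.h>

/* stub standing in for iavf_clean_arq_element(); returns false when
 * the receive queue is empty
 */
static bool arq_pop_event(int *op, int *retval)
{
	(void)op;
	(void)retval;
	return false;
}

static int wait_for_op(int want_op, int *retval)
{
	int op, rv;

	while (arq_pop_event(&op, &rv)) {
		if (op != want_op)
			continue;	/* unrelated event; keep draining */
		*retval = rv;
		return 0;
	}
	return -1;	/* queue empty before the reply showed up */
}

int main(void)
{
	int rv;

	printf("wait_for_op -> %d\n", wait_for_op(1, &rv));
	return 0;
}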
*/ if (err) @@ -92,7 +92,7 @@ int i40evf_verify_api_ver(struct i40evf_adapter *adapter) } - err = (i40e_status)le32_to_cpu(event.desc.cookie_low); + err = (iavf_status)le32_to_cpu(event.desc.cookie_low); if (err) goto out_alloc; @@ -118,14 +118,14 @@ out: } /** - * i40evf_send_vf_config_msg + * iavf_send_vf_config_msg * @adapter: adapter structure * * Send VF configuration request admin queue message to the PF. The reply * is not checked in this function. Returns 0 if the message was * successfully sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not. **/ -int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter) +int iavf_send_vf_config_msg(struct iavf_adapter *adapter) { u32 caps; @@ -142,19 +142,43 @@ int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter) VIRTCHNL_VF_OFFLOAD_ADQ; adapter->current_op = VIRTCHNL_OP_GET_VF_RESOURCES; - adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_CONFIG; + adapter->aq_required &= ~IAVF_FLAG_AQ_GET_CONFIG; if (PF_IS_V11(adapter)) - return i40evf_send_pf_msg(adapter, - VIRTCHNL_OP_GET_VF_RESOURCES, - (u8 *)&caps, sizeof(caps)); + return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_VF_RESOURCES, + (u8 *)&caps, sizeof(caps)); else - return i40evf_send_pf_msg(adapter, - VIRTCHNL_OP_GET_VF_RESOURCES, - NULL, 0); + return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_VF_RESOURCES, + NULL, 0); } /** - * i40evf_get_vf_config + * iavf_validate_num_queues + * @adapter: adapter structure + * + * Validate that the number of queues the PF has sent in + * VIRTCHNL_OP_GET_VF_RESOURCES is not larger than the VF can handle. + **/ +static void iavf_validate_num_queues(struct iavf_adapter *adapter) +{ + if (adapter->vf_res->num_queue_pairs > IAVF_MAX_REQ_QUEUES) { + struct virtchnl_vsi_resource *vsi_res; + int i; + + dev_info(&adapter->pdev->dev, "Received %d queues, but can only have a max of %d\n", + adapter->vf_res->num_queue_pairs, + IAVF_MAX_REQ_QUEUES); + dev_info(&adapter->pdev->dev, "Fixing by reducing queues to %d\n", + IAVF_MAX_REQ_QUEUES); + adapter->vf_res->num_queue_pairs = IAVF_MAX_REQ_QUEUES; + for (i = 0; i < adapter->vf_res->num_vsis; i++) { + vsi_res = &adapter->vf_res->vsi_res[i]; + vsi_res->num_queue_pairs = IAVF_MAX_REQ_QUEUES; + } + } +} + +/** + * iavf_get_vf_config * @adapter: private adapter structure * * Get VF configuration from PF and populate hw structure. Must be called after @@ -162,16 +186,16 @@ int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter) * with maximum timeout. Response from PF is returned in the buffer for further * processing by the caller. **/ -int i40evf_get_vf_config(struct i40evf_adapter *adapter) +int iavf_get_vf_config(struct iavf_adapter *adapter) { - struct i40e_hw *hw = &adapter->hw; + struct iavf_hw *hw = &adapter->hw; struct i40e_arq_event_info event; enum virtchnl_ops op; - i40e_status err; + iavf_status err; u16 len; len = sizeof(struct virtchnl_vf_resource) + - I40E_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource); + IAVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource); event.buf_len = len; event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); if (!event.msg_buf) { @@ -180,10 +204,10 @@ int i40evf_get_vf_config(struct i40evf_adapter *adapter) } while (1) { - /* When the AQ is empty, i40evf_clean_arq_element will return + /* When the AQ is empty, iavf_clean_arq_element will return * nonzero and this loop will terminate. 
*/ - err = i40evf_clean_arq_element(hw, &event, NULL); + err = iavf_clean_arq_element(hw, &event, NULL); if (err) goto out_alloc; op = @@ -192,10 +216,15 @@ int i40evf_get_vf_config(struct i40evf_adapter *adapter) break; } - err = (i40e_status)le32_to_cpu(event.desc.cookie_low); + err = (iavf_status)le32_to_cpu(event.desc.cookie_low); memcpy(adapter->vf_res, event.msg_buf, min(event.msg_len, len)); - i40e_vf_parse_hw_config(hw, adapter->vf_res); + /* some PFs send more queues than we should have so validate that + * we aren't getting too many queues + */ + if (!err) + iavf_validate_num_queues(adapter); + iavf_vf_parse_hw_config(hw, adapter->vf_res); out_alloc: kfree(event.msg_buf); out: @@ -203,17 +232,17 @@ out: } /** - * i40evf_configure_queues + * iavf_configure_queues * @adapter: adapter structure * * Request that the PF set up our (previously allocated) queues. **/ -void i40evf_configure_queues(struct i40evf_adapter *adapter) +void iavf_configure_queues(struct iavf_adapter *adapter) { struct virtchnl_vsi_queue_config_info *vqci; struct virtchnl_queue_pair_info *vqpi; int pairs = adapter->num_active_queues; - int i, len, max_frame = I40E_MAX_RXBUFFER; + int i, len, max_frame = IAVF_MAX_RXBUFFER; if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ @@ -229,9 +258,9 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter) return; /* Limit maximum frame size when jumbo frames is not enabled */ - if (!(adapter->flags & I40EVF_FLAG_LEGACY_RX) && + if (!(adapter->flags & IAVF_FLAG_LEGACY_RX) && (adapter->netdev->mtu <= ETH_DATA_LEN)) - max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN; + max_frame = IAVF_RXBUFFER_1536 - NET_IP_ALIGN; vqci->vsi_id = adapter->vsi_res->vsi_id; vqci->num_queue_pairs = pairs; @@ -251,23 +280,23 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter) vqpi->rxq.max_pkt_size = max_frame; vqpi->rxq.databuffer_size = ALIGN(adapter->rx_rings[i].rx_buf_len, - BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT)); + BIT_ULL(IAVF_RXQ_CTX_DBUFF_SHIFT)); vqpi++; } - adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_QUEUES; - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_VSI_QUEUES, - (u8 *)vqci, len); + adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_QUEUES; + iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_VSI_QUEUES, + (u8 *)vqci, len); kfree(vqci); } /** - * i40evf_enable_queues + * iavf_enable_queues * @adapter: adapter structure * * Request that the PF enable all of our queues. **/ -void i40evf_enable_queues(struct i40evf_adapter *adapter) +void iavf_enable_queues(struct iavf_adapter *adapter) { struct virtchnl_queue_select vqs; @@ -281,18 +310,18 @@ void i40evf_enable_queues(struct i40evf_adapter *adapter) vqs.vsi_id = adapter->vsi_res->vsi_id; vqs.tx_queues = BIT(adapter->num_active_queues) - 1; vqs.rx_queues = vqs.tx_queues; - adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_QUEUES; - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_QUEUES, - (u8 *)&vqs, sizeof(vqs)); + adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_QUEUES; + iavf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_QUEUES, + (u8 *)&vqs, sizeof(vqs)); } /** - * i40evf_disable_queues + * iavf_disable_queues * @adapter: adapter structure * * Request that the PF disable all of our queues. 
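iavf_enable_queues() above selects all active queues at once: BIT(n) - 1 sets bits 0 through n-1. A two-line demonstration of the mask math:

#include <stdio.h>

int main(void)
{
	unsigned int num_active_queues = 4;
	unsigned int mask = (1u << num_active_queues) - 1;

	printf("queue mask for %u queues: 0x%x\n", num_active_queues, mask);
	return 0;	/* prints 0xf, i.e. queues 0-3 */
}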
**/ -void i40evf_disable_queues(struct i40evf_adapter *adapter) +void iavf_disable_queues(struct iavf_adapter *adapter) { struct virtchnl_queue_select vqs; @@ -306,24 +335,24 @@ void i40evf_disable_queues(struct i40evf_adapter *adapter) vqs.vsi_id = adapter->vsi_res->vsi_id; vqs.tx_queues = BIT(adapter->num_active_queues) - 1; vqs.rx_queues = vqs.tx_queues; - adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_QUEUES; - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_QUEUES, - (u8 *)&vqs, sizeof(vqs)); + adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_QUEUES; + iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_QUEUES, + (u8 *)&vqs, sizeof(vqs)); } /** - * i40evf_map_queues + * iavf_map_queues * @adapter: adapter structure * * Request that the PF map queues to interrupt vectors. Misc causes, including * admin queue, are always mapped to vector 0. **/ -void i40evf_map_queues(struct i40evf_adapter *adapter) +void iavf_map_queues(struct iavf_adapter *adapter) { struct virtchnl_irq_map_info *vimi; struct virtchnl_vector_map *vecmap; int v_idx, q_vectors, len; - struct i40e_q_vector *q_vector; + struct iavf_q_vector *q_vector; if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ @@ -352,8 +381,8 @@ void i40evf_map_queues(struct i40evf_adapter *adapter) vecmap->vector_id = v_idx + NONQ_VECS; vecmap->txq_map = q_vector->ring_mask; vecmap->rxq_map = q_vector->ring_mask; - vecmap->rxitr_idx = I40E_RX_ITR; - vecmap->txitr_idx = I40E_TX_ITR; + vecmap->rxitr_idx = IAVF_RX_ITR; + vecmap->txitr_idx = IAVF_TX_ITR; } /* Misc vector last - this is only for AdminQ messages */ vecmap = &vimi->vecmap[v_idx]; @@ -362,21 +391,21 @@ void i40evf_map_queues(struct i40evf_adapter *adapter) vecmap->txq_map = 0; vecmap->rxq_map = 0; - adapter->aq_required &= ~I40EVF_FLAG_AQ_MAP_VECTORS; - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_IRQ_MAP, - (u8 *)vimi, len); + adapter->aq_required &= ~IAVF_FLAG_AQ_MAP_VECTORS; + iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_IRQ_MAP, + (u8 *)vimi, len); kfree(vimi); } /** - * i40evf_request_queues + * iavf_request_queues * @adapter: adapter structure * @num: number of requested queues * * We get a default number of queues from the PF. This enables us to request a * different number. Returns 0 on success, negative on failure **/ -int i40evf_request_queues(struct i40evf_adapter *adapter, int num) +int iavf_request_queues(struct iavf_adapter *adapter, int num) { struct virtchnl_vf_res_request vfres; @@ -390,22 +419,22 @@ int i40evf_request_queues(struct i40evf_adapter *adapter, int num) vfres.num_queue_pairs = num; adapter->current_op = VIRTCHNL_OP_REQUEST_QUEUES; - adapter->flags |= I40EVF_FLAG_REINIT_ITR_NEEDED; - return i40evf_send_pf_msg(adapter, VIRTCHNL_OP_REQUEST_QUEUES, - (u8 *)&vfres, sizeof(vfres)); + adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED; + return iavf_send_pf_msg(adapter, VIRTCHNL_OP_REQUEST_QUEUES, + (u8 *)&vfres, sizeof(vfres)); } /** - * i40evf_add_ether_addrs + * iavf_add_ether_addrs * @adapter: adapter structure * * Request that the PF add one or more addresses to our filters. 
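The map built by iavf_map_queues() just above gives each data vector a bitmask of the queues it services and reserves vector 0 for AdminQ/misc causes. A small sketch of that layout, assuming one queue per vector (illustrative structure, not the virtchnl wire format):

#include <stdio.h>

struct vector_map {
	int vector_id;
	unsigned int txq_map, rxq_map;
};

int main(void)
{
	struct vector_map maps[3];
	int v, q_vectors = 2, nonq_vecs = 1;

	for (v = 0; v < q_vectors; v++) {
		maps[v].vector_id = v + nonq_vecs;	/* data vectors follow vector 0 */
		maps[v].txq_map = 1u << v;		/* here: one queue per vector */
		maps[v].rxq_map = 1u << v;
	}
	maps[v].vector_id = 0;	/* misc vector: AdminQ only, no queues */
	maps[v].txq_map = 0;
	maps[v].rxq_map = 0;

	for (v = 0; v < q_vectors + 1; v++)
		printf("vector %d: tx 0x%x rx 0x%x\n", maps[v].vector_id,
		       maps[v].txq_map, maps[v].rxq_map);
	return 0;
}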
**/ -void i40evf_add_ether_addrs(struct i40evf_adapter *adapter) +void iavf_add_ether_addrs(struct iavf_adapter *adapter) { struct virtchnl_ether_addr_list *veal; int len, i = 0, count = 0; - struct i40evf_mac_filter *f; + struct iavf_mac_filter *f; bool more = false; if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { @@ -422,7 +451,7 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter) count++; } if (!count) { - adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER; + adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_MAC_FILTER; spin_unlock_bh(&adapter->mac_vlan_list_lock); return; } @@ -430,9 +459,9 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter) len = sizeof(struct virtchnl_ether_addr_list) + (count * sizeof(struct virtchnl_ether_addr)); - if (len > I40EVF_MAX_AQ_BUF_SIZE) { + if (len > IAVF_MAX_AQ_BUF_SIZE) { dev_warn(&adapter->pdev->dev, "Too many add MAC changes in one request\n"); - count = (I40EVF_MAX_AQ_BUF_SIZE - + count = (IAVF_MAX_AQ_BUF_SIZE - sizeof(struct virtchnl_ether_addr_list)) / sizeof(struct virtchnl_ether_addr); len = sizeof(struct virtchnl_ether_addr_list) + @@ -458,25 +487,24 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter) } } if (!more) - adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER; + adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_MAC_FILTER; spin_unlock_bh(&adapter->mac_vlan_list_lock); - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_ETH_ADDR, - (u8 *)veal, len); + iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_ETH_ADDR, (u8 *)veal, len); kfree(veal); } /** - * i40evf_del_ether_addrs + * iavf_del_ether_addrs * @adapter: adapter structure * * Request that the PF remove one or more addresses from our filters. **/ -void i40evf_del_ether_addrs(struct i40evf_adapter *adapter) +void iavf_del_ether_addrs(struct iavf_adapter *adapter) { struct virtchnl_ether_addr_list *veal; - struct i40evf_mac_filter *f, *ftmp; + struct iavf_mac_filter *f, *ftmp; int len, i = 0, count = 0; bool more = false; @@ -494,7 +522,7 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter) count++; } if (!count) { - adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER; + adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_MAC_FILTER; spin_unlock_bh(&adapter->mac_vlan_list_lock); return; } @@ -502,9 +530,9 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter) len = sizeof(struct virtchnl_ether_addr_list) + (count * sizeof(struct virtchnl_ether_addr)); - if (len > I40EVF_MAX_AQ_BUF_SIZE) { + if (len > IAVF_MAX_AQ_BUF_SIZE) { dev_warn(&adapter->pdev->dev, "Too many delete MAC changes in one request\n"); - count = (I40EVF_MAX_AQ_BUF_SIZE - + count = (IAVF_MAX_AQ_BUF_SIZE - sizeof(struct virtchnl_ether_addr_list)) / sizeof(struct virtchnl_ether_addr); len = sizeof(struct virtchnl_ether_addr_list) + @@ -530,26 +558,25 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter) } } if (!more) - adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER; + adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_MAC_FILTER; spin_unlock_bh(&adapter->mac_vlan_list_lock); - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_ETH_ADDR, - (u8 *)veal, len); + iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_ETH_ADDR, (u8 *)veal, len); kfree(veal); } /** - * i40evf_add_vlans + * iavf_add_vlans * @adapter: adapter structure * * Request that the PF add one or more VLAN filters to our VSI. 
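The MAC filter paths above cap each message at IAVF_MAX_AQ_BUF_SIZE: when the full list will not fit, they send as many entries as fit and leave the request flag set so the remainder goes in a follow-up message. A sketch of that clamp with assumed sizes:

#include <stdio.h>

#define MAX_AQ_BUF 4096	/* assumed admin-queue buffer limit */
#define HDR_LEN    8	/* assumed list-header size */
#define ENTRY_LEN  6	/* one MAC address per entry */

int main(void)
{
	int count = 1000, more = 0;
	int len = HDR_LEN + count * ENTRY_LEN;

	if (len > MAX_AQ_BUF) {
		count = (MAX_AQ_BUF - HDR_LEN) / ENTRY_LEN;
		len = HDR_LEN + count * ENTRY_LEN;
		more = 1;	/* leave the request flag set; send the rest later */
	}
	printf("sending %d entries in %d bytes, more=%d\n", count, len, more);
	return 0;
}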
**/ -void i40evf_add_vlans(struct i40evf_adapter *adapter) +void iavf_add_vlans(struct iavf_adapter *adapter) { struct virtchnl_vlan_filter_list *vvfl; int len, i = 0, count = 0; - struct i40evf_vlan_filter *f; + struct iavf_vlan_filter *f; bool more = false; if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { @@ -566,7 +593,7 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter) count++; } if (!count) { - adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER; + adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER; spin_unlock_bh(&adapter->mac_vlan_list_lock); return; } @@ -574,9 +601,9 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter) len = sizeof(struct virtchnl_vlan_filter_list) + (count * sizeof(u16)); - if (len > I40EVF_MAX_AQ_BUF_SIZE) { + if (len > IAVF_MAX_AQ_BUF_SIZE) { dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n"); - count = (I40EVF_MAX_AQ_BUF_SIZE - + count = (IAVF_MAX_AQ_BUF_SIZE - sizeof(struct virtchnl_vlan_filter_list)) / sizeof(u16); len = sizeof(struct virtchnl_vlan_filter_list) + @@ -601,24 +628,24 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter) } } if (!more) - adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER; + adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER; spin_unlock_bh(&adapter->mac_vlan_list_lock); - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len); + iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len); kfree(vvfl); } /** - * i40evf_del_vlans + * iavf_del_vlans * @adapter: adapter structure * * Request that the PF remove one or more VLAN filters from our VSI. **/ -void i40evf_del_vlans(struct i40evf_adapter *adapter) +void iavf_del_vlans(struct iavf_adapter *adapter) { struct virtchnl_vlan_filter_list *vvfl; - struct i40evf_vlan_filter *f, *ftmp; + struct iavf_vlan_filter *f, *ftmp; int len, i = 0, count = 0; bool more = false; @@ -636,7 +663,7 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter) count++; } if (!count) { - adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER; + adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER; spin_unlock_bh(&adapter->mac_vlan_list_lock); return; } @@ -644,9 +671,9 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter) len = sizeof(struct virtchnl_vlan_filter_list) + (count * sizeof(u16)); - if (len > I40EVF_MAX_AQ_BUF_SIZE) { + if (len > IAVF_MAX_AQ_BUF_SIZE) { dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n"); - count = (I40EVF_MAX_AQ_BUF_SIZE - + count = (IAVF_MAX_AQ_BUF_SIZE - sizeof(struct virtchnl_vlan_filter_list)) / sizeof(u16); len = sizeof(struct virtchnl_vlan_filter_list) + @@ -672,22 +699,22 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter) } } if (!more) - adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER; + adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER; spin_unlock_bh(&adapter->mac_vlan_list_lock); - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len); + iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len); kfree(vvfl); } /** - * i40evf_set_promiscuous + * iavf_set_promiscuous * @adapter: adapter structure * @flags: bitmask to control unicast/multicast promiscuous. * * Request that the PF enable promiscuous mode for our VSI. 
**/ -void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags) +void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags) { struct virtchnl_promisc_info vpi; int promisc_all; @@ -702,39 +729,39 @@ void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags) promisc_all = FLAG_VF_UNICAST_PROMISC | FLAG_VF_MULTICAST_PROMISC; if ((flags & promisc_all) == promisc_all) { - adapter->flags |= I40EVF_FLAG_PROMISC_ON; - adapter->aq_required &= ~I40EVF_FLAG_AQ_REQUEST_PROMISC; + adapter->flags |= IAVF_FLAG_PROMISC_ON; + adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_PROMISC; dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n"); } if (flags & FLAG_VF_MULTICAST_PROMISC) { - adapter->flags |= I40EVF_FLAG_ALLMULTI_ON; - adapter->aq_required &= ~I40EVF_FLAG_AQ_REQUEST_ALLMULTI; + adapter->flags |= IAVF_FLAG_ALLMULTI_ON; + adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_ALLMULTI; dev_info(&adapter->pdev->dev, "Entering multicast promiscuous mode\n"); } if (!flags) { - adapter->flags &= ~(I40EVF_FLAG_PROMISC_ON | - I40EVF_FLAG_ALLMULTI_ON); - adapter->aq_required &= ~(I40EVF_FLAG_AQ_RELEASE_PROMISC | - I40EVF_FLAG_AQ_RELEASE_ALLMULTI); + adapter->flags &= ~(IAVF_FLAG_PROMISC_ON | + IAVF_FLAG_ALLMULTI_ON); + adapter->aq_required &= ~(IAVF_FLAG_AQ_RELEASE_PROMISC | + IAVF_FLAG_AQ_RELEASE_ALLMULTI); dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n"); } adapter->current_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE; vpi.vsi_id = adapter->vsi_res->vsi_id; vpi.flags = flags; - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, - (u8 *)&vpi, sizeof(vpi)); + iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, + (u8 *)&vpi, sizeof(vpi)); } /** - * i40evf_request_stats + * iavf_request_stats * @adapter: adapter structure * * Request VSI statistics from PF. 
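The flag handling in iavf_set_promiscuous() above treats full promiscuous mode as the union of the unicast and multicast bits, and an empty flag word as leaving both modes. A compact sketch of that decision, with stand-in flag values:

#include <stdio.h>

enum {
	UNICAST_PROMISC   = 0x1,	/* stand-in for FLAG_VF_UNICAST_PROMISC */
	MULTICAST_PROMISC = 0x2,	/* stand-in for FLAG_VF_MULTICAST_PROMISC */
};

static const char *promisc_action(int flags)
{
	int all = UNICAST_PROMISC | MULTICAST_PROMISC;

	if ((flags & all) == all)
		return "enter promiscuous mode";
	if (flags & MULTICAST_PROMISC)
		return "enter multicast promiscuous mode";
	if (!flags)
		return "leave promiscuous mode";
	return "no mode change";
}

int main(void)
{
	printf("%s\n", promisc_action(UNICAST_PROMISC | MULTICAST_PROMISC));
	printf("%s\n", promisc_action(0));
	return 0;
}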
**/ -void i40evf_request_stats(struct i40evf_adapter *adapter) +void iavf_request_stats(struct iavf_adapter *adapter) { struct virtchnl_queue_select vqs; @@ -745,19 +772,19 @@ void i40evf_request_stats(struct i40evf_adapter *adapter) adapter->current_op = VIRTCHNL_OP_GET_STATS; vqs.vsi_id = adapter->vsi_res->vsi_id; /* queue maps are ignored for this message - only the vsi is used */ - if (i40evf_send_pf_msg(adapter, VIRTCHNL_OP_GET_STATS, - (u8 *)&vqs, sizeof(vqs))) + if (iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_STATS, (u8 *)&vqs, + sizeof(vqs))) /* if the request failed, don't lock out others */ adapter->current_op = VIRTCHNL_OP_UNKNOWN; } /** - * i40evf_get_hena + * iavf_get_hena * @adapter: adapter structure * * Request hash enable capabilities from PF **/ -void i40evf_get_hena(struct i40evf_adapter *adapter) +void iavf_get_hena(struct iavf_adapter *adapter) { if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ @@ -766,18 +793,17 @@ void i40evf_get_hena(struct i40evf_adapter *adapter) return; } adapter->current_op = VIRTCHNL_OP_GET_RSS_HENA_CAPS; - adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_HENA; - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_GET_RSS_HENA_CAPS, - NULL, 0); + adapter->aq_required &= ~IAVF_FLAG_AQ_GET_HENA; + iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_RSS_HENA_CAPS, NULL, 0); } /** - * i40evf_set_hena + * iavf_set_hena * @adapter: adapter structure * * Request the PF to set our RSS hash capabilities **/ -void i40evf_set_hena(struct i40evf_adapter *adapter) +void iavf_set_hena(struct iavf_adapter *adapter) { struct virtchnl_rss_hena vrh; @@ -789,18 +815,18 @@ void i40evf_set_hena(struct i40evf_adapter *adapter) } vrh.hena = adapter->hena; adapter->current_op = VIRTCHNL_OP_SET_RSS_HENA; - adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_HENA; - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_SET_RSS_HENA, - (u8 *)&vrh, sizeof(vrh)); + adapter->aq_required &= ~IAVF_FLAG_AQ_SET_HENA; + iavf_send_pf_msg(adapter, VIRTCHNL_OP_SET_RSS_HENA, (u8 *)&vrh, + sizeof(vrh)); } /** - * i40evf_set_rss_key + * iavf_set_rss_key * @adapter: adapter structure * * Request the PF to set our RSS hash key **/ -void i40evf_set_rss_key(struct i40evf_adapter *adapter) +void iavf_set_rss_key(struct iavf_adapter *adapter) { struct virtchnl_rss_key *vrk; int len; @@ -821,19 +847,18 @@ void i40evf_set_rss_key(struct i40evf_adapter *adapter) memcpy(vrk->key, adapter->rss_key, adapter->rss_key_size); adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_KEY; - adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_RSS_KEY; - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_KEY, - (u8 *)vrk, len); + adapter->aq_required &= ~IAVF_FLAG_AQ_SET_RSS_KEY; + iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_KEY, (u8 *)vrk, len); kfree(vrk); } /** - * i40evf_set_rss_lut + * iavf_set_rss_lut * @adapter: adapter structure * * Request the PF to set our RSS lookup table **/ -void i40evf_set_rss_lut(struct i40evf_adapter *adapter) +void iavf_set_rss_lut(struct iavf_adapter *adapter) { struct virtchnl_rss_lut *vrl; int len; @@ -853,19 +878,18 @@ void i40evf_set_rss_lut(struct i40evf_adapter *adapter) vrl->lut_entries = adapter->rss_lut_size; memcpy(vrl->lut, adapter->rss_lut, adapter->rss_lut_size); adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_LUT; - adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_RSS_LUT; - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_LUT, - (u8 *)vrl, len); + adapter->aq_required &= ~IAVF_FLAG_AQ_SET_RSS_LUT; + iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_LUT, (u8 *)vrl, len); 
kfree(vrl); } /** - * i40evf_enable_vlan_stripping + * iavf_enable_vlan_stripping * @adapter: adapter structure * * Request VLAN header stripping to be enabled **/ -void i40evf_enable_vlan_stripping(struct i40evf_adapter *adapter) +void iavf_enable_vlan_stripping(struct iavf_adapter *adapter) { if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ @@ -874,18 +898,17 @@ void i40evf_enable_vlan_stripping(struct i40evf_adapter *adapter) return; } adapter->current_op = VIRTCHNL_OP_ENABLE_VLAN_STRIPPING; - adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING; - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING, - NULL, 0); + adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING; + iavf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING, NULL, 0); } /** - * i40evf_disable_vlan_stripping + * iavf_disable_vlan_stripping * @adapter: adapter structure * * Request VLAN header stripping to be disabled **/ -void i40evf_disable_vlan_stripping(struct i40evf_adapter *adapter) +void iavf_disable_vlan_stripping(struct iavf_adapter *adapter) { if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ @@ -894,18 +917,17 @@ void i40evf_disable_vlan_stripping(struct i40evf_adapter *adapter) return; } adapter->current_op = VIRTCHNL_OP_DISABLE_VLAN_STRIPPING; - adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING; - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, - NULL, 0); + adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING; + iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, NULL, 0); } /** - * i40evf_print_link_message - print link up or down + * iavf_print_link_message - print link up or down * @adapter: adapter structure * * Log a message telling the world of our wonderous link status */ -static void i40evf_print_link_message(struct i40evf_adapter *adapter) +static void iavf_print_link_message(struct iavf_adapter *adapter) { struct net_device *netdev = adapter->netdev; char *speed = "Unknown "; @@ -942,13 +964,13 @@ static void i40evf_print_link_message(struct i40evf_adapter *adapter) } /** - * i40evf_enable_channel + * iavf_enable_channels * @adapter: adapter structure * * Request that the PF enable channels as specified by * the user via tc tool. 
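For context on the RSS messages just above (iavf_set_rss_key()/iavf_set_rss_lut()): conceptually, the hardware uses the low bits of a packet's RSS hash to index the LUT, and the LUT entry names the receive queue. A toy model of that steering; the real lookup happens in hardware, and sizes here are illustrative:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t lut[64];	/* size is illustrative; the VF queries its real size */
	unsigned int i, num_queues = 4;
	uint32_t hash = 0x9e3779b9;	/* stand-in for the packet's RSS hash */

	for (i = 0; i < sizeof(lut); i++)
		lut[i] = i % num_queues;	/* spread entries across the queues */

	printf("hash 0x%x -> queue %u\n", (unsigned int)hash,
	       (unsigned int)lut[hash % sizeof(lut)]);
	return 0;
}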
**/ -void i40evf_enable_channels(struct i40evf_adapter *adapter) +void iavf_enable_channels(struct iavf_adapter *adapter) { struct virtchnl_tc_info *vti = NULL; u16 len; @@ -976,22 +998,21 @@ void i40evf_enable_channels(struct i40evf_adapter *adapter) adapter->ch_config.ch_info[i].max_tx_rate; } - adapter->ch_config.state = __I40EVF_TC_RUNNING; - adapter->flags |= I40EVF_FLAG_REINIT_ITR_NEEDED; + adapter->ch_config.state = __IAVF_TC_RUNNING; + adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED; adapter->current_op = VIRTCHNL_OP_ENABLE_CHANNELS; - adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_CHANNELS; - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_CHANNELS, - (u8 *)vti, len); + adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_CHANNELS; + iavf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_CHANNELS, (u8 *)vti, len); kfree(vti); } /** - * i40evf_disable_channel + * iavf_disable_channels * @adapter: adapter structure * * Request that the PF disable channels that are configured **/ -void i40evf_disable_channels(struct i40evf_adapter *adapter) +void iavf_disable_channels(struct iavf_adapter *adapter) { if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ @@ -1000,23 +1021,22 @@ void i40evf_disable_channels(struct i40evf_adapter *adapter) return; } - adapter->ch_config.state = __I40EVF_TC_INVALID; - adapter->flags |= I40EVF_FLAG_REINIT_ITR_NEEDED; + adapter->ch_config.state = __IAVF_TC_INVALID; + adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED; adapter->current_op = VIRTCHNL_OP_DISABLE_CHANNELS; - adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_CHANNELS; - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_CHANNELS, - NULL, 0); + adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_CHANNELS; + iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_CHANNELS, NULL, 0); } /** - * i40evf_print_cloud_filter + * iavf_print_cloud_filter * @adapter: adapter structure * @f: cloud filter to print * * Print the cloud filter **/ -static void i40evf_print_cloud_filter(struct i40evf_adapter *adapter, - struct virtchnl_filter *f) +static void iavf_print_cloud_filter(struct iavf_adapter *adapter, + struct virtchnl_filter *f) { switch (f->flow_type) { case VIRTCHNL_TCP_V4_FLOW: @@ -1043,15 +1063,15 @@ static void i40evf_print_cloud_filter(struct i40evf_adapter *adapter, } /** - * i40evf_add_cloud_filter + * iavf_add_cloud_filter * @adapter: adapter structure * * Request that the PF add cloud filters as specified * by the user via tc tool. 
**/ -void i40evf_add_cloud_filter(struct i40evf_adapter *adapter) +void iavf_add_cloud_filter(struct iavf_adapter *adapter) { - struct i40evf_cloud_filter *cf; + struct iavf_cloud_filter *cf; struct virtchnl_filter *f; int len = 0, count = 0; @@ -1068,7 +1088,7 @@ void i40evf_add_cloud_filter(struct i40evf_adapter *adapter) } } if (!count) { - adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_CLOUD_FILTER; + adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_CLOUD_FILTER; return; } adapter->current_op = VIRTCHNL_OP_ADD_CLOUD_FILTER; @@ -1082,25 +1102,24 @@ void i40evf_add_cloud_filter(struct i40evf_adapter *adapter) if (cf->add) { memcpy(f, &cf->f, sizeof(struct virtchnl_filter)); cf->add = false; - cf->state = __I40EVF_CF_ADD_PENDING; - i40evf_send_pf_msg(adapter, - VIRTCHNL_OP_ADD_CLOUD_FILTER, - (u8 *)f, len); + cf->state = __IAVF_CF_ADD_PENDING; + iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_CLOUD_FILTER, + (u8 *)f, len); } } kfree(f); } /** - * i40evf_del_cloud_filter + * iavf_del_cloud_filter * @adapter: adapter structure * * Request that the PF delete cloud filters as specified * by the user via tc tool. **/ -void i40evf_del_cloud_filter(struct i40evf_adapter *adapter) +void iavf_del_cloud_filter(struct iavf_adapter *adapter) { - struct i40evf_cloud_filter *cf, *cftmp; + struct iavf_cloud_filter *cf, *cftmp; struct virtchnl_filter *f; int len = 0, count = 0; @@ -1117,7 +1136,7 @@ void i40evf_del_cloud_filter(struct i40evf_adapter *adapter) } } if (!count) { - adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_CLOUD_FILTER; + adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_CLOUD_FILTER; return; } adapter->current_op = VIRTCHNL_OP_DEL_CLOUD_FILTER; @@ -1131,30 +1150,29 @@ void i40evf_del_cloud_filter(struct i40evf_adapter *adapter) if (cf->del) { memcpy(f, &cf->f, sizeof(struct virtchnl_filter)); cf->del = false; - cf->state = __I40EVF_CF_DEL_PENDING; - i40evf_send_pf_msg(adapter, - VIRTCHNL_OP_DEL_CLOUD_FILTER, - (u8 *)f, len); + cf->state = __IAVF_CF_DEL_PENDING; + iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_CLOUD_FILTER, + (u8 *)f, len); } } kfree(f); } /** - * i40evf_request_reset + * iavf_request_reset * @adapter: adapter structure * * Request that the PF reset this VF. No response is expected. **/ -void i40evf_request_reset(struct i40evf_adapter *adapter) +void iavf_request_reset(struct iavf_adapter *adapter) { /* Don't check CURRENT_OP - this is always higher priority */ - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_RESET_VF, NULL, 0); + iavf_send_pf_msg(adapter, VIRTCHNL_OP_RESET_VF, NULL, 0); adapter->current_op = VIRTCHNL_OP_UNKNOWN; } /** - * i40evf_virtchnl_completion + * iavf_virtchnl_completion * @adapter: adapter structure * @v_opcode: opcode sent by PF * @v_retval: retval sent by PF @@ -1165,10 +1183,9 @@ void i40evf_request_reset(struct i40evf_adapter *adapter) * wait, we fire off our requests and assume that no errors will be returned. * This function handles the reply messages. 
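The dispatch that follows is the other half of the fire-and-forget model described above: each PF reply is routed by opcode, with failures logged and, for some opcodes, driver state rolled back. A minimal sketch of the pattern with made-up opcodes:

#include <stdio.h>

enum op { OP_ENABLE_QUEUES = 1, OP_GET_STATS = 2 };

static void completion(int v_opcode, int v_retval)
{
	if (v_retval) {
		printf("op %d failed (%d); roll back driver state\n",
		       v_opcode, v_retval);
		return;
	}
	switch (v_opcode) {
	case OP_ENABLE_QUEUES:
		printf("queues enabled; safe to enable IRQs\n");
		break;
	case OP_GET_STATS:
		printf("copy stats payload into netdev counters\n");
		break;
	default:
		break;
	}
}

int main(void)
{
	completion(OP_ENABLE_QUEUES, 0);
	completion(OP_GET_STATS, -5);
	return 0;
}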
**/ -void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, - enum virtchnl_ops v_opcode, - i40e_status v_retval, - u8 *msg, u16 msglen) +void iavf_virtchnl_completion(struct iavf_adapter *adapter, + enum virtchnl_ops v_opcode, iavf_status v_retval, + u8 *msg, u16 msglen) { struct net_device *netdev = adapter->netdev; @@ -1176,6 +1193,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, struct virtchnl_pf_event *vpe = (struct virtchnl_pf_event *)msg; bool link_up = vpe->event_data.link_event.link_status; + switch (vpe->event) { case VIRTCHNL_EVENT_LINK_CHANGE: adapter->link_speed = @@ -1193,7 +1211,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, * after we enable queues and actually prepared * to send traffic. */ - if (adapter->state != __I40EVF_RUNNING) + if (adapter->state != __IAVF_RUNNING) break; /* For ADq enabled VF, we reconfigure VSIs and @@ -1201,7 +1219,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, * queues are enabled. */ if (adapter->flags & - I40EVF_FLAG_QUEUES_DISABLED) + IAVF_FLAG_QUEUES_DISABLED) break; } @@ -1213,12 +1231,12 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, netif_tx_stop_all_queues(netdev); netif_carrier_off(netdev); } - i40evf_print_link_message(adapter); + iavf_print_link_message(adapter); break; case VIRTCHNL_EVENT_RESET_IMPENDING: dev_info(&adapter->pdev->dev, "Reset warning received from the PF\n"); - if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) { - adapter->flags |= I40EVF_FLAG_RESET_PENDING; + if (!(adapter->flags & IAVF_FLAG_RESET_PENDING)) { + adapter->flags |= IAVF_FLAG_RESET_PENDING; dev_info(&adapter->pdev->dev, "Scheduling reset task\n"); schedule_work(&adapter->reset_task); } @@ -1234,48 +1252,48 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, switch (v_opcode) { case VIRTCHNL_OP_ADD_VLAN: dev_err(&adapter->pdev->dev, "Failed to add VLAN filter, error %s\n", - i40evf_stat_str(&adapter->hw, v_retval)); + iavf_stat_str(&adapter->hw, v_retval)); break; case VIRTCHNL_OP_ADD_ETH_ADDR: dev_err(&adapter->pdev->dev, "Failed to add MAC filter, error %s\n", - i40evf_stat_str(&adapter->hw, v_retval)); + iavf_stat_str(&adapter->hw, v_retval)); break; case VIRTCHNL_OP_DEL_VLAN: dev_err(&adapter->pdev->dev, "Failed to delete VLAN filter, error %s\n", - i40evf_stat_str(&adapter->hw, v_retval)); + iavf_stat_str(&adapter->hw, v_retval)); break; case VIRTCHNL_OP_DEL_ETH_ADDR: dev_err(&adapter->pdev->dev, "Failed to delete MAC filter, error %s\n", - i40evf_stat_str(&adapter->hw, v_retval)); + iavf_stat_str(&adapter->hw, v_retval)); break; case VIRTCHNL_OP_ENABLE_CHANNELS: dev_err(&adapter->pdev->dev, "Failed to configure queue channels, error %s\n", - i40evf_stat_str(&adapter->hw, v_retval)); - adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED; - adapter->ch_config.state = __I40EVF_TC_INVALID; + iavf_stat_str(&adapter->hw, v_retval)); + adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; + adapter->ch_config.state = __IAVF_TC_INVALID; netdev_reset_tc(netdev); netif_tx_start_all_queues(netdev); break; case VIRTCHNL_OP_DISABLE_CHANNELS: dev_err(&adapter->pdev->dev, "Failed to disable queue channels, error %s\n", - i40evf_stat_str(&adapter->hw, v_retval)); - adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED; - adapter->ch_config.state = __I40EVF_TC_RUNNING; + iavf_stat_str(&adapter->hw, v_retval)); + adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; + adapter->ch_config.state = __IAVF_TC_RUNNING; netif_tx_start_all_queues(netdev); break; case 
VIRTCHNL_OP_ADD_CLOUD_FILTER: { - struct i40evf_cloud_filter *cf, *cftmp; + struct iavf_cloud_filter *cf, *cftmp; list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) { - if (cf->state == __I40EVF_CF_ADD_PENDING) { - cf->state = __I40EVF_CF_INVALID; + if (cf->state == __IAVF_CF_ADD_PENDING) { + cf->state = __IAVF_CF_INVALID; dev_info(&adapter->pdev->dev, "Failed to add cloud filter, error %s\n", - i40evf_stat_str(&adapter->hw, - v_retval)); - i40evf_print_cloud_filter(adapter, - &cf->f); + iavf_stat_str(&adapter->hw, + v_retval)); + iavf_print_cloud_filter(adapter, + &cf->f); list_del(&cf->list); kfree(cf); adapter->num_cloud_filters--; @@ -1284,32 +1302,31 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, } break; case VIRTCHNL_OP_DEL_CLOUD_FILTER: { - struct i40evf_cloud_filter *cf; + struct iavf_cloud_filter *cf; list_for_each_entry(cf, &adapter->cloud_filter_list, list) { - if (cf->state == __I40EVF_CF_DEL_PENDING) { - cf->state = __I40EVF_CF_ACTIVE; + if (cf->state == __IAVF_CF_DEL_PENDING) { + cf->state = __IAVF_CF_ACTIVE; dev_info(&adapter->pdev->dev, "Failed to del cloud filter, error %s\n", - i40evf_stat_str(&adapter->hw, - v_retval)); - i40evf_print_cloud_filter(adapter, - &cf->f); + iavf_stat_str(&adapter->hw, + v_retval)); + iavf_print_cloud_filter(adapter, + &cf->f); } } } break; default: dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n", - v_retval, - i40evf_stat_str(&adapter->hw, v_retval), + v_retval, iavf_stat_str(&adapter->hw, v_retval), v_opcode); } } switch (v_opcode) { case VIRTCHNL_OP_GET_STATS: { - struct i40e_eth_stats *stats = - (struct i40e_eth_stats *)msg; + struct iavf_eth_stats *stats = + (struct iavf_eth_stats *)msg; netdev->stats.rx_packets = stats->rx_unicast + stats->rx_multicast + stats->rx_broadcast; @@ -1326,25 +1343,33 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, break; case VIRTCHNL_OP_GET_VF_RESOURCES: { u16 len = sizeof(struct virtchnl_vf_resource) + - I40E_MAX_VF_VSI * + IAVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource); memcpy(adapter->vf_res, msg, min(msglen, len)); - i40e_vf_parse_hw_config(&adapter->hw, adapter->vf_res); - /* restore current mac address */ - ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); - i40evf_process_config(adapter); + iavf_validate_num_queues(adapter); + iavf_vf_parse_hw_config(&adapter->hw, adapter->vf_res); + if (is_zero_ether_addr(adapter->hw.mac.addr)) { + /* restore current mac address */ + ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); + } else { + /* refresh current mac address if changed */ + ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr); + ether_addr_copy(netdev->perm_addr, + adapter->hw.mac.addr); + } + iavf_process_config(adapter); } break; case VIRTCHNL_OP_ENABLE_QUEUES: /* enable transmits */ - i40evf_irq_enable(adapter, true); - adapter->flags &= ~I40EVF_FLAG_QUEUES_DISABLED; + iavf_irq_enable(adapter, true); + adapter->flags &= ~IAVF_FLAG_QUEUES_DISABLED; break; case VIRTCHNL_OP_DISABLE_QUEUES: - i40evf_free_all_tx_resources(adapter); - i40evf_free_all_rx_resources(adapter); - if (adapter->state == __I40EVF_DOWN_PENDING) { - adapter->state = __I40EVF_DOWN; + iavf_free_all_tx_resources(adapter); + iavf_free_all_rx_resources(adapter); + if (adapter->state == __IAVF_DOWN_PENDING) { + adapter->state = __IAVF_DOWN; wake_up(&adapter->down_waitqueue); } break; @@ -1363,8 +1388,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, * care about that. 
*/ if (msglen && CLIENT_ENABLED(adapter)) - i40evf_notify_client_message(&adapter->vsi, - msg, msglen); + iavf_notify_client_message(&adapter->vsi, msg, msglen); break; case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP: @@ -1373,6 +1397,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, break; case VIRTCHNL_OP_GET_RSS_HENA_CAPS: { struct virtchnl_rss_hena *vrh = (struct virtchnl_rss_hena *)msg; + if (msglen == sizeof(*vrh)) adapter->hena = vrh->hena; else @@ -1383,32 +1408,33 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, case VIRTCHNL_OP_REQUEST_QUEUES: { struct virtchnl_vf_res_request *vfres = (struct virtchnl_vf_res_request *)msg; + if (vfres->num_queue_pairs != adapter->num_req_queues) { dev_info(&adapter->pdev->dev, "Requested %d queues, PF can support %d\n", adapter->num_req_queues, vfres->num_queue_pairs); adapter->num_req_queues = 0; - adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED; + adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; } } break; case VIRTCHNL_OP_ADD_CLOUD_FILTER: { - struct i40evf_cloud_filter *cf; + struct iavf_cloud_filter *cf; list_for_each_entry(cf, &adapter->cloud_filter_list, list) { - if (cf->state == __I40EVF_CF_ADD_PENDING) - cf->state = __I40EVF_CF_ACTIVE; + if (cf->state == __IAVF_CF_ADD_PENDING) + cf->state = __IAVF_CF_ACTIVE; } } break; case VIRTCHNL_OP_DEL_CLOUD_FILTER: { - struct i40evf_cloud_filter *cf, *cftmp; + struct iavf_cloud_filter *cf, *cftmp; list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) { - if (cf->state == __I40EVF_CF_DEL_PENDING) { - cf->state = __I40EVF_CF_INVALID; + if (cf->state == __IAVF_CF_DEL_PENDING) { + cf->state = __IAVF_CF_INVALID; list_del(&cf->list); kfree(cf); adapter->num_cloud_filters--; diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile index 4058673fd853..e5d6f684437e 100644 --- a/drivers/net/ethernet/intel/ice/Makefile +++ b/drivers/net/ethernet/intel/ice/Makefile @@ -13,5 +13,7 @@ ice-y := ice_main.o \ ice_nvm.o \ ice_switch.o \ ice_sched.o \ + ice_lib.o \ ice_txrx.o \ ice_ethtool.o +ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 868f4a1d0f72..4c4b5717a627 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -28,6 +28,7 @@ #include <linux/ip.h> #include <linux/ipv6.h> #include <linux/if_bridge.h> +#include <linux/avf/virtchnl.h> #include <net/ipv6.h> #include "ice_devids.h" #include "ice_type.h" @@ -35,17 +36,20 @@ #include "ice_switch.h" #include "ice_common.h" #include "ice_sched.h" +#include "ice_virtchnl_pf.h" +#include "ice_sriov.h" extern const char ice_drv_ver[]; #define ICE_BAR0 0 #define ICE_DFLT_NUM_DESC 128 -#define ICE_MIN_NUM_DESC 8 -#define ICE_MAX_NUM_DESC 8160 #define ICE_REQ_DESC_MULTIPLE 32 +#define ICE_MIN_NUM_DESC ICE_REQ_DESC_MULTIPLE +#define ICE_MAX_NUM_DESC 8160 #define ICE_DFLT_TRAFFIC_CLASS BIT(0) #define ICE_INT_NAME_STR_LEN (IFNAMSIZ + 16) #define ICE_ETHTOOL_FWVER_LEN 32 #define ICE_AQ_LEN 64 +#define ICE_MBXQ_LEN 64 #define ICE_MIN_MSIX 2 #define ICE_NO_VSI 0xffff #define ICE_MAX_VSI_ALLOC 130 @@ -62,6 +66,15 @@ extern const char ice_drv_ver[]; #define ICE_RES_VALID_BIT 0x8000 #define ICE_RES_MISC_VEC_ID (ICE_RES_VALID_BIT - 1) #define ICE_INVAL_Q_INDEX 0xffff +#define ICE_INVAL_VFID 256 +#define ICE_MAX_VF_COUNT 256 +#define ICE_MAX_QS_PER_VF 256 +#define ICE_MIN_QS_PER_VF 1 +#define ICE_DFLT_QS_PER_VF 4 +#define ICE_MAX_BASE_QS_PER_VF 16 +#define 
ICE_MAX_INTR_PER_VF 65 +#define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1) +#define ICE_DFLT_INTR_PER_VF (ICE_DFLT_QS_PER_VF + 1) #define ICE_VSIQF_HKEY_ARRAY_SIZE ((VSIQF_HKEY_MAX_INDEX + 1) * 4) @@ -122,7 +135,8 @@ struct ice_sw { enum ice_state { __ICE_DOWN, __ICE_NEEDS_RESTART, - __ICE_RESET_RECOVERY_PENDING, /* set by driver when reset starts */ + __ICE_PREPARED_FOR_RESET, /* set by driver when prepared */ + __ICE_RESET_OICR_RECV, /* set by driver after rcv reset OICR */ __ICE_PFR_REQ, /* set by driver and peers */ __ICE_CORER_REQ, /* set by driver and peers */ __ICE_GLOBR_REQ, /* set by driver and peers */ @@ -131,10 +145,24 @@ enum ice_state { __ICE_EMPR_RECV, /* set by OICR handler */ __ICE_SUSPENDED, /* set on module remove path */ __ICE_RESET_FAILED, /* set by reset/rebuild */ + /* When checking for the PF to be in a nominal operating state, the + * bits that are grouped at the beginning of the list need to be + * checked. Bits occurring before __ICE_STATE_NOMINAL_CHECK_BITS will + * be checked. If you need to add a bit into consideration for nominal + * operating state, it must be added before + * __ICE_STATE_NOMINAL_CHECK_BITS. Do not move this entry's position + * without appropriate consideration. + */ + __ICE_STATE_NOMINAL_CHECK_BITS, __ICE_ADMINQ_EVENT_PENDING, + __ICE_MAILBOXQ_EVENT_PENDING, + __ICE_MDD_EVENT_PENDING, + __ICE_VFLR_EVENT_PENDING, __ICE_FLTR_OVERFLOW_PROMISC, + __ICE_VF_DIS, __ICE_CFG_BUSY, __ICE_SERVICE_SCHED, + __ICE_SERVICE_DIS, __ICE_STATE_NBITS /* must be last */ }; @@ -168,7 +196,8 @@ struct ice_vsi { u32 rx_buf_failed; u32 rx_page_failed; int num_q_vectors; - int base_vector; + int sw_base_vector; /* Irq base for OS reserved vectors */ + int hw_base_vector; /* HW (absolute) index of a vector */ enum ice_vsi_type type; u16 vsi_num; /* HW (absolute) index of this VSI */ u16 idx; /* software index in pf->vsi[] */ @@ -176,6 +205,8 @@ struct ice_vsi { /* Interrupt thresholds */ u16 work_lmt; + s16 vf_id; /* VF ID for SR-IOV VSIs */ + /* RSS config */ u16 rss_table_size; /* HW RSS table size */ u16 rss_size; /* Allocated RSS queues */ @@ -225,21 +256,39 @@ struct ice_q_vector { u8 num_ring_tx; /* total number of tx rings in vector */ u8 num_ring_rx; /* total number of rx rings in vector */ char name[ICE_INT_NAME_STR_LEN]; + /* in usecs, need to use ice_intrl_to_usecs_reg() before writing this + * value to the device + */ + u8 intrl; } ____cacheline_internodealigned_in_smp; enum ice_pf_flags { ICE_FLAG_MSIX_ENA, ICE_FLAG_FLTR_SYNC, ICE_FLAG_RSS_ENA, + ICE_FLAG_SRIOV_ENA, + ICE_FLAG_SRIOV_CAPABLE, ICE_PF_FLAGS_NBITS /* must be last */ }; struct ice_pf { struct pci_dev *pdev; + + /* OS reserved IRQ details */ struct msix_entry *msix_entries; - struct ice_res_tracker *irq_tracker; + struct ice_res_tracker *sw_irq_tracker; + + /* HW reserved Interrupts for this PF */ + struct ice_res_tracker *hw_irq_tracker; + struct ice_vsi **vsi; /* VSIs created by the driver */ struct ice_sw *first_sw; /* first switch created by firmware */ + /* Virtchnl/SR-IOV config info */ + struct ice_vf *vf; + int num_alloc_vfs; /* actual number of VFs allocated */ + u16 num_vfs_supported; /* num VFs supported for this PF */ + u16 num_vf_qps; /* num queue pairs per VF */ + u16 num_vf_msix; /* num vectors per VF */ DECLARE_BITMAP(state, __ICE_STATE_NBITS); DECLARE_BITMAP(avail_txqs, ICE_MAX_TXQS); DECLARE_BITMAP(avail_rxqs, ICE_MAX_RXQS); @@ -252,9 +301,11 @@ struct ice_pf { struct mutex sw_mutex; /* lock for protecting VSI alloc flow */ u32 msg_enable; u32 hw_csum_rx_error; - u32 
oicr_idx; /* Other interrupt cause vector index */ + u32 sw_oicr_idx; /* Other interrupt cause SW vector index */ + u32 num_avail_sw_msix; /* remaining MSIX SW vectors left unclaimed */ + u32 hw_oicr_idx; /* Other interrupt cause vector HW index */ + u32 num_avail_hw_msix; /* remaining HW MSIX vectors left unclaimed */ u32 num_lan_msix; /* Total MSIX vectors for base driver */ - u32 num_avail_msix; /* remaining MSIX vectors left unclaimed */ u16 num_lan_tx; /* num lan tx queues setup */ u16 num_lan_rx; /* num lan rx queues setup */ u16 q_left_tx; /* remaining num tx queues left unclaimed */ @@ -270,6 +321,9 @@ struct ice_pf { struct ice_hw_port_stats stats_prev; struct ice_hw hw; u8 stat_prev_loaded; /* has previous stats been loaded */ + u32 tx_timeout_count; + unsigned long tx_timeout_last_recovery; + u32 tx_timeout_recovery_level; char int_name[ICE_INT_NAME_STR_LEN]; }; @@ -286,8 +340,8 @@ struct ice_netdev_priv { static inline void ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi, struct ice_q_vector *q_vector) { - u32 vector = (vsi && q_vector) ? vsi->base_vector + q_vector->v_idx : - ((struct ice_pf *)hw->back)->oicr_idx; + u32 vector = (vsi && q_vector) ? vsi->hw_base_vector + q_vector->v_idx : + ((struct ice_pf *)hw->back)->hw_oicr_idx; int itr = ICE_ITR_NONE; u32 val; diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index a0614f472658..6653555f55dd 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -87,6 +87,8 @@ struct ice_aqc_list_caps { /* Device/Function buffer entry, repeated per reported capability */ struct ice_aqc_list_caps_elem { __le16 cap; +#define ICE_AQC_CAPS_SRIOV 0x0012 +#define ICE_AQC_CAPS_VF 0x0013 #define ICE_AQC_CAPS_VSI 0x0017 #define ICE_AQC_CAPS_RSS 0x0040 #define ICE_AQC_CAPS_RXQS 0x0041 @@ -443,6 +445,8 @@ struct ice_aqc_vsi_props { u8 reserved[24]; }; +#define ICE_MAX_NUM_RECIPES 64 + /* Add/Update/Remove/Get switch rules (indirect 0x02A0, 0x02A1, 0x02A2, 0x02A3) */ struct ice_aqc_sw_rules { @@ -734,6 +738,10 @@ struct ice_aqc_add_elem { struct ice_aqc_txsched_elem_data generic[1]; }; +struct ice_aqc_get_elem { + struct ice_aqc_txsched_elem_data generic[1]; +}; + struct ice_aqc_get_topo_elem { struct ice_aqc_txsched_topo_grp_info_hdr hdr; struct ice_aqc_txsched_elem_data @@ -771,9 +779,8 @@ struct ice_aqc_layer_props { u8 chunk_size; __le16 max_device_nodes; __le16 max_pf_nodes; - u8 rsvd0[2]; - __le16 max_shared_rate_lmtr; - __le16 max_children; + u8 rsvd0[4]; + __le16 max_sibl_grp_sz; __le16 max_cir_rl_profiles; __le16 max_eir_rl_profiles; __le16 max_srl_profiles; @@ -919,9 +926,11 @@ struct ice_aqc_set_phy_cfg_data { u8 caps; #define ICE_AQ_PHY_ENA_TX_PAUSE_ABILITY BIT(0) #define ICE_AQ_PHY_ENA_RX_PAUSE_ABILITY BIT(1) -#define ICE_AQ_PHY_ENA_LOW_POWER BIT(2) -#define ICE_AQ_PHY_ENA_LINK BIT(3) -#define ICE_AQ_PHY_ENA_ATOMIC_LINK BIT(5) +#define ICE_AQ_PHY_ENA_LOW_POWER BIT(2) +#define ICE_AQ_PHY_ENA_LINK BIT(3) +#define ICE_AQ_PHY_ENA_AUTO_LINK_UPDT BIT(5) +#define ICE_AQ_PHY_ENA_LESM BIT(6) +#define ICE_AQ_PHY_ENA_AUTO_FEC BIT(7) u8 low_power_ctrl; __le16 eee_cap; /* Value from ice_aqc_get_phy_caps */ __le16 eeer_value; @@ -1068,6 +1077,19 @@ struct ice_aqc_nvm { __le32 addr_low; }; +/** + * Send to PF command (indirect 0x0801) id is only used by PF + * + * Send to VF command (indirect 0x0802) id is only used by PF + * + */ +struct ice_aqc_pf_vf_msg { + __le32 id; + u32 reserved; + __le32 addr_high; + __le32 
addr_low; +}; + /* Get/Set RSS key (indirect 0x0B04/0x0B02) */ struct ice_aqc_get_set_rss_key { #define ICE_AQC_GSET_RSS_KEY_VSI_VALID BIT(15) @@ -1203,6 +1225,84 @@ struct ice_aqc_dis_txq { struct ice_aqc_dis_txq_item qgrps[1]; }; +/* Configure Firmware Logging Command (indirect 0xFF09) + * Logging Information Read Response (indirect 0xFF10) + * Note: The 0xFF10 command has no input parameters. + */ +struct ice_aqc_fw_logging { + u8 log_ctrl; +#define ICE_AQC_FW_LOG_AQ_EN BIT(0) +#define ICE_AQC_FW_LOG_UART_EN BIT(1) + u8 rsvd0; + u8 log_ctrl_valid; /* Not used by 0xFF10 Response */ +#define ICE_AQC_FW_LOG_AQ_VALID BIT(0) +#define ICE_AQC_FW_LOG_UART_VALID BIT(1) + u8 rsvd1[5]; + __le32 addr_high; + __le32 addr_low; +}; + +enum ice_aqc_fw_logging_mod { + ICE_AQC_FW_LOG_ID_GENERAL = 0, + ICE_AQC_FW_LOG_ID_CTRL, + ICE_AQC_FW_LOG_ID_LINK, + ICE_AQC_FW_LOG_ID_LINK_TOPO, + ICE_AQC_FW_LOG_ID_DNL, + ICE_AQC_FW_LOG_ID_I2C, + ICE_AQC_FW_LOG_ID_SDP, + ICE_AQC_FW_LOG_ID_MDIO, + ICE_AQC_FW_LOG_ID_ADMINQ, + ICE_AQC_FW_LOG_ID_HDMA, + ICE_AQC_FW_LOG_ID_LLDP, + ICE_AQC_FW_LOG_ID_DCBX, + ICE_AQC_FW_LOG_ID_DCB, + ICE_AQC_FW_LOG_ID_NETPROXY, + ICE_AQC_FW_LOG_ID_NVM, + ICE_AQC_FW_LOG_ID_AUTH, + ICE_AQC_FW_LOG_ID_VPD, + ICE_AQC_FW_LOG_ID_IOSF, + ICE_AQC_FW_LOG_ID_PARSER, + ICE_AQC_FW_LOG_ID_SW, + ICE_AQC_FW_LOG_ID_SCHEDULER, + ICE_AQC_FW_LOG_ID_TXQ, + ICE_AQC_FW_LOG_ID_RSVD, + ICE_AQC_FW_LOG_ID_POST, + ICE_AQC_FW_LOG_ID_WATCHDOG, + ICE_AQC_FW_LOG_ID_TASK_DISPATCH, + ICE_AQC_FW_LOG_ID_MNG, + ICE_AQC_FW_LOG_ID_MAX, +}; + +/* This is the buffer for both of the logging commands. + * The entry array size depends on the datalen parameter in the descriptor. + * There will be a total of datalen / 2 entries. + */ +struct ice_aqc_fw_logging_data { + __le16 entry[1]; +#define ICE_AQC_FW_LOG_ID_S 0 +#define ICE_AQC_FW_LOG_ID_M (0xFFF << ICE_AQC_FW_LOG_ID_S) + +#define ICE_AQC_FW_LOG_CONF_SUCCESS 0 /* Used by response */ +#define ICE_AQC_FW_LOG_CONF_BAD_INDX BIT(12) /* Used by response */ + +#define ICE_AQC_FW_LOG_EN_S 12 +#define ICE_AQC_FW_LOG_EN_M (0xF << ICE_AQC_FW_LOG_EN_S) +#define ICE_AQC_FW_LOG_INFO_EN BIT(12) /* Used by command */ +#define ICE_AQC_FW_LOG_INIT_EN BIT(13) /* Used by command */ +#define ICE_AQC_FW_LOG_FLOW_EN BIT(14) /* Used by command */ +#define ICE_AQC_FW_LOG_ERR_EN BIT(15) /* Used by command */ +}; + +/* Get/Clear FW Log (indirect 0xFF11) */ +struct ice_aqc_get_clear_fw_log { + u8 flags; +#define ICE_AQC_FW_LOG_CLEAR BIT(0) +#define ICE_AQC_FW_LOG_MORE_DATA_AVAIL BIT(1) + u8 rsvd1[7]; + __le32 addr_high; + __le32 addr_low; +}; + /** * struct ice_aq_desc - Admin Queue (AQ) descriptor * @flags: ICE_AQ_FLAG_* flags @@ -1247,11 +1347,15 @@ struct ice_aq_desc { struct ice_aqc_query_txsched_res query_sched_res; struct ice_aqc_add_move_delete_elem add_move_delete_elem; struct ice_aqc_nvm nvm; + struct ice_aqc_pf_vf_msg virt; struct ice_aqc_get_set_rss_lut get_set_rss_lut; struct ice_aqc_get_set_rss_key get_set_rss_key; struct ice_aqc_add_txqs add_txqs; struct ice_aqc_dis_txqs dis_txqs; struct ice_aqc_add_get_update_free_vsi vsi_cmd; + struct ice_aqc_add_update_free_vsi_resp add_update_free_vsi_res; + struct ice_aqc_fw_logging fw_logging; + struct ice_aqc_get_clear_fw_log get_clear_fw_log; struct ice_aqc_alloc_free_res_cmd sw_res_ctrl; struct ice_aqc_set_event_mask set_event_mask; struct ice_aqc_get_link_status get_link_status; @@ -1325,6 +1429,7 @@ enum ice_adminq_opc { /* transmit scheduler commands */ ice_aqc_opc_get_dflt_topo = 0x0400, ice_aqc_opc_add_sched_elems = 0x0401, + 
ice_aqc_opc_get_sched_elems = 0x0404, ice_aqc_opc_suspend_sched_elems = 0x0409, ice_aqc_opc_resume_sched_elems = 0x040A, ice_aqc_opc_delete_sched_elems = 0x040F, @@ -1340,6 +1445,10 @@ enum ice_adminq_opc { /* NVM commands */ ice_aqc_opc_nvm_read = 0x0701, + /* PF/VF mailbox commands */ + ice_mbx_opc_send_msg_to_pf = 0x0801, + ice_mbx_opc_send_msg_to_vf = 0x0802, + /* RSS commands */ ice_aqc_opc_set_rss_key = 0x0B02, ice_aqc_opc_set_rss_lut = 0x0B03, @@ -1349,6 +1458,9 @@ enum ice_adminq_opc { /* TX queue handling commands/events */ ice_aqc_opc_add_txqs = 0x0C30, ice_aqc_opc_dis_txqs = 0x0C31, + + /* debug commands */ + ice_aqc_opc_fw_logging = 0xFF09, }; #endif /* _ICE_ADMINQ_CMD_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 661beea6af79..c52f450f2c0d 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -7,16 +7,16 @@ #define ICE_PF_RESET_WAIT_COUNT 200 -#define ICE_NIC_FLX_ENTRY(hw, mdid, idx) \ - wr32((hw), GLFLXP_RXDID_FLX_WRD_##idx(ICE_RXDID_FLEX_NIC), \ +#define ICE_PROG_FLEX_ENTRY(hw, rxdid, mdid, idx) \ + wr32((hw), GLFLXP_RXDID_FLX_WRD_##idx(rxdid), \ ((ICE_RX_OPC_MDID << \ GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_S) & \ GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_M) | \ (((mdid) << GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_S) & \ GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_M)) -#define ICE_NIC_FLX_FLG_ENTRY(hw, flg_0, flg_1, flg_2, flg_3, idx) \ - wr32((hw), GLFLXP_RXDID_FLAGS(ICE_RXDID_FLEX_NIC, idx), \ +#define ICE_PROG_FLG_ENTRY(hw, rxdid, flg_0, flg_1, flg_2, flg_3, idx) \ + wr32((hw), GLFLXP_RXDID_FLAGS(rxdid, idx), \ (((flg_0) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S) & \ GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M) | \ (((flg_1) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S) & \ @@ -125,7 +125,7 @@ ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size, * * Returns the various PHY capabilities supported on the Port (0x0600) */ -static enum ice_status +enum ice_status ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode, struct ice_aqc_get_phy_caps_data *pcaps, struct ice_sq_cd *cd) @@ -290,30 +290,85 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse, } /** - * ice_init_flex_parser - initialize rx flex parser + * ice_init_flex_flags * @hw: pointer to the hardware structure + * @prof_id: Rx Descriptor Builder profile ID * - * Function to initialize flex descriptors + * Function to initialize Rx flex flags */ -static void ice_init_flex_parser(struct ice_hw *hw) +static void ice_init_flex_flags(struct ice_hw *hw, enum ice_rxdid prof_id) { u8 idx = 0; - ICE_NIC_FLX_ENTRY(hw, ICE_RX_MDID_HASH_LOW, 0); - ICE_NIC_FLX_ENTRY(hw, ICE_RX_MDID_HASH_HIGH, 1); - ICE_NIC_FLX_ENTRY(hw, ICE_RX_MDID_FLOW_ID_LOWER, 2); - ICE_NIC_FLX_ENTRY(hw, ICE_RX_MDID_FLOW_ID_HIGH, 3); - ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_PKT_FRG, ICE_RXFLG_UDP_GRE, - ICE_RXFLG_PKT_DSI, ICE_RXFLG_FIN, idx++); - ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_SYN, ICE_RXFLG_RST, - ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx++); - ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, - ICE_RXFLG_EVLAN_x8100, ICE_RXFLG_EVLAN_x9100, - idx++); - ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_VLAN_x8100, ICE_RXFLG_TNL_VLAN, - ICE_RXFLG_TNL_MAC, ICE_RXFLG_TNL0, idx++); - ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_TNL1, ICE_RXFLG_TNL2, - ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx); + /* Flex-flag fields (0-2) are programmed with FLG64 bits with layout: + * flexiflags0[5:0] - TCP flags, is_packet_fragmented, 
is_packet_UDP_GRE + * flexiflags1[3:0] - Not used for flag programming + * flexiflags2[7:0] - Tunnel and VLAN types + * 2 invalid fields in last index + */ + switch (prof_id) { + /* Rx flex flags are currently programmed for the NIC profiles only. + * Different flag bit programming configurations can be added per + * profile as needed. + */ + case ICE_RXDID_FLEX_NIC: + case ICE_RXDID_FLEX_NIC_2: + ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_PKT_FRG, + ICE_RXFLG_UDP_GRE, ICE_RXFLG_PKT_DSI, + ICE_RXFLG_FIN, idx++); + /* flex flag 1 is not used for flexi-flag programming, skipping + * these four FLG64 bits. + */ + ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_SYN, ICE_RXFLG_RST, + ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx++); + ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_PKT_DSI, + ICE_RXFLG_PKT_DSI, ICE_RXFLG_EVLAN_x8100, + ICE_RXFLG_EVLAN_x9100, idx++); + ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_VLAN_x8100, + ICE_RXFLG_TNL_VLAN, ICE_RXFLG_TNL_MAC, + ICE_RXFLG_TNL0, idx++); + ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_TNL1, ICE_RXFLG_TNL2, + ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx); + break; + + default: + ice_debug(hw, ICE_DBG_INIT, + "Flag programming for profile ID %d not supported\n", + prof_id); + } +} + +/** + * ice_init_flex_flds + * @hw: pointer to the hardware structure + * @prof_id: Rx Descriptor Builder profile ID + * + * Function to initialize flex descriptors + */ +static void ice_init_flex_flds(struct ice_hw *hw, enum ice_rxdid prof_id) +{ + enum ice_flex_rx_mdid mdid; + + switch (prof_id) { + case ICE_RXDID_FLEX_NIC: + case ICE_RXDID_FLEX_NIC_2: + ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_HASH_LOW, 0); + ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_HASH_HIGH, 1); + ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_FLOW_ID_LOWER, 2); + + mdid = (prof_id == ICE_RXDID_FLEX_NIC_2) ? 
+ ICE_RX_MDID_SRC_VSI : ICE_RX_MDID_FLOW_ID_HIGH; + + ICE_PROG_FLEX_ENTRY(hw, prof_id, mdid, 3); + + ice_init_flex_flags(hw, prof_id); + break; + + default: + ice_debug(hw, ICE_DBG_INIT, + "Field init for profile ID %d not supported\n", + prof_id); + } } /** @@ -333,20 +388,7 @@ static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw) INIT_LIST_HEAD(&sw->vsi_list_map_head); - mutex_init(&sw->mac_list_lock); - INIT_LIST_HEAD(&sw->mac_list_head); - - mutex_init(&sw->vlan_list_lock); - INIT_LIST_HEAD(&sw->vlan_list_head); - - mutex_init(&sw->eth_m_list_lock); - INIT_LIST_HEAD(&sw->eth_m_list_head); - - mutex_init(&sw->promisc_list_lock); - INIT_LIST_HEAD(&sw->promisc_list_head); - - mutex_init(&sw->mac_vlan_list_lock); - INIT_LIST_HEAD(&sw->mac_vlan_list_head); + ice_init_def_sw_recp(hw); return 0; } @@ -360,20 +402,232 @@ static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw) struct ice_switch_info *sw = hw->switch_info; struct ice_vsi_list_map_info *v_pos_map; struct ice_vsi_list_map_info *v_tmp_map; + struct ice_sw_recipe *recps; + u8 i; list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head, list_entry) { list_del(&v_pos_map->list_entry); devm_kfree(ice_hw_to_dev(hw), v_pos_map); } + recps = hw->switch_info->recp_list; + for (i = 0; i < ICE_SW_LKUP_LAST; i++) { + struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry; + + recps[i].root_rid = i; + mutex_destroy(&recps[i].filt_rule_lock); + list_for_each_entry_safe(lst_itr, tmp_entry, + &recps[i].filt_rules, list_entry) { + list_del(&lst_itr->list_entry); + devm_kfree(ice_hw_to_dev(hw), lst_itr); + } + } + ice_rm_all_sw_replay_rule_info(hw); + devm_kfree(ice_hw_to_dev(hw), sw->recp_list); + devm_kfree(ice_hw_to_dev(hw), sw); +} - mutex_destroy(&sw->mac_list_lock); - mutex_destroy(&sw->vlan_list_lock); - mutex_destroy(&sw->eth_m_list_lock); - mutex_destroy(&sw->promisc_list_lock); - mutex_destroy(&sw->mac_vlan_list_lock); +#define ICE_FW_LOG_DESC_SIZE(n) (sizeof(struct ice_aqc_fw_logging_data) + \ + (((n) - 1) * sizeof(((struct ice_aqc_fw_logging_data *)0)->entry))) +#define ICE_FW_LOG_DESC_SIZE_MAX \ + ICE_FW_LOG_DESC_SIZE(ICE_AQC_FW_LOG_ID_MAX) - devm_kfree(ice_hw_to_dev(hw), sw); +/** + * ice_cfg_fw_log - configure FW logging + * @hw: pointer to the hw struct + * @enable: enable certain FW logging events if true, disable all if false + * + * This function enables/disables the FW logging via Rx CQ events and a UART + * port based on predetermined configurations. FW logging via the Rx CQ can be + * enabled/disabled for individual PFs. However, FW logging via the UART can + * only be enabled/disabled for all PFs on the same device. + * + * To enable overall FW logging, the "cq_en" and "uart_en" enable bits in + * hw->fw_log need to be set accordingly, e.g. based on user-provided input, + * before initializing the device. + * + * When re/configuring FW logging, callers need to update the "cfg" elements of + * the hw->fw_log.evnts array with the desired logging event configurations for + * modules of interest. When disabling FW logging completely, the callers can + * just pass false in the "enable" parameter. On completion, the function will + * update the "cur" element of the hw->fw_log.evnts array with the resulting + * logging event configurations of the modules that are being re/configured. FW + * logging modules that are not part of a reconfiguration operation retain their + * previous states.
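Editor's sketch (not part of the patch): with the cq_en/uart_en switches and the evnts[].cfg array described above, a PF driver could opt in to FW logging via the Rx CQ before initializing the device. The module choice and the info-level cfg bit below are hypothetical examples.

	hw->fw_log.cq_en = true;
	hw->fw_log.evnts[ICE_AQC_FW_LOG_ID_LINK].cfg = BIT(0);	/* info level */
	status = ice_init_hw(hw);	/* ice_cfg_fw_log(hw, true) runs during init */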
+ * + * Before resetting the device, it is recommended that the driver disables FW + * logging before shutting down the control queue. When disabling FW logging + * ("enable" = false), the latest configurations of FW logging events stored in + * hw->fw_log.evnts[] are not overridden to allow them to be reconfigured after + * a device reset. + * + * When enabling FW logging to emit log messages via the Rx CQ during the + * device's initialization phase, a mechanism alternative to interrupt handlers + * needs to be used to extract FW log messages from the Rx CQ periodically and + * to prevent the Rx CQ from being full and stalling other types of control + * messages from FW to SW. Interrupts are typically disabled during the device's + * initialization phase. + */ +static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable) +{ + struct ice_aqc_fw_logging_data *data = NULL; + struct ice_aqc_fw_logging *cmd; + enum ice_status status = 0; + u16 i, chgs = 0, len = 0; + struct ice_aq_desc desc; + u8 actv_evnts = 0; + void *buf = NULL; + + if (!hw->fw_log.cq_en && !hw->fw_log.uart_en) + return 0; + + /* Disable FW logging only when the control queue is still responsive */ + if (!enable && + (!hw->fw_log.actv_evnts || !ice_check_sq_alive(hw, &hw->adminq))) + return 0; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging); + cmd = &desc.params.fw_logging; + + /* Indicate which controls are valid */ + if (hw->fw_log.cq_en) + cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_AQ_VALID; + + if (hw->fw_log.uart_en) + cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_UART_VALID; + + if (enable) { + /* Fill in an array of entries with FW logging modules and + * logging events being reconfigured. + */ + for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) { + u16 val; + + /* Keep track of enabled event types */ + actv_evnts |= hw->fw_log.evnts[i].cfg; + + if (hw->fw_log.evnts[i].cfg == hw->fw_log.evnts[i].cur) + continue; + + if (!data) { + data = devm_kzalloc(ice_hw_to_dev(hw), + ICE_FW_LOG_DESC_SIZE_MAX, + GFP_KERNEL); + if (!data) + return ICE_ERR_NO_MEMORY; + } + + val = i << ICE_AQC_FW_LOG_ID_S; + val |= hw->fw_log.evnts[i].cfg << ICE_AQC_FW_LOG_EN_S; + data->entry[chgs++] = cpu_to_le16(val); + } + + /* Only enable FW logging if at least one module is specified. + * If FW logging is currently enabled but all modules are not + * enabled to emit log messages, disable FW logging altogether. + */ + if (actv_evnts) { + /* Leave if there is effectively no change */ + if (!chgs) + goto out; + + if (hw->fw_log.cq_en) + cmd->log_ctrl |= ICE_AQC_FW_LOG_AQ_EN; + + if (hw->fw_log.uart_en) + cmd->log_ctrl |= ICE_AQC_FW_LOG_UART_EN; + + buf = data; + len = ICE_FW_LOG_DESC_SIZE(chgs); + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + } + } + + status = ice_aq_send_cmd(hw, &desc, buf, len, NULL); + if (!status) { + /* Update the current configuration to reflect events enabled. + * hw->fw_log.cq_en and hw->fw_log.uart_en indicate if the FW + * logging mode is enabled for the device. They do not reflect + * actual modules being enabled to emit log messages. So, their + * values remain unchanged even when all modules are disabled. + */ + u16 cnt = enable ? chgs : (u16)ICE_AQC_FW_LOG_ID_MAX; + + hw->fw_log.actv_evnts = actv_evnts; + for (i = 0; i < cnt; i++) { + u16 v, m; + + if (!enable) { + /* When disabling all FW logging events as part + * of device's de-initialization, the original + * configurations are retained, and can be used + * to reconfigure FW logging later if the device + * is re-initialized. 
+ */ + hw->fw_log.evnts[i].cur = 0; + continue; + } + + v = le16_to_cpu(data->entry[i]); + m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S; + hw->fw_log.evnts[m].cur = hw->fw_log.evnts[m].cfg; + } + } + +out: + if (data) + devm_kfree(ice_hw_to_dev(hw), data); + + return status; +} + +/** + * ice_output_fw_log + * @hw: pointer to the hw struct + * @desc: pointer to the AQ message descriptor + * @buf: pointer to the buffer accompanying the AQ message + * + * Formats a FW Log message and outputs it via the standard driver logs. + */ +void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf) +{ + ice_debug(hw, ICE_DBG_AQ_MSG, "[ FW Log Msg Start ]\n"); + ice_debug_array(hw, ICE_DBG_AQ_MSG, 16, 1, (u8 *)buf, + le16_to_cpu(desc->datalen)); + ice_debug(hw, ICE_DBG_AQ_MSG, "[ FW Log Msg End ]\n"); +} + +/** + * ice_get_itr_intrl_gran - determine int/intrl granularity + * @hw: pointer to the hw struct + * + * Determines the itr/intrl granularities based on the maximum aggregate + * bandwidth according to the device's configuration during power-on. + */ +static enum ice_status ice_get_itr_intrl_gran(struct ice_hw *hw) +{ + u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) & + GL_PWR_MODE_CTL_CAR_MAX_BW_M) >> + GL_PWR_MODE_CTL_CAR_MAX_BW_S; + + switch (max_agg_bw) { + case ICE_MAX_AGG_BW_200G: + case ICE_MAX_AGG_BW_100G: + case ICE_MAX_AGG_BW_50G: + hw->itr_gran = ICE_ITR_GRAN_ABOVE_25; + hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25; + break; + case ICE_MAX_AGG_BW_25G: + hw->itr_gran = ICE_ITR_GRAN_MAX_25; + hw->intrl_gran = ICE_INTRL_GRAN_MAX_25; + break; + default: + ice_debug(hw, ICE_DBG_INIT, + "Failed to determine itr/intrl granularity\n"); + return ICE_ERR_CFG; + } + + return 0; } /** @@ -400,16 +654,19 @@ enum ice_status ice_init_hw(struct ice_hw *hw) if (status) return status; - /* set these values to minimum allowed */ - hw->itr_gran_200 = ICE_ITR_GRAN_MIN_200; - hw->itr_gran_100 = ICE_ITR_GRAN_MIN_100; - hw->itr_gran_50 = ICE_ITR_GRAN_MIN_50; - hw->itr_gran_25 = ICE_ITR_GRAN_MIN_25; + status = ice_get_itr_intrl_gran(hw); + if (status) + return status; status = ice_init_all_ctrlq(hw); if (status) goto err_unroll_cqinit; + /* Enable FW logging. Not fatal if this fails. 
*/ + status = ice_cfg_fw_log(hw, true); + if (status) + ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging.\n"); + status = ice_clear_pf_cfg(hw); if (status) goto err_unroll_cqinit; @@ -472,6 +729,13 @@ enum ice_status ice_init_hw(struct ice_hw *hw) if (status) goto err_unroll_sched; + /* need a valid SW entry point to build a Tx tree */ + if (!hw->sw_entry_point_layer) { + ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n"); + status = ICE_ERR_CFG; + goto err_unroll_sched; + } + status = ice_init_fltr_mgmt_struct(hw); if (status) goto err_unroll_sched; @@ -494,7 +758,8 @@ enum ice_status ice_init_hw(struct ice_hw *hw) if (status) goto err_unroll_fltr_mgmt_struct; - ice_init_flex_parser(hw); + ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC); + ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC_2); return 0; @@ -515,15 +780,18 @@ err_unroll_cqinit: */ void ice_deinit_hw(struct ice_hw *hw) { + ice_cleanup_fltr_mgmt_struct(hw); + ice_sched_cleanup_all(hw); - ice_shutdown_all_ctrlq(hw); if (hw->port_info) { devm_kfree(ice_hw_to_dev(hw), hw->port_info); hw->port_info = NULL; } - ice_cleanup_fltr_mgmt_struct(hw); + /* Attempt to disable FW logging before shutting down control queues */ + ice_cfg_fw_log(hw, false); + ice_shutdown_all_ctrlq(hw); } /** @@ -652,6 +920,8 @@ enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req) ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n"); val = GLGEN_RTRIG_GLOBR_M; break; + default: + return ICE_ERR_PARAM; } val |= rd32(hw, GLGEN_RTRIG); @@ -904,7 +1174,22 @@ enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading) * @timeout: the maximum time in ms that the driver may hold the resource * @cd: pointer to command details structure or NULL * - * requests common resource using the admin queue commands (0x0008) + * Requests common resource using the admin queue commands (0x0008). + * When attempting to acquire the Global Config Lock, the driver can + * learn of three states: + * 1) ICE_SUCCESS - acquired lock, and can perform download package + * 2) ICE_ERR_AQ_ERROR - did not get lock, driver should fail to load + * 3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has + * successfully downloaded the package; the driver does + * not have to download the package and can continue + * loading + * + * Note that if the caller is in an acquire lock, perform action, release lock + * phase of operation, it is possible that the FW may detect a timeout and issue + * a CORER. In this case, the driver will receive a CORER interrupt and will + * have to determine its cause. The calling thread that is handling this flow + * will likely get an error propagated back to it indicating the Download + * Package, Update Package or the Release Resource AQ commands timed out. */ static enum ice_status ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res, @@ -922,13 +1207,43 @@ ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res, cmd_resp->res_id = cpu_to_le16(res); cmd_resp->access_type = cpu_to_le16(access); cmd_resp->res_number = cpu_to_le32(sdp_number); + cmd_resp->timeout = cpu_to_le32(*timeout); + *timeout = 0; status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); + /* The completion specifies the maximum time in ms that the driver * may hold the resource in the Timeout field. - * If the resource is held by someone else, the command completes with - * busy return value and the timeout field indicates the maximum time - * the current owner of the resource has to free it. 
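Editor's sketch (not part of the patch): the three Global Config Lock outcomes listed above map onto a caller-side pattern like the one below. The 5000 ms timeout and the ICE_RES_WRITE access type are illustrative assumptions.

	status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID,
				 ICE_RES_WRITE, 5000);
	if (!status) {
		/* lock held: download the package, then release the lock */
		ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
	} else if (status == ICE_ERR_AQ_NO_WORK) {
		/* another driver already downloaded the package; keep loading */
	} else {
		/* ICE_ERR_AQ_ERROR: did not get the lock; fail to load */
	}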
+ */ + + /* Global config lock response utilizes an additional status field. + * + * If the Global config lock resource is held by some other driver, the + * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field + * and the timeout field indicates the maximum time the current owner + * of the resource has to free it. + */ + if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) { + if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) { + *timeout = le32_to_cpu(cmd_resp->timeout); + return 0; + } else if (le16_to_cpu(cmd_resp->status) == + ICE_AQ_RES_GLBL_IN_PROG) { + *timeout = le32_to_cpu(cmd_resp->timeout); + return ICE_ERR_AQ_ERROR; + } else if (le16_to_cpu(cmd_resp->status) == + ICE_AQ_RES_GLBL_DONE) { + return ICE_ERR_AQ_NO_WORK; + } + + /* invalid FW response, force a timeout immediately */ + *timeout = 0; + return ICE_ERR_AQ_ERROR; + } + + /* If the resource is held by some other driver, the command completes + * with a busy return value and the timeout field indicates the maximum + * time the current owner of the resource has to free it. */ if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY) *timeout = le32_to_cpu(cmd_resp->timeout); @@ -967,30 +1282,28 @@ ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number, * @hw: pointer to the HW structure * @res: resource id * @access: access type (read or write) + * @timeout: timeout in milliseconds * * This function will attempt to acquire the ownership of a resource. */ enum ice_status ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res, - enum ice_aq_res_access_type access) + enum ice_aq_res_access_type access, u32 timeout) { #define ICE_RES_POLLING_DELAY_MS 10 u32 delay = ICE_RES_POLLING_DELAY_MS; + u32 time_left = timeout; enum ice_status status; - u32 time_left = 0; - u32 timeout; status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL); - /* An admin queue return code of ICE_AQ_RC_EEXIST means that another - * driver has previously acquired the resource and performed any - * necessary updates; in this case the caller does not obtain the - * resource and has no further work to do. + /* A return code of ICE_ERR_AQ_NO_WORK means that another driver has + * previously acquired the resource and performed any necessary updates; + * in this case the caller does not obtain the resource and has no + * further work to do. */ - if (hw->adminq.sq_last_status == ICE_AQ_RC_EEXIST) { - status = ICE_ERR_AQ_NO_WORK; + if (status == ICE_ERR_AQ_NO_WORK) goto ice_acquire_res_exit; - } if (status) ice_debug(hw, ICE_DBG_RES, @@ -1003,11 +1316,9 @@ ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res, timeout = (timeout > delay) ? 
timeout - delay : 0; status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL); - if (hw->adminq.sq_last_status == ICE_AQ_RC_EEXIST) { + if (status == ICE_ERR_AQ_NO_WORK) /* lock free, but no work to do */ - status = ICE_ERR_AQ_NO_WORK; break; - } if (!status) /* lock acquired */ @@ -1095,6 +1406,28 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count, u16 cap = le16_to_cpu(cap_resp->cap); switch (cap) { + case ICE_AQC_CAPS_SRIOV: + caps->sr_iov_1_1 = (number == 1); + ice_debug(hw, ICE_DBG_INIT, + "HW caps: SR-IOV = %d\n", caps->sr_iov_1_1); + break; + case ICE_AQC_CAPS_VF: + if (dev_p) { + dev_p->num_vfs_exposed = number; + ice_debug(hw, ICE_DBG_INIT, + "HW caps: VFs exposed = %d\n", + dev_p->num_vfs_exposed); + } else if (func_p) { + func_p->num_allocd_vfs = number; + func_p->vf_base_id = logical_id; + ice_debug(hw, ICE_DBG_INIT, + "HW caps: VFs allocated = %d\n", + func_p->num_allocd_vfs); + ice_debug(hw, ICE_DBG_INIT, + "HW caps: VF base_id = %d\n", + func_p->vf_base_id); + } + break; case ICE_AQC_CAPS_VSI: if (dev_p) { dev_p->num_vsi_allocd_to_host = number; @@ -1171,7 +1504,7 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count, * @hw: pointer to the hw struct * @buf: a virtual buffer to hold the capabilities * @buf_size: Size of the virtual buffer - * @data_size: Size of the returned data, or buf size needed if AQ err==ENOMEM + * @cap_count: cap count needed if AQ err==ENOMEM * @opc: capabilities type to discover - pass in the command opcode * @cd: pointer to command details structure or NULL * @@ -1179,7 +1512,7 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count, * the firmware. */ static enum ice_status -ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u16 *data_size, +ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count, enum ice_adminq_opc opc, struct ice_sq_cd *cd) { struct ice_aqc_list_caps *cmd; @@ -1197,59 +1530,77 @@ ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u16 *data_size, status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); if (!status) ice_parse_caps(hw, buf, le32_to_cpu(cmd->count), opc); - *data_size = le16_to_cpu(desc.datalen); - + else if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOMEM) + *cap_count = + DIV_ROUND_UP(le16_to_cpu(desc.datalen), + sizeof(struct ice_aqc_list_caps_elem)); return status; } /** - * ice_get_caps - get info about the HW + * ice_discover_caps - get info about the HW * @hw: pointer to the hardware structure + * @opc: capabilities type to discover - pass in the command opcode */ -enum ice_status ice_get_caps(struct ice_hw *hw) +static enum ice_status ice_discover_caps(struct ice_hw *hw, + enum ice_adminq_opc opc) { enum ice_status status; - u16 data_size = 0; + u32 cap_count; u16 cbuf_len; u8 retries; /* The driver doesn't know how many capabilities the device will return * so the buffer size required isn't known ahead of time. The driver * starts with cbuf_len and if this turns out to be insufficient, the - * device returns ICE_AQ_RC_ENOMEM and also the buffer size it needs. - * The driver then allocates the buffer of this size and retries the - * operation. So it follows that the retry count is 2. + * device returns ICE_AQ_RC_ENOMEM and also the cap_count it needs. + * The driver then allocates the buffer based on the count and retries + * the operation. So it follows that the retry count is 2. 
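Editor's note (not part of the patch): a worked instance of the two-pass sizing described here. Suppose the device actually reports 57 capability elements: the first attempt sends a 40-element buffer, firmware answers ICE_AQ_RC_ENOMEM along with cap_count = 57, and the second attempt allocates 57 * sizeof(struct ice_aqc_list_caps_elem) bytes and succeeds, which is why two tries always suffice.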
*/ #define ICE_GET_CAP_BUF_COUNT 40 #define ICE_GET_CAP_RETRY_COUNT 2 - cbuf_len = ICE_GET_CAP_BUF_COUNT * - sizeof(struct ice_aqc_list_caps_elem); - + cap_count = ICE_GET_CAP_BUF_COUNT; retries = ICE_GET_CAP_RETRY_COUNT; do { void *cbuf; + cbuf_len = (u16)(cap_count * + sizeof(struct ice_aqc_list_caps_elem)); cbuf = devm_kzalloc(ice_hw_to_dev(hw), cbuf_len, GFP_KERNEL); if (!cbuf) return ICE_ERR_NO_MEMORY; - status = ice_aq_discover_caps(hw, cbuf, cbuf_len, &data_size, - ice_aqc_opc_list_func_caps, NULL); + status = ice_aq_discover_caps(hw, cbuf, cbuf_len, &cap_count, + opc, NULL); devm_kfree(ice_hw_to_dev(hw), cbuf); if (!status || hw->adminq.sq_last_status != ICE_AQ_RC_ENOMEM) break; /* If ENOMEM is returned, try again with bigger buffer */ - cbuf_len = data_size; } while (--retries); return status; } /** + * ice_get_caps - get info about the HW + * @hw: pointer to the hardware structure + */ +enum ice_status ice_get_caps(struct ice_hw *hw) +{ + enum ice_status status; + + status = ice_discover_caps(hw, ice_aqc_opc_list_dev_caps); + if (!status) + status = ice_discover_caps(hw, ice_aqc_opc_list_func_caps); + + return status; +} + +/** * ice_aq_manage_mac_write - manage MAC address write command * @hw: pointer to the hw struct * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address @@ -1307,6 +1658,110 @@ void ice_clear_pxe_mode(struct ice_hw *hw) } /** + * ice_get_link_speed_based_on_phy_type - returns link speed + * @phy_type_low: lower part of phy_type + * + * This helper function will convert a phy_type_low to its corresponding link + * speed. + * Note: In the structure of phy_type_low, there should be one bit set, as + * this function will convert one phy type to its speed. + * If no bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned + * If more than one bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned + */ +static u16 +ice_get_link_speed_based_on_phy_type(u64 phy_type_low) +{ + u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN; + + switch (phy_type_low) { + case ICE_PHY_TYPE_LOW_100BASE_TX: + case ICE_PHY_TYPE_LOW_100M_SGMII: + speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB; + break; + case ICE_PHY_TYPE_LOW_1000BASE_T: + case ICE_PHY_TYPE_LOW_1000BASE_SX: + case ICE_PHY_TYPE_LOW_1000BASE_LX: + case ICE_PHY_TYPE_LOW_1000BASE_KX: + case ICE_PHY_TYPE_LOW_1G_SGMII: + speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB; + break; + case ICE_PHY_TYPE_LOW_2500BASE_T: + case ICE_PHY_TYPE_LOW_2500BASE_X: + case ICE_PHY_TYPE_LOW_2500BASE_KX: + speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB; + break; + case ICE_PHY_TYPE_LOW_5GBASE_T: + case ICE_PHY_TYPE_LOW_5GBASE_KR: + speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB; + break; + case ICE_PHY_TYPE_LOW_10GBASE_T: + case ICE_PHY_TYPE_LOW_10G_SFI_DA: + case ICE_PHY_TYPE_LOW_10GBASE_SR: + case ICE_PHY_TYPE_LOW_10GBASE_LR: + case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1: + case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC: + case ICE_PHY_TYPE_LOW_10G_SFI_C2C: + speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB; + break; + case ICE_PHY_TYPE_LOW_25GBASE_T: + case ICE_PHY_TYPE_LOW_25GBASE_CR: + case ICE_PHY_TYPE_LOW_25GBASE_CR_S: + case ICE_PHY_TYPE_LOW_25GBASE_CR1: + case ICE_PHY_TYPE_LOW_25GBASE_SR: + case ICE_PHY_TYPE_LOW_25GBASE_LR: + case ICE_PHY_TYPE_LOW_25GBASE_KR: + case ICE_PHY_TYPE_LOW_25GBASE_KR_S: + case ICE_PHY_TYPE_LOW_25GBASE_KR1: + case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC: + case ICE_PHY_TYPE_LOW_25G_AUI_C2C: + speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB; + break; + case ICE_PHY_TYPE_LOW_40GBASE_CR4: + case ICE_PHY_TYPE_LOW_40GBASE_SR4: + case 
ICE_PHY_TYPE_LOW_40GBASE_LR4: + case ICE_PHY_TYPE_LOW_40GBASE_KR4: + case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC: + case ICE_PHY_TYPE_LOW_40G_XLAUI: + speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB; + break; + default: + speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN; + break; + } + + return speed_phy_type_low; +} + +/** + * ice_update_phy_type + * @phy_type_low: pointer to the lower part of phy_type + * @link_speeds_bitmap: targeted link speeds bitmap + * + * Note: For the link_speeds_bitmap layout, see + * [ice_aqc_get_link_status->link_speed]. The caller can pass in a + * link_speeds_bitmap that includes multiple speeds. + * + * The value of phy_type_low represents a certain link speed. This helper + * function will turn on bits in phy_type_low based on the value of the + * link_speeds_bitmap input parameter. + */ +void ice_update_phy_type(u64 *phy_type_low, u16 link_speeds_bitmap) +{ + u16 speed = ICE_AQ_LINK_SPEED_UNKNOWN; + u64 pt_low; + int index; + + /* We first check with low part of phy_type */ + for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) { + pt_low = BIT_ULL(index); + speed = ice_get_link_speed_based_on_phy_type(pt_low); + + if (link_speeds_bitmap & speed) + *phy_type_low |= BIT_ULL(index); + } +} + +/** * ice_aq_set_phy_cfg * @hw: pointer to the hw struct * @lport: logical port number @@ -1318,19 +1773,18 @@ void ice_clear_pxe_mode(struct ice_hw *hw) * mode as the PF may not have the privilege to set some of the PHY Config * parameters. This status will be indicated by the command response (0x0601). */ -static enum ice_status +enum ice_status ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport, struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd) { - struct ice_aqc_set_phy_cfg *cmd; struct ice_aq_desc desc; if (!cfg) return ICE_ERR_PARAM; - cmd = &desc.params.set_phy; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg); - cmd->lport_num = lport; + desc.params.set_phy.lport_num = lport; + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); return ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd); } @@ -1339,8 +1793,7 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport, * ice_update_link_info - update status of the HW network link * @pi: port info structure of the interested logical port */ -static enum ice_status -ice_update_link_info(struct ice_port_info *pi) +enum ice_status ice_update_link_info(struct ice_port_info *pi) { struct ice_aqc_get_phy_caps_data *pcaps; struct ice_phy_info *phy_info; @@ -1379,12 +1832,12 @@ out: * ice_set_fc * @pi: port information structure * @aq_failures: pointer to status code, specific to ice_set_fc routine - * @atomic_restart: enable automatic link update + * @ena_auto_link_update: enable automatic link update * * Set the requested flow control mode.
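Editor's sketch (not part of the patch): typical use of the ice_update_phy_type() helper above when translating user-requested speeds into PHY type bits; the requested speed mask is a hypothetical example.

	u64 phy_type_low = 0;

	ice_update_phy_type(&phy_type_low,
			    ICE_AQ_LINK_SPEED_10GB | ICE_AQ_LINK_SPEED_25GB);
	/* phy_type_low now has one bit set per matching PHY type and can be
	 * copied into struct ice_aqc_set_phy_cfg_data for ice_aq_set_phy_cfg()
	 */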
*/ enum ice_status -ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool atomic_restart) +ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update) { struct ice_aqc_set_phy_cfg_data cfg = { 0 }; struct ice_aqc_get_phy_caps_data *pcaps; @@ -1434,8 +1887,8 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool atomic_restart) int retry_count, retry_max = 10; /* Auto restart link so settings take effect */ - if (atomic_restart) - cfg.caps |= ICE_AQ_PHY_ENA_ATOMIC_LINK; + if (ena_auto_link_update) + cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; /* Copy over all the old settings */ cfg.phy_type_low = pcaps->phy_type_low; cfg.low_power_ctrl = pcaps->low_power_ctrl; @@ -1654,7 +2107,7 @@ ice_aq_get_set_rss_lut_exit: /** * ice_aq_get_rss_lut * @hw: pointer to the hardware structure - * @vsi_id: VSI FW index + * @vsi_handle: software VSI handle * @lut_type: LUT table type * @lut: pointer to the LUT buffer provided by the caller * @lut_size: size of the LUT buffer @@ -1662,17 +2115,20 @@ ice_aq_get_set_rss_lut_exit: * get the RSS lookup table, PF or VSI type */ enum ice_status -ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut, - u16 lut_size) +ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type, + u8 *lut, u16 lut_size) { - return __ice_aq_get_set_rss_lut(hw, vsi_id, lut_type, lut, lut_size, 0, - false); + if (!ice_is_vsi_valid(hw, vsi_handle) || !lut) + return ICE_ERR_PARAM; + + return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle), + lut_type, lut, lut_size, 0, false); } /** * ice_aq_set_rss_lut * @hw: pointer to the hardware structure - * @vsi_id: VSI FW index + * @vsi_handle: software VSI handle * @lut_type: LUT table type * @lut: pointer to the LUT buffer provided by the caller * @lut_size: size of the LUT buffer @@ -1680,11 +2136,14 @@ ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut, * set the RSS lookup table, PF or VSI type */ enum ice_status -ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut, - u16 lut_size) +ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type, + u8 *lut, u16 lut_size) { - return __ice_aq_get_set_rss_lut(hw, vsi_id, lut_type, lut, lut_size, 0, - true); + if (!ice_is_vsi_valid(hw, vsi_handle) || !lut) + return ICE_ERR_PARAM; + + return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle), + lut_type, lut, lut_size, 0, true); } /** @@ -1725,31 +2184,39 @@ ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id, /** * ice_aq_get_rss_key * @hw: pointer to the hw struct - * @vsi_id: VSI FW index + * @vsi_handle: software VSI handle * @key: pointer to key info struct * * get the RSS key per VSI */ enum ice_status -ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_id, +ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle, struct ice_aqc_get_set_rss_keys *key) { - return __ice_aq_get_set_rss_key(hw, vsi_id, key, false); + if (!ice_is_vsi_valid(hw, vsi_handle) || !key) + return ICE_ERR_PARAM; + + return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle), + key, false); } /** * ice_aq_set_rss_key * @hw: pointer to the hw struct - * @vsi_id: VSI FW index + * @vsi_handle: software VSI handle * @keys: pointer to key info struct * * set the RSS key per VSI */ enum ice_status -ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_id, +ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle, struct ice_aqc_get_set_rss_keys *keys) { - return __ice_aq_get_set_rss_key(hw, vsi_id, keys, true); + if (!ice_is_vsi_valid(hw, vsi_handle) 
|| !keys) + return ICE_ERR_PARAM; + + return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle), + keys, true); } /** @@ -1820,6 +2287,8 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps, * @num_qgrps: number of groups in the list * @qg_list: the list of groups to disable * @buf_size: the total size of the qg_list buffer in bytes + * @rst_src: if called due to reset, specifies the RST source + * @vmvf_num: the relative VM or VF number that is undergoing the reset * @cd: pointer to command details structure or NULL * * Disable LAN Tx queue (0x0C31) @@ -1827,6 +2296,7 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps, static enum ice_status ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps, struct ice_aqc_dis_txq_item *qg_list, u16 buf_size, + enum ice_disq_rst_src rst_src, u16 vmvf_num, struct ice_sq_cd *cd) { struct ice_aqc_dis_txqs *cmd; @@ -1836,14 +2306,45 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps, cmd = &desc.params.dis_txqs; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs); - if (!qg_list) + /* qg_list can be NULL only in VM/VF reset flow */ + if (!qg_list && !rst_src) return ICE_ERR_PARAM; if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS) return ICE_ERR_PARAM; - desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + cmd->num_entries = num_qgrps; + cmd->vmvf_and_timeout = cpu_to_le16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) & + ICE_AQC_Q_DIS_TIMEOUT_M); + + switch (rst_src) { + case ICE_VM_RESET: + cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET; + cmd->vmvf_and_timeout |= + cpu_to_le16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M); + break; + case ICE_VF_RESET: + cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET; + /* In this case, FW expects vmvf_num to be absolute VF id */ + cmd->vmvf_and_timeout |= + cpu_to_le16((vmvf_num + hw->func_caps.vf_base_id) & + ICE_AQC_Q_DIS_VMVF_NUM_M); + break; + case ICE_NO_RESET: + default: + break; + } + + /* If no queue group info, we are in a reset flow. 
Issue the AQ */ + if (!qg_list) + goto do_aq; + + /* set RD bit to indicate that command buffer is provided by the driver + * and it needs to be read by the firmware + */ + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + for (i = 0; i < num_qgrps; ++i) { /* Calculate the size taken up by the queue IDs in this group */ sz += qg_list[i].num_qs * sizeof(qg_list[i].q_id); @@ -1859,6 +2360,7 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps, if (buf_size != sz) return ICE_ERR_PARAM; +do_aq: return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd); } @@ -2088,7 +2590,7 @@ ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) /** * ice_ena_vsi_txq * @pi: port information structure - * @vsi_id: VSI id + * @vsi_handle: software VSI handle * @tc: tc number * @num_qgrps: Number of added queue groups * @buf: list of queue groups to be added @@ -2098,7 +2600,7 @@ ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) * This function adds one lan q */ enum ice_status -ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_id, u8 tc, u8 num_qgrps, +ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size, struct ice_sq_cd *cd) { @@ -2115,15 +2617,19 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_id, u8 tc, u8 num_qgrps, hw = pi->hw; + if (!ice_is_vsi_valid(hw, vsi_handle)) + return ICE_ERR_PARAM; + mutex_lock(&pi->sched_lock); /* find a parent node */ - parent = ice_sched_get_free_qparent(pi, vsi_id, tc, + parent = ice_sched_get_free_qparent(pi, vsi_handle, tc, ICE_SCHED_NODE_OWNER_LAN); if (!parent) { status = ICE_ERR_PARAM; goto ena_txq_exit; } + buf->parent_teid = parent->info.node_teid; node.parent_teid = parent->info.node_teid; /* Mark that the values in the "generic" section as valid. 
The default @@ -2161,13 +2667,16 @@ ena_txq_exit: * @num_queues: number of queues * @q_ids: pointer to the q_id array * @q_teids: pointer to queue node teids + * @rst_src: if called due to reset, specifies the RST source + * @vmvf_num: the relative VM or VF number that is undergoing the reset * @cd: pointer to command details structure or NULL * * This function removes queues and their corresponding nodes in SW DB */ enum ice_status ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids, - u32 *q_teids, struct ice_sq_cd *cd) + u32 *q_teids, enum ice_disq_rst_src rst_src, u16 vmvf_num, + struct ice_sq_cd *cd) { enum ice_status status = ICE_ERR_DOES_NOT_EXIST; struct ice_aqc_dis_txq_item qg_list; @@ -2176,6 +2685,15 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids, if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) return ICE_ERR_CFG; + /* if the queue is already disabled but the disable queue command still + * has to be sent to complete the VF reset, call ice_aq_dis_lan_txq + * without any queue information + */ + + if (!num_queues && rst_src) + return ice_aq_dis_lan_txq(pi->hw, 0, NULL, 0, rst_src, vmvf_num, + NULL); + mutex_lock(&pi->sched_lock); for (i = 0; i < num_queues; i++) { @@ -2188,7 +2706,8 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids, qg_list.num_qs = 1; qg_list.q_id[0] = cpu_to_le16(q_ids[i]); status = ice_aq_dis_lan_txq(pi->hw, 1, &qg_list, - sizeof(qg_list), cd); + sizeof(qg_list), rst_src, vmvf_num, + cd); if (status) break; @@ -2201,7 +2720,7 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids, /** * ice_cfg_vsi_qs - configure the new/existing VSI queues * @pi: port information structure - * @vsi_id: VSI Id + * @vsi_handle: software VSI handle * @tc_bitmap: TC bitmap * @maxqs: max queues array per TC * @owner: lan or rdma * * This function adds/updates the VSI queues per TC. */ static enum ice_status -ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_id, u8 tc_bitmap, +ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap, u16 *maxqs, u8 owner) { enum ice_status status = 0; @@ -2218,6 +2737,9 @@ ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_id, u8 tc_bitmap, if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) return ICE_ERR_CFG; + if (!ice_is_vsi_valid(pi->hw, vsi_handle)) + return ICE_ERR_PARAM; + mutex_lock(&pi->sched_lock); for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) { @@ -2225,7 +2747,7 @@ ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_id, u8 tc_bitmap, if (!ice_sched_get_tc_node(pi, i)) continue; - status = ice_sched_cfg_vsi(pi, vsi_id, i, maxqs[i], owner, + status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner, ice_is_tc_ena(tc_bitmap, i)); if (status) break; @@ -2238,16 +2760,140 @@ ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_id, u8 tc_bitmap, /** * ice_cfg_vsi_lan - configure VSI lan queues * @pi: port information structure - * @vsi_id: VSI Id + * @vsi_handle: software VSI handle * @tc_bitmap: TC bitmap * @max_lanqs: max lan queues array per TC * * This function adds/updates the VSI lan queues per TC.
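Editor's sketch (not part of the patch): with the rst_src/vmvf_num plumbing added to ice_dis_vsi_txq() above, a VF reset path can request the disable without naming any queues; vf_id is a hypothetical relative VF index.

	status = ice_dis_vsi_txq(vsi->port_info, 0, NULL, NULL,
				 ICE_VF_RESET, vf_id, NULL);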
*/ enum ice_status -ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_id, u8 tc_bitmap, +ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap, u16 *max_lanqs) { - return ice_cfg_vsi_qs(pi, vsi_id, tc_bitmap, max_lanqs, + return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs, ICE_SCHED_NODE_OWNER_LAN); } + +/** + * ice_replay_pre_init - replay pre initialization + * @hw: pointer to the hw struct + * + * Initializes required config data for VSI, FD, ACL, and RSS before replay. + */ +static enum ice_status ice_replay_pre_init(struct ice_hw *hw) +{ + struct ice_switch_info *sw = hw->switch_info; + u8 i; + + /* Delete old entries from replay filter list head if there is any */ + ice_rm_all_sw_replay_rule_info(hw); + /* In start of replay, move entries into replay_rules list, it + * will allow adding rules entries back to filt_rules list, + * which is operational list. + */ + for (i = 0; i < ICE_SW_LKUP_LAST; i++) + list_replace_init(&sw->recp_list[i].filt_rules, + &sw->recp_list[i].filt_replay_rules); + + return 0; +} + +/** + * ice_replay_vsi - replay VSI configuration + * @hw: pointer to the hw struct + * @vsi_handle: driver VSI handle + * + * Restore all VSI configuration after reset. It is required to call this + * function with main VSI first. + */ +enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle) +{ + enum ice_status status; + + if (!ice_is_vsi_valid(hw, vsi_handle)) + return ICE_ERR_PARAM; + + /* Replay pre-initialization if there is any */ + if (vsi_handle == ICE_MAIN_VSI_HANDLE) { + status = ice_replay_pre_init(hw); + if (status) + return status; + } + + /* Replay per VSI all filters */ + status = ice_replay_vsi_all_fltr(hw, vsi_handle); + return status; +} + +/** + * ice_replay_post - post replay configuration cleanup + * @hw: pointer to the hw struct + * + * Post replay cleanup. + */ +void ice_replay_post(struct ice_hw *hw) +{ + /* Delete old entries from replay filter list head */ + ice_rm_all_sw_replay_rule_info(hw); +} + +/** + * ice_stat_update40 - read 40 bit stat from the chip and update stat values + * @hw: ptr to the hardware info + * @hireg: high 32 bit HW register to read from + * @loreg: low 32 bit HW register to read from + * @prev_stat_loaded: bool to specify if previous stats are loaded + * @prev_stat: ptr to previous loaded stat value + * @cur_stat: ptr to current stat value + */ +void ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg, + bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat) +{ + u64 new_data; + + new_data = rd32(hw, loreg); + new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32; + + /* device stats are not reset at PFR, they likely will not be zeroed + * when the driver starts. So save the first values read and use them as + * offsets to be subtracted from the raw values in order to report stats + * that count from zero. 
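Editor's note (not part of the patch): a worked instance of the 40-bit rollover handling described above. With hypothetical readings prev_stat = 0xFFFFFFFFF0 and new_data = 0x10, new_data < prev_stat, so the driver computes (0x10 + BIT_ULL(40)) - 0xFFFFFFFFF0 = 0x20, the correct delta of 32 packets across the wrap.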
+ */ + if (!prev_stat_loaded) + *prev_stat = new_data; + if (new_data >= *prev_stat) + *cur_stat = new_data - *prev_stat; + else + /* to manage the potential roll-over */ + *cur_stat = (new_data + BIT_ULL(40)) - *prev_stat; + *cur_stat &= 0xFFFFFFFFFFULL; +} + +/** + * ice_stat_update32 - read 32 bit stat from the chip and update stat values + * @hw: ptr to the hardware info + * @reg: HW register to read from + * @prev_stat_loaded: bool to specify if previous stats are loaded + * @prev_stat: ptr to previous loaded stat value + * @cur_stat: ptr to current stat value + */ +void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, + u64 *prev_stat, u64 *cur_stat) +{ + u32 new_data; + + new_data = rd32(hw, reg); + + /* device stats are not reset at PFR, they likely will not be zeroed + * when the driver starts. So save the first values read and use them as + * offsets to be subtracted from the raw values in order to report stats + * that count from zero. + */ + if (!prev_stat_loaded) + *prev_stat = new_data; + if (new_data >= *prev_stat) + *cur_stat = new_data - *prev_stat; + else + /* to manage the potential roll-over */ + *cur_stat = (new_data + BIT_ULL(32)) - *prev_stat; +} diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h index 9a5519130af1..1900681289a4 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.h +++ b/drivers/net/ethernet/intel/ice/ice_common.h @@ -7,6 +7,7 @@ #include "ice.h" #include "ice_type.h" #include "ice_switch.h" +#include <linux/avf/virtchnl.h> void ice_debug_cq(struct ice_hw *hw, u32 mask, void *desc, void *buf, u16 buf_len); @@ -21,9 +22,10 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq, struct ice_rq_event_info *e, u16 *pending); enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up); +enum ice_status ice_update_link_info(struct ice_port_info *pi); enum ice_status ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res, - enum ice_aq_res_access_type access); + enum ice_aq_res_access_type access, u32 timeout); void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res); enum ice_status ice_init_nvm(struct ice_hw *hw); enum ice_status @@ -37,17 +39,18 @@ ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx, u32 rxq_index); enum ice_status -ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut, +ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type, u8 *lut, u16 lut_size); enum ice_status -ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut, +ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type, u8 *lut, u16 lut_size); enum ice_status -ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_id, +ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle, struct ice_aqc_get_set_rss_keys *keys); enum ice_status -ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_id, +ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle, struct ice_aqc_get_set_rss_keys *keys); + bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq); enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading); void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode); @@ -58,12 +61,24 @@ enum ice_status ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf, u16 buf_size, struct ice_sq_cd *cd); enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd); + +enum ice_status +ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode, + struct 
ice_aqc_get_phy_caps_data *caps, + struct ice_sq_cd *cd); +void +ice_update_phy_type(u64 *phy_type_low, u16 link_speeds_bitmap); enum ice_status ice_aq_manage_mac_write(struct ice_hw *hw, u8 *mac_addr, u8 flags, struct ice_sq_cd *cd); enum ice_status ice_clear_pf_cfg(struct ice_hw *hw); enum ice_status -ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool atomic_restart); +ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport, + struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd); +enum ice_status +ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, + bool ena_auto_link_update); + enum ice_status ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link, struct ice_sq_cd *cd); @@ -75,12 +90,20 @@ ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask, struct ice_sq_cd *cd); enum ice_status ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids, - u32 *q_teids, struct ice_sq_cd *cmd_details); + u32 *q_teids, enum ice_disq_rst_src rst_src, u16 vmvf_num, + struct ice_sq_cd *cmd_details); enum ice_status -ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_id, u8 tc_bitmap, +ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap, u16 *max_lanqs); enum ice_status -ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_id, u8 tc, u8 num_qgrps, +ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size, struct ice_sq_cd *cd); +enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle); +void ice_replay_post(struct ice_hw *hw); +void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf); +void ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg, + bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat); +void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, + u64 *prev_stat, u64 *cur_stat); #endif /* _ICE_COMMON_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c index 62be72fdc8f3..84c967294eaf 100644 --- a/drivers/net/ethernet/intel/ice/ice_controlq.c +++ b/drivers/net/ethernet/intel/ice/ice_controlq.c @@ -33,6 +33,36 @@ static void ice_adminq_init_regs(struct ice_hw *hw) } /** + * ice_mailbox_init_regs - Initialize Mailbox registers + * @hw: pointer to the hardware structure + * + * This assumes the alloc_sq and alloc_rq functions have already been called + */ +static void ice_mailbox_init_regs(struct ice_hw *hw) +{ + struct ice_ctl_q_info *cq = &hw->mailboxq; + + /* set head and tail registers in our local struct */ + cq->sq.head = PF_MBX_ATQH; + cq->sq.tail = PF_MBX_ATQT; + cq->sq.len = PF_MBX_ATQLEN; + cq->sq.bah = PF_MBX_ATQBAH; + cq->sq.bal = PF_MBX_ATQBAL; + cq->sq.len_mask = PF_MBX_ATQLEN_ATQLEN_M; + cq->sq.len_ena_mask = PF_MBX_ATQLEN_ATQENABLE_M; + cq->sq.head_mask = PF_MBX_ATQH_ATQH_M; + + cq->rq.head = PF_MBX_ARQH; + cq->rq.tail = PF_MBX_ARQT; + cq->rq.len = PF_MBX_ARQLEN; + cq->rq.bah = PF_MBX_ARQBAH; + cq->rq.bal = PF_MBX_ARQBAL; + cq->rq.len_mask = PF_MBX_ARQLEN_ARQLEN_M; + cq->rq.len_ena_mask = PF_MBX_ARQLEN_ARQENABLE_M; + cq->rq.head_mask = PF_MBX_ARQH_ARQH_M; +} + +/** * ice_check_sq_alive * @hw: pointer to the hw struct * @cq: pointer to the specific Control queue @@ -518,22 +548,31 @@ shutdown_sq_out: /** * ice_aq_ver_check - Check the reported AQ API version. 
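The ice_aq_ver_check() rewrite just below drops the old exact-match test for a tolerance window: only a newer major firmware API version refuses to load, and minor-version skew beyond two in either direction merely logs. A hedged standalone restatement of that policy (the EXP_* numbers are stand-ins for the driver's EXP_FW_API_VER_MAJOR/MINOR):

#include <stdbool.h>
#include <stdio.h>

#define EXP_MAJOR 1     /* stand-in values, not the driver's real ones */
#define EXP_MINOR 3

static bool aq_ver_ok(unsigned int maj, unsigned int min)
{
        if (maj > EXP_MAJOR) {
                puts("NVM newer than expected: refusing to load");
                return false;
        }
        if (maj == EXP_MAJOR) {
                if (min > EXP_MINOR + 2)
                        puts("NVM newer than expected: update the driver");
                else if (min + 2 < EXP_MINOR)
                        puts("NVM older than expected: update the NVM image");
        } else {
                puts("NVM major version older: update the NVM image");
        }
        return true;    /* everything but a newer major still loads */
}

int main(void)
{
        printf("1.9 loads: %d\n", aq_ver_ok(1, 9));     /* warns, loads */
        printf("2.0 loads: %d\n", aq_ver_ok(2, 0));     /* refuses */
        return 0;
}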
- * @fw_branch: The "branch" of FW, typically describes the device type - * @fw_major: The major version of the FW API - * @fw_minor: The minor version increment of the FW API + * @hw: pointer to the hardware structure * * Checks if the driver should load on a given AQ API version. * * Return: 'true' iff the driver should attempt to load. 'false' otherwise. */ -static bool ice_aq_ver_check(u8 fw_branch, u8 fw_major, u8 fw_minor) +static bool ice_aq_ver_check(struct ice_hw *hw) { - if (fw_branch != EXP_FW_API_VER_BRANCH) - return false; - if (fw_major != EXP_FW_API_VER_MAJOR) - return false; - if (fw_minor != EXP_FW_API_VER_MINOR) + if (hw->api_maj_ver > EXP_FW_API_VER_MAJOR) { + /* Major API version is newer than expected, don't load */ + dev_warn(ice_hw_to_dev(hw), + "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n"); return false; + } else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) { + if (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2)) + dev_info(ice_hw_to_dev(hw), + "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n"); + else if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR) + dev_info(ice_hw_to_dev(hw), + "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n"); + } else { + /* Major API version is older than expected, log a warning */ + dev_info(ice_hw_to_dev(hw), + "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n"); + } return true; } @@ -588,8 +627,7 @@ static enum ice_status ice_init_check_adminq(struct ice_hw *hw) if (status) goto init_ctrlq_free_rq; - if (!ice_aq_ver_check(hw->api_branch, hw->api_maj_ver, - hw->api_min_ver)) { + if (!ice_aq_ver_check(hw)) { status = ICE_ERR_FW_API_VER; goto init_ctrlq_free_rq; } @@ -597,11 +635,11 @@ static enum ice_status ice_init_check_adminq(struct ice_hw *hw) return 0; init_ctrlq_free_rq: - if (cq->rq.head) { + if (cq->rq.count) { ice_shutdown_rq(hw, cq); mutex_destroy(&cq->rq_lock); } - if (cq->sq.head) { + if (cq->sq.count) { ice_shutdown_sq(hw, cq); mutex_destroy(&cq->sq_lock); } @@ -631,6 +669,10 @@ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) ice_adminq_init_regs(hw); cq = &hw->adminq; break; + case ICE_CTL_Q_MAILBOX: + ice_mailbox_init_regs(hw); + cq = &hw->mailboxq; + break; default: return ICE_ERR_PARAM; } @@ -688,7 +730,12 @@ enum ice_status ice_init_all_ctrlq(struct ice_hw *hw) if (ret_code) return ret_code; - return ice_init_check_adminq(hw); + ret_code = ice_init_check_adminq(hw); + if (ret_code) + return ret_code; + + /* Init Mailbox queue */ + return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX); } /** @@ -706,15 +753,18 @@ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) if (ice_check_sq_alive(hw, cq)) ice_aq_q_shutdown(hw, true); break; + case ICE_CTL_Q_MAILBOX: + cq = &hw->mailboxq; + break; default: return; } - if (cq->sq.head) { + if (cq->sq.count) { ice_shutdown_sq(hw, cq); mutex_destroy(&cq->sq_lock); } - if (cq->rq.head) { + if (cq->rq.count) { ice_shutdown_rq(hw, cq); mutex_destroy(&cq->rq_lock); } @@ -728,6 +778,8 @@ void ice_shutdown_all_ctrlq(struct ice_hw *hw) { /* Shutdown FW admin queue */ ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN); + /* Shutdown PF-VF Mailbox */ + ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX); } /** @@ -806,6 +858,9 @@ 
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, u16 retval = 0; u32 val = 0; + /* if reset is in progress return a soft error */ + if (hw->reset_ongoing) + return ICE_ERR_RESET_ONGOING; mutex_lock(&cq->sq_lock); cq->sq_last_status = ICE_AQ_RC_OK; @@ -847,7 +902,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use); if (cd) - memcpy(details, cd, sizeof(*details)); + *details = *cd; else memset(details, 0, sizeof(*details)); diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.h b/drivers/net/ethernet/intel/ice/ice_controlq.h index ea02b89243e2..437f832fd7c4 100644 --- a/drivers/net/ethernet/intel/ice/ice_controlq.h +++ b/drivers/net/ethernet/intel/ice/ice_controlq.h @@ -8,6 +8,7 @@ /* Maximum buffer lengths for all control queue types */ #define ICE_AQ_MAX_BUF_LEN 4096 +#define ICE_MBXQ_MAX_BUF_LEN 4096 #define ICE_CTL_Q_DESC(R, i) \ (&(((struct ice_aq_desc *)((R).desc_buf.va))[i])) @@ -28,6 +29,7 @@ enum ice_ctl_q { ICE_CTL_Q_UNKNOWN = 0, ICE_CTL_Q_ADMIN, + ICE_CTL_Q_MAILBOX, }; /* Control Queue default settings */ diff --git a/drivers/net/ethernet/intel/ice/ice_devids.h b/drivers/net/ethernet/intel/ice/ice_devids.h index 0e14d7215a6e..a6f0a5c0c305 100644 --- a/drivers/net/ethernet/intel/ice/ice_devids.h +++ b/drivers/net/ethernet/intel/ice/ice_devids.h @@ -5,15 +5,11 @@ #define _ICE_DEVIDS_H_ /* Device IDs */ -/* Intel(R) Ethernet Controller C810 for backplane */ +/* Intel(R) Ethernet Controller E810-C for backplane */ #define ICE_DEV_ID_C810_BACKPLANE 0x1591 -/* Intel(R) Ethernet Controller C810 for QSFP */ +/* Intel(R) Ethernet Controller E810-C for QSFP */ #define ICE_DEV_ID_C810_QSFP 0x1592 -/* Intel(R) Ethernet Controller C810 for SFP */ +/* Intel(R) Ethernet Controller E810-C for SFP */ #define ICE_DEV_ID_C810_SFP 0x1593 -/* Intel(R) Ethernet Controller C810/X557-AT 10GBASE-T */ -#define ICE_DEV_ID_C810_10G_BASE_T 0x1594 -/* Intel(R) Ethernet Controller C810 1GbE */ -#define ICE_DEV_ID_C810_SGMII 0x1595 #endif /* _ICE_DEVIDS_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index c71a9b528d6d..96923580f2a6 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -332,58 +332,473 @@ ice_get_ethtool_stats(struct net_device *netdev, } } -static int -ice_get_link_ksettings(struct net_device *netdev, - struct ethtool_link_ksettings *ks) +/** + * ice_phy_type_to_ethtool - convert the phy_types to ethtool link modes + * @netdev: network interface device structure + * @ks: ethtool link ksettings struct to fill out + */ +static void ice_phy_type_to_ethtool(struct net_device *netdev, + struct ethtool_link_ksettings *ks) { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_link_status *hw_link_info; struct ice_vsi *vsi = np->vsi; - bool link_up; + u64 phy_types_low; hw_link_info = &vsi->port_info->phy.link_info; - link_up = hw_link_info->link_info & ICE_AQ_LINK_UP; + phy_types_low = vsi->port_info->phy.phy_type_low; + + ethtool_link_ksettings_zero_link_mode(ks, supported); + ethtool_link_ksettings_zero_link_mode(ks, advertising); + + if (phy_types_low & ICE_PHY_TYPE_LOW_100BASE_TX || + phy_types_low & ICE_PHY_TYPE_LOW_100M_SGMII) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 100baseT_Full); + if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100MB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 100baseT_Full); + } + if (phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_T 
|| + phy_types_low & ICE_PHY_TYPE_LOW_1G_SGMII) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseT_Full); + if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_1000MB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseT_Full); + } + if (phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_KX) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseKX_Full); + if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_1000MB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseKX_Full); + } + if (phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_SX || + phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_LX) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseX_Full); + if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_1000MB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseX_Full); + } + if (phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_T) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 2500baseT_Full); + if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_2500MB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 2500baseT_Full); + } + if (phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_X || + phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_KX) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 2500baseX_Full); + if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_2500MB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 2500baseX_Full); + } + if (phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_T || + phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_KR) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 5000baseT_Full); + if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_5GB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 5000baseT_Full); + } + if (phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_T || + phy_types_low & ICE_PHY_TYPE_LOW_10G_SFI_DA || + phy_types_low & ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC || + phy_types_low & ICE_PHY_TYPE_LOW_10G_SFI_C2C) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseT_Full); + if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseT_Full); + } + if (phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_KR_CR1) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseKR_Full); + if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseKR_Full); + } + if (phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_SR) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseSR_Full); + if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseSR_Full); + } + if (phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_LR) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseLR_Full); + if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseLR_Full); + } + if (phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_T || + phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR || + phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR_S || + phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR1 || + phy_types_low & ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC || + phy_types_low & ICE_PHY_TYPE_LOW_25G_AUI_C2C) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 25000baseCR_Full); + if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_25GB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 25000baseCR_Full); + } + if (phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_SR || + phy_types_low & 
ICE_PHY_TYPE_LOW_25GBASE_LR) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 25000baseSR_Full); + if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_25GB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 25000baseSR_Full); + } + if (phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR || + phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR_S || + phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR1) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 25000baseKR_Full); + if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_25GB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 25000baseKR_Full); + } + if (phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_KR4) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseKR4_Full); + if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 40000baseKR4_Full); + } + if (phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_CR4 || + phy_types_low & ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC || + phy_types_low & ICE_PHY_TYPE_LOW_40G_XLAUI) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseCR4_Full); + if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 40000baseCR4_Full); + } + if (phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_SR4) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseSR4_Full); + if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 40000baseSR4_Full); + } + if (phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_LR4) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseLR4_Full); + if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 40000baseLR4_Full); + } - ethtool_link_ksettings_add_link_mode(ks, supported, - 10000baseT_Full); - ethtool_link_ksettings_add_link_mode(ks, advertising, - 10000baseT_Full); + /* Autoneg PHY types */ + if (phy_types_low & ICE_PHY_TYPE_LOW_100BASE_TX || + phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_T || + phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_KX || + phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_T || + phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_KX || + phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_T || + phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_KR || + phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_T || + phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_KR_CR1 || + phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_T || + phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR || + phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR_S || + phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR1 || + phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR || + phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR_S || + phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR1 || + phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_CR4 || + phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_KR4) { + ethtool_link_ksettings_add_link_mode(ks, supported, + Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, + Autoneg); + } +} - /* set speed and duplex */ - if (link_up) { - switch (hw_link_info->link_speed) { - case ICE_AQ_LINK_SPEED_100MB: - ks->base.speed = SPEED_100; - break; - case ICE_AQ_LINK_SPEED_2500MB: - ks->base.speed = SPEED_2500; - break; - case ICE_AQ_LINK_SPEED_5GB: - ks->base.speed = SPEED_5000; - break; - case ICE_AQ_LINK_SPEED_10GB: - ks->base.speed = SPEED_10000; - break; - case ICE_AQ_LINK_SPEED_25GB: - ks->base.speed = SPEED_25000; - break; - case ICE_AQ_LINK_SPEED_40GB: - ks->base.speed = SPEED_40000; - break; - default: - ks->base.speed = 
SPEED_UNKNOWN; - break; - } +#define TEST_SET_BITS_TIMEOUT 50 +#define TEST_SET_BITS_SLEEP_MAX 2000 +#define TEST_SET_BITS_SLEEP_MIN 1000 - ks->base.duplex = DUPLEX_FULL; - } else { - ks->base.speed = SPEED_UNKNOWN; - ks->base.duplex = DUPLEX_UNKNOWN; +/** + * ice_get_settings_link_up - Get Link settings for when link is up + * @ks: ethtool ksettings to fill in + * @netdev: network interface device structure + */ +static void ice_get_settings_link_up(struct ethtool_link_ksettings *ks, + struct net_device *netdev) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ethtool_link_ksettings cap_ksettings; + struct ice_link_status *link_info; + struct ice_vsi *vsi = np->vsi; + bool unrecog_phy_low = false; + + link_info = &vsi->port_info->phy.link_info; + + /* Initialize supported and advertised settings based on phy settings */ + switch (link_info->phy_type_low) { + case ICE_PHY_TYPE_LOW_100BASE_TX: + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 100baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 100baseT_Full); + break; + case ICE_PHY_TYPE_LOW_100M_SGMII: + ethtool_link_ksettings_add_link_mode(ks, supported, + 100baseT_Full); + break; + case ICE_PHY_TYPE_LOW_1000BASE_T: + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseT_Full); + break; + case ICE_PHY_TYPE_LOW_1G_SGMII: + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseT_Full); + break; + case ICE_PHY_TYPE_LOW_1000BASE_SX: + case ICE_PHY_TYPE_LOW_1000BASE_LX: + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseX_Full); + break; + case ICE_PHY_TYPE_LOW_1000BASE_KX: + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseKX_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseKX_Full); + break; + case ICE_PHY_TYPE_LOW_2500BASE_T: + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 2500baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 2500baseT_Full); + break; + case ICE_PHY_TYPE_LOW_2500BASE_X: + ethtool_link_ksettings_add_link_mode(ks, supported, + 2500baseX_Full); + break; + case ICE_PHY_TYPE_LOW_2500BASE_KX: + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 2500baseX_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 2500baseX_Full); + break; + case ICE_PHY_TYPE_LOW_5GBASE_T: + case ICE_PHY_TYPE_LOW_5GBASE_KR: + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 5000baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 5000baseT_Full); + break; + case ICE_PHY_TYPE_LOW_10GBASE_T: + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseT_Full); + 
ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseT_Full); + break; + case ICE_PHY_TYPE_LOW_10G_SFI_DA: + case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC: + case ICE_PHY_TYPE_LOW_10G_SFI_C2C: + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseT_Full); + break; + case ICE_PHY_TYPE_LOW_10GBASE_SR: + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseSR_Full); + break; + case ICE_PHY_TYPE_LOW_10GBASE_LR: + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseLR_Full); + break; + case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1: + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseKR_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseKR_Full); + break; + case ICE_PHY_TYPE_LOW_25GBASE_T: + case ICE_PHY_TYPE_LOW_25GBASE_CR: + case ICE_PHY_TYPE_LOW_25GBASE_CR_S: + case ICE_PHY_TYPE_LOW_25GBASE_CR1: + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 25000baseCR_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 25000baseCR_Full); + break; + case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC: + ethtool_link_ksettings_add_link_mode(ks, supported, + 25000baseCR_Full); + break; + case ICE_PHY_TYPE_LOW_25GBASE_SR: + case ICE_PHY_TYPE_LOW_25GBASE_LR: + ethtool_link_ksettings_add_link_mode(ks, supported, + 25000baseSR_Full); + break; + case ICE_PHY_TYPE_LOW_25GBASE_KR: + case ICE_PHY_TYPE_LOW_25GBASE_KR1: + case ICE_PHY_TYPE_LOW_25GBASE_KR_S: + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 25000baseKR_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 25000baseKR_Full); + break; + case ICE_PHY_TYPE_LOW_40GBASE_CR4: + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseCR4_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 40000baseCR4_Full); + break; + case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC: + case ICE_PHY_TYPE_LOW_40G_XLAUI: + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseCR4_Full); + break; + case ICE_PHY_TYPE_LOW_40GBASE_SR4: + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseSR4_Full); + break; + case ICE_PHY_TYPE_LOW_40GBASE_LR4: + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseLR4_Full); + break; + case ICE_PHY_TYPE_LOW_40GBASE_KR4: + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseKR4_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 40000baseKR4_Full); + break; + default: + unrecog_phy_low = true; + } + + if (unrecog_phy_low) { + /* if we got here and link is up something bad is afoot */ + netdev_info(netdev, "WARNING: Unrecognized PHY_Low (0x%llx).\n", + (u64)link_info->phy_type_low); } + /* Now that we've worked out everything that could be supported by the + * current PHY type, get what is supported by the NVM and intersect + * them to get what is truly supported + */ + memset(&cap_ksettings, 0, 
sizeof(struct ethtool_link_ksettings)); + ice_phy_type_to_ethtool(netdev, &cap_ksettings); + ethtool_intersect_link_masks(ks, &cap_ksettings); + + switch (link_info->link_speed) { + case ICE_AQ_LINK_SPEED_40GB: + ks->base.speed = SPEED_40000; + break; + case ICE_AQ_LINK_SPEED_25GB: + ks->base.speed = SPEED_25000; + break; + case ICE_AQ_LINK_SPEED_20GB: + ks->base.speed = SPEED_20000; + break; + case ICE_AQ_LINK_SPEED_10GB: + ks->base.speed = SPEED_10000; + break; + case ICE_AQ_LINK_SPEED_5GB: + ks->base.speed = SPEED_5000; + break; + case ICE_AQ_LINK_SPEED_2500MB: + ks->base.speed = SPEED_2500; + break; + case ICE_AQ_LINK_SPEED_1000MB: + ks->base.speed = SPEED_1000; + break; + case ICE_AQ_LINK_SPEED_100MB: + ks->base.speed = SPEED_100; + break; + default: + netdev_info(netdev, + "WARNING: Unrecognized link_speed (0x%x).\n", + link_info->link_speed); + break; + } + ks->base.duplex = DUPLEX_FULL; +} + +/** + * ice_get_settings_link_down - Get the Link settings when link is down + * @ks: ethtool ksettings to fill in + * @netdev: network interface device structure + * + * Reports link settings that can be determined when link is down + */ +static void +ice_get_settings_link_down(struct ethtool_link_ksettings *ks, + struct net_device __always_unused *netdev) +{ + /* link is down and the driver needs to fall back on + * supported phy types to figure out what info to display + */ + ice_phy_type_to_ethtool(netdev, ks); + + /* With no link, speed and duplex are unknown */ + ks->base.speed = SPEED_UNKNOWN; + ks->base.duplex = DUPLEX_UNKNOWN; +} + +/** + * ice_get_link_ksettings - Get Link Speed and Duplex settings + * @netdev: network interface device structure + * @ks: ethtool ksettings + * + * Reports speed/duplex settings based on media_type + */ +static int ice_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *ks) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_link_status *hw_link_info; + struct ice_vsi *vsi = np->vsi; + + ethtool_link_ksettings_zero_link_mode(ks, supported); + ethtool_link_ksettings_zero_link_mode(ks, advertising); + hw_link_info = &vsi->port_info->phy.link_info; + + /* set speed and duplex */ + if (hw_link_info->link_info & ICE_AQ_LINK_UP) + ice_get_settings_link_up(ks, netdev); + else + ice_get_settings_link_down(ks, netdev); + /* set autoneg settings */ - ks->base.autoneg = ((hw_link_info->an_info & ICE_AQ_AN_COMPLETED) ? - AUTONEG_ENABLE : AUTONEG_DISABLE); + ks->base.autoneg = (hw_link_info->an_info & ICE_AQ_AN_COMPLETED) ? 
+ AUTONEG_ENABLE : AUTONEG_DISABLE; /* set media type settings */ switch (vsi->port_info->phy.media_type) { @@ -442,6 +857,311 @@ ice_get_link_ksettings(struct net_device *netdev, } /** + * ice_ksettings_find_adv_link_speed - Find advertising link speed + * @ks: ethtool ksettings + */ +static u16 +ice_ksettings_find_adv_link_speed(const struct ethtool_link_ksettings *ks) +{ + u16 adv_link_speed = 0; + + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 100baseT_Full)) + adv_link_speed |= ICE_AQ_LINK_SPEED_100MB; + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 1000baseX_Full)) + adv_link_speed |= ICE_AQ_LINK_SPEED_1000MB; + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 1000baseT_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 1000baseKX_Full)) + adv_link_speed |= ICE_AQ_LINK_SPEED_1000MB; + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 2500baseT_Full)) + adv_link_speed |= ICE_AQ_LINK_SPEED_2500MB; + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 2500baseX_Full)) + adv_link_speed |= ICE_AQ_LINK_SPEED_2500MB; + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 5000baseT_Full)) + adv_link_speed |= ICE_AQ_LINK_SPEED_5GB; + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 10000baseT_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 10000baseKR_Full)) + adv_link_speed |= ICE_AQ_LINK_SPEED_10GB; + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 10000baseSR_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 10000baseLR_Full)) + adv_link_speed |= ICE_AQ_LINK_SPEED_10GB; + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 25000baseCR_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 25000baseSR_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 25000baseKR_Full)) + adv_link_speed |= ICE_AQ_LINK_SPEED_25GB; + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 40000baseCR4_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 40000baseSR4_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 40000baseLR4_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 40000baseKR4_Full)) + adv_link_speed |= ICE_AQ_LINK_SPEED_40GB; + + return adv_link_speed; +} + +/** + * ice_setup_autoneg + * @p: port info + * @ks: ethtool_link_ksettings + * @config: configuration that will be sent down to FW + * @autoneg_enabled: autonegotiation is enabled or not + * @autoneg_changed: whether autonegotiation will change + * @netdev: network interface device structure + * + * Setup PHY autonegotiation feature + */ +static int +ice_setup_autoneg(struct ice_port_info *p, struct ethtool_link_ksettings *ks, + struct ice_aqc_set_phy_cfg_data *config, + u8 autoneg_enabled, u8 *autoneg_changed, + struct net_device *netdev) +{ + int err = 0; + + *autoneg_changed = 0; + + /* Check autoneg */ + if (autoneg_enabled == AUTONEG_ENABLE) { + /* If autoneg was not already enabled */ + if (!(p->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)) { + /* If autoneg is not supported, return error */ + if (!ethtool_link_ksettings_test_link_mode(ks, + supported, + Autoneg)) { + netdev_info(netdev, "Autoneg not supported on this phy.\n"); + err = -EINVAL; + } else { + /* Autoneg is allowed to change */ + config->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; + *autoneg_changed = 1; + } + } + } else { + /* If autoneg is currently enabled */ + if (p->phy.link_info.an_info & ICE_AQ_AN_COMPLETED) { + /* If 
autoneg is supported, only a 10GBASE_T PHY can + * disable it, so for any other PHY return an error + */ + if (ethtool_link_ksettings_test_link_mode(ks, + supported, + Autoneg)) { + netdev_info(netdev, "Autoneg cannot be disabled on this phy\n"); + err = -EINVAL; + } else { + /* Autoneg is allowed to change */ + config->caps &= ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; + *autoneg_changed = 1; + } + } + } + + return err; +} + +/** + * ice_set_link_ksettings - Set Speed and Duplex + * @netdev: network interface device structure + * @ks: ethtool ksettings + * + * Set speed/duplex per media_types advertised/forced + */ +static int ice_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *ks) +{ + u8 autoneg, timeout = TEST_SET_BITS_TIMEOUT, lport = 0; + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ethtool_link_ksettings safe_ks, copy_ks; + struct ice_aqc_get_phy_caps_data *abilities; + u16 adv_link_speed, curr_link_speed, idx; + struct ice_aqc_set_phy_cfg_data config; + struct ice_pf *pf = np->vsi->back; + struct ice_port_info *p; + u8 autoneg_changed = 0; + enum ice_status status; + u64 phy_type_low; + int err = 0; + bool linkup; + + p = np->vsi->port_info; + + if (!p) + return -EOPNOTSUPP; + + /* Check if this is the LAN VSI */ + for (idx = 0 ; idx < pf->num_alloc_vsi ; idx++) { + if (pf->vsi[idx]->type == ICE_VSI_PF) { + if (np->vsi != pf->vsi[idx]) + return -EOPNOTSUPP; + break; + } + } + + if (p->phy.media_type != ICE_MEDIA_BASET && + p->phy.media_type != ICE_MEDIA_FIBER && + p->phy.media_type != ICE_MEDIA_BACKPLANE && + p->phy.media_type != ICE_MEDIA_DA && + p->phy.link_info.link_info & ICE_AQ_LINK_UP) + return -EOPNOTSUPP; + + /* copy the ksettings to copy_ks to avoid modifying the original */ + memcpy(&copy_ks, ks, sizeof(struct ethtool_link_ksettings)); + + /* save autoneg out of ksettings */ + autoneg = copy_ks.base.autoneg; + + memset(&safe_ks, 0, sizeof(safe_ks)); + + /* Get link modes supported by hardware. */ + ice_phy_type_to_ethtool(netdev, &safe_ks); + + /* and check against modes requested by user. + * Return an error if unsupported mode was set. + */ + if (!bitmap_subset(copy_ks.link_modes.advertising, + safe_ks.link_modes.supported, + __ETHTOOL_LINK_MODE_MASK_NBITS)) + return -EINVAL; + + /* get our own copy of the bits to check against */ + memset(&safe_ks, 0, sizeof(struct ethtool_link_ksettings)); + safe_ks.base.cmd = copy_ks.base.cmd; + safe_ks.base.link_mode_masks_nwords = + copy_ks.base.link_mode_masks_nwords; + ice_get_link_ksettings(netdev, &safe_ks); + + /* set autoneg back to what it currently is */ + copy_ks.base.autoneg = safe_ks.base.autoneg; + /* we don't compare the speed */ + copy_ks.base.speed = safe_ks.base.speed; + + /* If copy_ks.base and safe_ks.base are not the same now, then they are + * trying to set something that we do not support. + */
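The bitmap_subset() test a few lines up is the core validation in ice_set_link_ksettings(): every advertised mode the user requests must already be in the supported mask that ice_phy_type_to_ethtool() builds. A standalone sketch, with one 64-bit word standing in for the kernel's __ETHTOOL_LINK_MODE_MASK_NBITS bitmap:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* single-word analogue of the kernel's bitmap_subset() */
static bool bitmap_subset64(uint64_t sub, uint64_t super)
{
        return (sub & ~super) == 0;     /* no bit set outside the superset */
}

int main(void)
{
        uint64_t supported   = (1ULL << 5) | (1ULL << 12); /* arbitrary modes */
        uint64_t advertising = 1ULL << 5;

        printf("ok:  %d\n", bitmap_subset64(advertising, supported)); /* 1 */
        printf("bad: %d\n", bitmap_subset64(1ULL << 7, supported));   /* 0 -> -EINVAL */
        return 0;
}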
+ if (memcmp(&copy_ks.base, &safe_ks.base, + sizeof(struct ethtool_link_settings))) + return -EOPNOTSUPP; + + while (test_and_set_bit(__ICE_CFG_BUSY, pf->state)) { + timeout--; + if (!timeout) + return -EBUSY; + usleep_range(TEST_SET_BITS_SLEEP_MIN, TEST_SET_BITS_SLEEP_MAX); + } + + abilities = devm_kzalloc(&pf->pdev->dev, sizeof(*abilities), + GFP_KERNEL); + if (!abilities) + return -ENOMEM; + + /* Get the current phy config */ + status = ice_aq_get_phy_caps(p, false, ICE_AQC_REPORT_SW_CFG, abilities, + NULL); + if (status) { + err = -EAGAIN; + goto done; + } + + /* Copy abilities to config in case autoneg is not set below */ + memset(&config, 0, sizeof(struct ice_aqc_set_phy_cfg_data)); + config.caps = abilities->caps & ~ICE_AQC_PHY_AN_MODE; + if (abilities->caps & ICE_AQC_PHY_AN_MODE) + config.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; + + /* Check autoneg */ + err = ice_setup_autoneg(p, &safe_ks, &config, autoneg, &autoneg_changed, + netdev); + + if (err) + goto done; + + /* Call to get the current link speed */ + p->phy.get_link_info = true; + status = ice_get_link_status(p, &linkup); + if (status) { + err = -EAGAIN; + goto done; + } + + curr_link_speed = p->phy.link_info.link_speed; + adv_link_speed = ice_ksettings_find_adv_link_speed(ks); + + /* If speed didn't get set, set it to what it currently is. + * This is needed because if advertise is 0 (as it is when autoneg + * is disabled) then speed won't get set. + */ + if (!adv_link_speed) + adv_link_speed = curr_link_speed; + + /* Convert the advertise link speeds to their corresponding PHY_TYPE */ + ice_update_phy_type(&phy_type_low, adv_link_speed); + + if (!autoneg_changed && adv_link_speed == curr_link_speed) { + netdev_info(netdev, "Nothing changed, exiting without setting anything.\n"); + goto done; + } + + /* copy over the rest of the abilities */ + config.low_power_ctrl = abilities->low_power_ctrl; + config.eee_cap = abilities->eee_cap; + config.eeer_value = abilities->eeer_value; + config.link_fec_opt = abilities->link_fec_options; + + /* save the requested speeds */ + p->phy.link_info.req_speeds = adv_link_speed; + + /* set link and auto negotiation so changes take effect */ + config.caps |= ICE_AQ_PHY_ENA_LINK; + + if (phy_type_low) { + config.phy_type_low = cpu_to_le64(phy_type_low) & + abilities->phy_type_low; + } else { + err = -EAGAIN; + netdev_info(netdev, "Nothing changed. 
No PHY_TYPE corresponds to the advertised link speed.\n"); + goto done; + } + + /* If link is up put link down */ + if (p->phy.link_info.link_info & ICE_AQ_LINK_UP) { + /* Tell the OS link is going down, the link will go + * back up when fw says it is ready asynchronously + */ + ice_print_link_msg(np->vsi, false); + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + } + + /* make the aq call */ + status = ice_aq_set_phy_cfg(&pf->hw, lport, &config, NULL); + if (status) { + netdev_info(netdev, "Set phy config failed.\n"); + err = -EAGAIN; + } + +done: + devm_kfree(&pf->pdev->dev, abilities); + clear_bit(__ICE_CFG_BUSY, pf->state); + + return err; +} + +/** * ice_get_rxnfc - command to get RX flow classification rules * @netdev: network interface device structure * @cmd: ethtool rxnfc command @@ -478,9 +1198,11 @@ ice_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) ring->tx_max_pending = ICE_MAX_NUM_DESC; ring->rx_pending = vsi->rx_rings[0]->count; ring->tx_pending = vsi->tx_rings[0]->count; - ring->rx_mini_pending = ICE_MIN_NUM_DESC; + + /* Rx mini and jumbo rings are not supported */ ring->rx_mini_max_pending = 0; ring->rx_jumbo_max_pending = 0; + ring->rx_mini_pending = 0; ring->rx_jumbo_pending = 0; } @@ -498,14 +1220,23 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) ring->tx_pending < ICE_MIN_NUM_DESC || ring->rx_pending > ICE_MAX_NUM_DESC || ring->rx_pending < ICE_MIN_NUM_DESC) { - netdev_err(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d]\n", + netdev_err(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d] (increment %d)\n", ring->tx_pending, ring->rx_pending, - ICE_MIN_NUM_DESC, ICE_MAX_NUM_DESC); + ICE_MIN_NUM_DESC, ICE_MAX_NUM_DESC, + ICE_REQ_DESC_MULTIPLE); return -EINVAL; } new_tx_cnt = ALIGN(ring->tx_pending, ICE_REQ_DESC_MULTIPLE); + if (new_tx_cnt != ring->tx_pending) + netdev_info(netdev, + "Requested Tx descriptor count rounded up to %d\n", + new_tx_cnt); new_rx_cnt = ALIGN(ring->rx_pending, ICE_REQ_DESC_MULTIPLE); + if (new_rx_cnt != ring->rx_pending) + netdev_info(netdev, + "Requested Rx descriptor count rounded up to %d\n", + new_rx_cnt); /* if nothing to do return success */ if (new_tx_cnt == vsi->tx_rings[0]->count && @@ -933,6 +1664,7 @@ static int ice_set_rxfh(struct net_device *netdev, const u32 *indir, static const struct ethtool_ops ice_ethtool_ops = { .get_link_ksettings = ice_get_link_ksettings, + .set_link_ksettings = ice_set_link_ksettings, .get_drvinfo = ice_get_drvinfo, .get_regs_len = ice_get_regs_len, .get_regs = ice_get_regs, diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h index 6076fc87df9d..a6679a9bfd3a 100644 --- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h +++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h @@ -6,251 +6,323 @@ #ifndef _ICE_HW_AUTOGEN_H_ #define _ICE_HW_AUTOGEN_H_ -#define QTX_COMM_DBELL(_DBQM) (0x002C0000 + ((_DBQM) * 4)) -#define PF_FW_ARQBAH 0x00080180 -#define PF_FW_ARQBAL 0x00080080 -#define PF_FW_ARQH 0x00080380 -#define PF_FW_ARQH_ARQH_S 0 -#define PF_FW_ARQH_ARQH_M ICE_M(0x3FF, PF_FW_ARQH_ARQH_S) -#define PF_FW_ARQLEN 0x00080280 -#define PF_FW_ARQLEN_ARQLEN_S 0 -#define PF_FW_ARQLEN_ARQLEN_M ICE_M(0x3FF, PF_FW_ARQLEN_ARQLEN_S) -#define PF_FW_ARQLEN_ARQVFE_S 28 -#define PF_FW_ARQLEN_ARQVFE_M BIT(PF_FW_ARQLEN_ARQVFE_S) -#define PF_FW_ARQLEN_ARQOVFL_S 29 -#define PF_FW_ARQLEN_ARQOVFL_M BIT(PF_FW_ARQLEN_ARQOVFL_S) -#define PF_FW_ARQLEN_ARQCRIT_S 30 
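On the ice_set_ringparam() hunk above: requested descriptor counts are rounded up to ICE_REQ_DESC_MULTIPLE (32 in this driver) and the new netdev_info() calls report the adjustment. A worked example of that rounding using the same power-of-two ALIGN arithmetic as the kernel macro:

#include <stdio.h>

#define REQ_DESC_MULTIPLE 32u   /* mirrors ICE_REQ_DESC_MULTIPLE */
/* round x up to a multiple of a; a must be a power of two */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        unsigned int req = 500;                 /* user's ethtool -G request */
        unsigned int got = ALIGN_UP(req, REQ_DESC_MULTIPLE);

        if (got != req)                         /* 500 -> 512 */
                printf("Requested Tx descriptor count rounded up to %u\n", got);
        return 0;
}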
-#define PF_FW_ARQLEN_ARQCRIT_M BIT(PF_FW_ARQLEN_ARQCRIT_S) -#define PF_FW_ARQLEN_ARQENABLE_S 31 -#define PF_FW_ARQLEN_ARQENABLE_M BIT(PF_FW_ARQLEN_ARQENABLE_S) -#define PF_FW_ARQT 0x00080480 -#define PF_FW_ATQBAH 0x00080100 -#define PF_FW_ATQBAL 0x00080000 -#define PF_FW_ATQH 0x00080300 -#define PF_FW_ATQH_ATQH_S 0 -#define PF_FW_ATQH_ATQH_M ICE_M(0x3FF, PF_FW_ATQH_ATQH_S) -#define PF_FW_ATQLEN 0x00080200 -#define PF_FW_ATQLEN_ATQLEN_S 0 -#define PF_FW_ATQLEN_ATQLEN_M ICE_M(0x3FF, PF_FW_ATQLEN_ATQLEN_S) -#define PF_FW_ATQLEN_ATQVFE_S 28 -#define PF_FW_ATQLEN_ATQVFE_M BIT(PF_FW_ATQLEN_ATQVFE_S) -#define PF_FW_ATQLEN_ATQOVFL_S 29 -#define PF_FW_ATQLEN_ATQOVFL_M BIT(PF_FW_ATQLEN_ATQOVFL_S) -#define PF_FW_ATQLEN_ATQCRIT_S 30 -#define PF_FW_ATQLEN_ATQCRIT_M BIT(PF_FW_ATQLEN_ATQCRIT_S) -#define PF_FW_ATQLEN_ATQENABLE_S 31 -#define PF_FW_ATQLEN_ATQENABLE_M BIT(PF_FW_ATQLEN_ATQENABLE_S) -#define PF_FW_ATQT 0x00080400 - +#define QTX_COMM_DBELL(_DBQM) (0x002C0000 + ((_DBQM) * 4)) +#define PF_FW_ARQBAH 0x00080180 +#define PF_FW_ARQBAL 0x00080080 +#define PF_FW_ARQH 0x00080380 +#define PF_FW_ARQH_ARQH_M ICE_M(0x3FF, 0) +#define PF_FW_ARQLEN 0x00080280 +#define PF_FW_ARQLEN_ARQLEN_M ICE_M(0x3FF, 0) +#define PF_FW_ARQLEN_ARQVFE_M BIT(28) +#define PF_FW_ARQLEN_ARQOVFL_M BIT(29) +#define PF_FW_ARQLEN_ARQCRIT_M BIT(30) +#define PF_FW_ARQLEN_ARQENABLE_M BIT(31) +#define PF_FW_ARQT 0x00080480 +#define PF_FW_ATQBAH 0x00080100 +#define PF_FW_ATQBAL 0x00080000 +#define PF_FW_ATQH 0x00080300 +#define PF_FW_ATQH_ATQH_M ICE_M(0x3FF, 0) +#define PF_FW_ATQLEN 0x00080200 +#define PF_FW_ATQLEN_ATQLEN_M ICE_M(0x3FF, 0) +#define PF_FW_ATQLEN_ATQVFE_M BIT(28) +#define PF_FW_ATQLEN_ATQOVFL_M BIT(29) +#define PF_FW_ATQLEN_ATQCRIT_M BIT(30) +#define PF_FW_ATQLEN_ATQENABLE_M BIT(31) +#define PF_FW_ATQT 0x00080400 +#define PF_MBX_ARQBAH 0x0022E400 +#define PF_MBX_ARQBAL 0x0022E380 +#define PF_MBX_ARQH 0x0022E500 +#define PF_MBX_ARQH_ARQH_M ICE_M(0x3FF, 0) +#define PF_MBX_ARQLEN 0x0022E480 +#define PF_MBX_ARQLEN_ARQLEN_M ICE_M(0x3FF, 0) +#define PF_MBX_ARQLEN_ARQENABLE_M BIT(31) +#define PF_MBX_ARQT 0x0022E580 +#define PF_MBX_ATQBAH 0x0022E180 +#define PF_MBX_ATQBAL 0x0022E100 +#define PF_MBX_ATQH 0x0022E280 +#define PF_MBX_ATQH_ATQH_M ICE_M(0x3FF, 0) +#define PF_MBX_ATQLEN 0x0022E200 +#define PF_MBX_ATQLEN_ATQLEN_M ICE_M(0x3FF, 0) +#define PF_MBX_ATQLEN_ATQENABLE_M BIT(31) +#define PF_MBX_ATQT 0x0022E300 #define GLFLXP_RXDID_FLAGS(_i, _j) (0x0045D000 + ((_i) * 4 + (_j) * 256)) #define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S 0 -#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M ICE_M(0x3F, GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S) +#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M ICE_M(0x3F, 0) #define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S 8 -#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_M ICE_M(0x3F, GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S) +#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_M ICE_M(0x3F, 8) #define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_S 16 -#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_M ICE_M(0x3F, GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_S) +#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_M ICE_M(0x3F, 16) #define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_S 24 -#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_M ICE_M(0x3F, GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_S) +#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_M ICE_M(0x3F, 24) #define GLFLXP_RXDID_FLX_WRD_0(_i) (0x0045c800 + ((_i) * 4)) #define GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_S 0 -#define GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_M ICE_M(0xFF, GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_S) +#define GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_M ICE_M(0xFF, 0) #define 
GLFLXP_RXDID_FLX_WRD_0_RXDID_OPCODE_S 30 -#define GLFLXP_RXDID_FLX_WRD_0_RXDID_OPCODE_M ICE_M(0x3, GLFLXP_RXDID_FLX_WRD_0_RXDID_OPCODE_S) +#define GLFLXP_RXDID_FLX_WRD_0_RXDID_OPCODE_M ICE_M(0x3, 30) #define GLFLXP_RXDID_FLX_WRD_1(_i) (0x0045c900 + ((_i) * 4)) #define GLFLXP_RXDID_FLX_WRD_1_PROT_MDID_S 0 -#define GLFLXP_RXDID_FLX_WRD_1_PROT_MDID_M ICE_M(0xFF, GLFLXP_RXDID_FLX_WRD_1_PROT_MDID_S) +#define GLFLXP_RXDID_FLX_WRD_1_PROT_MDID_M ICE_M(0xFF, 0) #define GLFLXP_RXDID_FLX_WRD_1_RXDID_OPCODE_S 30 -#define GLFLXP_RXDID_FLX_WRD_1_RXDID_OPCODE_M ICE_M(0x3, GLFLXP_RXDID_FLX_WRD_1_RXDID_OPCODE_S) +#define GLFLXP_RXDID_FLX_WRD_1_RXDID_OPCODE_M ICE_M(0x3, 30) #define GLFLXP_RXDID_FLX_WRD_2(_i) (0x0045ca00 + ((_i) * 4)) #define GLFLXP_RXDID_FLX_WRD_2_PROT_MDID_S 0 -#define GLFLXP_RXDID_FLX_WRD_2_PROT_MDID_M ICE_M(0xFF, GLFLXP_RXDID_FLX_WRD_2_PROT_MDID_S) +#define GLFLXP_RXDID_FLX_WRD_2_PROT_MDID_M ICE_M(0xFF, 0) #define GLFLXP_RXDID_FLX_WRD_2_RXDID_OPCODE_S 30 -#define GLFLXP_RXDID_FLX_WRD_2_RXDID_OPCODE_M ICE_M(0x3, GLFLXP_RXDID_FLX_WRD_2_RXDID_OPCODE_S) +#define GLFLXP_RXDID_FLX_WRD_2_RXDID_OPCODE_M ICE_M(0x3, 30) #define GLFLXP_RXDID_FLX_WRD_3(_i) (0x0045cb00 + ((_i) * 4)) #define GLFLXP_RXDID_FLX_WRD_3_PROT_MDID_S 0 -#define GLFLXP_RXDID_FLX_WRD_3_PROT_MDID_M ICE_M(0xFF, GLFLXP_RXDID_FLX_WRD_3_PROT_MDID_S) +#define GLFLXP_RXDID_FLX_WRD_3_PROT_MDID_M ICE_M(0xFF, 0) #define GLFLXP_RXDID_FLX_WRD_3_RXDID_OPCODE_S 30 -#define GLFLXP_RXDID_FLX_WRD_3_RXDID_OPCODE_M ICE_M(0x3, GLFLXP_RXDID_FLX_WRD_3_RXDID_OPCODE_S) - -#define QRXFLXP_CNTXT(_QRX) (0x00480000 + ((_QRX) * 4)) -#define QRXFLXP_CNTXT_RXDID_IDX_S 0 -#define QRXFLXP_CNTXT_RXDID_IDX_M ICE_M(0x3F, QRXFLXP_CNTXT_RXDID_IDX_S) -#define QRXFLXP_CNTXT_RXDID_PRIO_S 8 -#define QRXFLXP_CNTXT_RXDID_PRIO_M ICE_M(0x7, QRXFLXP_CNTXT_RXDID_PRIO_S) -#define QRXFLXP_CNTXT_TS_S 11 -#define QRXFLXP_CNTXT_TS_M BIT(QRXFLXP_CNTXT_TS_S) -#define GLGEN_RSTAT 0x000B8188 -#define GLGEN_RSTAT_DEVSTATE_S 0 -#define GLGEN_RSTAT_DEVSTATE_M ICE_M(0x3, GLGEN_RSTAT_DEVSTATE_S) -#define GLGEN_RSTCTL 0x000B8180 -#define GLGEN_RSTCTL_GRSTDEL_S 0 -#define GLGEN_RSTCTL_GRSTDEL_M ICE_M(0x3F, GLGEN_RSTCTL_GRSTDEL_S) -#define GLGEN_RSTAT_RESET_TYPE_S 2 -#define GLGEN_RSTAT_RESET_TYPE_M ICE_M(0x3, GLGEN_RSTAT_RESET_TYPE_S) -#define GLGEN_RTRIG 0x000B8190 -#define GLGEN_RTRIG_CORER_S 0 -#define GLGEN_RTRIG_CORER_M BIT(GLGEN_RTRIG_CORER_S) -#define GLGEN_RTRIG_GLOBR_S 1 -#define GLGEN_RTRIG_GLOBR_M BIT(GLGEN_RTRIG_GLOBR_S) -#define GLGEN_STAT 0x000B612C -#define PFGEN_CTRL 0x00091000 -#define PFGEN_CTRL_PFSWR_S 0 -#define PFGEN_CTRL_PFSWR_M BIT(PFGEN_CTRL_PFSWR_S) -#define PFGEN_STATE 0x00088000 -#define PRTGEN_STATUS 0x000B8100 -#define PFHMC_ERRORDATA 0x00520500 -#define PFHMC_ERRORINFO 0x00520400 -#define GLINT_DYN_CTL(_INT) (0x00160000 + ((_INT) * 4)) -#define GLINT_DYN_CTL_INTENA_S 0 -#define GLINT_DYN_CTL_INTENA_M BIT(GLINT_DYN_CTL_INTENA_S) -#define GLINT_DYN_CTL_CLEARPBA_S 1 -#define GLINT_DYN_CTL_CLEARPBA_M BIT(GLINT_DYN_CTL_CLEARPBA_S) -#define GLINT_DYN_CTL_SWINT_TRIG_S 2 -#define GLINT_DYN_CTL_SWINT_TRIG_M BIT(GLINT_DYN_CTL_SWINT_TRIG_S) -#define GLINT_DYN_CTL_ITR_INDX_S 3 -#define GLINT_DYN_CTL_SW_ITR_INDX_S 25 -#define GLINT_DYN_CTL_SW_ITR_INDX_M ICE_M(0x3, GLINT_DYN_CTL_SW_ITR_INDX_S) -#define GLINT_DYN_CTL_INTENA_MSK_S 31 -#define GLINT_DYN_CTL_INTENA_MSK_M BIT(GLINT_DYN_CTL_INTENA_MSK_S) -#define GLINT_ITR(_i, _INT) (0x00154000 + ((_i) * 8192 + (_INT) * 4)) -#define PFINT_FW_CTL 0x0016C800 -#define PFINT_FW_CTL_MSIX_INDX_S 0 -#define PFINT_FW_CTL_MSIX_INDX_M 
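A note on the regenerated masks throughout this header: ICE_M(m, s), defined in ice_type.h as ((m) << (s)), is now invoked with literal shifts instead of the *_S names, and most single-bit *_S defines are dropped in favour of plain BIT(n) masks. A small standalone example of building such a mask and extracting a field (the register value is made up; the field mirrors QRXFLXP_CNTXT_RXDID_PRIO from later in this hunk):

#include <stdint.h>
#include <stdio.h>

#define ICE_M(m, s)     ((m) << (s))    /* same form as the driver's macro */

#define RXDID_PRIO_S    8
#define RXDID_PRIO_M    ICE_M(0x7, 8)   /* bits 10:8 */

int main(void)
{
        uint32_t reg  = 0x00000500;     /* pretend rd32() result */
        uint32_t prio = (reg & RXDID_PRIO_M) >> RXDID_PRIO_S;

        printf("prio = %u\n", (unsigned int)prio);      /* 5 */
        return 0;
}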
ICE_M(0x7FF, PFINT_FW_CTL_MSIX_INDX_S) -#define PFINT_FW_CTL_ITR_INDX_S 11 -#define PFINT_FW_CTL_ITR_INDX_M ICE_M(0x3, PFINT_FW_CTL_ITR_INDX_S) -#define PFINT_FW_CTL_CAUSE_ENA_S 30 -#define PFINT_FW_CTL_CAUSE_ENA_M BIT(PFINT_FW_CTL_CAUSE_ENA_S) -#define PFINT_OICR 0x0016CA00 -#define PFINT_OICR_ECC_ERR_S 16 -#define PFINT_OICR_ECC_ERR_M BIT(PFINT_OICR_ECC_ERR_S) -#define PFINT_OICR_MAL_DETECT_S 19 -#define PFINT_OICR_MAL_DETECT_M BIT(PFINT_OICR_MAL_DETECT_S) -#define PFINT_OICR_GRST_S 20 -#define PFINT_OICR_GRST_M BIT(PFINT_OICR_GRST_S) -#define PFINT_OICR_PCI_EXCEPTION_S 21 -#define PFINT_OICR_PCI_EXCEPTION_M BIT(PFINT_OICR_PCI_EXCEPTION_S) -#define PFINT_OICR_HMC_ERR_S 26 -#define PFINT_OICR_HMC_ERR_M BIT(PFINT_OICR_HMC_ERR_S) -#define PFINT_OICR_PE_CRITERR_S 28 -#define PFINT_OICR_PE_CRITERR_M BIT(PFINT_OICR_PE_CRITERR_S) -#define PFINT_OICR_CTL 0x0016CA80 -#define PFINT_OICR_CTL_MSIX_INDX_S 0 -#define PFINT_OICR_CTL_MSIX_INDX_M ICE_M(0x7FF, PFINT_OICR_CTL_MSIX_INDX_S) -#define PFINT_OICR_CTL_ITR_INDX_S 11 -#define PFINT_OICR_CTL_ITR_INDX_M ICE_M(0x3, PFINT_OICR_CTL_ITR_INDX_S) -#define PFINT_OICR_CTL_CAUSE_ENA_S 30 -#define PFINT_OICR_CTL_CAUSE_ENA_M BIT(PFINT_OICR_CTL_CAUSE_ENA_S) -#define PFINT_OICR_ENA 0x0016C900 -#define QINT_RQCTL(_QRX) (0x00150000 + ((_QRX) * 4)) -#define QINT_RQCTL_MSIX_INDX_S 0 -#define QINT_RQCTL_ITR_INDX_S 11 -#define QINT_RQCTL_CAUSE_ENA_S 30 -#define QINT_RQCTL_CAUSE_ENA_M BIT(QINT_RQCTL_CAUSE_ENA_S) -#define QINT_TQCTL(_DBQM) (0x00140000 + ((_DBQM) * 4)) -#define QINT_TQCTL_MSIX_INDX_S 0 -#define QINT_TQCTL_ITR_INDX_S 11 -#define QINT_TQCTL_CAUSE_ENA_S 30 -#define QINT_TQCTL_CAUSE_ENA_M BIT(QINT_TQCTL_CAUSE_ENA_S) -#define GLLAN_RCTL_0 0x002941F8 -#define QRX_CONTEXT(_i, _QRX) (0x00280000 + ((_i) * 8192 + (_QRX) * 4)) -#define QRX_CTRL(_QRX) (0x00120000 + ((_QRX) * 4)) -#define QRX_CTRL_MAX_INDEX 2047 -#define QRX_CTRL_QENA_REQ_S 0 -#define QRX_CTRL_QENA_REQ_M BIT(QRX_CTRL_QENA_REQ_S) -#define QRX_CTRL_QENA_STAT_S 2 -#define QRX_CTRL_QENA_STAT_M BIT(QRX_CTRL_QENA_STAT_S) -#define QRX_ITR(_QRX) (0x00292000 + ((_QRX) * 4)) -#define QRX_TAIL(_QRX) (0x00290000 + ((_QRX) * 4)) -#define GLNVM_FLA 0x000B6108 -#define GLNVM_FLA_LOCKED_S 6 -#define GLNVM_FLA_LOCKED_M BIT(GLNVM_FLA_LOCKED_S) -#define GLNVM_GENS 0x000B6100 -#define GLNVM_GENS_SR_SIZE_S 5 -#define GLNVM_GENS_SR_SIZE_M ICE_M(0x7, GLNVM_GENS_SR_SIZE_S) -#define GLNVM_ULD 0x000B6008 -#define GLNVM_ULD_CORER_DONE_S 3 -#define GLNVM_ULD_CORER_DONE_M BIT(GLNVM_ULD_CORER_DONE_S) -#define GLNVM_ULD_GLOBR_DONE_S 4 -#define GLNVM_ULD_GLOBR_DONE_M BIT(GLNVM_ULD_GLOBR_DONE_S) -#define PF_FUNC_RID 0x0009E880 -#define PF_FUNC_RID_FUNC_NUM_S 0 -#define PF_FUNC_RID_FUNC_NUM_M ICE_M(0x7, PF_FUNC_RID_FUNC_NUM_S) -#define GLPRT_BPRCH(_i) (0x00381384 + ((_i) * 8)) -#define GLPRT_BPRCL(_i) (0x00381380 + ((_i) * 8)) -#define GLPRT_BPTCH(_i) (0x00381244 + ((_i) * 8)) -#define GLPRT_BPTCL(_i) (0x00381240 + ((_i) * 8)) -#define GLPRT_CRCERRS(_i) (0x00380100 + ((_i) * 8)) -#define GLPRT_GORCH(_i) (0x00380004 + ((_i) * 8)) -#define GLPRT_GORCL(_i) (0x00380000 + ((_i) * 8)) -#define GLPRT_GOTCH(_i) (0x00380B44 + ((_i) * 8)) -#define GLPRT_GOTCL(_i) (0x00380B40 + ((_i) * 8)) -#define GLPRT_ILLERRC(_i) (0x003801C0 + ((_i) * 8)) -#define GLPRT_LXOFFRXC(_i) (0x003802C0 + ((_i) * 8)) -#define GLPRT_LXOFFTXC(_i) (0x00381180 + ((_i) * 8)) -#define GLPRT_LXONRXC(_i) (0x00380280 + ((_i) * 8)) -#define GLPRT_LXONTXC(_i) (0x00381140 + ((_i) * 8)) -#define GLPRT_MLFC(_i) (0x00380040 + ((_i) * 8)) -#define GLPRT_MPRCH(_i) (0x00381344 + ((_i) 
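The GLINT_DYN_CTL bits above are normally OR-ed into one doorbell write that re-enables a vector and clears its pending-bit-array entry. A sketch of composing that value (printf stands in for the driver's wr32(); treating ITR index 3 as "no ITR update" is the usual Intel convention and an assumption here, not something this patch states):

#include <stdint.h>
#include <stdio.h>

#define GLINT_DYN_CTL(i)         (0x00160000 + ((i) * 4))
#define GLINT_DYN_CTL_INTENA_M   (1u << 0)
#define GLINT_DYN_CTL_CLEARPBA_M (1u << 1)
#define GLINT_DYN_CTL_ITR_INDX_S 3

int main(void)
{
        unsigned int vec = 2, itr_none = 3;     /* assumed "no ITR update" */
        uint32_t val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
                       (itr_none << GLINT_DYN_CTL_ITR_INDX_S);

        printf("wr32(0x%08X, 0x%08X)\n",
               (unsigned int)GLINT_DYN_CTL(vec), (unsigned int)val);
        return 0;
}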
* 8)) -#define GLPRT_MPRCL(_i) (0x00381340 + ((_i) * 8)) -#define GLPRT_MPTCH(_i) (0x00381204 + ((_i) * 8)) -#define GLPRT_MPTCL(_i) (0x00381200 + ((_i) * 8)) -#define GLPRT_MRFC(_i) (0x00380080 + ((_i) * 8)) -#define GLPRT_PRC1023H(_i) (0x00380A04 + ((_i) * 8)) -#define GLPRT_PRC1023L(_i) (0x00380A00 + ((_i) * 8)) -#define GLPRT_PRC127H(_i) (0x00380944 + ((_i) * 8)) -#define GLPRT_PRC127L(_i) (0x00380940 + ((_i) * 8)) -#define GLPRT_PRC1522H(_i) (0x00380A44 + ((_i) * 8)) -#define GLPRT_PRC1522L(_i) (0x00380A40 + ((_i) * 8)) -#define GLPRT_PRC255H(_i) (0x00380984 + ((_i) * 8)) -#define GLPRT_PRC255L(_i) (0x00380980 + ((_i) * 8)) -#define GLPRT_PRC511H(_i) (0x003809C4 + ((_i) * 8)) -#define GLPRT_PRC511L(_i) (0x003809C0 + ((_i) * 8)) -#define GLPRT_PRC64H(_i) (0x00380904 + ((_i) * 8)) -#define GLPRT_PRC64L(_i) (0x00380900 + ((_i) * 8)) -#define GLPRT_PRC9522H(_i) (0x00380A84 + ((_i) * 8)) -#define GLPRT_PRC9522L(_i) (0x00380A80 + ((_i) * 8)) -#define GLPRT_PTC1023H(_i) (0x00380C84 + ((_i) * 8)) -#define GLPRT_PTC1023L(_i) (0x00380C80 + ((_i) * 8)) -#define GLPRT_PTC127H(_i) (0x00380BC4 + ((_i) * 8)) -#define GLPRT_PTC127L(_i) (0x00380BC0 + ((_i) * 8)) -#define GLPRT_PTC1522H(_i) (0x00380CC4 + ((_i) * 8)) -#define GLPRT_PTC1522L(_i) (0x00380CC0 + ((_i) * 8)) -#define GLPRT_PTC255H(_i) (0x00380C04 + ((_i) * 8)) -#define GLPRT_PTC255L(_i) (0x00380C00 + ((_i) * 8)) -#define GLPRT_PTC511H(_i) (0x00380C44 + ((_i) * 8)) -#define GLPRT_PTC511L(_i) (0x00380C40 + ((_i) * 8)) -#define GLPRT_PTC64H(_i) (0x00380B84 + ((_i) * 8)) -#define GLPRT_PTC64L(_i) (0x00380B80 + ((_i) * 8)) -#define GLPRT_PTC9522H(_i) (0x00380D04 + ((_i) * 8)) -#define GLPRT_PTC9522L(_i) (0x00380D00 + ((_i) * 8)) -#define GLPRT_RFC(_i) (0x00380AC0 + ((_i) * 8)) -#define GLPRT_RJC(_i) (0x00380B00 + ((_i) * 8)) -#define GLPRT_RLEC(_i) (0x00380140 + ((_i) * 8)) -#define GLPRT_ROC(_i) (0x00380240 + ((_i) * 8)) -#define GLPRT_RUC(_i) (0x00380200 + ((_i) * 8)) -#define GLPRT_TDOLD(_i) (0x00381280 + ((_i) * 8)) -#define GLPRT_UPRCH(_i) (0x00381304 + ((_i) * 8)) -#define GLPRT_UPRCL(_i) (0x00381300 + ((_i) * 8)) -#define GLPRT_UPTCH(_i) (0x003811C4 + ((_i) * 8)) -#define GLPRT_UPTCL(_i) (0x003811C0 + ((_i) * 8)) -#define GLV_BPRCH(_i) (0x003B6004 + ((_i) * 8)) -#define GLV_BPRCL(_i) (0x003B6000 + ((_i) * 8)) -#define GLV_BPTCH(_i) (0x0030E004 + ((_i) * 8)) -#define GLV_BPTCL(_i) (0x0030E000 + ((_i) * 8)) -#define GLV_GORCH(_i) (0x003B0004 + ((_i) * 8)) -#define GLV_GORCL(_i) (0x003B0000 + ((_i) * 8)) -#define GLV_GOTCH(_i) (0x00300004 + ((_i) * 8)) -#define GLV_GOTCL(_i) (0x00300000 + ((_i) * 8)) -#define GLV_MPRCH(_i) (0x003B4004 + ((_i) * 8)) -#define GLV_MPRCL(_i) (0x003B4000 + ((_i) * 8)) -#define GLV_MPTCH(_i) (0x0030C004 + ((_i) * 8)) -#define GLV_MPTCL(_i) (0x0030C000 + ((_i) * 8)) -#define GLV_RDPC(_i) (0x00294C04 + ((_i) * 4)) -#define GLV_TEPC(_VSI) (0x00312000 + ((_VSI) * 4)) -#define GLV_UPRCH(_i) (0x003B2004 + ((_i) * 8)) -#define GLV_UPRCL(_i) (0x003B2000 + ((_i) * 8)) -#define GLV_UPTCH(_i) (0x0030A004 + ((_i) * 8)) -#define GLV_UPTCL(_i) (0x0030A000 + ((_i) * 8)) -#define VSIQF_HKEY_MAX_INDEX 12 +#define GLFLXP_RXDID_FLX_WRD_3_RXDID_OPCODE_M ICE_M(0x3, 30) +#define QRXFLXP_CNTXT(_QRX) (0x00480000 + ((_QRX) * 4)) +#define QRXFLXP_CNTXT_RXDID_IDX_S 0 +#define QRXFLXP_CNTXT_RXDID_IDX_M ICE_M(0x3F, 0) +#define QRXFLXP_CNTXT_RXDID_PRIO_S 8 +#define QRXFLXP_CNTXT_RXDID_PRIO_M ICE_M(0x7, 8) +#define GLGEN_RSTAT 0x000B8188 +#define GLGEN_RSTAT_DEVSTATE_M ICE_M(0x3, 0) +#define GLGEN_RSTCTL 0x000B8180 +#define 
GLGEN_RSTCTL_GRSTDEL_S 0 +#define GLGEN_RSTCTL_GRSTDEL_M ICE_M(0x3F, GLGEN_RSTCTL_GRSTDEL_S) +#define GLGEN_RSTAT_RESET_TYPE_S 2 +#define GLGEN_RSTAT_RESET_TYPE_M ICE_M(0x3, 2) +#define GLGEN_RTRIG 0x000B8190 +#define GLGEN_RTRIG_CORER_M BIT(0) +#define GLGEN_RTRIG_GLOBR_M BIT(1) +#define GLGEN_STAT 0x000B612C +#define GLGEN_VFLRSTAT(_i) (0x00093A04 + ((_i) * 4)) +#define PFGEN_CTRL 0x00091000 +#define PFGEN_CTRL_PFSWR_M BIT(0) +#define PFGEN_STATE 0x00088000 +#define PRTGEN_STATUS 0x000B8100 +#define VFGEN_RSTAT(_VF) (0x00074000 + ((_VF) * 4)) +#define VPGEN_VFRSTAT(_VF) (0x00090800 + ((_VF) * 4)) +#define VPGEN_VFRSTAT_VFRD_M BIT(0) +#define VPGEN_VFRTRIG(_VF) (0x00090000 + ((_VF) * 4)) +#define VPGEN_VFRTRIG_VFSWR_M BIT(0) +#define PFHMC_ERRORDATA 0x00520500 +#define PFHMC_ERRORINFO 0x00520400 +#define GLINT_DYN_CTL(_INT) (0x00160000 + ((_INT) * 4)) +#define GLINT_DYN_CTL_INTENA_M BIT(0) +#define GLINT_DYN_CTL_CLEARPBA_M BIT(1) +#define GLINT_DYN_CTL_SWINT_TRIG_M BIT(2) +#define GLINT_DYN_CTL_ITR_INDX_S 3 +#define GLINT_DYN_CTL_SW_ITR_INDX_M ICE_M(0x3, 25) +#define GLINT_DYN_CTL_INTENA_MSK_M BIT(31) +#define GLINT_ITR(_i, _INT) (0x00154000 + ((_i) * 8192 + (_INT) * 4)) +#define GLINT_RATE(_INT) (0x0015A000 + ((_INT) * 4)) +#define GLINT_RATE_INTRL_ENA_M BIT(6) +#define GLINT_VECT2FUNC(_INT) (0x00162000 + ((_INT) * 4)) +#define GLINT_VECT2FUNC_VF_NUM_S 0 +#define GLINT_VECT2FUNC_VF_NUM_M ICE_M(0xFF, 0) +#define GLINT_VECT2FUNC_PF_NUM_S 12 +#define GLINT_VECT2FUNC_PF_NUM_M ICE_M(0x7, 12) +#define GLINT_VECT2FUNC_IS_PF_S 16 +#define GLINT_VECT2FUNC_IS_PF_M BIT(16) +#define PFINT_FW_CTL 0x0016C800 +#define PFINT_FW_CTL_MSIX_INDX_M ICE_M(0x7FF, 0) +#define PFINT_FW_CTL_ITR_INDX_S 11 +#define PFINT_FW_CTL_ITR_INDX_M ICE_M(0x3, 11) +#define PFINT_FW_CTL_CAUSE_ENA_M BIT(30) +#define PFINT_MBX_CTL 0x0016B280 +#define PFINT_MBX_CTL_MSIX_INDX_M ICE_M(0x7FF, 0) +#define PFINT_MBX_CTL_ITR_INDX_S 11 +#define PFINT_MBX_CTL_ITR_INDX_M ICE_M(0x3, 11) +#define PFINT_MBX_CTL_CAUSE_ENA_M BIT(30) +#define PFINT_OICR 0x0016CA00 +#define PFINT_OICR_ECC_ERR_M BIT(16) +#define PFINT_OICR_MAL_DETECT_M BIT(19) +#define PFINT_OICR_GRST_M BIT(20) +#define PFINT_OICR_PCI_EXCEPTION_M BIT(21) +#define PFINT_OICR_HMC_ERR_M BIT(26) +#define PFINT_OICR_PE_CRITERR_M BIT(28) +#define PFINT_OICR_VFLR_M BIT(29) +#define PFINT_OICR_CTL 0x0016CA80 +#define PFINT_OICR_CTL_MSIX_INDX_M ICE_M(0x7FF, 0) +#define PFINT_OICR_CTL_ITR_INDX_S 11 +#define PFINT_OICR_CTL_ITR_INDX_M ICE_M(0x3, 11) +#define PFINT_OICR_CTL_CAUSE_ENA_M BIT(30) +#define PFINT_OICR_ENA 0x0016C900 +#define QINT_RQCTL(_QRX) (0x00150000 + ((_QRX) * 4)) +#define QINT_RQCTL_MSIX_INDX_S 0 +#define QINT_RQCTL_ITR_INDX_S 11 +#define QINT_RQCTL_CAUSE_ENA_M BIT(30) +#define QINT_TQCTL(_DBQM) (0x00140000 + ((_DBQM) * 4)) +#define QINT_TQCTL_MSIX_INDX_S 0 +#define QINT_TQCTL_ITR_INDX_S 11 +#define QINT_TQCTL_CAUSE_ENA_M BIT(30) +#define VPINT_ALLOC(_VF) (0x001D1000 + ((_VF) * 4)) +#define VPINT_ALLOC_FIRST_S 0 +#define VPINT_ALLOC_FIRST_M ICE_M(0x7FF, 0) +#define VPINT_ALLOC_LAST_S 12 +#define VPINT_ALLOC_LAST_M ICE_M(0x7FF, 12) +#define VPINT_ALLOC_VALID_M BIT(31) +#define QRX_CONTEXT(_i, _QRX) (0x00280000 + ((_i) * 8192 + (_QRX) * 4)) +#define QRX_CTRL(_QRX) (0x00120000 + ((_QRX) * 4)) +#define QRX_CTRL_MAX_INDEX 2047 +#define QRX_CTRL_QENA_REQ_S 0 +#define QRX_CTRL_QENA_REQ_M BIT(0) +#define QRX_CTRL_QENA_STAT_S 2 +#define QRX_CTRL_QENA_STAT_M BIT(2) +#define QRX_ITR(_QRX) (0x00292000 + ((_QRX) * 4)) +#define QRX_TAIL(_QRX) (0x00290000 + ((_QRX) * 4)) +#define 
QRX_TAIL_MAX_INDEX 2047 +#define QRX_TAIL_TAIL_S 0 +#define QRX_TAIL_TAIL_M ICE_M(0x1FFF, 0) +#define VPLAN_RX_QBASE(_VF) (0x00072000 + ((_VF) * 4)) +#define VPLAN_RX_QBASE_VFFIRSTQ_S 0 +#define VPLAN_RX_QBASE_VFFIRSTQ_M ICE_M(0x7FF, 0) +#define VPLAN_RX_QBASE_VFNUMQ_S 16 +#define VPLAN_RX_QBASE_VFNUMQ_M ICE_M(0xFF, 16) +#define VPLAN_RXQ_MAPENA(_VF) (0x00073000 + ((_VF) * 4)) +#define VPLAN_RXQ_MAPENA_RX_ENA_M BIT(0) +#define VPLAN_TX_QBASE(_VF) (0x001D1800 + ((_VF) * 4)) +#define VPLAN_TX_QBASE_VFFIRSTQ_S 0 +#define VPLAN_TX_QBASE_VFFIRSTQ_M ICE_M(0x3FFF, 0) +#define VPLAN_TX_QBASE_VFNUMQ_S 16 +#define VPLAN_TX_QBASE_VFNUMQ_M ICE_M(0xFF, 16) +#define VPLAN_TXQ_MAPENA(_VF) (0x00073800 + ((_VF) * 4)) +#define VPLAN_TXQ_MAPENA_TX_ENA_M BIT(0) +#define GL_MDET_RX 0x00294C00 +#define GL_MDET_RX_QNUM_S 0 +#define GL_MDET_RX_QNUM_M ICE_M(0x7FFF, 0) +#define GL_MDET_RX_VF_NUM_S 15 +#define GL_MDET_RX_VF_NUM_M ICE_M(0xFF, 15) +#define GL_MDET_RX_PF_NUM_S 23 +#define GL_MDET_RX_PF_NUM_M ICE_M(0x7, 23) +#define GL_MDET_RX_MAL_TYPE_S 26 +#define GL_MDET_RX_MAL_TYPE_M ICE_M(0x1F, 26) +#define GL_MDET_RX_VALID_M BIT(31) +#define GL_MDET_TX_PQM 0x002D2E00 +#define GL_MDET_TX_PQM_PF_NUM_S 0 +#define GL_MDET_TX_PQM_PF_NUM_M ICE_M(0x7, 0) +#define GL_MDET_TX_PQM_VF_NUM_S 4 +#define GL_MDET_TX_PQM_VF_NUM_M ICE_M(0xFF, 4) +#define GL_MDET_TX_PQM_QNUM_S 12 +#define GL_MDET_TX_PQM_QNUM_M ICE_M(0x3FFF, 12) +#define GL_MDET_TX_PQM_MAL_TYPE_S 26 +#define GL_MDET_TX_PQM_MAL_TYPE_M ICE_M(0x1F, 26) +#define GL_MDET_TX_PQM_VALID_M BIT(31) +#define GL_MDET_TX_TCLAN 0x000FC068 +#define GL_MDET_TX_TCLAN_QNUM_S 0 +#define GL_MDET_TX_TCLAN_QNUM_M ICE_M(0x7FFF, 0) +#define GL_MDET_TX_TCLAN_VF_NUM_S 15 +#define GL_MDET_TX_TCLAN_VF_NUM_M ICE_M(0xFF, 15) +#define GL_MDET_TX_TCLAN_PF_NUM_S 23 +#define GL_MDET_TX_TCLAN_PF_NUM_M ICE_M(0x7, 23) +#define GL_MDET_TX_TCLAN_MAL_TYPE_S 26 +#define GL_MDET_TX_TCLAN_MAL_TYPE_M ICE_M(0x1F, 26) +#define GL_MDET_TX_TCLAN_VALID_M BIT(31) +#define PF_MDET_RX 0x00294280 +#define PF_MDET_RX_VALID_M BIT(0) +#define PF_MDET_TX_PQM 0x002D2C80 +#define PF_MDET_TX_PQM_VALID_M BIT(0) +#define PF_MDET_TX_TCLAN 0x000FC000 +#define PF_MDET_TX_TCLAN_VALID_M BIT(0) +#define VP_MDET_RX(_VF) (0x00294400 + ((_VF) * 4)) +#define VP_MDET_RX_VALID_M BIT(0) +#define VP_MDET_TX_PQM(_VF) (0x002D2000 + ((_VF) * 4)) +#define VP_MDET_TX_PQM_VALID_M BIT(0) +#define VP_MDET_TX_TCLAN(_VF) (0x000FB800 + ((_VF) * 4)) +#define VP_MDET_TX_TCLAN_VALID_M BIT(0) +#define VP_MDET_TX_TDPU(_VF) (0x00040000 + ((_VF) * 4)) +#define VP_MDET_TX_TDPU_VALID_M BIT(0) +#define GLNVM_FLA 0x000B6108 +#define GLNVM_FLA_LOCKED_M BIT(6) +#define GLNVM_GENS 0x000B6100 +#define GLNVM_GENS_SR_SIZE_S 5 +#define GLNVM_GENS_SR_SIZE_M ICE_M(0x7, 5) +#define GLNVM_ULD 0x000B6008 +#define GLNVM_ULD_CORER_DONE_M BIT(3) +#define GLNVM_ULD_GLOBR_DONE_M BIT(4) +#define PF_FUNC_RID 0x0009E880 +#define PF_FUNC_RID_FUNC_NUM_S 0 +#define PF_FUNC_RID_FUNC_NUM_M ICE_M(0x7, 0) +#define PF_PCI_CIAA 0x0009E580 +#define PF_PCI_CIAA_VF_NUM_S 12 +#define PF_PCI_CIAD 0x0009E500 +#define GL_PWR_MODE_CTL 0x000B820C +#define GL_PWR_MODE_CTL_CAR_MAX_BW_S 30 +#define GL_PWR_MODE_CTL_CAR_MAX_BW_M ICE_M(0x3, 30) +#define GLPRT_BPRCH(_i) (0x00381384 + ((_i) * 8)) +#define GLPRT_BPRCL(_i) (0x00381380 + ((_i) * 8)) +#define GLPRT_BPTCH(_i) (0x00381244 + ((_i) * 8)) +#define GLPRT_BPTCL(_i) (0x00381240 + ((_i) * 8)) +#define GLPRT_CRCERRS(_i) (0x00380100 + ((_i) * 8)) +#define GLPRT_GORCH(_i) (0x00380004 + ((_i) * 8)) +#define GLPRT_GORCL(_i) (0x00380000 + ((_i) * 8)) +#define 
GLPRT_GOTCH(_i) (0x00380B44 + ((_i) * 8)) +#define GLPRT_GOTCL(_i) (0x00380B40 + ((_i) * 8)) +#define GLPRT_ILLERRC(_i) (0x003801C0 + ((_i) * 8)) +#define GLPRT_LXOFFRXC(_i) (0x003802C0 + ((_i) * 8)) +#define GLPRT_LXOFFTXC(_i) (0x00381180 + ((_i) * 8)) +#define GLPRT_LXONRXC(_i) (0x00380280 + ((_i) * 8)) +#define GLPRT_LXONTXC(_i) (0x00381140 + ((_i) * 8)) +#define GLPRT_MLFC(_i) (0x00380040 + ((_i) * 8)) +#define GLPRT_MPRCH(_i) (0x00381344 + ((_i) * 8)) +#define GLPRT_MPRCL(_i) (0x00381340 + ((_i) * 8)) +#define GLPRT_MPTCH(_i) (0x00381204 + ((_i) * 8)) +#define GLPRT_MPTCL(_i) (0x00381200 + ((_i) * 8)) +#define GLPRT_MRFC(_i) (0x00380080 + ((_i) * 8)) +#define GLPRT_PRC1023H(_i) (0x00380A04 + ((_i) * 8)) +#define GLPRT_PRC1023L(_i) (0x00380A00 + ((_i) * 8)) +#define GLPRT_PRC127H(_i) (0x00380944 + ((_i) * 8)) +#define GLPRT_PRC127L(_i) (0x00380940 + ((_i) * 8)) +#define GLPRT_PRC1522H(_i) (0x00380A44 + ((_i) * 8)) +#define GLPRT_PRC1522L(_i) (0x00380A40 + ((_i) * 8)) +#define GLPRT_PRC255H(_i) (0x00380984 + ((_i) * 8)) +#define GLPRT_PRC255L(_i) (0x00380980 + ((_i) * 8)) +#define GLPRT_PRC511H(_i) (0x003809C4 + ((_i) * 8)) +#define GLPRT_PRC511L(_i) (0x003809C0 + ((_i) * 8)) +#define GLPRT_PRC64H(_i) (0x00380904 + ((_i) * 8)) +#define GLPRT_PRC64L(_i) (0x00380900 + ((_i) * 8)) +#define GLPRT_PRC9522H(_i) (0x00380A84 + ((_i) * 8)) +#define GLPRT_PRC9522L(_i) (0x00380A80 + ((_i) * 8)) +#define GLPRT_PTC1023H(_i) (0x00380C84 + ((_i) * 8)) +#define GLPRT_PTC1023L(_i) (0x00380C80 + ((_i) * 8)) +#define GLPRT_PTC127H(_i) (0x00380BC4 + ((_i) * 8)) +#define GLPRT_PTC127L(_i) (0x00380BC0 + ((_i) * 8)) +#define GLPRT_PTC1522H(_i) (0x00380CC4 + ((_i) * 8)) +#define GLPRT_PTC1522L(_i) (0x00380CC0 + ((_i) * 8)) +#define GLPRT_PTC255H(_i) (0x00380C04 + ((_i) * 8)) +#define GLPRT_PTC255L(_i) (0x00380C00 + ((_i) * 8)) +#define GLPRT_PTC511H(_i) (0x00380C44 + ((_i) * 8)) +#define GLPRT_PTC511L(_i) (0x00380C40 + ((_i) * 8)) +#define GLPRT_PTC64H(_i) (0x00380B84 + ((_i) * 8)) +#define GLPRT_PTC64L(_i) (0x00380B80 + ((_i) * 8)) +#define GLPRT_PTC9522H(_i) (0x00380D04 + ((_i) * 8)) +#define GLPRT_PTC9522L(_i) (0x00380D00 + ((_i) * 8)) +#define GLPRT_RFC(_i) (0x00380AC0 + ((_i) * 8)) +#define GLPRT_RJC(_i) (0x00380B00 + ((_i) * 8)) +#define GLPRT_RLEC(_i) (0x00380140 + ((_i) * 8)) +#define GLPRT_ROC(_i) (0x00380240 + ((_i) * 8)) +#define GLPRT_RUC(_i) (0x00380200 + ((_i) * 8)) +#define GLPRT_TDOLD(_i) (0x00381280 + ((_i) * 8)) +#define GLPRT_UPRCH(_i) (0x00381304 + ((_i) * 8)) +#define GLPRT_UPRCL(_i) (0x00381300 + ((_i) * 8)) +#define GLPRT_UPTCH(_i) (0x003811C4 + ((_i) * 8)) +#define GLPRT_UPTCL(_i) (0x003811C0 + ((_i) * 8)) +#define GLV_BPRCH(_i) (0x003B6004 + ((_i) * 8)) +#define GLV_BPRCL(_i) (0x003B6000 + ((_i) * 8)) +#define GLV_BPTCH(_i) (0x0030E004 + ((_i) * 8)) +#define GLV_BPTCL(_i) (0x0030E000 + ((_i) * 8)) +#define GLV_GORCH(_i) (0x003B0004 + ((_i) * 8)) +#define GLV_GORCL(_i) (0x003B0000 + ((_i) * 8)) +#define GLV_GOTCH(_i) (0x00300004 + ((_i) * 8)) +#define GLV_GOTCL(_i) (0x00300000 + ((_i) * 8)) +#define GLV_MPRCH(_i) (0x003B4004 + ((_i) * 8)) +#define GLV_MPRCL(_i) (0x003B4000 + ((_i) * 8)) +#define GLV_MPTCH(_i) (0x0030C004 + ((_i) * 8)) +#define GLV_MPTCL(_i) (0x0030C000 + ((_i) * 8)) +#define GLV_RDPC(_i) (0x00294C04 + ((_i) * 4)) +#define GLV_TEPC(_VSI) (0x00312000 + ((_VSI) * 4)) +#define GLV_UPRCH(_i) (0x003B2004 + ((_i) * 8)) +#define GLV_UPRCL(_i) (0x003B2000 + ((_i) * 8)) +#define GLV_UPTCH(_i) (0x0030A004 + ((_i) * 8)) +#define GLV_UPTCL(_i) (0x0030A000 + ((_i) * 8)) +#define 
VSIQF_HKEY_MAX_INDEX 12 +#define VSIQF_HLUT_MAX_INDEX 15 +#define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4)) +#define VFINT_DYN_CTLN_CLEARPBA_M BIT(1) #endif /* _ICE_HW_AUTOGEN_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h index 068dbc740b76..7d2a66739e3f 100644 --- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h +++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h @@ -188,23 +188,25 @@ struct ice_32b_rx_flex_desc_nic { * with a specific metadata (profile 7 reserved for HW) */ enum ice_rxdid { - ICE_RXDID_START = 0, - ICE_RXDID_LEGACY_0 = ICE_RXDID_START, - ICE_RXDID_LEGACY_1, - ICE_RXDID_FLX_START, - ICE_RXDID_FLEX_NIC = ICE_RXDID_FLX_START, - ICE_RXDID_FLX_LAST = 63, - ICE_RXDID_LAST = ICE_RXDID_FLX_LAST + ICE_RXDID_LEGACY_0 = 0, + ICE_RXDID_LEGACY_1 = 1, + ICE_RXDID_FLEX_NIC = 2, + ICE_RXDID_FLEX_NIC_2 = 6, + ICE_RXDID_HW = 7, + ICE_RXDID_LAST = 63, }; /* Receive Flex Descriptor Rx opcode values */ #define ICE_RX_OPC_MDID 0x01 /* Receive Descriptor MDID values */ -#define ICE_RX_MDID_FLOW_ID_LOWER 5 -#define ICE_RX_MDID_FLOW_ID_HIGH 6 -#define ICE_RX_MDID_HASH_LOW 56 -#define ICE_RX_MDID_HASH_HIGH 57 +enum ice_flex_rx_mdid { + ICE_RX_MDID_FLOW_ID_LOWER = 5, + ICE_RX_MDID_FLOW_ID_HIGH, + ICE_RX_MDID_SRC_VSI = 19, + ICE_RX_MDID_HASH_LOW = 56, + ICE_RX_MDID_HASH_HIGH, +}; /* Rx Flag64 packet flag bits */ enum ice_rx_flg64_bits { @@ -416,6 +418,7 @@ struct ice_tlan_ctx { u8 pf_num; u16 vmvf_num; u8 vmvf_type; +#define ICE_TLAN_CTX_VMVF_TYPE_VF 0 #define ICE_TLAN_CTX_VMVF_TYPE_VMQ 1 #define ICE_TLAN_CTX_VMVF_TYPE_PF 2 u16 src_vsi; @@ -471,4 +474,16 @@ static inline struct ice_rx_ptype_decoded ice_decode_rx_desc_ptype(u16 ptype) { return ice_ptype_lkup[ptype]; } + +#define ICE_LINK_SPEED_UNKNOWN 0 +#define ICE_LINK_SPEED_10MBPS 10 +#define ICE_LINK_SPEED_100MBPS 100 +#define ICE_LINK_SPEED_1000MBPS 1000 +#define ICE_LINK_SPEED_2500MBPS 2500 +#define ICE_LINK_SPEED_5000MBPS 5000 +#define ICE_LINK_SPEED_10000MBPS 10000 +#define ICE_LINK_SPEED_20000MBPS 20000 +#define ICE_LINK_SPEED_25000MBPS 25000 +#define ICE_LINK_SPEED_40000MBPS 40000 + #endif /* _ICE_LAN_TX_RX_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c new file mode 100644 index 000000000000..49f1940772ed --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -0,0 +1,2619 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018, Intel Corporation. */ + +#include "ice.h" +#include "ice_lib.h" + +/** + * ice_setup_rx_ctx - Configure a receive ring context + * @ring: The Rx ring to configure + * + * Configure the Rx descriptor ring in RLAN context. + */ +static int ice_setup_rx_ctx(struct ice_ring *ring) +{ + struct ice_vsi *vsi = ring->vsi; + struct ice_hw *hw = &vsi->back->hw; + u32 rxdid = ICE_RXDID_FLEX_NIC; + struct ice_rlan_ctx rlan_ctx; + u32 regval; + u16 pf_q; + int err; + + /* what is RX queue number in global space of 2K Rx queues */ + pf_q = vsi->rxq_map[ring->q_index]; + + /* clear the context structure first */ + memset(&rlan_ctx, 0, sizeof(rlan_ctx)); + + rlan_ctx.base = ring->dma >> 7; + + rlan_ctx.qlen = ring->count; + + /* Receive Packet Data Buffer Size. + * The Packet Data Buffer Size is defined in 128 byte units. + */ + rlan_ctx.dbuf = vsi->rx_buf_len >> ICE_RLAN_CTX_DBUF_S; + + /* use 32 byte descriptors */ + rlan_ctx.dsize = 1; + + /* Strip the Ethernet CRC bytes before the packet is posted to host + * memory. 
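A quick sketch of the unit conversions used in this Rx context setup, with illustrative values (the shift of 7 matches the 128-byte units described above; ICE_RLAN_CTX_DBUF_S is assumed here to be 7):

	/* illustrative only: 2048-byte buffers pack as 2048 >> 7 == 16
	 * data-buffer units, and the ring base is likewise expressed in
	 * 128-byte units (ring->dma >> 7 above)
	 */
	u16 rx_buf_len = 2048;			/* ICE_RXBUF_2048 */
	u16 dbuf_units = rx_buf_len >> 7;	/* == 16 */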
+ */ + rlan_ctx.crcstrip = 1; + + /* L2TSEL flag defines the reported L2 Tags in the receive descriptor */ + rlan_ctx.l2tsel = 1; + + rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT; + rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT; + rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT; + + /* This controls whether VLAN is stripped from inner headers + * The VLAN in the inner L2 header is stripped to the receive + * descriptor if enabled by this flag. + */ + rlan_ctx.showiv = 0; + + /* Max packet size for this queue - must not be set to a larger value + * than 5 x DBUF + */ + rlan_ctx.rxmax = min_t(u16, vsi->max_frame, + ICE_MAX_CHAINED_RX_BUFS * vsi->rx_buf_len); + + /* Rx queue threshold in units of 64 */ + rlan_ctx.lrxqthresh = 1; + + /* Enable Flexible Descriptors in the queue context which + * allows this driver to select a specific receive descriptor format + */ + if (vsi->type != ICE_VSI_VF) { + regval = rd32(hw, QRXFLXP_CNTXT(pf_q)); + regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) & + QRXFLXP_CNTXT_RXDID_IDX_M; + + /* increasing context priority to pick up profile id; + * default is 0x01; setting to 0x03 to ensure profile + * is programmed if the previous context is of the same priority + */ + regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) & + QRXFLXP_CNTXT_RXDID_PRIO_M; + + wr32(hw, QRXFLXP_CNTXT(pf_q), regval); + } + + /* Absolute queue number out of 2K needs to be passed */ + err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q); + if (err) { + dev_err(&vsi->back->pdev->dev, + "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n", + pf_q, err); + return -EIO; + } + + if (vsi->type == ICE_VSI_VF) + return 0; + + /* init queue specific tail register */ + ring->tail = hw->hw_addr + QRX_TAIL(pf_q); + writel(0, ring->tail); + ice_alloc_rx_bufs(ring, ICE_DESC_UNUSED(ring)); + + return 0; +} + +/** + * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance + * @ring: The Tx ring to configure + * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized + * @pf_q: queue index in the PF space + * + * Configure the Tx descriptor ring in TLAN context.
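The autogenerated register header above pairs every field shift (the _S suffix) with a mask (the _M suffix) built from ICE_M(); ICE_M(m, s) is presumed here to expand to ((m) << (s)). Under that assumption, a minimal sketch of pulling a field back out of a register such as QRXFLXP_CNTXT:

	/* assumes ICE_M(m, s) == ((m) << (s)); extraction is mask-then-shift */
	u32 regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
	u32 rxdid = (regval & QRXFLXP_CNTXT_RXDID_IDX_M) >>
		    QRXFLXP_CNTXT_RXDID_IDX_S;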
+ */ +static void +ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q) +{ + struct ice_vsi *vsi = ring->vsi; + struct ice_hw *hw = &vsi->back->hw; + + tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S; + + tlan_ctx->port_num = vsi->port_info->lport; + + /* Transmit Queue Length */ + tlan_ctx->qlen = ring->count; + + /* PF number */ + tlan_ctx->pf_num = hw->pf_id; + + /* queue belongs to a specific VSI type + * VF / VM index should be programmed per vmvf_type setting: + * for vmvf_type = VF, it is VF number between 0-256 + * for vmvf_type = VM, it is VM number between 0-767 + * for PF or EMP this field should be set to zero + */ + switch (vsi->type) { + case ICE_VSI_PF: + tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF; + break; + case ICE_VSI_VF: + /* Firmware expects vmvf_num to be absolute VF id */ + tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf_id; + tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF; + break; + default: + return; + } + + /* make sure the context is associated with the right VSI */ + tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx); + + tlan_ctx->tso_ena = ICE_TX_LEGACY; + tlan_ctx->tso_qnum = pf_q; + + /* Legacy or Advanced Host Interface: + * 0: Advanced Host Interface + * 1: Legacy Host Interface + */ + tlan_ctx->legacy_int = ICE_TX_LEGACY; +} + +/** + * ice_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled + * @pf: the PF being configured + * @pf_q: the PF queue + * @ena: enable or disable state of the queue + * + * This routine will wait for the given Rx queue of the PF to reach the + * enabled or disabled state. + * Returns -ETIMEDOUT in case of failing to reach the requested state after + * multiple retries; else will return 0 in case of success. + */ +static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena) +{ + int i; + + for (i = 0; i < ICE_Q_WAIT_RETRY_LIMIT; i++) { + u32 rx_reg = rd32(&pf->hw, QRX_CTRL(pf_q)); + + if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M)) + break; + + usleep_range(10, 20); + } + if (i >= ICE_Q_WAIT_RETRY_LIMIT) + return -ETIMEDOUT; + + return 0; +} + +/** + * ice_vsi_ctrl_rx_rings - Start or stop a VSI's Rx rings + * @vsi: the VSI being configured + * @ena: start or stop the Rx rings + */ +static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena) +{ + struct ice_pf *pf = vsi->back; + struct ice_hw *hw = &pf->hw; + int i, j, ret = 0; + + for (i = 0; i < vsi->num_rxq; i++) { + int pf_q = vsi->rxq_map[i]; + u32 rx_reg; + + for (j = 0; j < ICE_Q_WAIT_MAX_RETRY; j++) { + rx_reg = rd32(hw, QRX_CTRL(pf_q)); + if (((rx_reg >> QRX_CTRL_QENA_REQ_S) & 1) == + ((rx_reg >> QRX_CTRL_QENA_STAT_S) & 1)) + break; + usleep_range(1000, 2000); + } + + /* Skip if the queue is already in the requested state */ + if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M)) + continue; + + /* turn on/off the queue */ + if (ena) + rx_reg |= QRX_CTRL_QENA_REQ_M; + else + rx_reg &= ~QRX_CTRL_QENA_REQ_M; + wr32(hw, QRX_CTRL(pf_q), rx_reg); + + /* wait for the change to finish */ + ret = ice_pf_rxq_wait(pf, pf_q, ena); + if (ret) { + dev_err(&pf->pdev->dev, + "VSI idx %d Rx ring %d %sable timeout\n", + vsi->idx, pf_q, (ena ? "en" : "dis")); + break; + } + } + + return ret; +} + +/** + * ice_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the VSI + * @vsi: VSI pointer + * @alloc_qvectors: a bool to specify if q_vectors need to be allocated. 
+ * + * On error: returns error code (negative) + * On success: returns 0 + */ +static int ice_vsi_alloc_arrays(struct ice_vsi *vsi, bool alloc_qvectors) +{ + struct ice_pf *pf = vsi->back; + + /* allocate memory for both Tx and Rx ring pointers */ + vsi->tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq, + sizeof(struct ice_ring *), GFP_KERNEL); + if (!vsi->tx_rings) + goto err_txrings; + + vsi->rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq, + sizeof(struct ice_ring *), GFP_KERNEL); + if (!vsi->rx_rings) + goto err_rxrings; + + if (alloc_qvectors) { + /* allocate memory for q_vector pointers */ + vsi->q_vectors = devm_kcalloc(&pf->pdev->dev, + vsi->num_q_vectors, + sizeof(struct ice_q_vector *), + GFP_KERNEL); + if (!vsi->q_vectors) + goto err_vectors; + } + + return 0; + +err_vectors: + devm_kfree(&pf->pdev->dev, vsi->rx_rings); +err_rxrings: + devm_kfree(&pf->pdev->dev, vsi->tx_rings); +err_txrings: + return -ENOMEM; +} + +/** + * ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI + * @vsi: the VSI being configured + */ +static void ice_vsi_set_num_qs(struct ice_vsi *vsi) +{ + struct ice_pf *pf = vsi->back; + + switch (vsi->type) { + case ICE_VSI_PF: + vsi->alloc_txq = pf->num_lan_tx; + vsi->alloc_rxq = pf->num_lan_rx; + vsi->num_desc = ALIGN(ICE_DFLT_NUM_DESC, ICE_REQ_DESC_MULTIPLE); + vsi->num_q_vectors = max_t(int, pf->num_lan_rx, pf->num_lan_tx); + break; + case ICE_VSI_VF: + vsi->alloc_txq = pf->num_vf_qps; + vsi->alloc_rxq = pf->num_vf_qps; + /* pf->num_vf_msix includes (VF miscellaneous vector + + * data queue interrupts). Since vsi->num_q_vectors is the + * number of queue vectors, subtract 1 from the original vector + * count + */ + vsi->num_q_vectors = pf->num_vf_msix - 1; + break; + default: + dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n", + vsi->type); + break; + } +} + +/** + * ice_get_free_slot - get the next non-NULL location index in array + * @array: array to search + * @size: size of the array + * @curr: last known occupied index to be used as a search hint + * + * void * is being used to keep the functionality generic. This lets us use this + * function on any array of pointers.
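A hypothetical caller sketch for this helper (names mirror the PF's VSI table; ice_vsi_alloc() further down uses the same helper to refresh its pf->next_vsi hint):

	/* claim the next free entry in the PF's VSI table */
	int slot = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
				     pf->next_vsi);

	if (slot == ICE_NO_VSI)
		return NULL;		/* table is full */
	pf->vsi[slot] = vsi;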
+ */ +static int ice_get_free_slot(void *array, int size, int curr) +{ + int **tmp_array = (int **)array; + int next; + + if (curr < (size - 1) && !tmp_array[curr + 1]) { + next = curr + 1; + } else { + int i = 0; + + while ((i < size) && (tmp_array[i])) + i++; + if (i == size) + next = ICE_NO_VSI; + else + next = i; + } + return next; +} + +/** + * ice_vsi_delete - delete a VSI from the switch + * @vsi: pointer to VSI being removed + */ +void ice_vsi_delete(struct ice_vsi *vsi) +{ + struct ice_pf *pf = vsi->back; + struct ice_vsi_ctx ctxt; + enum ice_status status; + + if (vsi->type == ICE_VSI_VF) + ctxt.vf_num = vsi->vf_id; + ctxt.vsi_num = vsi->vsi_num; + + memcpy(&ctxt.info, &vsi->info, sizeof(struct ice_aqc_vsi_props)); + + status = ice_free_vsi(&pf->hw, vsi->idx, &ctxt, false, NULL); + if (status) + dev_err(&pf->pdev->dev, "Failed to delete VSI %i in FW\n", + vsi->vsi_num); +} + +/** + * ice_vsi_free_arrays - clean up VSI resources + * @vsi: pointer to VSI being cleared + * @free_qvectors: bool to specify if q_vectors should be deallocated + */ +static void ice_vsi_free_arrays(struct ice_vsi *vsi, bool free_qvectors) +{ + struct ice_pf *pf = vsi->back; + + /* free the ring and vector containers */ + if (free_qvectors && vsi->q_vectors) { + devm_kfree(&pf->pdev->dev, vsi->q_vectors); + vsi->q_vectors = NULL; + } + if (vsi->tx_rings) { + devm_kfree(&pf->pdev->dev, vsi->tx_rings); + vsi->tx_rings = NULL; + } + if (vsi->rx_rings) { + devm_kfree(&pf->pdev->dev, vsi->rx_rings); + vsi->rx_rings = NULL; + } +} + +/** + * ice_vsi_clear - clean up and deallocate the provided VSI + * @vsi: pointer to VSI being cleared + * + * This deallocates the VSI's queue resources, removes it from the PF's + * VSI array if necessary, and deallocates the VSI + * + * Returns 0 on success, negative on failure + */ +int ice_vsi_clear(struct ice_vsi *vsi) +{ + struct ice_pf *pf = NULL; + + if (!vsi) + return 0; + + if (!vsi->back) + return -EINVAL; + + pf = vsi->back; + + if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) { + dev_dbg(&pf->pdev->dev, "vsi does not exist at pf->vsi[%d]\n", + vsi->idx); + return -EINVAL; + } + + mutex_lock(&pf->sw_mutex); + /* updates the PF for this cleared VSI */ + + pf->vsi[vsi->idx] = NULL; + if (vsi->idx < pf->next_vsi) + pf->next_vsi = vsi->idx; + + ice_vsi_free_arrays(vsi, true); + mutex_unlock(&pf->sw_mutex); + devm_kfree(&pf->pdev->dev, vsi); + + return 0; +} + +/** + * ice_msix_clean_rings - MSIX mode Interrupt Handler + * @irq: interrupt number + * @data: pointer to a q_vector + */ +irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data) +{ + struct ice_q_vector *q_vector = (struct ice_q_vector *)data; + + if (!q_vector->tx.ring && !q_vector->rx.ring) + return IRQ_HANDLED; + + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; +} + +/** + * ice_vsi_alloc - Allocates the next available struct VSI in the PF + * @pf: board private structure + * @type: type of VSI + * + * returns a pointer to a VSI on success, NULL on failure. + */ +static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type) +{ + struct ice_vsi *vsi = NULL; + + /* Need to protect the allocation of the VSIs at the PF level */ + mutex_lock(&pf->sw_mutex); + + /* If we have already allocated our maximum number of VSIs, + * pf->next_vsi will be ICE_NO_VSI. 
If not, pf->next_vsi index + * is available to be populated + */ + if (pf->next_vsi == ICE_NO_VSI) { + dev_dbg(&pf->pdev->dev, "out of VSI slots!\n"); + goto unlock_pf; + } + + vsi = devm_kzalloc(&pf->pdev->dev, sizeof(*vsi), GFP_KERNEL); + if (!vsi) + goto unlock_pf; + + vsi->type = type; + vsi->back = pf; + set_bit(__ICE_DOWN, vsi->state); + vsi->idx = pf->next_vsi; + vsi->work_lmt = ICE_DFLT_IRQ_WORK; + + ice_vsi_set_num_qs(vsi); + + switch (vsi->type) { + case ICE_VSI_PF: + if (ice_vsi_alloc_arrays(vsi, true)) + goto err_rings; + + /* Setup default MSIX irq handler for VSI */ + vsi->irq_handler = ice_msix_clean_rings; + break; + case ICE_VSI_VF: + if (ice_vsi_alloc_arrays(vsi, true)) + goto err_rings; + break; + default: + dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type); + goto unlock_pf; + } + + /* fill VSI slot in the PF struct */ + pf->vsi[pf->next_vsi] = vsi; + + /* prepare pf->next_vsi for next use */ + pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi, + pf->next_vsi); + goto unlock_pf; + +err_rings: + devm_kfree(&pf->pdev->dev, vsi); + vsi = NULL; +unlock_pf: + mutex_unlock(&pf->sw_mutex); + return vsi; +} + +/** + * ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI + * @vsi: the VSI getting queues + * + * Return 0 on success and a negative value on error + */ +static int ice_vsi_get_qs_contig(struct ice_vsi *vsi) +{ + struct ice_pf *pf = vsi->back; + int offset, ret = 0; + + mutex_lock(&pf->avail_q_mutex); + /* look for contiguous block of queues for Tx */ + offset = bitmap_find_next_zero_area(pf->avail_txqs, ICE_MAX_TXQS, + 0, vsi->alloc_txq, 0); + if (offset < ICE_MAX_TXQS) { + int i; + + bitmap_set(pf->avail_txqs, offset, vsi->alloc_txq); + for (i = 0; i < vsi->alloc_txq; i++) + vsi->txq_map[i] = i + offset; + } else { + ret = -ENOMEM; + vsi->tx_mapping_mode = ICE_VSI_MAP_SCATTER; + } + + /* look for contiguous block of queues for Rx */ + offset = bitmap_find_next_zero_area(pf->avail_rxqs, ICE_MAX_RXQS, + 0, vsi->alloc_rxq, 0); + if (offset < ICE_MAX_RXQS) { + int i; + + bitmap_set(pf->avail_rxqs, offset, vsi->alloc_rxq); + for (i = 0; i < vsi->alloc_rxq; i++) + vsi->rxq_map[i] = i + offset; + } else { + ret = -ENOMEM; + vsi->rx_mapping_mode = ICE_VSI_MAP_SCATTER; + } + mutex_unlock(&pf->avail_q_mutex); + + return ret; +} + +/** + * ice_vsi_get_qs_scatter - Assign scattered queues to VSI + * @vsi: the VSI getting queues + * + * Return 0 on success and a negative value on error + */ +static int ice_vsi_get_qs_scatter(struct ice_vsi *vsi) +{ + struct ice_pf *pf = vsi->back; + int i, index = 0; + + mutex_lock(&pf->avail_q_mutex); + + if (vsi->tx_mapping_mode == ICE_VSI_MAP_SCATTER) { + for (i = 0; i < vsi->alloc_txq; i++) { + index = find_next_zero_bit(pf->avail_txqs, + ICE_MAX_TXQS, index); + if (index < ICE_MAX_TXQS) { + set_bit(index, pf->avail_txqs); + vsi->txq_map[i] = index; + } else { + goto err_scatter_tx; + } + } + } + + if (vsi->rx_mapping_mode == ICE_VSI_MAP_SCATTER) { + for (i = 0; i < vsi->alloc_rxq; i++) { + index = find_next_zero_bit(pf->avail_rxqs, + ICE_MAX_RXQS, index); + if (index < ICE_MAX_RXQS) { + set_bit(index, pf->avail_rxqs); + vsi->rxq_map[i] = index; + } else { + goto err_scatter_rx; + } + } + } + + mutex_unlock(&pf->avail_q_mutex); + return 0; + +err_scatter_rx: + /* unflag any queues we have grabbed (i is failed position) */ + for (index = 0; index < i; index++) { + clear_bit(vsi->rxq_map[index], pf->avail_rxqs); + vsi->rxq_map[index] = 0; + } + i = vsi->alloc_txq; +err_scatter_tx: + /* i is either position
of failed attempt or vsi->alloc_txq */ + for (index = 0; index < i; index++) { + clear_bit(vsi->txq_map[index], pf->avail_txqs); + vsi->txq_map[index] = 0; + } + + mutex_unlock(&pf->avail_q_mutex); + return -ENOMEM; +} + +/** + * ice_vsi_get_qs - Assign queues from PF to VSI + * @vsi: the VSI to assign queues to + * + * Returns 0 on success and a negative value on error + */ +static int ice_vsi_get_qs(struct ice_vsi *vsi) +{ + int ret = 0; + + vsi->tx_mapping_mode = ICE_VSI_MAP_CONTIG; + vsi->rx_mapping_mode = ICE_VSI_MAP_CONTIG; + + /* NOTE: ice_vsi_get_qs_contig() will set the Rx/Tx mapping + * modes individually to scatter if assigning contiguous queues + * to Rx or Tx fails + */ + ret = ice_vsi_get_qs_contig(vsi); + if (ret < 0) { + if (vsi->tx_mapping_mode == ICE_VSI_MAP_SCATTER) + vsi->alloc_txq = max_t(u16, vsi->alloc_txq, + ICE_MAX_SCATTER_TXQS); + if (vsi->rx_mapping_mode == ICE_VSI_MAP_SCATTER) + vsi->alloc_rxq = max_t(u16, vsi->alloc_rxq, + ICE_MAX_SCATTER_RXQS); + ret = ice_vsi_get_qs_scatter(vsi); + } + + return ret; +} + +/** + * ice_vsi_put_qs - Release queues from VSI to PF + * @vsi: the VSI that is going to release queues + */ +void ice_vsi_put_qs(struct ice_vsi *vsi) +{ + struct ice_pf *pf = vsi->back; + int i; + + mutex_lock(&pf->avail_q_mutex); + + for (i = 0; i < vsi->alloc_txq; i++) { + clear_bit(vsi->txq_map[i], pf->avail_txqs); + vsi->txq_map[i] = ICE_INVAL_Q_INDEX; + } + + for (i = 0; i < vsi->alloc_rxq; i++) { + clear_bit(vsi->rxq_map[i], pf->avail_rxqs); + vsi->rxq_map[i] = ICE_INVAL_Q_INDEX; + } + + mutex_unlock(&pf->avail_q_mutex); +} + +/** + * ice_rss_clean - Delete RSS related VSI structures that hold user inputs + * @vsi: the VSI being removed + */ +static void ice_rss_clean(struct ice_vsi *vsi) +{ + struct ice_pf *pf; + + pf = vsi->back; + + if (vsi->rss_hkey_user) + devm_kfree(&pf->pdev->dev, vsi->rss_hkey_user); + if (vsi->rss_lut_user) + devm_kfree(&pf->pdev->dev, vsi->rss_lut_user); +} + +/** + * ice_vsi_set_rss_params - Setup RSS capabilities per VSI type + * @vsi: the VSI being configured + */ +static void ice_vsi_set_rss_params(struct ice_vsi *vsi) +{ + struct ice_hw_common_caps *cap; + struct ice_pf *pf = vsi->back; + + if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { + vsi->rss_size = 1; + return; + } + + cap = &pf->hw.func_caps.common_cap; + switch (vsi->type) { + case ICE_VSI_PF: + /* PF VSI will inherit RSS instance of PF */ + vsi->rss_table_size = cap->rss_table_size; + vsi->rss_size = min_t(int, num_online_cpus(), + BIT(cap->rss_table_entry_width)); + vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF; + break; + case ICE_VSI_VF: + /* VF VSI will get a small RSS table + * For VSI_LUT, LUT size should be set to 64 bytes + */ + vsi->rss_table_size = ICE_VSIQF_HLUT_ARRAY_SIZE; + vsi->rss_size = min_t(int, num_online_cpus(), + BIT(cap->rss_table_entry_width)); + vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI; + break; + default: + dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", + vsi->type); + break; + } +} + +/** + * ice_set_dflt_vsi_ctx - Set default VSI context before adding a VSI + * @ctxt: the VSI context being set + * + * This initializes a default VSI context for all sections except the Queues.
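A worked instance of the RSS sizing rule above, with illustrative capability values:

	/* e.g. 16 online CPUs and rss_table_entry_width == 2:
	 * BIT(2) == 4, so the VSI spreads RSS over min(16, 4) = 4 queues
	 */
	vsi->rss_size = min_t(int, num_online_cpus(),
			      BIT(cap->rss_table_entry_width));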
+ */ +static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt) +{ + u32 table = 0; + + memset(&ctxt->info, 0, sizeof(ctxt->info)); + /* VSIs should be allocated from the shared pool */ + ctxt->alloc_from_pool = true; + /* Src pruning enabled by default */ + ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE; + /* Traffic from VSI can be sent to LAN */ + ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA; + /* By default bits 3 and 4 in vlan_flags are 0's which results in legacy + * behavior (show VLAN, DEI, and UP) in the descriptor. Also, allow all + * packets untagged/tagged. + */ + ctxt->info.vlan_flags = ((ICE_AQ_VSI_VLAN_MODE_ALL & + ICE_AQ_VSI_VLAN_MODE_M) >> + ICE_AQ_VSI_VLAN_MODE_S); + /* Have 1:1 UP mapping for both ingress/egress tables */ + table |= ICE_UP_TABLE_TRANSLATE(0, 0); + table |= ICE_UP_TABLE_TRANSLATE(1, 1); + table |= ICE_UP_TABLE_TRANSLATE(2, 2); + table |= ICE_UP_TABLE_TRANSLATE(3, 3); + table |= ICE_UP_TABLE_TRANSLATE(4, 4); + table |= ICE_UP_TABLE_TRANSLATE(5, 5); + table |= ICE_UP_TABLE_TRANSLATE(6, 6); + table |= ICE_UP_TABLE_TRANSLATE(7, 7); + ctxt->info.ingress_table = cpu_to_le32(table); + ctxt->info.egress_table = cpu_to_le32(table); + /* Have 1:1 UP mapping for outer to inner UP table */ + ctxt->info.outer_up_table = cpu_to_le32(table); + /* No outer tag support, so outer_tag_flags remains zero */ +} + +/** + * ice_vsi_setup_q_map - Setup a VSI queue map + * @vsi: the VSI being configured + * @ctxt: VSI context structure + */ +static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) +{ + u16 offset = 0, qmap = 0, numq_tc; + u16 pow = 0, max_rss = 0, qcount; + u16 qcount_tx = vsi->alloc_txq; + u16 qcount_rx = vsi->alloc_rxq; + bool ena_tc0 = false; + int i; + + /* at least TC0 should be enabled by default */ + if (vsi->tc_cfg.numtc) { + if (!(vsi->tc_cfg.ena_tc & BIT(0))) + ena_tc0 = true; + } else { + ena_tc0 = true; + } + + if (ena_tc0) { + vsi->tc_cfg.numtc++; + vsi->tc_cfg.ena_tc |= 1; + } + + numq_tc = qcount_rx / vsi->tc_cfg.numtc; + + /* TC mapping is a function of the number of Rx queues assigned to the + * VSI for each traffic class and the offset of these queues. + * The first 10 bits are the queue offset for TC0, the next 4 bits the + * number of queues allocated to TC0. The number of queues is a power of 2. + * + * If a TC is not enabled, its queue offset is set to 0 and one queue is + * allocated; this way, traffic for that TC will be sent to the default + * queue.
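A worked instance of the queue-map encoding described above (values are illustrative): TC0 enabled with 4 queues at offset 0, so order_base_2(4) == 2 goes into the Q_NUM field:

	u16 qmap = ((0 << ICE_AQ_VSI_TC_Q_OFFSET_S) &
		    ICE_AQ_VSI_TC_Q_OFFSET_M) |
		   ((2 << ICE_AQ_VSI_TC_Q_NUM_S) &
		    ICE_AQ_VSI_TC_Q_NUM_M);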
+ * + * Setup number and offset of Rx queues for all TCs for the VSI + */ + + qcount = numq_tc; + /* qcount will change if RSS is enabled */ + if (test_bit(ICE_FLAG_RSS_ENA, vsi->back->flags)) { + if (vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF) { + if (vsi->type == ICE_VSI_PF) + max_rss = ICE_MAX_LG_RSS_QS; + else + max_rss = ICE_MAX_SMALL_RSS_QS; + qcount = min_t(int, numq_tc, max_rss); + qcount = min_t(int, qcount, vsi->rss_size); + } + } + + /* find the (rounded-up) base-2 exponent of qcount */ + pow = order_base_2(qcount); + + for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) { + if (!(vsi->tc_cfg.ena_tc & BIT(i))) { + /* TC is not enabled */ + vsi->tc_cfg.tc_info[i].qoffset = 0; + vsi->tc_cfg.tc_info[i].qcount = 1; + ctxt->info.tc_mapping[i] = 0; + continue; + } + + /* TC is enabled */ + vsi->tc_cfg.tc_info[i].qoffset = offset; + vsi->tc_cfg.tc_info[i].qcount = qcount; + + qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) & + ICE_AQ_VSI_TC_Q_OFFSET_M) | + ((pow << ICE_AQ_VSI_TC_Q_NUM_S) & + ICE_AQ_VSI_TC_Q_NUM_M); + offset += qcount; + ctxt->info.tc_mapping[i] = cpu_to_le16(qmap); + } + + vsi->num_txq = qcount_tx; + vsi->num_rxq = offset; + + if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) { + dev_dbg(&vsi->back->pdev->dev, "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n"); + /* since there is a chance that num_rxq could have been changed + * in the above for loop, make num_txq equal to num_rxq. + */ + vsi->num_txq = vsi->num_rxq; + } + + /* Rx queue mapping */ + ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG); + /* q_mapping buffer holds the info for the first queue allocated for + * this VSI in the PF space and also the number of queues associated + * with this VSI. + */ + ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]); + ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq); +} + +/** + * ice_set_rss_vsi_ctx - Set RSS VSI context before adding a VSI + * @ctxt: the VSI context being set + * @vsi: the VSI being configured + */ +static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi) +{ + u8 lut_type, hash_type; + + switch (vsi->type) { + case ICE_VSI_PF: + /* PF VSI will inherit RSS instance of PF */ + lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF; + hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ; + break; + case ICE_VSI_VF: + /* VF VSI will get a small RSS table, which is a VSI LUT type */ + lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI; + hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ; + break; + default: + dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n", + vsi->type); + return; + } + + ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) & + ICE_AQ_VSI_Q_OPT_RSS_LUT_M) | + ((hash_type << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) & + ICE_AQ_VSI_Q_OPT_RSS_HASH_M); +} + +/** + * ice_vsi_init - Create and initialize a VSI + * @vsi: the VSI being configured + * + * This initializes a VSI context depending on the VSI type to be added and + * passes it down to the add_vsi aq command to create a new VSI.
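The 1:1 user-priority tables built in ice_set_dflt_vsi_ctx() above could equally be expressed as a loop; a sketch:

	/* equivalent to the eight explicit ICE_UP_TABLE_TRANSLATE() lines */
	u32 table = 0;
	int up;

	for (up = 0; up < 8; up++)
		table |= ICE_UP_TABLE_TRANSLATE(up, up);

The unrolled form in the patch simply makes each UP-to-UP mapping explicit.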
+ */ +static int ice_vsi_init(struct ice_vsi *vsi) +{ + struct ice_vsi_ctx ctxt = { 0 }; + struct ice_pf *pf = vsi->back; + struct ice_hw *hw = &pf->hw; + int ret = 0; + + switch (vsi->type) { + case ICE_VSI_PF: + ctxt.flags = ICE_AQ_VSI_TYPE_PF; + break; + case ICE_VSI_VF: + ctxt.flags = ICE_AQ_VSI_TYPE_VF; + /* VF number here is the absolute VF number (0-255) */ + ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id; + break; + default: + return -ENODEV; + } + + ice_set_dflt_vsi_ctx(&ctxt); + /* if the switch is in VEB mode, allow VSI loopback */ + if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB) + ctxt.info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB; + + /* Set LUT type and HASH type if RSS is enabled */ + if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) + ice_set_rss_vsi_ctx(&ctxt, vsi); + + ctxt.info.sw_id = vsi->port_info->sw_id; + ice_vsi_setup_q_map(vsi, &ctxt); + + ret = ice_add_vsi(hw, vsi->idx, &ctxt, NULL); + if (ret) { + dev_err(&pf->pdev->dev, + "Add VSI failed, err %d\n", ret); + return -EIO; + } + + /* keep context for update VSI operations */ + vsi->info = ctxt.info; + + /* record VSI number returned */ + vsi->vsi_num = ctxt.vsi_num; + + return ret; +} + +/** + * ice_free_q_vector - Free memory allocated for a specific interrupt vector + * @vsi: VSI having the memory freed + * @v_idx: index of the vector to be freed + */ +static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx) +{ + struct ice_q_vector *q_vector; + struct ice_ring *ring; + + if (!vsi->q_vectors[v_idx]) { + dev_dbg(&vsi->back->pdev->dev, "Queue vector at index %d not found\n", + v_idx); + return; + } + q_vector = vsi->q_vectors[v_idx]; + + ice_for_each_ring(ring, q_vector->tx) + ring->q_vector = NULL; + ice_for_each_ring(ring, q_vector->rx) + ring->q_vector = NULL; + + /* only VSI with an associated netdev is set up with NAPI */ + if (vsi->netdev) + netif_napi_del(&q_vector->napi); + + devm_kfree(&vsi->back->pdev->dev, q_vector); + vsi->q_vectors[v_idx] = NULL; +} + +/** + * ice_vsi_free_q_vectors - Free memory allocated for interrupt vectors + * @vsi: the VSI having memory freed + */ +void ice_vsi_free_q_vectors(struct ice_vsi *vsi) +{ + int v_idx; + + for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) + ice_free_q_vector(vsi, v_idx); +} + +/** + * ice_vsi_alloc_q_vector - Allocate memory for a single interrupt vector + * @vsi: the VSI being configured + * @v_idx: index of the vector in the VSI struct + * + * We allocate one q_vector. If allocation fails we return -ENOMEM. + */ +static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx) +{ + struct ice_pf *pf = vsi->back; + struct ice_q_vector *q_vector; + + /* allocate q_vector */ + q_vector = devm_kzalloc(&pf->pdev->dev, sizeof(*q_vector), GFP_KERNEL); + if (!q_vector) + return -ENOMEM; + + q_vector->vsi = vsi; + q_vector->v_idx = v_idx; + if (vsi->type == ICE_VSI_VF) + goto out; + /* only set affinity_mask if the CPU is online */ + if (cpu_online(v_idx)) + cpumask_set_cpu(v_idx, &q_vector->affinity_mask); + + /* This will not be called in the driver load path because the netdev + * will not be created yet. All other cases will register the NAPI + * handler here (i.e. resume, reset/rebuild, etc.)
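For context, a hypothetical sketch of how the per-vector handler stored in vsi->irq_handler might be wired up at interrupt-enable time (the IRQ number and name here are illustrative; the actual request happens elsewhere in the driver):

	err = devm_request_irq(&pf->pdev->dev, irq_num, vsi->irq_handler,
			       0, "ice-q-vector", q_vector);
	if (err)
		return err;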
+ */ + if (vsi->netdev) + netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll, + NAPI_POLL_WEIGHT); + +out: + /* tie q_vector and VSI together */ + vsi->q_vectors[v_idx] = q_vector; + + return 0; +} + +/** + * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors + * @vsi: the VSI being configured + * + * We allocate one q_vector per queue interrupt. If allocation fails we + * return -ENOMEM. + */ +static int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi) +{ + struct ice_pf *pf = vsi->back; + int v_idx = 0, num_q_vectors; + int err; + + if (vsi->q_vectors[0]) { + dev_dbg(&pf->pdev->dev, "VSI %d has existing q_vectors\n", + vsi->vsi_num); + return -EEXIST; + } + + if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { + num_q_vectors = vsi->num_q_vectors; + } else { + err = -EINVAL; + goto err_out; + } + + for (v_idx = 0; v_idx < num_q_vectors; v_idx++) { + err = ice_vsi_alloc_q_vector(vsi, v_idx); + if (err) + goto err_out; + } + + return 0; + +err_out: + while (v_idx--) + ice_free_q_vector(vsi, v_idx); + + dev_err(&pf->pdev->dev, + "Failed to allocate %d q_vector for VSI %d, ret=%d\n", + vsi->num_q_vectors, vsi->vsi_num, err); + vsi->num_q_vectors = 0; + return err; +} + +/** + * ice_vsi_setup_vector_base - Set up the base vector for the given VSI + * @vsi: ptr to the VSI + * + * This should only be called after ice_vsi_alloc() which allocates the + * corresponding SW VSI structure and initializes num_queue_pairs for the + * newly allocated VSI. + * + * Returns 0 on success or negative on failure + */ +static int ice_vsi_setup_vector_base(struct ice_vsi *vsi) +{ + struct ice_pf *pf = vsi->back; + int num_q_vectors = 0; + + if (vsi->sw_base_vector || vsi->hw_base_vector) { + dev_dbg(&pf->pdev->dev, "VSI %d has non-zero HW base vector %d or SW base vector %d\n", + vsi->vsi_num, vsi->hw_base_vector, vsi->sw_base_vector); + return -EEXIST; + } + + if (!test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) + return -ENOENT; + + switch (vsi->type) { + case ICE_VSI_PF: + num_q_vectors = vsi->num_q_vectors; + /* reserve slots from OS requested IRQs */ + vsi->sw_base_vector = ice_get_res(pf, pf->sw_irq_tracker, + num_q_vectors, vsi->idx); + if (vsi->sw_base_vector < 0) { + dev_err(&pf->pdev->dev, + "Failed to get tracking for %d SW vectors for VSI %d, err=%d\n", + num_q_vectors, vsi->vsi_num, + vsi->sw_base_vector); + return -ENOENT; + } + pf->num_avail_sw_msix -= num_q_vectors; + + /* reserve slots from HW interrupts */ + vsi->hw_base_vector = ice_get_res(pf, pf->hw_irq_tracker, + num_q_vectors, vsi->idx); + break; + case ICE_VSI_VF: + /* take VF misc vector and data vectors into account */ + num_q_vectors = pf->num_vf_msix; + /* For VF VSI, reserve slots only from HW interrupts */ + vsi->hw_base_vector = ice_get_res(pf, pf->hw_irq_tracker, + num_q_vectors, vsi->idx); + break; + default: + dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n", + vsi->type); + break; + } + + if (vsi->hw_base_vector < 0) { + dev_err(&pf->pdev->dev, + "Failed to get tracking for %d HW vectors for VSI %d, err=%d\n", + num_q_vectors, vsi->vsi_num, vsi->hw_base_vector); + if (vsi->type != ICE_VSI_VF) { + ice_free_res(vsi->back->sw_irq_tracker, + vsi->sw_base_vector, vsi->idx); + pf->num_avail_sw_msix += num_q_vectors; + } + return -ENOENT; + } + + pf->num_avail_hw_msix -= num_q_vectors; + + return 0; +} + +/** + * ice_vsi_clear_rings - Deallocates the Tx and Rx rings for VSI + * @vsi: the VSI having rings deallocated + */ +static void ice_vsi_clear_rings(struct ice_vsi *vsi) +{ + int i; + + if (vsi->tx_rings) { + for (i = 0; 
i < vsi->alloc_txq; i++) { + if (vsi->tx_rings[i]) { + kfree_rcu(vsi->tx_rings[i], rcu); + vsi->tx_rings[i] = NULL; + } + } + } + if (vsi->rx_rings) { + for (i = 0; i < vsi->alloc_rxq; i++) { + if (vsi->rx_rings[i]) { + kfree_rcu(vsi->rx_rings[i], rcu); + vsi->rx_rings[i] = NULL; + } + } + } +} + +/** + * ice_vsi_alloc_rings - Allocates Tx and Rx rings for the VSI + * @vsi: VSI which is having rings allocated + */ +static int ice_vsi_alloc_rings(struct ice_vsi *vsi) +{ + struct ice_pf *pf = vsi->back; + int i; + + /* Allocate tx_rings */ + for (i = 0; i < vsi->alloc_txq; i++) { + struct ice_ring *ring; + + /* allocate with kzalloc(), free with kfree_rcu() */ + ring = kzalloc(sizeof(*ring), GFP_KERNEL); + + if (!ring) + goto err_out; + + ring->q_index = i; + ring->reg_idx = vsi->txq_map[i]; + ring->ring_active = false; + ring->vsi = vsi; + ring->dev = &pf->pdev->dev; + ring->count = vsi->num_desc; + vsi->tx_rings[i] = ring; + } + + /* Allocate rx_rings */ + for (i = 0; i < vsi->alloc_rxq; i++) { + struct ice_ring *ring; + + /* allocate with kzalloc(), free with kfree_rcu() */ + ring = kzalloc(sizeof(*ring), GFP_KERNEL); + if (!ring) + goto err_out; + + ring->q_index = i; + ring->reg_idx = vsi->rxq_map[i]; + ring->ring_active = false; + ring->vsi = vsi; + ring->netdev = vsi->netdev; + ring->dev = &pf->pdev->dev; + ring->count = vsi->num_desc; + vsi->rx_rings[i] = ring; + } + + return 0; + +err_out: + ice_vsi_clear_rings(vsi); + return -ENOMEM; +} + +/** + * ice_vsi_map_rings_to_vectors - Map VSI rings to interrupt vectors + * @vsi: the VSI being configured + * + * This function maps descriptor rings to the queue-specific vectors allotted + * through the MSI-X enabling code. On a constrained vector budget, we map Tx + * and Rx rings to the vector as "efficiently" as possible. 
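A worked example of the DIV_ROUND_UP() distribution implemented below, for 5 Tx rings over 3 vectors:

	/* v0: DIV_ROUND_UP(5, 3) = 2 rings  (3 remain)
	 * v1: DIV_ROUND_UP(3, 2) = 2 rings  (1 remains)
	 * v2: DIV_ROUND_UP(1, 1) = 1 ring
	 * every vector ends up within one ring of an even share
	 */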
+ */ +static void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi) +{ + int q_vectors = vsi->num_q_vectors; + int tx_rings_rem, rx_rings_rem; + int v_id; + + /* initially assign the remaining ring counts from the VSI's queue counts */ + tx_rings_rem = vsi->num_txq; + rx_rings_rem = vsi->num_rxq; + + for (v_id = 0; v_id < q_vectors; v_id++) { + struct ice_q_vector *q_vector = vsi->q_vectors[v_id]; + int tx_rings_per_v, rx_rings_per_v, q_id, q_base; + + /* Tx rings mapping to vector */ + tx_rings_per_v = DIV_ROUND_UP(tx_rings_rem, q_vectors - v_id); + q_vector->num_ring_tx = tx_rings_per_v; + q_vector->tx.ring = NULL; + q_vector->tx.itr_idx = ICE_TX_ITR; + q_base = vsi->num_txq - tx_rings_rem; + + for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) { + struct ice_ring *tx_ring = vsi->tx_rings[q_id]; + + tx_ring->q_vector = q_vector; + tx_ring->next = q_vector->tx.ring; + q_vector->tx.ring = tx_ring; + } + tx_rings_rem -= tx_rings_per_v; + + /* Rx rings mapping to vector */ + rx_rings_per_v = DIV_ROUND_UP(rx_rings_rem, q_vectors - v_id); + q_vector->num_ring_rx = rx_rings_per_v; + q_vector->rx.ring = NULL; + q_vector->rx.itr_idx = ICE_RX_ITR; + q_base = vsi->num_rxq - rx_rings_rem; + + for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) { + struct ice_ring *rx_ring = vsi->rx_rings[q_id]; + + rx_ring->q_vector = q_vector; + rx_ring->next = q_vector->rx.ring; + q_vector->rx.ring = rx_ring; + } + rx_rings_rem -= rx_rings_per_v; + } +} + +/** + * ice_vsi_manage_rss_lut - disable/enable RSS + * @vsi: the VSI being changed + * @ena: boolean value indicating if this is an enable or disable request + * + * On a disable request for RSS, this function will zero out the RSS + * LUT, while on an enable request for RSS, it will reconfigure the RSS + * LUT.
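ice_fill_rss_lut() itself is not part of this hunk; a plausible round-robin fill, consistent with how the LUT is sized and consumed here, would be:

	static void ice_fill_rss_lut_sketch(u8 *lut, u16 lut_size, u16 rss_size)
	{
		u16 i;

		for (i = 0; i < lut_size; i++)
			lut[i] = i % rss_size;	/* spread entries evenly */
	}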
+ */ +int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena) +{ + int err = 0; + u8 *lut; + + lut = devm_kzalloc(&vsi->back->pdev->dev, vsi->rss_table_size, + GFP_KERNEL); + if (!lut) + return -ENOMEM; + + if (ena) { + if (vsi->rss_lut_user) + memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size); + else + ice_fill_rss_lut(lut, vsi->rss_table_size, + vsi->rss_size); + } + + err = ice_set_rss(vsi, NULL, lut, vsi->rss_table_size); + devm_kfree(&vsi->back->pdev->dev, lut); + return err; +} + +/** + * ice_vsi_cfg_rss_lut_key - Configure RSS params for a VSI + * @vsi: VSI to be configured + */ +static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi) +{ + u8 seed[ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE]; + struct ice_aqc_get_set_rss_keys *key; + struct ice_pf *pf = vsi->back; + enum ice_status status; + int err = 0; + u8 *lut; + + vsi->rss_size = min_t(int, vsi->rss_size, vsi->num_rxq); + + lut = devm_kzalloc(&pf->pdev->dev, vsi->rss_table_size, GFP_KERNEL); + if (!lut) + return -ENOMEM; + + if (vsi->rss_lut_user) + memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size); + else + ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size); + + status = ice_aq_set_rss_lut(&pf->hw, vsi->idx, vsi->rss_lut_type, lut, + vsi->rss_table_size); + + if (status) { + dev_err(&vsi->back->pdev->dev, + "set_rss_lut failed, error %d\n", status); + err = -EIO; + goto ice_vsi_cfg_rss_exit; + } + + key = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*key), GFP_KERNEL); + if (!key) { + err = -ENOMEM; + goto ice_vsi_cfg_rss_exit; + } + + if (vsi->rss_hkey_user) + memcpy(seed, vsi->rss_hkey_user, + ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE); + else + netdev_rss_key_fill((void *)seed, + ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE); + memcpy(&key->standard_rss_key, seed, + ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE); + + status = ice_aq_set_rss_key(&pf->hw, vsi->idx, key); + + if (status) { + dev_err(&vsi->back->pdev->dev, "set_rss_key failed, error %d\n", + status); + err = -EIO; + } + + devm_kfree(&pf->pdev->dev, key); +ice_vsi_cfg_rss_exit: + devm_kfree(&pf->pdev->dev, lut); + return err; +} + +/** + * ice_add_mac_to_list - Add a MAC address filter entry to the list + * @vsi: the VSI to be forwarded to + * @add_list: pointer to the list which contains MAC filter entries + * @macaddr: the MAC address to be added. + * + * Adds a MAC address filter entry to the temporary list + * + * Returns 0 on success or ENOMEM on failure.
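A hypothetical caller pattern for the filter-list helpers that follow (ice_add_mac() is the switch-layer consumer assumed here, and mac is an illustrative address):

	LIST_HEAD(add_list);

	/* build a one-entry temporary list, program it, then free it */
	if (!ice_add_mac_to_list(vsi, &add_list, mac))
		ice_add_mac(&pf->hw, &add_list);
	ice_free_fltr_list(&pf->pdev->dev, &add_list);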
+ */ +int ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list, + const u8 *macaddr) +{ + struct ice_fltr_list_entry *tmp; + struct ice_pf *pf = vsi->back; + + tmp = devm_kzalloc(&pf->pdev->dev, sizeof(*tmp), GFP_ATOMIC); + if (!tmp) + return -ENOMEM; + + tmp->fltr_info.flag = ICE_FLTR_TX; + tmp->fltr_info.src_id = ICE_SRC_ID_VSI; + tmp->fltr_info.lkup_type = ICE_SW_LKUP_MAC; + tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI; + tmp->fltr_info.vsi_handle = vsi->idx; + ether_addr_copy(tmp->fltr_info.l_data.mac.mac_addr, macaddr); + + INIT_LIST_HEAD(&tmp->list_entry); + list_add(&tmp->list_entry, add_list); + + return 0; +} + +/** + * ice_update_eth_stats - Update VSI-specific ethernet statistics counters + * @vsi: the VSI to be updated + */ +void ice_update_eth_stats(struct ice_vsi *vsi) +{ + struct ice_eth_stats *prev_es, *cur_es; + struct ice_hw *hw = &vsi->back->hw; + u16 vsi_num = vsi->vsi_num; /* HW absolute index of a VSI */ + + prev_es = &vsi->eth_stats_prev; + cur_es = &vsi->eth_stats; + + ice_stat_update40(hw, GLV_GORCH(vsi_num), GLV_GORCL(vsi_num), + vsi->stat_offsets_loaded, &prev_es->rx_bytes, + &cur_es->rx_bytes); + + ice_stat_update40(hw, GLV_UPRCH(vsi_num), GLV_UPRCL(vsi_num), + vsi->stat_offsets_loaded, &prev_es->rx_unicast, + &cur_es->rx_unicast); + + ice_stat_update40(hw, GLV_MPRCH(vsi_num), GLV_MPRCL(vsi_num), + vsi->stat_offsets_loaded, &prev_es->rx_multicast, + &cur_es->rx_multicast); + + ice_stat_update40(hw, GLV_BPRCH(vsi_num), GLV_BPRCL(vsi_num), + vsi->stat_offsets_loaded, &prev_es->rx_broadcast, + &cur_es->rx_broadcast); + + ice_stat_update32(hw, GLV_RDPC(vsi_num), vsi->stat_offsets_loaded, + &prev_es->rx_discards, &cur_es->rx_discards); + + ice_stat_update40(hw, GLV_GOTCH(vsi_num), GLV_GOTCL(vsi_num), + vsi->stat_offsets_loaded, &prev_es->tx_bytes, + &cur_es->tx_bytes); + + ice_stat_update40(hw, GLV_UPTCH(vsi_num), GLV_UPTCL(vsi_num), + vsi->stat_offsets_loaded, &prev_es->tx_unicast, + &cur_es->tx_unicast); + + ice_stat_update40(hw, GLV_MPTCH(vsi_num), GLV_MPTCL(vsi_num), + vsi->stat_offsets_loaded, &prev_es->tx_multicast, + &cur_es->tx_multicast); + + ice_stat_update40(hw, GLV_BPTCH(vsi_num), GLV_BPTCL(vsi_num), + vsi->stat_offsets_loaded, &prev_es->tx_broadcast, + &cur_es->tx_broadcast); + + ice_stat_update32(hw, GLV_TEPC(vsi_num), vsi->stat_offsets_loaded, + &prev_es->tx_errors, &cur_es->tx_errors); + + vsi->stat_offsets_loaded = true; +} + +/** + * ice_free_fltr_list - free filter lists helper + * @dev: pointer to the device struct + * @h: pointer to the list head to be freed + * + * Helper function to free filter lists previously created using + * ice_add_mac_to_list + */ +void ice_free_fltr_list(struct device *dev, struct list_head *h) +{ + struct ice_fltr_list_entry *e, *tmp; + + list_for_each_entry_safe(e, tmp, h, list_entry) { + list_del(&e->list_entry); + devm_kfree(dev, e); + } +} + +/** + * ice_vsi_add_vlan - Add VSI membership for given VLAN + * @vsi: the VSI being configured + * @vid: VLAN id to be added + */ +int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid) +{ + struct ice_fltr_list_entry *tmp; + struct ice_pf *pf = vsi->back; + LIST_HEAD(tmp_add_list); + enum ice_status status; + int err = 0; + + tmp = devm_kzalloc(&pf->pdev->dev, sizeof(*tmp), GFP_KERNEL); + if (!tmp) + return -ENOMEM; + + tmp->fltr_info.lkup_type = ICE_SW_LKUP_VLAN; + tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI; + tmp->fltr_info.flag = ICE_FLTR_TX; + tmp->fltr_info.src_id = ICE_SRC_ID_VSI; + tmp->fltr_info.vsi_handle = vsi->idx; + tmp->fltr_info.l_data.vlan.vlan_id = vid; + 
+ INIT_LIST_HEAD(&tmp->list_entry); + list_add(&tmp->list_entry, &tmp_add_list); + + status = ice_add_vlan(&pf->hw, &tmp_add_list); + if (status) { + err = -ENODEV; + dev_err(&pf->pdev->dev, "Failure Adding VLAN %d on VSI %i\n", + vid, vsi->vsi_num); + } + + ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list); + return err; +} + +/** + * ice_vsi_kill_vlan - Remove VSI membership for a given VLAN + * @vsi: the VSI being configured + * @vid: VLAN id to be removed + * + * Returns 0 on success and negative on failure + */ +int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid) +{ + struct ice_fltr_list_entry *list; + struct ice_pf *pf = vsi->back; + LIST_HEAD(tmp_add_list); + int status = 0; + + list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL); + if (!list) + return -ENOMEM; + + list->fltr_info.lkup_type = ICE_SW_LKUP_VLAN; + list->fltr_info.vsi_handle = vsi->idx; + list->fltr_info.fltr_act = ICE_FWD_TO_VSI; + list->fltr_info.l_data.vlan.vlan_id = vid; + list->fltr_info.flag = ICE_FLTR_TX; + list->fltr_info.src_id = ICE_SRC_ID_VSI; + + INIT_LIST_HEAD(&list->list_entry); + list_add(&list->list_entry, &tmp_add_list); + + if (ice_remove_vlan(&pf->hw, &tmp_add_list)) { + dev_err(&pf->pdev->dev, "Error removing VLAN %d on vsi %i\n", + vid, vsi->vsi_num); + status = -EIO; + } + + ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list); + return status; +} + +/** + * ice_vsi_cfg_rxqs - Configure the VSI for Rx + * @vsi: the VSI being configured + * + * Return 0 on success and a negative value on error + * Configure the Rx VSI for operation. + */ +int ice_vsi_cfg_rxqs(struct ice_vsi *vsi) +{ + int err = 0; + u16 i; + + if (vsi->type == ICE_VSI_VF) + goto setup_rings; + + if (vsi->netdev && vsi->netdev->mtu > ETH_DATA_LEN) + vsi->max_frame = vsi->netdev->mtu + + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + else + vsi->max_frame = ICE_RXBUF_2048; + + vsi->rx_buf_len = ICE_RXBUF_2048; +setup_rings: + /* set up individual rings */ + for (i = 0; i < vsi->num_rxq && !err; i++) + err = ice_setup_rx_ctx(vsi->rx_rings[i]); + + if (err) { + dev_err(&vsi->back->pdev->dev, "ice_setup_rx_ctx failed\n"); + return -EIO; + } + return err; +} + +/** + * ice_vsi_cfg_txqs - Configure the VSI for Tx + * @vsi: the VSI being configured + * + * Return 0 on success and a negative value on error + * Configure the Tx VSI for operation. + */ +int ice_vsi_cfg_txqs(struct ice_vsi *vsi) +{ + struct ice_aqc_add_tx_qgrp *qg_buf; + struct ice_aqc_add_txqs_perq *txq; + struct ice_pf *pf = vsi->back; + enum ice_status status; + u16 buf_len, i, pf_q; + int err = 0, tc = 0; + u8 num_q_grps; + + buf_len = sizeof(struct ice_aqc_add_tx_qgrp); + qg_buf = devm_kzalloc(&pf->pdev->dev, buf_len, GFP_KERNEL); + if (!qg_buf) + return -ENOMEM; + + if (vsi->num_txq > ICE_MAX_TXQ_PER_TXQG) { + err = -EINVAL; + goto err_cfg_txqs; + } + qg_buf->num_txqs = 1; + num_q_grps = 1; + + /* set up and configure the Tx queues */ + ice_for_each_txq(vsi, i) { + struct ice_tlan_ctx tlan_ctx = { 0 }; + + pf_q = vsi->txq_map[i]; + ice_setup_tx_ctx(vsi->tx_rings[i], &tlan_ctx, pf_q); + /* copy context contents into the qg_buf */ + qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q); + ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx, + ice_tlan_ctx_info); + + /* init queue specific tail reg. It is referred to as the + * transmit comm scheduler queue doorbell.
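For the MTU handling in ice_vsi_cfg_rxqs() above, a quick worked example: a 9000-byte MTU yields max_frame = 9000 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 9022 bytes.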
+ */
+ vsi->tx_rings[i]->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
+ status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc,
+ num_q_grps, qg_buf, buf_len, NULL);
+ if (status) {
+ dev_err(&vsi->back->pdev->dev,
+ "Failed to set LAN Tx queue context, error: %d\n",
+ status);
+ err = -ENODEV;
+ goto err_cfg_txqs;
+ }
+
+ /* Add the Tx Queue TEID into the VSI Tx ring from the response.
+ * This will complete configuring and enabling the queue.
+ */
+ txq = &qg_buf->txqs[0];
+ if (pf_q == le16_to_cpu(txq->txq_id))
+ vsi->tx_rings[i]->txq_teid =
+ le32_to_cpu(txq->q_teid);
+ }
+err_cfg_txqs:
+ devm_kfree(&pf->pdev->dev, qg_buf);
+ return err;
+}
+
+/**
+ * ice_intrl_usec_to_reg - convert interrupt rate limit to register value
+ * @intrl: interrupt rate limit in usecs
+ * @gran: interrupt rate limit granularity in usecs
+ *
+ * This function converts a decimal interrupt rate limit in usecs to the format
+ * expected by firmware.
+ */
+static u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran)
+{
+ u32 val = intrl / gran;
+
+ if (val)
+ return val | GLINT_RATE_INTRL_ENA_M;
+ return 0;
+}
+
+/**
+ * ice_cfg_itr - configure the initial interrupt throttle values
+ * @hw: pointer to the HW structure
+ * @q_vector: interrupt vector that's being configured
+ * @vector: HW vector index to apply the interrupt throttling to
+ *
+ * Configure interrupt throttling values for the ring containers that are
+ * associated with the interrupt vector passed in.
+ */
+static void
+ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector, u16 vector)
+{
+ u8 itr_gran = hw->itr_gran;
+
+ if (q_vector->num_ring_rx) {
+ struct ice_ring_container *rc = &q_vector->rx;
+
+ rc->itr = ITR_TO_REG(ICE_DFLT_RX_ITR, itr_gran);
+ rc->latency_range = ICE_LOW_LATENCY;
+ wr32(hw, GLINT_ITR(rc->itr_idx, vector), rc->itr);
+ }
+
+ if (q_vector->num_ring_tx) {
+ struct ice_ring_container *rc = &q_vector->tx;
+
+ rc->itr = ITR_TO_REG(ICE_DFLT_TX_ITR, itr_gran);
+ rc->latency_range = ICE_LOW_LATENCY;
+ wr32(hw, GLINT_ITR(rc->itr_idx, vector), rc->itr);
+ }
+}
+
+/**
+ * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW
+ * @vsi: the VSI being configured
+ */
+void ice_vsi_cfg_msix(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ u16 vector = vsi->hw_base_vector;
+ struct ice_hw *hw = &pf->hw;
+ u32 txq = 0, rxq = 0;
+ int i, q;
+
+ for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[i];
+
+ ice_cfg_itr(hw, q_vector, vector);
+
+ wr32(hw, GLINT_RATE(vector),
+ ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran));
+
+ /* Both the Transmit Queue Interrupt Cause Control and the
+ * Receive Queue Interrupt Cause Control registers expect the
+ * MSIX_INDX field to be the vector index within the function
+ * space, not the absolute vector index across the PF or
+ * across the device.
+ * For SR-IOV VF VSIs the queue vector index always starts
+ * with 1 since the first vector index (0) is used for OICR
+ * in VF space. Since VMDq and other PF VSIs are within
+ * the PF function space, use the vector index that is
+ * tracked for this PF.
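+ * (Editor's note: a compact model of the index selection made in
+ * the two loops below, hypothetical helper:
+ *
+ *	static u16 msix_indx(bool is_vf, int q_vec_idx, u16 pf_vector)
+ *	{
+ *		return is_vf ? q_vec_idx + 1 : pf_vector;
+ *	}
+ *
+ * so VF queue vector i maps to i + 1 in VF space, index 0 being
+ * reserved for the OICR, while PF queues use the vector index
+ * tracked in the PF's function space.)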
+ */
+ for (q = 0; q < q_vector->num_ring_tx; q++) {
+ int itr_idx = q_vector->tx.itr_idx;
+ u32 val;
+
+ if (vsi->type == ICE_VSI_VF)
+ val = QINT_TQCTL_CAUSE_ENA_M |
+ (itr_idx << QINT_TQCTL_ITR_INDX_S) |
+ ((i + 1) << QINT_TQCTL_MSIX_INDX_S);
+ else
+ val = QINT_TQCTL_CAUSE_ENA_M |
+ (itr_idx << QINT_TQCTL_ITR_INDX_S) |
+ (vector << QINT_TQCTL_MSIX_INDX_S);
+ wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);
+ txq++;
+ }
+
+ for (q = 0; q < q_vector->num_ring_rx; q++) {
+ int itr_idx = q_vector->rx.itr_idx;
+ u32 val;
+
+ if (vsi->type == ICE_VSI_VF)
+ val = QINT_RQCTL_CAUSE_ENA_M |
+ (itr_idx << QINT_RQCTL_ITR_INDX_S) |
+ ((i + 1) << QINT_RQCTL_MSIX_INDX_S);
+ else
+ val = QINT_RQCTL_CAUSE_ENA_M |
+ (itr_idx << QINT_RQCTL_ITR_INDX_S) |
+ (vector << QINT_RQCTL_MSIX_INDX_S);
+ wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);
+ rxq++;
+ }
+ }
+
+ ice_flush(hw);
+}
+
+/**
+ * ice_vsi_manage_vlan_insertion - Manage VLAN insertion for the VSI for Tx
+ * @vsi: the VSI being changed
+ */
+int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
+{
+ struct device *dev = &vsi->back->pdev->dev;
+ struct ice_hw *hw = &vsi->back->hw;
+ struct ice_vsi_ctx ctxt = { 0 };
+ enum ice_status status;
+
+ /* Here we are configuring the VSI to let the driver add VLAN tags by
+ * setting vlan_flags to ICE_AQ_VSI_VLAN_MODE_ALL. The actual VLAN tag
+ * insertion happens in the Tx hot path, in ice_tx_map.
+ */
+ ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;
+
+ ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
+
+ status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
+ if (status) {
+ dev_err(dev, "update VSI for VLAN insert failed, err %d aq_err %d\n",
+ status, hw->adminq.sq_last_status);
+ return -EIO;
+ }
+
+ vsi->info.vlan_flags = ctxt.info.vlan_flags;
+ return 0;
+}
+
+/**
+ * ice_vsi_manage_vlan_stripping - Manage VLAN stripping for the VSI for Rx
+ * @vsi: the VSI being changed
+ * @ena: boolean value indicating if this is an enable or disable request
+ */
+int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
+{
+ struct device *dev = &vsi->back->pdev->dev;
+ struct ice_hw *hw = &vsi->back->hw;
+ struct ice_vsi_ctx ctxt = { 0 };
+ enum ice_status status;
+
+ /* Here we are configuring what the VSI should do with the VLAN tag in
+ * the Rx packet. We can either leave the tag in the packet or put it in
+ * the Rx descriptor.
+ */
+ if (ena) {
+ /* Strip VLAN tag from Rx packet and put it in the desc */
+ ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH;
+ } else {
+ /* Disable stripping. 
Leave tag in packet */ + ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING; + } + + /* Allow all packets untagged/tagged */ + ctxt.info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL; + + ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); + + status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL); + if (status) { + dev_err(dev, "update VSI for VLAN strip failed, ena = %d err %d aq_err %d\n", + ena, status, hw->adminq.sq_last_status); + return -EIO; + } + + vsi->info.vlan_flags = ctxt.info.vlan_flags; + return 0; +} + +/** + * ice_vsi_start_rx_rings - start VSI's Rx rings + * @vsi: the VSI whose rings are to be started + * + * Returns 0 on success and a negative value on error + */ +int ice_vsi_start_rx_rings(struct ice_vsi *vsi) +{ + return ice_vsi_ctrl_rx_rings(vsi, true); +} + +/** + * ice_vsi_stop_rx_rings - stop VSI's Rx rings + * @vsi: the VSI + * + * Returns 0 on success and a negative value on error + */ +int ice_vsi_stop_rx_rings(struct ice_vsi *vsi) +{ + return ice_vsi_ctrl_rx_rings(vsi, false); +} + +/** + * ice_vsi_stop_tx_rings - Disable Tx rings + * @vsi: the VSI being configured + * @rst_src: reset source + * @rel_vmvf_num: Relative id of VF/VM + */ +int ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, + u16 rel_vmvf_num) +{ + struct ice_pf *pf = vsi->back; + struct ice_hw *hw = &pf->hw; + enum ice_status status; + u32 *q_teids, val; + u16 *q_ids, i; + int err = 0; + + if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS) + return -EINVAL; + + q_teids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_teids), + GFP_KERNEL); + if (!q_teids) + return -ENOMEM; + + q_ids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_ids), + GFP_KERNEL); + if (!q_ids) { + err = -ENOMEM; + goto err_alloc_q_ids; + } + + /* set up the Tx queue list to be disabled */ + ice_for_each_txq(vsi, i) { + u16 v_idx; + + if (!vsi->tx_rings || !vsi->tx_rings[i]) { + err = -EINVAL; + goto err_out; + } + + q_ids[i] = vsi->txq_map[i]; + q_teids[i] = vsi->tx_rings[i]->txq_teid; + + /* clear cause_ena bit for disabled queues */ + val = rd32(hw, QINT_TQCTL(vsi->tx_rings[i]->reg_idx)); + val &= ~QINT_TQCTL_CAUSE_ENA_M; + wr32(hw, QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val); + + /* software is expected to wait for 100 ns */ + ndelay(100); + + /* trigger a software interrupt for the vector associated to + * the queue to schedule NAPI handler + */ + v_idx = vsi->tx_rings[i]->q_vector->v_idx; + wr32(hw, GLINT_DYN_CTL(vsi->hw_base_vector + v_idx), + GLINT_DYN_CTL_SWINT_TRIG_M | GLINT_DYN_CTL_INTENA_MSK_M); + } + status = ice_dis_vsi_txq(vsi->port_info, vsi->num_txq, q_ids, q_teids, + rst_src, rel_vmvf_num, NULL); + /* if the disable queue command was exercised during an active reset + * flow, ICE_ERR_RESET_ONGOING is returned. This is not an error as + * the reset operation disables queues at the hardware level anyway. + */ + if (status == ICE_ERR_RESET_ONGOING) { + dev_info(&pf->pdev->dev, + "Reset in progress. 
LAN Tx queues already disabled\n"); + } else if (status) { + dev_err(&pf->pdev->dev, + "Failed to disable LAN Tx queues, error: %d\n", + status); + err = -ENODEV; + } + +err_out: + devm_kfree(&pf->pdev->dev, q_ids); + +err_alloc_q_ids: + devm_kfree(&pf->pdev->dev, q_teids); + + return err; +} + +/** + * ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI + * @vsi: VSI to enable or disable VLAN pruning on + * @ena: set to true to enable VLAN pruning and false to disable it + * + * returns 0 if VSI is updated, negative otherwise + */ +int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena) +{ + struct ice_vsi_ctx *ctxt; + struct device *dev; + int status; + + if (!vsi) + return -EINVAL; + + dev = &vsi->back->pdev->dev; + ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL); + if (!ctxt) + return -ENOMEM; + + ctxt->info = vsi->info; + + if (ena) { + ctxt->info.sec_flags |= + ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA << + ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S; + ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; + } else { + ctxt->info.sec_flags &= + ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA << + ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S); + ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; + } + + ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID | + ICE_AQ_VSI_PROP_SW_VALID); + + status = ice_update_vsi(&vsi->back->hw, vsi->idx, ctxt, NULL); + if (status) { + netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %d, aq_err = %d\n", + ena ? "Ena" : "Dis", vsi->idx, vsi->vsi_num, status, + vsi->back->hw.adminq.sq_last_status); + goto err_out; + } + + vsi->info.sec_flags = ctxt->info.sec_flags; + vsi->info.sw_flags2 = ctxt->info.sw_flags2; + + devm_kfree(dev, ctxt); + return 0; + +err_out: + devm_kfree(dev, ctxt); + return -EIO; +} + +/** + * ice_vsi_setup - Set up a VSI by a given type + * @pf: board private structure + * @pi: pointer to the port_info instance + * @type: VSI type + * @vf_id: defines VF id to which this VSI connects. This field is meant to be + * used only for ICE_VSI_VF VSI type. For other VSI types, should + * fill-in ICE_INVAL_VFID as input. + * + * This allocates the sw VSI structure and its queue resources. + * + * Returns pointer to the successfully allocated and configured VSI sw struct on + * success, NULL on failure. + */ +struct ice_vsi * +ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, + enum ice_vsi_type type, u16 vf_id) +{ + u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; + struct device *dev = &pf->pdev->dev; + struct ice_vsi *vsi; + int ret, i; + + vsi = ice_vsi_alloc(pf, type); + if (!vsi) { + dev_err(dev, "could not allocate VSI\n"); + return NULL; + } + + vsi->port_info = pi; + vsi->vsw = pf->first_sw; + if (vsi->type == ICE_VSI_VF) + vsi->vf_id = vf_id; + + if (ice_vsi_get_qs(vsi)) { + dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n", + vsi->idx); + goto unroll_get_qs; + } + + /* set RSS capabilities */ + ice_vsi_set_rss_params(vsi); + + /* create the VSI */ + ret = ice_vsi_init(vsi); + if (ret) + goto unroll_get_qs; + + switch (vsi->type) { + case ICE_VSI_PF: + ret = ice_vsi_alloc_q_vectors(vsi); + if (ret) + goto unroll_vsi_init; + + ret = ice_vsi_setup_vector_base(vsi); + if (ret) + goto unroll_alloc_q_vector; + + ret = ice_vsi_alloc_rings(vsi); + if (ret) + goto unroll_vector_base; + + ice_vsi_map_rings_to_vectors(vsi); + + /* Do not exit if configuring RSS had an issue, at least + * receive traffic on first queue. 
Hence no need to capture + * return value + */ + if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) + ice_vsi_cfg_rss_lut_key(vsi); + break; + case ICE_VSI_VF: + /* VF driver will take care of creating netdev for this type and + * map queues to vectors through Virtchnl, PF driver only + * creates a VSI and corresponding structures for bookkeeping + * purpose + */ + ret = ice_vsi_alloc_q_vectors(vsi); + if (ret) + goto unroll_vsi_init; + + ret = ice_vsi_alloc_rings(vsi); + if (ret) + goto unroll_alloc_q_vector; + + /* Setup Vector base only during VF init phase or when VF asks + * for more vectors than assigned number. In all other cases, + * assign hw_base_vector to the value given earlier. + */ + if (test_bit(ICE_VF_STATE_CFG_INTR, pf->vf[vf_id].vf_states)) { + ret = ice_vsi_setup_vector_base(vsi); + if (ret) + goto unroll_vector_base; + } else { + vsi->hw_base_vector = pf->vf[vf_id].first_vector_idx; + } + pf->q_left_tx -= vsi->alloc_txq; + pf->q_left_rx -= vsi->alloc_rxq; + break; + default: + /* if VSI type is not recognized, clean up the resources and + * exit + */ + goto unroll_vsi_init; + } + + ice_vsi_set_tc_cfg(vsi); + + /* configure VSI nodes based on number of queues and TC's */ + for (i = 0; i < vsi->tc_cfg.numtc; i++) + max_txqs[i] = vsi->num_txq; + + ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, + max_txqs); + if (ret) { + dev_info(&pf->pdev->dev, "Failed VSI lan queue config\n"); + goto unroll_vector_base; + } + + return vsi; + +unroll_vector_base: + /* reclaim SW interrupts back to the common pool */ + ice_free_res(vsi->back->sw_irq_tracker, vsi->sw_base_vector, vsi->idx); + pf->num_avail_sw_msix += vsi->num_q_vectors; + /* reclaim HW interrupt back to the common pool */ + ice_free_res(vsi->back->hw_irq_tracker, vsi->hw_base_vector, vsi->idx); + pf->num_avail_hw_msix += vsi->num_q_vectors; +unroll_alloc_q_vector: + ice_vsi_free_q_vectors(vsi); +unroll_vsi_init: + ice_vsi_delete(vsi); +unroll_get_qs: + ice_vsi_put_qs(vsi); + pf->q_left_tx += vsi->alloc_txq; + pf->q_left_rx += vsi->alloc_rxq; + ice_vsi_clear(vsi); + + return NULL; +} + +/** + * ice_vsi_release_msix - Clear the queue to Interrupt mapping in HW + * @vsi: the VSI being cleaned up + */ +static void ice_vsi_release_msix(struct ice_vsi *vsi) +{ + struct ice_pf *pf = vsi->back; + u16 vector = vsi->hw_base_vector; + struct ice_hw *hw = &pf->hw; + u32 txq = 0; + u32 rxq = 0; + int i, q; + + for (i = 0; i < vsi->num_q_vectors; i++, vector++) { + struct ice_q_vector *q_vector = vsi->q_vectors[i]; + + wr32(hw, GLINT_ITR(ICE_IDX_ITR0, vector), 0); + wr32(hw, GLINT_ITR(ICE_IDX_ITR1, vector), 0); + for (q = 0; q < q_vector->num_ring_tx; q++) { + wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0); + txq++; + } + + for (q = 0; q < q_vector->num_ring_rx; q++) { + wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), 0); + rxq++; + } + } + + ice_flush(hw); +} + +/** + * ice_vsi_free_irq - Free the IRQ association with the OS + * @vsi: the VSI being configured + */ +void ice_vsi_free_irq(struct ice_vsi *vsi) +{ + struct ice_pf *pf = vsi->back; + int base = vsi->sw_base_vector; + + if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { + int i; + + if (!vsi->q_vectors || !vsi->irqs_ready) + return; + + ice_vsi_release_msix(vsi); + if (vsi->type == ICE_VSI_VF) + return; + + vsi->irqs_ready = false; + for (i = 0; i < vsi->num_q_vectors; i++) { + u16 vector = i + base; + int irq_num; + + irq_num = pf->msix_entries[vector].vector; + + /* free only the irqs that were actually requested */ + if (!vsi->q_vectors[i] || + !(vsi->q_vectors[i]->num_ring_tx 
|| + vsi->q_vectors[i]->num_ring_rx)) + continue; + + /* clear the affinity notifier in the IRQ descriptor */ + irq_set_affinity_notifier(irq_num, NULL); + + /* clear the affinity_mask in the IRQ descriptor */ + irq_set_affinity_hint(irq_num, NULL); + synchronize_irq(irq_num); + devm_free_irq(&pf->pdev->dev, irq_num, + vsi->q_vectors[i]); + } + } +} + +/** + * ice_vsi_free_tx_rings - Free Tx resources for VSI queues + * @vsi: the VSI having resources freed + */ +void ice_vsi_free_tx_rings(struct ice_vsi *vsi) +{ + int i; + + if (!vsi->tx_rings) + return; + + ice_for_each_txq(vsi, i) + if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) + ice_free_tx_ring(vsi->tx_rings[i]); +} + +/** + * ice_vsi_free_rx_rings - Free Rx resources for VSI queues + * @vsi: the VSI having resources freed + */ +void ice_vsi_free_rx_rings(struct ice_vsi *vsi) +{ + int i; + + if (!vsi->rx_rings) + return; + + ice_for_each_rxq(vsi, i) + if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc) + ice_free_rx_ring(vsi->rx_rings[i]); +} + +/** + * ice_vsi_close - Shut down a VSI + * @vsi: the VSI being shut down + */ +void ice_vsi_close(struct ice_vsi *vsi) +{ + if (!test_and_set_bit(__ICE_DOWN, vsi->state)) + ice_down(vsi); + + ice_vsi_free_irq(vsi); + ice_vsi_free_tx_rings(vsi); + ice_vsi_free_rx_rings(vsi); +} + +/** + * ice_free_res - free a block of resources + * @res: pointer to the resource + * @index: starting index previously returned by ice_get_res + * @id: identifier to track owner + * + * Returns number of resources freed + */ +int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id) +{ + int count = 0; + int i; + + if (!res || index >= res->num_entries) + return -EINVAL; + + id |= ICE_RES_VALID_BIT; + for (i = index; i < res->num_entries && res->list[i] == id; i++) { + res->list[i] = 0; + count++; + } + + return count; +} + +/** + * ice_search_res - Search the tracker for a block of resources + * @res: pointer to the resource + * @needed: size of the block needed + * @id: identifier to track owner + * + * Returns the base item index of the block, or -ENOMEM for error + */ +static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id) +{ + int start = res->search_hint; + int end = start; + + if ((start + needed) > res->num_entries) + return -ENOMEM; + + id |= ICE_RES_VALID_BIT; + + do { + /* skip already allocated entries */ + if (res->list[end++] & ICE_RES_VALID_BIT) { + start = end; + if ((start + needed) > res->num_entries) + break; + } + + if (end == (start + needed)) { + int i = start; + + /* there was enough, so assign it to the requestor */ + while (i != end) + res->list[i++] = id; + + if (end == res->num_entries) + end = 0; + + res->search_hint = end; + return start; + } + } while (1); + + return -ENOMEM; +} + +/** + * ice_get_res - get a block of resources + * @pf: board private structure + * @res: pointer to the resource + * @needed: size of the block needed + * @id: identifier to track owner + * + * Returns the base item index of the block, or -ENOMEM for error + * The search_hint trick and lack of advanced fit-finding only works + * because we're highly likely to have all the same sized requests. + * Linear search time and any fragmentation should be minimal. 
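+ * (Editor's note: a self-contained model of the hint-based scan in
+ * ice_search_res(), standard C, hypothetical names:
+ *
+ *	static int search(const u16 *list, int n, int needed, int hint)
+ *	{
+ *		for (int s = hint; s + needed <= n; s++) {
+ *			int i, avail = 1;
+ *
+ *			for (i = s; i < s + needed; i++)
+ *				if (list[i]) {
+ *					avail = 0;
+ *					s = i;	// resume past the hit
+ *					break;
+ *				}
+ *			if (avail)
+ *				return s;
+ *		}
+ *		return -1;	// caller retries with hint = 0
+ *	}
+ * )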
+ */
+int
+ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id)
+{
+ int ret;
+
+ if (!res || !pf)
+ return -EINVAL;
+
+ if (!needed || needed > res->num_entries || id >= ICE_RES_VALID_BIT) {
+ dev_err(&pf->pdev->dev,
+ "param err: needed=%d, num_entries=%d, id=0x%04x\n",
+ needed, res->num_entries, id);
+ return -EINVAL;
+ }
+
+ /* search based on search_hint */
+ ret = ice_search_res(res, needed, id);
+
+ if (ret < 0) {
+ /* previous search failed. Reset search hint and try again */
+ res->search_hint = 0;
+ ret = ice_search_res(res, needed, id);
+ }
+
+ return ret;
+}
+
+/**
+ * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI
+ * @vsi: the VSI being un-configured
+ */
+void ice_vsi_dis_irq(struct ice_vsi *vsi)
+{
+ int base = vsi->sw_base_vector;
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ u32 val;
+ int i;
+
+ /* disable interrupt causation from each queue */
+ if (vsi->tx_rings) {
+ ice_for_each_txq(vsi, i) {
+ if (vsi->tx_rings[i]) {
+ u16 reg;
+
+ reg = vsi->tx_rings[i]->reg_idx;
+ val = rd32(hw, QINT_TQCTL(reg));
+ val &= ~QINT_TQCTL_CAUSE_ENA_M;
+ wr32(hw, QINT_TQCTL(reg), val);
+ }
+ }
+ }
+
+ if (vsi->rx_rings) {
+ ice_for_each_rxq(vsi, i) {
+ if (vsi->rx_rings[i]) {
+ u16 reg;
+
+ reg = vsi->rx_rings[i]->reg_idx;
+ val = rd32(hw, QINT_RQCTL(reg));
+ val &= ~QINT_RQCTL_CAUSE_ENA_M;
+ wr32(hw, QINT_RQCTL(reg), val);
+ }
+ }
+ }
+
+ /* disable each interrupt */
+ if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
+ for (i = vsi->hw_base_vector;
+ i < (vsi->num_q_vectors + vsi->hw_base_vector); i++)
+ wr32(hw, GLINT_DYN_CTL(i), 0);
+
+ ice_flush(hw);
+ for (i = 0; i < vsi->num_q_vectors; i++)
+ synchronize_irq(pf->msix_entries[i + base].vector);
+ }
+}
+
+/**
+ * ice_vsi_release - Delete a VSI and free its resources
+ * @vsi: the VSI being removed
+ *
+ * Returns 0 on success or < 0 on error
+ */
+int ice_vsi_release(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf;
+ struct ice_vf *vf;
+
+ if (!vsi->back)
+ return -ENODEV;
+ pf = vsi->back;
+ vf = &pf->vf[vsi->vf_id];
+ /* do not unregister and free netdevs while the driver is in the reset
+ * recovery pending state. Since reset/rebuild happens through the PF
+ * service task workqueue, it's not a good idea to unregister a netdev
+ * that is associated with the PF that is running the work queue items
+ * currently. 
This is done to avoid
+ * a check_flush_dependency() warning on this workqueue.
+ */
+ if (vsi->netdev && !ice_is_reset_in_progress(pf->state)) {
+ unregister_netdev(vsi->netdev);
+ free_netdev(vsi->netdev);
+ vsi->netdev = NULL;
+ }
+
+ if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
+ ice_rss_clean(vsi);
+
+ /* Disable VSI and free resources */
+ ice_vsi_dis_irq(vsi);
+ ice_vsi_close(vsi);
+
+ /* reclaim interrupt vectors back to PF */
+ if (vsi->type != ICE_VSI_VF) {
+ /* reclaim SW interrupts back to the common pool */
+ ice_free_res(vsi->back->sw_irq_tracker, vsi->sw_base_vector,
+ vsi->idx);
+ pf->num_avail_sw_msix += vsi->num_q_vectors;
+ /* reclaim HW interrupts back to the common pool */
+ ice_free_res(vsi->back->hw_irq_tracker, vsi->hw_base_vector,
+ vsi->idx);
+ pf->num_avail_hw_msix += vsi->num_q_vectors;
+ } else if (test_bit(ICE_VF_STATE_CFG_INTR, vf->vf_states)) {
+ /* Reclaim VF resources back only while freeing all VFs or
+ * vector reassignment is requested
+ */
+ ice_free_res(vsi->back->hw_irq_tracker, vf->first_vector_idx,
+ vsi->idx);
+ pf->num_avail_hw_msix += pf->num_vf_msix;
+ }
+
+ ice_remove_vsi_fltr(&pf->hw, vsi->idx);
+ ice_vsi_delete(vsi);
+ ice_vsi_free_q_vectors(vsi);
+ ice_vsi_clear_rings(vsi);
+
+ ice_vsi_put_qs(vsi);
+ pf->q_left_tx += vsi->alloc_txq;
+ pf->q_left_rx += vsi->alloc_rxq;
+
+ /* retain the SW VSI data structure since it is needed to unregister
+ * and free the VSI netdev when the PF is not in the reset recovery
+ * pending state, e.g. during rmmod.
+ */
+ if (!ice_is_reset_in_progress(pf->state))
+ ice_vsi_clear(vsi);
+
+ return 0;
+}
+
+/**
+ * ice_vsi_rebuild - Rebuild VSI after reset
+ * @vsi: VSI to be rebuilt
+ *
+ * Returns 0 on success and negative value on failure
+ */
+int ice_vsi_rebuild(struct ice_vsi *vsi)
+{
+ u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
+ int ret, i;
+
+ if (!vsi)
+ return -EINVAL;
+
+ ice_vsi_free_q_vectors(vsi);
+ ice_free_res(vsi->back->sw_irq_tracker, vsi->sw_base_vector, vsi->idx);
+ ice_free_res(vsi->back->hw_irq_tracker, vsi->hw_base_vector, vsi->idx);
+ vsi->sw_base_vector = 0;
+ vsi->hw_base_vector = 0;
+ ice_vsi_clear_rings(vsi);
+ ice_vsi_free_arrays(vsi, false);
+ ice_vsi_set_num_qs(vsi);
+
+ /* Initialize VSI struct elements and create VSI in FW */
+ ret = ice_vsi_init(vsi);
+ if (ret < 0)
+ goto err_vsi;
+
+ ret = ice_vsi_alloc_arrays(vsi, false);
+ if (ret < 0)
+ goto err_vsi;
+
+ switch (vsi->type) {
+ case ICE_VSI_PF:
+ ret = ice_vsi_alloc_q_vectors(vsi);
+ if (ret)
+ goto err_rings;
+
+ ret = ice_vsi_setup_vector_base(vsi);
+ if (ret)
+ goto err_vectors;
+
+ ret = ice_vsi_alloc_rings(vsi);
+ if (ret)
+ goto err_vectors;
+
+ ice_vsi_map_rings_to_vectors(vsi);
+ break;
+ case ICE_VSI_VF:
+ ret = ice_vsi_alloc_q_vectors(vsi);
+ if (ret)
+ goto err_rings;
+
+ ret = ice_vsi_setup_vector_base(vsi);
+ if (ret)
+ goto err_vectors;
+
+ ret = ice_vsi_alloc_rings(vsi);
+ if (ret)
+ goto err_vectors;
+
+ vsi->back->q_left_tx -= vsi->alloc_txq;
+ vsi->back->q_left_rx -= vsi->alloc_rxq;
+ break;
+ default:
+ break;
+ }
+
+ ice_vsi_set_tc_cfg(vsi);
+
+ /* configure VSI nodes based on number of queues and TCs */
+ for (i = 0; i < vsi->tc_cfg.numtc; i++)
+ max_txqs[i] = vsi->num_txq;
+
+ ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
+ max_txqs);
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev,
+ "Failed VSI LAN queue config\n");
+ goto err_vectors;
+ }
+ return 0;
+
+err_vectors:
+ ice_vsi_free_q_vectors(vsi);
+err_rings:
+ if (vsi->netdev) {
+ vsi->current_netdev_flags = 0;
+ unregister_netdev(vsi->netdev);
+ 
free_netdev(vsi->netdev); + vsi->netdev = NULL; + } +err_vsi: + ice_vsi_clear(vsi); + set_bit(__ICE_RESET_FAILED, vsi->back->state); + return ret; +} + +/** + * ice_is_reset_in_progress - check for a reset in progress + * @state: pf state field + */ +bool ice_is_reset_in_progress(unsigned long *state) +{ + return test_bit(__ICE_RESET_OICR_RECV, state) || + test_bit(__ICE_PFR_REQ, state) || + test_bit(__ICE_CORER_REQ, state) || + test_bit(__ICE_GLOBR_REQ, state); +} diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h new file mode 100644 index 000000000000..677db40338f5 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_lib.h @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018, Intel Corporation. */ + +#ifndef _ICE_LIB_H_ +#define _ICE_LIB_H_ + +#include "ice.h" + +int ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list, + const u8 *macaddr); + +void ice_free_fltr_list(struct device *dev, struct list_head *h); + +void ice_update_eth_stats(struct ice_vsi *vsi); + +int ice_vsi_cfg_rxqs(struct ice_vsi *vsi); + +int ice_vsi_cfg_txqs(struct ice_vsi *vsi); + +void ice_vsi_cfg_msix(struct ice_vsi *vsi); + +int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid); + +int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid); + +int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi); + +int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena); + +int ice_vsi_start_rx_rings(struct ice_vsi *vsi); + +int ice_vsi_stop_rx_rings(struct ice_vsi *vsi); + +int ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, + u16 rel_vmvf_num); + +int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena); + +void ice_vsi_delete(struct ice_vsi *vsi); + +int ice_vsi_clear(struct ice_vsi *vsi); + +struct ice_vsi * +ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, + enum ice_vsi_type type, u16 vf_id); + +int ice_vsi_release(struct ice_vsi *vsi); + +void ice_vsi_close(struct ice_vsi *vsi); + +int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id); + +int +ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id); + +int ice_vsi_rebuild(struct ice_vsi *vsi); + +bool ice_is_reset_in_progress(unsigned long *state); + +void ice_vsi_free_q_vectors(struct ice_vsi *vsi); + +void ice_vsi_put_qs(struct ice_vsi *vsi); + +void ice_vsi_dis_irq(struct ice_vsi *vsi); + +void ice_vsi_free_irq(struct ice_vsi *vsi); + +void ice_vsi_free_rx_rings(struct ice_vsi *vsi); + +void ice_vsi_free_tx_rings(struct ice_vsi *vsi); + +int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc); + +int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena); + +irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data); +#endif /* !_ICE_LIB_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 3f047bb43348..8f61b375e768 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -6,8 +6,9 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include "ice.h" +#include "ice_lib.h" -#define DRV_VERSION "ice-0.7.0-k" +#define DRV_VERSION "0.7.2-k" #define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver" const char ice_drv_ver[] = DRV_VERSION; static const char ice_driver_string[] = DRV_SUMMARY; @@ -15,7 +16,7 @@ static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation."; MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); MODULE_DESCRIPTION(DRV_SUMMARY); 
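+/* Editor's note: per the MODULE_LICENSE idents documented in
+ * include/linux/module.h, "GPL" declares GPL v2 or later while
+ * "GPL v2" declares GPL v2 only; the change below brings the module
+ * tag in line with the file's SPDX-License-Identifier: GPL-2.0.
+ */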
-MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); MODULE_VERSION(DRV_VERSION); static int debug = -1; @@ -31,173 +32,84 @@ static const struct net_device_ops ice_netdev_ops; static void ice_pf_dis_all_vsi(struct ice_pf *pf); static void ice_rebuild(struct ice_pf *pf); -static int ice_vsi_release(struct ice_vsi *vsi); + +static void ice_vsi_release_all(struct ice_pf *pf); static void ice_update_vsi_stats(struct ice_vsi *vsi); static void ice_update_pf_stats(struct ice_pf *pf); /** - * ice_get_free_slot - get the next non-NULL location index in array - * @array: array to search - * @size: size of the array - * @curr: last known occupied index to be used as a search hint - * - * void * is being used to keep the functionality generic. This lets us use this - * function on any array of pointers. + * ice_get_tx_pending - returns number of Tx descriptors not processed + * @ring: the ring of descriptors */ -static int ice_get_free_slot(void *array, int size, int curr) +static u32 ice_get_tx_pending(struct ice_ring *ring) { - int **tmp_array = (int **)array; - int next; + u32 head, tail; - if (curr < (size - 1) && !tmp_array[curr + 1]) { - next = curr + 1; - } else { - int i = 0; + head = ring->next_to_clean; + tail = readl(ring->tail); - while ((i < size) && (tmp_array[i])) - i++; - if (i == size) - next = ICE_NO_VSI; - else - next = i; - } - return next; + if (head != tail) + return (head < tail) ? + tail - head : (tail + ring->count - head); + return 0; } /** - * ice_search_res - Search the tracker for a block of resources - * @res: pointer to the resource - * @needed: size of the block needed - * @id: identifier to track owner - * Returns the base item index of the block, or -ENOMEM for error + * ice_check_for_hang_subtask - check for and recover hung queues + * @pf: pointer to PF struct */ -static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id) +static void ice_check_for_hang_subtask(struct ice_pf *pf) { - int start = res->search_hint; - int end = start; - - id |= ICE_RES_VALID_BIT; + struct ice_vsi *vsi = NULL; + unsigned int i; + u32 v, v_idx; + int packets; - do { - /* skip already allocated entries */ - if (res->list[end++] & ICE_RES_VALID_BIT) { - start = end; - if ((start + needed) > res->num_entries) - break; + ice_for_each_vsi(pf, v) + if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) { + vsi = pf->vsi[v]; + break; } - if (end == (start + needed)) { - int i = start; + if (!vsi || test_bit(__ICE_DOWN, vsi->state)) + return; - /* there was enough, so assign it to the requestor */ - while (i != end) - res->list[i++] = id; + if (!(vsi->netdev && netif_carrier_ok(vsi->netdev))) + return; - if (end == res->num_entries) - end = 0; + for (i = 0; i < vsi->num_txq; i++) { + struct ice_ring *tx_ring = vsi->tx_rings[i]; + + if (tx_ring && tx_ring->desc) { + int itr = ICE_ITR_NONE; + + /* If packet counter has not changed the queue is + * likely stalled, so force an interrupt for this + * queue. + * + * prev_pkt would be negative if there was no + * pending work. 
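+ * (Editor's note: this is a two-pass detector; a condensed model,
+ * hypothetical names:
+ *
+ *	if (prev_pkt == packets)	// no progress since last scan
+ *		trigger_sw_interrupt();	// kick NAPI via GLINT_DYN_CTL
+ *	else
+ *		prev_pkt = pending ? packets : -1;
+ *
+ * where "pending" is the head/tail distance computed by
+ * ice_get_tx_pending(), i.e. for head != tail
+ *
+ *	head < tail ? tail - head : tail + count - head
+ *
+ * so an idle ring parks prev_pkt at -1 and is never falsely flagged.)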
+ */ + packets = tx_ring->stats.pkts & INT_MAX; + if (tx_ring->tx_stats.prev_pkt == packets) { + /* Trigger sw interrupt to revive the queue */ + v_idx = tx_ring->q_vector->v_idx; + wr32(&vsi->back->hw, + GLINT_DYN_CTL(vsi->hw_base_vector + v_idx), + (itr << GLINT_DYN_CTL_ITR_INDX_S) | + GLINT_DYN_CTL_SWINT_TRIG_M | + GLINT_DYN_CTL_INTENA_MSK_M); + continue; + } - res->search_hint = end; - return start; + /* Memory barrier between read of packet count and call + * to ice_get_tx_pending() + */ + smp_rmb(); + tx_ring->tx_stats.prev_pkt = + ice_get_tx_pending(tx_ring) ? packets : -1; } - } while (1); - - return -ENOMEM; -} - -/** - * ice_get_res - get a block of resources - * @pf: board private structure - * @res: pointer to the resource - * @needed: size of the block needed - * @id: identifier to track owner - * - * Returns the base item index of the block, or -ENOMEM for error - * The search_hint trick and lack of advanced fit-finding only works - * because we're highly likely to have all the same sized requests. - * Linear search time and any fragmentation should be minimal. - */ -static int -ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id) -{ - int ret; - - if (!res || !pf) - return -EINVAL; - - if (!needed || needed > res->num_entries || id >= ICE_RES_VALID_BIT) { - dev_err(&pf->pdev->dev, - "param err: needed=%d, num_entries = %d id=0x%04x\n", - needed, res->num_entries, id); - return -EINVAL; - } - - /* search based on search_hint */ - ret = ice_search_res(res, needed, id); - - if (ret < 0) { - /* previous search failed. Reset search hint and try again */ - res->search_hint = 0; - ret = ice_search_res(res, needed, id); } - - return ret; -} - -/** - * ice_free_res - free a block of resources - * @res: pointer to the resource - * @index: starting index previously returned by ice_get_res - * @id: identifier to track owner - * Returns number of resources freed - */ -static int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id) -{ - int count = 0; - int i; - - if (!res || index >= res->num_entries) - return -EINVAL; - - id |= ICE_RES_VALID_BIT; - for (i = index; i < res->num_entries && res->list[i] == id; i++) { - res->list[i] = 0; - count++; - } - - return count; -} - -/** - * ice_add_mac_to_list - Add a mac address filter entry to the list - * @vsi: the VSI to be forwarded to - * @add_list: pointer to the list which contains MAC filter entries - * @macaddr: the MAC address to be added. - * - * Adds mac address filter entry to the temp list - * - * Returns 0 on success or ENOMEM on failure. 
- */ -static int ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list, - const u8 *macaddr) -{ - struct ice_fltr_list_entry *tmp; - struct ice_pf *pf = vsi->back; - - tmp = devm_kzalloc(&pf->pdev->dev, sizeof(*tmp), GFP_ATOMIC); - if (!tmp) - return -ENOMEM; - - tmp->fltr_info.flag = ICE_FLTR_TX; - tmp->fltr_info.src = vsi->vsi_num; - tmp->fltr_info.lkup_type = ICE_SW_LKUP_MAC; - tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI; - tmp->fltr_info.fwd_id.vsi_id = vsi->vsi_num; - ether_addr_copy(tmp->fltr_info.l_data.mac.mac_addr, macaddr); - - INIT_LIST_HEAD(&tmp->list_entry); - list_add(&tmp->list_entry, add_list); - - return 0; } /** @@ -243,24 +155,6 @@ static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr) } /** - * ice_free_fltr_list - free filter lists helper - * @dev: pointer to the device struct - * @h: pointer to the list head to be freed - * - * Helper function to free filter lists previously created using - * ice_add_mac_to_list - */ -static void ice_free_fltr_list(struct device *dev, struct list_head *h) -{ - struct ice_fltr_list_entry *e, *tmp; - - list_for_each_entry_safe(e, tmp, h, list_entry) { - list_del(&e->list_entry); - devm_kfree(dev, e); - } -} - -/** * ice_vsi_fltr_changed - check if filter state changed * @vsi: VSI to be checked * @@ -359,7 +253,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) clear_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags); if (vsi->current_netdev_flags & IFF_PROMISC) { /* Apply TX filter rule to get traffic from VMs */ - status = ice_cfg_dflt_vsi(hw, vsi->vsi_num, true, + status = ice_cfg_dflt_vsi(hw, vsi->idx, true, ICE_FLTR_TX); if (status) { netdev_err(netdev, "Error setting default VSI %i tx rule\n", @@ -369,7 +263,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) goto out_promisc; } /* Apply RX filter rule to get traffic from wire */ - status = ice_cfg_dflt_vsi(hw, vsi->vsi_num, true, + status = ice_cfg_dflt_vsi(hw, vsi->idx, true, ICE_FLTR_RX); if (status) { netdev_err(netdev, "Error setting default VSI %i rx rule\n", @@ -380,7 +274,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) } } else { /* Clear TX filter rule to stop traffic from VMs */ - status = ice_cfg_dflt_vsi(hw, vsi->vsi_num, false, + status = ice_cfg_dflt_vsi(hw, vsi->idx, false, ICE_FLTR_TX); if (status) { netdev_err(netdev, "Error clearing default VSI %i tx rule\n", @@ -389,8 +283,8 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) err = -EIO; goto out_promisc; } - /* Clear filter RX to remove traffic from wire */ - status = ice_cfg_dflt_vsi(hw, vsi->vsi_num, false, + /* Clear RX filter to remove traffic from wire */ + status = ice_cfg_dflt_vsi(hw, vsi->idx, false, ICE_FLTR_RX); if (status) { netdev_err(netdev, "Error clearing default VSI %i rx rule\n", @@ -438,15 +332,6 @@ static void ice_sync_fltr_subtask(struct ice_pf *pf) } /** - * ice_is_reset_recovery_pending - schedule a reset - * @state: pf state field - */ -static bool ice_is_reset_recovery_pending(unsigned long int *state) -{ - return test_bit(__ICE_RESET_RECOVERY_PENDING, state); -} - -/** * ice_prepare_for_reset - prep for the core to reset * @pf: board private structure * @@ -456,23 +341,17 @@ static void ice_prepare_for_reset(struct ice_pf *pf) { struct ice_hw *hw = &pf->hw; - u32 v; - - ice_for_each_vsi(pf, v) - if (pf->vsi[v]) - ice_remove_vsi_fltr(hw, pf->vsi[v]->vsi_num); - dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n"); + /* Notify VFs of impending reset */ + if (ice_check_sq_alive(hw, &hw->mailboxq)) + ice_vc_notify_reset(pf); /* 
disable the VSIs and their queues that are not already DOWN */ - /* pf_dis_all_vsi modifies netdev structures -rtnl_lock needed */ ice_pf_dis_all_vsi(pf); - ice_for_each_vsi(pf, v) - if (pf->vsi[v]) - pf->vsi[v]->vsi_num = 0; - ice_shutdown_all_ctrlq(hw); + + set_bit(__ICE_PREPARED_FOR_RESET, pf->state); } /** @@ -489,27 +368,29 @@ static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type) dev_dbg(dev, "reset_type 0x%x requested\n", reset_type); WARN_ON(in_interrupt()); - /* PFR is a bit of a special case because it doesn't result in an OICR - * interrupt. So for PFR, we prepare for reset, issue the reset and - * rebuild sequentially. - */ - if (reset_type == ICE_RESET_PFR) { - set_bit(__ICE_RESET_RECOVERY_PENDING, pf->state); - ice_prepare_for_reset(pf); - } + ice_prepare_for_reset(pf); /* trigger the reset */ if (ice_reset(hw, reset_type)) { dev_err(dev, "reset %d failed\n", reset_type); set_bit(__ICE_RESET_FAILED, pf->state); - clear_bit(__ICE_RESET_RECOVERY_PENDING, pf->state); + clear_bit(__ICE_RESET_OICR_RECV, pf->state); + clear_bit(__ICE_PREPARED_FOR_RESET, pf->state); + clear_bit(__ICE_PFR_REQ, pf->state); + clear_bit(__ICE_CORER_REQ, pf->state); + clear_bit(__ICE_GLOBR_REQ, pf->state); return; } + /* PFR is a bit of a special case because it doesn't result in an OICR + * interrupt. So for PFR, rebuild after the reset and clear the reset- + * associated state bits. + */ if (reset_type == ICE_RESET_PFR) { pf->pfr_count++; ice_rebuild(pf); - clear_bit(__ICE_RESET_RECOVERY_PENDING, pf->state); + clear_bit(__ICE_PREPARED_FOR_RESET, pf->state); + clear_bit(__ICE_PFR_REQ, pf->state); } } @@ -519,48 +400,60 @@ static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type) */ static void ice_reset_subtask(struct ice_pf *pf) { - enum ice_reset_req reset_type; - - rtnl_lock(); + enum ice_reset_req reset_type = ICE_RESET_INVAL; /* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an - * OICR interrupt. The OICR handler (ice_misc_intr) determines what - * type of reset happened and sets __ICE_RESET_RECOVERY_PENDING bit in - * pf->state. So if reset/recovery is pending (as indicated by this bit) - * we do a rebuild and return. + * OICR interrupt. The OICR handler (ice_misc_intr) determines what type + * of reset is pending and sets bits in pf->state indicating the reset + * type and __ICE_RESET_OICR_RECV. So, if the latter bit is set + * prepare for pending reset if not already (for PF software-initiated + * global resets the software should already be prepared for it as + * indicated by __ICE_PREPARED_FOR_RESET; for global resets initiated + * by firmware or software on other PFs, that bit is not set so prepare + * for the reset now), poll for reset done, rebuild and return. */ - if (ice_is_reset_recovery_pending(pf->state)) { + if (test_bit(__ICE_RESET_OICR_RECV, pf->state)) { clear_bit(__ICE_GLOBR_RECV, pf->state); clear_bit(__ICE_CORER_RECV, pf->state); - ice_prepare_for_reset(pf); + if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) + ice_prepare_for_reset(pf); /* make sure we are ready to rebuild */ - if (ice_check_reset(&pf->hw)) + if (ice_check_reset(&pf->hw)) { set_bit(__ICE_RESET_FAILED, pf->state); - else + } else { + /* done with reset. 
start rebuild */ + pf->hw.reset_ongoing = false; ice_rebuild(pf); - clear_bit(__ICE_RESET_RECOVERY_PENDING, pf->state); - goto unlock; + /* clear bit to resume normal operations, but + * ICE_NEEDS_RESTART bit is set incase rebuild failed + */ + clear_bit(__ICE_RESET_OICR_RECV, pf->state); + clear_bit(__ICE_PREPARED_FOR_RESET, pf->state); + clear_bit(__ICE_PFR_REQ, pf->state); + clear_bit(__ICE_CORER_REQ, pf->state); + clear_bit(__ICE_GLOBR_REQ, pf->state); + } + + return; } /* No pending resets to finish processing. Check for new resets */ - if (test_and_clear_bit(__ICE_GLOBR_REQ, pf->state)) - reset_type = ICE_RESET_GLOBR; - else if (test_and_clear_bit(__ICE_CORER_REQ, pf->state)) - reset_type = ICE_RESET_CORER; - else if (test_and_clear_bit(__ICE_PFR_REQ, pf->state)) + if (test_bit(__ICE_PFR_REQ, pf->state)) reset_type = ICE_RESET_PFR; - else - goto unlock; + if (test_bit(__ICE_CORER_REQ, pf->state)) + reset_type = ICE_RESET_CORER; + if (test_bit(__ICE_GLOBR_REQ, pf->state)) + reset_type = ICE_RESET_GLOBR; + /* If no valid reset type requested just return */ + if (reset_type == ICE_RESET_INVAL) + return; - /* reset if not already down or resetting */ + /* reset if not already down or busy */ if (!test_bit(__ICE_DOWN, pf->state) && !test_bit(__ICE_CFG_BUSY, pf->state)) { ice_do_reset(pf, reset_type); } - -unlock: - rtnl_unlock(); } /** @@ -772,6 +665,8 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi) } } + ice_vc_notify_link_state(pf); + return 0; } @@ -822,6 +717,10 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) cq = &hw->adminq; qtype = "Admin"; break; + case ICE_CTL_Q_MAILBOX: + cq = &hw->mailboxq; + qtype = "Mailbox"; + break; default: dev_warn(&pf->pdev->dev, "Unknown control queue type 0x%x\n", q_type); @@ -903,6 +802,12 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) dev_err(&pf->pdev->dev, "Could not handle link event\n"); break; + case ice_mbx_opc_send_msg_to_pf: + ice_vc_process_vf_msg(pf, &event); + break; + case ice_aqc_opc_fw_logging: + ice_output_fw_log(hw, &event.desc, event.msg_buf); + break; default: dev_dbg(&pf->pdev->dev, "%s Receive Queue unknown event 0x%04x ignored\n", @@ -959,6 +864,28 @@ static void ice_clean_adminq_subtask(struct ice_pf *pf) } /** + * ice_clean_mailboxq_subtask - clean the MailboxQ rings + * @pf: board private structure + */ +static void ice_clean_mailboxq_subtask(struct ice_pf *pf) +{ + struct ice_hw *hw = &pf->hw; + + if (!test_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state)) + return; + + if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX)) + return; + + clear_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state); + + if (ice_ctrlq_pending(hw, &hw->mailboxq)) + __ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX); + + ice_flush(hw); +} + +/** * ice_service_task_schedule - schedule the service task to wake up * @pf: board private structure * @@ -966,8 +893,9 @@ static void ice_clean_adminq_subtask(struct ice_pf *pf) */ static void ice_service_task_schedule(struct ice_pf *pf) { - if (!test_bit(__ICE_DOWN, pf->state) && - !test_and_set_bit(__ICE_SERVICE_SCHED, pf->state)) + if (!test_bit(__ICE_SERVICE_DIS, pf->state) && + !test_and_set_bit(__ICE_SERVICE_SCHED, pf->state) && + !test_bit(__ICE_NEEDS_RESTART, pf->state)) queue_work(ice_wq, &pf->serv_task); } @@ -985,6 +913,22 @@ static void ice_service_task_complete(struct ice_pf *pf) } /** + * ice_service_task_stop - stop service task and cancel works + * @pf: board private structure + */ +static void ice_service_task_stop(struct ice_pf *pf) +{ + 
set_bit(__ICE_SERVICE_DIS, pf->state); + + if (pf->serv_tmr.function) + del_timer_sync(&pf->serv_tmr); + if (pf->serv_task.func) + cancel_work_sync(&pf->serv_task); + + clear_bit(__ICE_SERVICE_SCHED, pf->state); +} + +/** * ice_service_timer - timer callback to schedule service task * @t: pointer to timer_list */ @@ -997,6 +941,160 @@ static void ice_service_timer(struct timer_list *t) } /** + * ice_handle_mdd_event - handle malicious driver detect event + * @pf: pointer to the PF structure + * + * Called from service task. OICR interrupt handler indicates MDD event + */ +static void ice_handle_mdd_event(struct ice_pf *pf) +{ + struct ice_hw *hw = &pf->hw; + bool mdd_detected = false; + u32 reg; + int i; + + if (!test_bit(__ICE_MDD_EVENT_PENDING, pf->state)) + return; + + /* find what triggered the MDD event */ + reg = rd32(hw, GL_MDET_TX_PQM); + if (reg & GL_MDET_TX_PQM_VALID_M) { + u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >> + GL_MDET_TX_PQM_PF_NUM_S; + u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >> + GL_MDET_TX_PQM_VF_NUM_S; + u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >> + GL_MDET_TX_PQM_MAL_TYPE_S; + u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >> + GL_MDET_TX_PQM_QNUM_S); + + if (netif_msg_tx_err(pf)) + dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n", + event, queue, pf_num, vf_num); + wr32(hw, GL_MDET_TX_PQM, 0xffffffff); + mdd_detected = true; + } + + reg = rd32(hw, GL_MDET_TX_TCLAN); + if (reg & GL_MDET_TX_TCLAN_VALID_M) { + u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >> + GL_MDET_TX_TCLAN_PF_NUM_S; + u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >> + GL_MDET_TX_TCLAN_VF_NUM_S; + u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >> + GL_MDET_TX_TCLAN_MAL_TYPE_S; + u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >> + GL_MDET_TX_TCLAN_QNUM_S); + + if (netif_msg_rx_err(pf)) + dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n", + event, queue, pf_num, vf_num); + wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff); + mdd_detected = true; + } + + reg = rd32(hw, GL_MDET_RX); + if (reg & GL_MDET_RX_VALID_M) { + u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >> + GL_MDET_RX_PF_NUM_S; + u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >> + GL_MDET_RX_VF_NUM_S; + u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >> + GL_MDET_RX_MAL_TYPE_S; + u16 queue = ((reg & GL_MDET_RX_QNUM_M) >> + GL_MDET_RX_QNUM_S); + + if (netif_msg_rx_err(pf)) + dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n", + event, queue, pf_num, vf_num); + wr32(hw, GL_MDET_RX, 0xffffffff); + mdd_detected = true; + } + + if (mdd_detected) { + bool pf_mdd_detected = false; + + reg = rd32(hw, PF_MDET_TX_PQM); + if (reg & PF_MDET_TX_PQM_VALID_M) { + wr32(hw, PF_MDET_TX_PQM, 0xFFFF); + dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n"); + pf_mdd_detected = true; + } + + reg = rd32(hw, PF_MDET_TX_TCLAN); + if (reg & PF_MDET_TX_TCLAN_VALID_M) { + wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF); + dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n"); + pf_mdd_detected = true; + } + + reg = rd32(hw, PF_MDET_RX); + if (reg & PF_MDET_RX_VALID_M) { + wr32(hw, PF_MDET_RX, 0xFFFF); + dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n"); + pf_mdd_detected = true; + } + /* Queue belongs to the PF initiate a reset */ + if (pf_mdd_detected) { + set_bit(__ICE_NEEDS_RESTART, pf->state); + ice_service_task_schedule(pf); + } + } + + /* see if one of the VFs needs to be reset */ + for (i = 0; i < 
pf->num_alloc_vfs && mdd_detected; i++) { + struct ice_vf *vf = &pf->vf[i]; + + reg = rd32(hw, VP_MDET_TX_PQM(i)); + if (reg & VP_MDET_TX_PQM_VALID_M) { + wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF); + vf->num_mdd_events++; + dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n", + i); + } + + reg = rd32(hw, VP_MDET_TX_TCLAN(i)); + if (reg & VP_MDET_TX_TCLAN_VALID_M) { + wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF); + vf->num_mdd_events++; + dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n", + i); + } + + reg = rd32(hw, VP_MDET_TX_TDPU(i)); + if (reg & VP_MDET_TX_TDPU_VALID_M) { + wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF); + vf->num_mdd_events++; + dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n", + i); + } + + reg = rd32(hw, VP_MDET_RX(i)); + if (reg & VP_MDET_RX_VALID_M) { + wr32(hw, VP_MDET_RX(i), 0xFFFF); + vf->num_mdd_events++; + dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n", + i); + } + + if (vf->num_mdd_events > ICE_DFLT_NUM_MDD_EVENTS_ALLOWED) { + dev_info(&pf->pdev->dev, + "Too many MDD events on VF %d, disabled\n", i); + dev_info(&pf->pdev->dev, + "Use PF Control I/F to re-enable the VF\n"); + set_bit(ICE_VF_STATE_DIS, vf->vf_states); + } + } + + /* re-enable MDD interrupt cause */ + clear_bit(__ICE_MDD_EVENT_PENDING, pf->state); + reg = rd32(hw, PFINT_OICR_ENA); + reg |= PFINT_OICR_MAL_DETECT_M; + wr32(hw, PFINT_OICR_ENA, reg); + ice_flush(hw); +} + +/** * ice_service_task - manage and run subtasks * @work: pointer to work_struct contained by the PF struct */ @@ -1010,16 +1108,21 @@ static void ice_service_task(struct work_struct *work) /* process reset requests first */ ice_reset_subtask(pf); - /* bail if a reset/recovery cycle is pending */ - if (ice_is_reset_recovery_pending(pf->state) || - test_bit(__ICE_SUSPENDED, pf->state)) { + /* bail if a reset/recovery cycle is pending or rebuild failed */ + if (ice_is_reset_in_progress(pf->state) || + test_bit(__ICE_SUSPENDED, pf->state) || + test_bit(__ICE_NEEDS_RESTART, pf->state)) { ice_service_task_complete(pf); return; } + ice_check_for_hang_subtask(pf); ice_sync_fltr_subtask(pf); + ice_handle_mdd_event(pf); + ice_process_vflr_event(pf); ice_watchdog_subtask(pf); ice_clean_adminq_subtask(pf); + ice_clean_mailboxq_subtask(pf); /* Clear __ICE_SERVICE_SCHED flag to allow scheduling next event */ ice_service_task_complete(pf); @@ -1029,6 +1132,9 @@ static void ice_service_task(struct work_struct *work) * schedule the service task now. 
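+ * (Editor's note: mod_timer(&pf->serv_tmr, jiffies) arms the timer
+ * with an already-expired deadline, so it fires on the next timer
+ * tick and the service task is effectively rescheduled immediately;
+ * a generic model:
+ *
+ *	if (time_after(jiffies, start_time + period) || pending_events)
+ *		mod_timer(&timer, jiffies);	// fire as soon as possible
+ * )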
*/ if (time_after(jiffies, (start_time + pf->serv_tmr_period)) || + test_bit(__ICE_MDD_EVENT_PENDING, pf->state) || + test_bit(__ICE_VFLR_EVENT_PENDING, pf->state) || + test_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state) || test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state)) mod_timer(&pf->serv_tmr, jiffies); } @@ -1043,6 +1149,10 @@ static void ice_set_ctrlq_len(struct ice_hw *hw) hw->adminq.num_sq_entries = ICE_AQ_LEN; hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN; hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN; + hw->mailboxq.num_rq_entries = ICE_MBXQ_LEN; + hw->mailboxq.num_sq_entries = ICE_MBXQ_LEN; + hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN; + hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN; } /** @@ -1073,57 +1183,6 @@ static void ice_irq_affinity_notify(struct irq_affinity_notify *notify, static void ice_irq_affinity_release(struct kref __always_unused *ref) {} /** - * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI - * @vsi: the VSI being un-configured - */ -static void ice_vsi_dis_irq(struct ice_vsi *vsi) -{ - struct ice_pf *pf = vsi->back; - struct ice_hw *hw = &pf->hw; - int base = vsi->base_vector; - u32 val; - int i; - - /* disable interrupt causation from each queue */ - if (vsi->tx_rings) { - ice_for_each_txq(vsi, i) { - if (vsi->tx_rings[i]) { - u16 reg; - - reg = vsi->tx_rings[i]->reg_idx; - val = rd32(hw, QINT_TQCTL(reg)); - val &= ~QINT_TQCTL_CAUSE_ENA_M; - wr32(hw, QINT_TQCTL(reg), val); - } - } - } - - if (vsi->rx_rings) { - ice_for_each_rxq(vsi, i) { - if (vsi->rx_rings[i]) { - u16 reg; - - reg = vsi->rx_rings[i]->reg_idx; - val = rd32(hw, QINT_RQCTL(reg)); - val &= ~QINT_RQCTL_CAUSE_ENA_M; - wr32(hw, QINT_RQCTL(reg), val); - } - } - } - - /* disable each interrupt */ - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { - for (i = vsi->base_vector; - i < (vsi->num_q_vectors + vsi->base_vector); i++) - wr32(hw, GLINT_DYN_CTL(i), 0); - - ice_flush(hw); - for (i = 0; i < vsi->num_q_vectors; i++) - synchronize_irq(pf->msix_entries[i + base].vector); - } -} - -/** * ice_vsi_ena_irq - Enable IRQ for the given VSI * @vsi: the VSI being configured */ @@ -1144,26 +1203,6 @@ static int ice_vsi_ena_irq(struct ice_vsi *vsi) } /** - * ice_vsi_delete - delete a VSI from the switch - * @vsi: pointer to VSI being removed - */ -static void ice_vsi_delete(struct ice_vsi *vsi) -{ - struct ice_pf *pf = vsi->back; - struct ice_vsi_ctx ctxt; - enum ice_status status; - - ctxt.vsi_num = vsi->vsi_num; - - memcpy(&ctxt.info, &vsi->info, sizeof(struct ice_aqc_vsi_props)); - - status = ice_aq_free_vsi(&pf->hw, &ctxt, false, NULL); - if (status) - dev_err(&pf->pdev->dev, "Failed to delete VSI %i in FW\n", - vsi->vsi_num); -} - -/** * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI * @vsi: the VSI being configured * @basename: name for the vector @@ -1172,7 +1211,7 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename) { int q_vectors = vsi->num_q_vectors; struct ice_pf *pf = vsi->back; - int base = vsi->base_vector; + int base = vsi->sw_base_vector; int rx_int_idx = 0; int tx_int_idx = 0; int vector, err; @@ -1231,467 +1270,6 @@ free_q_irqs: } /** - * ice_vsi_set_rss_params - Setup RSS capabilities per VSI type - * @vsi: the VSI being configured - */ -static void ice_vsi_set_rss_params(struct ice_vsi *vsi) -{ - struct ice_hw_common_caps *cap; - struct ice_pf *pf = vsi->back; - - if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { - vsi->rss_size = 1; - return; - } - - cap = &pf->hw.func_caps.common_cap; - switch (vsi->type) { - case ICE_VSI_PF: - /* PF VSI 
will inherit RSS instance of PF */ - vsi->rss_table_size = cap->rss_table_size; - vsi->rss_size = min_t(int, num_online_cpus(), - BIT(cap->rss_table_entry_width)); - vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF; - break; - default: - dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type); - break; - } -} - -/** - * ice_vsi_setup_q_map - Setup a VSI queue map - * @vsi: the VSI being configured - * @ctxt: VSI context structure - */ -static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) -{ - u16 offset = 0, qmap = 0, numq_tc; - u16 pow = 0, max_rss = 0, qcount; - u16 qcount_tx = vsi->alloc_txq; - u16 qcount_rx = vsi->alloc_rxq; - bool ena_tc0 = false; - int i; - - /* at least TC0 should be enabled by default */ - if (vsi->tc_cfg.numtc) { - if (!(vsi->tc_cfg.ena_tc & BIT(0))) - ena_tc0 = true; - } else { - ena_tc0 = true; - } - - if (ena_tc0) { - vsi->tc_cfg.numtc++; - vsi->tc_cfg.ena_tc |= 1; - } - - numq_tc = qcount_rx / vsi->tc_cfg.numtc; - - /* TC mapping is a function of the number of Rx queues assigned to the - * VSI for each traffic class and the offset of these queues. - * The first 10 bits are for queue offset for TC0, next 4 bits for no:of - * queues allocated to TC0. No:of queues is a power-of-2. - * - * If TC is not enabled, the queue offset is set to 0, and allocate one - * queue, this way, traffic for the given TC will be sent to the default - * queue. - * - * Setup number and offset of Rx queues for all TCs for the VSI - */ - - /* qcount will change if RSS is enabled */ - if (test_bit(ICE_FLAG_RSS_ENA, vsi->back->flags)) { - if (vsi->type == ICE_VSI_PF) - max_rss = ICE_MAX_LG_RSS_QS; - else - max_rss = ICE_MAX_SMALL_RSS_QS; - - qcount = min_t(int, numq_tc, max_rss); - qcount = min_t(int, qcount, vsi->rss_size); - } else { - qcount = numq_tc; - } - - /* find the (rounded up) power-of-2 of qcount */ - pow = order_base_2(qcount); - - for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) { - if (!(vsi->tc_cfg.ena_tc & BIT(i))) { - /* TC is not enabled */ - vsi->tc_cfg.tc_info[i].qoffset = 0; - vsi->tc_cfg.tc_info[i].qcount = 1; - ctxt->info.tc_mapping[i] = 0; - continue; - } - - /* TC is enabled */ - vsi->tc_cfg.tc_info[i].qoffset = offset; - vsi->tc_cfg.tc_info[i].qcount = qcount; - - qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) & - ICE_AQ_VSI_TC_Q_OFFSET_M) | - ((pow << ICE_AQ_VSI_TC_Q_NUM_S) & - ICE_AQ_VSI_TC_Q_NUM_M); - offset += qcount; - ctxt->info.tc_mapping[i] = cpu_to_le16(qmap); - } - - vsi->num_txq = qcount_tx; - vsi->num_rxq = offset; - - /* Rx queue mapping */ - ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG); - /* q_mapping buffer holds the info for the first queue allocated for - * this VSI in the PF space and also the number of queues associated - * with this VSI. - */ - ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]); - ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq); -} - -/** - * ice_set_dflt_vsi_ctx - Set default VSI context before adding a VSI - * @ctxt: the VSI context being set - * - * This initializes a default VSI context for all sections except the Queues. 
- */ -static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt) -{ - u32 table = 0; - - memset(&ctxt->info, 0, sizeof(ctxt->info)); - /* VSI's should be allocated from shared pool */ - ctxt->alloc_from_pool = true; - /* Src pruning enabled by default */ - ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE; - /* Traffic from VSI can be sent to LAN */ - ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA; - - /* By default bits 3 and 4 in vlan_flags are 0's which results in legacy - * behavior (show VLAN, DEI, and UP) in descriptor. Also, allow all - * packets untagged/tagged. - */ - ctxt->info.vlan_flags = ((ICE_AQ_VSI_VLAN_MODE_ALL & - ICE_AQ_VSI_VLAN_MODE_M) >> - ICE_AQ_VSI_VLAN_MODE_S); - - /* Have 1:1 UP mapping for both ingress/egress tables */ - table |= ICE_UP_TABLE_TRANSLATE(0, 0); - table |= ICE_UP_TABLE_TRANSLATE(1, 1); - table |= ICE_UP_TABLE_TRANSLATE(2, 2); - table |= ICE_UP_TABLE_TRANSLATE(3, 3); - table |= ICE_UP_TABLE_TRANSLATE(4, 4); - table |= ICE_UP_TABLE_TRANSLATE(5, 5); - table |= ICE_UP_TABLE_TRANSLATE(6, 6); - table |= ICE_UP_TABLE_TRANSLATE(7, 7); - ctxt->info.ingress_table = cpu_to_le32(table); - ctxt->info.egress_table = cpu_to_le32(table); - /* Have 1:1 UP mapping for outer to inner UP table */ - ctxt->info.outer_up_table = cpu_to_le32(table); - /* No Outer tag support outer_tag_flags remains to zero */ -} - -/** - * ice_set_rss_vsi_ctx - Set RSS VSI context before adding a VSI - * @ctxt: the VSI context being set - * @vsi: the VSI being configured - */ -static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi) -{ - u8 lut_type, hash_type; - - switch (vsi->type) { - case ICE_VSI_PF: - /* PF VSI will inherit RSS instance of PF */ - lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF; - hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ; - break; - default: - dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n", - vsi->type); - return; - } - - ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) & - ICE_AQ_VSI_Q_OPT_RSS_LUT_M) | - ((hash_type << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) & - ICE_AQ_VSI_Q_OPT_RSS_HASH_M); -} - -/** - * ice_vsi_add - Create a new VSI or fetch preallocated VSI - * @vsi: the VSI being configured - * - * This initializes a VSI context depending on the VSI type to be added and - * passes it down to the add_vsi aq command to create a new VSI. 
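The eight ICE_UP_TABLE_TRANSLATE() calls in ice_set_dflt_vsi_ctx() above build an identity user-priority map; assuming the usual 3-bits-per-UP layout starting at bit 0 (which the 1:1 construction implies), the unrolled sequence collapses to a loop with a known result:

/* Equivalent of the unrolled 1:1 UP table above; the packed value works
 * out to 0x00FAC688.
 */
static u32 ice_identity_up_table_sketch(void)
{
	u32 table = 0;
	int up;

	for (up = 0; up < 8; up++)
		table |= up << (3 * up);	/* UP i at bits [3i+2:3i] (assumed) */

	return table;
}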
- */ -static int ice_vsi_add(struct ice_vsi *vsi) -{ - struct ice_vsi_ctx ctxt = { 0 }; - struct ice_pf *pf = vsi->back; - struct ice_hw *hw = &pf->hw; - int ret = 0; - - switch (vsi->type) { - case ICE_VSI_PF: - ctxt.flags = ICE_AQ_VSI_TYPE_PF; - break; - default: - return -ENODEV; - } - - ice_set_dflt_vsi_ctx(&ctxt); - /* if the switch is in VEB mode, allow VSI loopback */ - if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB) - ctxt.info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB; - - /* Set LUT type and HASH type if RSS is enabled */ - if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) - ice_set_rss_vsi_ctx(&ctxt, vsi); - - ctxt.info.sw_id = vsi->port_info->sw_id; - ice_vsi_setup_q_map(vsi, &ctxt); - - ret = ice_aq_add_vsi(hw, &ctxt, NULL); - if (ret) { - dev_err(&vsi->back->pdev->dev, - "Add VSI AQ call failed, err %d\n", ret); - return -EIO; - } - vsi->info = ctxt.info; - vsi->vsi_num = ctxt.vsi_num; - - return ret; -} - -/** - * ice_vsi_release_msix - Clear the queue to Interrupt mapping in HW - * @vsi: the VSI being cleaned up - */ -static void ice_vsi_release_msix(struct ice_vsi *vsi) -{ - struct ice_pf *pf = vsi->back; - u16 vector = vsi->base_vector; - struct ice_hw *hw = &pf->hw; - u32 txq = 0; - u32 rxq = 0; - int i, q; - - for (i = 0; i < vsi->num_q_vectors; i++, vector++) { - struct ice_q_vector *q_vector = vsi->q_vectors[i]; - - wr32(hw, GLINT_ITR(ICE_RX_ITR, vector), 0); - wr32(hw, GLINT_ITR(ICE_TX_ITR, vector), 0); - for (q = 0; q < q_vector->num_ring_tx; q++) { - wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0); - txq++; - } - - for (q = 0; q < q_vector->num_ring_rx; q++) { - wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), 0); - rxq++; - } - } - - ice_flush(hw); -} - -/** - * ice_vsi_clear_rings - Deallocates the Tx and Rx rings for VSI - * @vsi: the VSI having rings deallocated - */ -static void ice_vsi_clear_rings(struct ice_vsi *vsi) -{ - int i; - - if (vsi->tx_rings) { - for (i = 0; i < vsi->alloc_txq; i++) { - if (vsi->tx_rings[i]) { - kfree_rcu(vsi->tx_rings[i], rcu); - vsi->tx_rings[i] = NULL; - } - } - } - if (vsi->rx_rings) { - for (i = 0; i < vsi->alloc_rxq; i++) { - if (vsi->rx_rings[i]) { - kfree_rcu(vsi->rx_rings[i], rcu); - vsi->rx_rings[i] = NULL; - } - } - } -} - -/** - * ice_vsi_alloc_rings - Allocates Tx and Rx rings for the VSI - * @vsi: VSI which is having rings allocated - */ -static int ice_vsi_alloc_rings(struct ice_vsi *vsi) -{ - struct ice_pf *pf = vsi->back; - int i; - - /* Allocate tx_rings */ - for (i = 0; i < vsi->alloc_txq; i++) { - struct ice_ring *ring; - - /* allocate with kzalloc(), free with kfree_rcu() */ - ring = kzalloc(sizeof(*ring), GFP_KERNEL); - - if (!ring) - goto err_out; - - ring->q_index = i; - ring->reg_idx = vsi->txq_map[i]; - ring->ring_active = false; - ring->vsi = vsi; - ring->netdev = vsi->netdev; - ring->dev = &pf->pdev->dev; - ring->count = vsi->num_desc; - - vsi->tx_rings[i] = ring; - } - - /* Allocate rx_rings */ - for (i = 0; i < vsi->alloc_rxq; i++) { - struct ice_ring *ring; - - /* allocate with kzalloc(), free with kfree_rcu() */ - ring = kzalloc(sizeof(*ring), GFP_KERNEL); - if (!ring) - goto err_out; - - ring->q_index = i; - ring->reg_idx = vsi->rxq_map[i]; - ring->ring_active = false; - ring->vsi = vsi; - ring->netdev = vsi->netdev; - ring->dev = &pf->pdev->dev; - ring->count = vsi->num_desc; - vsi->rx_rings[i] = ring; - } - - return 0; - -err_out: - ice_vsi_clear_rings(vsi); - return -ENOMEM; -} - -/** - * ice_vsi_free_irq - Free the irq association with the OS - * @vsi: the VSI being configured - */ -static void ice_vsi_free_irq(struct 
ice_vsi *vsi) -{ - struct ice_pf *pf = vsi->back; - int base = vsi->base_vector; - - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { - int i; - - if (!vsi->q_vectors || !vsi->irqs_ready) - return; - - vsi->irqs_ready = false; - for (i = 0; i < vsi->num_q_vectors; i++) { - u16 vector = i + base; - int irq_num; - - irq_num = pf->msix_entries[vector].vector; - - /* free only the irqs that were actually requested */ - if (!vsi->q_vectors[i] || - !(vsi->q_vectors[i]->num_ring_tx || - vsi->q_vectors[i]->num_ring_rx)) - continue; - - /* clear the affinity notifier in the IRQ descriptor */ - irq_set_affinity_notifier(irq_num, NULL); - - /* clear the affinity_mask in the IRQ descriptor */ - irq_set_affinity_hint(irq_num, NULL); - synchronize_irq(irq_num); - devm_free_irq(&pf->pdev->dev, irq_num, - vsi->q_vectors[i]); - } - ice_vsi_release_msix(vsi); - } -} - -/** - * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW - * @vsi: the VSI being configured - */ -static void ice_vsi_cfg_msix(struct ice_vsi *vsi) -{ - struct ice_pf *pf = vsi->back; - u16 vector = vsi->base_vector; - struct ice_hw *hw = &pf->hw; - u32 txq = 0, rxq = 0; - int i, q, itr; - u8 itr_gran; - - for (i = 0; i < vsi->num_q_vectors; i++, vector++) { - struct ice_q_vector *q_vector = vsi->q_vectors[i]; - - itr_gran = hw->itr_gran_200; - - if (q_vector->num_ring_rx) { - q_vector->rx.itr = - ITR_TO_REG(vsi->rx_rings[rxq]->rx_itr_setting, - itr_gran); - q_vector->rx.latency_range = ICE_LOW_LATENCY; - } - - if (q_vector->num_ring_tx) { - q_vector->tx.itr = - ITR_TO_REG(vsi->tx_rings[txq]->tx_itr_setting, - itr_gran); - q_vector->tx.latency_range = ICE_LOW_LATENCY; - } - wr32(hw, GLINT_ITR(ICE_RX_ITR, vector), q_vector->rx.itr); - wr32(hw, GLINT_ITR(ICE_TX_ITR, vector), q_vector->tx.itr); - - /* Both Transmit Queue Interrupt Cause Control register - * and Receive Queue Interrupt Cause control register - * expects MSIX_INDX field to be the vector index - * within the function space and not the absolute - * vector index across PF or across device. - * For SR-IOV VF VSIs queue vector index always starts - * with 1 since first vector index(0) is used for OICR - * in VF space. Since VMDq and other PF VSIs are withtin - * the PF function space, use the vector index thats - * tracked for this PF. 
- */ - for (q = 0; q < q_vector->num_ring_tx; q++) { - u32 val; - - itr = ICE_TX_ITR; - val = QINT_TQCTL_CAUSE_ENA_M | - (itr << QINT_TQCTL_ITR_INDX_S) | - (vector << QINT_TQCTL_MSIX_INDX_S); - wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val); - txq++; - } - - for (q = 0; q < q_vector->num_ring_rx; q++) { - u32 val; - - itr = ICE_RX_ITR; - val = QINT_RQCTL_CAUSE_ENA_M | - (itr << QINT_RQCTL_ITR_INDX_S) | - (vector << QINT_RQCTL_MSIX_INDX_S); - wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val); - rxq++; - } - } - - ice_flush(hw); -} - -/** * ice_ena_misc_vector - enable the non-queue interrupts * @pf: board private structure */ @@ -1708,13 +1286,14 @@ static void ice_ena_misc_vector(struct ice_pf *pf) PFINT_OICR_MAL_DETECT_M | PFINT_OICR_GRST_M | PFINT_OICR_PCI_EXCEPTION_M | + PFINT_OICR_VFLR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_CRITERR_M); wr32(hw, PFINT_OICR_ENA, val); /* SW_ITR_IDX = 0, but don't change INTENA */ - wr32(hw, GLINT_DYN_CTL(pf->oicr_idx), + wr32(hw, GLINT_DYN_CTL(pf->hw_oicr_idx), GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M); } @@ -1731,12 +1310,23 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) u32 oicr, ena_mask; set_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state); + set_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state); oicr = rd32(hw, PFINT_OICR); ena_mask = rd32(hw, PFINT_OICR_ENA); + if (oicr & PFINT_OICR_MAL_DETECT_M) { + ena_mask &= ~PFINT_OICR_MAL_DETECT_M; + set_bit(__ICE_MDD_EVENT_PENDING, pf->state); + } + if (oicr & PFINT_OICR_VFLR_M) { + ena_mask &= ~PFINT_OICR_VFLR_M; + set_bit(__ICE_VFLR_EVENT_PENDING, pf->state); + } + if (oicr & PFINT_OICR_GRST_M) { u32 reset; + /* we have a reset warning */ ena_mask &= ~PFINT_OICR_GRST_M; reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >> @@ -1746,15 +1336,18 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) pf->corer_count++; else if (reset == ICE_RESET_GLOBR) pf->globr_count++; - else + else if (reset == ICE_RESET_EMPR) pf->empr_count++; + else + dev_dbg(&pf->pdev->dev, "Invalid reset type %d\n", + reset); /* If a reset cycle isn't already in progress, we set a bit in * pf->state so that the service task can start a reset/rebuild. * We also make note of which reset happened so that peer * devices/drivers can be informed. */ - if (!test_bit(__ICE_RESET_RECOVERY_PENDING, pf->state)) { + if (!test_and_set_bit(__ICE_RESET_OICR_RECV, pf->state)) { if (reset == ICE_RESET_CORER) set_bit(__ICE_CORER_RECV, pf->state); else if (reset == ICE_RESET_GLOBR) @@ -1762,7 +1355,20 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) else set_bit(__ICE_EMPR_RECV, pf->state); - set_bit(__ICE_RESET_RECOVERY_PENDING, pf->state); + /* There are couple of different bits at play here. + * hw->reset_ongoing indicates whether the hardware is + * in reset. This is set to true when a reset interrupt + * is received and set back to false after the driver + * has determined that the hardware is out of reset. + * + * __ICE_RESET_OICR_RECV in pf->state indicates + * that a post reset rebuild is required before the + * driver is operational again. This is set above. + * + * As this is the start of the reset/rebuild cycle, set + * both to indicate that. 
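The flag set just below is dropped again on the far side of the cycle; a minimal sketch of that companion step, assuming the rebuild path can poll with a helper such as ice_check_reset() (simplified, not the driver's exact rebuild code):

/* Rebuild-side sketch, not the interrupt handler: once the hardware
 * reports the reset finished, clear reset_ongoing while
 * __ICE_RESET_OICR_RECV keeps the rebuild pending.
 */
if (!ice_check_reset(hw))
	hw->reset_ongoing = false;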
+ */ + hw->reset_ongoing = true; } } @@ -1803,208 +1409,6 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) } /** - * ice_vsi_map_rings_to_vectors - Map VSI rings to interrupt vectors - * @vsi: the VSI being configured - * - * This function maps descriptor rings to the queue-specific vectors allotted - * through the MSI-X enabling code. On a constrained vector budget, we map Tx - * and Rx rings to the vector as "efficiently" as possible. - */ -static void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi) -{ - int q_vectors = vsi->num_q_vectors; - int tx_rings_rem, rx_rings_rem; - int v_id; - - /* initially assigning remaining rings count to VSIs num queue value */ - tx_rings_rem = vsi->num_txq; - rx_rings_rem = vsi->num_rxq; - - for (v_id = 0; v_id < q_vectors; v_id++) { - struct ice_q_vector *q_vector = vsi->q_vectors[v_id]; - int tx_rings_per_v, rx_rings_per_v, q_id, q_base; - - /* Tx rings mapping to vector */ - tx_rings_per_v = DIV_ROUND_UP(tx_rings_rem, q_vectors - v_id); - q_vector->num_ring_tx = tx_rings_per_v; - q_vector->tx.ring = NULL; - q_base = vsi->num_txq - tx_rings_rem; - - for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) { - struct ice_ring *tx_ring = vsi->tx_rings[q_id]; - - tx_ring->q_vector = q_vector; - tx_ring->next = q_vector->tx.ring; - q_vector->tx.ring = tx_ring; - } - tx_rings_rem -= tx_rings_per_v; - - /* Rx rings mapping to vector */ - rx_rings_per_v = DIV_ROUND_UP(rx_rings_rem, q_vectors - v_id); - q_vector->num_ring_rx = rx_rings_per_v; - q_vector->rx.ring = NULL; - q_base = vsi->num_rxq - rx_rings_rem; - - for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) { - struct ice_ring *rx_ring = vsi->rx_rings[q_id]; - - rx_ring->q_vector = q_vector; - rx_ring->next = q_vector->rx.ring; - q_vector->rx.ring = rx_ring; - } - rx_rings_rem -= rx_rings_per_v; - } -} - -/** - * ice_vsi_set_num_qs - Set num queues, descriptors and vectors for a VSI - * @vsi: the VSI being configured - * - * Return 0 on success and a negative value on error - */ -static void ice_vsi_set_num_qs(struct ice_vsi *vsi) -{ - struct ice_pf *pf = vsi->back; - - switch (vsi->type) { - case ICE_VSI_PF: - vsi->alloc_txq = pf->num_lan_tx; - vsi->alloc_rxq = pf->num_lan_rx; - vsi->num_desc = ALIGN(ICE_DFLT_NUM_DESC, ICE_REQ_DESC_MULTIPLE); - vsi->num_q_vectors = max_t(int, pf->num_lan_rx, pf->num_lan_tx); - break; - default: - dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n", - vsi->type); - break; - } -} - -/** - * ice_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi - * @vsi: VSI pointer - * @alloc_qvectors: a bool to specify if q_vectors need to be allocated. 
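The DIV_ROUND_UP() spreading in ice_vsi_map_rings_to_vectors() above hands out rings as evenly as possible, with earlier vectors absorbing any remainder; a self-contained sketch with worked numbers:

/* Standalone sketch of the mapping arithmetic: 5 rings over 3 vectors
 * yields 2, 2, 1.
 */
static void distribute_rings_sketch(int num_rings, int num_vectors, int *per_v)
{
	int rem = num_rings;
	int v;

	for (v = 0; v < num_vectors; v++) {
		per_v[v] = DIV_ROUND_UP(rem, num_vectors - v);
		rem -= per_v[v];
	}
}

Front-loading the remainder keeps every vector within one ring of its neighbors, so no single vector becomes a hotspot on a constrained budget.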
- * - * On error: returns error code (negative) - * On success: returns 0 - */ -static int ice_vsi_alloc_arrays(struct ice_vsi *vsi, bool alloc_qvectors) -{ - struct ice_pf *pf = vsi->back; - - /* allocate memory for both Tx and Rx ring pointers */ - vsi->tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq, - sizeof(struct ice_ring *), GFP_KERNEL); - if (!vsi->tx_rings) - goto err_txrings; - - vsi->rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq, - sizeof(struct ice_ring *), GFP_KERNEL); - if (!vsi->rx_rings) - goto err_rxrings; - - if (alloc_qvectors) { - /* allocate memory for q_vector pointers */ - vsi->q_vectors = devm_kcalloc(&pf->pdev->dev, - vsi->num_q_vectors, - sizeof(struct ice_q_vector *), - GFP_KERNEL); - if (!vsi->q_vectors) - goto err_vectors; - } - - return 0; - -err_vectors: - devm_kfree(&pf->pdev->dev, vsi->rx_rings); -err_rxrings: - devm_kfree(&pf->pdev->dev, vsi->tx_rings); -err_txrings: - return -ENOMEM; -} - -/** - * ice_msix_clean_rings - MSIX mode Interrupt Handler - * @irq: interrupt number - * @data: pointer to a q_vector - */ -static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data) -{ - struct ice_q_vector *q_vector = (struct ice_q_vector *)data; - - if (!q_vector->tx.ring && !q_vector->rx.ring) - return IRQ_HANDLED; - - napi_schedule(&q_vector->napi); - - return IRQ_HANDLED; -} - -/** - * ice_vsi_alloc - Allocates the next available struct vsi in the PF - * @pf: board private structure - * @type: type of VSI - * - * returns a pointer to a VSI on success, NULL on failure. - */ -static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type) -{ - struct ice_vsi *vsi = NULL; - - /* Need to protect the allocation of the VSIs at the PF level */ - mutex_lock(&pf->sw_mutex); - - /* If we have already allocated our maximum number of VSIs, - * pf->next_vsi will be ICE_NO_VSI. 
If not, pf->next_vsi index - * is available to be populated - */ - if (pf->next_vsi == ICE_NO_VSI) { - dev_dbg(&pf->pdev->dev, "out of VSI slots!\n"); - goto unlock_pf; - } - - vsi = devm_kzalloc(&pf->pdev->dev, sizeof(*vsi), GFP_KERNEL); - if (!vsi) - goto unlock_pf; - - vsi->type = type; - vsi->back = pf; - set_bit(__ICE_DOWN, vsi->state); - vsi->idx = pf->next_vsi; - vsi->work_lmt = ICE_DFLT_IRQ_WORK; - - ice_vsi_set_num_qs(vsi); - - switch (vsi->type) { - case ICE_VSI_PF: - if (ice_vsi_alloc_arrays(vsi, true)) - goto err_rings; - - /* Setup default MSIX irq handler for VSI */ - vsi->irq_handler = ice_msix_clean_rings; - break; - default: - dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type); - goto unlock_pf; - } - - /* fill VSI slot in the PF struct */ - pf->vsi[pf->next_vsi] = vsi; - - /* prepare pf->next_vsi for next use */ - pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi, - pf->next_vsi); - goto unlock_pf; - -err_rings: - devm_kfree(&pf->pdev->dev, vsi); - vsi = NULL; -unlock_pf: - mutex_unlock(&pf->sw_mutex); - return vsi; -} - -/** * ice_free_irq_msix_misc - Unroll misc vector setup * @pf: board private structure */ @@ -2015,12 +1419,15 @@ static void ice_free_irq_msix_misc(struct ice_pf *pf) ice_flush(&pf->hw); if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags) && pf->msix_entries) { - synchronize_irq(pf->msix_entries[pf->oicr_idx].vector); + synchronize_irq(pf->msix_entries[pf->sw_oicr_idx].vector); devm_free_irq(&pf->pdev->dev, - pf->msix_entries[pf->oicr_idx].vector, pf); + pf->msix_entries[pf->sw_oicr_idx].vector, pf); } - ice_free_res(pf->irq_tracker, pf->oicr_idx, ICE_RES_MISC_VEC_ID); + pf->num_avail_sw_msix += 1; + ice_free_res(pf->sw_irq_tracker, pf->sw_oicr_idx, ICE_RES_MISC_VEC_ID); + pf->num_avail_hw_msix += 1; + ice_free_res(pf->hw_irq_tracker, pf->hw_oicr_idx, ICE_RES_MISC_VEC_ID); } /** @@ -2047,42 +1454,61 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf) * lost during reset. Note that this function is called only during * rebuild path and not while reset is in progress. 
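Earlier, ice_vsi_alloc() advanced pf->next_vsi via ice_get_free_slot(); a minimal sketch of that scan, assuming a NULL entry marks a free slot and ICE_NO_VSI signals exhaustion:

/* Prefer the slot right after 'curr' (the common case when VSIs are
 * allocated in order); otherwise scan from the start.
 */
static int ice_free_slot_sketch(struct ice_vsi **array, int size, int curr)
{
	int i;

	if (curr + 1 < size && !array[curr + 1])
		return curr + 1;

	for (i = 0; i < size; i++)
		if (!array[i])
			return i;

	return ICE_NO_VSI;
}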
*/ - if (ice_is_reset_recovery_pending(pf->state)) + if (ice_is_reset_in_progress(pf->state)) goto skip_req_irq; - /* reserve one vector in irq_tracker for misc interrupts */ - oicr_idx = ice_get_res(pf, pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID); + /* reserve one vector in sw_irq_tracker for misc interrupts */ + oicr_idx = ice_get_res(pf, pf->sw_irq_tracker, 1, ICE_RES_MISC_VEC_ID); if (oicr_idx < 0) return oicr_idx; - pf->oicr_idx = oicr_idx; + pf->num_avail_sw_msix -= 1; + pf->sw_oicr_idx = oicr_idx; + + /* reserve one vector in hw_irq_tracker for misc interrupts */ + oicr_idx = ice_get_res(pf, pf->hw_irq_tracker, 1, ICE_RES_MISC_VEC_ID); + if (oicr_idx < 0) { + ice_free_res(pf->sw_irq_tracker, 1, ICE_RES_MISC_VEC_ID); + pf->num_avail_sw_msix += 1; + return oicr_idx; + } + pf->num_avail_hw_msix -= 1; + pf->hw_oicr_idx = oicr_idx; err = devm_request_irq(&pf->pdev->dev, - pf->msix_entries[pf->oicr_idx].vector, + pf->msix_entries[pf->sw_oicr_idx].vector, ice_misc_intr, 0, pf->int_name, pf); if (err) { dev_err(&pf->pdev->dev, "devm_request_irq for %s failed: %d\n", pf->int_name, err); - ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID); + ice_free_res(pf->sw_irq_tracker, 1, ICE_RES_MISC_VEC_ID); + pf->num_avail_sw_msix += 1; + ice_free_res(pf->hw_irq_tracker, 1, ICE_RES_MISC_VEC_ID); + pf->num_avail_hw_msix += 1; return err; } skip_req_irq: ice_ena_misc_vector(pf); - val = ((pf->oicr_idx & PFINT_OICR_CTL_MSIX_INDX_M) | + val = ((pf->hw_oicr_idx & PFINT_OICR_CTL_MSIX_INDX_M) | PFINT_OICR_CTL_CAUSE_ENA_M); wr32(hw, PFINT_OICR_CTL, val); /* This enables Admin queue Interrupt causes */ - val = ((pf->oicr_idx & PFINT_FW_CTL_MSIX_INDX_M) | + val = ((pf->hw_oicr_idx & PFINT_FW_CTL_MSIX_INDX_M) | PFINT_FW_CTL_CAUSE_ENA_M); wr32(hw, PFINT_FW_CTL, val); - itr_gran = hw->itr_gran_200; + /* This enables Mailbox queue Interrupt causes */ + val = ((pf->hw_oicr_idx & PFINT_MBX_CTL_MSIX_INDX_M) | + PFINT_MBX_CTL_CAUSE_ENA_M); + wr32(hw, PFINT_MBX_CTL, val); + + itr_gran = hw->itr_gran; - wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_idx), + wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->hw_oicr_idx), ITR_TO_REG(ICE_ITR_8K, itr_gran)); ice_flush(hw); @@ -2092,209 +1518,43 @@ skip_req_irq: } /** - * ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI - * @vsi: the VSI getting queues - * - * Return 0 on success and a negative value on error - */ -static int ice_vsi_get_qs_contig(struct ice_vsi *vsi) -{ - struct ice_pf *pf = vsi->back; - int offset, ret = 0; - - mutex_lock(&pf->avail_q_mutex); - /* look for contiguous block of queues for tx */ - offset = bitmap_find_next_zero_area(pf->avail_txqs, ICE_MAX_TXQS, - 0, vsi->alloc_txq, 0); - if (offset < ICE_MAX_TXQS) { - int i; - - bitmap_set(pf->avail_txqs, offset, vsi->alloc_txq); - for (i = 0; i < vsi->alloc_txq; i++) - vsi->txq_map[i] = i + offset; - } else { - ret = -ENOMEM; - vsi->tx_mapping_mode = ICE_VSI_MAP_SCATTER; - } - - /* look for contiguous block of queues for rx */ - offset = bitmap_find_next_zero_area(pf->avail_rxqs, ICE_MAX_RXQS, - 0, vsi->alloc_rxq, 0); - if (offset < ICE_MAX_RXQS) { - int i; - - bitmap_set(pf->avail_rxqs, offset, vsi->alloc_rxq); - for (i = 0; i < vsi->alloc_rxq; i++) - vsi->rxq_map[i] = i + offset; - } else { - ret = -ENOMEM; - vsi->rx_mapping_mode = ICE_VSI_MAP_SCATTER; - } - mutex_unlock(&pf->avail_q_mutex); - - return ret; -} - -/** - * ice_vsi_get_qs_scatter - Assign a scattered queues to VSI - * @vsi: the VSI getting queues - * - * Return 0 on success and a negative value on error + * ice_napi_del - Remove NAPI handler for 
the VSI + * @vsi: VSI for which NAPI handler is to be removed */ -static int ice_vsi_get_qs_scatter(struct ice_vsi *vsi) +static void ice_napi_del(struct ice_vsi *vsi) { - struct ice_pf *pf = vsi->back; - int i, index = 0; - - mutex_lock(&pf->avail_q_mutex); - - if (vsi->tx_mapping_mode == ICE_VSI_MAP_SCATTER) { - for (i = 0; i < vsi->alloc_txq; i++) { - index = find_next_zero_bit(pf->avail_txqs, - ICE_MAX_TXQS, index); - if (index < ICE_MAX_TXQS) { - set_bit(index, pf->avail_txqs); - vsi->txq_map[i] = index; - } else { - goto err_scatter_tx; - } - } - } - - if (vsi->rx_mapping_mode == ICE_VSI_MAP_SCATTER) { - for (i = 0; i < vsi->alloc_rxq; i++) { - index = find_next_zero_bit(pf->avail_rxqs, - ICE_MAX_RXQS, index); - if (index < ICE_MAX_RXQS) { - set_bit(index, pf->avail_rxqs); - vsi->rxq_map[i] = index; - } else { - goto err_scatter_rx; - } - } - } - - mutex_unlock(&pf->avail_q_mutex); - return 0; + int v_idx; -err_scatter_rx: - /* unflag any queues we have grabbed (i is failed position) */ - for (index = 0; index < i; index++) { - clear_bit(vsi->rxq_map[index], pf->avail_rxqs); - vsi->rxq_map[index] = 0; - } - i = vsi->alloc_txq; -err_scatter_tx: - /* i is either position of failed attempt or vsi->alloc_txq */ - for (index = 0; index < i; index++) { - clear_bit(vsi->txq_map[index], pf->avail_txqs); - vsi->txq_map[index] = 0; - } + if (!vsi->netdev) + return; - mutex_unlock(&pf->avail_q_mutex); - return -ENOMEM; + for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) + netif_napi_del(&vsi->q_vectors[v_idx]->napi); } /** - * ice_vsi_get_qs - Assign queues from PF to VSI - * @vsi: the VSI to assign queues to + * ice_napi_add - register NAPI handler for the VSI + * @vsi: VSI for which NAPI handler is to be registered * - * Returns 0 on success and a negative value on error - */ -static int ice_vsi_get_qs(struct ice_vsi *vsi) -{ - int ret = 0; - - vsi->tx_mapping_mode = ICE_VSI_MAP_CONTIG; - vsi->rx_mapping_mode = ICE_VSI_MAP_CONTIG; - - /* NOTE: ice_vsi_get_qs_contig() will set the rx/tx mapping - * modes individually to scatter if assigning contiguous queues - * to rx or tx fails - */ - ret = ice_vsi_get_qs_contig(vsi); - if (ret < 0) { - if (vsi->tx_mapping_mode == ICE_VSI_MAP_SCATTER) - vsi->alloc_txq = max_t(u16, vsi->alloc_txq, - ICE_MAX_SCATTER_TXQS); - if (vsi->rx_mapping_mode == ICE_VSI_MAP_SCATTER) - vsi->alloc_rxq = max_t(u16, vsi->alloc_rxq, - ICE_MAX_SCATTER_RXQS); - ret = ice_vsi_get_qs_scatter(vsi); - } - - return ret; -} - -/** - * ice_vsi_put_qs - Release queues from VSI to PF - * @vsi: the VSI thats going to release queues - */ -static void ice_vsi_put_qs(struct ice_vsi *vsi) -{ - struct ice_pf *pf = vsi->back; - int i; - - mutex_lock(&pf->avail_q_mutex); - - for (i = 0; i < vsi->alloc_txq; i++) { - clear_bit(vsi->txq_map[i], pf->avail_txqs); - vsi->txq_map[i] = ICE_INVAL_Q_INDEX; - } - - for (i = 0; i < vsi->alloc_rxq; i++) { - clear_bit(vsi->rxq_map[i], pf->avail_rxqs); - vsi->rxq_map[i] = ICE_INVAL_Q_INDEX; - } - - mutex_unlock(&pf->avail_q_mutex); -} - -/** - * ice_free_q_vector - Free memory allocated for a specific interrupt vector - * @vsi: VSI having the memory freed - * @v_idx: index of the vector to be freed + * This function is only called in the driver's load path. Registering the NAPI + * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume, + * reset/rebuild, etc.) 
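netif_napi_add() just below registers ice_napi_poll with the default NAPI_POLL_WEIGHT (64) budget; for reference, the contract any NAPI poll callback must follow (a generic skeleton, not the driver's ice_napi_poll; clean_rings() is a hypothetical helper):

/* Generic NAPI poll skeleton: do at most 'budget' units of work; only
 * when the rings are drained, complete NAPI and re-arm the interrupt.
 */
static int napi_poll_sketch(struct napi_struct *napi, int budget)
{
	int work_done = clean_rings(napi, budget);	/* hypothetical helper */

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		/* re-enable the queue interrupt here */
	}

	return work_done;
}

Returning the full budget without completing keeps the softirq polling, which is what amortizes interrupt overhead under load.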
*/ -static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx) +static void ice_napi_add(struct ice_vsi *vsi) { - struct ice_q_vector *q_vector; - struct ice_ring *ring; + int v_idx; - if (!vsi->q_vectors[v_idx]) { - dev_dbg(&vsi->back->pdev->dev, "Queue vector at index %d not found\n", - v_idx); + if (!vsi->netdev) return; - } - q_vector = vsi->q_vectors[v_idx]; - - ice_for_each_ring(ring, q_vector->tx) - ring->q_vector = NULL; - ice_for_each_ring(ring, q_vector->rx) - ring->q_vector = NULL; - - /* only VSI with an associated netdev is set up with NAPI */ - if (vsi->netdev) - netif_napi_del(&q_vector->napi); - - devm_kfree(&vsi->back->pdev->dev, q_vector); - vsi->q_vectors[v_idx] = NULL; -} - -/** - * ice_vsi_free_q_vectors - Free memory allocated for interrupt vectors - * @vsi: the VSI having memory freed - */ -static void ice_vsi_free_q_vectors(struct ice_vsi *vsi) -{ - int v_idx; for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) - ice_free_q_vector(vsi, v_idx); + netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi, + ice_napi_poll, NAPI_POLL_WEIGHT); } /** - * ice_cfg_netdev - Setup the netdev flags - * @vsi: the VSI being configured + * ice_cfg_netdev - Allocate, configure and register a netdev + * @vsi: the VSI associated with the new netdev * * Returns 0 on success, negative value on failure */ @@ -2307,6 +1567,7 @@ static int ice_cfg_netdev(struct ice_vsi *vsi) struct ice_netdev_priv *np; struct net_device *netdev; u8 mac_addr[ETH_ALEN]; + int err; netdev = alloc_etherdev_mqs(sizeof(struct ice_netdev_priv), vsi->alloc_txq, vsi->alloc_rxq); @@ -2364,195 +1625,14 @@ static int ice_cfg_netdev(struct ice_vsi *vsi) netdev->min_mtu = ETH_MIN_MTU; netdev->max_mtu = ICE_MAX_MTU; - return 0; -} - -/** - * ice_vsi_free_arrays - clean up vsi resources - * @vsi: pointer to VSI being cleared - * @free_qvectors: bool to specify if q_vectors should be deallocated - */ -static void ice_vsi_free_arrays(struct ice_vsi *vsi, bool free_qvectors) -{ - struct ice_pf *pf = vsi->back; - - /* free the ring and vector containers */ - if (free_qvectors && vsi->q_vectors) { - devm_kfree(&pf->pdev->dev, vsi->q_vectors); - vsi->q_vectors = NULL; - } - if (vsi->tx_rings) { - devm_kfree(&pf->pdev->dev, vsi->tx_rings); - vsi->tx_rings = NULL; - } - if (vsi->rx_rings) { - devm_kfree(&pf->pdev->dev, vsi->rx_rings); - vsi->rx_rings = NULL; - } -} - -/** - * ice_vsi_clear - clean up and deallocate the provided vsi - * @vsi: pointer to VSI being cleared - * - * This deallocates the vsi's queue resources, removes it from the PF's - * VSI array if necessary, and deallocates the VSI - * - * Returns 0 on success, negative on failure - */ -static int ice_vsi_clear(struct ice_vsi *vsi) -{ - struct ice_pf *pf = NULL; - - if (!vsi) - return 0; - - if (!vsi->back) - return -EINVAL; - - pf = vsi->back; - - if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) { - dev_dbg(&pf->pdev->dev, "vsi does not exist at pf->vsi[%d]\n", - vsi->idx); - return -EINVAL; - } - - mutex_lock(&pf->sw_mutex); - /* updates the PF for this cleared vsi */ - - pf->vsi[vsi->idx] = NULL; - if (vsi->idx < pf->next_vsi) - pf->next_vsi = vsi->idx; - - ice_vsi_free_arrays(vsi, true); - mutex_unlock(&pf->sw_mutex); - devm_kfree(&pf->pdev->dev, vsi); - - return 0; -} - -/** - * ice_vsi_alloc_q_vector - Allocate memory for a single interrupt vector - * @vsi: the VSI being configured - * @v_idx: index of the vector in the vsi struct - * - * We allocate one q_vector. If allocation fails we return -ENOMEM. 
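Each vector allocated here later gets its IRQ affinity wired up in ice_vsi_req_irq_msix(); the usual request-side pairing, which ice_vsi_free_irq() above tears down, looks roughly like this (the affinity_notify member name is an assumption based on the ice_irq_affinity_notify() handler seen earlier):

/* Request-side affinity sketch: attach the notifier and seed the hint
 * so irqbalance sees the preferred CPU.
 */
irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);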
- */ -static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx) -{ - struct ice_pf *pf = vsi->back; - struct ice_q_vector *q_vector; - - /* allocate q_vector */ - q_vector = devm_kzalloc(&pf->pdev->dev, sizeof(*q_vector), GFP_KERNEL); - if (!q_vector) - return -ENOMEM; - - q_vector->vsi = vsi; - q_vector->v_idx = v_idx; - /* only set affinity_mask if the CPU is online */ - if (cpu_online(v_idx)) - cpumask_set_cpu(v_idx, &q_vector->affinity_mask); - - if (vsi->netdev) - netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll, - NAPI_POLL_WEIGHT); - /* tie q_vector and vsi together */ - vsi->q_vectors[v_idx] = q_vector; - - return 0; -} - -/** - * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors - * @vsi: the VSI being configured - * - * We allocate one q_vector per queue interrupt. If allocation fails we - * return -ENOMEM. - */ -static int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi) -{ - struct ice_pf *pf = vsi->back; - int v_idx = 0, num_q_vectors; - int err; - - if (vsi->q_vectors[0]) { - dev_dbg(&pf->pdev->dev, "VSI %d has existing q_vectors\n", - vsi->vsi_num); - return -EEXIST; - } - - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { - num_q_vectors = vsi->num_q_vectors; - } else { - err = -EINVAL; - goto err_out; - } - - for (v_idx = 0; v_idx < num_q_vectors; v_idx++) { - err = ice_vsi_alloc_q_vector(vsi, v_idx); - if (err) - goto err_out; - } - - return 0; - -err_out: - while (v_idx--) - ice_free_q_vector(vsi, v_idx); - - dev_err(&pf->pdev->dev, - "Failed to allocate %d q_vector for VSI %d, ret=%d\n", - vsi->num_q_vectors, vsi->vsi_num, err); - vsi->num_q_vectors = 0; - return err; -} - -/** - * ice_vsi_setup_vector_base - Set up the base vector for the given VSI - * @vsi: ptr to the VSI - * - * This should only be called after ice_vsi_alloc() which allocates the - * corresponding SW VSI structure and initializes num_queue_pairs for the - * newly allocated VSI. 
- * - * Returns 0 on success or negative on failure - */ -static int ice_vsi_setup_vector_base(struct ice_vsi *vsi) -{ - struct ice_pf *pf = vsi->back; - int num_q_vectors = 0; - - if (vsi->base_vector) { - dev_dbg(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n", - vsi->vsi_num, vsi->base_vector); - return -EEXIST; - } - - if (!test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) - return -ENOENT; - - switch (vsi->type) { - case ICE_VSI_PF: - num_q_vectors = vsi->num_q_vectors; - break; - default: - dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n", - vsi->type); - break; - } + err = register_netdev(vsi->netdev); + if (err) + return err; - if (num_q_vectors) - vsi->base_vector = ice_get_res(pf, pf->irq_tracker, - num_q_vectors, vsi->idx); + netif_carrier_off(vsi->netdev); - if (vsi->base_vector < 0) { - dev_err(&pf->pdev->dev, - "Failed to get tracking for %d vectors for VSI %d, err=%d\n", - num_q_vectors, vsi->vsi_num, vsi->base_vector); - return -ENOENT; - } + /* make sure transmit queues start off as stopped */ + netif_tx_stop_all_queues(vsi->netdev); return 0; } @@ -2572,327 +1652,17 @@ void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size) } /** - * ice_vsi_cfg_rss - Configure RSS params for a VSI - * @vsi: VSI to be configured - */ -static int ice_vsi_cfg_rss(struct ice_vsi *vsi) -{ - u8 seed[ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE]; - struct ice_aqc_get_set_rss_keys *key; - struct ice_pf *pf = vsi->back; - enum ice_status status; - int err = 0; - u8 *lut; - - vsi->rss_size = min_t(int, vsi->rss_size, vsi->num_rxq); - - lut = devm_kzalloc(&pf->pdev->dev, vsi->rss_table_size, GFP_KERNEL); - if (!lut) - return -ENOMEM; - - if (vsi->rss_lut_user) - memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size); - else - ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size); - - status = ice_aq_set_rss_lut(&pf->hw, vsi->vsi_num, vsi->rss_lut_type, - lut, vsi->rss_table_size); - - if (status) { - dev_err(&vsi->back->pdev->dev, - "set_rss_lut failed, error %d\n", status); - err = -EIO; - goto ice_vsi_cfg_rss_exit; - } - - key = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*key), GFP_KERNEL); - if (!key) { - err = -ENOMEM; - goto ice_vsi_cfg_rss_exit; - } - - if (vsi->rss_hkey_user) - memcpy(seed, vsi->rss_hkey_user, - ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE); - else - netdev_rss_key_fill((void *)seed, - ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE); - memcpy(&key->standard_rss_key, seed, - ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE); - - status = ice_aq_set_rss_key(&pf->hw, vsi->vsi_num, key); - - if (status) { - dev_err(&vsi->back->pdev->dev, "set_rss_key failed, error %d\n", - status); - err = -EIO; - } - - devm_kfree(&pf->pdev->dev, key); -ice_vsi_cfg_rss_exit: - devm_kfree(&pf->pdev->dev, lut); - return err; -} - -/** - * ice_vsi_reinit_setup - return resource and reallocate resource for a VSI - * @vsi: pointer to the ice_vsi - * - * This reallocates the VSIs queue resources - * - * Returns 0 on success and negative value on failure - */ -static int ice_vsi_reinit_setup(struct ice_vsi *vsi) -{ - u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; - int ret, i; - - if (!vsi) - return -EINVAL; - - ice_vsi_free_q_vectors(vsi); - ice_free_res(vsi->back->irq_tracker, vsi->base_vector, vsi->idx); - vsi->base_vector = 0; - ice_vsi_clear_rings(vsi); - ice_vsi_free_arrays(vsi, false); - ice_vsi_set_num_qs(vsi); - - /* Initialize VSI struct elements and create VSI in FW */ - ret = ice_vsi_add(vsi); - if (ret < 0) - goto err_vsi; - - ret = ice_vsi_alloc_arrays(vsi, false); - if (ret < 0) - goto 
err_vsi; - - switch (vsi->type) { - case ICE_VSI_PF: - if (!vsi->netdev) { - ret = ice_cfg_netdev(vsi); - if (ret) - goto err_rings; - - ret = register_netdev(vsi->netdev); - if (ret) - goto err_rings; - - netif_carrier_off(vsi->netdev); - netif_tx_stop_all_queues(vsi->netdev); - } - - ret = ice_vsi_alloc_q_vectors(vsi); - if (ret) - goto err_rings; - - ret = ice_vsi_setup_vector_base(vsi); - if (ret) - goto err_vectors; - - ret = ice_vsi_alloc_rings(vsi); - if (ret) - goto err_vectors; - - ice_vsi_map_rings_to_vectors(vsi); - break; - default: - break; - } - - ice_vsi_set_tc_cfg(vsi); - - /* configure VSI nodes based on number of queues and TC's */ - for (i = 0; i < vsi->tc_cfg.numtc; i++) - max_txqs[i] = vsi->num_txq; - - ret = ice_cfg_vsi_lan(vsi->port_info, vsi->vsi_num, - vsi->tc_cfg.ena_tc, max_txqs); - if (ret) { - dev_info(&vsi->back->pdev->dev, - "Failed VSI lan queue config\n"); - goto err_vectors; - } - return 0; - -err_vectors: - ice_vsi_free_q_vectors(vsi); -err_rings: - if (vsi->netdev) { - vsi->current_netdev_flags = 0; - unregister_netdev(vsi->netdev); - free_netdev(vsi->netdev); - vsi->netdev = NULL; - } -err_vsi: - ice_vsi_clear(vsi); - set_bit(__ICE_RESET_FAILED, vsi->back->state); - return ret; -} - -/** - * ice_vsi_setup - Set up a VSI by a given type + * ice_pf_vsi_setup - Set up a PF VSI * @pf: board private structure - * @type: VSI type * @pi: pointer to the port_info instance * - * This allocates the sw VSI structure and its queue resources. - * - * Returns pointer to the successfully allocated and configure VSI sw struct on - * success, otherwise returns NULL on failure. + * Returns pointer to the successfully allocated VSI sw struct on success, + * otherwise returns NULL on failure. */ static struct ice_vsi * -ice_vsi_setup(struct ice_pf *pf, enum ice_vsi_type type, - struct ice_port_info *pi) -{ - u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; - struct device *dev = &pf->pdev->dev; - struct ice_vsi_ctx ctxt = { 0 }; - struct ice_vsi *vsi; - int ret, i; - - vsi = ice_vsi_alloc(pf, type); - if (!vsi) { - dev_err(dev, "could not allocate VSI\n"); - return NULL; - } - - vsi->port_info = pi; - vsi->vsw = pf->first_sw; - - if (ice_vsi_get_qs(vsi)) { - dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n", - vsi->idx); - goto err_get_qs; - } - - /* set RSS capabilities */ - ice_vsi_set_rss_params(vsi); - - /* create the VSI */ - ret = ice_vsi_add(vsi); - if (ret) - goto err_vsi; - - ctxt.vsi_num = vsi->vsi_num; - - switch (vsi->type) { - case ICE_VSI_PF: - ret = ice_cfg_netdev(vsi); - if (ret) - goto err_cfg_netdev; - - ret = register_netdev(vsi->netdev); - if (ret) - goto err_register_netdev; - - netif_carrier_off(vsi->netdev); - - /* make sure transmit queues start off as stopped */ - netif_tx_stop_all_queues(vsi->netdev); - ret = ice_vsi_alloc_q_vectors(vsi); - if (ret) - goto err_msix; - - ret = ice_vsi_setup_vector_base(vsi); - if (ret) - goto err_rings; - - ret = ice_vsi_alloc_rings(vsi); - if (ret) - goto err_rings; - - ice_vsi_map_rings_to_vectors(vsi); - - /* Do not exit if configuring RSS had an issue, at least - * receive traffic on first queue. 
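On the LUT contents: ice_fill_rss_lut(), seen earlier, spreads queues round-robin across the indirection table; very likely the classic modulo fill, sketched here with worked numbers:

/* Round-robin LUT sketch: with rss_table_size = 128 and rss_size = 10,
 * entries run 0,1,...,9,0,1,... so every queue gets an equal share of
 * hash buckets.
 */
static void fill_rss_lut_sketch(u8 *lut, u16 rss_table_size, u16 rss_size)
{
	u16 i;

	for (i = 0; i < rss_table_size; i++)
		lut[i] = i % rss_size;
}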
Hence no need to capture - * return value - */ - if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) - ice_vsi_cfg_rss(vsi); - break; - default: - /* if vsi type is not recognized, clean up the resources and - * exit - */ - goto err_rings; - } - - ice_vsi_set_tc_cfg(vsi); - - /* configure VSI nodes based on number of queues and TC's */ - for (i = 0; i < vsi->tc_cfg.numtc; i++) - max_txqs[i] = vsi->num_txq; - - ret = ice_cfg_vsi_lan(vsi->port_info, vsi->vsi_num, - vsi->tc_cfg.ena_tc, max_txqs); - if (ret) { - dev_info(&pf->pdev->dev, "Failed VSI lan queue config\n"); - goto err_rings; - } - - return vsi; - -err_rings: - ice_vsi_free_q_vectors(vsi); -err_msix: - if (vsi->netdev && vsi->netdev->reg_state == NETREG_REGISTERED) - unregister_netdev(vsi->netdev); -err_register_netdev: - if (vsi->netdev) { - free_netdev(vsi->netdev); - vsi->netdev = NULL; - } -err_cfg_netdev: - ret = ice_aq_free_vsi(&pf->hw, &ctxt, false, NULL); - if (ret) - dev_err(&vsi->back->pdev->dev, - "Free VSI AQ call failed, err %d\n", ret); -err_vsi: - ice_vsi_put_qs(vsi); -err_get_qs: - pf->q_left_tx += vsi->alloc_txq; - pf->q_left_rx += vsi->alloc_rxq; - ice_vsi_clear(vsi); - - return NULL; -} - -/** - * ice_vsi_add_vlan - Add vsi membership for given vlan - * @vsi: the vsi being configured - * @vid: vlan id to be added - */ -static int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid) +ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) { - struct ice_fltr_list_entry *tmp; - struct ice_pf *pf = vsi->back; - LIST_HEAD(tmp_add_list); - enum ice_status status; - int err = 0; - - tmp = devm_kzalloc(&pf->pdev->dev, sizeof(*tmp), GFP_KERNEL); - if (!tmp) - return -ENOMEM; - - tmp->fltr_info.lkup_type = ICE_SW_LKUP_VLAN; - tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI; - tmp->fltr_info.flag = ICE_FLTR_TX; - tmp->fltr_info.src = vsi->vsi_num; - tmp->fltr_info.fwd_id.vsi_id = vsi->vsi_num; - tmp->fltr_info.l_data.vlan.vlan_id = vid; - - INIT_LIST_HEAD(&tmp->list_entry); - list_add(&tmp->list_entry, &tmp_add_list); - - status = ice_add_vlan(&pf->hw, &tmp_add_list); - if (status) { - err = -ENODEV; - dev_err(&pf->pdev->dev, "Failure Adding VLAN %d on VSI %i\n", - vid, vsi->vsi_num); - } - - ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list); - return err; + return ice_vsi_setup(pf, pi, ICE_VSI_PF, ICE_INVAL_VFID); } /** @@ -2908,7 +1678,7 @@ static int ice_vlan_rx_add_vid(struct net_device *netdev, { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; - int ret = 0; + int ret; if (vid >= VLAN_N_VID) { netdev_err(netdev, "VLAN id requested %d is out of range %d\n", @@ -2919,6 +1689,13 @@ static int ice_vlan_rx_add_vid(struct net_device *netdev, if (vsi->info.pvid) return -EINVAL; + /* Enable VLAN pruning when VLAN 0 is added */ + if (unlikely(!vid)) { + ret = ice_cfg_vlan_pruning(vsi, true); + if (ret) + return ret; + } + /* Add all VLAN ids including 0 to the switch filter. 
VLAN id 0 is * needed to continue allowing all untagged packets since VLAN prune * list is applied to all packets by the switch @@ -2932,38 +1709,6 @@ static int ice_vlan_rx_add_vid(struct net_device *netdev, } /** - * ice_vsi_kill_vlan - Remove VSI membership for a given VLAN - * @vsi: the VSI being configured - * @vid: VLAN id to be removed - */ -static void ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid) -{ - struct ice_fltr_list_entry *list; - struct ice_pf *pf = vsi->back; - LIST_HEAD(tmp_add_list); - - list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL); - if (!list) - return; - - list->fltr_info.lkup_type = ICE_SW_LKUP_VLAN; - list->fltr_info.fwd_id.vsi_id = vsi->vsi_num; - list->fltr_info.fltr_act = ICE_FWD_TO_VSI; - list->fltr_info.l_data.vlan.vlan_id = vid; - list->fltr_info.flag = ICE_FLTR_TX; - list->fltr_info.src = vsi->vsi_num; - - INIT_LIST_HEAD(&list->list_entry); - list_add(&list->list_entry, &tmp_add_list); - - if (ice_remove_vlan(&pf->hw, &tmp_add_list)) - dev_err(&pf->pdev->dev, "Error removing VLAN %d on vsi %i\n", - vid, vsi->vsi_num); - - ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list); -} - -/** * ice_vlan_rx_kill_vid - Remove a vlan id filter from HW offload * @netdev: network interface to be adjusted * @proto: unused protocol @@ -2976,19 +1721,25 @@ static int ice_vlan_rx_kill_vid(struct net_device *netdev, { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; + int status; if (vsi->info.pvid) return -EINVAL; - /* return code is ignored as there is nothing a user - * can do about failure to remove and a log message was - * already printed from the other function + /* Make sure ice_vsi_kill_vlan is successful before updating VLAN + * information */ - ice_vsi_kill_vlan(vsi, vid); + status = ice_vsi_kill_vlan(vsi, vid); + if (status) + return status; clear_bit(vid, vsi->active_vlans); - return 0; + /* Disable VLAN pruning when VLAN 0 is removed */ + if (unlikely(!vid)) + status = ice_cfg_vlan_pruning(vsi, false); + + return status; } /** @@ -3004,59 +1755,73 @@ static int ice_setup_pf_sw(struct ice_pf *pf) struct ice_vsi *vsi; int status = 0; - if (!ice_is_reset_recovery_pending(pf->state)) { - vsi = ice_vsi_setup(pf, ICE_VSI_PF, pf->hw.port_info); - if (!vsi) { - status = -ENOMEM; - goto error_exit; - } - } else { - vsi = pf->vsi[0]; - status = ice_vsi_reinit_setup(vsi); - if (status < 0) - return -EIO; + if (ice_is_reset_in_progress(pf->state)) + return -EBUSY; + + vsi = ice_pf_vsi_setup(pf, pf->hw.port_info); + if (!vsi) { + status = -ENOMEM; + goto unroll_vsi_setup; + } + + status = ice_cfg_netdev(vsi); + if (status) { + status = -ENODEV; + goto unroll_vsi_setup; } - /* tmp_add_list contains a list of MAC addresses for which MAC - * filters need to be programmed. Add the VSI's unicast MAC to - * this list + /* registering the NAPI handler requires both the queues and + * netdev to be created, which are done in ice_pf_vsi_setup() + * and ice_cfg_netdev() respectively + */ + ice_napi_add(vsi); + + /* To add a MAC filter, first add the MAC to a list and then + * pass the list to ice_add_mac. */ + + /* Add a unicast MAC filter so the VSI can get its packets */ status = ice_add_mac_to_list(vsi, &tmp_add_list, vsi->port_info->mac.perm_addr); if (status) - goto error_exit; + goto unroll_napi_add; /* VSI needs to receive broadcast traffic, so add the broadcast - * MAC address to the list. + * MAC address to the list as well. 
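ice_add_mac_to_list() follows the same list-entry pattern as ice_vsi_add_vlan() shown earlier; a sketch of what each node plausibly carries (field names mirror the VLAN code above; the exact l_data.mac member is an assumption):

/* Queue one unicast MAC for a later ice_add_mac() call; mirrors the
 * VLAN filter construction above.
 */
static int add_mac_to_list_sketch(struct ice_vsi *vsi,
				  struct list_head *add_list,
				  const u8 *macaddr)
{
	struct ice_fltr_list_entry *tmp;

	tmp = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	tmp->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
	tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	tmp->fltr_info.fwd_id.vsi_id = vsi->vsi_num;
	ether_addr_copy(tmp->fltr_info.l_data.mac.mac_addr, macaddr); /* member assumed */

	INIT_LIST_HEAD(&tmp->list_entry);
	list_add(&tmp->list_entry, add_list);
	return 0;
}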
*/ eth_broadcast_addr(broadcast); status = ice_add_mac_to_list(vsi, &tmp_add_list, broadcast); if (status) - goto error_exit; + goto free_mac_list; /* program MAC filters for entries in tmp_add_list */ status = ice_add_mac(&pf->hw, &tmp_add_list); if (status) { dev_err(&pf->pdev->dev, "Could not add MAC filters\n"); status = -ENOMEM; - goto error_exit; + goto free_mac_list; } ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list); return status; -error_exit: +free_mac_list: ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list); +unroll_napi_add: if (vsi) { - ice_vsi_free_q_vectors(vsi); - if (vsi->netdev && vsi->netdev->reg_state == NETREG_REGISTERED) - unregister_netdev(vsi->netdev); + ice_napi_del(vsi); if (vsi->netdev) { + if (vsi->netdev->reg_state == NETREG_REGISTERED) + unregister_netdev(vsi->netdev); free_netdev(vsi->netdev); vsi->netdev = NULL; } + } +unroll_vsi_setup: + if (vsi) { + ice_vsi_free_q_vectors(vsi); ice_vsi_delete(vsi); ice_vsi_put_qs(vsi); pf->q_left_tx += vsi->alloc_txq; @@ -3097,10 +1862,7 @@ static void ice_determine_q_usage(struct ice_pf *pf) */ static void ice_deinit_pf(struct ice_pf *pf) { - if (pf->serv_tmr.function) - del_timer_sync(&pf->serv_tmr); - if (pf->serv_task.func) - cancel_work_sync(&pf->serv_task); + ice_service_task_stop(pf); mutex_destroy(&pf->sw_mutex); mutex_destroy(&pf->avail_q_mutex); } @@ -3113,6 +1875,15 @@ static void ice_init_pf(struct ice_pf *pf) { bitmap_zero(pf->flags, ICE_PF_FLAGS_NBITS); set_bit(ICE_FLAG_MSIX_ENA, pf->flags); +#ifdef CONFIG_PCI_IOV + if (pf->hw.func_caps.common_cap.sr_iov_1_1) { + struct ice_hw *hw = &pf->hw; + + set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags); + pf->num_vfs_supported = min_t(int, hw->func_caps.num_allocd_vfs, + ICE_MAX_VF_COUNT); + } +#endif /* CONFIG_PCI_IOV */ mutex_init(&pf->sw_mutex); mutex_init(&pf->avail_q_mutex); @@ -3155,6 +1926,7 @@ static int ice_ena_msix_range(struct ice_pf *pf) /* reserve vectors for LAN traffic */ pf->num_lan_msix = min_t(int, num_online_cpus(), v_left); v_budget += pf->num_lan_msix; + v_left -= pf->num_lan_msix; pf->msix_entries = devm_kcalloc(&pf->pdev->dev, v_budget, sizeof(struct msix_entry), GFP_KERNEL); @@ -3182,10 +1954,11 @@ static int ice_ena_msix_range(struct ice_pf *pf) "not enough vectors. 
requested = %d, obtained = %d\n", v_budget, v_actual); if (v_actual >= (pf->num_lan_msix + 1)) { - pf->num_avail_msix = v_actual - (pf->num_lan_msix + 1); + pf->num_avail_sw_msix = v_actual - + (pf->num_lan_msix + 1); } else if (v_actual >= 2) { pf->num_lan_msix = 1; - pf->num_avail_msix = v_actual - 2; + pf->num_avail_sw_msix = v_actual - 2; } else { pci_disable_msix(pf->pdev); err = -ERANGE; @@ -3218,12 +1991,32 @@ static void ice_dis_msix(struct ice_pf *pf) } /** + * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme + * @pf: board private structure + */ +static void ice_clear_interrupt_scheme(struct ice_pf *pf) +{ + if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) + ice_dis_msix(pf); + + if (pf->sw_irq_tracker) { + devm_kfree(&pf->pdev->dev, pf->sw_irq_tracker); + pf->sw_irq_tracker = NULL; + } + + if (pf->hw_irq_tracker) { + devm_kfree(&pf->pdev->dev, pf->hw_irq_tracker); + pf->hw_irq_tracker = NULL; + } +} + +/** * ice_init_interrupt_scheme - Determine proper interrupt scheme * @pf: board private structure to initialize */ static int ice_init_interrupt_scheme(struct ice_pf *pf) { - int vectors = 0; + int vectors = 0, hw_vectors = 0; ssize_t size; if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) @@ -3237,30 +2030,31 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf) /* set up vector assignment tracking */ size = sizeof(struct ice_res_tracker) + (sizeof(u16) * vectors); - pf->irq_tracker = devm_kzalloc(&pf->pdev->dev, size, GFP_KERNEL); - if (!pf->irq_tracker) { + pf->sw_irq_tracker = devm_kzalloc(&pf->pdev->dev, size, GFP_KERNEL); + if (!pf->sw_irq_tracker) { ice_dis_msix(pf); return -ENOMEM; } - pf->irq_tracker->num_entries = vectors; + /* populate SW interrupts pool with number of OS granted IRQs. */ + pf->num_avail_sw_msix = vectors; + pf->sw_irq_tracker->num_entries = vectors; - return 0; -} + /* set up HW vector assignment tracking */ + hw_vectors = pf->hw.func_caps.common_cap.num_msix_vectors; + size = sizeof(struct ice_res_tracker) + (sizeof(u16) * hw_vectors); -/** - * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme - * @pf: board private structure - */ -static void ice_clear_interrupt_scheme(struct ice_pf *pf) -{ - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) - ice_dis_msix(pf); - - if (pf->irq_tracker) { - devm_kfree(&pf->pdev->dev, pf->irq_tracker); - pf->irq_tracker = NULL; + pf->hw_irq_tracker = devm_kzalloc(&pf->pdev->dev, size, GFP_KERNEL); + if (!pf->hw_irq_tracker) { + ice_clear_interrupt_scheme(pf); + return -ENOMEM; } + + /* populate HW interrupts pool with number of HW supported irqs. */ + pf->num_avail_hw_msix = hw_vectors; + pf->hw_irq_tracker->num_entries = hw_vectors; + + return 0; } /** @@ -3307,6 +2101,8 @@ static int ice_probe(struct pci_dev *pdev, pf->pdev = pdev; pci_set_drvdata(pdev, pf); set_bit(__ICE_DOWN, pf->state); + /* Disable service task until DOWN bit is cleared */ + set_bit(__ICE_SERVICE_DIS, pf->state); hw = &pf->hw; hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0]; @@ -3364,6 +2160,9 @@ static int ice_probe(struct pci_dev *pdev, goto err_init_interrupt_unroll; } + /* Driver is mostly up */ + clear_bit(__ICE_DOWN, pf->state); + /* In case of MSIX we are going to setup the misc vector right here * to handle admin queue events etc. 
In case of legacy and MSI * the misc functionality and queue processing is combined in @@ -3386,7 +2185,11 @@ static int ice_probe(struct pci_dev *pdev, goto err_msix_misc_unroll; } - pf->first_sw->bridge_mode = BRIDGE_MODE_VEB; + if (hw->evb_veb) + pf->first_sw->bridge_mode = BRIDGE_MODE_VEB; + else + pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA; + pf->first_sw->pf = pf; /* record the sw_id available for later use */ @@ -3399,8 +2202,7 @@ static int ice_probe(struct pci_dev *pdev, goto err_alloc_sw_unroll; } - /* Driver is mostly up */ - clear_bit(__ICE_DOWN, pf->state); + clear_bit(__ICE_SERVICE_DIS, pf->state); /* since everything is good, start the service timer */ mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); @@ -3414,6 +2216,7 @@ static int ice_probe(struct pci_dev *pdev, return 0; err_alloc_sw_unroll: + set_bit(__ICE_SERVICE_DIS, pf->state); set_bit(__ICE_DOWN, pf->state); devm_kfree(&pf->pdev->dev, pf->first_sw); err_msix_misc_unroll: @@ -3436,25 +2239,23 @@ err_exit_unroll: static void ice_remove(struct pci_dev *pdev) { struct ice_pf *pf = pci_get_drvdata(pdev); - int i = 0; - int err; + int i; if (!pf) return; set_bit(__ICE_DOWN, pf->state); + ice_service_task_stop(pf); - for (i = 0; i < pf->num_alloc_vsi; i++) { + if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) + ice_free_vfs(pf); + ice_vsi_release_all(pf); + ice_free_irq_msix_misc(pf); + ice_for_each_vsi(pf, i) { if (!pf->vsi[i]) continue; - - err = ice_vsi_release(pf->vsi[i]); - if (err) - dev_dbg(&pf->pdev->dev, "Failed to release VSI index %d (err %d)\n", - i, err); + ice_vsi_free_q_vectors(pf->vsi[i]); } - - ice_free_irq_msix_misc(pf); ice_clear_interrupt_scheme(pf); ice_deinit_pf(pf); ice_deinit_hw(&pf->hw); @@ -3473,8 +2274,6 @@ static const struct pci_device_id ice_pci_tbl[] = { { PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_BACKPLANE), 0 }, { PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_QSFP), 0 }, { PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_SFP), 0 }, - { PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_10G_BASE_T), 0 }, - { PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_SGMII), 0 }, /* required last entry */ { 0, } }; @@ -3485,6 +2284,7 @@ static struct pci_driver ice_driver = { .id_table = ice_pci_tbl, .probe = ice_probe, .remove = ice_remove, + .sriov_configure = ice_sriov_configure, }; /** @@ -3500,7 +2300,7 @@ static int __init ice_module_init(void) pr_info("%s - version %s\n", ice_driver_string, ice_drv_ver); pr_info("%s\n", ice_copyright); - ice_wq = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, KBUILD_MODNAME); + ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME); if (!ice_wq) { pr_err("Failed to create workqueue\n"); return -ENOMEM; @@ -3562,7 +2362,7 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi) } if (test_bit(__ICE_DOWN, pf->state) || - ice_is_reset_recovery_pending(pf->state)) { + ice_is_reset_in_progress(pf->state)) { netdev_err(netdev, "can't set mac %pM. device not ready\n", mac); return -EBUSY; @@ -3722,78 +2522,6 @@ static int ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[], } /** - * ice_vsi_manage_vlan_insertion - Manage VLAN insertion for the VSI for Tx - * @vsi: the vsi being changed - */ -static int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi) -{ - struct device *dev = &vsi->back->pdev->dev; - struct ice_hw *hw = &vsi->back->hw; - struct ice_vsi_ctx ctxt = { 0 }; - enum ice_status status; - - /* Here we are configuring the VSI to let the driver add VLAN tags by - * setting vlan_flags to ICE_AQ_VSI_VLAN_MODE_ALL. 
The actual VLAN tag - * insertion happens in the Tx hot path, in ice_tx_map. - */ - ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL; - - ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); - ctxt.vsi_num = vsi->vsi_num; - - status = ice_aq_update_vsi(hw, &ctxt, NULL); - if (status) { - dev_err(dev, "update VSI for VLAN insert failed, err %d aq_err %d\n", - status, hw->adminq.sq_last_status); - return -EIO; - } - - vsi->info.vlan_flags = ctxt.info.vlan_flags; - return 0; -} - -/** - * ice_vsi_manage_vlan_stripping - Manage VLAN stripping for the VSI for Rx - * @vsi: the vsi being changed - * @ena: boolean value indicating if this is a enable or disable request - */ -static int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena) -{ - struct device *dev = &vsi->back->pdev->dev; - struct ice_hw *hw = &vsi->back->hw; - struct ice_vsi_ctx ctxt = { 0 }; - enum ice_status status; - - /* Here we are configuring what the VSI should do with the VLAN tag in - * the Rx packet. We can either leave the tag in the packet or put it in - * the Rx descriptor. - */ - if (ena) { - /* Strip VLAN tag from Rx packet and put it in the desc */ - ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH; - } else { - /* Disable stripping. Leave tag in packet */ - ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING; - } - - /* Allow all packets untagged/tagged */ - ctxt.info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL; - - ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); - ctxt.vsi_num = vsi->vsi_num; - - status = ice_aq_update_vsi(hw, &ctxt, NULL); - if (status) { - dev_err(dev, "update VSI for VALN strip failed, ena = %d err %d aq_err %d\n", - ena, status, hw->adminq.sq_last_status); - return -EIO; - } - - vsi->info.vlan_flags = ctxt.info.vlan_flags; - return 0; -} - -/** * ice_set_features - set the netdev feature flags * @netdev: ptr to the netdev being adjusted * @features: the feature set that the stack is suggesting @@ -3805,6 +2533,12 @@ static int ice_set_features(struct net_device *netdev, struct ice_vsi *vsi = np->vsi; int ret = 0; + if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH)) + ret = ice_vsi_manage_rss_lut(vsi, true); + else if (!(features & NETIF_F_RXHASH) && + netdev->features & NETIF_F_RXHASH) + ret = ice_vsi_manage_rss_lut(vsi, false); + if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !(netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) ret = ice_vsi_manage_vlan_stripping(vsi, true); @@ -3863,248 +2597,6 @@ static int ice_restore_vlan(struct ice_vsi *vsi) } /** - * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance - * @ring: The Tx ring to configure - * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized - * @pf_q: queue index in the PF space - * - * Configure the Tx descriptor ring in TLAN context. 
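Back in ice_set_features() above, each action keys off an edge (the feature bit newly set or newly cleared); the same test can be written with the conventional XOR idiom, an equivalent sketch rather than the driver's code:

/* Edge-detect sketch for feature toggles: act only when the bit
 * actually changes state.
 */
netdev_features_t changed = netdev->features ^ features;

if (changed & NETIF_F_RXHASH)
	ret = ice_vsi_manage_rss_lut(vsi, !!(features & NETIF_F_RXHASH));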
- */ -static void -ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q) -{ - struct ice_vsi *vsi = ring->vsi; - struct ice_hw *hw = &vsi->back->hw; - - tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S; - - tlan_ctx->port_num = vsi->port_info->lport; - - /* Transmit Queue Length */ - tlan_ctx->qlen = ring->count; - - /* PF number */ - tlan_ctx->pf_num = hw->pf_id; - - /* queue belongs to a specific VSI type - * VF / VM index should be programmed per vmvf_type setting: - * for vmvf_type = VF, it is VF number between 0-256 - * for vmvf_type = VM, it is VM number between 0-767 - * for PF or EMP this field should be set to zero - */ - switch (vsi->type) { - case ICE_VSI_PF: - tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF; - break; - default: - return; - } - - /* make sure the context is associated with the right VSI */ - tlan_ctx->src_vsi = vsi->vsi_num; - - tlan_ctx->tso_ena = ICE_TX_LEGACY; - tlan_ctx->tso_qnum = pf_q; - - /* Legacy or Advanced Host Interface: - * 0: Advanced Host Interface - * 1: Legacy Host Interface - */ - tlan_ctx->legacy_int = ICE_TX_LEGACY; -} - -/** - * ice_vsi_cfg_txqs - Configure the VSI for Tx - * @vsi: the VSI being configured - * - * Return 0 on success and a negative value on error - * Configure the Tx VSI for operation. - */ -static int ice_vsi_cfg_txqs(struct ice_vsi *vsi) -{ - struct ice_aqc_add_tx_qgrp *qg_buf; - struct ice_aqc_add_txqs_perq *txq; - struct ice_pf *pf = vsi->back; - enum ice_status status; - u16 buf_len, i, pf_q; - int err = 0, tc = 0; - u8 num_q_grps; - - buf_len = sizeof(struct ice_aqc_add_tx_qgrp); - qg_buf = devm_kzalloc(&pf->pdev->dev, buf_len, GFP_KERNEL); - if (!qg_buf) - return -ENOMEM; - - if (vsi->num_txq > ICE_MAX_TXQ_PER_TXQG) { - err = -EINVAL; - goto err_cfg_txqs; - } - qg_buf->num_txqs = 1; - num_q_grps = 1; - - /* set up and configure the tx queues */ - ice_for_each_txq(vsi, i) { - struct ice_tlan_ctx tlan_ctx = { 0 }; - - pf_q = vsi->txq_map[i]; - ice_setup_tx_ctx(vsi->tx_rings[i], &tlan_ctx, pf_q); - /* copy context contents into the qg_buf */ - qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q); - ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx, - ice_tlan_ctx_info); - - /* init queue specific tail reg. It is referred as transmit - * comm scheduler queue doorbell. - */ - vsi->tx_rings[i]->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q); - status = ice_ena_vsi_txq(vsi->port_info, vsi->vsi_num, tc, - num_q_grps, qg_buf, buf_len, NULL); - if (status) { - dev_err(&vsi->back->pdev->dev, - "Failed to set LAN Tx queue context, error: %d\n", - status); - err = -ENODEV; - goto err_cfg_txqs; - } - - /* Add Tx Queue TEID into the VSI tx ring from the response - * This will complete configuring and enabling the queue. - */ - txq = &qg_buf->txqs[0]; - if (pf_q == le16_to_cpu(txq->txq_id)) - vsi->tx_rings[i]->txq_teid = - le32_to_cpu(txq->q_teid); - } -err_cfg_txqs: - devm_kfree(&pf->pdev->dev, qg_buf); - return err; -} - -/** - * ice_setup_rx_ctx - Configure a receive ring context - * @ring: The Rx ring to configure - * - * Configure the Rx descriptor ring in RLAN context. 
- */ -static int ice_setup_rx_ctx(struct ice_ring *ring) -{ - struct ice_vsi *vsi = ring->vsi; - struct ice_hw *hw = &vsi->back->hw; - u32 rxdid = ICE_RXDID_FLEX_NIC; - struct ice_rlan_ctx rlan_ctx; - u32 regval; - u16 pf_q; - int err; - - /* what is RX queue number in global space of 2K rx queues */ - pf_q = vsi->rxq_map[ring->q_index]; - - /* clear the context structure first */ - memset(&rlan_ctx, 0, sizeof(rlan_ctx)); - - rlan_ctx.base = ring->dma >> ICE_RLAN_BASE_S; - - rlan_ctx.qlen = ring->count; - - /* Receive Packet Data Buffer Size. - * The Packet Data Buffer Size is defined in 128 byte units. - */ - rlan_ctx.dbuf = vsi->rx_buf_len >> ICE_RLAN_CTX_DBUF_S; - - /* use 32 byte descriptors */ - rlan_ctx.dsize = 1; - - /* Strip the Ethernet CRC bytes before the packet is posted to host - * memory. - */ - rlan_ctx.crcstrip = 1; - - /* L2TSEL flag defines the reported L2 Tags in the receive descriptor */ - rlan_ctx.l2tsel = 1; - - rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT; - rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT; - rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT; - - /* This controls whether VLAN is stripped from inner headers - * The VLAN in the inner L2 header is stripped to the receive - * descriptor if enabled by this flag. - */ - rlan_ctx.showiv = 0; - - /* Max packet size for this queue - must not be set to a larger value - * than 5 x DBUF - */ - rlan_ctx.rxmax = min_t(u16, vsi->max_frame, - ICE_MAX_CHAINED_RX_BUFS * vsi->rx_buf_len); - - /* Rx queue threshold in units of 64 */ - rlan_ctx.lrxqthresh = 1; - - /* Enable Flexible Descriptors in the queue context which - * allows this driver to select a specific receive descriptor format - */ - regval = rd32(hw, QRXFLXP_CNTXT(pf_q)); - regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) & - QRXFLXP_CNTXT_RXDID_IDX_M; - - /* increasing context priority to pick up profile id; - * default is 0x01; setting to 0x03 to ensure profile - * is programming if prev context is of same priority - */ - regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) & - QRXFLXP_CNTXT_RXDID_PRIO_M; - - wr32(hw, QRXFLXP_CNTXT(pf_q), regval); - - /* Absolute queue number out of 2K needs to be passed */ - err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q); - if (err) { - dev_err(&vsi->back->pdev->dev, - "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n", - pf_q, err); - return -EIO; - } - - /* init queue specific tail register */ - ring->tail = hw->hw_addr + QRX_TAIL(pf_q); - writel(0, ring->tail); - ice_alloc_rx_bufs(ring, ICE_DESC_UNUSED(ring)); - - return 0; -} - -/** - * ice_vsi_cfg_rxqs - Configure the VSI for Rx - * @vsi: the VSI being configured - * - * Return 0 on success and a negative value on error - * Configure the Rx VSI for operation. 
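Editor's note: two of the deleted ice_setup_rx_ctx() fields are derived rather than copied, and the comments above pin down the units: dbuf is programmed in 128-byte granules (so the shift ICE_RLAN_CTX_DBUF_S is 7), and rxmax is capped by the five-buffer chaining limit the "5 x DBUF" comment implies. A sketch of just that arithmetic, assuming ICE_MAX_CHAINED_RX_BUFS is 5:

	/* 2048-byte buffers -> dbuf = 2048 >> 7 = 16 granules of 128 B */
	rlan_ctx.dbuf = vsi->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;

	/* rxmax may never exceed 5 chained buffers' worth of data */
	rlan_ctx.rxmax = min_t(u16, vsi->max_frame,
			       ICE_MAX_CHAINED_RX_BUFS * vsi->rx_buf_len);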
- */ -static int ice_vsi_cfg_rxqs(struct ice_vsi *vsi) -{ - int err = 0; - u16 i; - - if (vsi->netdev && vsi->netdev->mtu > ETH_DATA_LEN) - vsi->max_frame = vsi->netdev->mtu + - ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; - else - vsi->max_frame = ICE_RXBUF_2048; - - vsi->rx_buf_len = ICE_RXBUF_2048; - /* set up individual rings */ - for (i = 0; i < vsi->num_rxq && !err; i++) - err = ice_setup_rx_ctx(vsi->rx_rings[i]); - - if (err) { - dev_err(&vsi->back->pdev->dev, "ice_setup_rx_ctx failed\n"); - return -EIO; - } - return err; -} - -/** * ice_vsi_cfg - Setup the VSI * @vsi: the VSI being configured * @@ -4129,200 +2621,6 @@ static int ice_vsi_cfg(struct ice_vsi *vsi) } /** - * ice_vsi_stop_tx_rings - Disable Tx rings - * @vsi: the VSI being configured - */ -static int ice_vsi_stop_tx_rings(struct ice_vsi *vsi) -{ - struct ice_pf *pf = vsi->back; - struct ice_hw *hw = &pf->hw; - enum ice_status status; - u32 *q_teids, val; - u16 *q_ids, i; - int err = 0; - - if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS) - return -EINVAL; - - q_teids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_teids), - GFP_KERNEL); - if (!q_teids) - return -ENOMEM; - - q_ids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_ids), - GFP_KERNEL); - if (!q_ids) { - err = -ENOMEM; - goto err_alloc_q_ids; - } - - /* set up the tx queue list to be disabled */ - ice_for_each_txq(vsi, i) { - u16 v_idx; - - if (!vsi->tx_rings || !vsi->tx_rings[i]) { - err = -EINVAL; - goto err_out; - } - - q_ids[i] = vsi->txq_map[i]; - q_teids[i] = vsi->tx_rings[i]->txq_teid; - - /* clear cause_ena bit for disabled queues */ - val = rd32(hw, QINT_TQCTL(vsi->tx_rings[i]->reg_idx)); - val &= ~QINT_TQCTL_CAUSE_ENA_M; - wr32(hw, QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val); - - /* software is expected to wait for 100 ns */ - ndelay(100); - - /* trigger a software interrupt for the vector associated to - * the queue to schedule napi handler - */ - v_idx = vsi->tx_rings[i]->q_vector->v_idx; - wr32(hw, GLINT_DYN_CTL(vsi->base_vector + v_idx), - GLINT_DYN_CTL_SWINT_TRIG_M | GLINT_DYN_CTL_INTENA_MSK_M); - } - status = ice_dis_vsi_txq(vsi->port_info, vsi->num_txq, q_ids, q_teids, - NULL); - if (status) { - dev_err(&pf->pdev->dev, - "Failed to disable LAN Tx queues, error: %d\n", - status); - err = -ENODEV; - } - -err_out: - devm_kfree(&pf->pdev->dev, q_ids); - -err_alloc_q_ids: - devm_kfree(&pf->pdev->dev, q_teids); - - return err; -} - -/** - * ice_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled - * @pf: the PF being configured - * @pf_q: the PF queue - * @ena: enable or disable state of the queue - * - * This routine will wait for the given Rx queue of the PF to reach the - * enabled or disabled state. - * Returns -ETIMEDOUT in case of failing to reach the requested state after - * multiple retries; else will return 0 in case of success. 
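Editor's note: the MTU-to-frame-size conversion in the deleted ice_vsi_cfg_rxqs() is simple enough to restate standalone. A 9000-byte jumbo MTU yields 9000 + 14 + 4 + 4 = 9022 bytes, while anything at or below ETH_DATA_LEN (1500) falls back to the 2048-byte default buffer:

	#include <stdint.h>

	#define ETH_HLEN	14	/* kernel values, restated for a user-space build */
	#define ETH_FCS_LEN	4
	#define VLAN_HLEN	4
	#define ETH_DATA_LEN	1500
	#define ICE_RXBUF_2048	2048

	static uint16_t ice_calc_max_frame(uint16_t mtu)
	{
		if (mtu > ETH_DATA_LEN)
			return mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
		return ICE_RXBUF_2048;
	}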
- */ -static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena) -{ - int i; - - for (i = 0; i < ICE_Q_WAIT_RETRY_LIMIT; i++) { - u32 rx_reg = rd32(&pf->hw, QRX_CTRL(pf_q)); - - if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M)) - break; - - usleep_range(10, 20); - } - if (i >= ICE_Q_WAIT_RETRY_LIMIT) - return -ETIMEDOUT; - - return 0; -} - -/** - * ice_vsi_ctrl_rx_rings - Start or stop a VSI's rx rings - * @vsi: the VSI being configured - * @ena: start or stop the rx rings - */ -static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena) -{ - struct ice_pf *pf = vsi->back; - struct ice_hw *hw = &pf->hw; - int i, j, ret = 0; - - for (i = 0; i < vsi->num_rxq; i++) { - int pf_q = vsi->rxq_map[i]; - u32 rx_reg; - - for (j = 0; j < ICE_Q_WAIT_MAX_RETRY; j++) { - rx_reg = rd32(hw, QRX_CTRL(pf_q)); - if (((rx_reg >> QRX_CTRL_QENA_REQ_S) & 1) == - ((rx_reg >> QRX_CTRL_QENA_STAT_S) & 1)) - break; - usleep_range(1000, 2000); - } - - /* Skip if the queue is already in the requested state */ - if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M)) - continue; - - /* turn on/off the queue */ - if (ena) - rx_reg |= QRX_CTRL_QENA_REQ_M; - else - rx_reg &= ~QRX_CTRL_QENA_REQ_M; - wr32(hw, QRX_CTRL(pf_q), rx_reg); - - /* wait for the change to finish */ - ret = ice_pf_rxq_wait(pf, pf_q, ena); - if (ret) { - dev_err(&pf->pdev->dev, - "VSI idx %d Rx ring %d %sable timeout\n", - vsi->idx, pf_q, (ena ? "en" : "dis")); - break; - } - } - - return ret; -} - -/** - * ice_vsi_start_rx_rings - start VSI's rx rings - * @vsi: the VSI whose rings are to be started - * - * Returns 0 on success and a negative value on error - */ -static int ice_vsi_start_rx_rings(struct ice_vsi *vsi) -{ - return ice_vsi_ctrl_rx_rings(vsi, true); -} - -/** - * ice_vsi_stop_rx_rings - stop VSI's rx rings - * @vsi: the VSI - * - * Returns 0 on success and a negative value on error - */ -static int ice_vsi_stop_rx_rings(struct ice_vsi *vsi) -{ - return ice_vsi_ctrl_rx_rings(vsi, false); -} - -/** - * ice_vsi_stop_tx_rx_rings - stop VSI's tx and rx rings - * @vsi: the VSI - * Returns 0 on success and a negative value on error - */ -static int ice_vsi_stop_tx_rx_rings(struct ice_vsi *vsi) -{ - int err_tx, err_rx; - - err_tx = ice_vsi_stop_tx_rings(vsi); - if (err_tx) - dev_dbg(&vsi->back->pdev->dev, "Failed to disable Tx rings\n"); - - err_rx = ice_vsi_stop_rx_rings(vsi); - if (err_rx) - dev_dbg(&vsi->back->pdev->dev, "Failed to disable Rx rings\n"); - - if (err_tx || err_rx) - return -EIO; - - return 0; -} - -/** * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI * @vsi: the VSI being configured */ @@ -4419,122 +2717,6 @@ static void ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts, } /** - * ice_stat_update40 - read 40 bit stat from the chip and update stat values - * @hw: ptr to the hardware info - * @hireg: high 32 bit HW register to read from - * @loreg: low 32 bit HW register to read from - * @prev_stat_loaded: bool to specify if previous stats are loaded - * @prev_stat: ptr to previous loaded stat value - * @cur_stat: ptr to current stat value - */ -static void ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg, - bool prev_stat_loaded, u64 *prev_stat, - u64 *cur_stat) -{ - u64 new_data; - - new_data = rd32(hw, loreg); - new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32; - - /* device stats are not reset at PFR, they likely will not be zeroed - * when the driver starts. 
So save the first values read and use them as - * offsets to be subtracted from the raw values in order to report stats - * that count from zero. - */ - if (!prev_stat_loaded) - *prev_stat = new_data; - if (likely(new_data >= *prev_stat)) - *cur_stat = new_data - *prev_stat; - else - /* to manage the potential roll-over */ - *cur_stat = (new_data + BIT_ULL(40)) - *prev_stat; - *cur_stat &= 0xFFFFFFFFFFULL; -} - -/** - * ice_stat_update32 - read 32 bit stat from the chip and update stat values - * @hw: ptr to the hardware info - * @reg: HW register to read from - * @prev_stat_loaded: bool to specify if previous stats are loaded - * @prev_stat: ptr to previous loaded stat value - * @cur_stat: ptr to current stat value - */ -static void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, - u64 *prev_stat, u64 *cur_stat) -{ - u32 new_data; - - new_data = rd32(hw, reg); - - /* device stats are not reset at PFR, they likely will not be zeroed - * when the driver starts. So save the first values read and use them as - * offsets to be subtracted from the raw values in order to report stats - * that count from zero. - */ - if (!prev_stat_loaded) - *prev_stat = new_data; - if (likely(new_data >= *prev_stat)) - *cur_stat = new_data - *prev_stat; - else - /* to manage the potential roll-over */ - *cur_stat = (new_data + BIT_ULL(32)) - *prev_stat; -} - -/** - * ice_update_eth_stats - Update VSI-specific ethernet statistics counters - * @vsi: the VSI to be updated - */ -static void ice_update_eth_stats(struct ice_vsi *vsi) -{ - struct ice_eth_stats *prev_es, *cur_es; - struct ice_hw *hw = &vsi->back->hw; - u16 vsi_num = vsi->vsi_num; /* HW absolute index of a VSI */ - - prev_es = &vsi->eth_stats_prev; - cur_es = &vsi->eth_stats; - - ice_stat_update40(hw, GLV_GORCH(vsi_num), GLV_GORCL(vsi_num), - vsi->stat_offsets_loaded, &prev_es->rx_bytes, - &cur_es->rx_bytes); - - ice_stat_update40(hw, GLV_UPRCH(vsi_num), GLV_UPRCL(vsi_num), - vsi->stat_offsets_loaded, &prev_es->rx_unicast, - &cur_es->rx_unicast); - - ice_stat_update40(hw, GLV_MPRCH(vsi_num), GLV_MPRCL(vsi_num), - vsi->stat_offsets_loaded, &prev_es->rx_multicast, - &cur_es->rx_multicast); - - ice_stat_update40(hw, GLV_BPRCH(vsi_num), GLV_BPRCL(vsi_num), - vsi->stat_offsets_loaded, &prev_es->rx_broadcast, - &cur_es->rx_broadcast); - - ice_stat_update32(hw, GLV_RDPC(vsi_num), vsi->stat_offsets_loaded, - &prev_es->rx_discards, &cur_es->rx_discards); - - ice_stat_update40(hw, GLV_GOTCH(vsi_num), GLV_GOTCL(vsi_num), - vsi->stat_offsets_loaded, &prev_es->tx_bytes, - &cur_es->tx_bytes); - - ice_stat_update40(hw, GLV_UPTCH(vsi_num), GLV_UPTCL(vsi_num), - vsi->stat_offsets_loaded, &prev_es->tx_unicast, - &cur_es->tx_unicast); - - ice_stat_update40(hw, GLV_MPTCH(vsi_num), GLV_MPTCL(vsi_num), - vsi->stat_offsets_loaded, &prev_es->tx_multicast, - &cur_es->tx_multicast); - - ice_stat_update40(hw, GLV_BPTCH(vsi_num), GLV_BPTCL(vsi_num), - vsi->stat_offsets_loaded, &prev_es->tx_broadcast, - &cur_es->tx_broadcast); - - ice_stat_update32(hw, GLV_TEPC(vsi_num), vsi->stat_offsets_loaded, - &prev_es->tx_errors, &cur_es->tx_errors); - - vsi->stat_offsets_loaded = true; -} - -/** * ice_update_vsi_ring_stats - Update VSI stats counters * @vsi: the VSI to be updated */ @@ -4827,7 +3009,7 @@ static void ice_napi_disable_all(struct ice_vsi *vsi) */ int ice_down(struct ice_vsi *vsi) { - int i, err; + int i, tx_err, rx_err; /* Caller of this function is expected to set the * vsi->state __ICE_DOWN bit @@ -4838,7 +3020,18 @@ int ice_down(struct ice_vsi *vsi) } 
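Editor's note: the 40-bit statistics logic deleted above is worth restating on its own. Counters survive a PF reset, so the first read is latched as an offset, and a smaller new value means the register wrapped exactly once. A standalone restatement:

	#include <stdbool.h>
	#include <stdint.h>

	#define BIT_ULL(n)	(1ULL << (n))

	static uint64_t stat_delta40(uint64_t new_data, uint64_t *prev_stat,
				     bool prev_stat_loaded)
	{
		uint64_t cur;

		if (!prev_stat_loaded)		/* first read becomes the offset */
			*prev_stat = new_data;
		if (new_data >= *prev_stat)
			cur = new_data - *prev_stat;
		else				/* 40-bit counter rolled over */
			cur = (new_data + BIT_ULL(40)) - *prev_stat;
		return cur & 0xFFFFFFFFFFULL;	/* keep the low 40 bits */
	}

The 32-bit variant is identical except for BIT_ULL(32) and no final mask.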
ice_vsi_dis_irq(vsi); - err = ice_vsi_stop_tx_rx_rings(vsi); + tx_err = ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0); + if (tx_err) + netdev_err(vsi->netdev, + "Failed stop Tx rings, VSI %d error %d\n", + vsi->vsi_num, tx_err); + + rx_err = ice_vsi_stop_rx_rings(vsi); + if (rx_err) + netdev_err(vsi->netdev, + "Failed stop Rx rings, VSI %d error %d\n", + vsi->vsi_num, rx_err); + ice_napi_disable_all(vsi); ice_for_each_txq(vsi, i) @@ -4847,10 +3040,14 @@ int ice_down(struct ice_vsi *vsi) ice_for_each_rxq(vsi, i) ice_clean_rx_ring(vsi->rx_rings[i]); - if (err) - netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n", + if (tx_err || rx_err) { + netdev_err(vsi->netdev, + "Failed to close VSI 0x%04X on switch 0x%04X\n", vsi->vsi_num, vsi->vsw->sw_id); - return err; + return -EIO; + } + + return 0; } /** @@ -4870,6 +3067,7 @@ static int ice_vsi_setup_tx_rings(struct ice_vsi *vsi) } ice_for_each_txq(vsi, i) { + vsi->tx_rings[i]->netdev = vsi->netdev; err = ice_setup_tx_ring(vsi->tx_rings[i]); if (err) break; @@ -4895,6 +3093,7 @@ static int ice_vsi_setup_rx_rings(struct ice_vsi *vsi) } ice_for_each_rxq(vsi, i) { + vsi->rx_rings[i]->netdev = vsi->netdev; err = ice_setup_rx_ring(vsi->rx_rings[i]); if (err) break; @@ -4922,38 +3121,6 @@ static int ice_vsi_req_irq(struct ice_vsi *vsi, char *basename) } /** - * ice_vsi_free_tx_rings - Free Tx resources for VSI queues - * @vsi: the VSI having resources freed - */ -static void ice_vsi_free_tx_rings(struct ice_vsi *vsi) -{ - int i; - - if (!vsi->tx_rings) - return; - - ice_for_each_txq(vsi, i) - if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) - ice_free_tx_ring(vsi->tx_rings[i]); -} - -/** - * ice_vsi_free_rx_rings - Free Rx resources for VSI queues - * @vsi: the VSI having resources freed - */ -static void ice_vsi_free_rx_rings(struct ice_vsi *vsi) -{ - int i; - - if (!vsi->rx_rings) - return; - - ice_for_each_rxq(vsi, i) - if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc) - ice_free_rx_ring(vsi->rx_rings[i]); -} - -/** * ice_vsi_open - Called when a network interface is made active * @vsi: the VSI to open * @@ -5014,78 +3181,26 @@ err_setup_tx: } /** - * ice_vsi_close - Shut down a VSI - * @vsi: the VSI being shut down - */ -static void ice_vsi_close(struct ice_vsi *vsi) -{ - if (!test_and_set_bit(__ICE_DOWN, vsi->state)) - ice_down(vsi); - - ice_vsi_free_irq(vsi); - ice_vsi_free_tx_rings(vsi); - ice_vsi_free_rx_rings(vsi); -} - -/** - * ice_rss_clean - Delete RSS related VSI structures that hold user inputs - * @vsi: the VSI being removed + * ice_vsi_release_all - Delete all VSIs + * @pf: PF from which all VSIs are being removed */ -static void ice_rss_clean(struct ice_vsi *vsi) +static void ice_vsi_release_all(struct ice_pf *pf) { - struct ice_pf *pf; + int err, i; - pf = vsi->back; - - if (vsi->rss_hkey_user) - devm_kfree(&pf->pdev->dev, vsi->rss_hkey_user); - if (vsi->rss_lut_user) - devm_kfree(&pf->pdev->dev, vsi->rss_lut_user); -} - -/** - * ice_vsi_release - Delete a VSI and free its resources - * @vsi: the VSI being removed - * - * Returns 0 on success or < 0 on error - */ -static int ice_vsi_release(struct ice_vsi *vsi) -{ - struct ice_pf *pf; + if (!pf->vsi) + return; - if (!vsi->back) - return -ENODEV; - pf = vsi->back; + for (i = 0; i < pf->num_alloc_vsi; i++) { + if (!pf->vsi[i]) + continue; - if (vsi->netdev) { - unregister_netdev(vsi->netdev); - free_netdev(vsi->netdev); - vsi->netdev = NULL; + err = ice_vsi_release(pf->vsi[i]); + if (err) + dev_dbg(&pf->pdev->dev, + "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n", 
+ i, err, pf->vsi[i]->vsi_num); } - - if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) - ice_rss_clean(vsi); - - /* Disable VSI and free resources */ - ice_vsi_dis_irq(vsi); - ice_vsi_close(vsi); - - /* reclaim interrupt vectors back to PF */ - ice_free_res(vsi->back->irq_tracker, vsi->base_vector, vsi->idx); - pf->num_avail_msix += vsi->num_q_vectors; - - ice_remove_vsi_fltr(&pf->hw, vsi->vsi_num); - ice_vsi_delete(vsi); - ice_vsi_free_q_vectors(vsi); - ice_vsi_clear_rings(vsi); - - ice_vsi_put_qs(vsi); - pf->q_left_tx += vsi->alloc_txq; - pf->q_left_rx += vsi->alloc_rxq; - - ice_vsi_clear(vsi); - - return 0; } /** @@ -5099,28 +3214,37 @@ static void ice_dis_vsi(struct ice_vsi *vsi) set_bit(__ICE_NEEDS_RESTART, vsi->state); - if (vsi->netdev && netif_running(vsi->netdev) && - vsi->type == ICE_VSI_PF) - vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); - - ice_vsi_close(vsi); + if (vsi->type == ICE_VSI_PF && vsi->netdev) { + if (netif_running(vsi->netdev)) { + rtnl_lock(); + vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); + rtnl_unlock(); + } else { + ice_vsi_close(vsi); + } + } } /** * ice_ena_vsi - resume a VSI * @vsi: the VSI being resumed */ -static void ice_ena_vsi(struct ice_vsi *vsi) +static int ice_ena_vsi(struct ice_vsi *vsi) { - if (!test_and_clear_bit(__ICE_NEEDS_RESTART, vsi->state)) - return; + int err = 0; + + if (test_and_clear_bit(__ICE_NEEDS_RESTART, vsi->state) && + vsi->netdev) { + if (netif_running(vsi->netdev)) { + rtnl_lock(); + err = vsi->netdev->netdev_ops->ndo_open(vsi->netdev); + rtnl_unlock(); + } else { + err = ice_vsi_open(vsi); + } + } - if (vsi->netdev && netif_running(vsi->netdev)) - vsi->netdev->netdev_ops->ndo_open(vsi->netdev); - else if (ice_vsi_open(vsi)) - /* this clears the DOWN bit */ - dev_dbg(&vsi->back->pdev->dev, "Failed open VSI 0x%04X on switch 0x%04X\n", - vsi->vsi_num, vsi->vsw->sw_id); + return err; } /** @@ -5140,13 +3264,89 @@ static void ice_pf_dis_all_vsi(struct ice_pf *pf) * ice_pf_ena_all_vsi - Resume all VSIs on a PF * @pf: the PF */ -static void ice_pf_ena_all_vsi(struct ice_pf *pf) +static int ice_pf_ena_all_vsi(struct ice_pf *pf) { int v; ice_for_each_vsi(pf, v) if (pf->vsi[v]) - ice_ena_vsi(pf->vsi[v]); + if (ice_ena_vsi(pf->vsi[v])) + return -EIO; + + return 0; +} + +/** + * ice_vsi_rebuild_all - rebuild all VSIs in pf + * @pf: the PF + */ +static int ice_vsi_rebuild_all(struct ice_pf *pf) +{ + int i; + + /* loop through pf->vsi array and reinit the VSI if found */ + for (i = 0; i < pf->num_alloc_vsi; i++) { + int err; + + if (!pf->vsi[i]) + continue; + + /* VF VSI rebuild isn't supported yet */ + if (pf->vsi[i]->type == ICE_VSI_VF) + continue; + + err = ice_vsi_rebuild(pf->vsi[i]); + if (err) { + dev_err(&pf->pdev->dev, + "VSI at index %d rebuild failed\n", + pf->vsi[i]->idx); + return err; + } + + dev_info(&pf->pdev->dev, + "VSI at index %d rebuilt.
vsi_num = 0x%x\n", + pf->vsi[i]->idx, pf->vsi[i]->vsi_num); } + + return 0; +} + +/** + * ice_vsi_replay_all - replay the configuration of all VSIs in the PF + * @pf: the PF + */ +static int ice_vsi_replay_all(struct ice_pf *pf) +{ + struct ice_hw *hw = &pf->hw; + enum ice_status ret; + int i; + + /* loop through pf->vsi array and replay the VSI if found */ + for (i = 0; i < pf->num_alloc_vsi; i++) { + if (!pf->vsi[i]) + continue; + + ret = ice_replay_vsi(hw, pf->vsi[i]->idx); + if (ret) { + dev_err(&pf->pdev->dev, + "VSI at index %d replay failed %d\n", + pf->vsi[i]->idx, ret); + return -EIO; + } + + /* Re-map HW VSI number, using VSI handle that has been + * previously validated in ice_replay_vsi() call above + */ + pf->vsi[i]->vsi_num = ice_get_hw_vsi_num(hw, pf->vsi[i]->idx); + + dev_info(&pf->pdev->dev, + "VSI at index %d filter replayed successfully - vsi_num %i\n", + pf->vsi[i]->idx, pf->vsi[i]->vsi_num); + } + + /* Clean up replay filter after successful re-configuration */ + ice_replay_post(hw); + return 0; } /** @@ -5168,13 +3368,13 @@ static void ice_rebuild(struct ice_pf *pf) ret = ice_init_all_ctrlq(hw); if (ret) { dev_err(dev, "control queues init failed %d\n", ret); - goto fail_reset; + goto err_init_ctrlq; } ret = ice_clear_pf_cfg(hw); if (ret) { dev_err(dev, "clear PF configuration failed %d\n", ret); - goto fail_reset; + goto err_init_ctrlq; } ice_clear_pxe_mode(hw); @@ -5182,14 +3382,34 @@ static void ice_rebuild(struct ice_pf *pf) ret = ice_get_caps(hw); if (ret) { dev_err(dev, "ice_get_caps failed %d\n", ret); - goto fail_reset; + goto err_init_ctrlq; } - /* basic nic switch setup */ - err = ice_setup_pf_sw(pf); + err = ice_sched_init_port(hw->port_info); + if (err) + goto err_sched_init_port; + + /* reset search_hint of irq_trackers to 0 since interrupts are + * reclaimed and could be allocated from beginning during VSI rebuild + */ + pf->sw_irq_tracker->search_hint = 0; + pf->hw_irq_tracker->search_hint = 0; + + err = ice_vsi_rebuild_all(pf); if (err) { - dev_err(dev, "ice_setup_pf_sw failed\n"); - goto fail_reset; + dev_err(dev, "ice_vsi_rebuild_all failed\n"); + goto err_vsi_rebuild; + } + + err = ice_update_link_info(hw->port_info); + if (err) + dev_err(&pf->pdev->dev, "Get link status error %d\n", err); + + /* Replay all VSI configurations, including filters, after reset */ + if (ice_vsi_replay_all(pf)) { + dev_err(&pf->pdev->dev, + "error replaying VSI configurations with switch filter rules\n"); + goto err_vsi_rebuild; } /* start misc vector */ @@ -5197,20 +3417,36 @@ static void ice_rebuild(struct ice_pf *pf) err = ice_req_irq_msix_misc(pf); if (err) { dev_err(dev, "misc vector setup failed: %d\n", err); - goto fail_reset; + goto err_vsi_rebuild; } } /* restart the VSIs that were rebuilt and running before the reset */ - ice_pf_ena_all_vsi(pf); + err = ice_pf_ena_all_vsi(pf); + if (err) { + dev_err(&pf->pdev->dev, "error enabling VSIs\n"); + /* no need to disable VSIs in tear down path in ice_rebuild() + * since it's already taken care of in ice_vsi_open() + */ + goto err_vsi_rebuild; + } + ice_reset_all_vfs(pf, true); + /* if we get here, reset flow is successful */ + clear_bit(__ICE_RESET_FAILED, pf->state); return; -fail_reset: +err_vsi_rebuild: + ice_vsi_release_all(pf); +err_sched_init_port: + ice_sched_cleanup_all(hw); +err_init_ctrlq: ice_shutdown_all_ctrlq(hw); set_bit(__ICE_RESET_FAILED, pf->state); clear_recovery: - set_bit(__ICE_RESET_RECOVERY_PENDING, pf->state); + /* set this bit in PF state to control service task scheduling */ +
set_bit(__ICE_NEEDS_RESTART, pf->state); + dev_err(dev, "Rebuild failed, unload and reload driver\n"); } /** @@ -5243,7 +3479,7 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu) } /* if a reset is in progress, wait for some time for it to complete */ do { - if (ice_is_reset_recovery_pending(pf->state)) { + if (ice_is_reset_in_progress(pf->state)) { count++; usleep_range(1000, 2000); } else { @@ -5299,7 +3535,7 @@ int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) struct ice_aqc_get_set_rss_keys *buf = (struct ice_aqc_get_set_rss_keys *)seed; - status = ice_aq_set_rss_key(hw, vsi->vsi_num, buf); + status = ice_aq_set_rss_key(hw, vsi->idx, buf); if (status) { dev_err(&pf->pdev->dev, @@ -5310,8 +3546,8 @@ int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) } if (lut) { - status = ice_aq_set_rss_lut(hw, vsi->vsi_num, - vsi->rss_lut_type, lut, lut_size); + status = ice_aq_set_rss_lut(hw, vsi->idx, vsi->rss_lut_type, + lut, lut_size); if (status) { dev_err(&pf->pdev->dev, "Cannot set RSS lut, err %d aq_err %d\n", @@ -5342,7 +3578,7 @@ int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) struct ice_aqc_get_set_rss_keys *buf = (struct ice_aqc_get_set_rss_keys *)seed; - status = ice_aq_get_rss_key(hw, vsi->vsi_num, buf); + status = ice_aq_get_rss_key(hw, vsi->idx, buf); if (status) { dev_err(&pf->pdev->dev, "Cannot get RSS key, err %d aq_err %d\n", @@ -5352,8 +3588,8 @@ int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) } if (lut) { - status = ice_aq_get_rss_lut(hw, vsi->vsi_num, - vsi->rss_lut_type, lut, lut_size); + status = ice_aq_get_rss_lut(hw, vsi->idx, vsi->rss_lut_type, + lut, lut_size); if (status) { dev_err(&pf->pdev->dev, "Cannot get RSS lut, err %d aq_err %d\n", @@ -5366,6 +3602,232 @@ int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) } /** + * ice_bridge_getlink - Get the hardware bridge mode + * @skb: skb buff + * @pid: process id + * @seq: RTNL message seq + * @dev: the netdev being configured + * @filter_mask: filter mask passed in + * @nlflags: netlink flags passed in + * + * Return the bridge mode (VEB/VEPA) + */ +static int +ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *dev, u32 filter_mask, int nlflags) +{ + struct ice_netdev_priv *np = netdev_priv(dev); + struct ice_vsi *vsi = np->vsi; + struct ice_pf *pf = vsi->back; + u16 bmode; + + bmode = pf->first_sw->bridge_mode; + + return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags, + filter_mask, NULL); +} + +/** + * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA) + * @vsi: Pointer to VSI structure + * @bmode: Hardware bridge mode (VEB/VEPA) + * + * Returns 0 on success, negative on failure + */ +static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode) +{ + struct device *dev = &vsi->back->pdev->dev; + struct ice_aqc_vsi_props *vsi_props; + struct ice_hw *hw = &vsi->back->hw; + struct ice_vsi_ctx ctxt = { 0 }; + enum ice_status status; + + vsi_props = &vsi->info; + ctxt.info = vsi->info; + + if (bmode == BRIDGE_MODE_VEB) + /* change from VEPA to VEB mode */ + ctxt.info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB; + else + /* change from VEB to VEPA mode */ + ctxt.info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB; + ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID); + + status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL); + if (status) { + dev_err(dev, "update VSI for bridge mode failed, bmode = %d err %d aq_err %d\n", + 
+ bmode, status, hw->adminq.sq_last_status); + return -EIO; + } + /* Update sw flags for bookkeeping */ + vsi_props->sw_flags = ctxt.info.sw_flags; + + return 0; +} + +/** + * ice_bridge_setlink - Set the hardware bridge mode + * @dev: the netdev being configured + * @nlh: RTNL message + * @flags: bridge setlink flags + * + * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is + * hooked up. Iterates through the PF VSI list and sets the loopback mode (if + * not already set) for all VSIs connected to this switch. Also updates the + * unicast switch filter rules for the corresponding switch of the netdev. + */ +static int +ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, + u16 __always_unused flags) +{ + struct ice_netdev_priv *np = netdev_priv(dev); + struct ice_pf *pf = np->vsi->back; + struct nlattr *attr, *br_spec; + struct ice_hw *hw = &pf->hw; + enum ice_status status; + struct ice_sw *pf_sw; + int rem, v, err = 0; + + pf_sw = pf->first_sw; + /* find the attribute in the netlink message */ + br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); + + nla_for_each_nested(attr, br_spec, rem) { + __u16 mode; + + if (nla_type(attr) != IFLA_BRIDGE_MODE) + continue; + mode = nla_get_u16(attr); + if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB) + return -EINVAL; + /* Continue if bridge mode is not being flipped */ + if (mode == pf_sw->bridge_mode) + continue; + /* Iterate through the PF VSI list and update the loopback + * mode of each VSI + */ + ice_for_each_vsi(pf, v) { + if (!pf->vsi[v]) + continue; + err = ice_vsi_update_bridge_mode(pf->vsi[v], mode); + if (err) + return err; + } + + hw->evb_veb = (mode == BRIDGE_MODE_VEB); + /* Update the unicast switch filter rules for the corresponding + * switch of the netdev + */ + status = ice_update_sw_rule_bridge_mode(hw); + if (status) { + netdev_err(dev, "update SW_RULE for bridge mode failed, mode = %d err %d aq_err %d\n", + mode, status, hw->adminq.sq_last_status); + /* revert hw->evb_veb */ + hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB); + return -EIO; + } + + pf_sw->bridge_mode = mode; + } + + return 0; +} + +/** + * ice_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + */ +static void ice_tx_timeout(struct net_device *netdev) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_ring *tx_ring = NULL; + struct ice_vsi *vsi = np->vsi; + struct ice_pf *pf = vsi->back; + u32 head, val = 0, i; + int hung_queue = -1; + + pf->tx_timeout_count++; + + /* find the stopped queue the same way the stack does */ + for (i = 0; i < netdev->num_tx_queues; i++) { + struct netdev_queue *q; + unsigned long trans_start; + + q = netdev_get_tx_queue(netdev, i); + trans_start = q->trans_start; + if (netif_xmit_stopped(q) && + time_after(jiffies, + (trans_start + netdev->watchdog_timeo))) { + hung_queue = i; + break; + } + } + + if (i == netdev->num_tx_queues) { + netdev_info(netdev, "tx_timeout: no netdev hung queue found\n"); + } else { + /* now that we have an index, find the tx_ring struct */ + for (i = 0; i < vsi->num_txq; i++) { + if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) { + if (hung_queue == + vsi->tx_rings[i]->q_index) { + tx_ring = vsi->tx_rings[i]; + break; + } + } + } + } + + /* Reset recovery level if enough time has elapsed after last timeout. + * Also ensure no new reset action happens before next timeout period.
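Editor's note: the queue scan at the top of ice_tx_timeout() above mirrors the stack's own watchdog test. Pulled out as a sketch, using the same kernel helpers the hunk already relies on:

	static int ice_find_hung_queue(struct net_device *netdev)
	{
		unsigned int i;

		for (i = 0; i < netdev->num_tx_queues; i++) {
			struct netdev_queue *q = netdev_get_tx_queue(netdev, i);

			/* stopped and silent past the watchdog window => hung */
			if (netif_xmit_stopped(q) &&
			    time_after(jiffies,
				       q->trans_start + netdev->watchdog_timeo))
				return i;
		}
		return -1;	/* no hung queue */
	}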
+ */ + if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20))) + pf->tx_timeout_recovery_level = 1; + else if (time_before(jiffies, (pf->tx_timeout_last_recovery + + netdev->watchdog_timeo))) + return; + + if (tx_ring) { + head = tx_ring->next_to_clean; + /* Read interrupt register */ + if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) + val = rd32(&pf->hw, + GLINT_DYN_CTL(tx_ring->q_vector->v_idx + + tx_ring->vsi->hw_base_vector)); + + netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n", + vsi->vsi_num, hung_queue, tx_ring->next_to_clean, + head, tx_ring->next_to_use, + readl(tx_ring->tail), val); + } + + pf->tx_timeout_last_recovery = jiffies; + netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n", + pf->tx_timeout_recovery_level, hung_queue); + + switch (pf->tx_timeout_recovery_level) { + case 1: + set_bit(__ICE_PFR_REQ, pf->state); + break; + case 2: + set_bit(__ICE_CORER_REQ, pf->state); + break; + case 3: + set_bit(__ICE_GLOBR_REQ, pf->state); + break; + default: + netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n"); + set_bit(__ICE_DOWN, pf->state); + set_bit(__ICE_NEEDS_RESTART, vsi->state); + set_bit(__ICE_SERVICE_DIS, pf->state); + break; + } + + ice_service_task_schedule(pf); + pf->tx_timeout_recovery_level++; +} + +/** * ice_open - Called when a network interface becomes active * @netdev: network interface device structure * @@ -5383,6 +3845,11 @@ static int ice_open(struct net_device *netdev) struct ice_vsi *vsi = np->vsi; int err; + if (test_bit(__ICE_NEEDS_RESTART, vsi->back->state)) { + netdev_err(netdev, "driver needs to be unloaded and reloaded\n"); + return -EIO; + } + netif_carrier_off(netdev); err = ice_vsi_open(vsi); @@ -5473,9 +3940,18 @@ static const struct net_device_ops ice_netdev_ops = { .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = ice_change_mtu, .ndo_get_stats64 = ice_get_stats64, + .ndo_set_vf_spoofchk = ice_set_vf_spoofchk, + .ndo_set_vf_mac = ice_set_vf_mac, + .ndo_get_vf_config = ice_get_vf_cfg, + .ndo_set_vf_trust = ice_set_vf_trust, + .ndo_set_vf_vlan = ice_set_vf_port_vlan, + .ndo_set_vf_link_state = ice_set_vf_link_state, .ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid, .ndo_set_features = ice_set_features, + .ndo_bridge_getlink = ice_bridge_getlink, + .ndo_bridge_setlink = ice_bridge_setlink, .ndo_fdb_add = ice_fdb_add, .ndo_fdb_del = ice_fdb_del, + .ndo_tx_timeout = ice_tx_timeout, }; diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c index 295a8cd87fc1..3274c543283c 100644 --- a/drivers/net/ethernet/intel/ice/ice_nvm.c +++ b/drivers/net/ethernet/intel/ice/ice_nvm.c @@ -137,7 +137,7 @@ ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access) if (hw->nvm.blank_nvm_mode) return 0; - return ice_acquire_res(hw, ICE_NVM_RES_ID, access); + return ice_acquire_res(hw, ICE_NVM_RES_ID, access, ICE_NVM_TIMEOUT); } /** diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c index eeae199469b6..7cc8aa18a22b 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.c +++ b/drivers/net/ethernet/intel/ice/ice_sched.c @@ -17,7 +17,6 @@ ice_sched_add_root_node(struct ice_port_info *pi, { struct ice_sched_node *root; struct ice_hw *hw; - u16 max_children; if (!pi) return ICE_ERR_PARAM; @@ -28,8 +27,8 @@ ice_sched_add_root_node(struct ice_port_info *pi, if (!root) return ICE_ERR_NO_MEMORY; - 
max_children = le16_to_cpu(hw->layer_info[0].max_children); - root->children = devm_kcalloc(ice_hw_to_dev(hw), max_children, + /* coverity[suspicious_sizeof] */ + root->children = devm_kcalloc(ice_hw_to_dev(hw), hw->max_children[0], sizeof(*root), GFP_KERNEL); if (!root->children) { devm_kfree(ice_hw_to_dev(hw), root); @@ -86,6 +85,62 @@ ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid) } /** + * ice_aq_query_sched_elems - query scheduler elements + * @hw: pointer to the hw struct + * @elems_req: number of elements to query + * @buf: pointer to buffer + * @buf_size: buffer size in bytes + * @elems_ret: returns total number of elements returned + * @cd: pointer to command details structure or NULL + * + * Query scheduling elements (0x0404) + */ +static enum ice_status +ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req, + struct ice_aqc_get_elem *buf, u16 buf_size, + u16 *elems_ret, struct ice_sq_cd *cd) +{ + struct ice_aqc_get_cfg_elem *cmd; + struct ice_aq_desc desc; + enum ice_status status; + + cmd = &desc.params.get_update_elem; + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sched_elems); + cmd->num_elem_req = cpu_to_le16(elems_req); + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); + if (!status && elems_ret) + *elems_ret = le16_to_cpu(cmd->num_elem_resp); + + return status; +} + +/** + * ice_sched_query_elem - query element information from hw + * @hw: pointer to the hw struct + * @node_teid: node teid to be queried + * @buf: buffer to element information + * + * This function queries HW element information + */ +static enum ice_status +ice_sched_query_elem(struct ice_hw *hw, u32 node_teid, + struct ice_aqc_get_elem *buf) +{ + u16 buf_size, num_elem_ret = 0; + enum ice_status status; + + buf_size = sizeof(*buf); + memset(buf, 0, buf_size); + buf->generic[0].node_teid = cpu_to_le32(node_teid); + status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret, + NULL); + if (status || num_elem_ret != 1) + ice_debug(hw, ICE_DBG_SCHED, "query element failed\n"); + return status; +} + +/** * ice_sched_add_node - Insert the Tx scheduler node in SW DB * @pi: port information structure * @layer: Scheduler layer of the node @@ -98,9 +153,10 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer, struct ice_aqc_txsched_elem_data *info) { struct ice_sched_node *parent; + struct ice_aqc_get_elem elem; struct ice_sched_node *node; + enum ice_status status; struct ice_hw *hw; - u16 max_children; if (!pi) return ICE_ERR_PARAM; @@ -117,12 +173,20 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer, return ICE_ERR_PARAM; } + /* query the current node information from FW before adding it + * to the SW DB + */ + status = ice_sched_query_elem(hw, le32_to_cpu(info->node_teid), &elem); + if (status) + return status; + node = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*node), GFP_KERNEL); if (!node) return ICE_ERR_NO_MEMORY; - max_children = le16_to_cpu(hw->layer_info[layer].max_children); - if (max_children) { - node->children = devm_kcalloc(ice_hw_to_dev(hw), max_children, + if (hw->max_children[layer]) { + /* coverity[suspicious_sizeof] */ + node->children = devm_kcalloc(ice_hw_to_dev(hw), + hw->max_children[layer], sizeof(*node), GFP_KERNEL); if (!node->children) { devm_kfree(ice_hw_to_dev(hw), node); @@ -134,7 +198,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer, node->parent = parent; node->tx_sched_layer = layer; parent->children[parent->num_children++] = node; - memcpy(&node->info, info,
sizeof(*info)); + memcpy(&node->info, &elem.generic[0], sizeof(node->info)); return 0; } @@ -192,14 +256,17 @@ ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent, buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL); if (!buf) return ICE_ERR_NO_MEMORY; + buf->hdr.parent_teid = parent->info.node_teid; buf->hdr.num_elems = cpu_to_le16(num_nodes); for (i = 0; i < num_nodes; i++) buf->teid[i] = cpu_to_le32(node_teids[i]); + status = ice_aq_delete_sched_elems(hw, 1, buf, buf_size, &num_groups_removed, NULL); if (status || num_groups_removed != 1) ice_debug(hw, ICE_DBG_SCHED, "remove elements failed\n"); + devm_kfree(ice_hw_to_dev(hw), buf); return status; } @@ -532,9 +599,7 @@ ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids, static void ice_sched_clear_tx_topo(struct ice_port_info *pi) { struct ice_sched_agg_info *agg_info; - struct ice_sched_vsi_info *vsi_elem; struct ice_sched_agg_info *atmp; - struct ice_sched_vsi_info *tmp; struct ice_hw *hw; if (!pi) @@ -553,13 +618,6 @@ static void ice_sched_clear_tx_topo(struct ice_port_info *pi) } } - /* remove the vsi list */ - list_for_each_entry_safe(vsi_elem, tmp, &pi->vsi_info_list, - list_entry) { - list_del(&vsi_elem->list_entry); - devm_kfree(ice_hw_to_dev(hw), vsi_elem); - } - if (pi->root) { ice_free_sched_node(pi, pi->root); pi->root = NULL; @@ -592,13 +650,16 @@ static void ice_sched_clear_port(struct ice_port_info *pi) */ void ice_sched_cleanup_all(struct ice_hw *hw) { - if (!hw || !hw->port_info) + if (!hw) return; - if (hw->layer_info) + if (hw->layer_info) { devm_kfree(ice_hw_to_dev(hw), hw->layer_info); + hw->layer_info = NULL; + } - ice_sched_clear_port(hw->port_info); + if (hw->port_info) + ice_sched_clear_port(hw->port_info); hw->num_tx_sched_layers = 0; hw->num_tx_sched_phys_layers = 0; @@ -607,31 +668,6 @@ void ice_sched_cleanup_all(struct ice_hw *hw) } /** - * ice_sched_create_vsi_info_entry - create an empty new VSI entry - * @pi: port information structure - * @vsi_id: VSI Id - * - * This function creates a new VSI entry and adds it to list - */ -static struct ice_sched_vsi_info * -ice_sched_create_vsi_info_entry(struct ice_port_info *pi, u16 vsi_id) -{ - struct ice_sched_vsi_info *vsi_elem; - - if (!pi) - return NULL; - - vsi_elem = devm_kzalloc(ice_hw_to_dev(pi->hw), sizeof(*vsi_elem), - GFP_KERNEL); - if (!vsi_elem) - return NULL; - - list_add(&vsi_elem->list_entry, &pi->vsi_info_list); - vsi_elem->vsi_id = vsi_id; - return vsi_elem; -} - -/** * ice_sched_add_elems - add nodes to hw and SW DB * @pi: port information structure * @tc_node: pointer to the branch node @@ -671,9 +707,13 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node, ICE_AQC_ELEM_VALID_EIR; buf->generic[i].data.generic = 0; buf->generic[i].data.cir_bw.bw_profile_idx = - ICE_SCHED_DFLT_RL_PROF_ID; + cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); + buf->generic[i].data.cir_bw.bw_alloc = + cpu_to_le16(ICE_SCHED_DFLT_BW_WT); buf->generic[i].data.eir_bw.bw_profile_idx = - ICE_SCHED_DFLT_RL_PROF_ID; + cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); + buf->generic[i].data.eir_bw.bw_alloc = + cpu_to_le16(ICE_SCHED_DFLT_BW_WT); } status = ice_aq_add_sched_elems(hw, 1, buf, buf_size, @@ -697,7 +737,6 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node, teid = le32_to_cpu(buf->generic[i].node_teid); new_node = ice_sched_find_node_by_teid(parent, teid); - if (!new_node) { ice_debug(hw, ICE_DBG_SCHED, "Node is missing for teid =%d\n", teid); @@ -710,7 +749,6 @@ 
ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node, /* add it to previous node sibling pointer */ /* Note: siblings are not linked across branches */ prev = ice_sched_get_first_node(hw, tc_node, layer); - if (prev && prev != new_node) { while (prev->sibling) prev = prev->sibling; @@ -760,8 +798,7 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi, return ICE_ERR_PARAM; /* max children per node per layer */ - max_child_nodes = - le16_to_cpu(hw->layer_info[parent->tx_sched_layer].max_children); + max_child_nodes = hw->max_children[parent->tx_sched_layer]; /* current number of children + required nodes exceed max children ? */ if ((parent->num_children + num_nodes) > max_child_nodes) { @@ -851,78 +888,6 @@ static u8 ice_sched_get_vsi_layer(struct ice_hw *hw) } /** - * ice_sched_get_num_nodes_per_layer - Get the total number of nodes per layer - * @pi: pointer to the port info struct - * @layer: layer number - * - * This function calculates the number of nodes present in the scheduler tree - * including all the branches for a given layer - */ -static u16 -ice_sched_get_num_nodes_per_layer(struct ice_port_info *pi, u8 layer) -{ - struct ice_hw *hw; - u16 num_nodes = 0; - u8 i; - - if (!pi) - return num_nodes; - - hw = pi->hw; - - /* Calculate the number of nodes for all TCs */ - for (i = 0; i < pi->root->num_children; i++) { - struct ice_sched_node *tc_node, *node; - - tc_node = pi->root->children[i]; - - /* Get the first node */ - node = ice_sched_get_first_node(hw, tc_node, layer); - if (!node) - continue; - - /* count the siblings */ - while (node) { - num_nodes++; - node = node->sibling; - } - } - - return num_nodes; -} - -/** - * ice_sched_val_max_nodes - check max number of nodes reached or not - * @pi: port information structure - * @new_num_nodes_per_layer: pointer to the new number of nodes array - * - * This function checks whether the scheduler tree layers have enough space to - * add new nodes - */ -static enum ice_status -ice_sched_validate_for_max_nodes(struct ice_port_info *pi, - u16 *new_num_nodes_per_layer) -{ - struct ice_hw *hw = pi->hw; - u8 i, qg_layer; - u16 num_nodes; - - qg_layer = ice_sched_get_qgrp_layer(hw); - - /* walk through all the layers from SW entry point to qgroup layer */ - for (i = hw->sw_entry_point_layer; i <= qg_layer; i++) { - num_nodes = ice_sched_get_num_nodes_per_layer(pi, i); - if (num_nodes + new_num_nodes_per_layer[i] > - le16_to_cpu(hw->layer_info[i].max_pf_nodes)) { - ice_debug(hw, ICE_DBG_SCHED, - "max nodes reached for layer = %d\n", i); - return ICE_ERR_CFG; - } - } - return 0; -} - -/** * ice_rm_dflt_leaf_node - remove the default leaf node in the tree * @pi: port information structure * @@ -1003,14 +968,12 @@ enum ice_status ice_sched_init_port(struct ice_port_info *pi) hw = pi->hw; /* Query the Default Topology from FW */ - buf = devm_kcalloc(ice_hw_to_dev(hw), ICE_TXSCHED_MAX_BRANCHES, - sizeof(*buf), GFP_KERNEL); + buf = devm_kzalloc(ice_hw_to_dev(hw), ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); if (!buf) return ICE_ERR_NO_MEMORY; /* Query default scheduling tree topology */ - status = ice_aq_get_dflt_topo(hw, pi->lport, buf, - sizeof(*buf) * ICE_TXSCHED_MAX_BRANCHES, + status = ice_aq_get_dflt_topo(hw, pi->lport, buf, ICE_AQ_MAX_BUF_LEN, &num_branches, NULL); if (status) goto err_init_port; @@ -1075,7 +1038,6 @@ enum ice_status ice_sched_init_port(struct ice_port_info *pi) pi->port_state = ICE_SCHED_PORT_STATE_READY; mutex_init(&pi->sched_lock); INIT_LIST_HEAD(&pi->agg_list); - INIT_LIST_HEAD(&pi->vsi_info_list); 
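Editor's note: with the per-layer fanout now cached in hw->max_children[], the capacity test in the ice_sched_add_nodes_to_layer() hunk above reduces to one comparison, with no le16_to_cpu() on layer_info. A toy restatement:

	#include <stdint.h>

	/* true if 'num_nodes' more children still fit under this parent */
	static int can_add_children(uint16_t num_children, uint16_t num_nodes,
				    uint16_t max_child_nodes)
	{
		return num_children + num_nodes <= max_child_nodes;
	}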
err_init_port: if (status && pi->root) { @@ -1097,6 +1059,8 @@ enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw) { struct ice_aqc_query_txsched_res_resp *buf; enum ice_status status = 0; + __le16 max_sibl; + u8 i; if (hw->layer_info) return status; @@ -1115,7 +1079,20 @@ enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw) hw->flattened_layers = buf->sched_props.flattening_bitmap; hw->max_cgds = buf->sched_props.max_pf_cgds; - hw->layer_info = devm_kmemdup(ice_hw_to_dev(hw), buf->layer_props, + /* max sibling group size of current layer refers to the max children + * of the below layer node. + * layer 1 node max children will be layer 2 max sibling group size + * layer 2 node max children will be layer 3 max sibling group size + * and so on. This array will be populated from root (index 0) to + * qgroup layer 7. Leaf node has no children. + */ + for (i = 0; i < hw->num_tx_sched_layers; i++) { + max_sibl = buf->layer_props[i].max_sibl_grp_sz; + hw->max_children[i] = le16_to_cpu(max_sibl); + } + + hw->layer_info = (struct ice_aqc_layer_props *) + devm_kmemdup(ice_hw_to_dev(hw), buf->layer_props, (hw->num_tx_sched_layers * sizeof(*hw->layer_info)), GFP_KERNEL); @@ -1130,27 +1107,6 @@ sched_query_out: } /** - * ice_sched_get_vsi_info_entry - Get the vsi entry list for given vsi_id - * @pi: port information structure - * @vsi_id: vsi id - * - * This function retrieves the vsi list for the given vsi id - */ -static struct ice_sched_vsi_info * -ice_sched_get_vsi_info_entry(struct ice_port_info *pi, u16 vsi_id) -{ - struct ice_sched_vsi_info *list_elem; - - if (!pi) - return NULL; - - list_for_each_entry(list_elem, &pi->vsi_info_list, list_entry) - if (list_elem->vsi_id == vsi_id) - return list_elem; - return NULL; -} - -/** * ice_sched_find_node_in_subtree - Find node in part of base node subtree * @hw: pointer to the hw struct * @base: pointer to the base node @@ -1186,30 +1142,28 @@ ice_sched_find_node_in_subtree(struct ice_hw *hw, struct ice_sched_node *base, /** * ice_sched_get_free_qparent - Get a free lan or rdma q group node * @pi: port information structure - * @vsi_id: vsi id + * @vsi_handle: software VSI handle * @tc: branch number * @owner: lan or rdma * * This function retrieves a free lan or rdma q group node */ struct ice_sched_node * -ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_id, u8 tc, +ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 owner) { struct ice_sched_node *vsi_node, *qgrp_node = NULL; - struct ice_sched_vsi_info *list_elem; + struct ice_vsi_ctx *vsi_ctx; u16 max_children; u8 qgrp_layer; qgrp_layer = ice_sched_get_qgrp_layer(pi->hw); - max_children = le16_to_cpu(pi->hw->layer_info[qgrp_layer].max_children); - - list_elem = ice_sched_get_vsi_info_entry(pi, vsi_id); - if (!list_elem) - goto lan_q_exit; - - vsi_node = list_elem->vsi_node[tc]; + max_children = pi->hw->max_children[qgrp_layer]; + vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle); + if (!vsi_ctx) + return NULL; + vsi_node = vsi_ctx->sched.vsi_node[tc]; /* validate invalid VSI id */ if (!vsi_node) goto lan_q_exit; @@ -1233,14 +1187,14 @@ lan_q_exit: * ice_sched_get_vsi_node - Get a VSI node based on VSI id * @hw: pointer to the hw struct * @tc_node: pointer to the TC node - * @vsi_id: VSI id + * @vsi_handle: software VSI handle * * This function retrieves a VSI node for a given VSI id from a given * TC branch */ static struct ice_sched_node * ice_sched_get_vsi_node(struct ice_hw *hw, struct ice_sched_node *tc_node, - u16 vsi_id) + u16 vsi_handle) { 
struct ice_sched_node *node; u8 vsi_layer; @@ -1250,7 +1204,7 @@ ice_sched_get_vsi_node(struct ice_hw *hw, struct ice_sched_node *tc_node, /* Check whether it already exists */ while (node) { - if (node->vsi_id == vsi_id) + if (node->vsi_handle == vsi_handle) return node; node = node->sibling; } @@ -1278,10 +1232,8 @@ ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes) /* calculate num nodes from q group to VSI layer */ for (i = qgl; i > vsil; i--) { - u16 max_children = le16_to_cpu(hw->layer_info[i].max_children); - /* round to the next integer if there is a remainder */ - num = DIV_ROUND_UP(num, max_children); + num = DIV_ROUND_UP(num, hw->max_children[i]); /* need at least one node */ num_nodes[i] = num ? num : 1; @@ -1291,7 +1243,7 @@ ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes) /** * ice_sched_add_vsi_child_nodes - add VSI child nodes to tree * @pi: port information structure - * @vsi_id: VSI id + * @vsi_handle: software VSI handle * @tc_node: pointer to the TC node * @num_nodes: pointer to the num nodes that needs to be added per layer * @owner: node owner (lan or rdma) @@ -1300,7 +1252,7 @@ ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes) * lan and rdma separately. */ static enum ice_status -ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id, +ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle, struct ice_sched_node *tc_node, u16 *num_nodes, u8 owner) { @@ -1311,16 +1263,13 @@ ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id, u16 num_added = 0; u8 i, qgl, vsil; - status = ice_sched_validate_for_max_nodes(pi, num_nodes); - if (status) - return status; - qgl = ice_sched_get_qgrp_layer(hw); vsil = ice_sched_get_vsi_layer(hw); - parent = ice_sched_get_vsi_node(hw, tc_node, vsi_id); + parent = ice_sched_get_vsi_node(hw, tc_node, vsi_handle); for (i = vsil + 1; i <= qgl; i++) { if (!parent) return ICE_ERR_CFG; + status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i, num_nodes[i], &first_node_teid, @@ -1398,8 +1347,8 @@ ice_sched_calc_vsi_support_nodes(struct ice_hw *hw, struct ice_sched_node *tc_node, u16 *num_nodes) { struct ice_sched_node *node; - u16 max_child; - u8 i, vsil; + u8 vsil; + int i; vsil = ice_sched_get_vsi_layer(hw); for (i = vsil; i >= hw->sw_entry_point_layer; i--) @@ -1412,12 +1361,10 @@ ice_sched_calc_vsi_support_nodes(struct ice_hw *hw, /* If intermediate nodes are reached max children * then add a new one. 
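Editor's note: ice_sched_calc_vsi_child_nodes() above sizes the tree bottom-up: each layer needs ceil(nodes-below / fanout) parents, never fewer than one. For example, 40 queues under a fanout of 8 need 5 queue-group nodes. A standalone restatement:

	#include <stdint.h>

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	static void calc_child_nodes(uint16_t num_qs, const uint16_t *max_children,
				     uint16_t *num_nodes, int qgl, int vsil)
	{
		uint16_t num = num_qs;
		int i;

		for (i = qgl; i > vsil; i--) {
			num = DIV_ROUND_UP(num, max_children[i]);
			num_nodes[i] = num ? num : 1;	/* need at least one node */
		}
	}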
*/ - node = ice_sched_get_first_node(hw, tc_node, i); - max_child = le16_to_cpu(hw->layer_info[i].max_children); - + node = ice_sched_get_first_node(hw, tc_node, (u8)i); /* scan all the siblings */ while (node) { - if (node->num_children < max_child) + if (node->num_children < hw->max_children[i]) break; node = node->sibling; } @@ -1431,7 +1378,7 @@ ice_sched_calc_vsi_support_nodes(struct ice_hw *hw, /** * ice_sched_add_vsi_support_nodes - add VSI supported nodes into tx tree * @pi: port information structure - * @vsi_id: VSI Id + * @vsi_handle: software VSI handle * @tc_node: pointer to TC node * @num_nodes: pointer to num nodes array * @@ -1439,7 +1386,7 @@ ice_sched_calc_vsi_support_nodes(struct ice_hw *hw, * VSI, its parent and intermediate nodes in below layers */ static enum ice_status -ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_id, +ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle, struct ice_sched_node *tc_node, u16 *num_nodes) { struct ice_sched_node *parent = tc_node; @@ -1451,10 +1398,6 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_id, if (!pi) return ICE_ERR_PARAM; - status = ice_sched_validate_for_max_nodes(pi, num_nodes); - if (status) - return status; - vsil = ice_sched_get_vsi_layer(pi->hw); for (i = pi->hw->sw_entry_point_layer; i <= vsil; i++) { status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, @@ -1477,21 +1420,22 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_id, return ICE_ERR_CFG; if (i == vsil) - parent->vsi_id = vsi_id; + parent->vsi_handle = vsi_handle; } + return 0; } /** * ice_sched_add_vsi_to_topo - add a new VSI into tree * @pi: port information structure - * @vsi_id: VSI Id + * @vsi_handle: software VSI handle * @tc: TC number * * This function adds a new VSI into scheduler tree */ static enum ice_status -ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_id, u8 tc) +ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc) { u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 }; struct ice_sched_node *tc_node; @@ -1505,13 +1449,14 @@ ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_id, u8 tc) ice_sched_calc_vsi_support_nodes(hw, tc_node, num_nodes); /* add vsi supported nodes to tc subtree */ - return ice_sched_add_vsi_support_nodes(pi, vsi_id, tc_node, num_nodes); + return ice_sched_add_vsi_support_nodes(pi, vsi_handle, tc_node, + num_nodes); } /** * ice_sched_update_vsi_child_nodes - update VSI child nodes * @pi: port information structure - * @vsi_id: VSI Id + * @vsi_handle: software VSI handle * @tc: TC number * @new_numqs: new number of max queues * @owner: owner of this subtree @@ -1519,14 +1464,14 @@ ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_id, u8 tc) * This function updates the VSI child nodes based on the number of queues */ static enum ice_status -ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id, u8 tc, - u16 new_numqs, u8 owner) +ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle, + u8 tc, u16 new_numqs, u8 owner) { u16 prev_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 }; u16 new_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 }; struct ice_sched_node *vsi_node; struct ice_sched_node *tc_node; - struct ice_sched_vsi_info *vsi; + struct ice_vsi_ctx *vsi_ctx; enum ice_status status = 0; struct ice_hw *hw = pi->hw; u16 prev_numqs; @@ -1536,16 +1481,16 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id, u8 tc, if (!tc_node) return 
ICE_ERR_CFG; - vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_id); + vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_handle); if (!vsi_node) return ICE_ERR_CFG; - vsi = ice_sched_get_vsi_info_entry(pi, vsi_id); - if (!vsi) - return ICE_ERR_CFG; + vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle); + if (!vsi_ctx) + return ICE_ERR_PARAM; if (owner == ICE_SCHED_NODE_OWNER_LAN) - prev_numqs = vsi->max_lanq[tc]; + prev_numqs = vsi_ctx->sched.max_lanq[tc]; else return ICE_ERR_PARAM; @@ -1570,13 +1515,13 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id, u8 tc, for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++) new_num_nodes[i] -= prev_num_nodes[i]; - status = ice_sched_add_vsi_child_nodes(pi, vsi_id, tc_node, + status = ice_sched_add_vsi_child_nodes(pi, vsi_handle, tc_node, new_num_nodes, owner); if (status) return status; } - vsi->max_lanq[tc] = new_numqs; + vsi_ctx->sched.max_lanq[tc] = new_numqs; return status; } @@ -1584,7 +1529,7 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id, u8 tc, /** * ice_sched_cfg_vsi - configure the new/existing VSI * @pi: port information structure - * @vsi_id: VSI Id + * @vsi_handle: software VSI handle * @tc: TC number * @maxqs: max number of queues * @owner: lan or rdma @@ -1595,25 +1540,21 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id, u8 tc, * disabled then suspend the VSI if it is not already. */ enum ice_status -ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_id, u8 tc, u16 maxqs, +ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs, u8 owner, bool enable) { struct ice_sched_node *vsi_node, *tc_node; - struct ice_sched_vsi_info *vsi; + struct ice_vsi_ctx *vsi_ctx; enum ice_status status = 0; struct ice_hw *hw = pi->hw; tc_node = ice_sched_get_tc_node(pi, tc); if (!tc_node) return ICE_ERR_PARAM; - - vsi = ice_sched_get_vsi_info_entry(pi, vsi_id); - if (!vsi) - vsi = ice_sched_create_vsi_info_entry(pi, vsi_id); - if (!vsi) - return ICE_ERR_NO_MEMORY; - - vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_id); + vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle); + if (!vsi_ctx) + return ICE_ERR_PARAM; + vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_handle); /* suspend the VSI if tc is not enabled */ if (!enable) { @@ -1630,18 +1571,26 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_id, u8 tc, u16 maxqs, /* TC is enabled, if it is a new VSI then add it to the tree */ if (!vsi_node) { - status = ice_sched_add_vsi_to_topo(pi, vsi_id, tc); + status = ice_sched_add_vsi_to_topo(pi, vsi_handle, tc); if (status) return status; - vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_id); + + vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_handle); if (!vsi_node) return ICE_ERR_CFG; - vsi->vsi_node[tc] = vsi_node; + + vsi_ctx->sched.vsi_node[tc] = vsi_node; vsi_node->in_use = true; + /* invalidate the max queues whenever the VSI is added for the + * first time into the scheduler tree (boot or after reset). We + * need to recreate the child nodes all the time in these cases.
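Editor's note: condensing the new-node branch of ice_sched_cfg_vsi() above (error handling elided): the subtree is materialized, looked back up, recorded in the per-VSI context, and max_lanq is zeroed so the queue nodes are recreated from scratch after boot or reset:

	status = ice_sched_add_vsi_to_topo(pi, vsi_handle, tc);
	vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_handle);
	vsi_ctx->sched.vsi_node[tc] = vsi_node;
	vsi_node->in_use = true;
	vsi_ctx->sched.max_lanq[tc] = 0;	/* force child-node recreation */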
+ */ + vsi_ctx->sched.max_lanq[tc] = 0; } /* update the VSI child nodes */ - status = ice_sched_update_vsi_child_nodes(pi, vsi_id, tc, maxqs, owner); + status = ice_sched_update_vsi_child_nodes(pi, vsi_handle, tc, maxqs, + owner); if (status) return status; diff --git a/drivers/net/ethernet/intel/ice/ice_sched.h b/drivers/net/ethernet/intel/ice/ice_sched.h index badadcc120d3..5dc9cfa04c58 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.h +++ b/drivers/net/ethernet/intel/ice/ice_sched.h @@ -12,7 +12,6 @@ struct ice_sched_agg_vsi_info { struct list_head list_entry; DECLARE_BITMAP(tc_bitmap, ICE_MAX_TRAFFIC_CLASS); - u16 vsi_id; }; struct ice_sched_agg_info { @@ -35,9 +34,9 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer, void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node); struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc); struct ice_sched_node * -ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_id, u8 tc, +ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 owner); enum ice_status -ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_id, u8 tc, u16 maxqs, +ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs, u8 owner, bool enable); #endif /* _ICE_SCHED_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c new file mode 100644 index 000000000000..027eba4e13f8 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_sriov.c @@ -0,0 +1,127 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018, Intel Corporation. */ + +#include "ice_common.h" +#include "ice_adminq_cmd.h" +#include "ice_sriov.h" + +/** + * ice_aq_send_msg_to_vf + * @hw: pointer to the hardware structure + * @vfid: VF ID to send msg + * @v_opcode: opcodes for VF-PF communication + * @v_retval: return error code + * @msg: pointer to the msg buffer + * @msglen: msg length + * @cd: pointer to command details + * + * Send message to VF driver (0x0802) using mailbox + * queue and asynchronously sending message via + * ice_sq_send_cmd() function + */ +enum ice_status +ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval, + u8 *msg, u16 msglen, struct ice_sq_cd *cd) +{ + struct ice_aqc_pf_vf_msg *cmd; + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_mbx_opc_send_msg_to_vf); + + cmd = &desc.params.virt; + cmd->id = cpu_to_le32(vfid); + + desc.cookie_high = cpu_to_le32(v_opcode); + desc.cookie_low = cpu_to_le32(v_retval); + + if (msglen) + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + + return ice_sq_send_cmd(hw, &hw->mailboxq, &desc, msg, msglen, cd); +} + +/** + * ice_conv_link_speed_to_virtchnl + * @adv_link_support: determines the format of the returned link speed + * @link_speed: variable containing the link_speed to be converted + * + * Convert link speed supported by HW to link speed supported by virtchnl. + * If adv_link_support is true, then return link speed in Mbps. Else return + * link speed as a VIRTCHNL_LINK_SPEED_* casted to a u32. Note that the caller + * needs to cast back to an enum virtchnl_link_speed in the case where + * adv_link_support is false, but when adv_link_support is true the caller can + * expect the speed in Mbps. 
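Editor's note: a hypothetical caller showing both contracts the ice_conv_link_speed_to_virtchnl() comment above describes; everything except the converter itself is illustrative:

	/* legacy AVF: value is a VIRTCHNL_LINK_SPEED_* stored in a u32 */
	u32 raw = ice_conv_link_speed_to_virtchnl(false,
						  ICE_AQ_LINK_SPEED_25GB);
	enum virtchnl_link_speed legacy = (enum virtchnl_link_speed)raw;

	/* advanced link support: value is already the speed in Mbps */
	u32 mbps = ice_conv_link_speed_to_virtchnl(true,
						   ICE_AQ_LINK_SPEED_25GB);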
+ */ +u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed) +{ + u32 speed; + + if (adv_link_support) + switch (link_speed) { + case ICE_AQ_LINK_SPEED_10MB: + speed = ICE_LINK_SPEED_10MBPS; + break; + case ICE_AQ_LINK_SPEED_100MB: + speed = ICE_LINK_SPEED_100MBPS; + break; + case ICE_AQ_LINK_SPEED_1000MB: + speed = ICE_LINK_SPEED_1000MBPS; + break; + case ICE_AQ_LINK_SPEED_2500MB: + speed = ICE_LINK_SPEED_2500MBPS; + break; + case ICE_AQ_LINK_SPEED_5GB: + speed = ICE_LINK_SPEED_5000MBPS; + break; + case ICE_AQ_LINK_SPEED_10GB: + speed = ICE_LINK_SPEED_10000MBPS; + break; + case ICE_AQ_LINK_SPEED_20GB: + speed = ICE_LINK_SPEED_20000MBPS; + break; + case ICE_AQ_LINK_SPEED_25GB: + speed = ICE_LINK_SPEED_25000MBPS; + break; + case ICE_AQ_LINK_SPEED_40GB: + speed = ICE_LINK_SPEED_40000MBPS; + break; + default: + speed = ICE_LINK_SPEED_UNKNOWN; + break; + } + else + /* Virtchnl speeds are not defined for every speed supported in + * the hardware. To maintain compatibility with older AVF + * drivers, while reporting the speed the new speed values are + * resolved to the closest known virtchnl speeds + */ + switch (link_speed) { + case ICE_AQ_LINK_SPEED_10MB: + case ICE_AQ_LINK_SPEED_100MB: + speed = (u32)VIRTCHNL_LINK_SPEED_100MB; + break; + case ICE_AQ_LINK_SPEED_1000MB: + case ICE_AQ_LINK_SPEED_2500MB: + case ICE_AQ_LINK_SPEED_5GB: + speed = (u32)VIRTCHNL_LINK_SPEED_1GB; + break; + case ICE_AQ_LINK_SPEED_10GB: + speed = (u32)VIRTCHNL_LINK_SPEED_10GB; + break; + case ICE_AQ_LINK_SPEED_20GB: + speed = (u32)VIRTCHNL_LINK_SPEED_20GB; + break; + case ICE_AQ_LINK_SPEED_25GB: + speed = (u32)VIRTCHNL_LINK_SPEED_25GB; + break; + case ICE_AQ_LINK_SPEED_40GB: + /* fall through */ + speed = (u32)VIRTCHNL_LINK_SPEED_40GB; + break; + default: + speed = (u32)VIRTCHNL_LINK_SPEED_UNKNOWN; + break; + } + + return speed; +} diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.h b/drivers/net/ethernet/intel/ice/ice_sriov.h new file mode 100644 index 000000000000..3d78a0795138 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_sriov.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018, Intel Corporation. 
*/ + +#ifndef _ICE_SRIOV_H_ +#define _ICE_SRIOV_H_ + +#include "ice_common.h" + +#ifdef CONFIG_PCI_IOV +enum ice_status +ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval, + u8 *msg, u16 msglen, struct ice_sq_cd *cd); + +u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed); +#else /* CONFIG_PCI_IOV */ +static inline enum ice_status +ice_aq_send_msg_to_vf(struct ice_hw __always_unused *hw, + u16 __always_unused vfid, u32 __always_unused v_opcode, + u32 __always_unused v_retval, u8 __always_unused *msg, + u16 __always_unused msglen, + struct ice_sq_cd __always_unused *cd) +{ + return 0; +} + +static inline u32 +ice_conv_link_speed_to_virtchnl(bool __always_unused adv_link_support, + u16 __always_unused link_speed) +{ + return 0; +} + +#endif /* CONFIG_PCI_IOV */ +#endif /* _ICE_SRIOV_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_status.h b/drivers/net/ethernet/intel/ice/ice_status.h index 9a95c4ffd7d7..f49f299ddf2c 100644 --- a/drivers/net/ethernet/intel/ice/ice_status.h +++ b/drivers/net/ethernet/intel/ice/ice_status.h @@ -6,6 +6,9 @@ /* Error Codes */ enum ice_status { + ICE_SUCCESS = 0, + + /* Generic codes : Range -1..-49 */ ICE_ERR_PARAM = -1, ICE_ERR_NOT_IMPL = -2, ICE_ERR_NOT_READY = -3, @@ -20,6 +23,7 @@ enum ice_status { ICE_ERR_ALREADY_EXISTS = -14, ICE_ERR_DOES_NOT_EXIST = -15, ICE_ERR_MAX_LIMIT = -17, + ICE_ERR_RESET_ONGOING = -18, ICE_ERR_BUF_TOO_SHORT = -52, ICE_ERR_NVM_BLANK_MODE = -53, ICE_ERR_AQ_ERROR = -100, diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index 6b7ec2ae5ad6..33403f39f1b3 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -86,6 +86,36 @@ ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries, } /** + * ice_init_def_sw_recp - initialize the recipe book keeping tables + * @hw: pointer to the hw struct + * + * Allocate memory for the entire recipe table and initialize the structures/ + * entries corresponding to basic recipes. 
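Once the initialization below has run, every basic lookup type owns the recipe entry at its own index, so finding the bookkeeping list for a rule type reduces to array indexing. A one-function sketch, with a hypothetical example_filt_rules() accessor:

/* Illustrative only: recp_list is indexed by lookup type because
 * root_rid is set to the loop index during initialization.
 */
static struct list_head *
example_filt_rules(struct ice_hw *hw, enum ice_sw_lkup_type lkup)
{
	return &hw->switch_info->recp_list[lkup].filt_rules;
}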
+ */ +enum ice_status +ice_init_def_sw_recp(struct ice_hw *hw) +{ + struct ice_sw_recipe *recps; + u8 i; + + recps = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_NUM_RECIPES, + sizeof(struct ice_sw_recipe), GFP_KERNEL); + if (!recps) + return ICE_ERR_NO_MEMORY; + + for (i = 0; i < ICE_SW_LKUP_LAST; i++) { + recps[i].root_rid = i; + INIT_LIST_HEAD(&recps[i].filt_rules); + INIT_LIST_HEAD(&recps[i].filt_replay_rules); + mutex_init(&recps[i].filt_rule_lock); + } + + hw->switch_info->recp_list = recps; + + return 0; +} + +/** * ice_aq_get_sw_cfg - get switch configuration * @hw: pointer to the hardware structure * @buf: pointer to the result buffer @@ -140,23 +170,24 @@ ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf, * * Add a VSI context to the hardware (0x0210) */ -enum ice_status +static enum ice_status ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, struct ice_sq_cd *cd) { struct ice_aqc_add_update_free_vsi_resp *res; struct ice_aqc_add_get_update_free_vsi *cmd; - enum ice_status status; struct ice_aq_desc desc; + enum ice_status status; cmd = &desc.params.vsi_cmd; - res = (struct ice_aqc_add_update_free_vsi_resp *)&desc.params.raw; + res = &desc.params.add_update_free_vsi_res; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi); if (!vsi_ctx->alloc_from_pool) cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID); + cmd->vf_id = vsi_ctx->vf_num; cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags); @@ -175,6 +206,42 @@ ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, } /** + * ice_aq_free_vsi + * @hw: pointer to the hw struct + * @vsi_ctx: pointer to a VSI context struct + * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources + * @cd: pointer to command details structure or NULL + * + * Free VSI context info from hardware (0x0213) + */ +static enum ice_status +ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, + bool keep_vsi_alloc, struct ice_sq_cd *cd) +{ + struct ice_aqc_add_update_free_vsi_resp *resp; + struct ice_aqc_add_get_update_free_vsi *cmd; + struct ice_aq_desc desc; + enum ice_status status; + + cmd = &desc.params.vsi_cmd; + resp = &desc.params.add_update_free_vsi_res; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi); + + cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID); + if (keep_vsi_alloc) + cmd->cmd_flags = cpu_to_le16(ICE_AQ_VSI_KEEP_ALLOC); + + status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); + if (!status) { + vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used); + vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free); + } + + return status; +} + +/** * ice_aq_update_vsi * @hw: pointer to the hw struct * @vsi_ctx: pointer to a VSI context struct @@ -182,7 +249,7 @@ ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, * * Update VSI context in the hardware (0x0211) */ -enum ice_status +static enum ice_status ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, struct ice_sq_cd *cd) { @@ -192,7 +259,7 @@ ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, enum ice_status status; cmd = &desc.params.vsi_cmd; - resp = (struct ice_aqc_add_update_free_vsi_resp *)&desc.params.raw; + resp = &desc.params.add_update_free_vsi_res; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi); @@ -212,42 +279,162 @@ ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, } /** - * ice_aq_free_vsi + * ice_is_vsi_valid - check whether the VSI is valid or not + * @hw: pointer to the hw struct + * @vsi_handle: VSI handle + 
* + * check whether the VSI is valid or not + */ +bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle) +{ + return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle]; +} + +/** + * ice_get_hw_vsi_num - return the hw VSI number + * @hw: pointer to the hw struct + * @vsi_handle: VSI handle + * + * return the hw VSI number + * Caution: call this function only if VSI is valid (ice_is_vsi_valid) + */ +u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle) +{ + return hw->vsi_ctx[vsi_handle]->vsi_num; +} + +/** + * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle + * @hw: pointer to the hw struct + * @vsi_handle: VSI handle + * + * return the VSI context entry for a given VSI handle + */ +struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle) +{ + return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle]; +} + +/** + * ice_save_vsi_ctx - save the VSI context for a given VSI handle + * @hw: pointer to the hw struct + * @vsi_handle: VSI handle + * @vsi: VSI context pointer + * + * save the VSI context entry for a given VSI handle + */ +static void ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, + struct ice_vsi_ctx *vsi) +{ + hw->vsi_ctx[vsi_handle] = vsi; +} + +/** + * ice_clear_vsi_ctx - clear the VSI context entry + * @hw: pointer to the hw struct + * @vsi_handle: VSI handle + * + * clear the VSI context entry + */ +static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle) +{ + struct ice_vsi_ctx *vsi; + + vsi = ice_get_vsi_ctx(hw, vsi_handle); + if (vsi) { + devm_kfree(ice_hw_to_dev(hw), vsi); + hw->vsi_ctx[vsi_handle] = NULL; + } +} + +/** + * ice_add_vsi - add VSI context to the hardware and VSI handle list * @hw: pointer to the hw struct + * @vsi_handle: unique VSI handle provided by drivers * @vsi_ctx: pointer to a VSI context struct - * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources * @cd: pointer to command details structure or NULL * - * Get VSI context info from hardware (0x0213) + * Add a VSI context to the hardware also add it into the VSI handle list. + * If this function gets called after reset for existing VSIs then update + * with the new HW VSI number in the corresponding VSI handle list entry. 
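A hedged lifecycle sketch for the handle-based API introduced here; the example_vsi_lifecycle() wrapper and its zeroed context are illustrative, not part of this patch. The point of the design is that the software handle stays fixed across resets while the hardware VSI number stored in the saved context may change underneath it.

/* Hypothetical create/use/destroy pair built on the new handle API */
static enum ice_status
example_vsi_lifecycle(struct ice_hw *hw, u16 vsi_handle)
{
	struct ice_vsi_ctx ctx = { 0 };
	enum ice_status status;

	status = ice_add_vsi(hw, vsi_handle, &ctx, NULL);
	if (status)
		return status;

	/* AQ commands take the HW number, looked up through the handle */
	ice_debug(hw, ICE_DBG_SW, "hw VSI %d\n",
		  ice_get_hw_vsi_num(hw, vsi_handle));

	return ice_free_vsi(hw, vsi_handle, &ctx, false, NULL);
}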
*/ enum ice_status -ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, - bool keep_vsi_alloc, struct ice_sq_cd *cd) +ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, + struct ice_sq_cd *cd) { - struct ice_aqc_add_update_free_vsi_resp *resp; - struct ice_aqc_add_get_update_free_vsi *cmd; - struct ice_aq_desc desc; + struct ice_vsi_ctx *tmp_vsi_ctx; enum ice_status status; - cmd = &desc.params.vsi_cmd; - resp = (struct ice_aqc_add_update_free_vsi_resp *)&desc.params.raw; - - ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi); + if (vsi_handle >= ICE_MAX_VSI) + return ICE_ERR_PARAM; + status = ice_aq_add_vsi(hw, vsi_ctx, cd); + if (status) + return status; + tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle); + if (!tmp_vsi_ctx) { + /* Create a new vsi context */ + tmp_vsi_ctx = devm_kzalloc(ice_hw_to_dev(hw), + sizeof(*tmp_vsi_ctx), GFP_KERNEL); + if (!tmp_vsi_ctx) { + ice_aq_free_vsi(hw, vsi_ctx, false, cd); + return ICE_ERR_NO_MEMORY; + } + *tmp_vsi_ctx = *vsi_ctx; + ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx); + } else { + /* update with new HW VSI num */ + if (tmp_vsi_ctx->vsi_num != vsi_ctx->vsi_num) + tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num; + } - cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID); - if (keep_vsi_alloc) - cmd->cmd_flags = cpu_to_le16(ICE_AQ_VSI_KEEP_ALLOC); + return status; +} - status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); - if (!status) { - vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used); - vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free); - } +/** + * ice_free_vsi- free VSI context from hardware and VSI handle list + * @hw: pointer to the hw struct + * @vsi_handle: unique VSI handle + * @vsi_ctx: pointer to a VSI context struct + * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources + * @cd: pointer to command details structure or NULL + * + * Free VSI context info from hardware as well as from VSI handle list + */ +enum ice_status +ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, + bool keep_vsi_alloc, struct ice_sq_cd *cd) +{ + enum ice_status status; + if (!ice_is_vsi_valid(hw, vsi_handle)) + return ICE_ERR_PARAM; + vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle); + status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd); + if (!status) + ice_clear_vsi_ctx(hw, vsi_handle); return status; } /** + * ice_update_vsi + * @hw: pointer to the hw struct + * @vsi_handle: unique VSI handle + * @vsi_ctx: pointer to a VSI context struct + * @cd: pointer to command details structure or NULL + * + * Update VSI context in the hardware + */ +enum ice_status +ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, + struct ice_sq_cd *cd) +{ + if (!ice_is_vsi_valid(hw, vsi_handle)) + return ICE_ERR_PARAM; + vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle); + return ice_aq_update_vsi(hw, vsi_ctx, cd); +} + +/** * ice_aq_alloc_free_vsi_list * @hw: pointer to the hw struct * @vsi_list_id: VSI list id returned or used for lookup @@ -464,10 +651,12 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info, struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc) { u16 vlan_id = ICE_MAX_VLAN_ID + 1; - u8 eth_hdr[DUMMY_ETH_HDR_LEN]; void *daddr = NULL; + u16 eth_hdr_sz; + u8 *eth_hdr; u32 act = 0; __be16 *off; + u8 q_rgn; if (opc == ice_aqc_opc_remove_sw_rules) { s_rule->pdata.lkup_tx_rx.act = 0; @@ -477,13 +666,16 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info, return; } + eth_hdr_sz = 
sizeof(dummy_eth_header); + eth_hdr = s_rule->pdata.lkup_tx_rx.hdr; + /* initialize the ether header with a dummy header */ - memcpy(eth_hdr, dummy_eth_header, sizeof(dummy_eth_header)); + memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz); ice_fill_sw_info(hw, f_info); switch (f_info->fltr_act) { case ICE_FWD_TO_VSI: - act |= (f_info->fwd_id.vsi_id << ICE_SINGLE_ACT_VSI_ID_S) & + act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M; if (f_info->lkup_type != ICE_SW_LKUP_VLAN) act |= ICE_SINGLE_ACT_VSI_FORWARDING | @@ -503,14 +695,19 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info, act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) & ICE_SINGLE_ACT_Q_INDEX_M; break; + case ICE_DROP_PACKET: + act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP | + ICE_SINGLE_ACT_VALID_BIT; + break; case ICE_FWD_TO_QGRP: + q_rgn = f_info->qgrp_size > 0 ? + (u8)ilog2(f_info->qgrp_size) : 0; act |= ICE_SINGLE_ACT_TO_Q; - act |= (f_info->qgrp_size << ICE_SINGLE_ACT_Q_REGION_S) & + act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) & + ICE_SINGLE_ACT_Q_INDEX_M; + act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) & ICE_SINGLE_ACT_Q_REGION_M; break; - case ICE_DROP_PACKET: - act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP; - break; default: return; } @@ -536,7 +733,7 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info, daddr = f_info->l_data.ethertype_mac.mac_addr; /* fall-through */ case ICE_SW_LKUP_ETHERTYPE: - off = (__be16 *)ð_hdr[ICE_ETH_ETHTYPE_OFFSET]; + off = (__be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET); *off = cpu_to_be16(f_info->l_data.ethertype_mac.ethertype); break; case ICE_SW_LKUP_MAC_VLAN: @@ -563,18 +760,16 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info, s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act); if (daddr) - ether_addr_copy(ð_hdr[ICE_ETH_DA_OFFSET], daddr); + ether_addr_copy(eth_hdr + ICE_ETH_DA_OFFSET, daddr); if (!(vlan_id > ICE_MAX_VLAN_ID)) { - off = (__be16 *)ð_hdr[ICE_ETH_VLAN_TCI_OFFSET]; + off = (__be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET); *off = cpu_to_be16(vlan_id); } /* Create the switch rule with the final dummy Ethernet header */ if (opc != ice_aqc_opc_update_sw_rules) - s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(sizeof(eth_hdr)); - - memcpy(s_rule->pdata.lkup_tx_rx.hdr, eth_hdr, sizeof(eth_hdr)); + s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(eth_hdr_sz); } /** @@ -601,8 +796,8 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent, enum ice_status status; u16 lg_act_size; u16 rules_size; - u16 vsi_info; u32 act; + u16 id; if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC) return ICE_ERR_PARAM; @@ -628,12 +823,11 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent, /* First action VSI forwarding or VSI list forwarding depending on how * many VSIs */ - vsi_info = (m_ent->vsi_count > 1) ? - m_ent->fltr_info.fwd_id.vsi_list_id : - m_ent->fltr_info.fwd_id.vsi_id; + id = (m_ent->vsi_count > 1) ? 
m_ent->fltr_info.fwd_id.vsi_list_id : + m_ent->fltr_info.fwd_id.hw_vsi_id; act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT; - act |= (vsi_info << ICE_LG_ACT_VSI_LIST_ID_S) & + act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M; if (m_ent->vsi_count > 1) act |= ICE_LG_ACT_VSI_LIST; @@ -686,15 +880,15 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent, /** * ice_create_vsi_list_map * @hw: pointer to the hardware structure - * @vsi_array: array of VSIs to form a VSI list - * @num_vsi: num VSI in the array + * @vsi_handle_arr: array of VSI handles to set in the VSI mapping + * @num_vsi: number of VSI handles in the array * @vsi_list_id: VSI list id generated as part of allocate resource * * Helper function to create a new entry of VSI list id to VSI mapping * using the given VSI list id */ static struct ice_vsi_list_map_info * -ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi, +ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi, u16 vsi_list_id) { struct ice_switch_info *sw = hw->switch_info; @@ -706,9 +900,9 @@ ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi, return NULL; v_map->vsi_list_id = vsi_list_id; - + v_map->ref_cnt = 1; for (i = 0; i < num_vsi; i++) - set_bit(vsi_array[i], v_map->vsi_map); + set_bit(vsi_handle_arr[i], v_map->vsi_map); list_add(&v_map->list_entry, &sw->vsi_list_map_head); return v_map; @@ -717,8 +911,8 @@ ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi, /** * ice_update_vsi_list_rule * @hw: pointer to the hardware structure - * @vsi_array: array of VSIs to form a VSI list - * @num_vsi: num VSI in the array + * @vsi_handle_arr: array of VSI handles to form a VSI list + * @num_vsi: number of VSI handles in the array * @vsi_list_id: VSI list id generated as part of allocate resource * @remove: Boolean value to indicate if this is a remove action * @opc: switch rules population command type - pass in the command opcode @@ -728,7 +922,7 @@ ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi, * using the given VSI list id */ static enum ice_status -ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi, +ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi, u16 vsi_list_id, bool remove, enum ice_adminq_opc opc, enum ice_sw_lkup_type lkup_type) { @@ -759,9 +953,15 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi, s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL); if (!s_rule) return ICE_ERR_NO_MEMORY; - - for (i = 0; i < num_vsi; i++) - s_rule->pdata.vsi_list.vsi[i] = cpu_to_le16(vsi_array[i]); + for (i = 0; i < num_vsi; i++) { + if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) { + status = ICE_ERR_PARAM; + goto exit; + } + /* AQ call requires hw_vsi_id(s) */ + s_rule->pdata.vsi_list.vsi[i] = + cpu_to_le16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i])); + } s_rule->type = cpu_to_le16(type); s_rule->pdata.vsi_list.number_vsi = cpu_to_le16(num_vsi); @@ -769,6 +969,7 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi, status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL); +exit: devm_kfree(ice_hw_to_dev(hw), s_rule); return status; } @@ -776,21 +977,16 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi, /** * ice_create_vsi_list_rule - Creates and populates a VSI list rule * @hw: pointer to the hw struct - * @vsi_array: array of VSIs to form a VSI list - * @num_vsi: number of VSIs in 
the array + * @vsi_handle_arr: array of VSI handles to form a VSI list + * @num_vsi: number of VSI handles in the array * @vsi_list_id: stores the ID of the VSI list to be created * @lkup_type: switch rule filter's lookup type */ static enum ice_status -ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi, +ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi, u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type) { enum ice_status status; - int i; - - for (i = 0; i < num_vsi; i++) - if (vsi_array[i] >= ICE_MAX_VSI) - return ICE_ERR_OUT_OF_RANGE; status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type, ice_aqc_opc_alloc_res); @@ -798,9 +994,9 @@ ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi, return status; /* Update the newly created VSI list to include the specified VSIs */ - return ice_update_vsi_list_rule(hw, vsi_array, num_vsi, *vsi_list_id, - false, ice_aqc_opc_add_sw_rules, - lkup_type); + return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi, + *vsi_list_id, false, + ice_aqc_opc_add_sw_rules, lkup_type); } /** @@ -816,10 +1012,10 @@ static enum ice_status ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry) { - struct ice_switch_info *sw = hw->switch_info; struct ice_fltr_mgmt_list_entry *fm_entry; struct ice_aqc_sw_rules_elem *s_rule; enum ice_sw_lkup_type l_type; + struct ice_sw_recipe *recp; enum ice_status status; s_rule = devm_kzalloc(ice_hw_to_dev(hw), @@ -860,31 +1056,9 @@ ice_create_pkt_fwd_rule(struct ice_hw *hw, * calls remove filter AQ command */ l_type = fm_entry->fltr_info.lkup_type; - if (l_type == ICE_SW_LKUP_MAC) { - mutex_lock(&sw->mac_list_lock); - list_add(&fm_entry->list_entry, &sw->mac_list_head); - mutex_unlock(&sw->mac_list_lock); - } else if (l_type == ICE_SW_LKUP_VLAN) { - mutex_lock(&sw->vlan_list_lock); - list_add(&fm_entry->list_entry, &sw->vlan_list_head); - mutex_unlock(&sw->vlan_list_lock); - } else if (l_type == ICE_SW_LKUP_ETHERTYPE || - l_type == ICE_SW_LKUP_ETHERTYPE_MAC) { - mutex_lock(&sw->eth_m_list_lock); - list_add(&fm_entry->list_entry, &sw->eth_m_list_head); - mutex_unlock(&sw->eth_m_list_lock); - } else if (l_type == ICE_SW_LKUP_PROMISC || - l_type == ICE_SW_LKUP_PROMISC_VLAN) { - mutex_lock(&sw->promisc_list_lock); - list_add(&fm_entry->list_entry, &sw->promisc_list_head); - mutex_unlock(&sw->promisc_list_lock); - } else if (fm_entry->fltr_info.lkup_type == ICE_SW_LKUP_MAC_VLAN) { - mutex_lock(&sw->mac_vlan_list_lock); - list_add(&fm_entry->list_entry, &sw->mac_vlan_list_head); - mutex_unlock(&sw->mac_vlan_list_lock); - } else { - status = ICE_ERR_NOT_IMPL; - } + recp = &hw->switch_info->recp_list[l_type]; + list_add(&fm_entry->list_entry, &recp->filt_rules); + ice_create_pkt_fwd_rule_exit: devm_kfree(ice_hw_to_dev(hw), s_rule); return status; @@ -893,19 +1067,15 @@ ice_create_pkt_fwd_rule_exit: /** * ice_update_pkt_fwd_rule * @hw: pointer to the hardware structure - * @rule_id: rule of previously created switch rule to update - * @vsi_list_id: VSI list id to be updated with - * @f_info: ice_fltr_info to pull other information for switch rule + * @f_info: filter information for switch rule * * Call AQ command to update a previously created switch rule with a * VSI list id */ static enum ice_status -ice_update_pkt_fwd_rule(struct ice_hw *hw, u16 rule_id, u16 vsi_list_id, - struct ice_fltr_info f_info) +ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info) { struct ice_aqc_sw_rules_elem *s_rule; - struct ice_fltr_info 
tmp_fltr; enum ice_status status; s_rule = devm_kzalloc(ice_hw_to_dev(hw), @@ -913,14 +1083,9 @@ ice_update_pkt_fwd_rule(struct ice_hw *hw, u16 rule_id, u16 vsi_list_id, if (!s_rule) return ICE_ERR_NO_MEMORY; - tmp_fltr = f_info; - tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST; - tmp_fltr.fwd_id.vsi_list_id = vsi_list_id; + ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules); - ice_fill_sw_rule(hw, &tmp_fltr, s_rule, - ice_aqc_opc_update_sw_rules); - - s_rule->pdata.lkup_tx_rx.index = cpu_to_le16(rule_id); + s_rule->pdata.lkup_tx_rx.index = cpu_to_le16(f_info->fltr_rule_id); /* Update switch rule with new rule set to forward VSI list */ status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1, @@ -931,7 +1096,48 @@ ice_update_pkt_fwd_rule(struct ice_hw *hw, u16 rule_id, u16 vsi_list_id, } /** - * ice_handle_vsi_list_mgmt + * ice_update_sw_rule_bridge_mode + * @hw: pointer to the hw struct + * + * Updates unicast switch filter rules based on VEB/VEPA mode + */ +enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw) +{ + struct ice_switch_info *sw = hw->switch_info; + struct ice_fltr_mgmt_list_entry *fm_entry; + enum ice_status status = 0; + struct list_head *rule_head; + struct mutex *rule_lock; /* Lock to protect filter rule list */ + + rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock; + rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules; + + mutex_lock(rule_lock); + list_for_each_entry(fm_entry, rule_head, list_entry) { + struct ice_fltr_info *fi = &fm_entry->fltr_info; + u8 *addr = fi->l_data.mac.mac_addr; + + /* Update unicast Tx rules to reflect the selected + * VEB/VEPA mode + */ + if ((fi->flag & ICE_FLTR_TX) && is_unicast_ether_addr(addr) && + (fi->fltr_act == ICE_FWD_TO_VSI || + fi->fltr_act == ICE_FWD_TO_VSI_LIST || + fi->fltr_act == ICE_FWD_TO_Q || + fi->fltr_act == ICE_FWD_TO_QGRP)) { + status = ice_update_pkt_fwd_rule(hw, fi); + if (status) + break; + } + } + + mutex_unlock(rule_lock); + + return status; +} + +/** + * ice_add_update_vsi_list * @hw: pointer to the hardware structure * @m_entry: pointer to current filter management list entry * @cur_fltr: filter information from the book keeping entry @@ -952,10 +1158,10 @@ ice_update_pkt_fwd_rule(struct ice_hw *hw, u16 rule_id, u16 vsi_list_id, * using the update switch rule command */ static enum ice_status -ice_handle_vsi_list_mgmt(struct ice_hw *hw, - struct ice_fltr_mgmt_list_entry *m_entry, - struct ice_fltr_info *cur_fltr, - struct ice_fltr_info *new_fltr) +ice_add_update_vsi_list(struct ice_hw *hw, + struct ice_fltr_mgmt_list_entry *m_entry, + struct ice_fltr_info *cur_fltr, + struct ice_fltr_info *new_fltr) { enum ice_status status = 0; u16 vsi_list_id = 0; @@ -975,34 +1181,36 @@ ice_handle_vsi_list_mgmt(struct ice_hw *hw, * a part of a VSI list. So, create a VSI list with the old and * new VSIs. 
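This conversion is driven from the filter API: adding the same address for a second VSI is what turns a single forward-to-VSI rule into a VSI-list rule. A hedged sketch with a hypothetical example_share_mac() wrapper, assuming a multicast (or shared-unicast) address, since duplicate unicast adds are rejected earlier in ice_add_mac():

/* Illustrative only: two adds of one MAC under different VSI handles */
static enum ice_status
example_share_mac(struct ice_hw *hw, const u8 *mac, u16 vsi_a, u16 vsi_b)
{
	struct ice_fltr_list_entry entry = { 0 };
	struct list_head m_list;
	enum ice_status status;

	INIT_LIST_HEAD(&m_list);
	entry.fltr_info.lkup_type = ICE_SW_LKUP_MAC;
	entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
	entry.fltr_info.src_id = ICE_SRC_ID_VSI;
	ether_addr_copy(entry.fltr_info.l_data.mac.mac_addr, mac);
	list_add(&entry.list_entry, &m_list);

	entry.fltr_info.vsi_handle = vsi_a;
	status = ice_add_mac(hw, &m_list);	/* rule: forward to VSI */
	if (status)
		return status;

	entry.fltr_info.vsi_handle = vsi_b;
	return ice_add_mac(hw, &m_list);	/* converts to VSI list */
}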
*/ - u16 vsi_id_arr[2]; - u16 fltr_rule; + struct ice_fltr_info tmp_fltr; + u16 vsi_handle_arr[2]; /* A rule already exists with the new VSI being added */ - if (cur_fltr->fwd_id.vsi_id == new_fltr->fwd_id.vsi_id) + if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id) return ICE_ERR_ALREADY_EXISTS; - vsi_id_arr[0] = cur_fltr->fwd_id.vsi_id; - vsi_id_arr[1] = new_fltr->fwd_id.vsi_id; - status = ice_create_vsi_list_rule(hw, &vsi_id_arr[0], 2, + vsi_handle_arr[0] = cur_fltr->vsi_handle; + vsi_handle_arr[1] = new_fltr->vsi_handle; + status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2, &vsi_list_id, new_fltr->lkup_type); if (status) return status; - fltr_rule = cur_fltr->fltr_rule_id; + tmp_fltr = *new_fltr; + tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id; + tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST; + tmp_fltr.fwd_id.vsi_list_id = vsi_list_id; /* Update the previous switch rule of "MAC forward to VSI" to * "MAC fwd to VSI list" */ - status = ice_update_pkt_fwd_rule(hw, fltr_rule, vsi_list_id, - *new_fltr); + status = ice_update_pkt_fwd_rule(hw, &tmp_fltr); if (status) return status; cur_fltr->fwd_id.vsi_list_id = vsi_list_id; cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST; m_entry->vsi_list_info = - ice_create_vsi_list_map(hw, &vsi_id_arr[0], 2, + ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2, vsi_list_id); /* If this entry was large action then the large action needs @@ -1014,11 +1222,11 @@ ice_handle_vsi_list_mgmt(struct ice_hw *hw, m_entry->sw_marker_id, m_entry->lg_act_idx); } else { - u16 vsi_id = new_fltr->fwd_id.vsi_id; + u16 vsi_handle = new_fltr->vsi_handle; enum ice_adminq_opc opcode; /* A rule already exists with the new VSI being added */ - if (test_bit(vsi_id, m_entry->vsi_list_info->vsi_map)) + if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map)) return 0; /* Update the previously created VSI list set with @@ -1027,12 +1235,12 @@ ice_handle_vsi_list_mgmt(struct ice_hw *hw, vsi_list_id = cur_fltr->fwd_id.vsi_list_id; opcode = ice_aqc_opc_update_sw_rules; - status = ice_update_vsi_list_rule(hw, &vsi_id, 1, vsi_list_id, - false, opcode, + status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, + vsi_list_id, false, opcode, new_fltr->lkup_type); /* update VSI list mapping info with new VSI id */ if (!status) - set_bit(vsi_id, m_entry->vsi_list_info->vsi_map); + set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map); } if (!status) m_entry->vsi_count++; @@ -1040,54 +1248,313 @@ ice_handle_vsi_list_mgmt(struct ice_hw *hw, } /** - * ice_find_mac_entry + * ice_find_rule_entry - Search a rule entry * @hw: pointer to the hardware structure - * @mac_addr: MAC address to search for + * @recp_id: lookup type for which the specified rule needs to be searched + * @f_info: rule information * - * Helper function to search for a MAC entry using a given MAC address - * Returns pointer to the entry if found. 
+ * Helper function to search for a given rule entry + * Returns pointer to entry storing the rule if found */ static struct ice_fltr_mgmt_list_entry * -ice_find_mac_entry(struct ice_hw *hw, u8 *mac_addr) +ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info) { - struct ice_fltr_mgmt_list_entry *m_list_itr, *mac_ret = NULL; + struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL; struct ice_switch_info *sw = hw->switch_info; - - mutex_lock(&sw->mac_list_lock); - list_for_each_entry(m_list_itr, &sw->mac_list_head, list_entry) { - u8 *buf = &m_list_itr->fltr_info.l_data.mac.mac_addr[0]; - - if (ether_addr_equal(buf, mac_addr)) { - mac_ret = m_list_itr; + struct list_head *list_head; + + list_head = &sw->recp_list[recp_id].filt_rules; + list_for_each_entry(list_itr, list_head, list_entry) { + if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data, + sizeof(f_info->l_data)) && + f_info->flag == list_itr->fltr_info.flag) { + ret = list_itr; break; } } - mutex_unlock(&sw->mac_list_lock); - return mac_ret; + return ret; +} + +/** + * ice_find_vsi_list_entry - Search VSI list map with VSI count 1 + * @hw: pointer to the hardware structure + * @recp_id: lookup type for which VSI lists needs to be searched + * @vsi_handle: VSI handle to be found in VSI list + * @vsi_list_id: VSI list id found containing vsi_handle + * + * Helper function to search a VSI list with single entry containing given VSI + * handle element. This can be extended further to search VSI list with more + * than 1 vsi_count. Returns pointer to VSI list entry if found. + */ +static struct ice_vsi_list_map_info * +ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle, + u16 *vsi_list_id) +{ + struct ice_vsi_list_map_info *map_info = NULL; + struct ice_switch_info *sw = hw->switch_info; + struct ice_fltr_mgmt_list_entry *list_itr; + struct list_head *list_head; + + list_head = &sw->recp_list[recp_id].filt_rules; + list_for_each_entry(list_itr, list_head, list_entry) { + if (list_itr->vsi_count == 1 && list_itr->vsi_list_info) { + map_info = list_itr->vsi_list_info; + if (test_bit(vsi_handle, map_info->vsi_map)) { + *vsi_list_id = map_info->vsi_list_id; + return map_info; + } + } + } + return NULL; } /** - * ice_add_shared_mac - Add one MAC shared filter rule + * ice_add_rule_internal - add rule for a given lookup type * @hw: pointer to the hardware structure + * @recp_id: lookup type (recipe id) for which rule has to be added * @f_entry: structure containing MAC forwarding information * - * Adds or updates the book keeping list for the MAC addresses + * Adds or updates the rule lists for a given recipe */ static enum ice_status -ice_add_shared_mac(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry) +ice_add_rule_internal(struct ice_hw *hw, u8 recp_id, + struct ice_fltr_list_entry *f_entry) { + struct ice_switch_info *sw = hw->switch_info; struct ice_fltr_info *new_fltr, *cur_fltr; struct ice_fltr_mgmt_list_entry *m_entry; + struct mutex *rule_lock; /* Lock to protect filter rule list */ + enum ice_status status = 0; - new_fltr = &f_entry->fltr_info; + if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle)) + return ICE_ERR_PARAM; + f_entry->fltr_info.fwd_id.hw_vsi_id = + ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle); + + rule_lock = &sw->recp_list[recp_id].filt_rule_lock; - m_entry = ice_find_mac_entry(hw, &new_fltr->l_data.mac.mac_addr[0]); - if (!m_entry) + mutex_lock(rule_lock); + new_fltr = &f_entry->fltr_info; + if (new_fltr->flag & ICE_FLTR_RX) + new_fltr->src = 
hw->port_info->lport; + else if (new_fltr->flag & ICE_FLTR_TX) + new_fltr->src = f_entry->fltr_info.fwd_id.hw_vsi_id; + + m_entry = ice_find_rule_entry(hw, recp_id, new_fltr); + if (!m_entry) { + mutex_unlock(rule_lock); return ice_create_pkt_fwd_rule(hw, f_entry); + } cur_fltr = &m_entry->fltr_info; + status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr); + mutex_unlock(rule_lock); - return ice_handle_vsi_list_mgmt(hw, m_entry, cur_fltr, new_fltr); + return status; +} + +/** + * ice_remove_vsi_list_rule + * @hw: pointer to the hardware structure + * @vsi_list_id: VSI list id generated as part of allocate resource + * @lkup_type: switch rule filter lookup type + * + * The VSI list should be emptied before this function is called to remove the + * VSI list. + */ +static enum ice_status +ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id, + enum ice_sw_lkup_type lkup_type) +{ + struct ice_aqc_sw_rules_elem *s_rule; + enum ice_status status; + u16 s_rule_size; + + s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0); + s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL); + if (!s_rule) + return ICE_ERR_NO_MEMORY; + + s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR); + s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id); + + /* Free the vsi_list resource that we allocated. It is assumed that the + * list is empty at this point. + */ + status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type, + ice_aqc_opc_free_res); + + devm_kfree(ice_hw_to_dev(hw), s_rule); + return status; +} + +/** + * ice_rem_update_vsi_list + * @hw: pointer to the hardware structure + * @vsi_handle: VSI handle of the VSI to remove + * @fm_list: filter management entry for which the VSI list management needs to + * be done + */ +static enum ice_status +ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle, + struct ice_fltr_mgmt_list_entry *fm_list) +{ + enum ice_sw_lkup_type lkup_type; + enum ice_status status = 0; + u16 vsi_list_id; + + if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST || + fm_list->vsi_count == 0) + return ICE_ERR_PARAM; + + /* A rule with the VSI being removed does not exist */ + if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map)) + return ICE_ERR_DOES_NOT_EXIST; + + lkup_type = fm_list->fltr_info.lkup_type; + vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id; + status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true, + ice_aqc_opc_update_sw_rules, + lkup_type); + if (status) + return status; + + fm_list->vsi_count--; + clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map); + + if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) { + struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info; + struct ice_vsi_list_map_info *vsi_list_info = + fm_list->vsi_list_info; + u16 rem_vsi_handle; + + rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map, + ICE_MAX_VSI); + if (!ice_is_vsi_valid(hw, rem_vsi_handle)) + return ICE_ERR_OUT_OF_RANGE; + + /* Make sure VSI list is empty before removing it below */ + status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1, + vsi_list_id, true, + ice_aqc_opc_update_sw_rules, + lkup_type); + if (status) + return status; + + tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI; + tmp_fltr_info.fwd_id.hw_vsi_id = + ice_get_hw_vsi_num(hw, rem_vsi_handle); + tmp_fltr_info.vsi_handle = rem_vsi_handle; + status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info); + if (status) { + ice_debug(hw, ICE_DBG_SW, + "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n", + 
tmp_fltr_info.fwd_id.hw_vsi_id, status); + return status; + } + + fm_list->fltr_info = tmp_fltr_info; + } + + if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) || + (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) { + struct ice_vsi_list_map_info *vsi_list_info = + fm_list->vsi_list_info; + + /* Remove the VSI list since it is no longer used */ + status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type); + if (status) { + ice_debug(hw, ICE_DBG_SW, + "Failed to remove VSI list %d, error %d\n", + vsi_list_id, status); + return status; + } + + list_del(&vsi_list_info->list_entry); + devm_kfree(ice_hw_to_dev(hw), vsi_list_info); + fm_list->vsi_list_info = NULL; + } + + return status; +} + +/** + * ice_remove_rule_internal - Remove a filter rule of a given type + * @hw: pointer to the hardware structure + * @recp_id: recipe id for which the rule needs to removed + * @f_entry: rule entry containing filter information + */ +static enum ice_status +ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id, + struct ice_fltr_list_entry *f_entry) +{ + struct ice_switch_info *sw = hw->switch_info; + struct ice_fltr_mgmt_list_entry *list_elem; + struct mutex *rule_lock; /* Lock to protect filter rule list */ + enum ice_status status = 0; + bool remove_rule = false; + u16 vsi_handle; + + if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle)) + return ICE_ERR_PARAM; + f_entry->fltr_info.fwd_id.hw_vsi_id = + ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle); + + rule_lock = &sw->recp_list[recp_id].filt_rule_lock; + mutex_lock(rule_lock); + list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info); + if (!list_elem) { + status = ICE_ERR_DOES_NOT_EXIST; + goto exit; + } + + if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) { + remove_rule = true; + } else if (!list_elem->vsi_list_info) { + status = ICE_ERR_DOES_NOT_EXIST; + goto exit; + } else { + if (list_elem->vsi_list_info->ref_cnt > 1) + list_elem->vsi_list_info->ref_cnt--; + vsi_handle = f_entry->fltr_info.vsi_handle; + status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem); + if (status) + goto exit; + /* if vsi count goes to zero after updating the vsi list */ + if (list_elem->vsi_count == 0) + remove_rule = true; + } + + if (remove_rule) { + /* Remove the lookup rule */ + struct ice_aqc_sw_rules_elem *s_rule; + + s_rule = devm_kzalloc(ice_hw_to_dev(hw), + ICE_SW_RULE_RX_TX_NO_HDR_SIZE, + GFP_KERNEL); + if (!s_rule) { + status = ICE_ERR_NO_MEMORY; + goto exit; + } + + ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule, + ice_aqc_opc_remove_sw_rules); + + status = ice_aq_sw_rules(hw, s_rule, + ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1, + ice_aqc_opc_remove_sw_rules, NULL); + if (status) + goto exit; + + /* Remove a book keeping from the list */ + devm_kfree(ice_hw_to_dev(hw), s_rule); + + list_del(&list_elem->list_entry); + devm_kfree(ice_hw_to_dev(hw), list_elem); + } +exit: + mutex_unlock(rule_lock); + return status; } /** @@ -1106,7 +1573,10 @@ ice_add_mac(struct ice_hw *hw, struct list_head *m_list) { struct ice_aqc_sw_rules_elem *s_rule, *r_iter; struct ice_fltr_list_entry *m_list_itr; + struct list_head *rule_head; u16 elem_sent, total_elem_left; + struct ice_switch_info *sw; + struct mutex *rule_lock; /* Lock to protect filter rule list */ enum ice_status status = 0; u16 num_unicast = 0; u16 s_rule_size; @@ -1114,48 +1584,73 @@ ice_add_mac(struct ice_hw *hw, struct list_head *m_list) if (!m_list || !hw) return ICE_ERR_PARAM; + s_rule = NULL; + sw = hw->switch_info; + rule_lock = 
&sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock; list_for_each_entry(m_list_itr, m_list, list_entry) { u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0]; + u16 vsi_handle; + u16 hw_vsi_id; - if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC) + m_list_itr->fltr_info.flag = ICE_FLTR_TX; + vsi_handle = m_list_itr->fltr_info.vsi_handle; + if (!ice_is_vsi_valid(hw, vsi_handle)) + return ICE_ERR_PARAM; + hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); + m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id; + /* update the src in case it is vsi num */ + if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI) return ICE_ERR_PARAM; - if (is_zero_ether_addr(add)) + m_list_itr->fltr_info.src = hw_vsi_id; + if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC || + is_zero_ether_addr(add)) return ICE_ERR_PARAM; if (is_unicast_ether_addr(add) && !hw->ucast_shared) { /* Don't overwrite the unicast address */ - if (ice_find_mac_entry(hw, add)) + mutex_lock(rule_lock); + if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC, + &m_list_itr->fltr_info)) { + mutex_unlock(rule_lock); return ICE_ERR_ALREADY_EXISTS; + } + mutex_unlock(rule_lock); num_unicast++; } else if (is_multicast_ether_addr(add) || (is_unicast_ether_addr(add) && hw->ucast_shared)) { - status = ice_add_shared_mac(hw, m_list_itr); - if (status) { - m_list_itr->status = ICE_FLTR_STATUS_FW_FAIL; - return status; - } - m_list_itr->status = ICE_FLTR_STATUS_FW_SUCCESS; + m_list_itr->status = + ice_add_rule_internal(hw, ICE_SW_LKUP_MAC, + m_list_itr); + if (m_list_itr->status) + return m_list_itr->status; } } + mutex_lock(rule_lock); /* Exit if no suitable entries were found for adding bulk switch rule */ - if (!num_unicast) - return 0; + if (!num_unicast) { + status = 0; + goto ice_add_mac_exit; + } + + rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules; /* Allocate switch rule buffer for the bulk update for unicast */ s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE; s_rule = devm_kcalloc(ice_hw_to_dev(hw), num_unicast, s_rule_size, GFP_KERNEL); - if (!s_rule) - return ICE_ERR_NO_MEMORY; + if (!s_rule) { + status = ICE_ERR_NO_MEMORY; + goto ice_add_mac_exit; + } r_iter = s_rule; list_for_each_entry(m_list_itr, m_list, list_entry) { struct ice_fltr_info *f_info = &m_list_itr->fltr_info; - u8 *addr = &f_info->l_data.mac.mac_addr[0]; + u8 *mac_addr = &f_info->l_data.mac.mac_addr[0]; - if (is_unicast_ether_addr(addr)) { - ice_fill_sw_rule(hw, &m_list_itr->fltr_info, - r_iter, ice_aqc_opc_add_sw_rules); + if (is_unicast_ether_addr(mac_addr)) { + ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter, + ice_aqc_opc_add_sw_rules); r_iter = (struct ice_aqc_sw_rules_elem *) ((u8 *)r_iter + s_rule_size); } @@ -1183,11 +1678,10 @@ ice_add_mac(struct ice_hw *hw, struct list_head *m_list) r_iter = s_rule; list_for_each_entry(m_list_itr, m_list, list_entry) { struct ice_fltr_info *f_info = &m_list_itr->fltr_info; - u8 *addr = &f_info->l_data.mac.mac_addr[0]; - struct ice_switch_info *sw = hw->switch_info; + u8 *mac_addr = &f_info->l_data.mac.mac_addr[0]; struct ice_fltr_mgmt_list_entry *fm_entry; - if (is_unicast_ether_addr(addr)) { + if (is_unicast_ether_addr(mac_addr)) { f_info->fltr_rule_id = le16_to_cpu(r_iter->pdata.lkup_tx_rx.index); f_info->fltr_act = ICE_FWD_TO_VSI; @@ -1203,46 +1697,21 @@ ice_add_mac(struct ice_hw *hw, struct list_head *m_list) /* The book keeping entries will get removed when * base driver calls remove filter AQ command */ - mutex_lock(&sw->mac_list_lock); - list_add(&fm_entry->list_entry, &sw->mac_list_head); - 
mutex_unlock(&sw->mac_list_lock); + list_add(&fm_entry->list_entry, rule_head); r_iter = (struct ice_aqc_sw_rules_elem *) ((u8 *)r_iter + s_rule_size); } } ice_add_mac_exit: - devm_kfree(ice_hw_to_dev(hw), s_rule); + mutex_unlock(rule_lock); + if (s_rule) + devm_kfree(ice_hw_to_dev(hw), s_rule); return status; } /** - * ice_find_vlan_entry - * @hw: pointer to the hardware structure - * @vlan_id: VLAN id to search for - * - * Helper function to search for a VLAN entry using a given VLAN id - * Returns pointer to the entry if found. - */ -static struct ice_fltr_mgmt_list_entry * -ice_find_vlan_entry(struct ice_hw *hw, u16 vlan_id) -{ - struct ice_fltr_mgmt_list_entry *vlan_list_itr, *vlan_ret = NULL; - struct ice_switch_info *sw = hw->switch_info; - - mutex_lock(&sw->vlan_list_lock); - list_for_each_entry(vlan_list_itr, &sw->vlan_list_head, list_entry) - if (vlan_list_itr->fltr_info.l_data.vlan.vlan_id == vlan_id) { - vlan_ret = vlan_list_itr; - break; - } - - mutex_unlock(&sw->vlan_list_lock); - return vlan_ret; -} - -/** * ice_add_vlan_internal - Add one VLAN based filter rule * @hw: pointer to the hardware structure * @f_entry: filter entry containing one VLAN information @@ -1250,53 +1719,150 @@ ice_find_vlan_entry(struct ice_hw *hw, u16 vlan_id) static enum ice_status ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry) { - struct ice_fltr_info *new_fltr, *cur_fltr; + struct ice_switch_info *sw = hw->switch_info; struct ice_fltr_mgmt_list_entry *v_list_itr; - u16 vlan_id; + struct ice_fltr_info *new_fltr, *cur_fltr; + enum ice_sw_lkup_type lkup_type; + u16 vsi_list_id = 0, vsi_handle; + struct mutex *rule_lock; /* Lock to protect filter rule list */ + enum ice_status status = 0; + + if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle)) + return ICE_ERR_PARAM; + f_entry->fltr_info.fwd_id.hw_vsi_id = + ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle); new_fltr = &f_entry->fltr_info; + /* VLAN id should only be 12 bits */ if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID) return ICE_ERR_PARAM; - vlan_id = new_fltr->l_data.vlan.vlan_id; - v_list_itr = ice_find_vlan_entry(hw, vlan_id); + if (new_fltr->src_id != ICE_SRC_ID_VSI) + return ICE_ERR_PARAM; + + new_fltr->src = new_fltr->fwd_id.hw_vsi_id; + lkup_type = new_fltr->lkup_type; + vsi_handle = new_fltr->vsi_handle; + rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock; + mutex_lock(rule_lock); + v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr); if (!v_list_itr) { - u16 vsi_id = ICE_VSI_INVAL_ID; - enum ice_status status; - u16 vsi_list_id = 0; + struct ice_vsi_list_map_info *map_info = NULL; if (new_fltr->fltr_act == ICE_FWD_TO_VSI) { - enum ice_sw_lkup_type lkup_type = new_fltr->lkup_type; - - /* All VLAN pruning rules use a VSI list. - * Convert the action to forwarding to a VSI list. + /* All VLAN pruning rules use a VSI list. Check if + * there is already a VSI list containing VSI that we + * want to add. If found, use the same vsi_list_id for + * this new VLAN rule or else create a new list. */ - vsi_id = new_fltr->fwd_id.vsi_id; - status = ice_create_vsi_list_rule(hw, &vsi_id, 1, - &vsi_list_id, - lkup_type); - if (status) - return status; + map_info = ice_find_vsi_list_entry(hw, ICE_SW_LKUP_VLAN, + vsi_handle, + &vsi_list_id); + if (!map_info) { + status = ice_create_vsi_list_rule(hw, + &vsi_handle, + 1, + &vsi_list_id, + lkup_type); + if (status) + goto exit; + } + /* Convert the action to forwarding to a VSI list. 
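The same rule applies from the caller's side: a VLAN filter always ends up forwarding through a VSI list, even with a single subscriber. A hedged sketch of filling one entry for ice_add_vlan(); the example_add_vlan() wrapper is hypothetical, while the field setup mirrors the checks in ice_add_vlan_internal():

/* Illustrative only: add one VLAN pruning rule for a VSI handle */
static enum ice_status
example_add_vlan(struct ice_hw *hw, u16 vsi_handle, u16 vid)
{
	struct ice_fltr_list_entry entry = { 0 };
	struct list_head v_list;

	INIT_LIST_HEAD(&v_list);
	entry.fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
	entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
	entry.fltr_info.src_id = ICE_SRC_ID_VSI;
	entry.fltr_info.vsi_handle = vsi_handle;
	entry.fltr_info.l_data.vlan.vlan_id = vid;	/* 12-bit VLAN id */
	list_add(&entry.list_entry, &v_list);

	return ice_add_vlan(hw, &v_list);
}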
*/ new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST; new_fltr->fwd_id.vsi_list_id = vsi_list_id; } status = ice_create_pkt_fwd_rule(hw, f_entry); - if (!status && vsi_id != ICE_VSI_INVAL_ID) { - v_list_itr = ice_find_vlan_entry(hw, vlan_id); - if (!v_list_itr) - return ICE_ERR_DOES_NOT_EXIST; - v_list_itr->vsi_list_info = - ice_create_vsi_list_map(hw, &vsi_id, 1, - vsi_list_id); + if (!status) { + v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, + new_fltr); + if (!v_list_itr) { + status = ICE_ERR_DOES_NOT_EXIST; + goto exit; + } + /* reuse VSI list for new rule and increment ref_cnt */ + if (map_info) { + v_list_itr->vsi_list_info = map_info; + map_info->ref_cnt++; + } else { + v_list_itr->vsi_list_info = + ice_create_vsi_list_map(hw, &vsi_handle, + 1, vsi_list_id); + } } + } else if (v_list_itr->vsi_list_info->ref_cnt == 1) { + /* Update existing VSI list to add new VSI id only if it used + * by one VLAN rule. + */ + cur_fltr = &v_list_itr->fltr_info; + status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr, + new_fltr); + } else { + /* If VLAN rule exists and VSI list being used by this rule is + * referenced by more than 1 VLAN rule. Then create a new VSI + * list appending previous VSI with new VSI and update existing + * VLAN rule to point to new VSI list id + */ + struct ice_fltr_info tmp_fltr; + u16 vsi_handle_arr[2]; + u16 cur_handle; - return status; + /* Current implementation only supports reusing VSI list with + * one VSI count. We should never hit below condition + */ + if (v_list_itr->vsi_count > 1 && + v_list_itr->vsi_list_info->ref_cnt > 1) { + ice_debug(hw, ICE_DBG_SW, + "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n"); + status = ICE_ERR_CFG; + goto exit; + } + + cur_handle = + find_first_bit(v_list_itr->vsi_list_info->vsi_map, + ICE_MAX_VSI); + + /* A rule already exists with the new VSI being added */ + if (cur_handle == vsi_handle) { + status = ICE_ERR_ALREADY_EXISTS; + goto exit; + } + + vsi_handle_arr[0] = cur_handle; + vsi_handle_arr[1] = vsi_handle; + status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2, + &vsi_list_id, lkup_type); + if (status) + goto exit; + + tmp_fltr = v_list_itr->fltr_info; + tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id; + tmp_fltr.fwd_id.vsi_list_id = vsi_list_id; + tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST; + /* Update the previous switch rule to a new VSI list which + * includes current VSI thats requested + */ + status = ice_update_pkt_fwd_rule(hw, &tmp_fltr); + if (status) + goto exit; + + /* before overriding VSI list map info. 
decrement ref_cnt of + * previous VSI list + */ + v_list_itr->vsi_list_info->ref_cnt--; + + /* now update to newly created list */ + v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id; + v_list_itr->vsi_list_info = + ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2, + vsi_list_id); + v_list_itr->vsi_count++; } - cur_fltr = &v_list_itr->fltr_info; - return ice_handle_vsi_list_mgmt(hw, v_list_itr, cur_fltr, new_fltr); +exit: + mutex_unlock(rule_lock); + return status; } /** @@ -1313,335 +1879,58 @@ ice_add_vlan(struct ice_hw *hw, struct list_head *v_list) return ICE_ERR_PARAM; list_for_each_entry(v_list_itr, v_list, list_entry) { - enum ice_status status; - if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN) return ICE_ERR_PARAM; - - status = ice_add_vlan_internal(hw, v_list_itr); - if (status) { - v_list_itr->status = ICE_FLTR_STATUS_FW_FAIL; - return status; - } - v_list_itr->status = ICE_FLTR_STATUS_FW_SUCCESS; + v_list_itr->fltr_info.flag = ICE_FLTR_TX; + v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr); + if (v_list_itr->status) + return v_list_itr->status; } return 0; } /** - * ice_remove_vsi_list_rule + * ice_rem_sw_rule_info * @hw: pointer to the hardware structure - * @vsi_list_id: VSI list id generated as part of allocate resource - * @lkup_type: switch rule filter lookup type + * @rule_head: pointer to the switch list structure that we want to delete */ -static enum ice_status -ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id, - enum ice_sw_lkup_type lkup_type) -{ - struct ice_aqc_sw_rules_elem *s_rule; - enum ice_status status; - u16 s_rule_size; - - s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0); - s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL); - if (!s_rule) - return ICE_ERR_NO_MEMORY; - - s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR); - s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id); - /* FW expects number of VSIs in vsi_list resource to be 0 for clear - * command. Since memory is zero'ed out during initialization, it's not - * necessary to explicitly initialize the variable to 0. 
- */ - - status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, - ice_aqc_opc_remove_sw_rules, NULL); - if (!status) - /* Free the vsi_list resource that we allocated */ - status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type, - ice_aqc_opc_free_res); - - devm_kfree(ice_hw_to_dev(hw), s_rule); - return status; -} - -/** - * ice_handle_rem_vsi_list_mgmt - * @hw: pointer to the hardware structure - * @vsi_id: ID of the VSI to remove - * @fm_list_itr: filter management entry for which the VSI list management - * needs to be done - */ -static enum ice_status -ice_handle_rem_vsi_list_mgmt(struct ice_hw *hw, u16 vsi_id, - struct ice_fltr_mgmt_list_entry *fm_list_itr) +static void +ice_rem_sw_rule_info(struct ice_hw *hw, struct list_head *rule_head) { - struct ice_switch_info *sw = hw->switch_info; - enum ice_status status = 0; - enum ice_sw_lkup_type lkup_type; - bool is_last_elem = true; - bool conv_list = false; - bool del_list = false; - u16 vsi_list_id; - - lkup_type = fm_list_itr->fltr_info.lkup_type; - vsi_list_id = fm_list_itr->fltr_info.fwd_id.vsi_list_id; - - if (fm_list_itr->vsi_count > 1) { - status = ice_update_vsi_list_rule(hw, &vsi_id, 1, vsi_list_id, - true, - ice_aqc_opc_update_sw_rules, - lkup_type); - if (status) - return status; - fm_list_itr->vsi_count--; - is_last_elem = false; - clear_bit(vsi_id, fm_list_itr->vsi_list_info->vsi_map); - } - - /* For non-VLAN rules that forward packets to a VSI list, convert them - * to forwarding packets to a VSI if there is only one VSI left in the - * list. Unused lists are then removed. - * VLAN rules need to use VSI lists even with only one VSI. - */ - if (fm_list_itr->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST) { - if (lkup_type == ICE_SW_LKUP_VLAN) { - del_list = is_last_elem; - } else if (fm_list_itr->vsi_count == 1) { - conv_list = true; - del_list = true; - } - } - - if (del_list) { - /* Remove the VSI list since it is no longer used */ - struct ice_vsi_list_map_info *vsi_list_info = - fm_list_itr->vsi_list_info; + if (!list_empty(rule_head)) { + struct ice_fltr_mgmt_list_entry *entry; + struct ice_fltr_mgmt_list_entry *tmp; - status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type); - if (status) - return status; - - if (conv_list) { - u16 rem_vsi_id; - - rem_vsi_id = find_first_bit(vsi_list_info->vsi_map, - ICE_MAX_VSI); - - /* Error out when the expected last element is not in - * the VSI list map - */ - if (rem_vsi_id == ICE_MAX_VSI) - return ICE_ERR_OUT_OF_RANGE; - - /* Change the list entry action from VSI_LIST to VSI */ - fm_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI; - fm_list_itr->fltr_info.fwd_id.vsi_id = rem_vsi_id; + list_for_each_entry_safe(entry, tmp, rule_head, list_entry) { + list_del(&entry->list_entry); + devm_kfree(ice_hw_to_dev(hw), entry); } - - list_del(&vsi_list_info->list_entry); - devm_kfree(ice_hw_to_dev(hw), vsi_list_info); - fm_list_itr->vsi_list_info = NULL; - } - - if (conv_list) { - /* Convert the rule's forward action to forwarding packets to - * a VSI - */ - struct ice_aqc_sw_rules_elem *s_rule; - - s_rule = devm_kzalloc(ice_hw_to_dev(hw), - ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, - GFP_KERNEL); - if (!s_rule) - return ICE_ERR_NO_MEMORY; - - ice_fill_sw_rule(hw, &fm_list_itr->fltr_info, s_rule, - ice_aqc_opc_update_sw_rules); - - s_rule->pdata.lkup_tx_rx.index = - cpu_to_le16(fm_list_itr->fltr_info.fltr_rule_id); - - status = ice_aq_sw_rules(hw, s_rule, - ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1, - ice_aqc_opc_update_sw_rules, NULL); - devm_kfree(ice_hw_to_dev(hw), s_rule); - if (status) - return 
status; } - - if (is_last_elem) { - /* Remove the lookup rule */ - struct ice_aqc_sw_rules_elem *s_rule; - - s_rule = devm_kzalloc(ice_hw_to_dev(hw), - ICE_SW_RULE_RX_TX_NO_HDR_SIZE, - GFP_KERNEL); - if (!s_rule) - return ICE_ERR_NO_MEMORY; - - ice_fill_sw_rule(hw, &fm_list_itr->fltr_info, s_rule, - ice_aqc_opc_remove_sw_rules); - - status = ice_aq_sw_rules(hw, s_rule, - ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1, - ice_aqc_opc_remove_sw_rules, NULL); - if (status) - return status; - - /* Remove a book keeping entry from the MAC address list */ - mutex_lock(&sw->mac_list_lock); - list_del(&fm_list_itr->list_entry); - mutex_unlock(&sw->mac_list_lock); - devm_kfree(ice_hw_to_dev(hw), fm_list_itr); - devm_kfree(ice_hw_to_dev(hw), s_rule); - } - return status; -} - -/** - * ice_remove_mac_entry - * @hw: pointer to the hardware structure - * @f_entry: structure containing MAC forwarding information - */ -static enum ice_status -ice_remove_mac_entry(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry) -{ - struct ice_fltr_mgmt_list_entry *m_entry; - u16 vsi_id; - u8 *add; - - add = &f_entry->fltr_info.l_data.mac.mac_addr[0]; - - m_entry = ice_find_mac_entry(hw, add); - if (!m_entry) - return ICE_ERR_PARAM; - - vsi_id = f_entry->fltr_info.fwd_id.vsi_id; - return ice_handle_rem_vsi_list_mgmt(hw, vsi_id, m_entry); } /** - * ice_remove_mac - remove a MAC address based filter rule + * ice_cfg_dflt_vsi - change state of VSI to set/clear default * @hw: pointer to the hardware structure - * @m_list: list of MAC addresses and forwarding information - * - * This function removes either a MAC filter rule or a specific VSI from a - * VSI list for a multicast MAC address. - * - * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by - * ice_add_mac. Caller should be aware that this call will only work if all - * the entries passed into m_list were added previously. It will not attempt to - * do a partial remove of entries that were found. - */ -enum ice_status -ice_remove_mac(struct ice_hw *hw, struct list_head *m_list) -{ - struct ice_aqc_sw_rules_elem *s_rule, *r_iter; - u8 s_rule_size = ICE_SW_RULE_RX_TX_NO_HDR_SIZE; - struct ice_switch_info *sw = hw->switch_info; - struct ice_fltr_mgmt_list_entry *m_entry; - struct ice_fltr_list_entry *m_list_itr; - u16 elem_sent, total_elem_left; - enum ice_status status = 0; - u16 num_unicast = 0; - - if (!m_list) - return ICE_ERR_PARAM; - - list_for_each_entry(m_list_itr, m_list, list_entry) { - u8 *addr = m_list_itr->fltr_info.l_data.mac.mac_addr; - - if (is_unicast_ether_addr(addr) && !hw->ucast_shared) - num_unicast++; - else if (is_multicast_ether_addr(addr) || - (is_unicast_ether_addr(addr) && hw->ucast_shared)) - ice_remove_mac_entry(hw, m_list_itr); - } - - /* Exit if no unicast addresses found. 
Multicast switch rules - * were added individually - */ - if (!num_unicast) - return 0; - - /* Allocate switch rule buffer for the bulk update for unicast */ - s_rule = devm_kcalloc(ice_hw_to_dev(hw), num_unicast, s_rule_size, - GFP_KERNEL); - if (!s_rule) - return ICE_ERR_NO_MEMORY; - - r_iter = s_rule; - list_for_each_entry(m_list_itr, m_list, list_entry) { - u8 *addr = m_list_itr->fltr_info.l_data.mac.mac_addr; - - if (is_unicast_ether_addr(addr)) { - m_entry = ice_find_mac_entry(hw, addr); - if (!m_entry) { - status = ICE_ERR_DOES_NOT_EXIST; - goto ice_remove_mac_exit; - } - - ice_fill_sw_rule(hw, &m_entry->fltr_info, - r_iter, ice_aqc_opc_remove_sw_rules); - r_iter = (struct ice_aqc_sw_rules_elem *) - ((u8 *)r_iter + s_rule_size); - } - } - - /* Call AQ bulk switch rule update for all unicast addresses */ - r_iter = s_rule; - /* Call AQ switch rule in AQ_MAX chunk */ - for (total_elem_left = num_unicast; total_elem_left > 0; - total_elem_left -= elem_sent) { - struct ice_aqc_sw_rules_elem *entry = r_iter; - - elem_sent = min(total_elem_left, - (u16)(ICE_AQ_MAX_BUF_LEN / s_rule_size)); - status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size, - elem_sent, ice_aqc_opc_remove_sw_rules, - NULL); - if (status) - break; - r_iter = (struct ice_aqc_sw_rules_elem *) - ((u8 *)r_iter + s_rule_size); - } - - list_for_each_entry(m_list_itr, m_list, list_entry) { - u8 *addr = m_list_itr->fltr_info.l_data.mac.mac_addr; - - if (is_unicast_ether_addr(addr)) { - m_entry = ice_find_mac_entry(hw, addr); - if (!m_entry) - return ICE_ERR_OUT_OF_RANGE; - mutex_lock(&sw->mac_list_lock); - list_del(&m_entry->list_entry); - mutex_unlock(&sw->mac_list_lock); - devm_kfree(ice_hw_to_dev(hw), m_entry); - } - } - -ice_remove_mac_exit: - devm_kfree(ice_hw_to_dev(hw), s_rule); - return status; -} - -/** - * ice_cfg_dflt_vsi - add filter rule to set/unset given VSI as default - * VSI for the switch (represented by swid) - * @hw: pointer to the hardware structure - * @vsi_id: number of VSI to set as default + * @vsi_handle: VSI handle to set as default * @set: true to add the above mentioned switch rule, false to remove it * @direction: ICE_FLTR_RX or ICE_FLTR_TX + * + * add filter rule to set/unset given VSI as default VSI for the switch + * (represented by swid) */ enum ice_status -ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_id, bool set, u8 direction) +ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction) { struct ice_aqc_sw_rules_elem *s_rule; struct ice_fltr_info f_info; enum ice_adminq_opc opcode; enum ice_status status; u16 s_rule_size; + u16 hw_vsi_id; + + if (!ice_is_vsi_valid(hw, vsi_handle)) + return ICE_ERR_PARAM; + hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); s_rule_size = set ? 
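/* (A hypothetical usage sketch, not part of this patch: a caller that
 * makes a VSI the default for received traffic and later clears it again
 * would do something like
 *
 *	status = ice_cfg_dflt_vsi(hw, vsi_handle, true, ICE_FLTR_RX);
 *	...
 *	status = ice_cfg_dflt_vsi(hw, vsi_handle, false, ICE_FLTR_RX);
 *
 * where vsi_handle is the driver's software handle, validated above via
 * ice_is_vsi_valid().)
 */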
ICE_SW_RULE_RX_TX_ETH_HDR_SIZE : ICE_SW_RULE_RX_TX_NO_HDR_SIZE; @@ -1654,15 +1943,17 @@ ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_id, bool set, u8 direction) f_info.lkup_type = ICE_SW_LKUP_DFLT; f_info.flag = direction; f_info.fltr_act = ICE_FWD_TO_VSI; - f_info.fwd_id.vsi_id = vsi_id; + f_info.fwd_id.hw_vsi_id = hw_vsi_id; if (f_info.flag & ICE_FLTR_RX) { f_info.src = hw->port_info->lport; + f_info.src_id = ICE_SRC_ID_LPORT; if (!set) f_info.fltr_rule_id = hw->port_info->dflt_rx_vsi_rule_id; } else if (f_info.flag & ICE_FLTR_TX) { - f_info.src = vsi_id; + f_info.src_id = ICE_SRC_ID_VSI; + f_info.src = hw_vsi_id; if (!set) f_info.fltr_rule_id = hw->port_info->dflt_tx_vsi_rule_id; @@ -1682,10 +1973,10 @@ ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_id, bool set, u8 direction) u16 index = le16_to_cpu(s_rule->pdata.lkup_tx_rx.index); if (f_info.flag & ICE_FLTR_TX) { - hw->port_info->dflt_tx_vsi_num = vsi_id; + hw->port_info->dflt_tx_vsi_num = hw_vsi_id; hw->port_info->dflt_tx_vsi_rule_id = index; } else if (f_info.flag & ICE_FLTR_RX) { - hw->port_info->dflt_rx_vsi_num = vsi_id; + hw->port_info->dflt_rx_vsi_num = hw_vsi_id; hw->port_info->dflt_rx_vsi_rule_id = index; } } else { @@ -1704,26 +1995,38 @@ out: } /** - * ice_remove_vlan_internal - Remove one VLAN based filter rule + * ice_remove_mac - remove a MAC address based filter rule * @hw: pointer to the hardware structure - * @f_entry: filter entry containing one VLAN information + * @m_list: list of MAC addresses and forwarding information + * + * This function removes either a MAC filter rule or a specific VSI from a + * VSI list for a multicast MAC address. + * + * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by + * ice_add_mac. Caller should be aware that this call will only work if all + * the entries passed into m_list were added previously. It will not attempt to + * do a partial remove of entries that were found. 
*/ -static enum ice_status -ice_remove_vlan_internal(struct ice_hw *hw, - struct ice_fltr_list_entry *f_entry) +enum ice_status +ice_remove_mac(struct ice_hw *hw, struct list_head *m_list) { - struct ice_fltr_info *new_fltr; - struct ice_fltr_mgmt_list_entry *v_list_elem; - u16 vsi_id; + struct ice_fltr_list_entry *list_itr, *tmp; - new_fltr = &f_entry->fltr_info; - - v_list_elem = ice_find_vlan_entry(hw, new_fltr->l_data.vlan.vlan_id); - if (!v_list_elem) + if (!m_list) return ICE_ERR_PARAM; - vsi_id = f_entry->fltr_info.fwd_id.vsi_id; - return ice_handle_rem_vsi_list_mgmt(hw, vsi_id, v_list_elem); + list_for_each_entry_safe(list_itr, tmp, m_list, list_entry) { + enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type; + + if (l_type != ICE_SW_LKUP_MAC) + return ICE_ERR_PARAM; + list_itr->status = ice_remove_rule_internal(hw, + ICE_SW_LKUP_MAC, + list_itr); + if (list_itr->status) + return list_itr->status; + } + return 0; } /** @@ -1734,131 +2037,169 @@ ice_remove_vlan_internal(struct ice_hw *hw, enum ice_status ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list) { - struct ice_fltr_list_entry *v_list_itr; - enum ice_status status = 0; + struct ice_fltr_list_entry *v_list_itr, *tmp; if (!v_list || !hw) return ICE_ERR_PARAM; - list_for_each_entry(v_list_itr, v_list, list_entry) { - status = ice_remove_vlan_internal(hw, v_list_itr); - if (status) { - v_list_itr->status = ICE_FLTR_STATUS_FW_FAIL; - return status; - } - v_list_itr->status = ICE_FLTR_STATUS_FW_SUCCESS; + list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) { + enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type; + + if (l_type != ICE_SW_LKUP_VLAN) + return ICE_ERR_PARAM; + v_list_itr->status = ice_remove_rule_internal(hw, + ICE_SW_LKUP_VLAN, + v_list_itr); + if (v_list_itr->status) + return v_list_itr->status; } - return status; + return 0; +} + +/** + * ice_vsi_uses_fltr - Determine if given VSI uses specified filter + * @fm_entry: filter entry to inspect + * @vsi_handle: VSI handle to compare with filter info + */ +static bool +ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle) +{ + return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI && + fm_entry->fltr_info.vsi_handle == vsi_handle) || + (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST && + (test_bit(vsi_handle, fm_entry->vsi_list_info->vsi_map)))); +} + +/** + * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list + * @hw: pointer to the hardware structure + * @vsi_handle: VSI handle to remove filters from + * @vsi_list_head: pointer to the list to add entry to + * @fi: pointer to fltr_info of filter entry to copy & add + * + * Helper function, used when creating a list of filters to remove from + * a specific VSI. The entry added to vsi_list_head is a COPY of the + * original filter entry, with the exception of fltr_info.fltr_act and + * fltr_info.fwd_id fields. These are set such that later logic can + * extract which VSI to remove the fltr from, and pass on that information. 
+ */ +static enum ice_status +ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle, + struct list_head *vsi_list_head, + struct ice_fltr_info *fi) +{ + struct ice_fltr_list_entry *tmp; + + /* this memory is freed up in the caller function + * once filters for this VSI are removed + */ + tmp = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*tmp), GFP_KERNEL); + if (!tmp) + return ICE_ERR_NO_MEMORY; + + tmp->fltr_info = *fi; + + /* Overwrite these fields to indicate which VSI to remove filter from, + * so find and remove logic can extract the information from the + * list entries. Note that original entries will still have proper + * values. + */ + tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI; + tmp->fltr_info.vsi_handle = vsi_handle; + tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); + + list_add(&tmp->list_entry, vsi_list_head); + + return 0; } /** * ice_add_to_vsi_fltr_list - Add VSI filters to the list * @hw: pointer to the hardware structure - * @vsi_id: ID of VSI to remove filters from + * @vsi_handle: VSI handle to remove filters from * @lkup_list_head: pointer to the list that has certain lookup type filters - * @vsi_list_head: pointer to the list pertaining to VSI with vsi_id + * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle + * + * Locates all filters in lkup_list_head that are used by the given VSI, + * and adds COPIES of those entries to vsi_list_head (intended to be used + * to remove the listed filters). + * Note that this means all entries in vsi_list_head must be explicitly + * deallocated by the caller when done with list. */ static enum ice_status -ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_id, +ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle, struct list_head *lkup_list_head, struct list_head *vsi_list_head) { struct ice_fltr_mgmt_list_entry *fm_entry; + enum ice_status status = 0; /* check to make sure VSI id is valid and within boundary */ - if (vsi_id >= - (sizeof(fm_entry->vsi_list_info->vsi_map) * BITS_PER_BYTE - 1)) + if (!ice_is_vsi_valid(hw, vsi_handle)) return ICE_ERR_PARAM; list_for_each_entry(fm_entry, lkup_list_head, list_entry) { struct ice_fltr_info *fi; fi = &fm_entry->fltr_info; - if ((fi->fltr_act == ICE_FWD_TO_VSI && - fi->fwd_id.vsi_id == vsi_id) || - (fi->fltr_act == ICE_FWD_TO_VSI_LIST && - (test_bit(vsi_id, fm_entry->vsi_list_info->vsi_map)))) { - struct ice_fltr_list_entry *tmp; - - /* this memory is freed up in the caller function - * ice_remove_vsi_lkup_fltr() once filters for - * this VSI are removed - */ - tmp = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*tmp), - GFP_KERNEL); - if (!tmp) - return ICE_ERR_NO_MEMORY; + if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle)) + continue; - memcpy(&tmp->fltr_info, fi, sizeof(*fi)); - - /* Expected below fields to be set to ICE_FWD_TO_VSI and - * the particular VSI id since we are only removing this - * one VSI - */ - if (fi->fltr_act == ICE_FWD_TO_VSI_LIST) { - tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI; - tmp->fltr_info.fwd_id.vsi_id = vsi_id; - } - - list_add(&tmp->list_entry, vsi_list_head); - } + status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle, + vsi_list_head, fi); + if (status) + return status; } - return 0; + return status; } /** * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI * @hw: pointer to the hardware structure - * @vsi_id: ID of VSI to remove filters from + * @vsi_handle: VSI handle to remove filters from * @lkup: switch rule filter lookup type */ static void -ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 
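/* (Sketch of the caller-side contract for ice_add_to_vsi_fltr_list()
 * above, with hypothetical local names: the collected entries are
 * copies, so after handing them to the remove path the caller frees
 * them itself, mirroring the cleanup loop used later in this patch:
 *
 *	LIST_HEAD(remove_list);
 *
 *	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
 *					  &remove_list);
 *	...
 *	list_for_each_entry_safe(entry, tmp, &remove_list, list_entry) {
 *		list_del(&entry->list_entry);
 *		devm_kfree(ice_hw_to_dev(hw), entry);
 *	}
 * )
 */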
vsi_id, +ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle, enum ice_sw_lkup_type lkup) { struct ice_switch_info *sw = hw->switch_info; struct ice_fltr_list_entry *fm_entry; struct list_head remove_list_head; + struct list_head *rule_head; struct ice_fltr_list_entry *tmp; + struct mutex *rule_lock; /* Lock to protect filter rule list */ enum ice_status status; INIT_LIST_HEAD(&remove_list_head); + rule_lock = &sw->recp_list[lkup].filt_rule_lock; + rule_head = &sw->recp_list[lkup].filt_rules; + mutex_lock(rule_lock); + status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head, + &remove_list_head); + mutex_unlock(rule_lock); + if (status) + return; + switch (lkup) { case ICE_SW_LKUP_MAC: - mutex_lock(&sw->mac_list_lock); - status = ice_add_to_vsi_fltr_list(hw, vsi_id, - &sw->mac_list_head, - &remove_list_head); - mutex_unlock(&sw->mac_list_lock); - if (!status) { - ice_remove_mac(hw, &remove_list_head); - goto free_fltr_list; - } + ice_remove_mac(hw, &remove_list_head); break; case ICE_SW_LKUP_VLAN: - mutex_lock(&sw->vlan_list_lock); - status = ice_add_to_vsi_fltr_list(hw, vsi_id, - &sw->vlan_list_head, - &remove_list_head); - mutex_unlock(&sw->vlan_list_lock); - if (!status) { - ice_remove_vlan(hw, &remove_list_head); - goto free_fltr_list; - } + ice_remove_vlan(hw, &remove_list_head); break; case ICE_SW_LKUP_MAC_VLAN: case ICE_SW_LKUP_ETHERTYPE: case ICE_SW_LKUP_ETHERTYPE_MAC: case ICE_SW_LKUP_PROMISC: - case ICE_SW_LKUP_PROMISC_VLAN: case ICE_SW_LKUP_DFLT: - ice_debug(hw, ICE_DBG_SW, - "Remove filters for this lookup type hasn't been implemented yet\n"); + case ICE_SW_LKUP_PROMISC_VLAN: + case ICE_SW_LKUP_LAST: + default: + ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type %d\n", lkup); break; } - return; -free_fltr_list: list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) { list_del(&fm_entry->list_entry); devm_kfree(ice_hw_to_dev(hw), fm_entry); @@ -1868,16 +2209,121 @@ free_fltr_list: /** * ice_remove_vsi_fltr - Remove all filters for a VSI * @hw: pointer to the hardware structure - * @vsi_id: ID of VSI to remove filters from + * @vsi_handle: VSI handle to remove filters from + */ +void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle) +{ + ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC); + ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC_VLAN); + ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC); + ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_VLAN); + ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_DFLT); + ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE); + ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE_MAC); + ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC_VLAN); +} + +/** + * ice_replay_vsi_fltr - Replay filters for requested VSI + * @hw: pointer to the hardware structure + * @vsi_handle: driver VSI handle + * @recp_id: Recipe id for which rules need to be replayed + * @list_head: list for which filters need to be replayed + * + * Replays the filter of recipe recp_id for a VSI represented via vsi_handle. + * It is required to pass valid VSI handle. 
+ */ +static enum ice_status +ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id, + struct list_head *list_head) +{ + struct ice_fltr_mgmt_list_entry *itr; + enum ice_status status = 0; + u16 hw_vsi_id; + + if (list_empty(list_head)) + return status; + hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); + + list_for_each_entry(itr, list_head, list_entry) { + struct ice_fltr_list_entry f_entry; + + f_entry.fltr_info = itr->fltr_info; + if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN && + itr->fltr_info.vsi_handle == vsi_handle) { + /* update the src in case it is vsi num */ + if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI) + f_entry.fltr_info.src = hw_vsi_id; + status = ice_add_rule_internal(hw, recp_id, &f_entry); + if (status) + goto end; + continue; + } + if (!itr->vsi_list_info || + !test_bit(vsi_handle, itr->vsi_list_info->vsi_map)) + continue; + /* Clearing it so that the logic can add it back */ + clear_bit(vsi_handle, itr->vsi_list_info->vsi_map); + f_entry.fltr_info.vsi_handle = vsi_handle; + f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI; + /* update the src in case it is vsi num */ + if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI) + f_entry.fltr_info.src = hw_vsi_id; + if (recp_id == ICE_SW_LKUP_VLAN) + status = ice_add_vlan_internal(hw, &f_entry); + else + status = ice_add_rule_internal(hw, recp_id, &f_entry); + if (status) + goto end; + } +end: + return status; +} + +/** + * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists + * @hw: pointer to the hardware structure + * @vsi_handle: driver VSI handle + * + * Replays filters for requested VSI via vsi_handle. */ -void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_id) +enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle) { - ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_MAC); - ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_MAC_VLAN); - ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_PROMISC); - ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_VLAN); - ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_DFLT); - ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_ETHERTYPE); - ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_ETHERTYPE_MAC); - ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_PROMISC_VLAN); + struct ice_switch_info *sw = hw->switch_info; + enum ice_status status = 0; + u8 i; + + for (i = 0; i < ICE_SW_LKUP_LAST; i++) { + struct list_head *head; + + head = &sw->recp_list[i].filt_replay_rules; + status = ice_replay_vsi_fltr(hw, vsi_handle, i, head); + if (status) + return status; + } + return status; +} + +/** + * ice_rm_all_sw_replay_rule_info - deletes filter replay rules + * @hw: pointer to the hw struct + * + * Deletes the filter replay rules. 
+ */ +void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw) +{ + struct ice_switch_info *sw = hw->switch_info; + u8 i; + + if (!sw) + return; + + for (i = 0; i < ICE_SW_LKUP_LAST; i++) { + if (!list_empty(&sw->recp_list[i].filt_replay_rules)) { + struct list_head *l_head; + + l_head = &sw->recp_list[i].filt_replay_rules; + ice_rem_sw_rule_info(hw, l_head); + } + } } diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h index 9b8ec128ee31..b88d96a1ef69 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.h +++ b/drivers/net/ethernet/intel/ice/ice_switch.h @@ -17,7 +17,9 @@ struct ice_vsi_ctx { u16 vsis_unallocated; u16 flags; struct ice_aqc_vsi_props info; + struct ice_sched_vsi_info sched; u8 alloc_from_pool; + u8 vf_num; }; enum ice_sw_fwd_act_type { @@ -39,6 +41,15 @@ enum ice_sw_lkup_type { ICE_SW_LKUP_DFLT = 5, ICE_SW_LKUP_ETHERTYPE_MAC = 8, ICE_SW_LKUP_PROMISC_VLAN = 9, + ICE_SW_LKUP_LAST +}; + +/* type of filter src id */ +enum ice_src_id { + ICE_SRC_ID_UNKNOWN = 0, + ICE_SRC_ID_VSI, + ICE_SRC_ID_QUEUE, + ICE_SRC_ID_LPORT, }; struct ice_fltr_info { @@ -55,6 +66,7 @@ struct ice_fltr_info { /* Source VSI for LOOKUP_TX or source port for LOOKUP_RX */ u16 src; + enum ice_src_id src_id; union { struct { @@ -76,7 +88,10 @@ struct ice_fltr_info { u16 ethertype; u8 mac_addr[ETH_ALEN]; /* optional */ } ethertype_mac; - } l_data; + } l_data; /* Make sure to zero out the memory of l_data before using + * it or only set the data associated with lookup match + * rest everything should be zero + */ /* Depending on filter action */ union { @@ -84,12 +99,16 @@ struct ice_fltr_info { * queue id in case of ICE_FWD_TO_QGRP. */ u16 q_id:11; - u16 vsi_id:10; + u16 hw_vsi_id:10; u16 vsi_list_id:10; } fwd_id; + /* Sw VSI handle */ + u16 vsi_handle; + /* Set to num_queues if action is ICE_FWD_TO_QGRP. This field - * determines the range of queues the packet needs to be forwarded to + * determines the range of queues the packet needs to be forwarded to. + * Note that qgrp_size must be set to a power of 2. 
*/ u8 qgrp_size; @@ -98,29 +117,52 @@ struct ice_fltr_info { u8 lan_en; /* Indicate if packet can be forwarded to the uplink */ }; +struct ice_sw_recipe { + struct list_head l_entry; + + /* To protect modification of filt_rule list + * defined below + */ + struct mutex filt_rule_lock; + + /* List of type ice_fltr_mgmt_list_entry */ + struct list_head filt_rules; + struct list_head filt_replay_rules; + + /* linked list of type recipe_list_entry */ + struct list_head rg_list; + /* linked list of type ice_sw_fv_list_entry*/ + struct list_head fv_list; + struct ice_aqc_recipe_data_elem *r_buf; + u8 recp_count; + u8 root_rid; + u8 num_profs; + u8 *prof_ids; + + /* recipe bitmap: what all recipes makes this recipe */ + DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES); +}; + /* Bookkeeping structure to hold bitmap of VSIs corresponding to VSI list id */ struct ice_vsi_list_map_info { struct list_head list_entry; DECLARE_BITMAP(vsi_map, ICE_MAX_VSI); u16 vsi_list_id; -}; - -enum ice_sw_fltr_status { - ICE_FLTR_STATUS_NEW = 0, - ICE_FLTR_STATUS_FW_SUCCESS, - ICE_FLTR_STATUS_FW_FAIL, + /* counter to track how many rules are reusing this VSI list */ + u16 ref_cnt; }; struct ice_fltr_list_entry { struct list_head list_entry; - enum ice_sw_fltr_status status; + enum ice_status status; struct ice_fltr_info fltr_info; }; /* This defines an entry in the list that maintains MAC or VLAN membership * to HW list mapping, since multiple VSIs can subscribe to the same MAC or * VLAN. As an optimization the VSI list should be created only when a - * second VSI becomes a subscriber to the VLAN address. + * second VSI becomes a subscriber to the same MAC address. VSI lists are always + * used for VLAN membership. */ struct ice_fltr_mgmt_list_entry { /* back pointer to VSI list id to VSI list mapping */ @@ -138,24 +180,33 @@ struct ice_fltr_mgmt_list_entry { /* VSI related commands */ enum ice_status -ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, - struct ice_sq_cd *cd); +ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, + struct ice_sq_cd *cd); enum ice_status -ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, - struct ice_sq_cd *cd); +ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, + bool keep_vsi_alloc, struct ice_sq_cd *cd); enum ice_status -ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, - bool keep_vsi_alloc, struct ice_sq_cd *cd); - +ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, + struct ice_sq_cd *cd); +bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle); +struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle); enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw); /* Switch/bridge related commands */ +enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw); enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_lst); enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_lst); -void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_id); +void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle); enum ice_status ice_add_vlan(struct ice_hw *hw, struct list_head *m_list); enum ice_status ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list); enum ice_status -ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_id, bool set, u8 direction); +ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction); + +enum ice_status ice_init_def_sw_recp(struct ice_hw *hw); +u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 
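/* (A hypothetical example of building one MAC filter entry against the
 * structures declared above; per the l_data comment, the entry is
 * zero-initialized so only the fields for the chosen lookup type are
 * set. The local names are illustrative, not part of this patch:
 *
 *	struct ice_fltr_list_entry entry = { 0 };
 *	LIST_HEAD(m_list);
 *
 *	entry.fltr_info.lkup_type = ICE_SW_LKUP_MAC;
 *	entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
 *	entry.fltr_info.src_id = ICE_SRC_ID_VSI;
 *	entry.fltr_info.vsi_handle = vsi_handle;
 *	ether_addr_copy(entry.fltr_info.l_data.mac.mac_addr, mac);
 *	list_add(&entry.list_entry, &m_list);
 *	status = ice_add_mac(hw, &m_list);
 * )
 */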
vsi_handle); +bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle); + +enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle); +void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw); #endif /* _ICE_SWITCH_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index 6481e3d86374..5dae968d853e 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -251,6 +251,7 @@ int ice_setup_tx_ring(struct ice_ring *tx_ring) tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; + tx_ring->tx_stats.prev_pkt = -1; return 0; err: diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h index 31bc998fe200..1d0f58bd389b 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx.h @@ -71,6 +71,7 @@ struct ice_txq_stats { u64 restart_q; u64 tx_busy; u64 tx_linearize; + int prev_pkt; /* negative if no pending Tx descriptors */ }; struct ice_rxq_stats { @@ -103,10 +104,17 @@ enum ice_rx_dtype { #define ICE_RX_ITR ICE_IDX_ITR0 #define ICE_TX_ITR ICE_IDX_ITR1 #define ICE_ITR_DYNAMIC 0x8000 /* use top bit as a flag */ -#define ICE_ITR_8K 0x003E +#define ICE_ITR_8K 125 +#define ICE_ITR_20K 50 +#define ICE_DFLT_TX_ITR ICE_ITR_20K +#define ICE_DFLT_RX_ITR ICE_ITR_20K +/* apply ITR granularity translation to program the register. itr_gran is either + * 2 or 4 usecs so we need to divide by 2 first then shift by that value + */ +#define ITR_TO_REG(val, itr_gran) (((val) & ~ICE_ITR_DYNAMIC) >> \ + ((itr_gran) / 2)) -/* apply ITR HW granularity translation to program the HW registers */ -#define ITR_TO_REG(val, itr_gran) (((val) & ~ICE_ITR_DYNAMIC) >> (itr_gran)) +#define ICE_DFLT_INTRL 0 /* Legacy or Advanced Mode Queue */ #define ICE_TX_ADVANCED 0 @@ -128,14 +136,6 @@ struct ice_ring { u16 q_index; /* Queue number of ring */ u32 txq_teid; /* Added Tx queue TEID */ - /* high bit set means dynamic, use accessor routines to read/write. - * hardware supports 2us/1us resolution for the ITR registers. - * these values always store the USER setting, and must be converted - * before programming to a register. 
- */ - u16 rx_itr_setting; - u16 tx_itr_setting; - u16 count; /* Number of descriptors */ u16 reg_idx; /* HW register index of the ring */ @@ -172,6 +172,7 @@ struct ice_ring_container { unsigned int total_bytes; /* total bytes processed this int */ unsigned int total_pkts; /* total packets processed this int */ enum ice_latency_range latency_range; + int itr_idx; /* index in the interrupt vector */ u16 itr; }; diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index 97c366e0ca59..12f9432abf11 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -18,6 +18,9 @@ static inline bool ice_is_tc_ena(u8 bitmap, u8 tc) return test_bit(tc, (unsigned long *)&bitmap); } +/* Driver always calls main vsi_handle first */ +#define ICE_MAIN_VSI_HANDLE 0 + /* debug masks - set these bits in hw->debug_mask to control output */ #define ICE_DBG_INIT BIT_ULL(1) #define ICE_DBG_LINK BIT_ULL(4) @@ -34,10 +37,15 @@ static inline bool ice_is_tc_ena(u8 bitmap, u8 tc) enum ice_aq_res_ids { ICE_NVM_RES_ID = 1, ICE_SPD_RES_ID, - ICE_GLOBAL_CFG_LOCK_RES_ID, - ICE_CHANGE_LOCK_RES_ID + ICE_CHANGE_LOCK_RES_ID, + ICE_GLOBAL_CFG_LOCK_RES_ID }; +/* FW update timeout definitions are in milliseconds */ +#define ICE_NVM_TIMEOUT 180000 +#define ICE_CHANGE_LOCK_TIMEOUT 1000 +#define ICE_GLOBAL_CFG_LOCK_TIMEOUT 3000 + enum ice_aq_res_access_type { ICE_RES_READ = 1, ICE_RES_WRITE @@ -76,6 +84,7 @@ enum ice_media_type { enum ice_vsi_type { ICE_VSI_PF = 0, + ICE_VSI_VF, }; struct ice_link_status { @@ -95,6 +104,15 @@ struct ice_link_status { u8 module_type[ICE_MODULE_TYPE_TOTAL_BYTE]; }; +/* Different reset sources for which a disable queue AQ call has to be made in + * order to clean the TX scheduler as a part of the reset + */ +enum ice_disq_rst_src { + ICE_NO_RESET = 0, + ICE_VM_RESET, + ICE_VF_RESET, +}; + /* PHY info such as phy_type, etc... */ struct ice_phy_info { struct ice_link_status link_info; @@ -119,6 +137,9 @@ struct ice_hw_common_caps { /* Max MTU for function or device */ u16 max_mtu; + /* Virtualization support */ + u8 sr_iov_1_1; /* SR-IOV enabled */ + /* RSS related capabilities */ u16 rss_table_size; /* 512 for PFs and 64 for VFs */ u8 rss_table_entry_width; /* RSS Entry width in bits */ @@ -127,12 +148,15 @@ struct ice_hw_common_caps { /* Function specific capabilities */ struct ice_hw_func_caps { struct ice_hw_common_caps common_cap; + u32 num_allocd_vfs; /* Number of allocated VFs */ + u32 vf_base_id; /* Logical ID of the first VF */ u32 guaranteed_num_vsi; }; /* Device wide capabilities */ struct ice_hw_dev_caps { struct ice_hw_common_caps common_cap; + u32 num_vfs_exposed; /* Total number of VFs exposed */ u32 num_vsi_allocd_to_host; /* Excluding EMP VSI */ }; @@ -142,11 +166,18 @@ struct ice_mac_info { u8 perm_addr[ETH_ALEN]; }; -/* Various RESET request, These are not tied with HW reset types */ +/* Reset types used to determine which kind of reset was requested. These + * defines match what the RESET_TYPE field of the GLGEN_RSTAT register. + * ICE_RESET_PFR does not match any RESET_TYPE field in the GLGEN_RSTAT register + * because its reset source is different than the other types listed. 
+ */ enum ice_reset_req { - ICE_RESET_PFR = 0, + ICE_RESET_POR = 0, + ICE_RESET_INVAL = 0, ICE_RESET_CORER = 1, ICE_RESET_GLOBR = 2, + ICE_RESET_EMPR = 3, + ICE_RESET_PFR = 4, }; /* Bus parameters */ @@ -180,7 +211,7 @@ struct ice_sched_node { struct ice_sched_node **children; struct ice_aqc_txsched_elem_data info; u32 agg_id; /* aggregator group id */ - u16 vsi_id; + u16 vsi_handle; u8 in_use; /* suspended or in use */ u8 tx_sched_layer; /* Logical Layer (1-9) */ u8 num_children; @@ -204,6 +235,7 @@ enum ice_agg_type { }; #define ICE_SCHED_DFLT_RL_PROF_ID 0 +#define ICE_SCHED_DFLT_BW_WT 1 /* vsi type list entry to locate corresponding vsi/ag nodes */ struct ice_sched_vsi_info { @@ -238,8 +270,6 @@ struct ice_port_info { struct ice_mac_info mac; struct ice_phy_info phy; struct mutex sched_lock; /* protect access to TXSched tree */ - struct ice_sched_tx_policy sched_policy; - struct list_head vsi_info_list; struct list_head agg_list; /* lists all aggregator */ u8 lport; #define ICE_LPORT_MASK 0xff @@ -247,19 +277,26 @@ struct ice_port_info { }; struct ice_switch_info { - /* Switch VSI lists to MAC/VLAN translation */ - struct mutex mac_list_lock; /* protect MAC list */ - struct list_head mac_list_head; - struct mutex vlan_list_lock; /* protect VLAN list */ - struct list_head vlan_list_head; - struct mutex eth_m_list_lock; /* protect ethtype list */ - struct list_head eth_m_list_head; - struct mutex promisc_list_lock; /* protect promisc mode list */ - struct list_head promisc_list_head; - struct mutex mac_vlan_list_lock; /* protect MAC-VLAN list */ - struct list_head mac_vlan_list_head; - struct list_head vsi_list_map_head; + struct ice_sw_recipe *recp_list; +}; + +/* FW logging configuration */ +struct ice_fw_log_evnt { + u8 cfg : 4; /* New event enables to configure */ + u8 cur : 4; /* Current/active event enables */ +}; + +struct ice_fw_log_cfg { + u8 cq_en : 1; /* FW logging is enabled via the control queue */ + u8 uart_en : 1; /* FW logging is enabled via UART for all PFs */ + u8 actv_evnts; /* Cumulation of currently enabled log events */ + +#define ICE_FW_LOG_EVNT_INFO (ICE_AQC_FW_LOG_INFO_EN >> ICE_AQC_FW_LOG_EN_S) +#define ICE_FW_LOG_EVNT_INIT (ICE_AQC_FW_LOG_INIT_EN >> ICE_AQC_FW_LOG_EN_S) +#define ICE_FW_LOG_EVNT_FLOW (ICE_AQC_FW_LOG_FLOW_EN >> ICE_AQC_FW_LOG_EN_S) +#define ICE_FW_LOG_EVNT_ERR (ICE_AQC_FW_LOG_ERR_EN >> ICE_AQC_FW_LOG_EN_S) + struct ice_fw_log_evnt evnts[ICE_AQC_FW_LOG_ID_MAX]; }; /* Port hardware description */ @@ -286,8 +323,11 @@ struct ice_hw { u8 flattened_layers; u8 max_cgds; u8 sw_entry_point_layer; + u16 max_children[ICE_AQC_TOPO_MAX_LEVEL_NUM]; + struct ice_vsi_ctx *vsi_ctx[ICE_MAX_VSI]; u8 evb_veb; /* true for VEB, false for VEPA */ + u8 reset_ongoing; /* true if hw is in reset, false otherwise */ struct ice_bus_info bus; struct ice_nvm_info nvm; struct ice_hw_dev_caps dev_caps; /* device capabilities */ @@ -297,6 +337,7 @@ struct ice_hw { /* Control Queue info */ struct ice_ctl_q_info adminq; + struct ice_ctl_q_info mailboxq; u8 api_branch; /* API branch version */ u8 api_maj_ver; /* API major version */ @@ -308,16 +349,27 @@ struct ice_hw { u8 fw_patch; /* firmware patch version */ u32 fw_build; /* firmware build number */ - /* minimum allowed value for different speeds */ -#define ICE_ITR_GRAN_MIN_200 1 -#define ICE_ITR_GRAN_MIN_100 1 -#define ICE_ITR_GRAN_MIN_50 2 -#define ICE_ITR_GRAN_MIN_25 4 + struct ice_fw_log_cfg fw_log; + +/* Device max aggregate bandwidths corresponding to the GL_PWR_MODE_CTL + * register. 
Used for determining the itr/intrl granularity during + * initialization. + */ +#define ICE_MAX_AGG_BW_200G 0x0 +#define ICE_MAX_AGG_BW_100G 0X1 +#define ICE_MAX_AGG_BW_50G 0x2 +#define ICE_MAX_AGG_BW_25G 0x3 + /* ITR granularity for different speeds */ +#define ICE_ITR_GRAN_ABOVE_25 2 +#define ICE_ITR_GRAN_MAX_25 4 /* ITR granularity in 1 us */ - u8 itr_gran_200; - u8 itr_gran_100; - u8 itr_gran_50; - u8 itr_gran_25; + u8 itr_gran; + /* INTRL granularity for different speeds */ +#define ICE_INTRL_GRAN_ABOVE_25 4 +#define ICE_INTRL_GRAN_MAX_25 8 + /* INTRL granularity in 1 us */ + u8 intrl_gran; + u8 ucast_shared; /* true if VSIs can share unicast addr */ }; @@ -391,4 +443,7 @@ struct ice_hw_port_stats { #define ICE_SR_SECTOR_SIZE_IN_WORDS 0x800 #define ICE_SR_WORDS_IN_1KB 512 +/* Hash redirection LUT for VSI - maximum array size */ +#define ICE_VSIQF_HLUT_ARRAY_SIZE ((VSIQF_HLUT_MAX_INDEX + 1) * 4) + #endif /* _ICE_TYPE_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c new file mode 100644 index 000000000000..c25e486706f3 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c @@ -0,0 +1,2668 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018, Intel Corporation. */ + +#include "ice.h" +#include "ice_lib.h" + +/** + * ice_vc_vf_broadcast - Broadcast a message to all VFs on PF + * @pf: pointer to the PF structure + * @v_opcode: operation code + * @v_retval: return value + * @msg: pointer to the msg buffer + * @msglen: msg length + */ +static void +ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode, + enum ice_status v_retval, u8 *msg, u16 msglen) +{ + struct ice_hw *hw = &pf->hw; + struct ice_vf *vf = pf->vf; + int i; + + for (i = 0; i < pf->num_alloc_vfs; i++, vf++) { + /* Not all vfs are enabled so skip the ones that are not */ + if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) && + !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) + continue; + + /* Ignore return value on purpose - a given VF may fail, but + * we need to keep going and send to all of them + */ + ice_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval, msg, + msglen, NULL); + } +} + +/** + * ice_set_pfe_link - Set the link speed/status of the virtchnl_pf_event + * @vf: pointer to the VF structure + * @pfe: pointer to the virtchnl_pf_event to set link speed/status for + * @ice_link_speed: link speed specified by ICE_AQ_LINK_SPEED_* + * @link_up: whether or not to set the link up/down + */ +static void +ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe, + int ice_link_speed, bool link_up) +{ + if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) { + pfe->event_data.link_event_adv.link_status = link_up; + /* Speed in Mbps */ + pfe->event_data.link_event_adv.link_speed = + ice_conv_link_speed_to_virtchnl(true, ice_link_speed); + } else { + pfe->event_data.link_event.link_status = link_up; + /* Legacy method for virtchnl link speeds */ + pfe->event_data.link_event.link_speed = + (enum virtchnl_link_speed) + ice_conv_link_speed_to_virtchnl(false, ice_link_speed); + } +} + +/** + * ice_set_pfe_link_forced - Force the virtchnl_pf_event link speed/status + * @vf: pointer to the VF structure + * @pfe: pointer to the virtchnl_pf_event to set link speed/status for + * @link_up: whether or not to set the link up/down + */ +static void +ice_set_pfe_link_forced(struct ice_vf *vf, struct virtchnl_pf_event *pfe, + bool link_up) +{ + u16 link_speed; + + if (link_up) + link_speed = ICE_AQ_LINK_SPEED_40GB; + else + 
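/* (For illustration, with hypothetical values: a VF that negotiated
 * VIRTCHNL_VF_CAP_ADV_LINK_SPEED gets the speed in Mbps via the
 * link_event_adv member, so
 *
 *	ice_set_pfe_link(vf, &pfe, ICE_AQ_LINK_SPEED_40GB, true);
 *
 * should report a 40000 Mbps link, while a legacy VF receives the
 * corresponding enum virtchnl_link_speed value instead.)
 */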
link_speed = ICE_AQ_LINK_SPEED_UNKNOWN; + + ice_set_pfe_link(vf, pfe, link_speed, link_up); +} + +/** + * ice_vc_notify_vf_link_state - Inform a VF of link status + * @vf: pointer to the VF structure + * + * send a link status message to a single VF + */ +static void ice_vc_notify_vf_link_state(struct ice_vf *vf) +{ + struct virtchnl_pf_event pfe = { 0 }; + struct ice_link_status *ls; + struct ice_pf *pf = vf->pf; + struct ice_hw *hw; + + hw = &pf->hw; + ls = &hw->port_info->phy.link_info; + + pfe.event = VIRTCHNL_EVENT_LINK_CHANGE; + pfe.severity = PF_EVENT_SEVERITY_INFO; + + if (vf->link_forced) + ice_set_pfe_link_forced(vf, &pfe, vf->link_up); + else + ice_set_pfe_link(vf, &pfe, ls->link_speed, ls->link_info & + ICE_AQ_LINK_UP); + + ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT, 0, (u8 *)&pfe, + sizeof(pfe), NULL); +} + +/** + * ice_get_vf_vector - get VF interrupt vector register offset + * @vf_msix: number of MSIx vector per VF on a PF + * @vf_id: VF identifier + * @i: index of MSIx vector + */ +static u32 ice_get_vf_vector(int vf_msix, int vf_id, int i) +{ + return ((i == 0) ? VFINT_DYN_CTLN(vf_id) : + VFINT_DYN_CTLN(((vf_msix - 1) * (vf_id)) + (i - 1))); +} + +/** + * ice_free_vf_res - Free a VF's resources + * @vf: pointer to the VF info + */ +static void ice_free_vf_res(struct ice_vf *vf) +{ + struct ice_pf *pf = vf->pf; + int i, pf_vf_msix; + + /* First, disable VF's configuration API to prevent OS from + * accessing the VF's VSI after it's freed or invalidated. + */ + clear_bit(ICE_VF_STATE_INIT, vf->vf_states); + + /* free vsi & disconnect it from the parent uplink */ + if (vf->lan_vsi_idx) { + ice_vsi_release(pf->vsi[vf->lan_vsi_idx]); + vf->lan_vsi_idx = 0; + vf->lan_vsi_num = 0; + vf->num_mac = 0; + } + + pf_vf_msix = pf->num_vf_msix; + /* Disable interrupts so that VF starts in a known state */ + for (i = 0; i < pf_vf_msix; i++) { + u32 reg_idx; + + reg_idx = ice_get_vf_vector(pf_vf_msix, vf->vf_id, i); + wr32(&pf->hw, reg_idx, VFINT_DYN_CTLN_CLEARPBA_M); + ice_flush(&pf->hw); + } + /* reset some of the state variables keeping track of the resources */ + clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states); + clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states); +} + +/***********************enable_vf routines*****************************/ + +/** + * ice_dis_vf_mappings + * @vf: pointer to the VF structure + */ +static void ice_dis_vf_mappings(struct ice_vf *vf) +{ + struct ice_pf *pf = vf->pf; + struct ice_vsi *vsi; + int first, last, v; + struct ice_hw *hw; + + hw = &pf->hw; + vsi = pf->vsi[vf->lan_vsi_idx]; + + wr32(hw, VPINT_ALLOC(vf->vf_id), 0); + + first = vf->first_vector_idx; + last = first + pf->num_vf_msix - 1; + for (v = first; v <= last; v++) { + u32 reg; + + reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) & + GLINT_VECT2FUNC_IS_PF_M) | + ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) & + GLINT_VECT2FUNC_PF_NUM_M)); + wr32(hw, GLINT_VECT2FUNC(v), reg); + } + + if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) + wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0); + else + dev_err(&pf->pdev->dev, + "Scattered mode for VF Tx queues is not yet implemented\n"); + + if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) + wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0); + else + dev_err(&pf->pdev->dev, + "Scattered mode for VF Rx queues is not yet implemented\n"); +} + +/** + * ice_free_vfs - Free all VFs + * @pf: pointer to the PF structure + */ +void ice_free_vfs(struct ice_pf *pf) +{ + struct ice_hw *hw = &pf->hw; + int tmp, i; + + if (!pf->vf) + return; + + while (test_and_set_bit(__ICE_VF_DIS, 
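/* (Worked example for ice_get_vf_vector() above, with hypothetical
 * numbers: for vf_msix = 5 and vf_id = 2, index 0 yields
 * VFINT_DYN_CTLN(2), and index i >= 1 yields
 * VFINT_DYN_CTLN((5 - 1) * 2 + (i - 1)), i.e. registers 8..11 for the
 * remaining four vectors.)
 */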
pf->state)) + usleep_range(1000, 2000); + + /* Avoid wait time by stopping all VFs at the same time */ + for (i = 0; i < pf->num_alloc_vfs; i++) { + if (!test_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states)) + continue; + + /* stop rings without wait time */ + ice_vsi_stop_tx_rings(pf->vsi[pf->vf[i].lan_vsi_idx], + ICE_NO_RESET, i); + ice_vsi_stop_rx_rings(pf->vsi[pf->vf[i].lan_vsi_idx]); + + clear_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states); + } + + /* Disable IOV before freeing resources. This lets any VF drivers + * running in the host get themselves cleaned up before we yank + * the carpet out from underneath their feet. + */ + if (!pci_vfs_assigned(pf->pdev)) + pci_disable_sriov(pf->pdev); + else + dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n"); + + tmp = pf->num_alloc_vfs; + pf->num_vf_qps = 0; + pf->num_alloc_vfs = 0; + for (i = 0; i < tmp; i++) { + if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) { + /* disable VF qp mappings */ + ice_dis_vf_mappings(&pf->vf[i]); + + /* Set this state so that assigned VF vectors can be + * reclaimed by PF for reuse in ice_vsi_release(). No + * need to clear this bit since pf->vf array is being + * freed anyways after this for loop + */ + set_bit(ICE_VF_STATE_CFG_INTR, pf->vf[i].vf_states); + ice_free_vf_res(&pf->vf[i]); + } + } + + devm_kfree(&pf->pdev->dev, pf->vf); + pf->vf = NULL; + + /* This check is for when the driver is unloaded while VFs are + * assigned. Setting the number of VFs to 0 through sysfs is caught + * before this function ever gets called. + */ + if (!pci_vfs_assigned(pf->pdev)) { + int vf_id; + + /* Acknowledge VFLR for all VFs. Without this, VFs will fail to + * work correctly when SR-IOV gets re-enabled. + */ + for (vf_id = 0; vf_id < tmp; vf_id++) { + u32 reg_idx, bit_idx; + + reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32; + bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32; + wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx)); + } + } + clear_bit(__ICE_VF_DIS, pf->state); + clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags); +} + +/** + * ice_trigger_vf_reset - Reset a VF on HW + * @vf: pointer to the VF structure + * @is_vflr: true if VFLR was issued, false if not + * + * Trigger hardware to start a reset for a particular VF. Expects the caller + * to wait the proper amount of time to allow hardware to reset the VF before + * it cleans up and restores VF functionality. + */ +static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr) +{ + struct ice_pf *pf = vf->pf; + u32 reg, reg_idx, bit_idx; + struct ice_hw *hw; + int vf_abs_id, i; + + hw = &pf->hw; + vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id; + + /* Inform VF that it is no longer active, as a warning */ + clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states); + + /* Disable VF's configuration API during reset. The flag is re-enabled + * in ice_alloc_vf_res(), when it's safe again to access VF's VSI. + * It's normally disabled in ice_free_vf_res(), but it's safer + * to do it earlier to give some time to finish to any VF config + * functions that may still be running at this point. + */ + clear_bit(ICE_VF_STATE_INIT, vf->vf_states); + + /* In the case of a VFLR, the HW has already reset the VF and we + * just need to clean up, so don't hit the VFRTRIG register. 
+ */ + if (!is_vflr) { + /* reset VF using VPGEN_VFRTRIG reg */ + reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id)); + reg |= VPGEN_VFRTRIG_VFSWR_M; + wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg); + } + /* clear the VFLR bit in GLGEN_VFLRSTAT */ + reg_idx = (vf_abs_id) / 32; + bit_idx = (vf_abs_id) % 32; + wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx)); + ice_flush(hw); + + wr32(hw, PF_PCI_CIAA, + VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S)); + for (i = 0; i < 100; i++) { + reg = rd32(hw, PF_PCI_CIAD); + if ((reg & VF_TRANS_PENDING_M) != 0) + dev_err(&pf->pdev->dev, + "VF %d PCI transactions stuck\n", vf->vf_id); + udelay(1); + } +} + +/** + * ice_vsi_set_pvid - Set port VLAN id for the VSI + * @vsi: the VSI being changed + * @vid: the VLAN id to set as a PVID + */ +static int ice_vsi_set_pvid(struct ice_vsi *vsi, u16 vid) +{ + struct device *dev = &vsi->back->pdev->dev; + struct ice_hw *hw = &vsi->back->hw; + struct ice_vsi_ctx ctxt = { 0 }; + enum ice_status status; + + ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_TAGGED | + ICE_AQ_VSI_PVLAN_INSERT_PVID | + ICE_AQ_VSI_VLAN_EMOD_STR; + ctxt.info.pvid = cpu_to_le16(vid); + ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); + + status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL); + if (status) { + dev_info(dev, "update VSI for VLAN insert failed, err %d aq_err %d\n", + status, hw->adminq.sq_last_status); + return -EIO; + } + + vsi->info.pvid = ctxt.info.pvid; + vsi->info.vlan_flags = ctxt.info.vlan_flags; + return 0; +} + +/** + * ice_vsi_kill_pvid - Remove port VLAN id from the VSI + * @vsi: the VSI being changed + */ +static int ice_vsi_kill_pvid(struct ice_vsi *vsi) +{ + struct ice_pf *pf = vsi->back; + + if (ice_vsi_manage_vlan_stripping(vsi, false)) { + dev_err(&pf->pdev->dev, "Error removing Port VLAN on VSI %i\n", + vsi->vsi_num); + return -ENODEV; + } + + vsi->info.pvid = 0; + return 0; +} + +/** + * ice_vf_vsi_setup - Set up a VF VSI + * @pf: board private structure + * @pi: pointer to the port_info instance + * @vf_id: defines VF id to which this VSI connects. + * + * Returns pointer to the successfully allocated VSI struct on success, + * otherwise returns NULL on failure. + */ +static struct ice_vsi * +ice_vf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, u16 vf_id) +{ + return ice_vsi_setup(pf, pi, ICE_VSI_VF, vf_id); +} + +/** + * ice_alloc_vsi_res - Setup VF VSI and its resources + * @vf: pointer to the VF structure + * + * Returns 0 on success, negative value on failure + */ +static int ice_alloc_vsi_res(struct ice_vf *vf) +{ + struct ice_pf *pf = vf->pf; + LIST_HEAD(tmp_add_list); + u8 broadcast[ETH_ALEN]; + struct ice_vsi *vsi; + int status = 0; + + vsi = ice_vf_vsi_setup(pf, pf->hw.port_info, vf->vf_id); + + if (!vsi) { + dev_err(&pf->pdev->dev, "Failed to create VF VSI\n"); + return -ENOMEM; + } + + vf->lan_vsi_idx = vsi->idx; + vf->lan_vsi_num = vsi->vsi_num; + + /* first vector index is the VFs OICR index */ + vf->first_vector_idx = vsi->hw_base_vector; + /* Since hw_base_vector holds the vector where data queue interrupts + * starts, increment by 1 since VFs allocated vectors include OICR intr + * as well. 
+ */ + vsi->hw_base_vector += 1; + + /* Check if port VLAN exist before, and restore it accordingly */ + if (vf->port_vlan_id) + ice_vsi_set_pvid(vsi, vf->port_vlan_id); + + eth_broadcast_addr(broadcast); + + status = ice_add_mac_to_list(vsi, &tmp_add_list, broadcast); + if (status) + goto ice_alloc_vsi_res_exit; + + if (is_valid_ether_addr(vf->dflt_lan_addr.addr)) { + status = ice_add_mac_to_list(vsi, &tmp_add_list, + vf->dflt_lan_addr.addr); + if (status) + goto ice_alloc_vsi_res_exit; + } + + status = ice_add_mac(&pf->hw, &tmp_add_list); + if (status) + dev_err(&pf->pdev->dev, "could not add mac filters\n"); + + /* Clear this bit after VF initialization since we shouldn't reclaim + * and reassign interrupts for synchronous or asynchronous VFR events. + * We don't want to reconfigure interrupts since AVF driver doesn't + * expect vector assignment to be changed unless there is a request for + * more vectors. + */ + clear_bit(ICE_VF_STATE_CFG_INTR, vf->vf_states); +ice_alloc_vsi_res_exit: + ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list); + return status; +} + +/** + * ice_alloc_vf_res - Allocate VF resources + * @vf: pointer to the VF structure + */ +static int ice_alloc_vf_res(struct ice_vf *vf) +{ + int status; + + /* setup VF VSI and necessary resources */ + status = ice_alloc_vsi_res(vf); + if (status) + goto ice_alloc_vf_res_exit; + + if (vf->trusted) + set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); + else + clear_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); + + /* VF is now completely initialized */ + set_bit(ICE_VF_STATE_INIT, vf->vf_states); + + return status; + +ice_alloc_vf_res_exit: + ice_free_vf_res(vf); + return status; +} + +/** + * ice_ena_vf_mappings + * @vf: pointer to the VF structure + * + * Enable VF vectors and queues allocation by writing the details into + * respective registers. + */ +static void ice_ena_vf_mappings(struct ice_vf *vf) +{ + struct ice_pf *pf = vf->pf; + struct ice_vsi *vsi; + int first, last, v; + struct ice_hw *hw; + int abs_vf_id; + u32 reg; + + hw = &pf->hw; + vsi = pf->vsi[vf->lan_vsi_idx]; + first = vf->first_vector_idx; + last = (first + pf->num_vf_msix) - 1; + abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; + + /* VF Vector allocation */ + reg = (((first << VPINT_ALLOC_FIRST_S) & VPINT_ALLOC_FIRST_M) | + ((last << VPINT_ALLOC_LAST_S) & VPINT_ALLOC_LAST_M) | + VPINT_ALLOC_VALID_M); + wr32(hw, VPINT_ALLOC(vf->vf_id), reg); + + /* map the interrupts to its functions */ + for (v = first; v <= last; v++) { + reg = (((abs_vf_id << GLINT_VECT2FUNC_VF_NUM_S) & + GLINT_VECT2FUNC_VF_NUM_M) | + ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) & + GLINT_VECT2FUNC_PF_NUM_M)); + wr32(hw, GLINT_VECT2FUNC(v), reg); + } + + /* VF Tx queues allocation */ + if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) { + wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), + VPLAN_TXQ_MAPENA_TX_ENA_M); + /* set the VF PF Tx queue range + * VFNUMQ value should be set to (number of queues - 1). 
A value
+		 * of 0 means 1 queue and a value of 255 means 256 queues
+		 */
+		reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) &
+			VPLAN_RX_QBASE_VFFIRSTQ_M) |
+		       (((vsi->alloc_txq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
+			VPLAN_RX_QBASE_VFNUMQ_M));
+		wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
+	} else {
+		dev_err(&pf->pdev->dev,
+			"Scattered mode for VF Rx queues is not yet implemented\n");
+	}
+}
+
+/**
+ * ice_determine_res
+ * @pf: pointer to the PF structure
+ * @avail_res: available resources in the PF structure
+ * @max_res: maximum resources that can be given per VF
+ * @min_res: minimum resources that can be given per VF
+ *
+ * Returns a non-zero value if resources (queues/vectors) are available,
+ * or zero if the PF cannot accommodate all num_alloc_vfs.
+ */
+static int
+ice_determine_res(struct ice_pf *pf, u16 avail_res, u16 max_res, u16 min_res)
+{
+	bool checked_min_res = false;
+	int res;
+
+	/* Start by checking if the PF can assign the max number of resources
+	 * for all num_alloc_vfs. If yes, return that number per VF. If no,
+	 * divide by 2 and round up, then check again. Repeat until even the
+	 * minimum resources are not available; in that case return 0.
+	 */
+	res = max_res;
+	while ((res >= min_res) && !checked_min_res) {
+		int num_all_res;
+
+		num_all_res = pf->num_alloc_vfs * res;
+		if (num_all_res <= avail_res)
+			return res;
+
+		if (res == min_res)
+			checked_min_res = true;
+
+		res = DIV_ROUND_UP(res, 2);
+	}
+	return 0;
+}
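A worked example may make the halving search above concrete; this is a hypothetical standalone re-implementation for illustration, not part of the patch:

	/* Mirrors ice_determine_res(): try the max, halve (rounding up)
	 * until the pool can satisfy every VF, give up below the minimum.
	 */
	static u16 halving_search(u16 num_vfs, u16 avail, u16 max, u16 min)
	{
		u16 res = max;

		while (res >= min) {
			if ((u32)num_vfs * res <= avail)
				return res;
			if (res == min)
				break;
			res = (res + 1) / 2;	/* DIV_ROUND_UP(res, 2) */
		}
		return 0;
	}

	/* e.g. halving_search(8, 100, 65, 1): 65 -> 33 -> 17 -> 9,
	 * and 8 * 9 = 72 <= 100, so each VF gets 9.
	 */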
+
+/**
+ * ice_check_avail_res - check if vectors and queues are available
+ * @pf: pointer to the PF structure
+ *
+ * This function is where we calculate the actual number of resources for
+ * VF VSIs; we don't reserve ahead of time during probe. Returns success if
+ * vector and queue resources are available, otherwise returns an error code.
+ */
+static int ice_check_avail_res(struct ice_pf *pf)
+{
+	u16 num_msix, num_txq, num_rxq;
+
+	if (!pf->num_alloc_vfs)
+		return -EINVAL;
+
+	/* Grab from HW interrupts common pool
+	 * Note: By the time the user decides it needs more vectors in a VF
+	 * it's already too late, since one must decide this prior to creating
+	 * the VF interface. So the best we can do is take a guess as to what
+	 * the user might want.
+	 *
+	 * We have two policies for vector allocation:
+	 * 1. if num_alloc_vfs is from 1 to 16, then we consider this a small
+	 * number of NFV VFs used for NFV appliances; since this is a special
+	 * case, we try to assign the maximum number of vectors per VF (65) as
+	 * far as possible, based on the ice_determine_res algorithm.
+	 * 2. if num_alloc_vfs is from 17 to 256, then it's a large number of
+	 * regular VFs which are not used for any special purpose. Hence try
+	 * to grab the default number of interrupt vectors (5, as supported by
+	 * the AVF driver).
+	 */
+	if (pf->num_alloc_vfs <= 16) {
+		num_msix = ice_determine_res(pf, pf->num_avail_hw_msix,
+					     ICE_MAX_INTR_PER_VF,
+					     ICE_MIN_INTR_PER_VF);
+	} else if (pf->num_alloc_vfs <= ICE_MAX_VF_COUNT) {
+		num_msix = ice_determine_res(pf, pf->num_avail_hw_msix,
+					     ICE_DFLT_INTR_PER_VF,
+					     ICE_MIN_INTR_PER_VF);
+	} else {
+		dev_err(&pf->pdev->dev,
+			"Number of VFs %d exceeds max VF count %d\n",
+			pf->num_alloc_vfs, ICE_MAX_VF_COUNT);
+		return -EIO;
+	}
+
+	if (!num_msix)
+		return -EIO;
+
+	/* Grab from the common pool
+	 * start by requesting the default number of queues (4, as supported
+	 * by the AVF driver). Note that the main difference between queues
+	 * and vectors is that the latter can only be reserved at init time,
+	 * while queues can be requested by the VF at runtime through
+	 * virtchnl; that is the reason we start by reserving a few queues.
+	 */
+	num_txq = ice_determine_res(pf, pf->q_left_tx, ICE_DFLT_QS_PER_VF,
+				    ICE_MIN_QS_PER_VF);
+
+	num_rxq = ice_determine_res(pf, pf->q_left_rx, ICE_DFLT_QS_PER_VF,
+				    ICE_MIN_QS_PER_VF);
+
+	if (!num_txq || !num_rxq)
+		return -EIO;
+
+	/* Since the AVF driver works only with queue pairs, and thus expects
+	 * an equal number of Rx and Tx queues, take the minimum of the
+	 * available Tx or Rx queues.
+	 */
+	pf->num_vf_qps = min_t(int, num_txq, num_rxq);
+	pf->num_vf_msix = num_msix;
+
+	return 0;
+}
+
+/**
+ * ice_cleanup_and_realloc_vf - Clean up VF and reallocate resources after reset
+ * @vf: pointer to the VF structure
+ *
+ * Clean up a VF after the hardware reset is finished. Expects the caller to
+ * have verified whether the reset finished properly, and to have ensured the
+ * minimum amount of wait time has passed. Reallocates VF resources to make
+ * the VF state active again.
+ */
+static void ice_cleanup_and_realloc_vf(struct ice_vf *vf)
+{
+	struct ice_pf *pf = vf->pf;
+	struct ice_hw *hw;
+	u32 reg;
+
+	hw = &pf->hw;
+
+	/* PF software completes the flow by notifying VF that reset flow is
+	 * completed. This is done by enabling hardware by clearing the reset
+	 * bit in the VPGEN_VFRTRIG reg and setting VFR_STATE in the VFGEN_RSTAT
+	 * register to VFR completed (done at the end of this function).
+	 * By doing this we allow HW to access VF memory at any point. If we
+	 * did it any sooner, HW could access memory while it was being freed
+	 * in ice_free_vf_res(), causing an IOMMU fault.
+	 *
+	 * On the other hand, this needs to be done ASAP, because the VF driver
+	 * is waiting for this to happen and may report a timeout. It's
+	 * harmless, but it gets logged into the Guest OS kernel log, so best
+	 * avoid it.
+	 */
+	reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
+	reg &= ~VPGEN_VFRTRIG_VFSWR_M;
+	wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
+
+	/* reallocate VF resources to finish resetting the VSI state */
+	if (!ice_alloc_vf_res(vf)) {
+		ice_ena_vf_mappings(vf);
+		set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
+		clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
+		vf->num_vlan = 0;
+	}
+
+	/* Tell the VF driver the reset is done. This needs to be done only
+	 * after VF has been fully initialized, because the VF driver may
+	 * request resources immediately after setting this flag.
+	 */
+	wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
+}
+
+/**
+ * ice_reset_all_vfs - reset all allocated VFs in one go
+ * @pf: pointer to the PF structure
+ * @is_vflr: true if VFLR was issued, false if not
+ *
+ * First, tell the hardware to reset each VF, then do all the waiting in one
+ * chunk, and finally finish restoring each VF after the wait.
This is useful + * during PF routines which need to reset all VFs, as otherwise it must perform + * these resets in a serialized fashion. + * + * Returns true if any VFs were reset, and false otherwise. + */ +bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) +{ + struct ice_hw *hw = &pf->hw; + int v, i; + + /* If we don't have any VFs, then there is nothing to reset */ + if (!pf->num_alloc_vfs) + return false; + + /* If VFs have been disabled, there is no need to reset */ + if (test_and_set_bit(__ICE_VF_DIS, pf->state)) + return false; + + /* Begin reset on all VFs at once */ + for (v = 0; v < pf->num_alloc_vfs; v++) + ice_trigger_vf_reset(&pf->vf[v], is_vflr); + + /* Call Disable LAN Tx queue AQ call with VFR bit set and 0 + * queues to inform Firmware about VF reset. + */ + for (v = 0; v < pf->num_alloc_vfs; v++) + ice_dis_vsi_txq(pf->vsi[0]->port_info, 0, NULL, NULL, + ICE_VF_RESET, v, NULL); + + /* HW requires some time to make sure it can flush the FIFO for a VF + * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in + * sequence to make sure that it has completed. We'll keep track of + * the VFs using a simple iterator that increments once that VF has + * finished resetting. + */ + for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) { + usleep_range(10000, 20000); + + /* Check each VF in sequence */ + while (v < pf->num_alloc_vfs) { + struct ice_vf *vf = &pf->vf[v]; + u32 reg; + + reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id)); + if (!(reg & VPGEN_VFRSTAT_VFRD_M)) + break; + + /* If the current VF has finished resetting, move on + * to the next VF in sequence. + */ + v++; + } + } + + /* Display a warning if at least one VF didn't manage to reset in + * time, but continue on with the operation. + */ + if (v < pf->num_alloc_vfs) + dev_warn(&pf->pdev->dev, "VF reset check timeout\n"); + usleep_range(10000, 20000); + + /* free VF resources to begin resetting the VSI state */ + for (v = 0; v < pf->num_alloc_vfs; v++) + ice_free_vf_res(&pf->vf[v]); + + if (ice_check_avail_res(pf)) { + dev_err(&pf->pdev->dev, + "Cannot allocate VF resources, try with fewer number of VFs\n"); + return false; + } + + /* Finish the reset on each VF */ + for (v = 0; v < pf->num_alloc_vfs; v++) + ice_cleanup_and_realloc_vf(&pf->vf[v]); + + ice_flush(hw); + clear_bit(__ICE_VF_DIS, pf->state); + + return true; +} + +/** + * ice_reset_vf - Reset a particular VF + * @vf: pointer to the VF structure + * @is_vflr: true if VFLR was issued, false if not + * + * Returns true if the VF is reset, false otherwise. + */ +static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) +{ + struct ice_pf *pf = vf->pf; + struct ice_hw *hw = &pf->hw; + bool rsd = false; + u32 reg; + int i; + + /* If the VFs have been disabled, this means something else is + * resetting the VF, so we shouldn't continue. + */ + if (test_and_set_bit(__ICE_VF_DIS, pf->state)) + return false; + + ice_trigger_vf_reset(vf, is_vflr); + + if (test_bit(ICE_VF_STATE_ENA, vf->vf_states)) { + ice_vsi_stop_tx_rings(pf->vsi[vf->lan_vsi_idx], ICE_VF_RESET, + vf->vf_id); + ice_vsi_stop_rx_rings(pf->vsi[vf->lan_vsi_idx]); + clear_bit(ICE_VF_STATE_ENA, vf->vf_states); + } else { + /* Call Disable LAN Tx queue AQ call even when queues are not + * enabled. 
This is needed for successful completion of VFR.
+ */
+ ice_dis_vsi_txq(pf->vsi[vf->lan_vsi_idx]->port_info, 0,
+ NULL, NULL, ICE_VF_RESET, vf->vf_id, NULL);
+ }
+
+ /* poll the VPGEN_VFRSTAT reg to make sure
+ * that the reset is complete
+ */
+ for (i = 0; i < 10; i++) {
+ /* VF reset requires driver to first reset the VF and then
+ * poll the status register to make sure that the reset
+ * completed successfully.
+ */
+ usleep_range(10000, 20000);
+ reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
+ if (reg & VPGEN_VFRSTAT_VFRD_M) {
+ rsd = true;
+ break;
+ }
+ }
+
+ /* Display a warning if the VF didn't manage to reset in time, but
+ * continue on with the operation.
+ */
+ if (!rsd)
+ dev_warn(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
+ vf->vf_id);
+
+ usleep_range(10000, 20000);
+
+ /* free VF resources to begin resetting the VSI state */
+ ice_free_vf_res(vf);
+
+ ice_cleanup_and_realloc_vf(vf);
+
+ ice_flush(hw);
+ clear_bit(__ICE_VF_DIS, pf->state);
+
+ return true;
+}
+
+/**
+ * ice_vc_notify_link_state - Inform all VFs on a PF of link status
+ * @pf: pointer to the PF structure
+ */
+void ice_vc_notify_link_state(struct ice_pf *pf)
+{
+ int i;
+
+ for (i = 0; i < pf->num_alloc_vfs; i++)
+ ice_vc_notify_vf_link_state(&pf->vf[i]);
+}
+
+/**
+ * ice_vc_notify_reset - Send pending reset message to all VFs
+ * @pf: pointer to the PF structure
+ *
+ * indicate a pending reset to all VFs on a given PF
+ */
+void ice_vc_notify_reset(struct ice_pf *pf)
+{
+ struct virtchnl_pf_event pfe;
+
+ if (!pf->num_alloc_vfs)
+ return;
+
+ pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
+ pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
+ ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, ICE_SUCCESS,
+ (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
+}
+
+/**
+ * ice_vc_notify_vf_reset - Notify VF of a reset event
+ * @vf: pointer to the VF structure
+ */
+static void ice_vc_notify_vf_reset(struct ice_vf *vf)
+{
+ struct virtchnl_pf_event pfe;
+
+ /* validate the request */
+ if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
+ return;
+
+ /* verify if the VF is in either init or active before proceeding */
+ if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
+ !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
+ return;
+
+ pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
+ pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
+ ice_aq_send_msg_to_vf(&vf->pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT, 0,
+ (u8 *)&pfe, sizeof(pfe), NULL);
+}
+
+/**
+ * ice_alloc_vfs - Allocate and set up VF resources
+ * @pf: pointer to the PF structure
+ * @num_alloc_vfs: number of VFs to allocate
+ */
+static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
+{
+ struct ice_hw *hw = &pf->hw;
+ struct ice_vf *vfs;
+ int i, ret;
+
+ /* Disable global interrupt 0 so we don't try to handle the VFLR.
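+ * (The cause is re-armed via ice_irq_dynamic_ena() on the common
+ * unroll path at the end of this function.)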
*/ + wr32(hw, GLINT_DYN_CTL(pf->hw_oicr_idx), + ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S); + + ice_flush(hw); + + ret = pci_enable_sriov(pf->pdev, num_alloc_vfs); + if (ret) { + pf->num_alloc_vfs = 0; + goto err_unroll_intr; + } + /* allocate memory */ + vfs = devm_kcalloc(&pf->pdev->dev, num_alloc_vfs, sizeof(*vfs), + GFP_KERNEL); + if (!vfs) { + ret = -ENOMEM; + goto err_unroll_sriov; + } + pf->vf = vfs; + + /* apply default profile */ + for (i = 0; i < num_alloc_vfs; i++) { + vfs[i].pf = pf; + vfs[i].vf_sw_id = pf->first_sw; + vfs[i].vf_id = i; + + /* assign default capabilities */ + set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps); + vfs[i].spoofchk = true; + + /* Set this state so that PF driver does VF vector assignment */ + set_bit(ICE_VF_STATE_CFG_INTR, vfs[i].vf_states); + } + pf->num_alloc_vfs = num_alloc_vfs; + + /* VF resources get allocated during reset */ + if (!ice_reset_all_vfs(pf, false)) + goto err_unroll_sriov; + + goto err_unroll_intr; + +err_unroll_sriov: + pci_disable_sriov(pf->pdev); +err_unroll_intr: + /* rearm interrupts here */ + ice_irq_dynamic_ena(hw, NULL, NULL); + return ret; +} + +/** + * ice_pf_state_is_nominal - checks the pf for nominal state + * @pf: pointer to pf to check + * + * Check the PF's state for a collection of bits that would indicate + * the PF is in a state that would inhibit normal operation for + * driver functionality. + * + * Returns true if PF is in a nominal state. + * Returns false otherwise + */ +static bool ice_pf_state_is_nominal(struct ice_pf *pf) +{ + DECLARE_BITMAP(check_bits, __ICE_STATE_NBITS) = { 0 }; + + if (!pf) + return false; + + bitmap_set(check_bits, 0, __ICE_STATE_NOMINAL_CHECK_BITS); + if (bitmap_intersects(pf->state, check_bits, __ICE_STATE_NBITS)) + return false; + + return true; +} + +/** + * ice_pci_sriov_ena - Enable or change number of VFs + * @pf: pointer to the PF structure + * @num_vfs: number of VFs to allocate + */ +static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs) +{ + int pre_existing_vfs = pci_num_vf(pf->pdev); + struct device *dev = &pf->pdev->dev; + int err; + + if (!ice_pf_state_is_nominal(pf)) { + dev_err(dev, "Cannot enable SR-IOV, device not ready\n"); + return -EBUSY; + } + + if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) { + dev_err(dev, "This device is not capable of SR-IOV\n"); + return -ENODEV; + } + + if (pre_existing_vfs && pre_existing_vfs != num_vfs) + ice_free_vfs(pf); + else if (pre_existing_vfs && pre_existing_vfs == num_vfs) + return num_vfs; + + if (num_vfs > pf->num_vfs_supported) { + dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n", + num_vfs, pf->num_vfs_supported); + return -ENOTSUPP; + } + + dev_info(dev, "Allocating %d VFs\n", num_vfs); + err = ice_alloc_vfs(pf, num_vfs); + if (err) { + dev_err(dev, "Failed to enable SR-IOV: %d\n", err); + return err; + } + + set_bit(ICE_FLAG_SRIOV_ENA, pf->flags); + return num_vfs; +} + +/** + * ice_sriov_configure - Enable or change number of VFs via sysfs + * @pdev: pointer to a pci_dev structure + * @num_vfs: number of VFs to allocate + * + * This function is called when the user updates the number of VFs in sysfs. 
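+ *
+ * For example, from the host (the PCI address is only illustrative):
+ *
+ *   echo 8 > /sys/bus/pci/devices/0000:3b:00.0/sriov_numvfs
+ *   echo 0 > /sys/bus/pci/devices/0000:3b:00.0/sriov_numvfs
+ *
+ * The second write frees the VFs again; it fails with -EBUSY while any
+ * VF is still assigned to a VM.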
+ */
+int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ struct ice_pf *pf = pci_get_drvdata(pdev);
+
+ if (num_vfs)
+ return ice_pci_sriov_ena(pf, num_vfs);
+
+ if (!pci_vfs_assigned(pdev)) {
+ ice_free_vfs(pf);
+ } else {
+ dev_err(&pf->pdev->dev,
+ "can't free VFs because some are assigned to VMs.\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_process_vflr_event - Free VF resources via IRQ calls
+ * @pf: pointer to the PF structure
+ *
+ * called from the VFLR IRQ handler to
+ * free up VF resources and state variables
+ */
+void ice_process_vflr_event(struct ice_pf *pf)
+{
+ struct ice_hw *hw = &pf->hw;
+ int vf_id;
+ u32 reg;
+
+ if (!test_bit(__ICE_VFLR_EVENT_PENDING, pf->state) ||
+ !pf->num_alloc_vfs)
+ return;
+
+ /* Re-enable the VFLR interrupt cause here, before looking for which
+ * VF got reset. Otherwise, if another VF gets a reset while the
+ * first one is being processed, that interrupt will be lost, and
+ * that VF will be stuck in reset forever.
+ */
+ reg = rd32(hw, PFINT_OICR_ENA);
+ reg |= PFINT_OICR_VFLR_M;
+ wr32(hw, PFINT_OICR_ENA, reg);
+ ice_flush(hw);
+
+ clear_bit(__ICE_VFLR_EVENT_PENDING, pf->state);
+ for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
+ struct ice_vf *vf = &pf->vf[vf_id];
+ u32 reg_idx, bit_idx;
+
+ reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
+ bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
+ /* read GLGEN_VFLRSTAT register to find out the VFLR'd VFs */
+ reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx));
+ if (reg & BIT(bit_idx))
+ /* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */
+ ice_reset_vf(vf, true);
+ }
+}
+
+/**
+ * ice_vc_dis_vf - Disable a given VF via SW reset
+ * @vf: pointer to the VF info
+ *
+ * Disable the VF through a SW reset
+ */
+static void ice_vc_dis_vf(struct ice_vf *vf)
+{
+ ice_vc_notify_vf_reset(vf);
+ ice_reset_vf(vf, false);
+}
+
+/**
+ * ice_vc_send_msg_to_vf - Send message to VF
+ * @vf: pointer to the VF info
+ * @v_opcode: virtual channel opcode
+ * @v_retval: virtual channel return value
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * send msg to VF
+ */
+static int ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
+ enum ice_status v_retval, u8 *msg, u16 msglen)
+{
+ enum ice_status aq_ret;
+ struct ice_pf *pf;
+
+ /* validate the request */
+ if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
+ return -EINVAL;
+
+ pf = vf->pf;
+
+ /* single place to detect unsuccessful return values */
+ if (v_retval) {
+ vf->num_inval_msgs++;
+ dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n",
+ vf->vf_id, v_opcode, v_retval);
+ if (vf->num_inval_msgs > ICE_DFLT_NUM_INVAL_MSGS_ALLOWED) {
+ dev_err(&pf->pdev->dev,
+ "Number of invalid messages exceeded for VF %d\n",
+ vf->vf_id);
+ dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
+ set_bit(ICE_VF_STATE_DIS, vf->vf_states);
+ return -EIO;
+ }
+ } else {
+ vf->num_valid_msgs++;
+ /* reset the invalid counter, if a valid message is received.
*/ + vf->num_inval_msgs = 0; + } + + aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval, + msg, msglen, NULL); + if (aq_ret) { + dev_info(&pf->pdev->dev, + "Unable to send the message to VF %d aq_err %d\n", + vf->vf_id, pf->hw.mailboxq.sq_last_status); + return -EIO; + } + + return 0; +} + +/** + * ice_vc_get_ver_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * + * called from the VF to request the API version used by the PF + */ +static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg) +{ + struct virtchnl_version_info info = { + VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR + }; + + vf->vf_ver = *(struct virtchnl_version_info *)msg; + /* VFs running the 1.0 API expect to get 1.0 back or they will cry. */ + if (VF_IS_V10(&vf->vf_ver)) + info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS; + + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION, ICE_SUCCESS, + (u8 *)&info, + sizeof(struct virtchnl_version_info)); +} + +/** + * ice_vc_get_vf_res_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * + * called from the VF to request its resources + */ +static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg) +{ + struct virtchnl_vf_resource *vfres = NULL; + enum ice_status aq_ret = 0; + struct ice_pf *pf = vf->pf; + struct ice_vsi *vsi; + int len = 0; + int ret; + + if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) { + aq_ret = ICE_ERR_PARAM; + goto err; + } + + len = sizeof(struct virtchnl_vf_resource); + + vfres = devm_kzalloc(&pf->pdev->dev, len, GFP_KERNEL); + if (!vfres) { + aq_ret = ICE_ERR_NO_MEMORY; + len = 0; + goto err; + } + if (VF_IS_V11(&vf->vf_ver)) + vf->driver_caps = *(u32 *)msg; + else + vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 | + VIRTCHNL_VF_OFFLOAD_RSS_REG | + VIRTCHNL_VF_OFFLOAD_VLAN; + + vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2; + vsi = pf->vsi[vf->lan_vsi_idx]; + if (!vsi->info.pvid) + vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN; + + if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) { + vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF; + } else { + if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ) + vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ; + else + vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG; + } + + if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) + vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2; + + if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP) + vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP; + + if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM) + vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM; + + if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) + vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING; + + if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) + vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_WB_ON_ITR; + + if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES) + vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES; + + if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) + vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED; + + vfres->num_vsis = 1; + /* Tx and Rx queue are equal for VF */ + vfres->num_queue_pairs = vsi->num_txq; + vfres->max_vectors = pf->num_vf_msix; + vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE; + vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE; + + vfres->vsi_res[0].vsi_id = vf->lan_vsi_num; + vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV; + vfres->vsi_res[0].num_queue_pairs = vsi->num_txq; + ether_addr_copy(vfres->vsi_res[0].default_mac_addr, + vf->dflt_lan_addr.addr); + + 
set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states); + +err: + /* send the response back to the VF */ + ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, aq_ret, + (u8 *)vfres, len); + + devm_kfree(&pf->pdev->dev, vfres); + return ret; +} + +/** + * ice_vc_reset_vf_msg + * @vf: pointer to the VF info + * + * called from the VF to reset itself, + * unlike other virtchnl messages, PF driver + * doesn't send the response back to the VF + */ +static void ice_vc_reset_vf_msg(struct ice_vf *vf) +{ + if (test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) + ice_reset_vf(vf, false); +} + +/** + * ice_find_vsi_from_id + * @pf: the pf structure to search for the VSI + * @id: id of the VSI it is searching for + * + * searches for the VSI with the given id + */ +static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id) +{ + int i; + + for (i = 0; i < pf->num_alloc_vsi; i++) + if (pf->vsi[i] && pf->vsi[i]->vsi_num == id) + return pf->vsi[i]; + + return NULL; +} + +/** + * ice_vc_isvalid_vsi_id + * @vf: pointer to the VF info + * @vsi_id: VF relative VSI id + * + * check for the valid VSI id + */ +static bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id) +{ + struct ice_pf *pf = vf->pf; + struct ice_vsi *vsi; + + vsi = ice_find_vsi_from_id(pf, vsi_id); + + return (vsi && (vsi->vf_id == vf->vf_id)); +} + +/** + * ice_vc_isvalid_q_id + * @vf: pointer to the VF info + * @vsi_id: VSI id + * @qid: VSI relative queue id + * + * check for the valid queue id + */ +static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid) +{ + struct ice_vsi *vsi = ice_find_vsi_from_id(vf->pf, vsi_id); + /* allocated Tx and Rx queues should be always equal for VF VSI */ + return (vsi && (qid < vsi->alloc_txq)); +} + +/** + * ice_vc_config_rss_key + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * + * Configure the VF's RSS key + */ +static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg) +{ + struct virtchnl_rss_key *vrk = + (struct virtchnl_rss_key *)msg; + struct ice_vsi *vsi = NULL; + enum ice_status aq_ret; + int ret; + + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + vsi = ice_find_vsi_from_id(vf->pf, vrk->vsi_id); + if (!vsi) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + ret = ice_set_rss(vsi, vrk->key, NULL, 0); + aq_ret = ret ? 
ICE_ERR_PARAM : ICE_SUCCESS; +error_param: + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, aq_ret, + NULL, 0); +} + +/** + * ice_vc_config_rss_lut + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * + * Configure the VF's RSS LUT + */ +static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg) +{ + struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg; + struct ice_vsi *vsi = NULL; + enum ice_status aq_ret; + int ret; + + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + vsi = ice_find_vsi_from_id(vf->pf, vrl->vsi_id); + if (!vsi) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + ret = ice_set_rss(vsi, NULL, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE); + aq_ret = ret ? ICE_ERR_PARAM : ICE_SUCCESS; +error_param: + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, aq_ret, + NULL, 0); +} + +/** + * ice_vc_get_stats_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * + * called from the VF to get VSI stats + */ +static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg) +{ + struct virtchnl_queue_select *vqs = + (struct virtchnl_queue_select *)msg; + enum ice_status aq_ret = 0; + struct ice_eth_stats stats; + struct ice_vsi *vsi; + + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + vsi = ice_find_vsi_from_id(vf->pf, vqs->vsi_id); + if (!vsi) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + memset(&stats, 0, sizeof(struct ice_eth_stats)); + ice_update_eth_stats(vsi); + + stats = vsi->eth_stats; + +error_param: + /* send the response to the VF */ + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret, + (u8 *)&stats, sizeof(stats)); +} + +/** + * ice_vc_ena_qs_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * + * called from the VF to enable all or specific queue(s) + */ +static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg) +{ + struct virtchnl_queue_select *vqs = + (struct virtchnl_queue_select *)msg; + enum ice_status aq_ret = 0; + struct ice_vsi *vsi; + + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + if (!vqs->rx_queues && !vqs->tx_queues) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + vsi = ice_find_vsi_from_id(vf->pf, vqs->vsi_id); + if (!vsi) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + /* Enable only Rx rings, Tx rings were enabled by the FW when the + * Tx queue group list was configured and the context bits were + * programmed using ice_vsi_cfg_txqs + */ + if (ice_vsi_start_rx_rings(vsi)) + aq_ret = ICE_ERR_PARAM; + + /* Set flag to indicate that queues are enabled */ + if (!aq_ret) + set_bit(ICE_VF_STATE_ENA, vf->vf_states); + +error_param: + /* send the response to the VF */ + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, aq_ret, + NULL, 0); +} + +/** + * ice_vc_dis_qs_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * + * 
called from the VF to disable all or specific + * queue(s) + */ +static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg) +{ + struct virtchnl_queue_select *vqs = + (struct virtchnl_queue_select *)msg; + enum ice_status aq_ret = 0; + struct ice_vsi *vsi; + + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) && + !test_bit(ICE_VF_STATE_ENA, vf->vf_states)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + if (!vqs->rx_queues && !vqs->tx_queues) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + vsi = ice_find_vsi_from_id(vf->pf, vqs->vsi_id); + if (!vsi) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + if (ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, vf->vf_id)) { + dev_err(&vsi->back->pdev->dev, + "Failed to stop tx rings on VSI %d\n", + vsi->vsi_num); + aq_ret = ICE_ERR_PARAM; + } + + if (ice_vsi_stop_rx_rings(vsi)) { + dev_err(&vsi->back->pdev->dev, + "Failed to stop rx rings on VSI %d\n", + vsi->vsi_num); + aq_ret = ICE_ERR_PARAM; + } + + /* Clear enabled queues flag */ + if (!aq_ret) + clear_bit(ICE_VF_STATE_ENA, vf->vf_states); + +error_param: + /* send the response to the VF */ + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, aq_ret, + NULL, 0); +} + +/** + * ice_vc_cfg_irq_map_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * + * called from the VF to configure the IRQ to queue map + */ +static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg) +{ + struct virtchnl_irq_map_info *irqmap_info = + (struct virtchnl_irq_map_info *)msg; + u16 vsi_id, vsi_q_id, vector_id; + struct virtchnl_vector_map *map; + struct ice_vsi *vsi = NULL; + struct ice_pf *pf = vf->pf; + enum ice_status aq_ret = 0; + unsigned long qmap; + int i; + + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + for (i = 0; i < irqmap_info->num_vectors; i++) { + map = &irqmap_info->vecmap[i]; + + vector_id = map->vector_id; + vsi_id = map->vsi_id; + /* validate msg params */ + if (!(vector_id < pf->hw.func_caps.common_cap + .num_msix_vectors) || !ice_vc_isvalid_vsi_id(vf, vsi_id)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + vsi = ice_find_vsi_from_id(vf->pf, vsi_id); + if (!vsi) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + /* lookout for the invalid queue index */ + qmap = map->rxq_map; + for_each_set_bit(vsi_q_id, &qmap, ICE_MAX_BASE_QS_PER_VF) { + struct ice_q_vector *q_vector; + + if (!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + q_vector = vsi->q_vectors[i]; + q_vector->num_ring_rx++; + q_vector->rx.itr_idx = map->rxitr_idx; + vsi->rx_rings[vsi_q_id]->q_vector = q_vector; + } + + qmap = map->txq_map; + for_each_set_bit(vsi_q_id, &qmap, ICE_MAX_BASE_QS_PER_VF) { + struct ice_q_vector *q_vector; + + if (!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + q_vector = vsi->q_vectors[i]; + q_vector->num_ring_tx++; + q_vector->tx.itr_idx = map->txitr_idx; + vsi->tx_rings[vsi_q_id]->q_vector = q_vector; + } + } + + if (vsi) + ice_vsi_cfg_msix(vsi); +error_param: + /* send the response to the VF */ + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, aq_ret, + NULL, 0); +} + +/** + * ice_vc_cfg_qs_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * + * called from the VF to configure the Rx/Tx queues + */ +static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) +{ 
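+ /* Message layout (see virtchnl.h): a struct
+ * virtchnl_vsi_queue_config_info header followed by num_queue_pairs
+ * struct virtchnl_queue_pair_info entries, each carrying the DMA base
+ * address and length of one Tx/Rx ring pair; these are copied into the
+ * VSI rings below before the queues are (re)configured.
+ */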
+ struct virtchnl_vsi_queue_config_info *qci = + (struct virtchnl_vsi_queue_config_info *)msg; + struct virtchnl_queue_pair_info *qpi; + enum ice_status aq_ret = 0; + struct ice_vsi *vsi; + int i; + + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + vsi = ice_find_vsi_from_id(vf->pf, qci->vsi_id); + if (!vsi) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + for (i = 0; i < qci->num_queue_pairs; i++) { + qpi = &qci->qpair[i]; + if (qpi->txq.vsi_id != qci->vsi_id || + qpi->rxq.vsi_id != qci->vsi_id || + qpi->rxq.queue_id != qpi->txq.queue_id || + !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + /* copy Tx queue info from VF into VSI */ + vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr; + vsi->tx_rings[i]->count = qpi->txq.ring_len; + /* copy Rx queue info from VF into vsi */ + vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr; + vsi->rx_rings[i]->count = qpi->rxq.ring_len; + if (qpi->rxq.databuffer_size > ((16 * 1024) - 128)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + vsi->rx_buf_len = qpi->rxq.databuffer_size; + if (qpi->rxq.max_pkt_size >= (16 * 1024) || + qpi->rxq.max_pkt_size < 64) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + vsi->max_frame = qpi->rxq.max_pkt_size; + } + + /* VF can request to configure less than allocated queues + * or default allocated queues. So update the VSI with new number + */ + vsi->num_txq = qci->num_queue_pairs; + vsi->num_rxq = qci->num_queue_pairs; + + if (!ice_vsi_cfg_txqs(vsi) && !ice_vsi_cfg_rxqs(vsi)) + aq_ret = 0; + else + aq_ret = ICE_ERR_PARAM; + +error_param: + /* send the response to the VF */ + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, aq_ret, + NULL, 0); +} + +/** + * ice_is_vf_trusted + * @vf: pointer to the VF info + */ +static bool ice_is_vf_trusted(struct ice_vf *vf) +{ + return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); +} + +/** + * ice_can_vf_change_mac + * @vf: pointer to the VF info + * + * Return true if the VF is allowed to change its MAC filters, false otherwise + */ +static bool ice_can_vf_change_mac(struct ice_vf *vf) +{ + /* If the VF MAC address has been set administratively (via the + * ndo_set_vf_mac command), then deny permission to the VF to + * add/delete unicast MAC addresses, unless the VF is trusted + */ + if (vf->pf_set_mac && !ice_is_vf_trusted(vf)) + return false; + + return true; +} + +/** + * ice_vc_handle_mac_addr_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * @set: true if mac filters are being set, false otherwise + * + * add guest mac address filter + */ +static int +ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set) +{ + struct virtchnl_ether_addr_list *al = + (struct virtchnl_ether_addr_list *)msg; + struct ice_pf *pf = vf->pf; + enum virtchnl_ops vc_op; + enum ice_status ret; + LIST_HEAD(mac_list); + struct ice_vsi *vsi; + int mac_count = 0; + int i; + + if (set) + vc_op = VIRTCHNL_OP_ADD_ETH_ADDR; + else + vc_op = VIRTCHNL_OP_DEL_ETH_ADDR; + + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) || + !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) { + ret = ICE_ERR_PARAM; + goto handle_mac_exit; + } + + if (set && !ice_is_vf_trusted(vf) && + (vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) { + dev_err(&pf->pdev->dev, + "Can't add more MAC addresses, because VF is not trusted, switch the VF to 
trusted mode in order to add more functionalities\n"); + ret = ICE_ERR_PARAM; + goto handle_mac_exit; + } + + vsi = pf->vsi[vf->lan_vsi_idx]; + + for (i = 0; i < al->num_elements; i++) { + u8 *maddr = al->list[i].addr; + + if (ether_addr_equal(maddr, vf->dflt_lan_addr.addr) || + is_broadcast_ether_addr(maddr)) { + if (set) { + /* VF is trying to add filters that the PF + * already added. Just continue. + */ + dev_info(&pf->pdev->dev, + "mac %pM already set for VF %d\n", + maddr, vf->vf_id); + continue; + } else { + /* VF can't remove dflt_lan_addr/bcast mac */ + dev_err(&pf->pdev->dev, + "can't remove mac %pM for VF %d\n", + maddr, vf->vf_id); + ret = ICE_ERR_PARAM; + goto handle_mac_exit; + } + } + + /* check for the invalid cases and bail if necessary */ + if (is_zero_ether_addr(maddr)) { + dev_err(&pf->pdev->dev, + "invalid mac %pM provided for VF %d\n", + maddr, vf->vf_id); + ret = ICE_ERR_PARAM; + goto handle_mac_exit; + } + + if (is_unicast_ether_addr(maddr) && + !ice_can_vf_change_mac(vf)) { + dev_err(&pf->pdev->dev, + "can't change unicast mac for untrusted VF %d\n", + vf->vf_id); + ret = ICE_ERR_PARAM; + goto handle_mac_exit; + } + + /* get here if maddr is multicast or if VF can change mac */ + if (ice_add_mac_to_list(vsi, &mac_list, al->list[i].addr)) { + ret = ICE_ERR_NO_MEMORY; + goto handle_mac_exit; + } + mac_count++; + } + + /* program the updated filter list */ + if (set) + ret = ice_add_mac(&pf->hw, &mac_list); + else + ret = ice_remove_mac(&pf->hw, &mac_list); + + if (ret) { + dev_err(&pf->pdev->dev, + "can't update mac filters for VF %d, error %d\n", + vf->vf_id, ret); + } else { + if (set) + vf->num_mac += mac_count; + else + vf->num_mac -= mac_count; + } + +handle_mac_exit: + ice_free_fltr_list(&pf->pdev->dev, &mac_list); + /* send the response to the VF */ + return ice_vc_send_msg_to_vf(vf, vc_op, ret, NULL, 0); +} + +/** + * ice_vc_add_mac_addr_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * + * add guest MAC address filter + */ +static int ice_vc_add_mac_addr_msg(struct ice_vf *vf, u8 *msg) +{ + return ice_vc_handle_mac_addr_msg(vf, msg, true); +} + +/** + * ice_vc_del_mac_addr_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * + * remove guest MAC address filter + */ +static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg) +{ + return ice_vc_handle_mac_addr_msg(vf, msg, false); +} + +/** + * ice_vc_request_qs_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * + * VFs get a default number of queues but can use this message to request a + * different number. If the request is successful, PF will reset the VF and + * return 0. If unsuccessful, PF will send message informing VF of number of + * available queue pairs via virtchnl message response to VF. + */ +static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg) +{ + struct virtchnl_vf_res_request *vfres = + (struct virtchnl_vf_res_request *)msg; + int req_queues = vfres->num_queue_pairs; + enum ice_status aq_ret = 0; + struct ice_pf *pf = vf->pf; + int tx_rx_queue_left; + int cur_queues; + + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + cur_queues = pf->num_vf_qps; + tx_rx_queue_left = min_t(int, pf->q_left_tx, pf->q_left_rx); + if (req_queues <= 0) { + dev_err(&pf->pdev->dev, + "VF %d tried to request %d queues. 
Ignoring.\n", + vf->vf_id, req_queues); + } else if (req_queues > ICE_MAX_QS_PER_VF) { + dev_err(&pf->pdev->dev, + "VF %d tried to request more than %d queues.\n", + vf->vf_id, ICE_MAX_QS_PER_VF); + vfres->num_queue_pairs = ICE_MAX_QS_PER_VF; + } else if (req_queues - cur_queues > tx_rx_queue_left) { + dev_warn(&pf->pdev->dev, + "VF %d requested %d more queues, but only %d left.\n", + vf->vf_id, req_queues - cur_queues, tx_rx_queue_left); + vfres->num_queue_pairs = tx_rx_queue_left + cur_queues; + } else { + /* request is successful, then reset VF */ + vf->num_req_qs = req_queues; + ice_vc_dis_vf(vf); + dev_info(&pf->pdev->dev, + "VF %d granted request of %d queues.\n", + vf->vf_id, req_queues); + return 0; + } + +error_param: + /* send the response to the VF */ + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, + aq_ret, (u8 *)vfres, sizeof(*vfres)); +} + +/** + * ice_set_vf_port_vlan + * @netdev: network interface device structure + * @vf_id: VF identifier + * @vlan_id: VLAN id being set + * @qos: priority setting + * @vlan_proto: VLAN protocol + * + * program VF Port VLAN id and/or qos + */ +int +ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos, + __be16 vlan_proto) +{ + u16 vlanprio = vlan_id | (qos << ICE_VLAN_PRIORITY_S); + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_pf *pf = np->vsi->back; + struct ice_vsi *vsi; + struct ice_vf *vf; + int ret = 0; + + /* validate the request */ + if (vf_id >= pf->num_alloc_vfs) { + dev_err(&pf->pdev->dev, "invalid VF id: %d\n", vf_id); + return -EINVAL; + } + + if (vlan_id > ICE_MAX_VLANID || qos > 7) { + dev_err(&pf->pdev->dev, "Invalid VF Parameters\n"); + return -EINVAL; + } + + if (vlan_proto != htons(ETH_P_8021Q)) { + dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n"); + return -EPROTONOSUPPORT; + } + + vf = &pf->vf[vf_id]; + vsi = pf->vsi[vf->lan_vsi_idx]; + if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) { + dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id); + return -EBUSY; + } + + if (le16_to_cpu(vsi->info.pvid) == vlanprio) { + /* duplicate request, so just return success */ + dev_info(&pf->pdev->dev, + "Duplicate pvid %d request\n", vlanprio); + return ret; + } + + /* If pvid, then remove all filters on the old VLAN */ + if (vsi->info.pvid) + ice_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) & + VLAN_VID_MASK)); + + if (vlan_id || qos) { + ret = ice_vsi_set_pvid(vsi, vlanprio); + if (ret) + goto error_set_pvid; + } else { + ice_vsi_kill_pvid(vsi); + } + + if (vlan_id) { + dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n", + vlan_id, qos, vf_id); + + /* add new VLAN filter for each MAC */ + ret = ice_vsi_add_vlan(vsi, vlan_id); + if (ret) + goto error_set_pvid; + } + + /* The Port VLAN needs to be saved across resets the same as the + * default LAN MAC address. 
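+ * (It is captured below from vsi->info.pvid once the PVID has been
+ * programmed.)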
+ */ + vf->port_vlan_id = le16_to_cpu(vsi->info.pvid); + +error_set_pvid: + return ret; +} + +/** + * ice_vc_process_vlan_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * @add_v: Add VLAN if true, otherwise delete VLAN + * + * Process virtchnl op to add or remove programmed guest VLAN id + */ +static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v) +{ + struct virtchnl_vlan_filter_list *vfl = + (struct virtchnl_vlan_filter_list *)msg; + enum ice_status aq_ret = 0; + struct ice_pf *pf = vf->pf; + struct ice_vsi *vsi; + int i; + + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + if (!ice_vc_isvalid_vsi_id(vf, vfl->vsi_id)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + if (add_v && !ice_is_vf_trusted(vf) && + vf->num_vlan >= ICE_MAX_VLAN_PER_VF) { + dev_info(&pf->pdev->dev, + "VF is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n"); + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + for (i = 0; i < vfl->num_elements; i++) { + if (vfl->vlan_id[i] > ICE_MAX_VLANID) { + aq_ret = ICE_ERR_PARAM; + dev_err(&pf->pdev->dev, + "invalid VF VLAN id %d\n", vfl->vlan_id[i]); + goto error_param; + } + } + + vsi = ice_find_vsi_from_id(vf->pf, vfl->vsi_id); + if (!vsi) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + if (vsi->info.pvid) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + if (ice_vsi_manage_vlan_stripping(vsi, add_v)) { + dev_err(&pf->pdev->dev, + "%sable VLAN stripping failed for VSI %i\n", + add_v ? "en" : "dis", vsi->vsi_num); + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + if (add_v) { + for (i = 0; i < vfl->num_elements; i++) { + u16 vid = vfl->vlan_id[i]; + + if (!ice_vsi_add_vlan(vsi, vid)) { + vf->num_vlan++; + set_bit(vid, vsi->active_vlans); + + /* Enable VLAN pruning when VLAN 0 is added */ + if (unlikely(!vid)) + if (ice_cfg_vlan_pruning(vsi, true)) + aq_ret = ICE_ERR_PARAM; + } else { + aq_ret = ICE_ERR_PARAM; + } + } + } else { + for (i = 0; i < vfl->num_elements; i++) { + u16 vid = vfl->vlan_id[i]; + + /* Make sure ice_vsi_kill_vlan is successful before + * updating VLAN information + */ + if (!ice_vsi_kill_vlan(vsi, vid)) { + vf->num_vlan--; + clear_bit(vid, vsi->active_vlans); + + /* Disable VLAN pruning when removing VLAN 0 */ + if (unlikely(!vid)) + ice_cfg_vlan_pruning(vsi, false); + } + } + } + +error_param: + /* send the response to the VF */ + if (add_v) + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret, + NULL, 0); + else + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret, + NULL, 0); +} + +/** + * ice_vc_add_vlan_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * + * Add and program guest VLAN id + */ +static int ice_vc_add_vlan_msg(struct ice_vf *vf, u8 *msg) +{ + return ice_vc_process_vlan_msg(vf, msg, true); +} + +/** + * ice_vc_remove_vlan_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * + * remove programmed guest VLAN id + */ +static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg) +{ + return ice_vc_process_vlan_msg(vf, msg, false); +} + +/** + * ice_vc_ena_vlan_stripping + * @vf: pointer to the VF info + * + * Enable VLAN header stripping for a given VF + */ +static int ice_vc_ena_vlan_stripping(struct ice_vf *vf) +{ + enum ice_status aq_ret = 0; + struct ice_pf *pf = vf->pf; + struct ice_vsi *vsi; + + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + vsi 
= pf->vsi[vf->lan_vsi_idx]; + if (ice_vsi_manage_vlan_stripping(vsi, true)) + aq_ret = ICE_ERR_AQ_ERROR; + +error_param: + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING, + aq_ret, NULL, 0); +} + +/** + * ice_vc_dis_vlan_stripping + * @vf: pointer to the VF info + * + * Disable VLAN header stripping for a given VF + */ +static int ice_vc_dis_vlan_stripping(struct ice_vf *vf) +{ + enum ice_status aq_ret = 0; + struct ice_pf *pf = vf->pf; + struct ice_vsi *vsi; + + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + vsi = pf->vsi[vf->lan_vsi_idx]; + if (ice_vsi_manage_vlan_stripping(vsi, false)) + aq_ret = ICE_ERR_AQ_ERROR; + +error_param: + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, + aq_ret, NULL, 0); +} + +/** + * ice_vc_process_vf_msg - Process request from VF + * @pf: pointer to the PF structure + * @event: pointer to the AQ event + * + * called from the common asq/arq handler to + * process request from VF + */ +void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event) +{ + u32 v_opcode = le32_to_cpu(event->desc.cookie_high); + s16 vf_id = le16_to_cpu(event->desc.retval); + u16 msglen = event->msg_len; + u8 *msg = event->msg_buf; + struct ice_vf *vf = NULL; + int err = 0; + + if (vf_id >= pf->num_alloc_vfs) { + err = -EINVAL; + goto error_handler; + } + + vf = &pf->vf[vf_id]; + + /* Check if VF is disabled. */ + if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) { + err = -EPERM; + goto error_handler; + } + + /* Perform basic checks on the msg */ + err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen); + if (err) { + if (err == VIRTCHNL_ERR_PARAM) + err = -EPERM; + else + err = -EINVAL; + goto error_handler; + } + + /* Perform additional checks specific to RSS and Virtchnl */ + if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_KEY) { + struct virtchnl_rss_key *vrk = (struct virtchnl_rss_key *)msg; + + if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) + err = -EINVAL; + } else if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_LUT) { + struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg; + + if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) + err = -EINVAL; + } + +error_handler: + if (err) { + ice_vc_send_msg_to_vf(vf, v_opcode, ICE_ERR_PARAM, NULL, 0); + dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n", + vf_id, v_opcode, msglen, err); + return; + } + + switch (v_opcode) { + case VIRTCHNL_OP_VERSION: + err = ice_vc_get_ver_msg(vf, msg); + break; + case VIRTCHNL_OP_GET_VF_RESOURCES: + err = ice_vc_get_vf_res_msg(vf, msg); + break; + case VIRTCHNL_OP_RESET_VF: + ice_vc_reset_vf_msg(vf); + break; + case VIRTCHNL_OP_ADD_ETH_ADDR: + err = ice_vc_add_mac_addr_msg(vf, msg); + break; + case VIRTCHNL_OP_DEL_ETH_ADDR: + err = ice_vc_del_mac_addr_msg(vf, msg); + break; + case VIRTCHNL_OP_CONFIG_VSI_QUEUES: + err = ice_vc_cfg_qs_msg(vf, msg); + break; + case VIRTCHNL_OP_ENABLE_QUEUES: + err = ice_vc_ena_qs_msg(vf, msg); + ice_vc_notify_vf_link_state(vf); + break; + case VIRTCHNL_OP_DISABLE_QUEUES: + err = ice_vc_dis_qs_msg(vf, msg); + break; + case VIRTCHNL_OP_REQUEST_QUEUES: + err = ice_vc_request_qs_msg(vf, msg); + break; + case VIRTCHNL_OP_CONFIG_IRQ_MAP: + err = ice_vc_cfg_irq_map_msg(vf, msg); + break; + case VIRTCHNL_OP_CONFIG_RSS_KEY: + err = ice_vc_config_rss_key(vf, msg); + break; + case VIRTCHNL_OP_CONFIG_RSS_LUT: + err = ice_vc_config_rss_lut(vf, msg); + break; + case VIRTCHNL_OP_GET_STATS: + err = ice_vc_get_stats_msg(vf, msg); + 
break;
+ case VIRTCHNL_OP_ADD_VLAN:
+ err = ice_vc_add_vlan_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_DEL_VLAN:
+ err = ice_vc_remove_vlan_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
+ err = ice_vc_ena_vlan_stripping(vf);
+ break;
+ case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
+ err = ice_vc_dis_vlan_stripping(vf);
+ break;
+ case VIRTCHNL_OP_UNKNOWN:
+ default:
+ dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
+ v_opcode, vf_id);
+ err = ice_vc_send_msg_to_vf(vf, v_opcode, ICE_ERR_NOT_IMPL,
+ NULL, 0);
+ break;
+ }
+ if (err) {
+ /* Helper function cares less about error return values here
+ * as it is busy with pending work.
+ */
+ dev_info(&pf->pdev->dev,
+ "PF failed to honor VF %d, opcode %d, error %d\n",
+ vf_id, v_opcode, err);
+ }
+}
+
+/**
+ * ice_get_vf_cfg
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @ivi: VF configuration structure
+ *
+ * return VF configuration
+ */
+int ice_get_vf_cfg(struct net_device *netdev, int vf_id,
+ struct ifla_vf_info *ivi)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+ struct ice_vf *vf;
+
+ /* validate the request */
+ if (vf_id >= pf->num_alloc_vfs) {
+ netdev_err(netdev, "invalid VF id: %d\n", vf_id);
+ return -EINVAL;
+ }
+
+ vf = &pf->vf[vf_id];
+ vsi = pf->vsi[vf->lan_vsi_idx];
+
+ if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
+ netdev_err(netdev, "VF %d in reset. Try again.\n", vf_id);
+ return -EBUSY;
+ }
+
+ ivi->vf = vf_id;
+ ether_addr_copy(ivi->mac, vf->dflt_lan_addr.addr);
+
+ /* VF configuration for VLAN and applicable QoS */
+ ivi->vlan = le16_to_cpu(vsi->info.pvid) & ICE_VLAN_M;
+ ivi->qos = (le16_to_cpu(vsi->info.pvid) & ICE_PRIORITY_M) >>
+ ICE_VLAN_PRIORITY_S;
+
+ ivi->trusted = vf->trusted;
+ ivi->spoofchk = vf->spoofchk;
+ if (!vf->link_forced)
+ ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
+ else if (vf->link_up)
+ ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
+ else
+ ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
+ ivi->max_tx_rate = vf->tx_rate;
+ ivi->min_tx_rate = 0;
+ return 0;
+}
+
+/**
+ * ice_set_vf_spoofchk
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @ena: flag to enable or disable feature
+ *
+ * Enable or disable VF spoof checking
+ */
+int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi_ctx ctx = { 0 };
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+ struct ice_vf *vf;
+ int status;
+
+ /* validate the request */
+ if (vf_id >= pf->num_alloc_vfs) {
+ netdev_err(netdev, "invalid VF id: %d\n", vf_id);
+ return -EINVAL;
+ }
+
+ vf = &pf->vf[vf_id];
+ if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
+ netdev_err(netdev, "VF %d in reset. Try again.\n", vf_id);
+ return -EBUSY;
+ }
+
+ if (ena == vf->spoofchk) {
+ dev_dbg(&pf->pdev->dev, "VF spoofchk already %s\n",
+ ena ? 
"ON" : "OFF"); + return 0; + } + + ctx.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID); + + if (ena) { + ctx.info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF; + ctx.info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_M; + } + + status = ice_update_vsi(&pf->hw, vsi->idx, &ctx, NULL); + if (status) { + dev_dbg(&pf->pdev->dev, + "Error %d, failed to update VSI* parameters\n", status); + return -EIO; + } + + vf->spoofchk = ena; + vsi->info.sec_flags = ctx.info.sec_flags; + vsi->info.sw_flags2 = ctx.info.sw_flags2; + + return status; +} + +/** + * ice_set_vf_mac + * @netdev: network interface device structure + * @vf_id: VF identifier + * @mac: mac address + * + * program VF mac address + */ +int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_vsi *vsi = np->vsi; + struct ice_pf *pf = vsi->back; + struct ice_vf *vf; + int ret = 0; + + /* validate the request */ + if (vf_id >= pf->num_alloc_vfs) { + netdev_err(netdev, "invalid VF id: %d\n", vf_id); + return -EINVAL; + } + + vf = &pf->vf[vf_id]; + if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) { + netdev_err(netdev, "VF %d in reset. Try again.\n", vf_id); + return -EBUSY; + } + + if (is_zero_ether_addr(mac) || is_multicast_ether_addr(mac)) { + netdev_err(netdev, "%pM not a valid unicast address\n", mac); + return -EINVAL; + } + + /* copy mac into dflt_lan_addr and trigger a VF reset. The reset + * flow will use the updated dflt_lan_addr and add a MAC filter + * using ice_add_mac. Also set pf_set_mac to indicate that the PF has + * set the MAC address for this VF. + */ + ether_addr_copy(vf->dflt_lan_addr.addr, mac); + vf->pf_set_mac = true; + netdev_info(netdev, + "mac on VF %d set to %pM\n. VF driver will be reinitialized\n", + vf_id, mac); + + ice_vc_dis_vf(vf); + return ret; +} + +/** + * ice_set_vf_trust + * @netdev: network interface device structure + * @vf_id: VF identifier + * @trusted: Boolean value to enable/disable trusted VF + * + * Enable or disable a given VF as trusted + */ +int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_vsi *vsi = np->vsi; + struct ice_pf *pf = vsi->back; + struct ice_vf *vf; + + /* validate the request */ + if (vf_id >= pf->num_alloc_vfs) { + dev_err(&pf->pdev->dev, "invalid VF id: %d\n", vf_id); + return -EINVAL; + } + + vf = &pf->vf[vf_id]; + if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) { + dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id); + return -EBUSY; + } + + /* Check if already trusted */ + if (trusted == vf->trusted) + return 0; + + vf->trusted = trusted; + ice_vc_dis_vf(vf); + dev_info(&pf->pdev->dev, "VF %u is now %strusted\n", + vf_id, trusted ? 
"" : "un"); + + return 0; +} + +/** + * ice_set_vf_link_state + * @netdev: network interface device structure + * @vf_id: VF identifier + * @link_state: required link state + * + * Set VF's link state, irrespective of physical link state status + */ +int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_pf *pf = np->vsi->back; + struct virtchnl_pf_event pfe = { 0 }; + struct ice_link_status *ls; + struct ice_vf *vf; + struct ice_hw *hw; + + if (vf_id >= pf->num_alloc_vfs) { + dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); + return -EINVAL; + } + + vf = &pf->vf[vf_id]; + hw = &pf->hw; + ls = &pf->hw.port_info->phy.link_info; + + if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) { + dev_err(&pf->pdev->dev, "vf %d in reset. Try again.\n", vf_id); + return -EBUSY; + } + + pfe.event = VIRTCHNL_EVENT_LINK_CHANGE; + pfe.severity = PF_EVENT_SEVERITY_INFO; + + switch (link_state) { + case IFLA_VF_LINK_STATE_AUTO: + vf->link_forced = false; + vf->link_up = ls->link_info & ICE_AQ_LINK_UP; + break; + case IFLA_VF_LINK_STATE_ENABLE: + vf->link_forced = true; + vf->link_up = true; + break; + case IFLA_VF_LINK_STATE_DISABLE: + vf->link_forced = true; + vf->link_up = false; + break; + default: + return -EINVAL; + } + + if (vf->link_forced) + ice_set_pfe_link_forced(vf, &pfe, vf->link_up); + else + ice_set_pfe_link(vf, &pfe, ls->link_speed, vf->link_up); + + /* Notify the VF of its new link state */ + ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT, 0, (u8 *)&pfe, + sizeof(pfe), NULL); + + return 0; +} diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h new file mode 100644 index 000000000000..10131e0180f9 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h @@ -0,0 +1,173 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018, Intel Corporation. */ + +#ifndef _ICE_VIRTCHNL_PF_H_ +#define _ICE_VIRTCHNL_PF_H_ +#include "ice.h" + +#define ICE_MAX_VLANID 4095 +#define ICE_VLAN_PRIORITY_S 12 +#define ICE_VLAN_M 0xFFF +#define ICE_PRIORITY_M 0x7000 + +/* Restrict number of MAC Addr and VLAN that non-trusted VF can programmed */ +#define ICE_MAX_VLAN_PER_VF 8 +#define ICE_MAX_MACADDR_PER_VF 12 + +/* Malicious Driver Detection */ +#define ICE_DFLT_NUM_MDD_EVENTS_ALLOWED 3 +#define ICE_DFLT_NUM_INVAL_MSGS_ALLOWED 10 + +/* Static VF transaction/status register def */ +#define VF_DEVICE_STATUS 0xAA +#define VF_TRANS_PENDING_M 0x20 + +/* Specific VF states */ +enum ice_vf_states { + ICE_VF_STATE_INIT = 0, + ICE_VF_STATE_ACTIVE, + ICE_VF_STATE_ENA, + ICE_VF_STATE_DIS, + ICE_VF_STATE_MC_PROMISC, + ICE_VF_STATE_UC_PROMISC, + /* state to indicate if PF needs to do vector assignment for VF. + * This needs to be set during first time VF initialization or later + * when VF asks for more Vectors through virtchnl OP. 
+ */ + ICE_VF_STATE_CFG_INTR, + ICE_VF_STATES_NBITS +}; + +/* VF capabilities */ +enum ice_virtchnl_cap { + ICE_VIRTCHNL_VF_CAP_L2 = 0, + ICE_VIRTCHNL_VF_CAP_PRIVILEGE, +}; + +/* VF information structure */ +struct ice_vf { + struct ice_pf *pf; + + s16 vf_id; /* VF id in the PF space */ + u32 driver_caps; /* reported by VF driver */ + int first_vector_idx; /* first vector index of this VF */ + struct ice_sw *vf_sw_id; /* switch id the VF VSIs connect to */ + struct virtchnl_version_info vf_ver; + struct virtchnl_ether_addr dflt_lan_addr; + u16 port_vlan_id; + u8 pf_set_mac; /* VF MAC address set by VMM admin */ + u8 trusted; + u16 lan_vsi_idx; /* index into PF struct */ + u16 lan_vsi_num; /* ID as used by firmware */ + u64 num_mdd_events; /* number of mdd events detected */ + u64 num_inval_msgs; /* number of continuous invalid msgs */ + u64 num_valid_msgs; /* number of valid msgs detected */ + unsigned long vf_caps; /* vf's adv. capabilities */ + DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */ + unsigned int tx_rate; /* Tx bandwidth limit in Mbps */ + u8 link_forced; + u8 link_up; /* only valid if VF link is forced */ + u8 spoofchk; + u16 num_mac; + u16 num_vlan; + u8 num_req_qs; /* num of queue pairs requested by VF */ +}; + +#ifdef CONFIG_PCI_IOV +void ice_process_vflr_event(struct ice_pf *pf); +int ice_sriov_configure(struct pci_dev *pdev, int num_vfs); +int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac); +int ice_get_vf_cfg(struct net_device *netdev, int vf_id, + struct ifla_vf_info *ivi); + +void ice_free_vfs(struct ice_pf *pf); +void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event); +void ice_vc_notify_link_state(struct ice_pf *pf); +void ice_vc_notify_reset(struct ice_pf *pf); +bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr); + +int ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, + u16 vlan_id, u8 qos, __be16 vlan_proto); + +int ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, + int max_tx_rate); + +int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted); + +int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state); + +int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena); +#else /* CONFIG_PCI_IOV */ +#define ice_process_vflr_event(pf) do {} while (0) +#define ice_free_vfs(pf) do {} while (0) +#define ice_vc_process_vf_msg(pf, event) do {} while (0) +#define ice_vc_notify_link_state(pf) do {} while (0) +#define ice_vc_notify_reset(pf) do {} while (0) + +static inline bool +ice_reset_all_vfs(struct ice_pf __always_unused *pf, + bool __always_unused is_vflr) +{ + return true; +} + +static inline int +ice_sriov_configure(struct pci_dev __always_unused *pdev, + int __always_unused num_vfs) +{ + return -EOPNOTSUPP; +} + +static inline int +ice_set_vf_mac(struct net_device __always_unused *netdev, + int __always_unused vf_id, u8 __always_unused *mac) +{ + return -EOPNOTSUPP; +} + +static inline int +ice_get_vf_cfg(struct net_device __always_unused *netdev, + int __always_unused vf_id, + struct ifla_vf_info __always_unused *ivi) +{ + return -EOPNOTSUPP; +} + +static inline int +ice_set_vf_trust(struct net_device __always_unused *netdev, + int __always_unused vf_id, bool __always_unused trusted) +{ + return -EOPNOTSUPP; +} + +static inline int +ice_set_vf_port_vlan(struct net_device __always_unused *netdev, + int __always_unused vf_id, u16 __always_unused vid, + u8 __always_unused qos, __be16 __always_unused v_proto) +{ + return 
-EOPNOTSUPP; +} + +static inline int +ice_set_vf_spoofchk(struct net_device __always_unused *netdev, + int __always_unused vf_id, bool __always_unused ena) +{ + return -EOPNOTSUPP; +} + +static inline int +ice_set_vf_link_state(struct net_device __always_unused *netdev, + int __always_unused vf_id, int __always_unused link_state) +{ + return -EOPNOTSUPP; +} + +static inline int +ice_set_vf_bw(struct net_device __always_unused *netdev, + int __always_unused vf_id, int __always_unused min_tx_rate, + int __always_unused max_tx_rate) +{ + return -EOPNOTSUPP; +} +#endif /* CONFIG_PCI_IOV */ +#endif /* _ICE_VIRTCHNL_PF_H_ */ diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 0796cef96fa3..0d29df8accd8 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -239,7 +239,7 @@ static struct pci_driver igb_driver = { MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>"); MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver"); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); MODULE_VERSION(DRV_VERSION); #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index e0c989ffb2b3..820d49eb41ab 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -3011,7 +3011,7 @@ module_exit(igbvf_exit_module); MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>"); MODULE_DESCRIPTION("Intel(R) Gigabit Virtual Function Network Driver"); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); MODULE_VERSION(DRV_VERSION); /* netdev.c */ diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c index 7722153c4ac2..1d4d1686909a 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c @@ -102,7 +102,7 @@ static struct pci_driver ixgb_driver = { MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); MODULE_DESCRIPTION("Intel(R) PRO/10GbE Network Driver"); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); MODULE_VERSION(DRV_VERSION); #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile index 5414685189ce..ca6b0c458e4a 100644 --- a/drivers/net/ethernet/intel/ixgbe/Makefile +++ b/drivers/net/ethernet/intel/ixgbe/Makefile @@ -8,7 +8,8 @@ obj-$(CONFIG_IXGBE) += ixgbe.o ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \ ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \ - ixgbe_mbx.o ixgbe_x540.o ixgbe_x550.o ixgbe_lib.o ixgbe_ptp.o + ixgbe_mbx.o ixgbe_x540.o ixgbe_x550.o ixgbe_lib.o ixgbe_ptp.o \ + ixgbe_xsk.o ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \ ixgbe_dcb_82599.o ixgbe_dcb_nl.o diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 4fc906c6166b..7a7679e7be84 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -228,13 +228,17 @@ struct ixgbe_tx_buffer { struct ixgbe_rx_buffer { struct sk_buff *skb; dma_addr_t dma; - struct page *page; -#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) - __u32 page_offset; -#else - __u16 page_offset; -#endif - __u16 pagecnt_bias; + union { + struct { + struct page *page; + __u32 page_offset; + __u16 pagecnt_bias; + }; + struct { + void 
*addr; + u64 handle; + }; + }; }; struct ixgbe_queue_stats { @@ -271,6 +275,7 @@ enum ixgbe_ring_state_t { __IXGBE_TX_DETECT_HANG, __IXGBE_HANG_CHECK_ARMED, __IXGBE_TX_XDP_RING, + __IXGBE_TX_DISABLED, }; #define ring_uses_build_skb(ring) \ @@ -347,6 +352,10 @@ struct ixgbe_ring { struct ixgbe_rx_queue_stats rx_stats; }; struct xdp_rxq_info xdp_rxq; + struct xdp_umem *xsk_umem; + struct zero_copy_allocator zca; /* ZC allocator anchor */ + u16 ring_idx; /* {rx,tx,xdp}_ring back reference idx */ + u16 rx_buf_len; } ____cacheline_internodealigned_in_smp; enum ixgbe_ring_f_enum { @@ -605,6 +614,7 @@ struct ixgbe_adapter { #define IXGBE_FLAG2_EEE_ENABLED BIT(15) #define IXGBE_FLAG2_RX_LEGACY BIT(16) #define IXGBE_FLAG2_IPSEC_ENABLED BIT(17) +#define IXGBE_FLAG2_VF_IPSEC_ENABLED BIT(18) /* Tx fast path data */ int num_tx_queues; @@ -763,6 +773,11 @@ struct ixgbe_adapter { #ifdef CONFIG_XFRM_OFFLOAD struct ixgbe_ipsec *ipsec; #endif /* CONFIG_XFRM_OFFLOAD */ + + /* AF_XDP zero-copy */ + struct xdp_umem **xsk_umems; + u16 num_xsk_umems_used; + u16 num_xsk_umems; }; static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter) @@ -1003,15 +1018,24 @@ void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring, struct sk_buff *skb); int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first, struct ixgbe_ipsec_tx_data *itd); +void ixgbe_ipsec_vf_clear(struct ixgbe_adapter *adapter, u32 vf); +int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *mbuf, u32 vf); +int ixgbe_ipsec_vf_del_sa(struct ixgbe_adapter *adapter, u32 *mbuf, u32 vf); #else -static inline void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter) { }; -static inline void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter) { }; -static inline void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter) { }; +static inline void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter) { } +static inline void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter) { } +static inline void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter) { } static inline void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring, union ixgbe_adv_rx_desc *rx_desc, - struct sk_buff *skb) { }; + struct sk_buff *skb) { } static inline int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first, - struct ixgbe_ipsec_tx_data *itd) { return 0; }; + struct ixgbe_ipsec_tx_data *itd) { return 0; } +static inline void ixgbe_ipsec_vf_clear(struct ixgbe_adapter *adapter, + u32 vf) { } +static inline int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, + u32 *mbuf, u32 vf) { return -EACCES; } +static inline int ixgbe_ipsec_vf_del_sa(struct ixgbe_adapter *adapter, + u32 *mbuf, u32 vf) { return -EACCES; } #endif /* CONFIG_XFRM_OFFLOAD */ #endif /* _IXGBE_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index e5a8461fe6a9..732b1e6ecc43 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -136,6 +136,8 @@ static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = { static const char ixgbe_priv_flags_strings[][ETH_GSTRING_LEN] = { #define IXGBE_PRIV_FLAGS_LEGACY_RX BIT(0) "legacy-rx", +#define IXGBE_PRIV_FLAGS_VF_IPSEC_EN BIT(1) + "vf-ipsec", }; #define IXGBE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ixgbe_priv_flags_strings) @@ -3409,6 +3411,9 @@ static u32 ixgbe_get_priv_flags(struct net_device *netdev) if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY) priv_flags |= IXGBE_PRIV_FLAGS_LEGACY_RX; + if (adapter->flags2 & 
IXGBE_FLAG2_VF_IPSEC_ENABLED) + priv_flags |= IXGBE_PRIV_FLAGS_VF_IPSEC_EN; + return priv_flags; } @@ -3421,6 +3426,10 @@ static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags) if (priv_flags & IXGBE_PRIV_FLAGS_LEGACY_RX) flags2 |= IXGBE_FLAG2_RX_LEGACY; + flags2 &= ~IXGBE_FLAG2_VF_IPSEC_ENABLED; + if (priv_flags & IXGBE_PRIV_FLAGS_VF_IPSEC_EN) + flags2 |= IXGBE_FLAG2_VF_IPSEC_ENABLED; + if (flags2 != adapter->flags2) { adapter->flags2 = flags2; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c index da4322e4daed..fd1b0546fd67 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c @@ -5,6 +5,11 @@ #include <net/xfrm.h> #include <crypto/aead.h> +#define IXGBE_IPSEC_KEY_BITS 160 +static const char aes_gcm_name[] = "rfc4106(gcm(aes))"; + +static void ixgbe_ipsec_del_sa(struct xfrm_state *xs); + /** * ixgbe_ipsec_set_tx_sa - set the Tx SA registers * @hw: hw specific details @@ -113,7 +118,6 @@ static void ixgbe_ipsec_set_rx_ip(struct ixgbe_hw *hw, u16 idx, __be32 addr[]) **/ static void ixgbe_ipsec_clear_hw_tables(struct ixgbe_adapter *adapter) { - struct ixgbe_ipsec *ipsec = adapter->ipsec; struct ixgbe_hw *hw = &adapter->hw; u32 buf[4] = {0, 0, 0, 0}; u16 idx; @@ -132,9 +136,6 @@ static void ixgbe_ipsec_clear_hw_tables(struct ixgbe_adapter *adapter) ixgbe_ipsec_set_tx_sa(hw, idx, buf, 0); ixgbe_ipsec_set_rx_sa(hw, idx, 0, buf, 0, 0, 0); } - - ipsec->num_rx_sa = 0; - ipsec->num_tx_sa = 0; } /** @@ -290,6 +291,13 @@ static void ixgbe_ipsec_start_engine(struct ixgbe_adapter *adapter) /** * ixgbe_ipsec_restore - restore the ipsec HW settings after a reset * @adapter: board private structure + * + * Reload the HW tables from the SW tables after they've been bashed + * by a chip reset. + * + * Any VF entries are removed from the SW and HW tables since either + * (a) the VF also gets reset on PF reset and will ask again for the + * offloads, or (b) the VF has been removed by a change in the num_vfs. 
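
As a condensed restatement of the per-slot rule in the loop that follows (same field names as the patch, no new behavior; the Tx table is handled the same way via ixgbe_ipsec_set_tx_sa()):

    struct rx_sa *r = &ipsec->rx_tbl[i];

    if (r->used) {
            if (r->mode & IXGBE_RXTXMOD_VF)
                    ixgbe_ipsec_del_sa(r->xs);      /* VF re-adds after its own reset */
            else
                    ixgbe_ipsec_set_rx_sa(hw, i, r->xs->id.spi, r->key,
                                          r->salt, r->mode, r->iptbl_ind);
    }
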
**/ void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter) { @@ -305,6 +313,28 @@ void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter) ixgbe_ipsec_clear_hw_tables(adapter); ixgbe_ipsec_start_engine(adapter); + /* reload the Rx and Tx keys */ + for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) { + struct rx_sa *r = &ipsec->rx_tbl[i]; + struct tx_sa *t = &ipsec->tx_tbl[i]; + + if (r->used) { + if (r->mode & IXGBE_RXTXMOD_VF) + ixgbe_ipsec_del_sa(r->xs); + else + ixgbe_ipsec_set_rx_sa(hw, i, r->xs->id.spi, + r->key, r->salt, + r->mode, r->iptbl_ind); + } + + if (t->used) { + if (t->mode & IXGBE_RXTXMOD_VF) + ixgbe_ipsec_del_sa(t->xs); + else + ixgbe_ipsec_set_tx_sa(hw, i, t->key, t->salt); + } + } + /* reload the IP addrs */ for (i = 0; i < IXGBE_IPSEC_MAX_RX_IP_COUNT; i++) { struct rx_ip_sa *ipsa = &ipsec->ip_tbl[i]; @@ -312,20 +342,6 @@ void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter) if (ipsa->used) ixgbe_ipsec_set_rx_ip(hw, i, ipsa->ipaddr); } - - /* reload the Rx and Tx keys */ - for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) { - struct rx_sa *rsa = &ipsec->rx_tbl[i]; - struct tx_sa *tsa = &ipsec->tx_tbl[i]; - - if (rsa->used) - ixgbe_ipsec_set_rx_sa(hw, i, rsa->xs->id.spi, - rsa->key, rsa->salt, - rsa->mode, rsa->iptbl_ind); - - if (tsa->used) - ixgbe_ipsec_set_tx_sa(hw, i, tsa->key, tsa->salt); - } } /** @@ -382,6 +398,8 @@ static struct xfrm_state *ixgbe_ipsec_find_rx_state(struct ixgbe_ipsec *ipsec, rcu_read_lock(); hash_for_each_possible_rcu(ipsec->rx_sa_list, rsa, hlist, (__force u32)spi) { + if (rsa->mode & IXGBE_RXTXMOD_VF) + continue; if (spi == rsa->xs->id.spi && ((ip4 && *daddr == rsa->xs->id.daddr.a4) || (!ip4 && !memcmp(daddr, &rsa->xs->id.daddr.a6, @@ -411,7 +429,6 @@ static int ixgbe_ipsec_parse_proto_keys(struct xfrm_state *xs, struct net_device *dev = xs->xso.dev; unsigned char *key_data; char *alg_name = NULL; - const char aes_gcm_name[] = "rfc4106(gcm(aes))"; int key_len; if (!xs->aead) { @@ -439,9 +456,9 @@ static int ixgbe_ipsec_parse_proto_keys(struct xfrm_state *xs, * we don't need to do any byteswapping. 
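
The 160-bit layout described here can be checked in isolation; a minimal stand-alone sketch (user-space C, values hypothetical) that extracts the salt exactly as the `((u32 *)key_data)[4]` access below does:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            uint8_t key_data[20];   /* 160 bits: 16-byte AES-GCM key + 4-byte salt */
            uint32_t mysalt;

            memset(key_data, 0xaa, 16);                     /* key bytes */
            memcpy(&key_data[16], "\x01\x02\x03\x04", 4);   /* salt bytes */

            /* byte-for-byte copy, i.e. ((u32 *)key_data)[4]: no byteswapping */
            memcpy(&mysalt, &key_data[16], sizeof(mysalt));
            printf("salt = 0x%08x\n", (unsigned int)mysalt);
            return 0;
    }
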
* 160 accounts for 16 byte key and 4 byte salt */ - if (key_len == 160) { + if (key_len == IXGBE_IPSEC_KEY_BITS) { *mysalt = ((u32 *)key_data)[4]; - } else if (key_len != 128) { + } else if (key_len != (IXGBE_IPSEC_KEY_BITS - (sizeof(*mysalt) * 8))) { netdev_err(dev, "IPsec hw offload only supports keys up to 128 bits with a 32 bit salt\n"); return -EINVAL; } else { @@ -676,6 +693,9 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs) } else { struct tx_sa tsa; + if (adapter->num_vfs) + return -EOPNOTSUPP; + /* find the first unused index */ ret = ixgbe_ipsec_find_empty_idx(ipsec, false); if (ret < 0) { @@ -811,6 +831,226 @@ static const struct xfrmdev_ops ixgbe_xfrmdev_ops = { }; /** + * ixgbe_ipsec_vf_clear - clear the tables of data for a VF + * @adapter: board private structure + * @vf: VF id to be removed + **/ +void ixgbe_ipsec_vf_clear(struct ixgbe_adapter *adapter, u32 vf) +{ + struct ixgbe_ipsec *ipsec = adapter->ipsec; + int i; + + /* search rx sa table */ + for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT && ipsec->num_rx_sa; i++) { + if (!ipsec->rx_tbl[i].used) + continue; + if (ipsec->rx_tbl[i].mode & IXGBE_RXTXMOD_VF && + ipsec->rx_tbl[i].vf == vf) + ixgbe_ipsec_del_sa(ipsec->rx_tbl[i].xs); + } + + /* search tx sa table */ + for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT && ipsec->num_tx_sa; i++) { + if (!ipsec->tx_tbl[i].used) + continue; + if (ipsec->tx_tbl[i].mode & IXGBE_RXTXMOD_VF && + ipsec->tx_tbl[i].vf == vf) + ixgbe_ipsec_del_sa(ipsec->tx_tbl[i].xs); + } +} + +/** + * ixgbe_ipsec_vf_add_sa - translate VF request to SA add + * @adapter: board private structure + * @msgbuf: The message buffer + * @vf: the VF index + * + * Make up a new xs and algorithm info from the data sent by the VF. + * We only need to sketch in just enough to set up the HW offload. + * Put the resulting offload_handle into the return message to the VF. + * + * Returns 0 or error value + **/ +int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) +{ + struct ixgbe_ipsec *ipsec = adapter->ipsec; + struct xfrm_algo_desc *algo; + struct sa_mbx_msg *sam; + struct xfrm_state *xs; + size_t aead_len; + u16 sa_idx; + u32 pfsa; + int err; + + sam = (struct sa_mbx_msg *)(&msgbuf[1]); + if (!adapter->vfinfo[vf].trusted || + !(adapter->flags2 & IXGBE_FLAG2_VF_IPSEC_ENABLED)) { + e_warn(drv, "VF %d attempted to add an IPsec SA\n", vf); + err = -EACCES; + goto err_out; + } + + /* Tx IPsec offload doesn't seem to work on this + * device, so block these requests for now. 
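
For orientation, this is roughly what the trusted VF is expected to have sent: a sketch following the sa_mbx_msg layout from ixgbe_ipsec.h (the authoritative sender is the ixgbevf ipsec code added later in this series, and IXGBE_VFMAILBOX_SIZE is assumed to be the usual 16-dword mailbox). Note the PF also requires the new private flag: ethtool --set-priv-flags <pf> vf-ipsec on.

    u32 msgbuf[IXGBE_VFMAILBOX_SIZE] = { 0 };       /* assumed 16 dwords */
    struct sa_mbx_msg *sam = (struct sa_mbx_msg *)&msgbuf[1];

    msgbuf[0] = IXGBE_VF_IPSEC_ADD;
    sam->flags = xs->xso.flags;
    sam->spi = xs->id.spi;
    sam->proto = xs->id.proto;
    sam->family = xs->props.family;
    if (xs->props.family == AF_INET6)
            memcpy(sam->addr, &xs->id.daddr.a6, sizeof(xs->id.daddr.a6));
    else
            memcpy(sam->addr, &xs->id.daddr.a4, sizeof(xs->id.daddr.a4));
    memcpy(sam->key, xs->aead->alg_key, sizeof(sam->key));
    /* mailbox write follows; on success the PF returns the
     * offload_handle in msgbuf[1] */
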
+ */ + if (!(sam->flags & XFRM_OFFLOAD_INBOUND)) { + err = -EOPNOTSUPP; + goto err_out; + } + + xs = kzalloc(sizeof(*xs), GFP_KERNEL); + if (unlikely(!xs)) { + err = -ENOMEM; + goto err_out; + } + + xs->xso.flags = sam->flags; + xs->id.spi = sam->spi; + xs->id.proto = sam->proto; + xs->props.family = sam->family; + if (xs->props.family == AF_INET6) + memcpy(&xs->id.daddr.a6, sam->addr, sizeof(xs->id.daddr.a6)); + else + memcpy(&xs->id.daddr.a4, sam->addr, sizeof(xs->id.daddr.a4)); + xs->xso.dev = adapter->netdev; + + algo = xfrm_aead_get_byname(aes_gcm_name, IXGBE_IPSEC_AUTH_BITS, 1); + if (unlikely(!algo)) { + err = -ENOENT; + goto err_xs; + } + + aead_len = sizeof(*xs->aead) + IXGBE_IPSEC_KEY_BITS / 8; + xs->aead = kzalloc(aead_len, GFP_KERNEL); + if (unlikely(!xs->aead)) { + err = -ENOMEM; + goto err_xs; + } + + xs->props.ealgo = algo->desc.sadb_alg_id; + xs->geniv = algo->uinfo.aead.geniv; + xs->aead->alg_icv_len = IXGBE_IPSEC_AUTH_BITS; + xs->aead->alg_key_len = IXGBE_IPSEC_KEY_BITS; + memcpy(xs->aead->alg_key, sam->key, sizeof(sam->key)); + memcpy(xs->aead->alg_name, aes_gcm_name, sizeof(aes_gcm_name)); + + /* set up the HW offload */ + err = ixgbe_ipsec_add_sa(xs); + if (err) + goto err_aead; + + pfsa = xs->xso.offload_handle; + if (pfsa < IXGBE_IPSEC_BASE_TX_INDEX) { + sa_idx = pfsa - IXGBE_IPSEC_BASE_RX_INDEX; + ipsec->rx_tbl[sa_idx].vf = vf; + ipsec->rx_tbl[sa_idx].mode |= IXGBE_RXTXMOD_VF; + } else { + sa_idx = pfsa - IXGBE_IPSEC_BASE_TX_INDEX; + ipsec->tx_tbl[sa_idx].vf = vf; + ipsec->tx_tbl[sa_idx].mode |= IXGBE_RXTXMOD_VF; + } + + msgbuf[1] = xs->xso.offload_handle; + + return 0; + +err_aead: + memset(xs->aead, 0, sizeof(*xs->aead)); + kfree(xs->aead); +err_xs: + memset(xs, 0, sizeof(*xs)); + kfree(xs); +err_out: + msgbuf[1] = err; + return err; +} + +/** + * ixgbe_ipsec_vf_del_sa - translate VF request to SA delete + * @adapter: board private structure + * @msgbuf: The message buffer + * @vf: the VF index + * + * Given the offload_handle sent by the VF, look for the related SA table + * entry and use its xs field to call for a delete of the SA. + * + * Note: We silently ignore requests to delete entries that are already + * set to unused because when a VF is set to "DOWN", the PF first + * gets a reset and clears all the VF's entries; then the VF's + * XFRM stack sends individual deletes for each entry, which the + * reset already removed. In the future it might be good to try to + * optimize this so not so many unnecessary delete messages are sent. 
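
The decode that follows relies on the Rx and Tx handle ranges being disjoint; a stand-alone model (constant values assumed to mirror ixgbe_ipsec.h, where the Tx base sits one full Rx table above zero):

    #include <stdio.h>

    /* assumed to mirror ixgbe_ipsec.h */
    #define IXGBE_IPSEC_MAX_SA_COUNT   1024
    #define IXGBE_IPSEC_BASE_RX_INDEX  0
    #define IXGBE_IPSEC_BASE_TX_INDEX  IXGBE_IPSEC_MAX_SA_COUNT

    int main(void)
    {
            unsigned int pfsa = 1027;       /* offload_handle from the add reply */

            if (pfsa < IXGBE_IPSEC_BASE_TX_INDEX)
                    printf("Rx SA, table index %u\n",
                           pfsa - IXGBE_IPSEC_BASE_RX_INDEX);
            else
                    printf("Tx SA, table index %u\n",
                           pfsa - IXGBE_IPSEC_BASE_TX_INDEX);   /* prints 3 */
            return 0;
    }
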
+ * + * Returns 0 or error value + **/ +int ixgbe_ipsec_vf_del_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) +{ + struct ixgbe_ipsec *ipsec = adapter->ipsec; + struct xfrm_state *xs; + u32 pfsa = msgbuf[1]; + u16 sa_idx; + + if (!adapter->vfinfo[vf].trusted) { + e_err(drv, "vf %d attempted to delete an SA\n", vf); + return -EPERM; + } + + if (pfsa < IXGBE_IPSEC_BASE_TX_INDEX) { + struct rx_sa *rsa; + + sa_idx = pfsa - IXGBE_IPSEC_BASE_RX_INDEX; + if (sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT) { + e_err(drv, "vf %d SA index %d out of range\n", + vf, sa_idx); + return -EINVAL; + } + + rsa = &ipsec->rx_tbl[sa_idx]; + + if (!rsa->used) + return 0; + + if (!(rsa->mode & IXGBE_RXTXMOD_VF) || + rsa->vf != vf) { + e_err(drv, "vf %d bad Rx SA index %d\n", vf, sa_idx); + return -ENOENT; + } + + xs = ipsec->rx_tbl[sa_idx].xs; + } else { + struct tx_sa *tsa; + + sa_idx = pfsa - IXGBE_IPSEC_BASE_TX_INDEX; + if (sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT) { + e_err(drv, "vf %d SA index %d out of range\n", + vf, sa_idx); + return -EINVAL; + } + + tsa = &ipsec->tx_tbl[sa_idx]; + + if (!tsa->used) + return 0; + + if (!(tsa->mode & IXGBE_RXTXMOD_VF) || + tsa->vf != vf) { + e_err(drv, "vf %d bad Tx SA index %d\n", vf, sa_idx); + return -ENOENT; + } + + xs = ipsec->tx_tbl[sa_idx].xs; + } + + ixgbe_ipsec_del_sa(xs); + + /* remove the xs that was made-up in the add request */ + memset(xs, 0, sizeof(*xs)); + kfree(xs); + + return 0; +} + +/** * ixgbe_ipsec_tx - setup Tx flags for ipsec offload * @tx_ring: outgoing context * @first: current data packet diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h index 9ef7faadda69..d2b64ff8eb4e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h @@ -26,6 +26,7 @@ enum ixgbe_ipsec_tbl_sel { #define IXGBE_RXMOD_PROTO_ESP 0x00000004 #define IXGBE_RXMOD_DECRYPT 0x00000008 #define IXGBE_RXMOD_IPV6 0x00000010 +#define IXGBE_RXTXMOD_VF 0x00000020 struct rx_sa { struct hlist_node hlist; @@ -37,6 +38,7 @@ struct rx_sa { u8 iptbl_ind; bool used; bool decrypt; + u32 vf; }; struct rx_ip_sa { @@ -49,8 +51,10 @@ struct tx_sa { struct xfrm_state *xs; u32 key[4]; u32 salt; + u32 mode; bool encrypt; bool used; + u32 vf; }; struct ixgbe_ipsec_tx_data { @@ -67,4 +71,13 @@ struct ixgbe_ipsec { struct tx_sa *tx_tbl; DECLARE_HASHTABLE(rx_sa_list, 10); }; + +struct sa_mbx_msg { + __be32 spi; + u8 flags; + u8 proto; + u16 family; + __be32 addr[4]; + u32 key[5]; +}; #endif /* _IXGBE_IPSEC_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index d361f570ca37..62e6499e4146 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -1055,7 +1055,7 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter) int txr_remaining = adapter->num_tx_queues; int xdp_remaining = adapter->num_xdp_queues; int rxr_idx = 0, txr_idx = 0, xdp_idx = 0, v_idx = 0; - int err; + int err, i; /* only one q_vector if MSI-X is disabled. 
*/ if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) @@ -1097,6 +1097,21 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter) xdp_idx += xqpv; } + for (i = 0; i < adapter->num_rx_queues; i++) { + if (adapter->rx_ring[i]) + adapter->rx_ring[i]->ring_idx = i; + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + if (adapter->tx_ring[i]) + adapter->tx_ring[i]->ring_idx = i; + } + + for (i = 0; i < adapter->num_xdp_queues; i++) { + if (adapter->xdp_ring[i]) + adapter->xdp_ring[i]->ring_idx = i; + } + return 0; err_out: diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 6cdd58d9d461..51268772a999 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -34,12 +34,14 @@ #include <net/tc_act/tc_mirred.h> #include <net/vxlan.h> #include <net/mpls.h> +#include <net/xdp_sock.h> #include "ixgbe.h" #include "ixgbe_common.h" #include "ixgbe_dcb_82599.h" #include "ixgbe_sriov.h" #include "ixgbe_model.h" +#include "ixgbe_txrx_common.h" char ixgbe_driver_name[] = "ixgbe"; static const char ixgbe_driver_string[] = @@ -159,7 +161,7 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver"); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); MODULE_VERSION(DRV_VERSION); static struct workqueue_struct *ixgbe_wq; @@ -893,8 +895,8 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction, } } -static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, - u64 qmask) +void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, + u64 qmask) { u32 mask; @@ -1673,9 +1675,9 @@ static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring, * order to populate the hash, checksum, VLAN, timestamp, protocol, and * other fields within the skb. **/ -static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, - union ixgbe_adv_rx_desc *rx_desc, - struct sk_buff *skb) +void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) { struct net_device *dev = rx_ring->netdev; u32 flags = rx_ring->q_vector->adapter->flags; @@ -1708,8 +1710,8 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, skb->protocol = eth_type_trans(skb, dev); } -static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector, - struct sk_buff *skb) +void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector, + struct sk_buff *skb) { napi_gro_receive(&q_vector->napi, skb); } @@ -1868,9 +1870,9 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring, * * Returns true if an error was encountered and skb was freed. 
**/ -static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring, - union ixgbe_adv_rx_desc *rx_desc, - struct sk_buff *skb) +bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) { struct net_device *netdev = rx_ring->netdev; @@ -2186,14 +2188,6 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring, return skb; } -#define IXGBE_XDP_PASS 0 -#define IXGBE_XDP_CONSUMED BIT(0) -#define IXGBE_XDP_TX BIT(1) -#define IXGBE_XDP_REDIR BIT(2) - -static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter, - struct xdp_frame *xdpf); - static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter, struct ixgbe_ring *rx_ring, struct xdp_buff *xdp) @@ -3167,7 +3161,11 @@ int ixgbe_poll(struct napi_struct *napi, int budget) #endif ixgbe_for_each_ring(ring, q_vector->tx) { - if (!ixgbe_clean_tx_irq(q_vector, ring, budget)) + bool wd = ring->xsk_umem ? + ixgbe_clean_xdp_tx_irq(q_vector, ring, budget) : + ixgbe_clean_tx_irq(q_vector, ring, budget); + + if (!wd) clean_complete = false; } @@ -3183,7 +3181,10 @@ int ixgbe_poll(struct napi_struct *napi, int budget) per_ring_budget = budget; ixgbe_for_each_ring(ring, q_vector->rx) { - int cleaned = ixgbe_clean_rx_irq(q_vector, ring, + int cleaned = ring->xsk_umem ? + ixgbe_clean_rx_irq_zc(q_vector, ring, + per_ring_budget) : + ixgbe_clean_rx_irq(q_vector, ring, per_ring_budget); work_done += cleaned; @@ -3475,6 +3476,10 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, u32 txdctl = IXGBE_TXDCTL_ENABLE; u8 reg_idx = ring->reg_idx; + ring->xsk_umem = NULL; + if (ring_is_xdp(ring)) + ring->xsk_umem = ixgbe_xsk_umem(adapter, ring); + /* disable queue to avoid issues while updating state */ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), 0); IXGBE_WRITE_FLUSH(hw); @@ -3579,12 +3584,18 @@ static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter) else mtqc |= IXGBE_MTQC_64VF; } else { - if (tcs > 4) + if (tcs > 4) { mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ; - else if (tcs > 1) + } else if (tcs > 1) { mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ; - else - mtqc = IXGBE_MTQC_64Q_1PB; + } else { + u8 max_txq = adapter->num_tx_queues + + adapter->num_xdp_queues; + if (max_txq > 63) + mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ; + else + mtqc = IXGBE_MTQC_64Q_1PB; + } } IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc); @@ -3707,10 +3718,27 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT; /* configure the packet buffer length */ - if (test_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state)) + if (rx_ring->xsk_umem) { + u32 xsk_buf_len = rx_ring->xsk_umem->chunk_size_nohr - + XDP_PACKET_HEADROOM; + + /* If the MAC support setting RXDCTL.RLPML, the + * SRRCTL[n].BSIZEPKT is set to PAGE_SIZE and + * RXDCTL.RLPML is set to the actual UMEM buffer + * size. If not, then we are stuck with a 1k buffer + * size resolution. In this case frames larger than + * the UMEM buffer size viewed in a 1k resolution will + * be dropped. 
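
A quick stand-alone check of the 1k-resolution arithmetic described above (shift value per ixgbe_type.h; buffer sizes are illustrative, assuming the usual 256-byte XDP_PACKET_HEADROOM):

    #include <stdio.h>

    #define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10  /* 1 KB granularity */

    int main(void)
    {
            unsigned int xsk_buf_len = 2048 - 256;  /* chunk size minus headroom */
            unsigned int bsizepkt = xsk_buf_len >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;

            /* 1792 >> 10 == 1, so the 82599 path advertises only 1024 bytes;
             * frames of 1025..1792 bytes are dropped, which is exactly the
             * limitation the comment above describes. */
            printf("SRRCTL advertises %u bytes for a %u byte UMEM buffer\n",
                   bsizepkt << IXGBE_SRRCTL_BSIZEPKT_SHIFT, xsk_buf_len);
            return 0;
    }
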
+ */ + if (hw->mac.type != ixgbe_mac_82599EB) + srrctl |= PAGE_SIZE >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; + else + srrctl |= xsk_buf_len >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; + } else if (test_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state)) { srrctl |= IXGBE_RXBUFFER_3K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; - else + } else { srrctl |= IXGBE_RXBUFFER_2K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; + } /* configure descriptor type */ srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; @@ -4033,6 +4061,19 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, u32 rxdctl; u8 reg_idx = ring->reg_idx; + xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); + ring->xsk_umem = ixgbe_xsk_umem(adapter, ring); + if (ring->xsk_umem) { + ring->zca.free = ixgbe_zca_free; + WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, + MEM_TYPE_ZERO_COPY, + &ring->zca)); + + } else { + WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, + MEM_TYPE_PAGE_SHARED, NULL)); + } + /* disable queue to avoid use of these values while updating state */ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); rxdctl &= ~IXGBE_RXDCTL_ENABLE; @@ -4082,6 +4123,17 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, #endif } + if (ring->xsk_umem && hw->mac.type != ixgbe_mac_82599EB) { + u32 xsk_buf_len = ring->xsk_umem->chunk_size_nohr - + XDP_PACKET_HEADROOM; + + rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK | + IXGBE_RXDCTL_RLPML_EN); + rxdctl |= xsk_buf_len | IXGBE_RXDCTL_RLPML_EN; + + ring->rx_buf_len = xsk_buf_len; + } + /* initialize rx_buffer_info */ memset(ring->rx_buffer_info, 0, sizeof(struct ixgbe_rx_buffer) * ring->count); @@ -4095,7 +4147,10 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); ixgbe_rx_desc_queue_enable(adapter, ring); - ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring)); + if (ring->xsk_umem) + ixgbe_alloc_rx_buffers_zc(ring, ixgbe_desc_unused(ring)); + else + ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring)); } static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter) @@ -5175,6 +5230,7 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter) struct ixgbe_hw *hw = &adapter->hw; struct hlist_node *node2; struct ixgbe_fdir_filter *filter; + u64 action; spin_lock(&adapter->fdir_perfect_lock); @@ -5183,12 +5239,17 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter) hlist_for_each_entry_safe(filter, node2, &adapter->fdir_filter_list, fdir_node) { + action = filter->action; + if (action != IXGBE_FDIR_DROP_QUEUE && action != 0) + action = + (action >> ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF) - 1; + ixgbe_fdir_write_perfect_filter_82599(hw, &filter->filter, filter->sw_idx, - (filter->action == IXGBE_FDIR_DROP_QUEUE) ? + (action == IXGBE_FDIR_DROP_QUEUE) ? 
IXGBE_FDIR_DROP_QUEUE : - adapter->rx_ring[filter->action]->reg_idx); + adapter->rx_ring[action]->reg_idx); } spin_unlock(&adapter->fdir_perfect_lock); @@ -5203,6 +5264,11 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring) u16 i = rx_ring->next_to_clean; struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i]; + if (rx_ring->xsk_umem) { + ixgbe_xsk_clean_rx_ring(rx_ring); + goto skip_free; + } + /* Free all the Rx ring sk_buffs */ while (i != rx_ring->next_to_alloc) { if (rx_buffer->skb) { @@ -5241,6 +5307,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring) } } +skip_free: rx_ring->next_to_alloc = 0; rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; @@ -5885,6 +5952,11 @@ static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring) u16 i = tx_ring->next_to_clean; struct ixgbe_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i]; + if (tx_ring->xsk_umem) { + ixgbe_xsk_clean_tx_ring(tx_ring); + goto out; + } + while (i != tx_ring->next_to_use) { union ixgbe_adv_tx_desc *eop_desc, *tx_desc; @@ -5936,6 +6008,7 @@ static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring) if (!ring_is_xdp(tx_ring)) netdev_tx_reset_queue(txring_txq(tx_ring)); +out: /* reset next_to_use and next_to_clean */ tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; @@ -6436,7 +6509,7 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, struct device *dev = rx_ring->dev; int orig_node = dev_to_node(dev); int ring_node = -1; - int size, err; + int size; size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; @@ -6473,13 +6546,6 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, rx_ring->queue_index) < 0) goto err; - err = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq, - MEM_TYPE_PAGE_SHARED, NULL); - if (err) { - xdp_rxq_info_unreg(&rx_ring->xdp_rxq); - goto err; - } - rx_ring->xdp_prog = adapter->xdp_prog; return 0; @@ -7777,6 +7843,33 @@ static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter) } /** + * ixgbe_check_fw_error - Check firmware for errors + * @adapter: the adapter private structure + * + * Check firmware errors in register FWSM + */ +static bool ixgbe_check_fw_error(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 fwsm; + + /* read fwsm.ext_err_ind register and log errors */ + fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw)); + + if (fwsm & IXGBE_FWSM_EXT_ERR_IND_MASK || + !(fwsm & IXGBE_FWSM_FW_VAL_BIT)) + e_dev_warn("Warning firmware error detected FWSM: 0x%08X\n", + fwsm); + + if (hw->mac.ops.fw_recovery_mode && hw->mac.ops.fw_recovery_mode(hw)) { + e_dev_err("Firmware recovery mode detected. Limiting functionality. 
Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n"); + return true; + } + + return false; +} + +/** * ixgbe_service_task - manages and runs subtasks * @work: pointer to work_struct containing our data **/ @@ -7794,6 +7887,15 @@ static void ixgbe_service_task(struct work_struct *work) ixgbe_service_event_complete(adapter); return; } + if (ixgbe_check_fw_error(adapter)) { + if (!test_bit(__IXGBE_DOWN, &adapter->state)) { + rtnl_lock(); + unregister_netdev(adapter->netdev); + rtnl_unlock(); + } + ixgbe_service_event_complete(adapter); + return; + } if (adapter->flags2 & IXGBE_FLAG2_UDP_TUN_REREG_NEEDED) { rtnl_lock(); adapter->flags2 &= ~IXGBE_FLAG2_UDP_TUN_REREG_NEEDED; @@ -8068,9 +8170,6 @@ static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size) return __ixgbe_maybe_stop_tx(tx_ring, size); } -#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \ - IXGBE_TXD_CMD_RS) - static int ixgbe_tx_map(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first, const u8 hdr_len) @@ -8423,8 +8522,8 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb, } #endif -static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter, - struct xdp_frame *xdpf) +int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter, + struct xdp_frame *xdpf) { struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()]; struct ixgbe_tx_buffer *tx_buffer; @@ -8646,6 +8745,8 @@ static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb, return NETDEV_TX_OK; tx_ring = ring ? ring : adapter->tx_ring[skb->queue_mapping]; + if (unlikely(test_bit(__IXGBE_TX_DISABLED, &tx_ring->state))) + return NETDEV_TX_BUSY; return ixgbe_xmit_frame_ring(skb, adapter, tx_ring); } @@ -10157,12 +10258,19 @@ static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp) xdp->prog_id = adapter->xdp_prog ? adapter->xdp_prog->aux->id : 0; return 0; + case XDP_QUERY_XSK_UMEM: + return ixgbe_xsk_umem_query(adapter, &xdp->xsk.umem, + xdp->xsk.queue_id); + case XDP_SETUP_XSK_UMEM: + return ixgbe_xsk_umem_setup(adapter, xdp->xsk.umem, + xdp->xsk.queue_id); + default: return -EINVAL; } } -static void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring) +void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring) { /* Force memory writes to complete before letting h/w know there * are new descriptors to fetch. 
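
ixgbe_xdp_ring_update_tail() loses its static above because the zero-copy Rx path added later in this series batches XDP verdicts and rings the doorbell once per poll; a condensed stand-alone model of that accumulation (flag values as in ixgbe_txrx_common.h):

    #include <stdio.h>

    #define IXGBE_XDP_PASS     0
    #define IXGBE_XDP_CONSUMED (1 << 0)
    #define IXGBE_XDP_TX       (1 << 1)
    #define IXGBE_XDP_REDIR    (1 << 2)

    int main(void)
    {
            unsigned int verdicts[] = { IXGBE_XDP_TX, IXGBE_XDP_PASS,
                                        IXGBE_XDP_REDIR };
            unsigned int xdp_xmit = 0;
            unsigned int i;

            for (i = 0; i < 3; i++)         /* one OR per received frame */
                    xdp_xmit |= verdicts[i];

            if (xdp_xmit & IXGBE_XDP_REDIR)
                    printf("flush redirect maps once per poll\n");
            if (xdp_xmit & IXGBE_XDP_TX)
                    printf("one wmb() + one tail write for the whole batch\n");
            return 0;
    }
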
@@ -10192,6 +10300,9 @@ static int ixgbe_xdp_xmit(struct net_device *dev, int n, if (unlikely(!ring)) return -ENXIO; + if (unlikely(test_bit(__IXGBE_TX_DISABLED, &ring->state))) + return -ENXIO; + for (i = 0; i < n; i++) { struct xdp_frame *xdpf = frames[i]; int err; @@ -10253,8 +10364,162 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_features_check = ixgbe_features_check, .ndo_bpf = ixgbe_xdp, .ndo_xdp_xmit = ixgbe_xdp_xmit, + .ndo_xsk_async_xmit = ixgbe_xsk_async_xmit, }; +static void ixgbe_disable_txr_hw(struct ixgbe_adapter *adapter, + struct ixgbe_ring *tx_ring) +{ + unsigned long wait_delay, delay_interval; + struct ixgbe_hw *hw = &adapter->hw; + u8 reg_idx = tx_ring->reg_idx; + int wait_loop; + u32 txdctl; + + IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH); + + /* delay mechanism from ixgbe_disable_tx */ + delay_interval = ixgbe_get_completion_timeout(adapter) / 100; + + wait_loop = IXGBE_MAX_RX_DESC_POLL; + wait_delay = delay_interval; + + while (wait_loop--) { + usleep_range(wait_delay, wait_delay + 10); + wait_delay += delay_interval * 2; + txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx)); + + if (!(txdctl & IXGBE_TXDCTL_ENABLE)) + return; + } + + e_err(drv, "TXDCTL.ENABLE not cleared within the polling period\n"); +} + +static void ixgbe_disable_txr(struct ixgbe_adapter *adapter, + struct ixgbe_ring *tx_ring) +{ + set_bit(__IXGBE_TX_DISABLED, &tx_ring->state); + ixgbe_disable_txr_hw(adapter, tx_ring); +} + +static void ixgbe_disable_rxr_hw(struct ixgbe_adapter *adapter, + struct ixgbe_ring *rx_ring) +{ + unsigned long wait_delay, delay_interval; + struct ixgbe_hw *hw = &adapter->hw; + u8 reg_idx = rx_ring->reg_idx; + int wait_loop; + u32 rxdctl; + + rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); + rxdctl &= ~IXGBE_RXDCTL_ENABLE; + rxdctl |= IXGBE_RXDCTL_SWFLSH; + + /* write value back with RXDCTL.ENABLE bit cleared */ + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); + + /* RXDCTL.EN may not change on 82598 if link is down, so skip it */ + if (hw->mac.type == ixgbe_mac_82598EB && + !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) + return; + + /* delay mechanism from ixgbe_disable_rx */ + delay_interval = ixgbe_get_completion_timeout(adapter) / 100; + + wait_loop = IXGBE_MAX_RX_DESC_POLL; + wait_delay = delay_interval; + + while (wait_loop--) { + usleep_range(wait_delay, wait_delay + 10); + wait_delay += delay_interval * 2; + rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); + + if (!(rxdctl & IXGBE_RXDCTL_ENABLE)) + return; + } + + e_err(drv, "RXDCTL.ENABLE not cleared within the polling period\n"); +} + +static void ixgbe_reset_txr_stats(struct ixgbe_ring *tx_ring) +{ + memset(&tx_ring->stats, 0, sizeof(tx_ring->stats)); + memset(&tx_ring->tx_stats, 0, sizeof(tx_ring->tx_stats)); +} + +static void ixgbe_reset_rxr_stats(struct ixgbe_ring *rx_ring) +{ + memset(&rx_ring->stats, 0, sizeof(rx_ring->stats)); + memset(&rx_ring->rx_stats, 0, sizeof(rx_ring->rx_stats)); +} + +/** + * ixgbe_txrx_ring_disable - Disable Rx/Tx/XDP Tx rings + * @adapter: adapter structure + * @ring: ring index + * + * This function disables a certain Rx/Tx/XDP Tx ring. The function + * assumes that the netdev is running. 
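
Both hw-disable helpers above poll TXDCTL/RXDCTL with a growing delay; a stand-alone model of the schedule shows that the ten polls are sized to span roughly one completion timeout (the timeout value below is illustrative):

    #include <stdio.h>

    #define IXGBE_MAX_RX_DESC_POLL 10       /* as in ixgbe_main.c */

    int main(void)
    {
            unsigned long completion_timeout_us = 10000;    /* illustrative */
            unsigned long delay_interval = completion_timeout_us / 100;
            unsigned long wait_delay = delay_interval;
            unsigned long total = 0;
            int wait_loop = IXGBE_MAX_RX_DESC_POLL;

            while (wait_loop--) {
                    total += wait_delay;            /* the usleep_range() slot */
                    wait_delay += delay_interval * 2;
            }
            /* delays of d, 3d, 5d, ..., 19d sum to 100d = the timeout */
            printf("worst-case poll time: %lu us\n", total);
            return 0;
    }
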
+ **/ +void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring) +{ + struct ixgbe_ring *rx_ring, *tx_ring, *xdp_ring; + + rx_ring = adapter->rx_ring[ring]; + tx_ring = adapter->tx_ring[ring]; + xdp_ring = adapter->xdp_ring[ring]; + + ixgbe_disable_txr(adapter, tx_ring); + if (xdp_ring) + ixgbe_disable_txr(adapter, xdp_ring); + ixgbe_disable_rxr_hw(adapter, rx_ring); + + if (xdp_ring) + synchronize_sched(); + + /* Rx/Tx/XDP Tx share the same napi context. */ + napi_disable(&rx_ring->q_vector->napi); + + ixgbe_clean_tx_ring(tx_ring); + if (xdp_ring) + ixgbe_clean_tx_ring(xdp_ring); + ixgbe_clean_rx_ring(rx_ring); + + ixgbe_reset_txr_stats(tx_ring); + if (xdp_ring) + ixgbe_reset_txr_stats(xdp_ring); + ixgbe_reset_rxr_stats(rx_ring); +} + +/** + * ixgbe_txrx_ring_enable - Enable Rx/Tx/XDP Tx rings + * @adapter: adapter structure + * @ring: ring index + * + * This function enables a certain Rx/Tx/XDP Tx ring. The function + * assumes that the netdev is running. + **/ +void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring) +{ + struct ixgbe_ring *rx_ring, *tx_ring, *xdp_ring; + + rx_ring = adapter->rx_ring[ring]; + tx_ring = adapter->tx_ring[ring]; + xdp_ring = adapter->xdp_ring[ring]; + + /* Rx/Tx/XDP Tx share the same napi context. */ + napi_enable(&rx_ring->q_vector->napi); + + ixgbe_configure_tx_ring(adapter, tx_ring); + if (xdp_ring) + ixgbe_configure_tx_ring(adapter, xdp_ring); + ixgbe_configure_rx_ring(adapter, rx_ring); + + clear_bit(__IXGBE_TX_DISABLED, &tx_ring->state); + if (xdp_ring) + clear_bit(__IXGBE_TX_DISABLED, &xdp_ring->state); +} + /** * ixgbe_enumerate_functions - Get the number of ports this device has * @adapter: adapter structure @@ -10693,6 +10958,11 @@ skip_sriov: if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) netdev->features |= NETIF_F_LRO; + if (ixgbe_check_fw_error(adapter)) { + err = -EIO; + goto err_sw_init; + } + /* make sure the EEPROM is good */ if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) { e_dev_err("The EEPROM Checksum Is Not Valid\n"); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h index e085b6520dac..a148534d7256 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h @@ -50,6 +50,7 @@ enum ixgbe_pfvf_api_rev { ixgbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */ ixgbe_mbox_api_12, /* API version 1.2, linux/freebsd VF driver */ ixgbe_mbox_api_13, /* API version 1.3, linux/freebsd VF driver */ + ixgbe_mbox_api_14, /* API version 1.4, linux/freebsd VF driver */ /* This value should always be last */ ixgbe_mbox_api_unknown, /* indicates that API version is not known */ }; @@ -80,6 +81,10 @@ enum ixgbe_pfvf_api_rev { #define IXGBE_VF_UPDATE_XCAST_MODE 0x0c +/* mailbox API, version 1.4 VF requests */ +#define IXGBE_VF_IPSEC_ADD 0x0d +#define IXGBE_VF_IPSEC_DEL 0x0e + /* length of permanent address message returned from PF */ #define IXGBE_VF_PERMADDR_MSG_LEN 4 /* word in permanent address message with the current multicast type */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 3c6f01c41b78..af25a8fffeb8 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -496,6 +496,7 @@ static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) case ixgbe_mbox_api_11: case ixgbe_mbox_api_12: case ixgbe_mbox_api_13: + case ixgbe_mbox_api_14: /* Version 1.1 supports jumbo frames on VFs if
PF has * jumbo frames enabled which means legacy VFs are * disabled @@ -728,6 +729,9 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) /* reset multicast table array for vf */ adapter->vfinfo[vf].num_vf_mc_hashes = 0; + /* clear any ipsec table info */ + ixgbe_ipsec_vf_clear(adapter, vf); + /* Flush and reset the mta with the new values */ ixgbe_set_rx_mode(adapter->netdev); @@ -1000,6 +1004,7 @@ static int ixgbe_negotiate_vf_api(struct ixgbe_adapter *adapter, case ixgbe_mbox_api_11: case ixgbe_mbox_api_12: case ixgbe_mbox_api_13: + case ixgbe_mbox_api_14: adapter->vfinfo[vf].vf_api = api; return 0; default: @@ -1025,6 +1030,7 @@ static int ixgbe_get_vf_queues(struct ixgbe_adapter *adapter, case ixgbe_mbox_api_11: case ixgbe_mbox_api_12: case ixgbe_mbox_api_13: + case ixgbe_mbox_api_14: break; default: return -1; @@ -1065,6 +1071,7 @@ static int ixgbe_get_vf_reta(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) /* verify the PF is supporting the correct API */ switch (adapter->vfinfo[vf].vf_api) { + case ixgbe_mbox_api_14: case ixgbe_mbox_api_13: case ixgbe_mbox_api_12: break; @@ -1097,6 +1104,7 @@ static int ixgbe_get_vf_rss_key(struct ixgbe_adapter *adapter, /* verify the PF is supporting the correct API */ switch (adapter->vfinfo[vf].vf_api) { + case ixgbe_mbox_api_14: case ixgbe_mbox_api_13: case ixgbe_mbox_api_12: break; @@ -1122,8 +1130,9 @@ static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter, /* promisc introduced in 1.3 version */ if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC) return -EOPNOTSUPP; - /* Fall threw */ + /* Fall through */ case ixgbe_mbox_api_13: + case ixgbe_mbox_api_14: break; default: return -EOPNOTSUPP; @@ -1249,6 +1258,12 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) case IXGBE_VF_UPDATE_XCAST_MODE: retval = ixgbe_update_vf_xcast_mode(adapter, msgbuf, vf); break; + case IXGBE_VF_IPSEC_ADD: + retval = ixgbe_ipsec_vf_add_sa(adapter, msgbuf, vf); + break; + case IXGBE_VF_IPSEC_DEL: + retval = ixgbe_ipsec_vf_del_sa(adapter, msgbuf, vf); + break; default: e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]); retval = IXGBE_ERR_MBX; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h new file mode 100644 index 000000000000..53d4089f5644 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2018 Intel Corporation. 
*/ + +#ifndef _IXGBE_TXRX_COMMON_H_ +#define _IXGBE_TXRX_COMMON_H_ + +#define IXGBE_XDP_PASS 0 +#define IXGBE_XDP_CONSUMED BIT(0) +#define IXGBE_XDP_TX BIT(1) +#define IXGBE_XDP_REDIR BIT(2) + +#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \ + IXGBE_TXD_CMD_RS) + +int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter, + struct xdp_frame *xdpf); +bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb); +void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb); +void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector, + struct sk_buff *skb); +void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring); +void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, u64 qmask); + +void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring); +void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring); + +struct xdp_umem *ixgbe_xsk_umem(struct ixgbe_adapter *adapter, + struct ixgbe_ring *ring); +int ixgbe_xsk_umem_query(struct ixgbe_adapter *adapter, struct xdp_umem **umem, + u16 qid); +int ixgbe_xsk_umem_setup(struct ixgbe_adapter *adapter, struct xdp_umem *umem, + u16 qid); + +void ixgbe_zca_free(struct zero_copy_allocator *alloc, unsigned long handle); + +void ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 cleaned_count); +int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector, + struct ixgbe_ring *rx_ring, + const int budget); +void ixgbe_xsk_clean_rx_ring(struct ixgbe_ring *rx_ring); +bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector, + struct ixgbe_ring *tx_ring, int napi_budget); +int ixgbe_xsk_async_xmit(struct net_device *dev, u32 queue_id); +void ixgbe_xsk_clean_tx_ring(struct ixgbe_ring *tx_ring); + +#endif /* _IXGBE_TXRX_COMMON_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 41bcbb337e83..84f2dba39e36 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -924,6 +924,9 @@ struct ixgbe_nvm_version { /* Firmware Semaphore Register */ #define IXGBE_FWSM_MODE_MASK 0xE #define IXGBE_FWSM_FW_MODE_PT 0x4 +#define IXGBE_FWSM_FW_NVM_RECOVERY_MODE BIT(5) +#define IXGBE_FWSM_EXT_ERR_IND_MASK 0x01F80000 +#define IXGBE_FWSM_FW_VAL_BIT BIT(15) /* ARC Subsystem registers */ #define IXGBE_HICR 0x15F00 @@ -3461,6 +3464,7 @@ struct ixgbe_mac_operations { const char *); s32 (*get_thermal_sensor_data)(struct ixgbe_hw *); s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw); + bool (*fw_recovery_mode)(struct ixgbe_hw *hw); void (*disable_rx)(struct ixgbe_hw *hw); void (*enable_rx)(struct ixgbe_hw *hw); void (*set_source_address_pruning)(struct ixgbe_hw *, bool, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index a8148c7126e5..10dbaf4f6e80 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -1247,6 +1247,20 @@ static s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw) return 0; } +/** + * ixgbe_fw_recovery_mode_X550 - Check FW NVM recovery mode + * @hw: pointer to hardware structure + * + * Returns true if in FW NVM recovery mode.
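
A stand-alone model of the two FWSM checks this op enables, using the bit definitions added to ixgbe_type.h above (the sample register value is illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define IXGBE_FWSM_FW_NVM_RECOVERY_MODE (1u << 5)
    #define IXGBE_FWSM_EXT_ERR_IND_MASK     0x01F80000u
    #define IXGBE_FWSM_FW_VAL_BIT           (1u << 15)

    int main(void)
    {
            uint32_t fwsm = (1u << 15) | (1u << 5); /* valid FW, but in recovery */

            if ((fwsm & IXGBE_FWSM_EXT_ERR_IND_MASK) ||
                !(fwsm & IXGBE_FWSM_FW_VAL_BIT))
                    printf("firmware error indicated\n");

            if (fwsm & IXGBE_FWSM_FW_NVM_RECOVERY_MODE)
                    printf("NVM recovery: driver unregisters the netdev\n");
            return 0;
    }
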
+ */ +static bool ixgbe_fw_recovery_mode_X550(struct ixgbe_hw *hw) +{ + u32 fwsm; + + fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw)); + return !!(fwsm & IXGBE_FWSM_FW_NVM_RECOVERY_MODE); +} + /** ixgbe_disable_rx_x550 - Disable RX unit * * Enables the Rx DMA unit for x550 @@ -3816,6 +3830,7 @@ static s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, .enable_rx_buff = &ixgbe_enable_rx_buff_generic, \ .get_thermal_sensor_data = NULL, \ .init_thermal_sensor_thresh = NULL, \ + .fw_recovery_mode = &ixgbe_fw_recovery_mode_X550, \ .enable_rx = &ixgbe_enable_rx_generic, \ .disable_rx = &ixgbe_disable_rx_x550, \ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c new file mode 100644 index 000000000000..65c3e2c979d4 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c @@ -0,0 +1,801 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2018 Intel Corporation. */ + +#include <linux/bpf_trace.h> +#include <net/xdp_sock.h> +#include <net/xdp.h> + +#include "ixgbe.h" +#include "ixgbe_txrx_common.h" + +struct xdp_umem *ixgbe_xsk_umem(struct ixgbe_adapter *adapter, + struct ixgbe_ring *ring) +{ + bool xdp_on = READ_ONCE(adapter->xdp_prog); + int qid = ring->ring_idx; + + if (!adapter->xsk_umems || qid >= adapter->num_xsk_umems || + !adapter->xsk_umems[qid] || !xdp_on) + return NULL; + + return adapter->xsk_umems[qid]; +} + +static int ixgbe_alloc_xsk_umems(struct ixgbe_adapter *adapter) +{ + if (adapter->xsk_umems) + return 0; + + adapter->num_xsk_umems_used = 0; + adapter->num_xsk_umems = adapter->num_rx_queues; + adapter->xsk_umems = kcalloc(adapter->num_xsk_umems, + sizeof(*adapter->xsk_umems), + GFP_KERNEL); + if (!adapter->xsk_umems) { + adapter->num_xsk_umems = 0; + return -ENOMEM; + } + + return 0; +} + +static int ixgbe_add_xsk_umem(struct ixgbe_adapter *adapter, + struct xdp_umem *umem, + u16 qid) +{ + int err; + + err = ixgbe_alloc_xsk_umems(adapter); + if (err) + return err; + + adapter->xsk_umems[qid] = umem; + adapter->num_xsk_umems_used++; + + return 0; +} + +static void ixgbe_remove_xsk_umem(struct ixgbe_adapter *adapter, u16 qid) +{ + adapter->xsk_umems[qid] = NULL; + adapter->num_xsk_umems_used--; + + if (adapter->num_xsk_umems == 0) { + kfree(adapter->xsk_umems); + adapter->xsk_umems = NULL; + adapter->num_xsk_umems = 0; + } +} + +static int ixgbe_xsk_umem_dma_map(struct ixgbe_adapter *adapter, + struct xdp_umem *umem) +{ + struct device *dev = &adapter->pdev->dev; + unsigned int i, j; + dma_addr_t dma; + + for (i = 0; i < umem->npgs; i++) { + dma = dma_map_page_attrs(dev, umem->pgs[i], 0, PAGE_SIZE, + DMA_BIDIRECTIONAL, IXGBE_RX_DMA_ATTR); + if (dma_mapping_error(dev, dma)) + goto out_unmap; + + umem->pages[i].dma = dma; + } + + return 0; + +out_unmap: + for (j = 0; j < i; j++) { + dma_unmap_page_attrs(dev, umem->pages[j].dma, PAGE_SIZE, + DMA_BIDIRECTIONAL, IXGBE_RX_DMA_ATTR); + umem->pages[j].dma = 0; + } + + return -1; +} + +static void ixgbe_xsk_umem_dma_unmap(struct ixgbe_adapter *adapter, + struct xdp_umem *umem) +{ + struct device *dev = &adapter->pdev->dev; + unsigned int i; + + for (i = 0; i < umem->npgs; i++) { + dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE, + DMA_BIDIRECTIONAL, IXGBE_RX_DMA_ATTR); + + umem->pages[i].dma = 0; + } +} + +static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter, + struct xdp_umem *umem, + u16 qid) +{ + struct xdp_umem_fq_reuse *reuseq; + bool if_running; + int err; + + if (qid >= adapter->num_rx_queues) + return -EINVAL; + + if (adapter->xsk_umems) { +
if (qid >= adapter->num_xsk_umems) + return -EINVAL; + if (adapter->xsk_umems[qid]) + return -EBUSY; + } + + reuseq = xsk_reuseq_prepare(adapter->rx_ring[0]->count); + if (!reuseq) + return -ENOMEM; + + xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq)); + + err = ixgbe_xsk_umem_dma_map(adapter, umem); + if (err) + return err; + + if_running = netif_running(adapter->netdev) && + READ_ONCE(adapter->xdp_prog); + + if (if_running) + ixgbe_txrx_ring_disable(adapter, qid); + + err = ixgbe_add_xsk_umem(adapter, umem, qid); + + if (if_running) + ixgbe_txrx_ring_enable(adapter, qid); + + return err; +} + +static int ixgbe_xsk_umem_disable(struct ixgbe_adapter *adapter, u16 qid) +{ + bool if_running; + + if (!adapter->xsk_umems || qid >= adapter->num_xsk_umems || + !adapter->xsk_umems[qid]) + return -EINVAL; + + if_running = netif_running(adapter->netdev) && + READ_ONCE(adapter->xdp_prog); + + if (if_running) + ixgbe_txrx_ring_disable(adapter, qid); + + ixgbe_xsk_umem_dma_unmap(adapter, adapter->xsk_umems[qid]); + ixgbe_remove_xsk_umem(adapter, qid); + + if (if_running) + ixgbe_txrx_ring_enable(adapter, qid); + + return 0; +} + +int ixgbe_xsk_umem_query(struct ixgbe_adapter *adapter, struct xdp_umem **umem, + u16 qid) +{ + if (qid >= adapter->num_rx_queues) + return -EINVAL; + + if (adapter->xsk_umems) { + if (qid >= adapter->num_xsk_umems) + return -EINVAL; + *umem = adapter->xsk_umems[qid]; + return 0; + } + + *umem = NULL; + return 0; +} + +int ixgbe_xsk_umem_setup(struct ixgbe_adapter *adapter, struct xdp_umem *umem, + u16 qid) +{ + return umem ? ixgbe_xsk_umem_enable(adapter, umem, qid) : + ixgbe_xsk_umem_disable(adapter, qid); +} + +static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter, + struct ixgbe_ring *rx_ring, + struct xdp_buff *xdp) +{ + int err, result = IXGBE_XDP_PASS; + struct bpf_prog *xdp_prog; + struct xdp_frame *xdpf; + u32 act; + + rcu_read_lock(); + xdp_prog = READ_ONCE(rx_ring->xdp_prog); + act = bpf_prog_run_xdp(xdp_prog, xdp); + xdp->handle += xdp->data - xdp->data_hard_start; + switch (act) { + case XDP_PASS: + break; + case XDP_TX: + xdpf = convert_to_xdp_frame(xdp); + if (unlikely(!xdpf)) { + result = IXGBE_XDP_CONSUMED; + break; + } + result = ixgbe_xmit_xdp_ring(adapter, xdpf); + break; + case XDP_REDIRECT: + err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); + result = !err ? IXGBE_XDP_REDIR : IXGBE_XDP_CONSUMED; + break; + default: + bpf_warn_invalid_xdp_action(act); + /* fallthrough */ + case XDP_ABORTED: + trace_xdp_exception(rx_ring->netdev, xdp_prog, act); + /* fallthrough -- handle aborts by dropping packet */ + case XDP_DROP: + result = IXGBE_XDP_CONSUMED; + break; + } + rcu_read_unlock(); + return result; +} + +static struct +ixgbe_rx_buffer *ixgbe_get_rx_buffer_zc(struct ixgbe_ring *rx_ring, + unsigned int size) +{ + struct ixgbe_rx_buffer *bi; + + bi = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + bi->dma, 0, + size, + DMA_BIDIRECTIONAL); + + return bi; +} + +static void ixgbe_reuse_rx_buffer_zc(struct ixgbe_ring *rx_ring, + struct ixgbe_rx_buffer *obi) +{ + unsigned long mask = (unsigned long)rx_ring->xsk_umem->chunk_mask; + u64 hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM; + u16 nta = rx_ring->next_to_alloc; + struct ixgbe_rx_buffer *nbi; + + nbi = &rx_ring->rx_buffer_info[rx_ring->next_to_alloc]; + /* update, and store next to alloc */ + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? 
nta : 0; + + /* transfer page from old buffer to new buffer */ + nbi->dma = obi->dma & mask; + nbi->dma += hr; + + nbi->addr = (void *)((unsigned long)obi->addr & mask); + nbi->addr += hr; + + nbi->handle = obi->handle & mask; + nbi->handle += rx_ring->xsk_umem->headroom; + + obi->addr = NULL; + obi->skb = NULL; +} + +void ixgbe_zca_free(struct zero_copy_allocator *alloc, unsigned long handle) +{ + struct ixgbe_rx_buffer *bi; + struct ixgbe_ring *rx_ring; + u64 hr, mask; + u16 nta; + + rx_ring = container_of(alloc, struct ixgbe_ring, zca); + hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM; + mask = rx_ring->xsk_umem->chunk_mask; + + nta = rx_ring->next_to_alloc; + bi = rx_ring->rx_buffer_info; + + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; + + handle &= mask; + + bi->dma = xdp_umem_get_dma(rx_ring->xsk_umem, handle); + bi->dma += hr; + + bi->addr = xdp_umem_get_data(rx_ring->xsk_umem, handle); + bi->addr += hr; + + bi->handle = (u64)handle + rx_ring->xsk_umem->headroom; +} + +static bool ixgbe_alloc_buffer_zc(struct ixgbe_ring *rx_ring, + struct ixgbe_rx_buffer *bi) +{ + struct xdp_umem *umem = rx_ring->xsk_umem; + void *addr = bi->addr; + u64 handle, hr; + + if (addr) + return true; + + if (!xsk_umem_peek_addr(umem, &handle)) { + rx_ring->rx_stats.alloc_rx_page_failed++; + return false; + } + + hr = umem->headroom + XDP_PACKET_HEADROOM; + + bi->dma = xdp_umem_get_dma(umem, handle); + bi->dma += hr; + + bi->addr = xdp_umem_get_data(umem, handle); + bi->addr += hr; + + bi->handle = handle + umem->headroom; + + xsk_umem_discard_addr(umem); + return true; +} + +static bool ixgbe_alloc_buffer_slow_zc(struct ixgbe_ring *rx_ring, + struct ixgbe_rx_buffer *bi) +{ + struct xdp_umem *umem = rx_ring->xsk_umem; + u64 handle, hr; + + if (!xsk_umem_peek_addr_rq(umem, &handle)) { + rx_ring->rx_stats.alloc_rx_page_failed++; + return false; + } + + handle &= rx_ring->xsk_umem->chunk_mask; + + hr = umem->headroom + XDP_PACKET_HEADROOM; + + bi->dma = xdp_umem_get_dma(umem, handle); + bi->dma += hr; + + bi->addr = xdp_umem_get_data(umem, handle); + bi->addr += hr; + + bi->handle = handle + umem->headroom; + + xsk_umem_discard_addr_rq(umem); + return true; +} + +static __always_inline bool +__ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 cleaned_count, + bool alloc(struct ixgbe_ring *rx_ring, + struct ixgbe_rx_buffer *bi)) +{ + union ixgbe_adv_rx_desc *rx_desc; + struct ixgbe_rx_buffer *bi; + u16 i = rx_ring->next_to_use; + bool ok = true; + + /* nothing to do */ + if (!cleaned_count) + return true; + + rx_desc = IXGBE_RX_DESC(rx_ring, i); + bi = &rx_ring->rx_buffer_info[i]; + i -= rx_ring->count; + + do { + if (!alloc(rx_ring, bi)) { + ok = false; + break; + } + + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, bi->dma, + bi->page_offset, + rx_ring->rx_buf_len, + DMA_BIDIRECTIONAL); + + /* Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. 
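
The fill loop around this comment walks the ring with the driver's negative-index idiom: i is biased by -count so the wrap test is simply `if (!i)`. A stand-alone model (ring size and positions are illustrative):

    #include <stdio.h>

    int main(void)
    {
            int count = 8;                  /* illustrative ring size */
            int next_to_use = 5;
            int i = next_to_use - count;    /* runs -3..-1; 0 means wrap */
            int fill = 6;                   /* descriptors to refill */

            while (fill--) {
                    printf("fill slot %d\n", i + count);
                    i++;
                    if (!i)
                            i -= count;     /* rewind to slot 0 */
            }
            printf("new next_to_use = %d\n", i + count);    /* 3 */
            return 0;
    }
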
+ */ + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); + + rx_desc++; + bi++; + i++; + if (unlikely(!i)) { + rx_desc = IXGBE_RX_DESC(rx_ring, 0); + bi = rx_ring->rx_buffer_info; + i -= rx_ring->count; + } + + /* clear the length for the next_to_use descriptor */ + rx_desc->wb.upper.length = 0; + + cleaned_count--; + } while (cleaned_count); + + i += rx_ring->count; + + if (rx_ring->next_to_use != i) { + rx_ring->next_to_use = i; + + /* update next to alloc since we have filled the ring */ + rx_ring->next_to_alloc = i; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + writel(i, rx_ring->tail); + } + + return ok; +} + +void ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 count) +{ + __ixgbe_alloc_rx_buffers_zc(rx_ring, count, + ixgbe_alloc_buffer_slow_zc); +} + +static bool ixgbe_alloc_rx_buffers_fast_zc(struct ixgbe_ring *rx_ring, + u16 count) +{ + return __ixgbe_alloc_rx_buffers_zc(rx_ring, count, + ixgbe_alloc_buffer_zc); +} + +static struct sk_buff *ixgbe_construct_skb_zc(struct ixgbe_ring *rx_ring, + struct ixgbe_rx_buffer *bi, + struct xdp_buff *xdp) +{ + unsigned int metasize = xdp->data - xdp->data_meta; + unsigned int datasize = xdp->data_end - xdp->data; + struct sk_buff *skb; + + /* allocate a skb to store the frags */ + skb = __napi_alloc_skb(&rx_ring->q_vector->napi, + xdp->data_end - xdp->data_hard_start, + GFP_ATOMIC | __GFP_NOWARN); + if (unlikely(!skb)) + return NULL; + + skb_reserve(skb, xdp->data - xdp->data_hard_start); + memcpy(__skb_put(skb, datasize), xdp->data, datasize); + if (metasize) + skb_metadata_set(skb, metasize); + + ixgbe_reuse_rx_buffer_zc(rx_ring, bi); + return skb; +} + +static void ixgbe_inc_ntc(struct ixgbe_ring *rx_ring) +{ + u32 ntc = rx_ring->next_to_clean + 1; + + ntc = (ntc < rx_ring->count) ? 
ntc : 0; + rx_ring->next_to_clean = ntc; + prefetch(IXGBE_RX_DESC(rx_ring, ntc)); +} + +int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector, + struct ixgbe_ring *rx_ring, + const int budget) +{ + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + struct ixgbe_adapter *adapter = q_vector->adapter; + u16 cleaned_count = ixgbe_desc_unused(rx_ring); + unsigned int xdp_res, xdp_xmit = 0; + bool failure = false; + struct sk_buff *skb; + struct xdp_buff xdp; + + xdp.rxq = &rx_ring->xdp_rxq; + + while (likely(total_rx_packets < budget)) { + union ixgbe_adv_rx_desc *rx_desc; + struct ixgbe_rx_buffer *bi; + unsigned int size; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) { + failure = failure || + !ixgbe_alloc_rx_buffers_fast_zc(rx_ring, + cleaned_count); + cleaned_count = 0; + } + + rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean); + size = le16_to_cpu(rx_desc->wb.upper.length); + if (!size) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * descriptor has been written back + */ + dma_rmb(); + + bi = ixgbe_get_rx_buffer_zc(rx_ring, size); + + if (unlikely(!ixgbe_test_staterr(rx_desc, + IXGBE_RXD_STAT_EOP))) { + struct ixgbe_rx_buffer *next_bi; + + ixgbe_reuse_rx_buffer_zc(rx_ring, bi); + ixgbe_inc_ntc(rx_ring); + next_bi = + &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + next_bi->skb = ERR_PTR(-EINVAL); + continue; + } + + if (unlikely(bi->skb)) { + ixgbe_reuse_rx_buffer_zc(rx_ring, bi); + ixgbe_inc_ntc(rx_ring); + continue; + } + + xdp.data = bi->addr; + xdp.data_meta = xdp.data; + xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM; + xdp.data_end = xdp.data + size; + xdp.handle = bi->handle; + + xdp_res = ixgbe_run_xdp_zc(adapter, rx_ring, &xdp); + + if (xdp_res) { + if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) { + xdp_xmit |= xdp_res; + bi->addr = NULL; + bi->skb = NULL; + } else { + ixgbe_reuse_rx_buffer_zc(rx_ring, bi); + } + total_rx_packets++; + total_rx_bytes += size; + + cleaned_count++; + ixgbe_inc_ntc(rx_ring); + continue; + } + + /* XDP_PASS path */ + skb = ixgbe_construct_skb_zc(rx_ring, bi, &xdp); + if (!skb) { + rx_ring->rx_stats.alloc_rx_buff_failed++; + break; + } + + cleaned_count++; + ixgbe_inc_ntc(rx_ring); + + if (eth_skb_pad(skb)) + continue; + + total_rx_bytes += skb->len; + total_rx_packets++; + + ixgbe_process_skb_fields(rx_ring, rx_desc, skb); + ixgbe_rx_skb(q_vector, skb); + } + + if (xdp_xmit & IXGBE_XDP_REDIR) + xdp_do_flush_map(); + + if (xdp_xmit & IXGBE_XDP_TX) { + struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()]; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. + */ + wmb(); + writel(ring->next_to_use, ring->tail); + } + + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->stats.packets += total_rx_packets; + rx_ring->stats.bytes += total_rx_bytes; + u64_stats_update_end(&rx_ring->syncp); + q_vector->rx.total_packets += total_rx_packets; + q_vector->rx.total_bytes += total_rx_bytes; + + return failure ? 
budget : (int)total_rx_packets; +} + +void ixgbe_xsk_clean_rx_ring(struct ixgbe_ring *rx_ring) +{ + u16 i = rx_ring->next_to_clean; + struct ixgbe_rx_buffer *bi = &rx_ring->rx_buffer_info[i]; + + while (i != rx_ring->next_to_alloc) { + xsk_umem_fq_reuse(rx_ring->xsk_umem, bi->handle); + i++; + bi++; + if (i == rx_ring->count) { + i = 0; + bi = rx_ring->rx_buffer_info; + } + } +} + +static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget) +{ + union ixgbe_adv_tx_desc *tx_desc = NULL; + struct ixgbe_tx_buffer *tx_bi; + bool work_done = true; + u32 len, cmd_type; + dma_addr_t dma; + + while (budget-- > 0) { + if (unlikely(!ixgbe_desc_unused(xdp_ring))) { + work_done = false; + break; + } + + if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &dma, &len)) + break; + + dma_sync_single_for_device(xdp_ring->dev, dma, len, + DMA_BIDIRECTIONAL); + + tx_bi = &xdp_ring->tx_buffer_info[xdp_ring->next_to_use]; + tx_bi->bytecount = len; + tx_bi->xdpf = NULL; + + tx_desc = IXGBE_TX_DESC(xdp_ring, xdp_ring->next_to_use); + tx_desc->read.buffer_addr = cpu_to_le64(dma); + + /* put descriptor type bits */ + cmd_type = IXGBE_ADVTXD_DTYP_DATA | + IXGBE_ADVTXD_DCMD_DEXT | + IXGBE_ADVTXD_DCMD_IFCS; + cmd_type |= len | IXGBE_TXD_CMD; + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); + tx_desc->read.olinfo_status = + cpu_to_le32(len << IXGBE_ADVTXD_PAYLEN_SHIFT); + + xdp_ring->next_to_use++; + if (xdp_ring->next_to_use == xdp_ring->count) + xdp_ring->next_to_use = 0; + } + + if (tx_desc) { + ixgbe_xdp_ring_update_tail(xdp_ring); + xsk_umem_consume_tx_done(xdp_ring->xsk_umem); + } + + return !!budget && work_done; +} + +static void ixgbe_clean_xdp_tx_buffer(struct ixgbe_ring *tx_ring, + struct ixgbe_tx_buffer *tx_bi) +{ + xdp_return_frame(tx_bi->xdpf); + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_bi, dma), + dma_unmap_len(tx_bi, len), DMA_TO_DEVICE); + dma_unmap_len_set(tx_bi, len, 0); +} + +bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector, + struct ixgbe_ring *tx_ring, int napi_budget) +{ + unsigned int total_packets = 0, total_bytes = 0; + u32 i = tx_ring->next_to_clean, xsk_frames = 0; + unsigned int budget = q_vector->tx.work_limit; + struct xdp_umem *umem = tx_ring->xsk_umem; + union ixgbe_adv_tx_desc *tx_desc; + struct ixgbe_tx_buffer *tx_bi; + bool xmit_done; + + tx_bi = &tx_ring->tx_buffer_info[i]; + tx_desc = IXGBE_TX_DESC(tx_ring, i); + i -= tx_ring->count; + + do { + if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD))) + break; + + total_bytes += tx_bi->bytecount; + total_packets += tx_bi->gso_segs; + + if (tx_bi->xdpf) + ixgbe_clean_xdp_tx_buffer(tx_ring, tx_bi); + else + xsk_frames++; + + tx_bi->xdpf = NULL; + total_bytes += tx_bi->bytecount; + + tx_bi++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_bi = tx_ring->tx_buffer_info; + tx_desc = IXGBE_TX_DESC(tx_ring, 0); + } + + /* issue prefetch for next Tx descriptor */ + prefetch(tx_desc); + + /* update budget accounting */ + budget--; + } while (likely(budget)); + + i += tx_ring->count; + tx_ring->next_to_clean = i; + + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->stats.bytes += total_bytes; + tx_ring->stats.packets += total_packets; + u64_stats_update_end(&tx_ring->syncp); + q_vector->tx.total_bytes += total_bytes; + q_vector->tx.total_packets += total_packets; + + if (xsk_frames) + xsk_umem_complete_tx(umem, xsk_frames); + + xmit_done = ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit); + return budget > 0 && xmit_done; +} + +int ixgbe_xsk_async_xmit(struct net_device *dev, u32 
qid) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_ring *ring; + + if (test_bit(__IXGBE_DOWN, &adapter->state)) + return -ENETDOWN; + + if (!READ_ONCE(adapter->xdp_prog)) + return -ENXIO; + + if (qid >= adapter->num_xdp_queues) + return -ENXIO; + + if (!adapter->xsk_umems || !adapter->xsk_umems[qid]) + return -ENXIO; + + ring = adapter->xdp_ring[qid]; + if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) { + u64 eics = BIT_ULL(ring->q_vector->v_idx); + + ixgbe_irq_rearm_queues(adapter, eics); + } + + return 0; +} + +void ixgbe_xsk_clean_tx_ring(struct ixgbe_ring *tx_ring) +{ + u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use; + struct xdp_umem *umem = tx_ring->xsk_umem; + struct ixgbe_tx_buffer *tx_bi; + u32 xsk_frames = 0; + + while (ntc != ntu) { + tx_bi = &tx_ring->tx_buffer_info[ntc]; + + if (tx_bi->xdpf) + ixgbe_clean_xdp_tx_buffer(tx_ring, tx_bi); + else + xsk_frames++; + + tx_bi->xdpf = NULL; + + ntc++; + if (ntc == tx_ring->count) + ntc = 0; + } + + if (xsk_frames) + xsk_umem_complete_tx(umem, xsk_frames); +} diff --git a/drivers/net/ethernet/intel/ixgbevf/Makefile b/drivers/net/ethernet/intel/ixgbevf/Makefile index aba1e6a37a6a..297d0f0858b5 100644 --- a/drivers/net/ethernet/intel/ixgbevf/Makefile +++ b/drivers/net/ethernet/intel/ixgbevf/Makefile @@ -10,4 +10,5 @@ ixgbevf-objs := vf.o \ mbx.o \ ethtool.o \ ixgbevf_main.o +ixgbevf-$(CONFIG_XFRM_OFFLOAD) += ipsec.o diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h index 700d8eb2f6f8..6bace746eaac 100644 --- a/drivers/net/ethernet/intel/ixgbevf/defines.h +++ b/drivers/net/ethernet/intel/ixgbevf/defines.h @@ -133,9 +133,14 @@ typedef u32 ixgbe_link_speed; #define IXGBE_RXDADV_STAT_FCSTAT_NODDP 0x00000010 /* 01: Ctxt w/o DDP */ #define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. 
FCP_RSP */ #define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */ +#define IXGBE_RXDADV_STAT_SECP 0x00020000 /* IPsec/MACsec pkt found */ #define IXGBE_RXDADV_RSSTYPE_MASK 0x0000000F #define IXGBE_RXDADV_PKTTYPE_MASK 0x0000FFF0 +#define IXGBE_RXDADV_PKTTYPE_IPV4 0x00000010 /* IPv4 hdr present */ +#define IXGBE_RXDADV_PKTTYPE_IPV6 0x00000040 /* IPv6 hdr present */ +#define IXGBE_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 /* IPSec ESP */ +#define IXGBE_RXDADV_PKTTYPE_IPSEC_AH 0x00002000 /* IPSec AH */ #define IXGBE_RXDADV_PKTTYPE_MASK_EX 0x0001FFF0 #define IXGBE_RXDADV_HDRBUFLEN_MASK 0x00007FE0 #define IXGBE_RXDADV_RSCCNT_MASK 0x001E0000 @@ -229,7 +234,7 @@ union ixgbe_adv_rx_desc { /* Context descriptors */ struct ixgbe_adv_tx_context_desc { __le32 vlan_macip_lens; - __le32 seqnum_seed; + __le32 fceof_saidx; __le32 type_tucmd_mlhl; __le32 mss_l4len_idx; }; @@ -250,9 +255,12 @@ struct ixgbe_adv_tx_context_desc { #define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */ #define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ #define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */ +#define IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */ +#define IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000 /* ESP Encrypt Enable */ #define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */ #define IXGBE_ADVTXD_CC 0x00000080 /* Check Context */ #define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */ +#define IXGBE_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */ #define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \ IXGBE_ADVTXD_POPTS_SHIFT) #define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \ diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c index 631c91046f39..5399787e07af 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c +++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c @@ -55,6 +55,8 @@ static struct ixgbe_stats ixgbevf_gstrings_stats[] = { IXGBEVF_STAT("alloc_rx_page", alloc_rx_page), IXGBEVF_STAT("alloc_rx_page_failed", alloc_rx_page_failed), IXGBEVF_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed), + IXGBEVF_STAT("tx_ipsec", tx_ipsec), + IXGBEVF_STAT("rx_ipsec", rx_ipsec), }; #define IXGBEVF_QUEUE_STATS_LEN ( \ diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.c b/drivers/net/ethernet/intel/ixgbevf/ipsec.c new file mode 100644 index 000000000000..e8a3231be0bf --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.c @@ -0,0 +1,670 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2018 Oracle and/or its affiliates. All rights reserved. 
*/ + +#include "ixgbevf.h" +#include <net/xfrm.h> +#include <crypto/aead.h> + +#define IXGBE_IPSEC_KEY_BITS 160 +static const char aes_gcm_name[] = "rfc4106(gcm(aes))"; + +/** + * ixgbevf_ipsec_set_pf_sa - ask the PF to set up an SA + * @adapter: board private structure + * @xs: xfrm info to be sent to the PF + * + * Returns: positive offload handle from the PF, or negative error code + **/ +static int ixgbevf_ipsec_set_pf_sa(struct ixgbevf_adapter *adapter, + struct xfrm_state *xs) +{ + u32 msgbuf[IXGBE_VFMAILBOX_SIZE] = { 0 }; + struct ixgbe_hw *hw = &adapter->hw; + struct sa_mbx_msg *sam; + int ret; + + /* send the important bits to the PF */ + sam = (struct sa_mbx_msg *)(&msgbuf[1]); + sam->flags = xs->xso.flags; + sam->spi = xs->id.spi; + sam->proto = xs->id.proto; + sam->family = xs->props.family; + + if (xs->props.family == AF_INET6) + memcpy(sam->addr, &xs->id.daddr.a6, sizeof(xs->id.daddr.a6)); + else + memcpy(sam->addr, &xs->id.daddr.a4, sizeof(xs->id.daddr.a4)); + memcpy(sam->key, xs->aead->alg_key, sizeof(sam->key)); + + msgbuf[0] = IXGBE_VF_IPSEC_ADD; + + spin_lock_bh(&adapter->mbx_lock); + + ret = hw->mbx.ops.write_posted(hw, msgbuf, IXGBE_VFMAILBOX_SIZE); + if (ret) + goto out; + + ret = hw->mbx.ops.read_posted(hw, msgbuf, 2); + if (ret) + goto out; + + ret = (int)msgbuf[1]; + if (msgbuf[0] & IXGBE_VT_MSGTYPE_NACK && ret >= 0) + ret = -1; + +out: + spin_unlock_bh(&adapter->mbx_lock); + + return ret; +} + +/** + * ixgbevf_ipsec_del_pf_sa - ask the PF to delete an SA + * @adapter: board private structure + * @pfsa: sa index returned from PF when created, -1 for all + * + * Returns: 0 on success, or negative error code + **/ +static int ixgbevf_ipsec_del_pf_sa(struct ixgbevf_adapter *adapter, int pfsa) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 msgbuf[2]; + int err; + + memset(msgbuf, 0, sizeof(msgbuf)); + msgbuf[0] = IXGBE_VF_IPSEC_DEL; + msgbuf[1] = (u32)pfsa; + + spin_lock_bh(&adapter->mbx_lock); + + err = hw->mbx.ops.write_posted(hw, msgbuf, 2); + if (err) + goto out; + + err = hw->mbx.ops.read_posted(hw, msgbuf, 2); + if (err) + goto out; + +out: + spin_unlock_bh(&adapter->mbx_lock); + return err; +} + +/** + * ixgbevf_ipsec_restore - restore the IPsec HW settings after a reset + * @adapter: board private structure + * + * Reload the HW tables from the SW tables after they've been bashed + * by a chip reset. While we're here, make sure any stale VF data is + * removed, since we go through reset when num_vfs changes. 
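The two mailbox helpers above are the whole hardware interface of this file: the VF never programs IPsec registers itself, it only asks the PF and stores the handle the PF returns. Below is a minimal user-space model of that round-trip, as a sketch only: pf_handle() and the reply value are simulated, and just the IXGBE_VF_IPSEC_ADD opcode value and the ACK/NACK reply convention are taken from this patch.

    #include <stdint.h>
    #include <stdio.h>

    #define VF_IPSEC_ADD    0x0d            /* IXGBE_VF_IPSEC_ADD from mbx.h below */
    #define VT_MSGTYPE_ACK  0x80000000u
    #define VT_MSGTYPE_NACK 0x40000000u

    static void pf_handle(uint32_t *msg)    /* simulated PF side */
    {
            if ((msg[0] & 0xff) == VF_IPSEC_ADD) {
                    msg[1] = 3;             /* pretend SA slot 3 was allocated */
                    msg[0] |= VT_MSGTYPE_ACK;
            } else {
                    msg[0] |= VT_MSGTYPE_NACK;
            }
    }

    int main(void)
    {
            uint32_t msgbuf[16] = { 0 };

            msgbuf[0] = VF_IPSEC_ADD;       /* SA payload would follow in msgbuf[1..] */
            pf_handle(msgbuf);              /* stands in for write_posted() + read_posted() */

            if (msgbuf[0] & VT_MSGTYPE_NACK)
                    fprintf(stderr, "PF refused the SA\n");
            else
                    printf("PF offload handle: %u\n", msgbuf[1]);
            return 0;
    }

Keeping SA programming on the PF side means the untrusted VF needs no access to the security block, at the cost of one blocking mailbox transaction per SA change.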
+ **/ +void ixgbevf_ipsec_restore(struct ixgbevf_adapter *adapter) +{ + struct ixgbevf_ipsec *ipsec = adapter->ipsec; + struct net_device *netdev = adapter->netdev; + int i; + + if (!(adapter->netdev->features & NETIF_F_HW_ESP)) + return; + + /* reload the Rx and Tx keys */ + for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) { + struct rx_sa *r = &ipsec->rx_tbl[i]; + struct tx_sa *t = &ipsec->tx_tbl[i]; + int ret; + + if (r->used) { + ret = ixgbevf_ipsec_set_pf_sa(adapter, r->xs); + if (ret < 0) + netdev_err(netdev, "reload rx_tbl[%d] failed = %d\n", + i, ret); + } + + if (t->used) { + ret = ixgbevf_ipsec_set_pf_sa(adapter, t->xs); + if (ret < 0) + netdev_err(netdev, "reload tx_tbl[%d] failed = %d\n", + i, ret); + } + } +} + +/** + * ixgbevf_ipsec_find_empty_idx - find the first unused security parameter index + * @ipsec: pointer to IPsec struct + * @rxtable: true if we need to look in the Rx table + * + * Returns the first unused index in either the Rx or Tx SA table + **/ +static +int ixgbevf_ipsec_find_empty_idx(struct ixgbevf_ipsec *ipsec, bool rxtable) +{ + u32 i; + + if (rxtable) { + if (ipsec->num_rx_sa == IXGBE_IPSEC_MAX_SA_COUNT) + return -ENOSPC; + + /* search rx sa table */ + for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) { + if (!ipsec->rx_tbl[i].used) + return i; + } + } else { + if (ipsec->num_tx_sa == IXGBE_IPSEC_MAX_SA_COUNT) + return -ENOSPC; + + /* search tx sa table */ + for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) { + if (!ipsec->tx_tbl[i].used) + return i; + } + } + + return -ENOSPC; +} + +/** + * ixgbevf_ipsec_find_rx_state - find the state that matches + * @ipsec: pointer to IPsec struct + * @daddr: inbound address to match + * @proto: protocol to match + * @spi: SPI to match + * @ip4: true if using an IPv4 address + * + * Returns a pointer to the matching SA state information + **/ +static +struct xfrm_state *ixgbevf_ipsec_find_rx_state(struct ixgbevf_ipsec *ipsec, + __be32 *daddr, u8 proto, + __be32 spi, bool ip4) +{ + struct xfrm_state *ret = NULL; + struct rx_sa *rsa; + + rcu_read_lock(); + hash_for_each_possible_rcu(ipsec->rx_sa_list, rsa, hlist, + (__force u32)spi) { + if (spi == rsa->xs->id.spi && + ((ip4 && *daddr == rsa->xs->id.daddr.a4) || + (!ip4 && !memcmp(daddr, &rsa->xs->id.daddr.a6, + sizeof(rsa->xs->id.daddr.a6)))) && + proto == rsa->xs->id.proto) { + ret = rsa->xs; + xfrm_state_hold(ret); + break; + } + } + rcu_read_unlock(); + return ret; +} + +/** + * ixgbevf_ipsec_parse_proto_keys - find the key and salt based on the protocol + * @xs: pointer to xfrm_state struct + * @mykey: pointer to key array to populate + * @mysalt: pointer to salt value to populate + * + * This copies the protocol keys and salt to our own data tables. The + * 82599 family only supports the one algorithm. 
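The function body that follows implements that split. As a stand-alone sketch of the key blob layout it expects, where a 160-bit rfc4106(gcm(aes)) alg_key is a 128-bit AES key followed by a 32-bit salt; this is a user-space stand-in with fixed test data, not driver code:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            uint8_t blob[20];               /* stand-in for xs->aead->alg_key */
            uint8_t key[16];
            uint32_t salt;
            int i;

            for (i = 0; i < 20; i++)
                    blob[i] = (uint8_t)i;   /* fixed test pattern */

            memcpy(key, blob, 16);          /* first 128 bits: the AES-GCM key */
            memcpy(&salt, blob + 16, 4);    /* trailing 32 bits: the salt */

            printf("key[0]=0x%02x salt=0x%08x\n", key[0], salt);
            return 0;
    }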
+ **/ +static int ixgbevf_ipsec_parse_proto_keys(struct xfrm_state *xs, + u32 *mykey, u32 *mysalt) +{ + struct net_device *dev = xs->xso.dev; + unsigned char *key_data; + char *alg_name = NULL; + int key_len; + + if (!xs->aead) { + netdev_err(dev, "Unsupported IPsec algorithm\n"); + return -EINVAL; + } + + if (xs->aead->alg_icv_len != IXGBE_IPSEC_AUTH_BITS) { + netdev_err(dev, "IPsec offload requires %d bit authentication\n", + IXGBE_IPSEC_AUTH_BITS); + return -EINVAL; + } + + key_data = &xs->aead->alg_key[0]; + key_len = xs->aead->alg_key_len; + alg_name = xs->aead->alg_name; + + if (strcmp(alg_name, aes_gcm_name)) { + netdev_err(dev, "Unsupported IPsec algorithm - please use %s\n", + aes_gcm_name); + return -EINVAL; + } + + /* The key bytes come down in a big endian array of bytes, so + * we don't need to do any byte swapping. + * 160 accounts for 16 byte key and 4 byte salt + */ + if (key_len == IXGBE_IPSEC_KEY_BITS) { + *mysalt = ((u32 *)key_data)[4]; + } else if (key_len == (IXGBE_IPSEC_KEY_BITS - 32)) { + *mysalt = 0; + } else { + netdev_err(dev, "IPsec hw offload only supports keys up to 128 bits with a 32 bit salt\n"); + return -EINVAL; + } + memcpy(mykey, key_data, 16); + + return 0; +} + +/** + * ixgbevf_ipsec_add_sa - program device with a security association + * @xs: pointer to transformer state struct + **/ +static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs) +{ + struct net_device *dev = xs->xso.dev; + struct ixgbevf_adapter *adapter = netdev_priv(dev); + struct ixgbevf_ipsec *ipsec = adapter->ipsec; + u16 sa_idx; + int ret; + + if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) { + netdev_err(dev, "Unsupported protocol 0x%04x for IPsec offload\n", + xs->id.proto); + return -EINVAL; + } + + if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) { + struct rx_sa rsa; + + if (xs->calg) { + netdev_err(dev, "Compression offload not supported\n"); + return -EINVAL; + } + + /* find the first unused index */ + ret = ixgbevf_ipsec_find_empty_idx(ipsec, true); + if (ret < 0) { + netdev_err(dev, "No space for SA in Rx table!\n"); + return ret; + } + sa_idx = (u16)ret; + + memset(&rsa, 0, sizeof(rsa)); + rsa.used = true; + rsa.xs = xs; + + if (rsa.xs->id.proto & IPPROTO_ESP) + rsa.decrypt = xs->ealg || xs->aead; + + /* get the key and salt */ + ret = ixgbevf_ipsec_parse_proto_keys(xs, rsa.key, &rsa.salt); + if (ret) { + netdev_err(dev, "Failed to get key data for Rx SA table\n"); + return ret; + } + + /* get ip for rx sa table */ + if (xs->props.family == AF_INET6) + memcpy(rsa.ipaddr, &xs->id.daddr.a6, 16); + else + memcpy(&rsa.ipaddr[3], &xs->id.daddr.a4, 4); + + rsa.mode = IXGBE_RXMOD_VALID; + if (rsa.xs->id.proto & IPPROTO_ESP) + rsa.mode |= IXGBE_RXMOD_PROTO_ESP; + if (rsa.decrypt) + rsa.mode |= IXGBE_RXMOD_DECRYPT; + if (rsa.xs->props.family == AF_INET6) + rsa.mode |= IXGBE_RXMOD_IPV6; + + ret = ixgbevf_ipsec_set_pf_sa(adapter, xs); + if (ret < 0) + return ret; + rsa.pfsa = ret; + + /* the preparations worked, so save the info */ + memcpy(&ipsec->rx_tbl[sa_idx], &rsa, sizeof(rsa)); + + xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_RX_INDEX; + + ipsec->num_rx_sa++; + + /* hash the new entry for faster search in Rx path */ + hash_add_rcu(ipsec->rx_sa_list, &ipsec->rx_tbl[sa_idx].hlist, + (__force u32)rsa.xs->id.spi); + } else { + struct tx_sa tsa; + + /* find the first unused index */ + ret = ixgbevf_ipsec_find_empty_idx(ipsec, false); + if (ret < 0) { + netdev_err(dev, "No space for SA in Tx table\n"); + return ret; + } + sa_idx = (u16)ret; + + memset(&tsa, 0, sizeof(tsa)); + 
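/* The Tx side below mirrors the Rx side above: stage the entry on
+ * the stack, ask the PF for a hardware slot, and only then publish
+ * it in the local tx_tbl. */
+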
tsa.used = true; + tsa.xs = xs; + + if (xs->id.proto & IPPROTO_ESP) + tsa.encrypt = xs->ealg || xs->aead; + + ret = ixgbevf_ipsec_parse_proto_keys(xs, tsa.key, &tsa.salt); + if (ret) { + netdev_err(dev, "Failed to get key data for Tx SA table\n"); + memset(&tsa, 0, sizeof(tsa)); + return ret; + } + + ret = ixgbevf_ipsec_set_pf_sa(adapter, xs); + if (ret < 0) + return ret; + tsa.pfsa = ret; + + /* the preparations worked, so save the info */ + memcpy(&ipsec->tx_tbl[sa_idx], &tsa, sizeof(tsa)); + + xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_TX_INDEX; + + ipsec->num_tx_sa++; + } + + return 0; +} + +/** + * ixgbevf_ipsec_del_sa - clear out this specific SA + * @xs: pointer to transformer state struct + **/ +static void ixgbevf_ipsec_del_sa(struct xfrm_state *xs) +{ + struct net_device *dev = xs->xso.dev; + struct ixgbevf_adapter *adapter = netdev_priv(dev); + struct ixgbevf_ipsec *ipsec = adapter->ipsec; + u16 sa_idx; + + if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) { + sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_RX_INDEX; + + if (!ipsec->rx_tbl[sa_idx].used) { + netdev_err(dev, "Invalid Rx SA selected sa_idx=%d offload_handle=%lu\n", + sa_idx, xs->xso.offload_handle); + return; + } + + ixgbevf_ipsec_del_pf_sa(adapter, ipsec->rx_tbl[sa_idx].pfsa); + hash_del_rcu(&ipsec->rx_tbl[sa_idx].hlist); + memset(&ipsec->rx_tbl[sa_idx], 0, sizeof(struct rx_sa)); + ipsec->num_rx_sa--; + } else { + sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX; + + if (!ipsec->tx_tbl[sa_idx].used) { + netdev_err(dev, "Invalid Tx SA selected sa_idx=%d offload_handle=%lu\n", + sa_idx, xs->xso.offload_handle); + return; + } + + ixgbevf_ipsec_del_pf_sa(adapter, ipsec->tx_tbl[sa_idx].pfsa); + memset(&ipsec->tx_tbl[sa_idx], 0, sizeof(struct tx_sa)); + ipsec->num_tx_sa--; + } +} + +/** + * ixgbevf_ipsec_offload_ok - can this packet use the xfrm hw offload + * @skb: current data packet + * @xs: pointer to transformer state struct + **/ +static bool ixgbevf_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs) +{ + if (xs->props.family == AF_INET) { + /* Offload with IPv4 options is not supported yet */ + if (ip_hdr(skb)->ihl != 5) + return false; + } else { + /* Offload with IPv6 extension headers is not supported yet */ + if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr)) + return false; + } + + return true; +} + +static const struct xfrmdev_ops ixgbevf_xfrmdev_ops = { + .xdo_dev_state_add = ixgbevf_ipsec_add_sa, + .xdo_dev_state_delete = ixgbevf_ipsec_del_sa, + .xdo_dev_offload_ok = ixgbevf_ipsec_offload_ok, +}; + +/** + * ixgbevf_ipsec_tx - setup Tx flags for IPsec offload + * @tx_ring: outgoing context + * @first: current data packet + * @itd: ipsec Tx data for later use in building context descriptor + **/ +int ixgbevf_ipsec_tx(struct ixgbevf_ring *tx_ring, + struct ixgbevf_tx_buffer *first, + struct ixgbevf_ipsec_tx_data *itd) +{ + struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev); + struct ixgbevf_ipsec *ipsec = adapter->ipsec; + struct xfrm_state *xs; + struct tx_sa *tsa; + u16 sa_idx; + + if (unlikely(!first->skb->sp->len)) { + netdev_err(tx_ring->netdev, "%s: no xfrm state len = %d\n", + __func__, first->skb->sp->len); + return 0; + } + + xs = xfrm_input_state(first->skb); + if (unlikely(!xs)) { + netdev_err(tx_ring->netdev, "%s: no xfrm_input_state() xs = %p\n", + __func__, xs); + return 0; + } + + sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX; + if (unlikely(sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT)) { + netdev_err(tx_ring->netdev, "%s: bad sa_idx=%d handle=%lu\n", + __func__, 
sa_idx, xs->xso.offload_handle); + return 0; + } + + tsa = &ipsec->tx_tbl[sa_idx]; + if (unlikely(!tsa->used)) { + netdev_err(tx_ring->netdev, "%s: unused sa_idx=%d\n", + __func__, sa_idx); + return 0; + } + + itd->pfsa = tsa->pfsa - IXGBE_IPSEC_BASE_TX_INDEX; + + first->tx_flags |= IXGBE_TX_FLAGS_IPSEC | IXGBE_TX_FLAGS_CSUM; + + if (xs->id.proto == IPPROTO_ESP) { + itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP | + IXGBE_ADVTXD_TUCMD_L4T_TCP; + if (first->protocol == htons(ETH_P_IP)) + itd->flags |= IXGBE_ADVTXD_TUCMD_IPV4; + + /* The actual trailer length is authlen (16 bytes) plus + * 2 bytes for the proto and the padlen values, plus + * padlen bytes of padding. This ends up not the same + * as the static value found in xs->props.trailer_len (21). + * + * ... but if we're doing GSO, don't bother as the stack + * doesn't add a trailer for those. + */ + if (!skb_is_gso(first->skb)) { + /* The "correct" way to get the auth length would be + * to use + * authlen = crypto_aead_authsize(xs->data); + * but since we know we only have one size to worry + * about, we can let the compiler use the constant + * and save us a few CPU cycles. + */ + const int authlen = IXGBE_IPSEC_AUTH_BITS / 8; + struct sk_buff *skb = first->skb; + u8 padlen; + int ret; + + ret = skb_copy_bits(skb, skb->len - (authlen + 2), + &padlen, 1); + if (unlikely(ret)) + return 0; + itd->trailer_len = authlen + 2 + padlen; + } + } + if (tsa->encrypt) + itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN; + + return 1; +} + +/** + * ixgbevf_ipsec_rx - decode IPsec bits from Rx descriptor + * @rx_ring: receiving ring + * @rx_desc: receive data descriptor + * @skb: current data packet + * + * Determine if there was an IPsec encapsulation noticed, and if so set up + * the resulting status for later in the receive stack. + **/ +void ixgbevf_ipsec_rx(struct ixgbevf_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct ixgbevf_adapter *adapter = netdev_priv(rx_ring->netdev); + __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; + __le16 ipsec_pkt_types = cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH | + IXGBE_RXDADV_PKTTYPE_IPSEC_ESP); + struct ixgbevf_ipsec *ipsec = adapter->ipsec; + struct xfrm_offload *xo = NULL; + struct xfrm_state *xs = NULL; + struct ipv6hdr *ip6 = NULL; + struct iphdr *ip4 = NULL; + void *daddr; + __be32 spi; + u8 *c_hdr; + u8 proto; + + /* Find the IP and crypto headers in the data. + * We can assume there is no VLAN header in the way, because + * the hw won't recognize the IPsec packet, and anyway VLAN + * devices don't currently support xfrm offload. 
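Because the Rx descriptor only flags that an IPsec packet was seen, the driver recovers the SPI from the frame itself at fixed offsets. A user-space sketch of that parse for the IPv4/ESP case, on a hand-built frame; the offsets mirror the code below, nothing here is a driver API:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <arpa/inet.h>

    #define ETH_HLEN 14

    int main(void)
    {
            uint8_t frame[ETH_HLEN + 20 + 8] = { 0 };
            uint8_t *ip = frame + ETH_HLEN;
            uint8_t *c_hdr;
            uint32_t spi, found;

            ip[0] = 0x45;                     /* IPv4, ihl = 5 words */
            ip[9] = 50;                       /* protocol: ESP */
            spi = htonl(0x1234);
            memcpy(ip + 20, &spi, 4);         /* ESP header starts with the SPI */

            c_hdr = ip + (ip[0] & 0x0f) * 4;  /* same step as c_hdr = ip + ihl * 4 */
            memcpy(&found, c_hdr, 4);
            printf("SPI = 0x%08x\n", ntohl(found));
            return 0;
    }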
+ */ + if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV4)) { + ip4 = (struct iphdr *)(skb->data + ETH_HLEN); + daddr = &ip4->daddr; + c_hdr = (u8 *)ip4 + ip4->ihl * 4; + } else if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV6)) { + ip6 = (struct ipv6hdr *)(skb->data + ETH_HLEN); + daddr = &ip6->daddr; + c_hdr = (u8 *)ip6 + sizeof(struct ipv6hdr); + } else { + return; + } + + switch (pkt_info & ipsec_pkt_types) { + case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH): + spi = ((struct ip_auth_hdr *)c_hdr)->spi; + proto = IPPROTO_AH; + break; + case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_ESP): + spi = ((struct ip_esp_hdr *)c_hdr)->spi; + proto = IPPROTO_ESP; + break; + default: + return; + } + + xs = ixgbevf_ipsec_find_rx_state(ipsec, daddr, proto, spi, !!ip4); + if (unlikely(!xs)) + return; + + skb->sp = secpath_dup(skb->sp); + if (unlikely(!skb->sp)) + return; + + skb->sp->xvec[skb->sp->len++] = xs; + skb->sp->olen++; + xo = xfrm_offload(skb); + xo->flags = CRYPTO_DONE; + xo->status = CRYPTO_SUCCESS; + + adapter->rx_ipsec++; +} + +/** + * ixgbevf_init_ipsec_offload - initialize registers for IPsec operation + * @adapter: board private structure + **/ +void ixgbevf_init_ipsec_offload(struct ixgbevf_adapter *adapter) +{ + struct ixgbevf_ipsec *ipsec; + size_t size; + + switch (adapter->hw.api_version) { + case ixgbe_mbox_api_14: + break; + default: + return; + } + + ipsec = kzalloc(sizeof(*ipsec), GFP_KERNEL); + if (!ipsec) + goto err1; + hash_init(ipsec->rx_sa_list); + + size = sizeof(struct rx_sa) * IXGBE_IPSEC_MAX_SA_COUNT; + ipsec->rx_tbl = kzalloc(size, GFP_KERNEL); + if (!ipsec->rx_tbl) + goto err2; + + size = sizeof(struct tx_sa) * IXGBE_IPSEC_MAX_SA_COUNT; + ipsec->tx_tbl = kzalloc(size, GFP_KERNEL); + if (!ipsec->tx_tbl) + goto err2; + + ipsec->num_rx_sa = 0; + ipsec->num_tx_sa = 0; + + adapter->ipsec = ipsec; + + adapter->netdev->xfrmdev_ops = &ixgbevf_xfrmdev_ops; + +#define IXGBEVF_ESP_FEATURES (NETIF_F_HW_ESP | \ + NETIF_F_HW_ESP_TX_CSUM | \ + NETIF_F_GSO_ESP) + + adapter->netdev->features |= IXGBEVF_ESP_FEATURES; + adapter->netdev->hw_enc_features |= IXGBEVF_ESP_FEATURES; + + return; + +err2: + kfree(ipsec->rx_tbl); + kfree(ipsec->tx_tbl); + kfree(ipsec); +err1: + netdev_err(adapter->netdev, "Unable to allocate memory for SA tables"); +} + +/** + * ixgbevf_stop_ipsec_offload - tear down the IPsec offload + * @adapter: board private structure + **/ +void ixgbevf_stop_ipsec_offload(struct ixgbevf_adapter *adapter) +{ + struct ixgbevf_ipsec *ipsec = adapter->ipsec; + + adapter->ipsec = NULL; + if (ipsec) { + kfree(ipsec->rx_tbl); + kfree(ipsec->tx_tbl); + kfree(ipsec); + } +} diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.h b/drivers/net/ethernet/intel/ixgbevf/ipsec.h new file mode 100644 index 000000000000..3740725041c3 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2018 Oracle and/or its affiliates. All rights reserved. 
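A note on the Rx lookup used above: SAs are hashed on the SPI, so ixgbevf_ipsec_find_rx_state() only walks one small bucket per packet. A plain-C stand-in for that strategy, with a modulo in place of the kernel's hash function and a single slot per bucket:

    #include <stdint.h>
    #include <stdio.h>

    #define NBUCKETS 1024   /* 2^10, matching DECLARE_HASHTABLE(rx_sa_list, 10) */

    struct sa {
            uint32_t spi;
            int used;
    };

    static struct sa table[NBUCKETS];

    static struct sa *find_rx_sa(uint32_t spi)
    {
            struct sa *s = &table[spi % NBUCKETS];

            /* the real table chains collisions and also checks daddr/proto */
            return (s->used && s->spi == spi) ? s : NULL;
    }

    int main(void)
    {
            table[0x1234 % NBUCKETS].spi = 0x1234;
            table[0x1234 % NBUCKETS].used = 1;

            printf("lookup 0x1234: %s\n", find_rx_sa(0x1234) ? "hit" : "miss");
            printf("lookup 0x4321: %s\n", find_rx_sa(0x4321) ? "hit" : "miss");
            return 0;
    }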
*/ + +#ifndef _IXGBEVF_IPSEC_H_ +#define _IXGBEVF_IPSEC_H_ + +#define IXGBE_IPSEC_MAX_SA_COUNT 1024 +#define IXGBE_IPSEC_BASE_RX_INDEX 0 +#define IXGBE_IPSEC_BASE_TX_INDEX IXGBE_IPSEC_MAX_SA_COUNT +#define IXGBE_IPSEC_AUTH_BITS 128 + +#define IXGBE_RXMOD_VALID 0x00000001 +#define IXGBE_RXMOD_PROTO_ESP 0x00000004 +#define IXGBE_RXMOD_DECRYPT 0x00000008 +#define IXGBE_RXMOD_IPV6 0x00000010 + +struct rx_sa { + struct hlist_node hlist; + struct xfrm_state *xs; + __be32 ipaddr[4]; + u32 key[4]; + u32 salt; + u32 mode; + u32 pfsa; + bool used; + bool decrypt; +}; + +struct rx_ip_sa { + __be32 ipaddr[4]; + u32 ref_cnt; + bool used; +}; + +struct tx_sa { + struct xfrm_state *xs; + u32 key[4]; + u32 salt; + u32 pfsa; + bool encrypt; + bool used; +}; + +struct ixgbevf_ipsec_tx_data { + u32 flags; + u16 trailer_len; + u16 pfsa; +}; + +struct ixgbevf_ipsec { + u16 num_rx_sa; + u16 num_tx_sa; + struct rx_sa *rx_tbl; + struct tx_sa *tx_tbl; + DECLARE_HASHTABLE(rx_sa_list, 10); +}; + +struct sa_mbx_msg { + __be32 spi; + u8 flags; + u8 proto; + u16 family; + __be32 addr[4]; + u32 key[5]; +}; +#endif /* _IXGBEVF_IPSEC_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index 56a1031dcc07..e399e1c0c54a 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -14,6 +14,7 @@ #include <net/xdp.h> #include "vf.h" +#include "ipsec.h" #define IXGBE_MAX_TXD_PWR 14 #define IXGBE_MAX_DATA_PER_TXD BIT(IXGBE_MAX_TXD_PWR) @@ -163,6 +164,7 @@ struct ixgbevf_ring { #define IXGBE_TX_FLAGS_VLAN BIT(1) #define IXGBE_TX_FLAGS_TSO BIT(2) #define IXGBE_TX_FLAGS_IPV4 BIT(3) +#define IXGBE_TX_FLAGS_IPSEC BIT(4) #define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000 #define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000 #define IXGBE_TX_FLAGS_VLAN_SHIFT 16 @@ -338,6 +340,7 @@ struct ixgbevf_adapter { struct ixgbevf_ring *tx_ring[MAX_TX_QUEUES]; /* One per active queue */ u64 restart_queue; u32 tx_timeout_count; + u64 tx_ipsec; /* RX */ int num_rx_queues; @@ -348,6 +351,7 @@ struct ixgbevf_adapter { u64 alloc_rx_page_failed; u64 alloc_rx_buff_failed; u64 alloc_rx_page; + u64 rx_ipsec; struct msix_entry *msix_entries; @@ -384,6 +388,10 @@ struct ixgbevf_adapter { u8 rss_indir_tbl[IXGBEVF_X550_VFRETA_SIZE]; u32 flags; #define IXGBEVF_FLAGS_LEGACY_RX BIT(1) + +#ifdef CONFIG_XFRM + struct ixgbevf_ipsec *ipsec; +#endif /* CONFIG_XFRM */ }; enum ixbgevf_state_t { @@ -451,6 +459,31 @@ int ethtool_ioctl(struct ifreq *ifr); extern void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector); +#ifdef CONFIG_XFRM_OFFLOAD +void ixgbevf_init_ipsec_offload(struct ixgbevf_adapter *adapter); +void ixgbevf_stop_ipsec_offload(struct ixgbevf_adapter *adapter); +void ixgbevf_ipsec_restore(struct ixgbevf_adapter *adapter); +void ixgbevf_ipsec_rx(struct ixgbevf_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb); +int ixgbevf_ipsec_tx(struct ixgbevf_ring *tx_ring, + struct ixgbevf_tx_buffer *first, + struct ixgbevf_ipsec_tx_data *itd); +#else +static inline void ixgbevf_init_ipsec_offload(struct ixgbevf_adapter *adapter) +{ } +static inline void ixgbevf_stop_ipsec_offload(struct ixgbevf_adapter *adapter) +{ } +static inline void ixgbevf_ipsec_restore(struct ixgbevf_adapter *adapter) { } +static inline void ixgbevf_ipsec_rx(struct ixgbevf_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) { } +static inline int ixgbevf_ipsec_tx(struct ixgbevf_ring *tx_ring, + struct ixgbevf_tx_buffer *first, + struct 
ixgbevf_ipsec_tx_data *itd) +{ return 0; } +#endif /* CONFIG_XFRM_OFFLOAD */ + void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter); void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter); diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 5a228582423b..98707ee11d72 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -40,7 +40,7 @@ static const char ixgbevf_driver_string[] = #define DRV_VERSION "4.1.0-k" const char ixgbevf_driver_version[] = DRV_VERSION; static char ixgbevf_copyright[] = - "Copyright (c) 2009 - 2015 Intel Corporation."; + "Copyright (c) 2009 - 2018 Intel Corporation."; static const struct ixgbevf_info *ixgbevf_info_tbl[] = { [board_82599_vf] = &ixgbevf_82599_vf_info, @@ -79,7 +79,7 @@ MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl); MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); MODULE_DESCRIPTION("Intel(R) 10 Gigabit Virtual Function Network Driver"); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); MODULE_VERSION(DRV_VERSION); #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) @@ -268,7 +268,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, struct ixgbevf_adapter *adapter = q_vector->adapter; struct ixgbevf_tx_buffer *tx_buffer; union ixgbe_adv_tx_desc *tx_desc; - unsigned int total_bytes = 0, total_packets = 0; + unsigned int total_bytes = 0, total_packets = 0, total_ipsec = 0; unsigned int budget = tx_ring->count / 2; unsigned int i = tx_ring->next_to_clean; @@ -299,6 +299,8 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, /* update the statistics for this packet */ total_bytes += tx_buffer->bytecount; total_packets += tx_buffer->gso_segs; + if (tx_buffer->tx_flags & IXGBE_TX_FLAGS_IPSEC) + total_ipsec++; /* free the skb */ if (ring_is_xdp(tx_ring)) @@ -361,6 +363,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, u64_stats_update_end(&tx_ring->syncp); q_vector->tx.total_bytes += total_bytes; q_vector->tx.total_packets += total_packets; + adapter->tx_ipsec += total_ipsec; if (check_for_tx_hang(tx_ring) && ixgbevf_check_tx_hang(tx_ring)) { struct ixgbe_hw *hw = &adapter->hw; @@ -516,6 +519,9 @@ static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring, __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); } + if (ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_STAT_SECP)) + ixgbevf_ipsec_rx(rx_ring, rx_desc, skb); + skb->protocol = eth_type_trans(skb, rx_ring->netdev); } @@ -1012,7 +1018,7 @@ static int ixgbevf_xmit_xdp_ring(struct ixgbevf_ring *ring, context_desc = IXGBEVF_TX_CTXTDESC(ring, 0); context_desc->vlan_macip_lens = cpu_to_le32(ETH_HLEN << IXGBE_ADVTXD_MACLEN_SHIFT); - context_desc->seqnum_seed = 0; + context_desc->fceof_saidx = 0; context_desc->type_tucmd_mlhl = cpu_to_le32(IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT); @@ -2200,6 +2206,7 @@ static void ixgbevf_configure(struct ixgbevf_adapter *adapter) ixgbevf_set_rx_mode(adapter->netdev); ixgbevf_restore_vlan(adapter); + ixgbevf_ipsec_restore(adapter); ixgbevf_configure_tx(adapter); ixgbevf_configure_rx(adapter); @@ -2246,7 +2253,8 @@ static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter) static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; - int api[] = { ixgbe_mbox_api_13, + int api[] = { ixgbe_mbox_api_14, + ixgbe_mbox_api_13, ixgbe_mbox_api_12, ixgbe_mbox_api_11, ixgbe_mbox_api_10, @@ 
-2605,6 +2613,7 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter) case ixgbe_mbox_api_11: case ixgbe_mbox_api_12: case ixgbe_mbox_api_13: + case ixgbe_mbox_api_14: if (adapter->xdp_prog && hw->mac.max_tx_queues == rss) rss = rss > 3 ? 2 : 1; @@ -3700,8 +3709,8 @@ static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter) } static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring, - u32 vlan_macip_lens, u32 type_tucmd, - u32 mss_l4len_idx) + u32 vlan_macip_lens, u32 fceof_saidx, + u32 type_tucmd, u32 mss_l4len_idx) { struct ixgbe_adv_tx_context_desc *context_desc; u16 i = tx_ring->next_to_use; @@ -3715,14 +3724,15 @@ static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring, type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT; context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); - context_desc->seqnum_seed = 0; + context_desc->fceof_saidx = cpu_to_le32(fceof_saidx); context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); } static int ixgbevf_tso(struct ixgbevf_ring *tx_ring, struct ixgbevf_tx_buffer *first, - u8 *hdr_len) + u8 *hdr_len, + struct ixgbevf_ipsec_tx_data *itd) { u32 vlan_macip_lens, type_tucmd, mss_l4len_idx; struct sk_buff *skb = first->skb; @@ -3736,6 +3746,7 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring, unsigned char *hdr; } l4; u32 paylen, l4_offset; + u32 fceof_saidx = 0; int err; if (skb->ip_summed != CHECKSUM_PARTIAL) @@ -3761,13 +3772,15 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring, if (ip.v4->version == 4) { unsigned char *csum_start = skb_checksum_start(skb); unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4); + int len = csum_start - trans_start; /* IP header will have to cancel out any data that - * is not a part of the outer IP header + * is not a part of the outer IP header, so set to + * a reverse csum if needed, else init check to 0. */ - ip.v4->check = csum_fold(csum_partial(trans_start, - csum_start - trans_start, - 0)); + ip.v4->check = (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) ? 
+ csum_fold(csum_partial(trans_start, + len, 0)) : 0; type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; ip.v4->tot_len = 0; @@ -3799,13 +3812,16 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring, mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; mss_l4len_idx |= (1u << IXGBE_ADVTXD_IDX_SHIFT); + fceof_saidx |= itd->pfsa; + type_tucmd |= itd->flags | itd->trailer_len; + /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ vlan_macip_lens = l4.hdr - ip.hdr; vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; - ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, - type_tucmd, mss_l4len_idx); + ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd, + mss_l4len_idx); return 1; } @@ -3820,10 +3836,12 @@ static inline bool ixgbevf_ipv6_csum_is_sctp(struct sk_buff *skb) } static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, - struct ixgbevf_tx_buffer *first) + struct ixgbevf_tx_buffer *first, + struct ixgbevf_ipsec_tx_data *itd) { struct sk_buff *skb = first->skb; u32 vlan_macip_lens = 0; + u32 fceof_saidx = 0; u32 type_tucmd = 0; if (skb->ip_summed != CHECKSUM_PARTIAL) @@ -3849,6 +3867,10 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, skb_checksum_help(skb); goto no_csum; } + + if (first->protocol == htons(ETH_P_IP)) + type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; + /* update TX checksum flag */ first->tx_flags |= IXGBE_TX_FLAGS_CSUM; vlan_macip_lens = skb_checksum_start_offset(skb) - @@ -3858,7 +3880,11 @@ no_csum: vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; - ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, 0); + fceof_saidx |= itd->pfsa; + type_tucmd |= itd->flags | itd->trailer_len; + + ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, + fceof_saidx, type_tucmd, 0); } static __le32 ixgbevf_tx_cmd_type(u32 tx_flags) @@ -3892,8 +3918,12 @@ static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc, if (tx_flags & IXGBE_TX_FLAGS_IPV4) olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM); - /* use index 1 context for TSO/FSO/FCOE */ - if (tx_flags & IXGBE_TX_FLAGS_TSO) + /* enable IPsec */ + if (tx_flags & IXGBE_TX_FLAGS_IPSEC) + olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IPSEC); + + /* use index 1 context for TSO/FSO/FCOE/IPSEC */ + if (tx_flags & (IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_IPSEC)) olinfo_status |= cpu_to_le32(1u << IXGBE_ADVTXD_IDX_SHIFT); /* Check Context must be set if Tx switch is enabled, which it @@ -4075,6 +4105,7 @@ static int ixgbevf_xmit_frame_ring(struct sk_buff *skb, int tso; u32 tx_flags = 0; u16 count = TXD_USE_COUNT(skb_headlen(skb)); + struct ixgbevf_ipsec_tx_data ipsec_tx = { 0 }; #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD unsigned short f; #endif @@ -4119,11 +4150,15 @@ static int ixgbevf_xmit_frame_ring(struct sk_buff *skb, first->tx_flags = tx_flags; first->protocol = vlan_get_protocol(skb); - tso = ixgbevf_tso(tx_ring, first, &hdr_len); +#ifdef CONFIG_XFRM_OFFLOAD + if (skb->sp && !ixgbevf_ipsec_tx(tx_ring, first, &ipsec_tx)) + goto out_drop; +#endif + tso = ixgbevf_tso(tx_ring, first, &hdr_len, &ipsec_tx); if (tso < 0) goto out_drop; else if (!tso) - ixgbevf_tx_csum(tx_ring, first); + ixgbevf_tx_csum(tx_ring, first, &ipsec_tx); ixgbevf_tx_map(tx_ring, first, hdr_len); @@ -4613,6 +4648,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) case ixgbe_mbox_api_11: case ixgbe_mbox_api_12: case ixgbe_mbox_api_13: + case 
ixgbe_mbox_api_14: netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN); break; @@ -4648,6 +4684,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_drvdata(pdev, netdev); netif_carrier_off(netdev); + ixgbevf_init_ipsec_offload(adapter); ixgbevf_init_last_counter_stats(adapter); @@ -4714,6 +4751,7 @@ static void ixgbevf_remove(struct pci_dev *pdev) if (netdev->reg_state == NETREG_REGISTERED) unregister_netdev(netdev); + ixgbevf_stop_ipsec_offload(adapter); ixgbevf_clear_interrupt_scheme(adapter); ixgbevf_reset_interrupt_capability(adapter); diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h index bfd9ae150808..853796c8ef0e 100644 --- a/drivers/net/ethernet/intel/ixgbevf/mbx.h +++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h @@ -62,6 +62,7 @@ enum ixgbe_pfvf_api_rev { ixgbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */ ixgbe_mbox_api_12, /* API version 1.2, linux/freebsd VF driver */ ixgbe_mbox_api_13, /* API version 1.3, linux/freebsd VF driver */ + ixgbe_mbox_api_14, /* API version 1.4, linux/freebsd VF driver */ /* This value should always be last */ ixgbe_mbox_api_unknown, /* indicates that API version is not known */ }; @@ -92,6 +93,10 @@ enum ixgbe_pfvf_api_rev { #define IXGBE_VF_UPDATE_XCAST_MODE 0x0c +/* mailbox API, version 1.4 VF requests */ +#define IXGBE_VF_IPSEC_ADD 0x0d +#define IXGBE_VF_IPSEC_DEL 0x0e + /* length of permanent address message returned from PF */ #define IXGBE_VF_PERMADDR_MSG_LEN 4 /* word in permanent address message with the current multicast type */ diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c index bf0577e819e1..cd3b81300cc7 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.c +++ b/drivers/net/ethernet/intel/ixgbevf/vf.c @@ -309,6 +309,7 @@ int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues) * is not supported for this device type. */ switch (hw->api_version) { + case ixgbe_mbox_api_14: case ixgbe_mbox_api_13: case ixgbe_mbox_api_12: if (hw->mac.type < ixgbe_mac_X550_vf) @@ -376,6 +377,7 @@ int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key) * or if the operation is not supported for this device type. 
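The ixgbe_mbox_api_14 entries added here follow the usual negotiation pattern: the VF offers versions newest first and settles on the first one the PF accepts, and the IPsec opcodes are only usable once 1.4 has been agreed. A small sketch of that walk, with the PF's answers simulated:

    #include <stdio.h>

    enum api { API_10, API_11, API_12, API_13, API_14, API_UNKNOWN };

    static int pf_accepts(enum api v)       /* simulated PF answer */
    {
            return v <= API_13;             /* pretend the PF tops out at 1.3 */
    }

    int main(void)
    {
            enum api wish[] = { API_14, API_13, API_12, API_11, API_10 };
            enum api got = API_UNKNOWN;
            unsigned int i;

            for (i = 0; i < sizeof(wish) / sizeof(wish[0]); i++) {
                    if (pf_accepts(wish[i])) {
                            got = wish[i];
                            break;
                    }
            }

            printf("negotiated 1.%d, IPsec offload %s\n", got,
                   got >= API_14 ? "available" : "unavailable");
            return 0;
    }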
*/ switch (hw->api_version) { + case ixgbe_mbox_api_14: case ixgbe_mbox_api_13: case ixgbe_mbox_api_12: if (hw->mac.type < ixgbe_mac_X550_vf) @@ -540,6 +542,7 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode) if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC) return -EOPNOTSUPP; /* Fall through */ + case ixgbe_mbox_api_14: case ixgbe_mbox_api_13: break; default: @@ -890,6 +893,7 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs, case ixgbe_mbox_api_11: case ixgbe_mbox_api_12: case ixgbe_mbox_api_13: + case ixgbe_mbox_api_14: break; default: return 0; diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c index e08301d833e2..32ac9045cdae 100644 --- a/drivers/net/ethernet/lantiq_etop.c +++ b/drivers/net/ethernet/lantiq_etop.c @@ -365,15 +365,8 @@ ltq_etop_mdio_probe(struct net_device *dev) return PTR_ERR(phydev); } - phydev->supported &= (SUPPORTED_10baseT_Half - | SUPPORTED_10baseT_Full - | SUPPORTED_100baseT_Half - | SUPPORTED_100baseT_Full - | SUPPORTED_Autoneg - | SUPPORTED_MII - | SUPPORTED_TP); - - phydev->advertising = phydev->supported; + phy_set_max_speed(phydev, SPEED_100); + phy_attached_info(phydev); return 0; @@ -439,6 +432,7 @@ ltq_etop_open(struct net_device *dev) if (!IS_TX(i) && (!IS_RX(i))) continue; ltq_dma_open(&ch->dma); + ltq_dma_enable_irq(&ch->dma); napi_enable(&ch->napi); } phy_start(dev->phydev); diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c new file mode 100644 index 000000000000..8c5ba4b81fb7 --- /dev/null +++ b/drivers/net/ethernet/lantiq_xrx200.c @@ -0,0 +1,567 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Lantiq / Intel PMAC driver for XRX200 SoCs + * + * Copyright (C) 2010 Lantiq Deutschland + * Copyright (C) 2012 John Crispin <john@phrozen.org> + * Copyright (C) 2017 - 2018 Hauke Mehrtens <hauke@hauke-m.de> + */ + +#include <linux/etherdevice.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/interrupt.h> +#include <linux/clk.h> +#include <linux/delay.h> + +#include <linux/of_net.h> +#include <linux/of_platform.h> + +#include <xway_dma.h> + +/* DMA */ +#define XRX200_DMA_DATA_LEN 0x600 +#define XRX200_DMA_RX 0 +#define XRX200_DMA_TX 1 + +/* cpu port mac */ +#define PMAC_RX_IPG 0x0024 +#define PMAC_RX_IPG_MASK 0xf + +#define PMAC_HD_CTL 0x0000 +/* Add Ethernet header to packets from DMA to PMAC */ +#define PMAC_HD_CTL_ADD BIT(0) +/* Add VLAN tag to packets from DMA to PMAC */ +#define PMAC_HD_CTL_TAG BIT(1) +/* Add CRC to packets from DMA to PMAC */ +#define PMAC_HD_CTL_AC BIT(2) +/* Add status header to packets from PMAC to DMA */ +#define PMAC_HD_CTL_AS BIT(3) +/* Remove CRC from packets from PMAC to DMA */ +#define PMAC_HD_CTL_RC BIT(4) +/* Remove Layer-2 header from packets from PMAC to DMA */ +#define PMAC_HD_CTL_RL2 BIT(5) +/* Status header is present from DMA to PMAC */ +#define PMAC_HD_CTL_RXSH BIT(6) +/* Add special tag from PMAC to switch */ +#define PMAC_HD_CTL_AST BIT(7) +/* Remove special tag from PMAC to DMA */ +#define PMAC_HD_CTL_RST BIT(8) +/* Check CRC from DMA to PMAC */ +#define PMAC_HD_CTL_CCRC BIT(9) +/* Enable reaction to Pause frames in the PMAC */ +#define PMAC_HD_CTL_FC BIT(10) + +struct xrx200_chan { + int tx_free; + + struct napi_struct napi; + struct ltq_dma_channel dma; + struct sk_buff *skb[LTQ_DESC_NUM]; + + struct xrx200_priv *priv; +}; + +struct xrx200_priv { + struct clk *clk; + + struct xrx200_chan chan_tx; + struct xrx200_chan chan_rx; + + struct net_device *net_dev; + struct 
device *dev; + + __iomem void *pmac_reg; +}; + +static u32 xrx200_pmac_r32(struct xrx200_priv *priv, u32 offset) +{ + return __raw_readl(priv->pmac_reg + offset); +} + +static void xrx200_pmac_w32(struct xrx200_priv *priv, u32 val, u32 offset) +{ + __raw_writel(val, priv->pmac_reg + offset); +} + +static void xrx200_pmac_mask(struct xrx200_priv *priv, u32 clear, u32 set, + u32 offset) +{ + u32 val = xrx200_pmac_r32(priv, offset); + + val &= ~(clear); + val |= set; + xrx200_pmac_w32(priv, val, offset); +} + +/* drop all the packets from the DMA ring */ +static void xrx200_flush_dma(struct xrx200_chan *ch) +{ + int i; + + for (i = 0; i < LTQ_DESC_NUM; i++) { + struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc]; + + if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) != LTQ_DMA_C) + break; + + desc->ctl = LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) | + XRX200_DMA_DATA_LEN; + ch->dma.desc++; + ch->dma.desc %= LTQ_DESC_NUM; + } +} + +static int xrx200_open(struct net_device *net_dev) +{ + struct xrx200_priv *priv = netdev_priv(net_dev); + + napi_enable(&priv->chan_tx.napi); + ltq_dma_open(&priv->chan_tx.dma); + ltq_dma_enable_irq(&priv->chan_tx.dma); + + napi_enable(&priv->chan_rx.napi); + ltq_dma_open(&priv->chan_rx.dma); + /* The boot loader does not always deactivate the receiving of frames + * on the ports and then some packets queue up in the PPE buffers. + * They already passed the PMAC so they do not have the tags + * configured here. Read these packets here and drop them. + * The HW should have written them into memory after 10us + */ + usleep_range(20, 40); + xrx200_flush_dma(&priv->chan_rx); + ltq_dma_enable_irq(&priv->chan_rx.dma); + + netif_wake_queue(net_dev); + + return 0; +} + +static int xrx200_close(struct net_device *net_dev) +{ + struct xrx200_priv *priv = netdev_priv(net_dev); + + netif_stop_queue(net_dev); + + napi_disable(&priv->chan_rx.napi); + ltq_dma_close(&priv->chan_rx.dma); + + napi_disable(&priv->chan_tx.napi); + ltq_dma_close(&priv->chan_tx.dma); + + return 0; +} + +static int xrx200_alloc_skb(struct xrx200_chan *ch) +{ + int ret = 0; + + ch->skb[ch->dma.desc] = netdev_alloc_skb_ip_align(ch->priv->net_dev, + XRX200_DMA_DATA_LEN); + if (!ch->skb[ch->dma.desc]) { + ret = -ENOMEM; + goto skip; + } + + ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(ch->priv->dev, + ch->skb[ch->dma.desc]->data, XRX200_DMA_DATA_LEN, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(ch->priv->dev, + ch->dma.desc_base[ch->dma.desc].addr))) { + dev_kfree_skb_any(ch->skb[ch->dma.desc]); + ret = -ENOMEM; + goto skip; + } + +skip: + ch->dma.desc_base[ch->dma.desc].ctl = + LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) | + XRX200_DMA_DATA_LEN; + + return ret; +} + +static int xrx200_hw_receive(struct xrx200_chan *ch) +{ + struct xrx200_priv *priv = ch->priv; + struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc]; + struct sk_buff *skb = ch->skb[ch->dma.desc]; + int len = (desc->ctl & LTQ_DMA_SIZE_MASK); + struct net_device *net_dev = priv->net_dev; + int ret; + + ret = xrx200_alloc_skb(ch); + + ch->dma.desc++; + ch->dma.desc %= LTQ_DESC_NUM; + + if (ret) { + netdev_err(net_dev, "failed to allocate new rx buffer\n"); + return ret; + } + + skb_put(skb, len); + skb->protocol = eth_type_trans(skb, net_dev); + netif_receive_skb(skb); + net_dev->stats.rx_packets++; + net_dev->stats.rx_bytes += len - ETH_FCS_LEN; + + return 0; +} + +static int xrx200_poll_rx(struct napi_struct *napi, int budget) +{ + struct xrx200_chan *ch = container_of(napi, + struct xrx200_chan, napi); + 
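/* One NAPI pass: consume up to budget completed Rx descriptors,
+ * refilling each slot as it goes, and re-enable the channel
+ * interrupt only once the ring is drained below budget. */
+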
int rx = 0; + int ret; + + while (rx < budget) { + struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc]; + + if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) { + ret = xrx200_hw_receive(ch); + if (ret) + return ret; + rx++; + } else { + break; + } + } + + if (rx < budget) { + napi_complete(&ch->napi); + ltq_dma_enable_irq(&ch->dma); + } + + return rx; +} + +static int xrx200_tx_housekeeping(struct napi_struct *napi, int budget) +{ + struct xrx200_chan *ch = container_of(napi, + struct xrx200_chan, napi); + struct net_device *net_dev = ch->priv->net_dev; + int pkts = 0; + int bytes = 0; + + while (pkts < budget) { + struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->tx_free]; + + if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) { + struct sk_buff *skb = ch->skb[ch->tx_free]; + + pkts++; + bytes += skb->len; + ch->skb[ch->tx_free] = NULL; + consume_skb(skb); + memset(&ch->dma.desc_base[ch->tx_free], 0, + sizeof(struct ltq_dma_desc)); + ch->tx_free++; + ch->tx_free %= LTQ_DESC_NUM; + } else { + break; + } + } + + net_dev->stats.tx_packets += pkts; + net_dev->stats.tx_bytes += bytes; + netdev_completed_queue(ch->priv->net_dev, pkts, bytes); + + if (pkts < budget) { + napi_complete(&ch->napi); + ltq_dma_enable_irq(&ch->dma); + } + + return pkts; +} + +static int xrx200_start_xmit(struct sk_buff *skb, struct net_device *net_dev) +{ + struct xrx200_priv *priv = netdev_priv(net_dev); + struct xrx200_chan *ch = &priv->chan_tx; + struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc]; + u32 byte_offset; + dma_addr_t mapping; + int len; + + skb->dev = net_dev; + if (skb_put_padto(skb, ETH_ZLEN)) { + net_dev->stats.tx_dropped++; + return NETDEV_TX_OK; + } + + len = skb->len; + + if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) { + netdev_err(net_dev, "tx ring full\n"); + netif_stop_queue(net_dev); + return NETDEV_TX_BUSY; + } + + ch->skb[ch->dma.desc] = skb; + + mapping = dma_map_single(priv->dev, skb->data, len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(priv->dev, mapping))) + goto err_drop; + + /* dma needs to start on a 16 byte aligned address */ + byte_offset = mapping % 16; + + desc->addr = mapping - byte_offset; + /* Make sure the address is written before we give it to HW */ + wmb(); + desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP | + LTQ_DMA_TX_OFFSET(byte_offset) | (len & LTQ_DMA_SIZE_MASK); + ch->dma.desc++; + ch->dma.desc %= LTQ_DESC_NUM; + if (ch->dma.desc == ch->tx_free) + netif_stop_queue(net_dev); + + netdev_sent_queue(net_dev, len); + + return NETDEV_TX_OK; + +err_drop: + dev_kfree_skb(skb); + net_dev->stats.tx_dropped++; + net_dev->stats.tx_errors++; + return NETDEV_TX_OK; +} + +static const struct net_device_ops xrx200_netdev_ops = { + .ndo_open = xrx200_open, + .ndo_stop = xrx200_close, + .ndo_start_xmit = xrx200_start_xmit, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, +}; + +static irqreturn_t xrx200_dma_irq(int irq, void *ptr) +{ + struct xrx200_chan *ch = ptr; + + ltq_dma_disable_irq(&ch->dma); + ltq_dma_ack_irq(&ch->dma); + + napi_schedule(&ch->napi); + + return IRQ_HANDLED; +} + +static int xrx200_dma_init(struct xrx200_priv *priv) +{ + struct xrx200_chan *ch_rx = &priv->chan_rx; + struct xrx200_chan *ch_tx = &priv->chan_tx; + int ret = 0; + int i; + + ltq_dma_init_port(DMA_PORT_ETOP); + + ch_rx->dma.nr = XRX200_DMA_RX; + ch_rx->dma.dev = priv->dev; + ch_rx->priv = priv; + + ltq_dma_alloc_rx(&ch_rx->dma); + for (ch_rx->dma.desc = 0; 
ch_rx->dma.desc < LTQ_DESC_NUM; + ch_rx->dma.desc++) { + ret = xrx200_alloc_skb(ch_rx); + if (ret) + goto rx_free; + } + ch_rx->dma.desc = 0; + ret = devm_request_irq(priv->dev, ch_rx->dma.irq, xrx200_dma_irq, 0, + "xrx200_net_rx", &priv->chan_rx); + if (ret) { + dev_err(priv->dev, "failed to request RX irq %d\n", + ch_rx->dma.irq); + goto rx_ring_free; + } + + ch_tx->dma.nr = XRX200_DMA_TX; + ch_tx->dma.dev = priv->dev; + ch_tx->priv = priv; + + ltq_dma_alloc_tx(&ch_tx->dma); + ret = devm_request_irq(priv->dev, ch_tx->dma.irq, xrx200_dma_irq, 0, + "xrx200_net_tx", &priv->chan_tx); + if (ret) { + dev_err(priv->dev, "failed to request TX irq %d\n", + ch_tx->dma.irq); + goto tx_free; + } + + return ret; + +tx_free: + ltq_dma_free(&ch_tx->dma); + +rx_ring_free: + /* free the allocated RX ring */ + for (i = 0; i < LTQ_DESC_NUM; i++) { + if (priv->chan_rx.skb[i]) + dev_kfree_skb_any(priv->chan_rx.skb[i]); + } + +rx_free: + ltq_dma_free(&ch_rx->dma); + return ret; +} + +static void xrx200_hw_cleanup(struct xrx200_priv *priv) +{ + int i; + + ltq_dma_free(&priv->chan_tx.dma); + ltq_dma_free(&priv->chan_rx.dma); + + /* free the allocated RX ring */ + for (i = 0; i < LTQ_DESC_NUM; i++) + dev_kfree_skb_any(priv->chan_rx.skb[i]); +} + +static int xrx200_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + struct resource *res; + struct xrx200_priv *priv; + struct net_device *net_dev; + const u8 *mac; + int err; + + /* alloc the network device */ + net_dev = devm_alloc_etherdev(dev, sizeof(struct xrx200_priv)); + if (!net_dev) + return -ENOMEM; + + priv = netdev_priv(net_dev); + priv->net_dev = net_dev; + priv->dev = dev; + + net_dev->netdev_ops = &xrx200_netdev_ops; + SET_NETDEV_DEV(net_dev, dev); + net_dev->min_mtu = ETH_ZLEN; + net_dev->max_mtu = XRX200_DMA_DATA_LEN; + + /* load the memory ranges */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(dev, "failed to get resources\n"); + return -ENOENT; + } + + priv->pmac_reg = devm_ioremap_resource(dev, res); + if (IS_ERR(priv->pmac_reg)) { + dev_err(dev, "failed to request and remap io ranges\n"); + return PTR_ERR(priv->pmac_reg); + } + + priv->chan_rx.dma.irq = platform_get_irq_byname(pdev, "rx"); + if (priv->chan_rx.dma.irq < 0) { + dev_err(dev, "failed to get RX IRQ, %i\n", + priv->chan_rx.dma.irq); + return -ENOENT; + } + priv->chan_tx.dma.irq = platform_get_irq_byname(pdev, "tx"); + if (priv->chan_tx.dma.irq < 0) { + dev_err(dev, "failed to get TX IRQ, %i\n", + priv->chan_tx.dma.irq); + return -ENOENT; + } + + /* get the clock */ + priv->clk = devm_clk_get(dev, NULL); + if (IS_ERR(priv->clk)) { + dev_err(dev, "failed to get clock\n"); + return PTR_ERR(priv->clk); + } + + mac = of_get_mac_address(np); + if (mac && is_valid_ether_addr(mac)) + ether_addr_copy(net_dev->dev_addr, mac); + else + eth_hw_addr_random(net_dev); + + /* bring up the dma engine and IP core */ + err = xrx200_dma_init(priv); + if (err) + return err; + + /* enable clock gate */ + err = clk_prepare_enable(priv->clk); + if (err) + goto err_uninit_dma; + + /* set IPG to 12 */ + xrx200_pmac_mask(priv, PMAC_RX_IPG_MASK, 0xb, PMAC_RX_IPG); + + /* enable status header, enable CRC */ + xrx200_pmac_mask(priv, 0, + PMAC_HD_CTL_RST | PMAC_HD_CTL_AST | PMAC_HD_CTL_RXSH | + PMAC_HD_CTL_AS | PMAC_HD_CTL_AC | PMAC_HD_CTL_RC, + PMAC_HD_CTL); + + /* setup NAPI */ + netif_napi_add(net_dev, &priv->chan_rx.napi, xrx200_poll_rx, 32); + netif_napi_add(net_dev, &priv->chan_tx.napi, xrx200_tx_housekeeping, 
32); + + platform_set_drvdata(pdev, priv); + + err = register_netdev(net_dev); + if (err) + goto err_unprepare_clk; + return 0; + +err_unprepare_clk: + clk_disable_unprepare(priv->clk); + +err_uninit_dma: + xrx200_hw_cleanup(priv); + + return err; +} + +static int xrx200_remove(struct platform_device *pdev) +{ + struct xrx200_priv *priv = platform_get_drvdata(pdev); + struct net_device *net_dev = priv->net_dev; + + /* free stack related instances */ + netif_stop_queue(net_dev); + netif_napi_del(&priv->chan_tx.napi); + netif_napi_del(&priv->chan_rx.napi); + + /* remove the actual device */ + unregister_netdev(net_dev); + + /* release the clock */ + clk_disable_unprepare(priv->clk); + + /* shut down hardware */ + xrx200_hw_cleanup(priv); + + return 0; +} + +static const struct of_device_id xrx200_match[] = { + { .compatible = "lantiq,xrx200-net" }, + {}, +}; +MODULE_DEVICE_TABLE(of, xrx200_match); + +static struct platform_driver xrx200_driver = { + .probe = xrx200_probe, + .remove = xrx200_remove, + .driver = { + .name = "lantiq,xrx200-net", + .of_match_table = xrx200_match, + }, +}; + +module_platform_driver(xrx200_driver); + +MODULE_AUTHOR("John Crispin <john@phrozen.org>"); +MODULE_DESCRIPTION("Lantiq SoC XRX200 ethernet"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig index f33fd22b351c..3238aa7f5dac 100644 --- a/drivers/net/ethernet/marvell/Kconfig +++ b/drivers/net/ethernet/marvell/Kconfig @@ -167,4 +167,7 @@ config SKY2_DEBUG If unsure, say N. + +source "drivers/net/ethernet/marvell/octeontx2/Kconfig" + endif # NET_VENDOR_MARVELL diff --git a/drivers/net/ethernet/marvell/Makefile b/drivers/net/ethernet/marvell/Makefile index 55d4d10aa7d3..89dea7284d5b 100644 --- a/drivers/net/ethernet/marvell/Makefile +++ b/drivers/net/ethernet/marvell/Makefile @@ -11,3 +11,4 @@ obj-$(CONFIG_MVPP2) += mvpp2/ obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o obj-$(CONFIG_SKGE) += skge.o obj-$(CONFIG_SKY2) += sky2.o +obj-y += octeontx2/ diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 62f204f32316..1e9bcbdc6a90 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -2733,17 +2733,17 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev, memset(&res, 0, sizeof(res)); if (of_irq_to_resource(pnp, 0, &res) <= 0) { - dev_err(&pdev->dev, "missing interrupt on %s\n", pnp->name); + dev_err(&pdev->dev, "missing interrupt on %pOFn\n", pnp); return -EINVAL; } if (of_property_read_u32(pnp, "reg", &ppd.port_number)) { - dev_err(&pdev->dev, "missing reg property on %s\n", pnp->name); + dev_err(&pdev->dev, "missing reg property on %pOFn\n", pnp); return -EINVAL; } if (ppd.port_number >= 3) { - dev_err(&pdev->dev, "invalid reg property on %s\n", pnp->name); + dev_err(&pdev->dev, "invalid reg property on %pOFn\n", pnp); return -EINVAL; } diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index b4ed7d394d07..5bfd349bf41a 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -221,6 +221,8 @@ #define MVNETA_GMAC_AN_FLOW_CTRL_EN BIT(11) #define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12) #define MVNETA_GMAC_AN_DUPLEX_EN BIT(13) +#define MVNETA_GMAC_CTRL_4 0x2c90 +#define MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE BIT(1) #define MVNETA_MIB_COUNTERS_BASE 0x3000 #define MVNETA_MIB_LATE_COLLISION 0x7c #define MVNETA_DA_FILT_SPEC_MCAST 0x3400 @@ -2064,10 +2066,7 @@ static 
int mvneta_rx_swbm(struct napi_struct *napi, /* Linux processing */ rxq->skb->protocol = eth_type_trans(rxq->skb, dev); - if (dev->features & NETIF_F_GRO) - napi_gro_receive(napi, rxq->skb); - else - netif_receive_skb(rxq->skb); + napi_gro_receive(napi, rxq->skb); /* clean uncomplete skb pointer in queue */ rxq->skb = NULL; @@ -2395,7 +2394,7 @@ error: } /* Main tx processing */ -static int mvneta_tx(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev) { struct mvneta_port *pp = netdev_priv(dev); u16 txq_id = skb_get_queue_mapping(skb); @@ -2509,12 +2508,13 @@ static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done) { struct mvneta_tx_queue *txq; struct netdev_queue *nq; + int cpu = smp_processor_id(); while (cause_tx_done) { txq = mvneta_tx_done_policy(pp, cause_tx_done); nq = netdev_get_tx_queue(pp->dev, txq->id); - __netif_tx_lock(nq, smp_processor_id()); + __netif_tx_lock(nq, cpu); if (txq->count) mvneta_txq_done(pp, txq); @@ -3343,6 +3343,7 @@ static void mvneta_validate(struct net_device *ndev, unsigned long *supported, if (state->interface != PHY_INTERFACE_MODE_NA && state->interface != PHY_INTERFACE_MODE_QSGMII && state->interface != PHY_INTERFACE_MODE_SGMII && + state->interface != PHY_INTERFACE_MODE_2500BASEX && !phy_interface_mode_is_8023z(state->interface) && !phy_interface_mode_is_rgmii(state->interface)) { bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); @@ -3355,9 +3356,15 @@ static void mvneta_validate(struct net_device *ndev, unsigned long *supported, /* Asymmetric pause is unsupported */ phylink_set(mask, Pause); - /* Half-duplex at speeds higher than 100Mbit is unsupported */ - phylink_set(mask, 1000baseT_Full); - phylink_set(mask, 1000baseX_Full); + + /* We cannot use 1Gbps when using the 2.5G interface. 
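The validate hook being extended here derives the advertised link-mode mask from the MAC-side interface, so the 2.5G and 1G serdes modes never appear together. A toy bitmask model of that decision, covering only the modes relevant to this hunk and not the real phylink/ethtool bitmap API:

    #include <stdio.h>

    #define M_1000_FULL (1u << 0)
    #define M_2500_FULL (1u << 1)
    #define M_PAUSE     (1u << 2)

    enum iface { IF_SGMII, IF_2500BASEX };

    static unsigned int validate(enum iface mode)
    {
            unsigned int mask = M_PAUSE;    /* pause is always offered */

            if (mode == IF_2500BASEX)
                    mask |= M_2500_FULL;    /* 2.5G serdes: no 1G modes */
            else
                    mask |= M_1000_FULL;
            return mask;
    }

    int main(void)
    {
            printf("sgmii mask=%#x 2500base-x mask=%#x\n",
                   validate(IF_SGMII), validate(IF_2500BASEX));
            return 0;
    }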
*/ + if (state->interface == PHY_INTERFACE_MODE_2500BASEX) { + phylink_set(mask, 2500baseT_Full); + phylink_set(mask, 2500baseX_Full); + } else { + phylink_set(mask, 1000baseT_Full); + phylink_set(mask, 1000baseX_Full); + } if (!phy_interface_mode_is_8023z(state->interface)) { /* 10M and 100M are only supported in non-802.3z mode */ @@ -3418,12 +3425,14 @@ static void mvneta_mac_config(struct net_device *ndev, unsigned int mode, struct mvneta_port *pp = netdev_priv(ndev); u32 new_ctrl0, gmac_ctrl0 = mvreg_read(pp, MVNETA_GMAC_CTRL_0); u32 new_ctrl2, gmac_ctrl2 = mvreg_read(pp, MVNETA_GMAC_CTRL_2); + u32 new_ctrl4, gmac_ctrl4 = mvreg_read(pp, MVNETA_GMAC_CTRL_4); u32 new_clk, gmac_clk = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER); u32 new_an, gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); new_ctrl0 = gmac_ctrl0 & ~MVNETA_GMAC0_PORT_1000BASE_X; new_ctrl2 = gmac_ctrl2 & ~(MVNETA_GMAC2_INBAND_AN_ENABLE | MVNETA_GMAC2_PORT_RESET); + new_ctrl4 = gmac_ctrl4 & ~(MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE); new_clk = gmac_clk & ~MVNETA_GMAC_1MS_CLOCK_ENABLE; new_an = gmac_an & ~(MVNETA_GMAC_INBAND_AN_ENABLE | MVNETA_GMAC_INBAND_RESTART_AN | @@ -3456,7 +3465,7 @@ static void mvneta_mac_config(struct net_device *ndev, unsigned int mode, if (state->duplex) new_an |= MVNETA_GMAC_CONFIG_FULL_DUPLEX; - if (state->speed == SPEED_1000) + if (state->speed == SPEED_1000 || state->speed == SPEED_2500) new_an |= MVNETA_GMAC_CONFIG_GMII_SPEED; else if (state->speed == SPEED_100) new_an |= MVNETA_GMAC_CONFIG_MII_SPEED; @@ -3495,10 +3504,18 @@ static void mvneta_mac_config(struct net_device *ndev, unsigned int mode, MVNETA_GMAC_FORCE_LINK_DOWN); } + /* When at 2.5G, the link partner can send frames with shortened + * preambles. + */ + if (state->speed == SPEED_2500) + new_ctrl4 |= MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE; + if (new_ctrl0 != gmac_ctrl0) mvreg_write(pp, MVNETA_GMAC_CTRL_0, new_ctrl0); if (new_ctrl2 != gmac_ctrl2) mvreg_write(pp, MVNETA_GMAC_CTRL_2, new_ctrl2); + if (new_ctrl4 != gmac_ctrl4) + mvreg_write(pp, MVNETA_GMAC_CTRL_4, new_ctrl4); if (new_clk != gmac_clk) mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, new_clk); if (new_an != gmac_an) @@ -3792,9 +3809,6 @@ static int mvneta_open(struct net_device *dev) goto err_free_online_hp; } - /* In default link is down */ - netif_carrier_off(pp->dev); - ret = mvneta_mdio_probe(pp); if (ret < 0) { netdev_err(dev, "cannot probe MDIO bus\n"); @@ -4597,7 +4611,8 @@ static int mvneta_probe(struct platform_device *pdev) } } - dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_TSO; + dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_TSO | NETIF_F_RXCSUM; dev->hw_features |= dev->features; dev->vlan_features |= dev->features; dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h index 67b9e81b7c02..176c6b56fdcc 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h @@ -253,7 +253,8 @@ #define MVPP2_ISR_ENABLE_INTERRUPT(mask) ((mask) & 0xffff) #define MVPP2_ISR_DISABLE_INTERRUPT(mask) (((mask) << 16) & 0xffff0000) #define MVPP2_ISR_RX_TX_CAUSE_REG(port) (0x5480 + 4 * (port)) -#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff +#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(version) \ + ((version) == MVPP21 ? 
0xffff : 0xff) #define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK 0xff0000 #define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET 16 #define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK BIT(24) @@ -330,6 +331,7 @@ #define MVPP2_TXP_SCHED_ENQ_MASK 0xff #define MVPP2_TXP_SCHED_DISQ_OFFSET 8 #define MVPP2_TXP_SCHED_CMD_1_REG 0x8010 +#define MVPP2_TXP_SCHED_FIXED_PRIO_REG 0x8014 #define MVPP2_TXP_SCHED_PERIOD_REG 0x8018 #define MVPP2_TXP_SCHED_MTU_REG 0x801c #define MVPP2_TXP_MTU_MAX 0x7FFFF @@ -613,6 +615,7 @@ /* Port flags */ #define MVPP2_F_LOOPBACK BIT(0) +#define MVPP2_F_DT_COMPAT BIT(1) /* Marvell tag types */ enum mvpp2_tag_type { @@ -662,7 +665,7 @@ enum mvpp2_prs_l3_cast { #define MVPP21_ADDR_SPACE_SZ 0 #define MVPP22_ADDR_SPACE_SZ SZ_64K -#define MVPP2_MAX_THREADS 8 +#define MVPP2_MAX_THREADS 9 #define MVPP2_MAX_QVECS MVPP2_MAX_THREADS /* GMAC MIB Counters register definitions */ @@ -734,6 +737,11 @@ struct mvpp2 { int port_count; struct mvpp2_port *port_list[MVPP2_MAX_PORTS]; + /* Number of Tx threads used */ + unsigned int nthreads; + /* Map of threads needing locking */ + unsigned long lock_map; + /* Aggregated TXQs */ struct mvpp2_tx_queue *aggr_txqs; @@ -823,6 +831,12 @@ struct mvpp2_port { /* Per-CPU port control */ struct mvpp2_port_pcpu __percpu *pcpu; + /* Protect the BM refills and the Tx paths when a thread is used on more + * than a single CPU. + */ + spinlock_t bm_lock[MVPP2_MAX_THREADS]; + spinlock_t tx_lock[MVPP2_MAX_THREADS]; + /* Flags */ unsigned long flags; @@ -969,7 +983,7 @@ struct mvpp2_txq_pcpu_buf { /* Per-CPU Tx queue control */ struct mvpp2_txq_pcpu { - int cpu; + unsigned int thread; /* Number of Tx DMA descriptors in the descriptor ring */ int size; @@ -1095,14 +1109,6 @@ struct mvpp2_bm_pool { void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data); u32 mvpp2_read(struct mvpp2 *priv, u32 offset); -u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset); - -void mvpp2_percpu_write(struct mvpp2 *priv, int cpu, u32 offset, u32 data); -u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu, u32 offset); - -void mvpp2_percpu_write_relaxed(struct mvpp2 *priv, int cpu, u32 offset, - u32 data); - void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name); void mvpp2_dbgfs_cleanup(struct mvpp2 *priv); diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index a74002b43b51..14f9679c957c 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -82,13 +82,19 @@ u32 mvpp2_read(struct mvpp2 *priv, u32 offset) return readl(priv->swth_base[0] + offset); } -u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset) +static u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset) { return readl_relaxed(priv->swth_base[0] + offset); } + +static inline u32 mvpp2_cpu_to_thread(struct mvpp2 *priv, int cpu) +{ + return cpu % priv->nthreads; +} + /* These accessors should be used to access: * - * - per-CPU registers, where each CPU has its own copy of the + * - per-thread registers, where each thread has its own copy of the * register. 
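+ *     (a "thread" here is one of the per-thread register windows in
+ *     swth_base[]; mvpp2_cpu_to_thread() above folds the CPU number
+ *     into that range)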
* * MVPP2_BM_VIRT_ALLOC_REG @@ -104,8 +110,8 @@ u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset) * MVPP2_TXQ_SENT_REG * MVPP2_RXQ_NUM_REG * - * - global registers that must be accessed through a specific CPU - * window, because they are related to an access to a per-CPU + * - global registers that must be accessed through a specific thread + * window, because they are related to an access to a per-thread * register * * MVPP2_BM_PHY_ALLOC_REG (related to MVPP2_BM_VIRT_ALLOC_REG) @@ -122,28 +128,28 @@ u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset) * MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG) * MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG) */ -void mvpp2_percpu_write(struct mvpp2 *priv, int cpu, +static void mvpp2_thread_write(struct mvpp2 *priv, unsigned int thread, u32 offset, u32 data) { - writel(data, priv->swth_base[cpu] + offset); + writel(data, priv->swth_base[thread] + offset); } -u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu, +static u32 mvpp2_thread_read(struct mvpp2 *priv, unsigned int thread, u32 offset) { - return readl(priv->swth_base[cpu] + offset); + return readl(priv->swth_base[thread] + offset); } -void mvpp2_percpu_write_relaxed(struct mvpp2 *priv, int cpu, +static void mvpp2_thread_write_relaxed(struct mvpp2 *priv, unsigned int thread, u32 offset, u32 data) { - writel_relaxed(data, priv->swth_base[cpu] + offset); + writel_relaxed(data, priv->swth_base[thread] + offset); } -static u32 mvpp2_percpu_read_relaxed(struct mvpp2 *priv, int cpu, +static u32 mvpp2_thread_read_relaxed(struct mvpp2 *priv, unsigned int thread, u32 offset) { - return readl_relaxed(priv->swth_base[cpu] + offset); + return readl_relaxed(priv->swth_base[thread] + offset); } static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port, @@ -385,17 +391,17 @@ static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv, dma_addr_t *dma_addr, phys_addr_t *phys_addr) { - int cpu = get_cpu(); + unsigned int thread = mvpp2_cpu_to_thread(priv, get_cpu()); - *dma_addr = mvpp2_percpu_read(priv, cpu, + *dma_addr = mvpp2_thread_read(priv, thread, MVPP2_BM_PHY_ALLOC_REG(bm_pool->id)); - *phys_addr = mvpp2_percpu_read(priv, cpu, MVPP2_BM_VIRT_ALLOC_REG); + *phys_addr = mvpp2_thread_read(priv, thread, MVPP2_BM_VIRT_ALLOC_REG); if (priv->hw_version == MVPP22) { u32 val; u32 dma_addr_highbits, phys_addr_highbits; - val = mvpp2_percpu_read(priv, cpu, MVPP22_BM_ADDR_HIGH_ALLOC); + val = mvpp2_thread_read(priv, thread, MVPP22_BM_ADDR_HIGH_ALLOC); dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK); phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >> MVPP22_BM_ADDR_HIGH_VIRT_SHIFT; @@ -626,7 +632,11 @@ static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool, dma_addr_t buf_dma_addr, phys_addr_t buf_phys_addr) { - int cpu = get_cpu(); + unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); + unsigned long flags = 0; + + if (test_bit(thread, &port->priv->lock_map)) + spin_lock_irqsave(&port->bm_lock[thread], flags); if (port->priv->hw_version == MVPP22) { u32 val = 0; @@ -640,7 +650,7 @@ static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool, << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) & MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK; - mvpp2_percpu_write_relaxed(port->priv, cpu, + mvpp2_thread_write_relaxed(port->priv, thread, MVPP22_BM_ADDR_HIGH_RLS_REG, val); } @@ -649,11 +659,14 @@ static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool, * descriptor. 
Instead of storing the virtual address, we * store the physical address */ - mvpp2_percpu_write_relaxed(port->priv, cpu, + mvpp2_thread_write_relaxed(port->priv, thread, MVPP2_BM_VIRT_RLS_REG, buf_phys_addr); - mvpp2_percpu_write_relaxed(port->priv, cpu, + mvpp2_thread_write_relaxed(port->priv, thread, MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr); + if (test_bit(thread, &port->priv->lock_map)) + spin_unlock_irqrestore(&port->bm_lock[thread], flags); + put_cpu(); } @@ -886,7 +899,7 @@ static inline void mvpp2_qvec_interrupt_disable(struct mvpp2_queue_vector *qvec) MVPP2_ISR_DISABLE_INTERRUPT(qvec->sw_thread_mask)); } -/* Mask the current CPU's Rx/Tx interrupts +/* Mask the current thread's Rx/Tx interrupts * Called by on_each_cpu(), guaranteed to run with migration disabled, * using smp_processor_id() is OK. */ @@ -894,11 +907,16 @@ static void mvpp2_interrupts_mask(void *arg) { struct mvpp2_port *port = arg; - mvpp2_percpu_write(port->priv, smp_processor_id(), + /* If the thread isn't used, don't do anything */ + if (smp_processor_id() > port->priv->nthreads) + return; + + mvpp2_thread_write(port->priv, + mvpp2_cpu_to_thread(port->priv, smp_processor_id()), MVPP2_ISR_RX_TX_MASK_REG(port->id), 0); } -/* Unmask the current CPU's Rx/Tx interrupts. +/* Unmask the current thread's Rx/Tx interrupts. * Called by on_each_cpu(), guaranteed to run with migration disabled, * using smp_processor_id() is OK. */ @@ -907,12 +925,17 @@ static void mvpp2_interrupts_unmask(void *arg) struct mvpp2_port *port = arg; u32 val; + /* If the thread isn't used, don't do anything */ + if (smp_processor_id() > port->priv->nthreads) + return; + val = MVPP2_CAUSE_MISC_SUM_MASK | - MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK; + MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version); if (port->has_tx_irqs) val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; - mvpp2_percpu_write(port->priv, smp_processor_id(), + mvpp2_thread_write(port->priv, + mvpp2_cpu_to_thread(port->priv, smp_processor_id()), MVPP2_ISR_RX_TX_MASK_REG(port->id), val); } @@ -928,7 +951,7 @@ mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask) if (mask) val = 0; else - val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK; + val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(MVPP22); for (i = 0; i < port->nqvecs; i++) { struct mvpp2_queue_vector *v = port->qvecs + i; @@ -936,7 +959,7 @@ mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask) if (v->type != MVPP2_QUEUE_VECTOR_SHARED) continue; - mvpp2_percpu_write(port->priv, v->sw_thread_id, + mvpp2_thread_write(port->priv, v->sw_thread_id, MVPP2_ISR_RX_TX_MASK_REG(port->id), val); } } @@ -1425,6 +1448,9 @@ static void mvpp2_defaults_set(struct mvpp2_port *port) tx_port_num); mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0); + /* Set TXQ scheduling to Round-Robin */ + mvpp2_write(port->priv, MVPP2_TXP_SCHED_FIXED_PRIO_REG, 0); + /* Close bandwidth for all queues */ for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) { ptxq = mvpp2_txq_phys(port->id, queue); @@ -1624,7 +1650,8 @@ mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq) static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending) { /* aggregated access - relevant TXQ number is written in TX desc */ - mvpp2_percpu_write(port->priv, smp_processor_id(), + mvpp2_thread_write(port->priv, + mvpp2_cpu_to_thread(port->priv, smp_processor_id()), MVPP2_AGGR_TXQ_UPDATE_REG, pending); } @@ -1634,14 +1661,15 @@ static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending) * Called only from mvpp2_tx(), so migration is 
disabled, using * smp_processor_id() is OK. */ -static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv, +static int mvpp2_aggr_desc_num_check(struct mvpp2_port *port, struct mvpp2_tx_queue *aggr_txq, int num) { if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) { /* Update number of occupied aggregated Tx descriptors */ - int cpu = smp_processor_id(); - u32 val = mvpp2_read_relaxed(priv, - MVPP2_AGGR_TXQ_STATUS_REG(cpu)); + unsigned int thread = + mvpp2_cpu_to_thread(port->priv, smp_processor_id()); + u32 val = mvpp2_read_relaxed(port->priv, + MVPP2_AGGR_TXQ_STATUS_REG(thread)); aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK; @@ -1657,16 +1685,17 @@ static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv, * only by mvpp2_tx(), so migration is disabled, using * smp_processor_id() is OK. */ -static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv, +static int mvpp2_txq_alloc_reserved_desc(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, int num) { + unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); + struct mvpp2 *priv = port->priv; u32 val; - int cpu = smp_processor_id(); val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num; - mvpp2_percpu_write_relaxed(priv, cpu, MVPP2_TXQ_RSVD_REQ_REG, val); + mvpp2_thread_write_relaxed(priv, thread, MVPP2_TXQ_RSVD_REQ_REG, val); - val = mvpp2_percpu_read_relaxed(priv, cpu, MVPP2_TXQ_RSVD_RSLT_REG); + val = mvpp2_thread_read_relaxed(priv, thread, MVPP2_TXQ_RSVD_RSLT_REG); return val & MVPP2_TXQ_RSVD_RSLT_MASK; } @@ -1674,12 +1703,13 @@ static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv, /* Check if there are enough reserved descriptors for transmission. * If not, request chunk of reserved descriptors and check again. */ -static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv, +static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, struct mvpp2_txq_pcpu *txq_pcpu, int num) { - int req, cpu, desc_count; + int req, desc_count; + unsigned int thread; if (txq_pcpu->reserved_num >= num) return 0; @@ -1690,10 +1720,10 @@ static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv, desc_count = 0; /* Compute total of used descriptors */ - for_each_present_cpu(cpu) { + for (thread = 0; thread < port->priv->nthreads; thread++) { struct mvpp2_txq_pcpu *txq_pcpu_aux; - txq_pcpu_aux = per_cpu_ptr(txq->pcpu, cpu); + txq_pcpu_aux = per_cpu_ptr(txq->pcpu, thread); desc_count += txq_pcpu_aux->count; desc_count += txq_pcpu_aux->reserved_num; } @@ -1702,10 +1732,10 @@ static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv, desc_count += req; if (desc_count > - (txq->size - (num_present_cpus() * MVPP2_CPU_DESC_CHUNK))) + (txq->size - (MVPP2_MAX_THREADS * MVPP2_CPU_DESC_CHUNK))) return -ENOMEM; - txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(priv, txq, req); + txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(port, txq, req); /* OK, the descriptor could have been updated: check again. */ if (txq_pcpu->reserved_num < num) @@ -1759,7 +1789,7 @@ static u32 mvpp2_txq_desc_csum(int l3_offs, __be16 l3_proto, /* Get number of sent descriptors and decrement counter. * The number of sent descriptors is returned. 
- * Per-CPU access + * Per-thread access * * Called only from mvpp2_txq_done(), called from mvpp2_tx() * (migration disabled) and from the TX completion tasklet (migration @@ -1771,7 +1801,8 @@ static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port, u32 val; /* Reading status reg resets transmitted descriptor counter */ - val = mvpp2_percpu_read_relaxed(port->priv, smp_processor_id(), + val = mvpp2_thread_read_relaxed(port->priv, + mvpp2_cpu_to_thread(port->priv, smp_processor_id()), MVPP2_TXQ_SENT_REG(txq->id)); return (val & MVPP2_TRANSMITTED_COUNT_MASK) >> @@ -1786,10 +1817,15 @@ static void mvpp2_txq_sent_counter_clear(void *arg) struct mvpp2_port *port = arg; int queue; + /* If the thread isn't used, don't do anything */ + if (smp_processor_id() > port->priv->nthreads) + return; + for (queue = 0; queue < port->ntxqs; queue++) { int id = port->txqs[queue]->id; - mvpp2_percpu_read(port->priv, smp_processor_id(), + mvpp2_thread_read(port->priv, + mvpp2_cpu_to_thread(port->priv, smp_processor_id()), MVPP2_TXQ_SENT_REG(id)); } } @@ -1849,13 +1885,13 @@ static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port) static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq) { - int cpu = get_cpu(); + unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK) rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK; - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_THRESH_REG, + mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id); + mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_THRESH_REG, rxq->pkts_coal); put_cpu(); @@ -1865,15 +1901,15 @@ static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port, static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port, struct mvpp2_tx_queue *txq) { - int cpu = get_cpu(); + unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); u32 val; if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK) txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK; val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_THRESH_REG, val); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_THRESH_REG, val); put_cpu(); } @@ -1974,7 +2010,7 @@ static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id); int tx_done; - if (txq_pcpu->cpu != smp_processor_id()) + if (txq_pcpu->thread != mvpp2_cpu_to_thread(port->priv, smp_processor_id())) netdev_err(port->dev, "wrong cpu on the end of Tx processing\n"); tx_done = mvpp2_txq_sent_desc_proc(port, txq); @@ -1990,7 +2026,7 @@ static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, } static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause, - int cpu) + unsigned int thread) { struct mvpp2_tx_queue *txq; struct mvpp2_txq_pcpu *txq_pcpu; @@ -2001,7 +2037,7 @@ static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause, if (!txq) break; - txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); + txq_pcpu = per_cpu_ptr(txq->pcpu, thread); if (txq_pcpu->count) { mvpp2_txq_done(port, txq, txq_pcpu); @@ -2017,8 +2053,8 @@ static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause, /* Allocate and initialize descriptors for aggr TXQ */ static int mvpp2_aggr_txq_init(struct 
platform_device *pdev, - struct mvpp2_tx_queue *aggr_txq, int cpu, - struct mvpp2 *priv) + struct mvpp2_tx_queue *aggr_txq, + unsigned int thread, struct mvpp2 *priv) { u32 txq_dma; @@ -2033,7 +2069,7 @@ static int mvpp2_aggr_txq_init(struct platform_device *pdev, /* Aggr TXQ no reset WA */ aggr_txq->next_desc_to_proc = mvpp2_read(priv, - MVPP2_AGGR_TXQ_INDEX_REG(cpu)); + MVPP2_AGGR_TXQ_INDEX_REG(thread)); /* Set Tx descriptors queue starting address indirect * access @@ -2044,8 +2080,8 @@ static int mvpp2_aggr_txq_init(struct platform_device *pdev, txq_dma = aggr_txq->descs_dma >> MVPP22_AGGR_TXQ_DESC_ADDR_OFFS; - mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma); - mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), + mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(thread), txq_dma); + mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(thread), MVPP2_AGGR_TXQ_SIZE); return 0; @@ -2056,8 +2092,8 @@ static int mvpp2_rxq_init(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq) { + unsigned int thread; u32 rxq_dma; - int cpu; rxq->size = port->rx_ring_size; @@ -2074,15 +2110,15 @@ static int mvpp2_rxq_init(struct mvpp2_port *port, mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); /* Set Rx descriptors queue starting address - indirect access */ - cpu = get_cpu(); - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); + thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); + mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id); if (port->priv->hw_version == MVPP21) rxq_dma = rxq->descs_dma; else rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS; - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma); - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, rxq->size); - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_INDEX_REG, 0); + mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma); + mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, rxq->size); + mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_INDEX_REG, 0); put_cpu(); /* Set Offset */ @@ -2127,7 +2163,7 @@ static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port, static void mvpp2_rxq_deinit(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq) { - int cpu; + unsigned int thread; mvpp2_rxq_drop_pkts(port, rxq); @@ -2146,10 +2182,10 @@ static void mvpp2_rxq_deinit(struct mvpp2_port *port, * free descriptor number */ mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); - cpu = get_cpu(); - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, 0); - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, 0); + thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); + mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id); + mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, 0); + mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, 0); put_cpu(); } @@ -2158,7 +2194,8 @@ static int mvpp2_txq_init(struct mvpp2_port *port, struct mvpp2_tx_queue *txq) { u32 val; - int cpu, desc, desc_per_txq, tx_port_num; + unsigned int thread; + int desc, desc_per_txq, tx_port_num; struct mvpp2_txq_pcpu *txq_pcpu; txq->size = port->tx_ring_size; @@ -2173,18 +2210,18 @@ static int mvpp2_txq_init(struct mvpp2_port *port, txq->last_desc = txq->size - 1; /* Set Tx descriptors queue starting address - indirect access */ - cpu = get_cpu(); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); - mvpp2_percpu_write(port->priv, cpu, 
MVPP2_TXQ_DESC_ADDR_REG, + thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_dma); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG, txq->size & MVPP2_TXQ_DESC_SIZE_MASK); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_INDEX_REG, 0); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_RSVD_CLR_REG, + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_INDEX_REG, 0); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_RSVD_CLR_REG, txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET); - val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PENDING_REG); + val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PENDING_REG); val &= ~MVPP2_TXQ_PENDING_MASK; - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PENDING_REG, val); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PENDING_REG, val); /* Calculate base address in prefetch buffer. We reserve 16 descriptors * for each existing TXQ. @@ -2195,7 +2232,7 @@ static int mvpp2_txq_init(struct mvpp2_port *port, desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) + (txq->log_id * desc_per_txq); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 | MVPP2_PREF_BUF_THRESH(desc_per_txq / 2)); put_cpu(); @@ -2214,8 +2251,8 @@ static int mvpp2_txq_init(struct mvpp2_port *port, mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id), val); - for_each_present_cpu(cpu) { - txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); + for (thread = 0; thread < port->priv->nthreads; thread++) { + txq_pcpu = per_cpu_ptr(txq->pcpu, thread); txq_pcpu->size = txq->size; txq_pcpu->buffs = kmalloc_array(txq_pcpu->size, sizeof(*txq_pcpu->buffs), @@ -2249,10 +2286,10 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port, struct mvpp2_tx_queue *txq) { struct mvpp2_txq_pcpu *txq_pcpu; - int cpu; + unsigned int thread; - for_each_present_cpu(cpu) { - txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); + for (thread = 0; thread < port->priv->nthreads; thread++) { + txq_pcpu = per_cpu_ptr(txq->pcpu, thread); kfree(txq_pcpu->buffs); if (txq_pcpu->tso_headers) @@ -2278,10 +2315,10 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port, mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0); /* Set Tx descriptors queue starting address and size */ - cpu = get_cpu(); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, 0); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, 0); + thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG, 0); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG, 0); put_cpu(); } @@ -2289,14 +2326,14 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port, static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq) { struct mvpp2_txq_pcpu *txq_pcpu; - int delay, pending, cpu; + int delay, pending; + unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); u32 val; - cpu = get_cpu(); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); - val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, 
txq->id); + val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG); val |= MVPP2_TXQ_DRAIN_EN_MASK; - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val); /* The napi queue has been stopped so wait for all packets * to be transmitted. @@ -2312,17 +2349,17 @@ static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq) mdelay(1); delay++; - pending = mvpp2_percpu_read(port->priv, cpu, + pending = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PENDING_REG); pending &= MVPP2_TXQ_PENDING_MASK; } while (pending); val &= ~MVPP2_TXQ_DRAIN_EN_MASK; - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val); put_cpu(); - for_each_present_cpu(cpu) { - txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); + for (thread = 0; thread < port->priv->nthreads; thread++) { + txq_pcpu = per_cpu_ptr(txq->pcpu, thread); /* Release all packets */ mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count); @@ -2389,13 +2426,17 @@ err_cleanup: static int mvpp2_setup_txqs(struct mvpp2_port *port) { struct mvpp2_tx_queue *txq; - int queue, err; + int queue, err, cpu; for (queue = 0; queue < port->ntxqs; queue++) { txq = port->txqs[queue]; err = mvpp2_txq_init(port, txq); if (err) goto err_cleanup; + + /* Assign this queue to a CPU */ + cpu = queue % num_present_cpus(); + netif_set_xps_queue(port->dev, cpumask_of(cpu), queue); } if (port->has_tx_irqs) { @@ -2503,16 +2544,20 @@ static void mvpp2_tx_proc_cb(unsigned long data) { struct net_device *dev = (struct net_device *)data; struct mvpp2_port *port = netdev_priv(dev); - struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu); + struct mvpp2_port_pcpu *port_pcpu; unsigned int tx_todo, cause; + port_pcpu = per_cpu_ptr(port->pcpu, + mvpp2_cpu_to_thread(port->priv, smp_processor_id())); + if (!netif_running(dev)) return; port_pcpu->timer_scheduled = false; /* Process all the Tx queues */ cause = (1 << port->ntxqs) - 1; - tx_todo = mvpp2_tx_done(port, cause, smp_processor_id()); + tx_todo = mvpp2_tx_done(port, cause, + mvpp2_cpu_to_thread(port->priv, smp_processor_id())); /* Set the timer in case not all the packets were processed */ if (tx_todo) @@ -2729,7 +2774,8 @@ static inline void tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, struct mvpp2_tx_desc *desc) { - struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu); + unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); + struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread); dma_addr_t buf_dma_addr = mvpp2_txdesc_dma_addr_get(port, desc); @@ -2746,7 +2792,8 @@ static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb, struct mvpp2_tx_queue *aggr_txq, struct mvpp2_tx_queue *txq) { - struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu); + unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); + struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread); struct mvpp2_tx_desc *tx_desc; int i; dma_addr_t buf_dma_addr; @@ -2865,9 +2912,8 @@ static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev, int i, len, descs = 0; /* Check number of available descriptors */ - if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, - tso_count_descs(skb)) || - mvpp2_txq_reserved_desc_num_proc(port->priv, txq, txq_pcpu, + if (mvpp2_aggr_desc_num_check(port, aggr_txq, tso_count_descs(skb)) || + mvpp2_txq_reserved_desc_num_proc(port, 
txq, txq_pcpu, tso_count_descs(skb))) return 0; @@ -2907,21 +2953,28 @@ release: } /* Main tx processing */ -static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t mvpp2_tx(struct sk_buff *skb, struct net_device *dev) { struct mvpp2_port *port = netdev_priv(dev); struct mvpp2_tx_queue *txq, *aggr_txq; struct mvpp2_txq_pcpu *txq_pcpu; struct mvpp2_tx_desc *tx_desc; dma_addr_t buf_dma_addr; + unsigned long flags = 0; + unsigned int thread; int frags = 0; u16 txq_id; u32 tx_cmd; + thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); + txq_id = skb_get_queue_mapping(skb); txq = port->txqs[txq_id]; - txq_pcpu = this_cpu_ptr(txq->pcpu); - aggr_txq = &port->priv->aggr_txqs[smp_processor_id()]; + txq_pcpu = per_cpu_ptr(txq->pcpu, thread); + aggr_txq = &port->priv->aggr_txqs[thread]; + + if (test_bit(thread, &port->priv->lock_map)) + spin_lock_irqsave(&port->tx_lock[thread], flags); if (skb_is_gso(skb)) { frags = mvpp2_tx_tso(skb, dev, txq, aggr_txq, txq_pcpu); @@ -2930,9 +2983,8 @@ static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev) frags = skb_shinfo(skb)->nr_frags + 1; /* Check number of available descriptors */ - if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, frags) || - mvpp2_txq_reserved_desc_num_proc(port->priv, txq, - txq_pcpu, frags)) { + if (mvpp2_aggr_desc_num_check(port, aggr_txq, frags) || + mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, frags)) { frags = 0; goto out; } @@ -2974,7 +3026,7 @@ static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev) out: if (frags > 0) { - struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats); + struct mvpp2_pcpu_stats *stats = per_cpu_ptr(port->stats, thread); struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id); txq_pcpu->reserved_num -= frags; @@ -3004,11 +3056,14 @@ out: /* Set the timer in case not all frags were processed */ if (!port->has_tx_irqs && txq_pcpu->count <= frags && txq_pcpu->count > 0) { - struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu); + struct mvpp2_port_pcpu *port_pcpu = per_cpu_ptr(port->pcpu, thread); mvpp2_timer_set(port_pcpu); } + if (test_bit(thread, &port->priv->lock_map)) + spin_unlock_irqrestore(&port->tx_lock[thread], flags); + return NETDEV_TX_OK; } @@ -3028,7 +3083,7 @@ static int mvpp2_poll(struct napi_struct *napi, int budget) int rx_done = 0; struct mvpp2_port *port = netdev_priv(napi->dev); struct mvpp2_queue_vector *qv; - int cpu = smp_processor_id(); + unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); qv = container_of(napi, struct mvpp2_queue_vector, napi); @@ -3042,7 +3097,7 @@ static int mvpp2_poll(struct napi_struct *napi, int budget) * * Each CPU has its own Rx/Tx cause register */ - cause_rx_tx = mvpp2_percpu_read_relaxed(port->priv, qv->sw_thread_id, + cause_rx_tx = mvpp2_thread_read_relaxed(port->priv, qv->sw_thread_id, MVPP2_ISR_RX_TX_CAUSE_REG(port->id)); cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK; @@ -3051,7 +3106,7 @@ static int mvpp2_poll(struct napi_struct *napi, int budget) /* Clear the cause register */ mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0); - mvpp2_percpu_write(port->priv, cpu, + mvpp2_thread_write(port->priv, thread, MVPP2_ISR_RX_TX_CAUSE_REG(port->id), cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK); } @@ -3065,7 +3120,8 @@ static int mvpp2_poll(struct napi_struct *napi, int budget) } /* Process RX packets */ - cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK; + cause_rx = cause_rx_tx & + 
MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version); cause_rx <<= qv->first_rxq; cause_rx |= qv->pending_cause_rx; while (cause_rx && budget > 0) { @@ -3140,14 +3196,13 @@ static void mvpp2_start_dev(struct mvpp2_port *port) for (i = 0; i < port->nqvecs; i++) napi_enable(&port->qvecs[i].napi); - /* Enable interrupts on all CPUs */ + /* Enable interrupts on all threads */ mvpp2_interrupts_enable(port); if (port->priv->hw_version == MVPP22) mvpp22_mode_reconfigure(port); if (port->phylink) { - netif_carrier_off(port->dev); phylink_start(port->phylink); } else { /* Phylink isn't used as of now for ACPI, so the MAC has to be @@ -3170,7 +3225,7 @@ static void mvpp2_stop_dev(struct mvpp2_port *port) { int i; - /* Disable interrupts on all CPUs */ + /* Disable interrupts on all threads */ mvpp2_interrupts_disable(port); for (i = 0; i < port->nqvecs; i++) @@ -3250,9 +3305,18 @@ static int mvpp2_irqs_init(struct mvpp2_port *port) if (err) goto err; - if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) - irq_set_affinity_hint(qv->irq, - cpumask_of(qv->sw_thread_id)); + if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) { + unsigned long mask = 0; + unsigned int cpu; + + for_each_present_cpu(cpu) { + if (mvpp2_cpu_to_thread(port->priv, cpu) == + qv->sw_thread_id) + mask |= BIT(cpu); + } + + irq_set_affinity_hint(qv->irq, to_cpumask(&mask)); + } } return 0; @@ -3396,11 +3460,11 @@ static int mvpp2_stop(struct net_device *dev) { struct mvpp2_port *port = netdev_priv(dev); struct mvpp2_port_pcpu *port_pcpu; - int cpu; + unsigned int thread; mvpp2_stop_dev(port); - /* Mask interrupts on all CPUs */ + /* Mask interrupts on all threads */ on_each_cpu(mvpp2_interrupts_mask, port, 1); mvpp2_shared_interrupt_mask_unmask(port, true); @@ -3411,8 +3475,8 @@ static int mvpp2_stop(struct net_device *dev) mvpp2_irqs_deinit(port); if (!port->has_tx_irqs) { - for_each_present_cpu(cpu) { - port_pcpu = per_cpu_ptr(port->pcpu, cpu); + for (thread = 0; thread < port->priv->nthreads; thread++) { + port_pcpu = per_cpu_ptr(port->pcpu, thread); hrtimer_cancel(&port_pcpu->tx_done_timer); port_pcpu->timer_scheduled = false; @@ -3557,7 +3621,7 @@ mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct mvpp2_port *port = netdev_priv(dev); unsigned int start; - int cpu; + unsigned int cpu; for_each_possible_cpu(cpu) { struct mvpp2_pcpu_stats *cpu_stats; @@ -3984,12 +4048,18 @@ static int mvpp2_simple_queue_vectors_init(struct mvpp2_port *port, static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port, struct device_node *port_node) { + struct mvpp2 *priv = port->priv; struct mvpp2_queue_vector *v; int i, ret; - port->nqvecs = num_possible_cpus(); - if (queue_mode == MVPP2_QDIST_SINGLE_MODE) - port->nqvecs += 1; + switch (queue_mode) { + case MVPP2_QDIST_SINGLE_MODE: + port->nqvecs = priv->nthreads + 1; + break; + case MVPP2_QDIST_MULTI_MODE: + port->nqvecs = priv->nthreads; + break; + } for (i = 0; i < port->nqvecs; i++) { char irqname[16]; @@ -4001,7 +4071,10 @@ static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port, v->sw_thread_id = i; v->sw_thread_mask = BIT(i); - snprintf(irqname, sizeof(irqname), "tx-cpu%d", i); + if (port->flags & MVPP2_F_DT_COMPAT) + snprintf(irqname, sizeof(irqname), "tx-cpu%d", i); + else + snprintf(irqname, sizeof(irqname), "hif%d", i); if (queue_mode == MVPP2_QDIST_MULTI_MODE) { v->first_rxq = i * MVPP2_DEFAULT_RXQ; @@ -4011,7 +4084,9 @@ static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port, v->first_rxq = 0; v->nrxqs = port->nrxqs; v->type = 
MVPP2_QUEUE_VECTOR_SHARED; - strncpy(irqname, "rx-shared", sizeof(irqname)); + + if (port->flags & MVPP2_F_DT_COMPAT) + strncpy(irqname, "rx-shared", sizeof(irqname)); } if (port_node) @@ -4088,7 +4163,8 @@ static int mvpp2_port_init(struct mvpp2_port *port) struct device *dev = port->dev->dev.parent; struct mvpp2 *priv = port->priv; struct mvpp2_txq_pcpu *txq_pcpu; - int queue, cpu, err; + unsigned int thread; + int queue, err; /* Checks for hardware constraints */ if (port->first_rxq + port->nrxqs > @@ -4132,9 +4208,9 @@ static int mvpp2_port_init(struct mvpp2_port *port) txq->id = queue_phy_id; txq->log_id = queue; txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH; - for_each_present_cpu(cpu) { - txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); - txq_pcpu->cpu = cpu; + for (thread = 0; thread < priv->nthreads; thread++) { + txq_pcpu = per_cpu_ptr(txq->pcpu, thread); + txq_pcpu->thread = thread; } port->txqs[queue] = txq; @@ -4207,24 +4283,51 @@ err_free_percpu: return err; } -/* Checks if the port DT description has the TX interrupts - * described. On PPv2.1, there are no such interrupts. On PPv2.2, - * there are available, but we need to keep support for old DTs. +static bool mvpp22_port_has_legacy_tx_irqs(struct device_node *port_node, + unsigned long *flags) +{ + char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1", "tx-cpu2", + "tx-cpu3" }; + int i; + + for (i = 0; i < 5; i++) + if (of_property_match_string(port_node, "interrupt-names", + irqs[i]) < 0) + return false; + + *flags |= MVPP2_F_DT_COMPAT; + return true; +} + +/* Checks if the port DT description has the required Tx interrupts: + * - PPv2.1: there are no such interrupts. + * - PPv2.2: + * - The old DTs have: "rx-shared", "tx-cpuX" with X in [0...3] + * - The new ones have: "hifX" with X in [0..8] + * + * All these variants are supported to preserve backward compatibility.
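+ *
+ * A new-style port node would thus carry, for instance:
+ *
+ *   interrupt-names = "hif0", "hif1", "hif2", "hif3", "hif4",
+ *                     "hif5", "hif6", "hif7", "hif8";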
*/ -static bool mvpp2_port_has_tx_irqs(struct mvpp2 *priv, - struct device_node *port_node) +static bool mvpp2_port_has_irqs(struct mvpp2 *priv, + struct device_node *port_node, + unsigned long *flags) { - char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1", - "tx-cpu2", "tx-cpu3" }; - int ret, i; + char name[5]; + int i; + + /* ACPI */ + if (!port_node) + return true; if (priv->hw_version == MVPP21) return false; - for (i = 0; i < 5; i++) { - ret = of_property_match_string(port_node, "interrupt-names", - irqs[i]); - if (ret < 0) + if (mvpp22_port_has_legacy_tx_irqs(port_node, flags)) + return true; + + for (i = 0; i < MVPP2_MAX_THREADS; i++) { + snprintf(name, 5, "hif%d", i); + if (of_property_match_string(port_node, "interrupt-names", + name) < 0) return false; } @@ -4601,23 +4704,21 @@ static int mvpp2_port_probe(struct platform_device *pdev, struct resource *res; struct phylink *phylink; char *mac_from = ""; - unsigned int ntxqs, nrxqs; + unsigned int ntxqs, nrxqs, thread; + unsigned long flags = 0; bool has_tx_irqs; u32 id; int features; int phy_mode; - int err, i, cpu; + int err, i; - if (port_node) { - has_tx_irqs = mvpp2_port_has_tx_irqs(priv, port_node); - } else { - has_tx_irqs = true; - queue_mode = MVPP2_QDIST_MULTI_MODE; + has_tx_irqs = mvpp2_port_has_irqs(priv, port_node, &flags); + if (!has_tx_irqs && queue_mode == MVPP2_QDIST_MULTI_MODE) { + dev_err(&pdev->dev, + "not enough IRQs to support multi queue mode\n"); + return -EINVAL; } - if (!has_tx_irqs) - queue_mode = MVPP2_QDIST_SINGLE_MODE; - ntxqs = MVPP2_MAX_TXQ; if (priv->hw_version == MVPP22 && queue_mode == MVPP2_QDIST_MULTI_MODE) nrxqs = MVPP2_DEFAULT_RXQ * num_possible_cpus(); @@ -4665,6 +4766,7 @@ static int mvpp2_port_probe(struct platform_device *pdev, port->nrxqs = nrxqs; port->priv = priv; port->has_tx_irqs = has_tx_irqs; + port->flags = flags; err = mvpp2_queue_vectors_init(port, port_node); if (err) @@ -4761,8 +4863,8 @@ static int mvpp2_port_probe(struct platform_device *pdev, } if (!port->has_tx_irqs) { - for_each_present_cpu(cpu) { - port_pcpu = per_cpu_ptr(port->pcpu, cpu); + for (thread = 0; thread < priv->nthreads; thread++) { + port_pcpu = per_cpu_ptr(port->pcpu, thread); hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED); @@ -5046,13 +5148,13 @@ static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv) } /* Allocate and initialize aggregated TXQs */ - priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(), + priv->aggr_txqs = devm_kcalloc(&pdev->dev, MVPP2_MAX_THREADS, sizeof(*priv->aggr_txqs), GFP_KERNEL); if (!priv->aggr_txqs) return -ENOMEM; - for_each_present_cpu(i) { + for (i = 0; i < MVPP2_MAX_THREADS; i++) { priv->aggr_txqs[i].id = i; priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE; err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i], i, priv); @@ -5099,7 +5201,7 @@ static int mvpp2_probe(struct platform_device *pdev) struct mvpp2 *priv; struct resource *res; void __iomem *base; - int i; + int i, shared; int err; priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); @@ -5164,6 +5266,15 @@ static int mvpp2_probe(struct platform_device *pdev) mvpp2_setup_bm_pool(); + + priv->nthreads = min_t(unsigned int, num_present_cpus(), + MVPP2_MAX_THREADS); + + shared = num_present_cpus() - priv->nthreads; + if (shared > 0) + bitmap_fill(&priv->lock_map, + min_t(int, shared, MVPP2_MAX_THREADS)); + for (i = 0; i < MVPP2_MAX_THREADS; i++) { u32 addr_space_sz; @@ -5338,7 +5449,7 @@ static int mvpp2_remove(struct platform_device *pdev) 
mvpp2_bm_pool_destroy(pdev, priv, bm_pool); } - for_each_present_cpu(i) { + for (i = 0; i < MVPP2_MAX_THREADS; i++) { struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i]; dma_free_coherent(&pdev->dev, diff --git a/drivers/net/ethernet/marvell/octeontx2/Kconfig b/drivers/net/ethernet/marvell/octeontx2/Kconfig new file mode 100644 index 000000000000..35827bdf1878 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/Kconfig @@ -0,0 +1,17 @@ +# +# Marvell OcteonTX2 drivers configuration +# + +config OCTEONTX2_MBOX + tristate + +config OCTEONTX2_AF + tristate "Marvell OcteonTX2 RVU Admin Function driver" + select OCTEONTX2_MBOX + depends on (64BIT && COMPILE_TEST) || ARM64 + depends on PCI + help + This driver supports Marvell's OcteonTX2 Resource Virtualization + Unit's admin function manager which manages all RVU HW resources + and provides a medium to other PF/VFs to configure HW. Should be + enabled for other RVU device drivers to work. diff --git a/drivers/net/ethernet/marvell/octeontx2/Makefile b/drivers/net/ethernet/marvell/octeontx2/Makefile new file mode 100644 index 000000000000..e579dcd54c97 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for Marvell OcteonTX2 device drivers. +# + +obj-$(CONFIG_OCTEONTX2_AF) += af/ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/Makefile b/drivers/net/ethernet/marvell/octeontx2/af/Makefile new file mode 100644 index 000000000000..eaac26424486 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/Makefile @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for Marvell's OcteonTX2 RVU Admin Function driver +# + +obj-$(CONFIG_OCTEONTX2_MBOX) += octeontx2_mbox.o +obj-$(CONFIG_OCTEONTX2_AF) += octeontx2_af.o + +octeontx2_mbox-y := mbox.o +octeontx2_af-y := cgx.o rvu.o rvu_cgx.o diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c new file mode 100644 index 000000000000..2cf8e402c31e --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c @@ -0,0 +1,502 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell OcteonTx2 CGX driver + * + * Copyright (C) 2018 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/acpi.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/phy.h> +#include <linux/of.h> +#include <linux/of_mdio.h> +#include <linux/of_net.h> + +#include "cgx.h" + +#define DRV_NAME "octeontx2-cgx" +#define DRV_STRING "Marvell OcteonTX2 CGX/MAC Driver" + +/** + * struct lmac + * @wq_cmd_cmplt: waitq to keep the process blocked until cmd completion + * @cmd_lock: Lock to serialize the command interface + * @resp: command response + * @event_cb: callback for linkchange events + * @cmd_pend: flag set before new command is started + * flag cleared after command response is received + * @cgx: parent cgx port + * @lmac_id: lmac port id + * @name: lmac port name + */ +struct lmac { + wait_queue_head_t wq_cmd_cmplt; + struct mutex cmd_lock; + u64 resp; + struct cgx_event_cb event_cb; + bool cmd_pend; + struct cgx *cgx; + u8 lmac_id; + char *name; +}; + +struct cgx { + void __iomem *reg_base; + struct pci_dev *pdev; + u8 cgx_id; + u8 lmac_count; + struct lmac *lmac_idmap[MAX_LMAC_PER_CGX]; + struct list_head cgx_list; +}; + +static LIST_HEAD(cgx_list); + +/* CGX PHY management internal APIs */ +static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool en); + +/* Supported devices */ +static const struct pci_device_id cgx_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) }, + { 0, } /* end of table */ +}; + +MODULE_DEVICE_TABLE(pci, cgx_id_table); + +static void cgx_write(struct cgx *cgx, u64 lmac, u64 offset, u64 val) +{ + writeq(val, cgx->reg_base + (lmac << 18) + offset); +} + +static u64 cgx_read(struct cgx *cgx, u64 lmac, u64 offset) +{ + return readq(cgx->reg_base + (lmac << 18) + offset); +} + +static inline struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx) +{ + if (!cgx || lmac_id >= MAX_LMAC_PER_CGX) + return NULL; + + return cgx->lmac_idmap[lmac_id]; +} + +int cgx_get_cgx_cnt(void) +{ + struct cgx *cgx_dev; + int count = 0; + + list_for_each_entry(cgx_dev, &cgx_list, cgx_list) + count++; + + return count; +} +EXPORT_SYMBOL(cgx_get_cgx_cnt); + +int cgx_get_lmac_cnt(void *cgxd) +{ + struct cgx *cgx = cgxd; + + if (!cgx) + return -ENODEV; + + return cgx->lmac_count; +} +EXPORT_SYMBOL(cgx_get_lmac_cnt); + +void *cgx_get_pdata(int cgx_id) +{ + struct cgx *cgx_dev; + + list_for_each_entry(cgx_dev, &cgx_list, cgx_list) { + if (cgx_dev->cgx_id == cgx_id) + return cgx_dev; + } + return NULL; +} +EXPORT_SYMBOL(cgx_get_pdata); + +/* CGX Firmware interface low level support */ +static int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac) +{ + struct cgx *cgx = lmac->cgx; + struct device *dev; + int err = 0; + u64 cmd; + + /* Ensure no other command is in progress */ + err = mutex_lock_interruptible(&lmac->cmd_lock); + if (err) + return err; + + /* Ensure command register is free */ + cmd = cgx_read(cgx, lmac->lmac_id, CGX_COMMAND_REG); + if (FIELD_GET(CMDREG_OWN, cmd) != CGX_CMD_OWN_NS) { + err = -EBUSY; + goto unlock; + } + + /* Update ownership in command request */ + req = FIELD_SET(CMDREG_OWN, CGX_CMD_OWN_FIRMWARE, req); + + /* Mark this lmac as pending, before we start */ + lmac->cmd_pend = true; + + /* Start command in hardware */ + cgx_write(cgx, lmac->lmac_id, CGX_COMMAND_REG, req); + + /* Ensure command is completed without errors */ + if (!wait_event_timeout(lmac->wq_cmd_cmplt, !lmac->cmd_pend, + msecs_to_jiffies(CGX_CMD_TIMEOUT))) { + dev = &cgx->pdev->dev; + dev_err(dev, "cgx port %d:%d cmd timeout\n", + 
cgx->cgx_id, lmac->lmac_id); + err = -EIO; + goto unlock; + } + + /* we have a valid command response */ + smp_rmb(); /* Ensure the latest updates are visible */ + *resp = lmac->resp; + +unlock: + mutex_unlock(&lmac->cmd_lock); + + return err; +} + +static inline int cgx_fwi_cmd_generic(u64 req, u64 *resp, + struct cgx *cgx, int lmac_id) +{ + struct lmac *lmac; + int err; + + lmac = lmac_pdata(lmac_id, cgx); + if (!lmac) + return -ENODEV; + + err = cgx_fwi_cmd_send(req, resp, lmac); + + /* Check for valid response */ + if (!err) { + if (FIELD_GET(EVTREG_STAT, *resp) == CGX_STAT_FAIL) + return -EIO; + else + return 0; + } + + return err; +} + +/* Hardware event handlers */ +static inline void cgx_link_change_handler(u64 lstat, + struct lmac *lmac) +{ + struct cgx *cgx = lmac->cgx; + struct cgx_link_event event; + struct device *dev; + + dev = &cgx->pdev->dev; + + event.lstat.link_up = FIELD_GET(RESP_LINKSTAT_UP, lstat); + event.lstat.full_duplex = FIELD_GET(RESP_LINKSTAT_FDUPLEX, lstat); + event.lstat.speed = FIELD_GET(RESP_LINKSTAT_SPEED, lstat); + event.lstat.err_type = FIELD_GET(RESP_LINKSTAT_ERRTYPE, lstat); + + event.cgx_id = cgx->cgx_id; + event.lmac_id = lmac->lmac_id; + + if (!lmac->event_cb.notify_link_chg) { + dev_dbg(dev, "cgx port %d:%d Link change handler null\n", + cgx->cgx_id, lmac->lmac_id); + if (event.lstat.err_type != CGX_ERR_NONE) { + dev_err(dev, "cgx port %d:%d Link error %d\n", + cgx->cgx_id, lmac->lmac_id, + event.lstat.err_type); + } + dev_info(dev, "cgx port %d:%d Link status %s, speed %x\n", + cgx->cgx_id, lmac->lmac_id, + event.lstat.link_up ? "UP" : "DOWN", + event.lstat.speed); + return; + } + + if (lmac->event_cb.notify_link_chg(&event, lmac->event_cb.data)) + dev_err(dev, "event notification failure\n"); +} + +static inline bool cgx_cmdresp_is_linkevent(u64 event) +{ + u8 id; + + id = FIELD_GET(EVTREG_ID, event); + if (id == CGX_CMD_LINK_BRING_UP || + id == CGX_CMD_LINK_BRING_DOWN) + return true; + else + return false; +} + +static inline bool cgx_event_is_linkevent(u64 event) +{ + if (FIELD_GET(EVTREG_ID, event) == CGX_EVT_LINK_CHANGE) + return true; + else + return false; +} + +static irqreturn_t cgx_fwi_event_handler(int irq, void *data) +{ + struct lmac *lmac = data; + struct cgx *cgx; + u64 event; + + cgx = lmac->cgx; + + event = cgx_read(cgx, lmac->lmac_id, CGX_EVENT_REG); + + if (!FIELD_GET(EVTREG_ACK, event)) + return IRQ_NONE; + + switch (FIELD_GET(EVTREG_EVT_TYPE, event)) { + case CGX_EVT_CMD_RESP: + /* Copy the response. Since only one command is active at a + * time, there is no way a response can get overwritten + */ + lmac->resp = event; + /* Ensure response is updated before thread context starts */ + smp_wmb(); + + /* There won't be separate events for link changes initiated from + * software; hence report the command responses as events. + */ + if (cgx_cmdresp_is_linkevent(event)) + cgx_link_change_handler(event, lmac); + + /* Release thread waiting for completion */ + lmac->cmd_pend = false; + wake_up_interruptible(&lmac->wq_cmd_cmplt); + break; + case CGX_EVT_ASYNC: + if (cgx_event_is_linkevent(event)) + cgx_link_change_handler(event, lmac); + break; + } + + /* Any new event or command response will be posted by firmware + * only after the current status is acked. + * Ack the interrupt register as well.
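+ * Clearing CGX_EVENT_REG drops EVTREG_ACK, which is what the
+ * firmware checks before posting a new event; writing FW_CGX_INT
+ * back to CGXX_CMRX_INT then acks the interrupt itself (the
+ * companion _ENA_W1S enable register suggests the usual
+ * write-one-to-clear convention for the INT register).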
+ */ + cgx_write(lmac->cgx, lmac->lmac_id, CGX_EVENT_REG, 0); + cgx_write(lmac->cgx, lmac->lmac_id, CGXX_CMRX_INT, FW_CGX_INT); + + return IRQ_HANDLED; +} + +/* APIs for PHY management using CGX firmware interface */ + +/* callback registration for hardware events like link change */ +int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id) +{ + struct cgx *cgx = cgxd; + struct lmac *lmac; + + lmac = lmac_pdata(lmac_id, cgx); + if (!lmac) + return -ENODEV; + + lmac->event_cb = *cb; + + return 0; +} +EXPORT_SYMBOL(cgx_lmac_evh_register); + +static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool enable) +{ + u64 req = 0; + u64 resp; + + if (enable) + req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_UP, req); + else + req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_DOWN, req); + + return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id); +} +EXPORT_SYMBOL(cgx_fwi_link_change); + +static inline int cgx_fwi_read_version(u64 *resp, struct cgx *cgx) +{ + u64 req = 0; + + req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FW_VER, req); + return cgx_fwi_cmd_generic(req, resp, cgx, 0); +} + +static int cgx_lmac_verify_fwi_version(struct cgx *cgx) +{ + struct device *dev = &cgx->pdev->dev; + int major_ver, minor_ver; + u64 resp; + int err; + + if (!cgx->lmac_count) + return 0; + + err = cgx_fwi_read_version(&resp, cgx); + if (err) + return err; + + major_ver = FIELD_GET(RESP_MAJOR_VER, resp); + minor_ver = FIELD_GET(RESP_MINOR_VER, resp); + dev_dbg(dev, "Firmware command interface version = %d.%d\n", + major_ver, minor_ver); + if (major_ver != CGX_FIRMWARE_MAJOR_VER || + minor_ver != CGX_FIRMWARE_MINOR_VER) + return -EIO; + else + return 0; +} + +static int cgx_lmac_init(struct cgx *cgx) +{ + struct lmac *lmac; + int i, err; + + cgx->lmac_count = cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0x7; + if (cgx->lmac_count > MAX_LMAC_PER_CGX) + cgx->lmac_count = MAX_LMAC_PER_CGX; + + for (i = 0; i < cgx->lmac_count; i++) { + lmac = kcalloc(1, sizeof(struct lmac), GFP_KERNEL); + if (!lmac) + return -ENOMEM; + lmac->name = kcalloc(1, sizeof("cgx_fwi_xxx_yyy"), GFP_KERNEL); + if (!lmac->name) + return -ENOMEM; + sprintf(lmac->name, "cgx_fwi_%d_%d", cgx->cgx_id, i); + lmac->lmac_id = i; + lmac->cgx = cgx; + init_waitqueue_head(&lmac->wq_cmd_cmplt); + mutex_init(&lmac->cmd_lock); + err = request_irq(pci_irq_vector(cgx->pdev, + CGX_LMAC_FWI + i * 9), + cgx_fwi_event_handler, 0, lmac->name, lmac); + if (err) + return err; + + /* Enable interrupt */ + cgx_write(cgx, lmac->lmac_id, CGXX_CMRX_INT_ENA_W1S, + FW_CGX_INT); + + /* Add reference */ + cgx->lmac_idmap[i] = lmac; + } + + return cgx_lmac_verify_fwi_version(cgx); +} + +static int cgx_lmac_exit(struct cgx *cgx) +{ + struct lmac *lmac; + int i; + + /* Free all lmac related resources */ + for (i = 0; i < cgx->lmac_count; i++) { + lmac = cgx->lmac_idmap[i]; + if (!lmac) + continue; + free_irq(pci_irq_vector(cgx->pdev, CGX_LMAC_FWI + i * 9), lmac); + kfree(lmac->name); + kfree(lmac); + } + + return 0; +} + +static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct device *dev = &pdev->dev; + struct cgx *cgx; + int err, nvec; + + cgx = devm_kzalloc(dev, sizeof(*cgx), GFP_KERNEL); + if (!cgx) + return -ENOMEM; + cgx->pdev = pdev; + + pci_set_drvdata(pdev, cgx); + + err = pci_enable_device(pdev); + if (err) { + dev_err(dev, "Failed to enable PCI device\n"); + pci_set_drvdata(pdev, NULL); + return err; + } + + err = pci_request_regions(pdev, DRV_NAME); + if (err) { + dev_err(dev, "PCI request regions failed 0x%x\n", err); + goto 
err_disable_device; + } + + /* MAP configuration registers */ + cgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0); + if (!cgx->reg_base) { + dev_err(dev, "CGX: Cannot map CSR memory space, aborting\n"); + err = -ENOMEM; + goto err_release_regions; + } + + nvec = CGX_NVEC; + err = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX); + if (err < 0 || err != nvec) { + dev_err(dev, "Request for %d msix vectors failed, err %d\n", + nvec, err); + goto err_release_regions; + } + + list_add(&cgx->cgx_list, &cgx_list); + cgx->cgx_id = cgx_get_cgx_cnt() - 1; + + err = cgx_lmac_init(cgx); + if (err) + goto err_release_lmac; + + return 0; + +err_release_lmac: + cgx_lmac_exit(cgx); + list_del(&cgx->cgx_list); +err_release_regions: + pci_release_regions(pdev); +err_disable_device: + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + return err; +} + +static void cgx_remove(struct pci_dev *pdev) +{ + struct cgx *cgx = pci_get_drvdata(pdev); + + cgx_lmac_exit(cgx); + list_del(&cgx->cgx_list); + pci_free_irq_vectors(pdev); + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); +} + +struct pci_driver cgx_driver = { + .name = DRV_NAME, + .id_table = cgx_id_table, + .probe = cgx_probe, + .remove = cgx_remove, +}; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h new file mode 100644 index 000000000000..a2a7a6d72d32 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h @@ -0,0 +1,65 @@ +/* SPDX-License-Identifier: GPL-2.0 + * Marvell OcteonTx2 CGX driver + * + * Copyright (C) 2018 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef CGX_H +#define CGX_H + +#include "cgx_fw_if.h" + + /* PCI device IDs */ +#define PCI_DEVID_OCTEONTX2_CGX 0xA059 + +/* PCI BAR nos */ +#define PCI_CFG_REG_BAR_NUM 0 + +#define MAX_CGX 3 +#define MAX_LMAC_PER_CGX 4 +#define CGX_OFFSET(x) ((x) * MAX_LMAC_PER_CGX) + +/* Registers */ +#define CGXX_CMRX_INT 0x040 +#define FW_CGX_INT BIT_ULL(1) +#define CGXX_CMRX_INT_ENA_W1S 0x058 +#define CGXX_CMRX_RX_ID_MAP 0x060 +#define CGXX_CMRX_RX_LMACS 0x128 +#define CGXX_SCRATCH0_REG 0x1050 +#define CGXX_SCRATCH1_REG 0x1058 +#define CGX_CONST 0x2000 + +#define CGX_COMMAND_REG CGXX_SCRATCH1_REG +#define CGX_EVENT_REG CGXX_SCRATCH0_REG +#define CGX_CMD_TIMEOUT 2200 /* msecs */ + +#define CGX_NVEC 37 +#define CGX_LMAC_FWI 0 + +struct cgx_link_event { + struct cgx_lnk_sts lstat; + u8 cgx_id; + u8 lmac_id; +}; + +/** + * struct cgx_event_cb + * @notify_link_chg: callback for link change notification + * @data: data passed to callback function + */ +struct cgx_event_cb { + int (*notify_link_chg)(struct cgx_link_event *event, void *data); + void *data; +}; + +extern struct pci_driver cgx_driver; + +int cgx_get_cgx_cnt(void); +int cgx_get_lmac_cnt(void *cgxd); +void *cgx_get_pdata(int cgx_id); +int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id); +#endif /* CGX_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h new file mode 100644 index 000000000000..fa17af3f4ba7 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h @@ -0,0 +1,186 @@ +/* SPDX-License-Identifier: GPL-2.0 + * Marvell OcteonTx2 CGX driver + * + * Copyright (C) 2018 Marvell International Ltd. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __CGX_FW_INTF_H__ +#define __CGX_FW_INTF_H__ + +#include <linux/bitops.h> +#include <linux/bitfield.h> + +#define CGX_FIRMWARE_MAJOR_VER 1 +#define CGX_FIRMWARE_MINOR_VER 0 + +#define CGX_EVENT_ACK 1UL + +/* CGX error types. set for cmd response status as CGX_STAT_FAIL */ +enum cgx_error_type { + CGX_ERR_NONE, + CGX_ERR_LMAC_NOT_ENABLED, + CGX_ERR_LMAC_MODE_INVALID, + CGX_ERR_REQUEST_ID_INVALID, + CGX_ERR_PREV_ACK_NOT_CLEAR, + CGX_ERR_PHY_LINK_DOWN, + CGX_ERR_PCS_RESET_FAIL, + CGX_ERR_AN_CPT_FAIL, + CGX_ERR_TX_NOT_IDLE, + CGX_ERR_RX_NOT_IDLE, + CGX_ERR_SPUX_BR_BLKLOCK_FAIL, + CGX_ERR_SPUX_RX_ALIGN_FAIL, + CGX_ERR_SPUX_TX_FAULT, + CGX_ERR_SPUX_RX_FAULT, + CGX_ERR_SPUX_RESET_FAIL, + CGX_ERR_SPUX_AN_RESET_FAIL, + CGX_ERR_SPUX_USX_AN_RESET_FAIL, + CGX_ERR_SMUX_RX_LINK_NOT_OK, + CGX_ERR_PCS_RECV_LINK_FAIL, + CGX_ERR_TRAINING_FAIL, + CGX_ERR_RX_EQU_FAIL, + CGX_ERR_SPUX_BER_FAIL, + CGX_ERR_SPUX_RSFEC_ALGN_FAIL, /* = 22 */ +}; + +/* LINK speed types */ +enum cgx_link_speed { + CGX_LINK_NONE, + CGX_LINK_10M, + CGX_LINK_100M, + CGX_LINK_1G, + CGX_LINK_2HG, + CGX_LINK_5G, + CGX_LINK_10G, + CGX_LINK_20G, + CGX_LINK_25G, + CGX_LINK_40G, + CGX_LINK_50G, + CGX_LINK_100G, + CGX_LINK_SPEED_MAX, +}; + +/* REQUEST ID types. Input to firmware */ +enum cgx_cmd_id { + CGX_CMD_NONE, + CGX_CMD_GET_FW_VER, + CGX_CMD_GET_MAC_ADDR, + CGX_CMD_SET_MTU, + CGX_CMD_GET_LINK_STS, /* optional to user */ + CGX_CMD_LINK_BRING_UP, + CGX_CMD_LINK_BRING_DOWN, + CGX_CMD_INTERNAL_LBK, + CGX_CMD_EXTERNAL_LBK, + CGX_CMD_HIGIG, + CGX_CMD_LINK_STATE_CHANGE, + CGX_CMD_MODE_CHANGE, /* hot plug support */ + CGX_CMD_INTF_SHUTDOWN, + CGX_CMD_IRQ_ENABLE, + CGX_CMD_IRQ_DISABLE, +}; + +/* async event ids */ +enum cgx_evt_id { + CGX_EVT_NONE, + CGX_EVT_LINK_CHANGE, +}; + +/* event types - cause of interrupt */ +enum cgx_evt_type { + CGX_EVT_ASYNC, + CGX_EVT_CMD_RESP +}; + +enum cgx_stat { + CGX_STAT_SUCCESS, + CGX_STAT_FAIL +}; + +enum cgx_cmd_own { + CGX_CMD_OWN_NS, + CGX_CMD_OWN_FIRMWARE, +}; + +/* m - bit mask + * y - value to be written in the bitrange + * x - input value whose bitrange to be modified + */ +#define FIELD_SET(m, y, x) \ + (((x) & ~(m)) | \ + FIELD_PREP((m), (y))) + +/* scratchx(0) CSR used for ATF->non-secure SW communication. + * This acts as the status register + * Provides details on command ack/status, command response, error details + */ +#define EVTREG_ACK BIT_ULL(0) +#define EVTREG_EVT_TYPE BIT_ULL(1) +#define EVTREG_STAT BIT_ULL(2) +#define EVTREG_ID GENMASK_ULL(8, 3) + +/* Response to command IDs with command status as CGX_STAT_FAIL + * + * Not applicable for commands : + * CGX_CMD_LINK_BRING_UP/DOWN/CGX_EVT_LINK_CHANGE + */ +#define EVTREG_ERRTYPE GENMASK_ULL(18, 9) + +/* Response to cmd ID as CGX_CMD_GET_FW_VER with cmd status as + * CGX_STAT_SUCCESS + */ +#define RESP_MAJOR_VER GENMASK_ULL(12, 9) +#define RESP_MINOR_VER GENMASK_ULL(16, 13) + +/* Response to cmd ID as CGX_CMD_GET_MAC_ADDR with cmd status as + * CGX_STAT_SUCCESS + */ +#define RESP_MAC_ADDR GENMASK_ULL(56, 9) + +/* Response to cmd ID - CGX_CMD_LINK_BRING_UP/DOWN, event ID CGX_EVT_LINK_CHANGE + * status can be either CGX_STAT_FAIL or CGX_STAT_SUCCESS + * + * In case of CGX_STAT_FAIL, it indicates CGX configuration failed + * when processing link up/down/change command. 
+ * Both err_type and current link status will be updated
+ *
+ * In case of CGX_STAT_SUCCESS, err_type will be CGX_ERR_NONE and current
+ * link status will be updated
+ */
+struct cgx_lnk_sts {
+	uint64_t reserved1:9;
+	uint64_t link_up:1;
+	uint64_t full_duplex:1;
+	uint64_t speed:4;	/* cgx_link_speed */
+	uint64_t err_type:10;
+	uint64_t reserved2:39;
+};
+
+#define RESP_LINKSTAT_UP	GENMASK_ULL(9, 9)
+#define RESP_LINKSTAT_FDUPLEX	GENMASK_ULL(10, 10)
+#define RESP_LINKSTAT_SPEED	GENMASK_ULL(14, 11)
+#define RESP_LINKSTAT_ERRTYPE	GENMASK_ULL(24, 15)
+
+/* scratchx(1) CSR used for non-secure SW->ATF communication
+ * This CSR acts as a command register
+ */
+#define CMDREG_OWN	BIT_ULL(0)
+#define CMDREG_ID	GENMASK_ULL(7, 2)
+
+/* Any command using enable/disable as an argument needs
+ * to set this bitfield.
+ * Ex: Loopback, HiGig...
+ */
+#define CMDREG_ENABLE	BIT_ULL(8)
+
+/* command argument to be passed for cmd ID - CGX_CMD_SET_MTU */
+#define CMDMTU_SIZE	GENMASK_ULL(23, 8)
+
+/* command argument to be passed for cmd ID - CGX_CMD_LINK_STATE_CHANGE */
+#define CMDLINKCHANGE_LINKUP	BIT_ULL(8)
+#define CMDLINKCHANGE_FULLDPLX	BIT_ULL(9)
+#define CMDLINKCHANGE_SPEED	GENMASK_ULL(13, 10)
+
+#endif /* __CGX_FW_INTF_H__ */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
new file mode 100644
index 000000000000..85ba24a05774
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
@@ -0,0 +1,303 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */ + +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/pci.h> + +#include "rvu_reg.h" +#include "mbox.h" + +static const u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN); + +void otx2_mbox_reset(struct otx2_mbox *mbox, int devid) +{ + struct otx2_mbox_dev *mdev = &mbox->dev[devid]; + struct mbox_hdr *tx_hdr, *rx_hdr; + + tx_hdr = mdev->mbase + mbox->tx_start; + rx_hdr = mdev->mbase + mbox->rx_start; + + spin_lock(&mdev->mbox_lock); + mdev->msg_size = 0; + mdev->rsp_size = 0; + tx_hdr->num_msgs = 0; + rx_hdr->num_msgs = 0; + spin_unlock(&mdev->mbox_lock); +} +EXPORT_SYMBOL(otx2_mbox_reset); + +void otx2_mbox_destroy(struct otx2_mbox *mbox) +{ + mbox->reg_base = NULL; + mbox->hwbase = NULL; + + kfree(mbox->dev); + mbox->dev = NULL; +} +EXPORT_SYMBOL(otx2_mbox_destroy); + +int otx2_mbox_init(struct otx2_mbox *mbox, void *hwbase, struct pci_dev *pdev, + void *reg_base, int direction, int ndevs) +{ + struct otx2_mbox_dev *mdev; + int devid; + + switch (direction) { + case MBOX_DIR_AFPF: + case MBOX_DIR_PFVF: + mbox->tx_start = MBOX_DOWN_TX_START; + mbox->rx_start = MBOX_DOWN_RX_START; + mbox->tx_size = MBOX_DOWN_TX_SIZE; + mbox->rx_size = MBOX_DOWN_RX_SIZE; + break; + case MBOX_DIR_PFAF: + case MBOX_DIR_VFPF: + mbox->tx_start = MBOX_DOWN_RX_START; + mbox->rx_start = MBOX_DOWN_TX_START; + mbox->tx_size = MBOX_DOWN_RX_SIZE; + mbox->rx_size = MBOX_DOWN_TX_SIZE; + break; + case MBOX_DIR_AFPF_UP: + case MBOX_DIR_PFVF_UP: + mbox->tx_start = MBOX_UP_TX_START; + mbox->rx_start = MBOX_UP_RX_START; + mbox->tx_size = MBOX_UP_TX_SIZE; + mbox->rx_size = MBOX_UP_RX_SIZE; + break; + case MBOX_DIR_PFAF_UP: + case MBOX_DIR_VFPF_UP: + mbox->tx_start = MBOX_UP_RX_START; + mbox->rx_start = MBOX_UP_TX_START; + mbox->tx_size = MBOX_UP_RX_SIZE; + mbox->rx_size = MBOX_UP_TX_SIZE; + break; + default: + return -ENODEV; + } + + switch (direction) { + case MBOX_DIR_AFPF: + case MBOX_DIR_AFPF_UP: + mbox->trigger = RVU_AF_AFPF_MBOX0; + mbox->tr_shift = 4; + break; + case MBOX_DIR_PFAF: + case MBOX_DIR_PFAF_UP: + mbox->trigger = RVU_PF_PFAF_MBOX1; + mbox->tr_shift = 0; + break; + case MBOX_DIR_PFVF: + case MBOX_DIR_PFVF_UP: + mbox->trigger = RVU_PF_VFX_PFVF_MBOX0; + mbox->tr_shift = 12; + break; + case MBOX_DIR_VFPF: + case MBOX_DIR_VFPF_UP: + mbox->trigger = RVU_VF_VFPF_MBOX1; + mbox->tr_shift = 0; + break; + default: + return -ENODEV; + } + + mbox->reg_base = reg_base; + mbox->hwbase = hwbase; + mbox->pdev = pdev; + + mbox->dev = kcalloc(ndevs, sizeof(struct otx2_mbox_dev), GFP_KERNEL); + if (!mbox->dev) { + otx2_mbox_destroy(mbox); + return -ENOMEM; + } + + mbox->ndevs = ndevs; + for (devid = 0; devid < ndevs; devid++) { + mdev = &mbox->dev[devid]; + mdev->mbase = mbox->hwbase + (devid * MBOX_SIZE); + spin_lock_init(&mdev->mbox_lock); + /* Init header to reset value */ + otx2_mbox_reset(mbox, devid); + } + + return 0; +} +EXPORT_SYMBOL(otx2_mbox_init); + +int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid) +{ + struct otx2_mbox_dev *mdev = &mbox->dev[devid]; + int timeout = 0, sleep = 1; + + while (mdev->num_msgs != mdev->msgs_acked) { + msleep(sleep); + timeout += sleep; + if (timeout >= MBOX_RSP_TIMEOUT) + return -EIO; + } + return 0; +} +EXPORT_SYMBOL(otx2_mbox_wait_for_rsp); + +int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid) +{ + struct otx2_mbox_dev *mdev = &mbox->dev[devid]; + unsigned long timeout = jiffies + 1 * HZ; + + while (!time_after(jiffies, timeout)) { + if (mdev->num_msgs == mdev->msgs_acked) + return 0; + cpu_relax(); + } + return 
-EIO; +} +EXPORT_SYMBOL(otx2_mbox_busy_poll_for_rsp); + +void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid) +{ + struct otx2_mbox_dev *mdev = &mbox->dev[devid]; + struct mbox_hdr *tx_hdr, *rx_hdr; + + tx_hdr = mdev->mbase + mbox->tx_start; + rx_hdr = mdev->mbase + mbox->rx_start; + + spin_lock(&mdev->mbox_lock); + /* Reset header for next messages */ + mdev->msg_size = 0; + mdev->rsp_size = 0; + mdev->msgs_acked = 0; + + /* Sync mbox data into memory */ + smp_wmb(); + + /* num_msgs != 0 signals to the peer that the buffer has a number of + * messages. So this should be written after writing all the messages + * to the shared memory. + */ + tx_hdr->num_msgs = mdev->num_msgs; + rx_hdr->num_msgs = 0; + spin_unlock(&mdev->mbox_lock); + + /* The interrupt should be fired after num_msgs is written + * to the shared memory + */ + writeq(1, (void __iomem *)mbox->reg_base + + (mbox->trigger | (devid << mbox->tr_shift))); +} +EXPORT_SYMBOL(otx2_mbox_msg_send); + +struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid, + int size, int size_rsp) +{ + struct otx2_mbox_dev *mdev = &mbox->dev[devid]; + struct mbox_msghdr *msghdr = NULL; + + spin_lock(&mdev->mbox_lock); + size = ALIGN(size, MBOX_MSG_ALIGN); + size_rsp = ALIGN(size_rsp, MBOX_MSG_ALIGN); + /* Check if there is space in mailbox */ + if ((mdev->msg_size + size) > mbox->tx_size - msgs_offset) + goto exit; + if ((mdev->rsp_size + size_rsp) > mbox->rx_size - msgs_offset) + goto exit; + + if (mdev->msg_size == 0) + mdev->num_msgs = 0; + mdev->num_msgs++; + + msghdr = mdev->mbase + mbox->tx_start + msgs_offset + mdev->msg_size; + + /* Clear the whole msg region */ + memset(msghdr, 0, sizeof(*msghdr) + size); + /* Init message header with reset values */ + msghdr->ver = OTX2_MBOX_VERSION; + mdev->msg_size += size; + mdev->rsp_size += size_rsp; + msghdr->next_msgoff = mdev->msg_size + msgs_offset; +exit: + spin_unlock(&mdev->mbox_lock); + + return msghdr; +} +EXPORT_SYMBOL(otx2_mbox_alloc_msg_rsp); + +struct mbox_msghdr *otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid, + struct mbox_msghdr *msg) +{ + unsigned long imsg = mbox->tx_start + msgs_offset; + unsigned long irsp = mbox->rx_start + msgs_offset; + struct otx2_mbox_dev *mdev = &mbox->dev[devid]; + u16 msgs; + + if (mdev->num_msgs != mdev->msgs_acked) + return ERR_PTR(-ENODEV); + + for (msgs = 0; msgs < mdev->msgs_acked; msgs++) { + struct mbox_msghdr *pmsg = mdev->mbase + imsg; + struct mbox_msghdr *prsp = mdev->mbase + irsp; + + if (msg == pmsg) { + if (pmsg->id != prsp->id) + return ERR_PTR(-ENODEV); + return prsp; + } + + imsg = pmsg->next_msgoff; + irsp = prsp->next_msgoff; + } + + return ERR_PTR(-ENODEV); +} +EXPORT_SYMBOL(otx2_mbox_get_rsp); + +int +otx2_reply_invalid_msg(struct otx2_mbox *mbox, int devid, u16 pcifunc, u16 id) +{ + struct msg_rsp *rsp; + + rsp = (struct msg_rsp *) + otx2_mbox_alloc_msg(mbox, devid, sizeof(*rsp)); + if (!rsp) + return -ENOMEM; + rsp->hdr.id = id; + rsp->hdr.sig = OTX2_MBOX_RSP_SIG; + rsp->hdr.rc = MBOX_MSG_INVALID; + rsp->hdr.pcifunc = pcifunc; + return 0; +} +EXPORT_SYMBOL(otx2_reply_invalid_msg); + +bool otx2_mbox_nonempty(struct otx2_mbox *mbox, int devid) +{ + struct otx2_mbox_dev *mdev = &mbox->dev[devid]; + bool ret; + + spin_lock(&mdev->mbox_lock); + ret = mdev->num_msgs != 0; + spin_unlock(&mdev->mbox_lock); + + return ret; +} +EXPORT_SYMBOL(otx2_mbox_nonempty); + +const char *otx2_mbox_id2name(u16 id) +{ + switch (id) { +#define M(_name, _id, _1, _2) case _id: return # _name; + MBOX_MESSAGES +#undef M + default: 
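+		/*
+		 * Editor's note, illustrative: each M() entry in
+		 * MBOX_MESSAGES expands to a case above, e.g.
+		 * M(READY, 0x001, msg_req, ready_msg_rsp) becomes
+		 * "case 0x001: return "READY";".
+		 */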
+ return "INVALID ID"; + } +} +EXPORT_SYMBOL(otx2_mbox_id2name); + +MODULE_AUTHOR("Marvell International Ltd."); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h new file mode 100644 index 000000000000..bedf0ee62450 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h @@ -0,0 +1,211 @@ +/* SPDX-License-Identifier: GPL-2.0 + * Marvell OcteonTx2 RVU Admin Function driver + * + * Copyright (C) 2018 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef MBOX_H +#define MBOX_H + +#include <linux/etherdevice.h> +#include <linux/sizes.h> + +#include "rvu_struct.h" + +#define MBOX_SIZE SZ_64K + +/* AF/PF: PF initiated, PF/VF VF initiated */ +#define MBOX_DOWN_RX_START 0 +#define MBOX_DOWN_RX_SIZE (46 * SZ_1K) +#define MBOX_DOWN_TX_START (MBOX_DOWN_RX_START + MBOX_DOWN_RX_SIZE) +#define MBOX_DOWN_TX_SIZE (16 * SZ_1K) +/* AF/PF: AF initiated, PF/VF PF initiated */ +#define MBOX_UP_RX_START (MBOX_DOWN_TX_START + MBOX_DOWN_TX_SIZE) +#define MBOX_UP_RX_SIZE SZ_1K +#define MBOX_UP_TX_START (MBOX_UP_RX_START + MBOX_UP_RX_SIZE) +#define MBOX_UP_TX_SIZE SZ_1K + +#if MBOX_UP_TX_SIZE + MBOX_UP_TX_START != MBOX_SIZE +# error "incorrect mailbox area sizes" +#endif + +#define INTR_MASK(pfvfs) ((pfvfs < 64) ? (BIT_ULL(pfvfs) - 1) : (~0ull)) + +#define MBOX_RSP_TIMEOUT 1000 /* in ms, Time to wait for mbox response */ + +#define MBOX_MSG_ALIGN 16 /* Align mbox msg start to 16bytes */ + +/* Mailbox directions */ +#define MBOX_DIR_AFPF 0 /* AF replies to PF */ +#define MBOX_DIR_PFAF 1 /* PF sends messages to AF */ +#define MBOX_DIR_PFVF 2 /* PF replies to VF */ +#define MBOX_DIR_VFPF 3 /* VF sends messages to PF */ +#define MBOX_DIR_AFPF_UP 4 /* AF sends messages to PF */ +#define MBOX_DIR_PFAF_UP 5 /* PF replies to AF */ +#define MBOX_DIR_PFVF_UP 6 /* PF sends messages to VF */ +#define MBOX_DIR_VFPF_UP 7 /* VF replies to PF */ + +struct otx2_mbox_dev { + void *mbase; /* This dev's mbox region */ + spinlock_t mbox_lock; + u16 msg_size; /* Total msg size to be sent */ + u16 rsp_size; /* Total rsp size to be sure the reply is ok */ + u16 num_msgs; /* No of msgs sent or waiting for response */ + u16 msgs_acked; /* No of msgs for which response is received */ +}; + +struct otx2_mbox { + struct pci_dev *pdev; + void *hwbase; /* Mbox region advertised by HW */ + void *reg_base;/* CSR base for this dev */ + u64 trigger; /* Trigger mbox notification */ + u16 tr_shift; /* Mbox trigger shift */ + u64 rx_start; /* Offset of Rx region in mbox memory */ + u64 tx_start; /* Offset of Tx region in mbox memory */ + u16 rx_size; /* Size of Rx region */ + u16 tx_size; /* Size of Tx region */ + u16 ndevs; /* The number of peers */ + struct otx2_mbox_dev *dev; +}; + +/* Header which preceeds all mbox messages */ +struct mbox_hdr { + u16 num_msgs; /* No of msgs embedded */ +}; + +/* Header which preceeds every msg and is also part of it */ +struct mbox_msghdr { + u16 pcifunc; /* Who's sending this msg */ + u16 id; /* Mbox message ID */ +#define OTX2_MBOX_REQ_SIG (0xdead) +#define OTX2_MBOX_RSP_SIG (0xbeef) + u16 sig; /* Signature, for validating corrupted msgs */ +#define OTX2_MBOX_VERSION (0x0001) + u16 ver; /* Version of msg's structure for this ID */ + u16 next_msgoff; /* Offset of next msg within mailbox region */ + int rc; /* Msg process'ed 
+
+void otx2_mbox_reset(struct otx2_mbox *mbox, int devid);
+void otx2_mbox_destroy(struct otx2_mbox *mbox);
+int otx2_mbox_init(struct otx2_mbox *mbox, void __force *hwbase,
+		   struct pci_dev *pdev, void __force *reg_base,
+		   int direction, int ndevs);
+void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid);
+int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid);
+int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid);
+struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
+					    int size, int size_rsp);
+struct mbox_msghdr *otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid,
+				      struct mbox_msghdr *msg);
+int otx2_reply_invalid_msg(struct otx2_mbox *mbox, int devid,
+			   u16 pcifunc, u16 id);
+bool otx2_mbox_nonempty(struct otx2_mbox *mbox, int devid);
+const char *otx2_mbox_id2name(u16 id);
+static inline struct mbox_msghdr *otx2_mbox_alloc_msg(struct otx2_mbox *mbox,
+						      int devid, int size)
+{
+	return otx2_mbox_alloc_msg_rsp(mbox, devid, size, 0);
+}
+
+/* Mailbox message types */
+#define MBOX_MSG_MASK		0xFFFF
+#define MBOX_MSG_INVALID	0xFFFE
+#define MBOX_MSG_MAX		0xFFFF
+
+#define MBOX_MESSAGES							\
+/* Generic mbox IDs (range 0x000 - 0x1FF) */				\
+M(READY,		0x001, msg_req, ready_msg_rsp)			\
+M(ATTACH_RESOURCES,	0x002, rsrc_attach, msg_rsp)			\
+M(DETACH_RESOURCES,	0x003, rsrc_detach, msg_rsp)			\
+M(MSIX_OFFSET,		0x004, msg_req, msix_offset_rsp)		\
+/* CGX mbox IDs (range 0x200 - 0x3FF) */				\
+/* NPA mbox IDs (range 0x400 - 0x5FF) */				\
+/* SSO/SSOW mbox IDs (range 0x600 - 0x7FF) */				\
+/* TIM mbox IDs (range 0x800 - 0x9FF) */				\
+/* CPT mbox IDs (range 0xA00 - 0xBFF) */				\
+/* NPC mbox IDs (range 0x6000 - 0x7FFF) */				\
+/* NIX mbox IDs (range 0x8000 - 0xFFFF) */				\
+
+enum {
+#define M(_name, _id, _1, _2) MBOX_MSG_ ## _name = _id,
+MBOX_MESSAGES
+#undef M
+};
+
+/* Mailbox message formats */
+
+/* Generic request msg used for those mbox messages which
+ * don't send any data in the request.
+ */
+struct msg_req {
+	struct mbox_msghdr hdr;
+};
+
+/* Generic response msg used as an ack or response for those mbox
+ * messages which don't have a specific rsp msg format.
+ */
+struct msg_rsp {
+	struct mbox_msghdr hdr;
+};
+
+struct ready_msg_rsp {
+	struct mbox_msghdr hdr;
+	u16    sclk_feq;	/* SCLK frequency */
+};
+
+/* Structure for requesting resource provisioning.
+ * 'modify' flag to be used when either requesting more
+ * or detaching part of a certain resource type.
+ * Rest of the fields specify how many of what type to
+ * be attached.
+ */
+struct rsrc_attach {
+	struct mbox_msghdr hdr;
+	u8   modify:1;
+	u8   npalf:1;
+	u8   nixlf:1;
+	u16  sso;
+	u16  ssow;
+	u16  timlfs;
+	u16  cptlfs;
+};
+
+/* Structure for relinquishing resources.
+ * 'partial' flag to be used when relinquishing all resources
+ * but only of a certain type. If not set, all resources of all
+ * types provisioned to the RVU function will be detached.
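+ *
+ * Editor's example, illustrative: partial = 1 with only 'sso' set detaches
+ * just the SSO LFs and leaves any NPA/NIX/SSOW/TIM/CPT attachments alone,
+ * while partial = 0 ignores the per-block flags and detaches everything.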
+ */ +struct rsrc_detach { + struct mbox_msghdr hdr; + u8 partial:1; + u8 npalf:1; + u8 nixlf:1; + u8 sso:1; + u8 ssow:1; + u8 timlfs:1; + u8 cptlfs:1; +}; + +#define MSIX_VECTOR_INVALID 0xFFFF +#define MAX_RVU_BLKLF_CNT 256 + +struct msix_offset_rsp { + struct mbox_msghdr hdr; + u16 npa_msixoff; + u16 nix_msixoff; + u8 sso; + u8 ssow; + u8 timlfs; + u8 cptlfs; + u16 sso_msixoff[MAX_RVU_BLKLF_CNT]; + u16 ssow_msixoff[MAX_RVU_BLKLF_CNT]; + u16 timlf_msixoff[MAX_RVU_BLKLF_CNT]; + u16 cptlf_msixoff[MAX_RVU_BLKLF_CNT]; +}; + +#endif /* MBOX_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c new file mode 100644 index 000000000000..2033f42c3226 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c @@ -0,0 +1,1637 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell OcteonTx2 RVU Admin Function driver + * + * Copyright (C) 2018 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/delay.h> +#include <linux/irq.h> +#include <linux/pci.h> +#include <linux/sysfs.h> + +#include "cgx.h" +#include "rvu.h" +#include "rvu_reg.h" + +#define DRV_NAME "octeontx2-af" +#define DRV_STRING "Marvell OcteonTX2 RVU Admin Function Driver" +#define DRV_VERSION "1.0" + +static int rvu_get_hwvf(struct rvu *rvu, int pcifunc); + +static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf, + struct rvu_block *block, int lf); +static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf, + struct rvu_block *block, int lf); + +/* Supported devices */ +static const struct pci_device_id rvu_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AF) }, + { 0, } /* end of table */ +}; + +MODULE_AUTHOR("Marvell International Ltd."); +MODULE_DESCRIPTION(DRV_STRING); +MODULE_LICENSE("GPL v2"); +MODULE_VERSION(DRV_VERSION); +MODULE_DEVICE_TABLE(pci, rvu_id_table); + +/* Poll a RVU block's register 'offset', for a 'zero' + * or 'nonzero' at bits specified by 'mask' + */ +int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero) +{ + void __iomem *reg; + int timeout = 100; + u64 reg_val; + + reg = rvu->afreg_base + ((block << 28) | offset); + while (timeout) { + reg_val = readq(reg); + if (zero && !(reg_val & mask)) + return 0; + if (!zero && (reg_val & mask)) + return 0; + usleep_range(1, 2); + timeout--; + } + return -EBUSY; +} + +int rvu_alloc_rsrc(struct rsrc_bmap *rsrc) +{ + int id; + + if (!rsrc->bmap) + return -EINVAL; + + id = find_first_zero_bit(rsrc->bmap, rsrc->max); + if (id >= rsrc->max) + return -ENOSPC; + + __set_bit(id, rsrc->bmap); + + return id; +} + +static int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc) +{ + int start; + + if (!rsrc->bmap) + return -EINVAL; + + start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0); + if (start >= rsrc->max) + return -ENOSPC; + + bitmap_set(rsrc->bmap, start, nrsrc); + return start; +} + +static void rvu_free_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc, int start) +{ + if (!rsrc->bmap) + return; + if (start >= rsrc->max) + return; + + bitmap_clear(rsrc->bmap, start, nrsrc); +} + +static bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc) +{ + int start; + + if (!rsrc->bmap) + return false; + + start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, 
nrsrc, 0);
+	if (start >= rsrc->max)
+		return false;
+
+	return true;
+}
+
+void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id)
+{
+	if (!rsrc->bmap)
+		return;
+
+	__clear_bit(id, rsrc->bmap);
+}
+
+int rvu_rsrc_free_count(struct rsrc_bmap *rsrc)
+{
+	int used;
+
+	if (!rsrc->bmap)
+		return 0;
+
+	used = bitmap_weight(rsrc->bmap, rsrc->max);
+	return (rsrc->max - used);
+}
+
+int rvu_alloc_bitmap(struct rsrc_bmap *rsrc)
+{
+	rsrc->bmap = kcalloc(BITS_TO_LONGS(rsrc->max),
+			     sizeof(long), GFP_KERNEL);
+	if (!rsrc->bmap)
+		return -ENOMEM;
+	return 0;
+}
+
+/* Get block LF's HW index from a PF_FUNC's block slot number */
+int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot)
+{
+	u16 match = 0;
+	int lf;
+
+	spin_lock(&rvu->rsrc_lock);
+	for (lf = 0; lf < block->lf.max; lf++) {
+		if (block->fn_map[lf] == pcifunc) {
+			if (slot == match) {
+				spin_unlock(&rvu->rsrc_lock);
+				return lf;
+			}
+			match++;
+		}
+	}
+	spin_unlock(&rvu->rsrc_lock);
+	return -ENODEV;
+}
+
+/* Convert BLOCK_TYPE_E to a BLOCK_ADDR_E.
+ * Some silicon variants of OcteonTX2 support
+ * multiple blocks of the same type.
+ *
+ * @pcifunc has to be zero when no LF is yet attached.
+ */
+int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc)
+{
+	int devnum, blkaddr = -ENODEV;
+	u64 cfg, reg;
+	bool is_pf;
+
+	switch (blktype) {
+	case BLKTYPE_NPA:
+		blkaddr = BLKADDR_NPA;
+		goto exit;
+	case BLKTYPE_NIX:
+		/* For now assume NIX0 */
+		if (!pcifunc) {
+			blkaddr = BLKADDR_NIX0;
+			goto exit;
+		}
+		break;
+	case BLKTYPE_SSO:
+		blkaddr = BLKADDR_SSO;
+		goto exit;
+	case BLKTYPE_SSOW:
+		blkaddr = BLKADDR_SSOW;
+		goto exit;
+	case BLKTYPE_TIM:
+		blkaddr = BLKADDR_TIM;
+		goto exit;
+	case BLKTYPE_CPT:
+		/* For now assume CPT0 */
+		if (!pcifunc) {
+			blkaddr = BLKADDR_CPT0;
+			goto exit;
+		}
+		break;
+	}
+
+	/* Check if this is an RVU PF or VF */
+	if (pcifunc & RVU_PFVF_FUNC_MASK) {
+		is_pf = false;
+		devnum = rvu_get_hwvf(rvu, pcifunc);
+	} else {
+		is_pf = true;
+		devnum = rvu_get_pf(pcifunc);
+	}
+
+	/* Check if the 'pcifunc' has a NIX LF from 'BLKADDR_NIX0' */
+	if (blktype == BLKTYPE_NIX) {
+		reg = is_pf ? RVU_PRIV_PFX_NIX0_CFG : RVU_PRIV_HWVFX_NIX0_CFG;
+		cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
+		if (cfg)
+			blkaddr = BLKADDR_NIX0;
+	}
+
+	/* Check if the 'pcifunc' has a CPT LF from 'BLKADDR_CPT0' */
+	if (blktype == BLKTYPE_CPT) {
+		reg = is_pf ? RVU_PRIV_PFX_CPT0_CFG : RVU_PRIV_HWVFX_CPT0_CFG;
+		cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
+		if (cfg)
+			blkaddr = BLKADDR_CPT0;
+	}
+
+exit:
+	if (is_block_implemented(rvu->hw, blkaddr))
+		return blkaddr;
+	return -ENODEV;
+}
+
+static void rvu_update_rsrc_map(struct rvu *rvu, struct rvu_pfvf *pfvf,
+				struct rvu_block *block, u16 pcifunc,
+				u16 lf, bool attach)
+{
+	int devnum, num_lfs = 0;
+	bool is_pf;
+	u64 reg;
+
+	if (lf >= block->lf.max) {
+		dev_err(&rvu->pdev->dev,
+			"%s: FATAL: LF %d is >= %s's max lfs i.e. %d\n",
+			__func__, lf, block->name, block->lf.max);
+		return;
+	}
+
+	/* Check if this is for an RVU PF or VF */
+	if (pcifunc & RVU_PFVF_FUNC_MASK) {
+		is_pf = false;
+		devnum = rvu_get_hwvf(rvu, pcifunc);
+	} else {
+		is_pf = true;
+		devnum = rvu_get_pf(pcifunc);
+	}
+
+	block->fn_map[lf] = attach ? pcifunc : 0;
+
+	switch (block->type) {
+	case BLKTYPE_NPA:
+		pfvf->npalf = attach ? true : false;
+		num_lfs = pfvf->npalf;
+		break;
+	case BLKTYPE_NIX:
+		pfvf->nixlf = attach ? true : false;
+		num_lfs = pfvf->nixlf;
+		break;
+	case BLKTYPE_SSO:
+		attach ? 
pfvf->sso++ : pfvf->sso--; + num_lfs = pfvf->sso; + break; + case BLKTYPE_SSOW: + attach ? pfvf->ssow++ : pfvf->ssow--; + num_lfs = pfvf->ssow; + break; + case BLKTYPE_TIM: + attach ? pfvf->timlfs++ : pfvf->timlfs--; + num_lfs = pfvf->timlfs; + break; + case BLKTYPE_CPT: + attach ? pfvf->cptlfs++ : pfvf->cptlfs--; + num_lfs = pfvf->cptlfs; + break; + } + + reg = is_pf ? block->pf_lfcnt_reg : block->vf_lfcnt_reg; + rvu_write64(rvu, BLKADDR_RVUM, reg | (devnum << 16), num_lfs); +} + +inline int rvu_get_pf(u16 pcifunc) +{ + return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK; +} + +void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf) +{ + u64 cfg; + + /* Get numVFs attached to this PF and first HWVF */ + cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); + *numvfs = (cfg >> 12) & 0xFF; + *hwvf = cfg & 0xFFF; +} + +static int rvu_get_hwvf(struct rvu *rvu, int pcifunc) +{ + int pf, func; + u64 cfg; + + pf = rvu_get_pf(pcifunc); + func = pcifunc & RVU_PFVF_FUNC_MASK; + + /* Get first HWVF attached to this PF */ + cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); + + return ((cfg & 0xFFF) + func - 1); +} + +struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc) +{ + /* Check if it is a PF or VF */ + if (pcifunc & RVU_PFVF_FUNC_MASK) + return &rvu->hwvf[rvu_get_hwvf(rvu, pcifunc)]; + else + return &rvu->pf[rvu_get_pf(pcifunc)]; +} + +bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr) +{ + struct rvu_block *block; + + if (blkaddr < BLKADDR_RVUM || blkaddr >= BLK_COUNT) + return false; + + block = &hw->block[blkaddr]; + return block->implemented; +} + +static void rvu_check_block_implemented(struct rvu *rvu) +{ + struct rvu_hwinfo *hw = rvu->hw; + struct rvu_block *block; + int blkid; + u64 cfg; + + /* For each block check if 'implemented' bit is set */ + for (blkid = 0; blkid < BLK_COUNT; blkid++) { + block = &hw->block[blkid]; + cfg = rvupf_read64(rvu, RVU_PF_BLOCK_ADDRX_DISC(blkid)); + if (cfg & BIT_ULL(11)) + block->implemented = true; + } +} + +static void rvu_block_reset(struct rvu *rvu, int blkaddr, u64 rst_reg) +{ + struct rvu_block *block = &rvu->hw->block[blkaddr]; + + if (!block->implemented) + return; + + rvu_write64(rvu, blkaddr, rst_reg, BIT_ULL(0)); + rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true); +} + +static void rvu_reset_all_blocks(struct rvu *rvu) +{ + /* Do a HW reset of all RVU blocks */ + rvu_block_reset(rvu, BLKADDR_NPA, NPA_AF_BLK_RST); + rvu_block_reset(rvu, BLKADDR_NIX0, NIX_AF_BLK_RST); + rvu_block_reset(rvu, BLKADDR_NPC, NPC_AF_BLK_RST); + rvu_block_reset(rvu, BLKADDR_SSO, SSO_AF_BLK_RST); + rvu_block_reset(rvu, BLKADDR_TIM, TIM_AF_BLK_RST); + rvu_block_reset(rvu, BLKADDR_CPT0, CPT_AF_BLK_RST); + rvu_block_reset(rvu, BLKADDR_NDC0, NDC_AF_BLK_RST); + rvu_block_reset(rvu, BLKADDR_NDC1, NDC_AF_BLK_RST); + rvu_block_reset(rvu, BLKADDR_NDC2, NDC_AF_BLK_RST); +} + +static void rvu_scan_block(struct rvu *rvu, struct rvu_block *block) +{ + struct rvu_pfvf *pfvf; + u64 cfg; + int lf; + + for (lf = 0; lf < block->lf.max; lf++) { + cfg = rvu_read64(rvu, block->addr, + block->lfcfg_reg | (lf << block->lfshift)); + if (!(cfg & BIT_ULL(63))) + continue; + + /* Set this resource as being used */ + __set_bit(lf, block->lf.bmap); + + /* Get, to whom this LF is attached */ + pfvf = rvu_get_pfvf(rvu, (cfg >> 8) & 0xFFFF); + rvu_update_rsrc_map(rvu, pfvf, block, + (cfg >> 8) & 0xFFFF, lf, true); + + /* Set start MSIX vector for this LF within this PF/VF */ + rvu_set_msix_offset(rvu, pfvf, block, lf); + } +} + +static void 
rvu_check_min_msix_vec(struct rvu *rvu, int nvecs, int pf, int vf)
+{
+	int min_vecs;
+
+	if (!vf)
+		goto check_pf;
+
+	if (!nvecs) {
+		dev_warn(rvu->dev,
+			 "PF%d:VF%d is configured with zero msix vectors\n",
+			 pf, vf - 1);
+	}
+	return;
+
+check_pf:
+	if (pf == 0)
+		min_vecs = RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT;
+	else
+		min_vecs = RVU_PF_INT_VEC_CNT;
+
+	if (!(nvecs < min_vecs))
+		return;
+	dev_warn(rvu->dev,
+		 "PF%d is configured with too few vectors, %d, min is %d\n",
+		 pf, nvecs, min_vecs);
+}
+
+static int rvu_setup_msix_resources(struct rvu *rvu)
+{
+	struct rvu_hwinfo *hw = rvu->hw;
+	int pf, vf, numvfs, hwvf, err;
+	int nvecs, offset, max_msix;
+	struct rvu_pfvf *pfvf;
+	u64 cfg, phy_addr;
+	dma_addr_t iova;
+
+	for (pf = 0; pf < hw->total_pfs; pf++) {
+		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
+		/* If PF is not enabled, nothing to do */
+		if (!((cfg >> 20) & 0x01))
+			continue;
+
+		rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
+
+		pfvf = &rvu->pf[pf];
+		/* Get num of MSIX vectors attached to this PF */
+		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_MSIX_CFG(pf));
+		pfvf->msix.max = ((cfg >> 32) & 0xFFF) + 1;
+		rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, 0);
+
+		/* Alloc msix bitmap for this PF */
+		err = rvu_alloc_bitmap(&pfvf->msix);
+		if (err)
+			return err;
+
+		/* Allocate memory for MSIX vector to RVU block LF mapping */
+		pfvf->msix_lfmap = devm_kcalloc(rvu->dev, pfvf->msix.max,
+						sizeof(u16), GFP_KERNEL);
+		if (!pfvf->msix_lfmap)
+			return -ENOMEM;
+
+		/* For PF0 (AF) firmware will set msix vector offsets for
+		 * AF, block AF and PF0_INT vectors, so jump to VFs.
+		 */
+		if (!pf)
+			goto setup_vfmsix;
+
+		/* Set MSIX offset for PF's 'RVU_PF_INT_VEC' vectors.
+		 * These are allocated on driver init and never freed,
+		 * so no need to set 'msix_lfmap' for these.
+		 */
+		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(pf));
+		nvecs = (cfg >> 12) & 0xFF;
+		cfg &= ~0x7FFULL;
+		offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
+		rvu_write64(rvu, BLKADDR_RVUM,
+			    RVU_PRIV_PFX_INT_CFG(pf), cfg | offset);
+setup_vfmsix:
+		/* Alloc msix bitmap for VFs */
+		for (vf = 0; vf < numvfs; vf++) {
+			pfvf = &rvu->hwvf[hwvf + vf];
+			/* Get num of MSIX vectors attached to this VF */
+			cfg = rvu_read64(rvu, BLKADDR_RVUM,
+					 RVU_PRIV_PFX_MSIX_CFG(pf));
+			pfvf->msix.max = (cfg & 0xFFF) + 1;
+			rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, vf + 1);
+
+			/* Alloc msix bitmap for this VF */
+			err = rvu_alloc_bitmap(&pfvf->msix);
+			if (err)
+				return err;
+
+			pfvf->msix_lfmap =
+				devm_kcalloc(rvu->dev, pfvf->msix.max,
+					     sizeof(u16), GFP_KERNEL);
+			if (!pfvf->msix_lfmap)
+				return -ENOMEM;
+
+			/* Set MSIX offset for HWVF's 'RVU_VF_INT_VEC' vectors.
+			 * These are allocated on driver init and never freed,
+			 * so no need to set 'msix_lfmap' for these.
+			 */
+			cfg = rvu_read64(rvu, BLKADDR_RVUM,
+					 RVU_PRIV_HWVFX_INT_CFG(hwvf + vf));
+			nvecs = (cfg >> 12) & 0xFF;
+			cfg &= ~0x7FFULL;
+			offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
+			rvu_write64(rvu, BLKADDR_RVUM,
+				    RVU_PRIV_HWVFX_INT_CFG(hwvf + vf),
+				    cfg | offset);
+		}
+	}
+
+	/* HW interprets RVU_AF_MSIXTR_BASE address as an IOVA, hence
+	 * create an IOMMU mapping for the physical address configured by
+	 * firmware and reconfigure RVU_AF_MSIXTR_BASE with the IOVA.
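+	 *
+	 * Editor's note, illustrative: dma_map_resource() is used here
+	 * rather than dma_map_single() because the MSI-X table is a raw
+	 * physical/MMIO range with no struct page backing; the returned
+	 * IOVA is what the device must see once an IOMMU is in the path.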
+ */ + cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST); + max_msix = cfg & 0xFFFFF; + phy_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE); + iova = dma_map_resource(rvu->dev, phy_addr, + max_msix * PCI_MSIX_ENTRY_SIZE, + DMA_BIDIRECTIONAL, 0); + + if (dma_mapping_error(rvu->dev, iova)) + return -ENOMEM; + + rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE, (u64)iova); + rvu->msix_base_iova = iova; + + return 0; +} + +static void rvu_free_hw_resources(struct rvu *rvu) +{ + struct rvu_hwinfo *hw = rvu->hw; + struct rvu_block *block; + struct rvu_pfvf *pfvf; + int id, max_msix; + u64 cfg; + + /* Free block LF bitmaps */ + for (id = 0; id < BLK_COUNT; id++) { + block = &hw->block[id]; + kfree(block->lf.bmap); + } + + /* Free MSIX bitmaps */ + for (id = 0; id < hw->total_pfs; id++) { + pfvf = &rvu->pf[id]; + kfree(pfvf->msix.bmap); + } + + for (id = 0; id < hw->total_vfs; id++) { + pfvf = &rvu->hwvf[id]; + kfree(pfvf->msix.bmap); + } + + /* Unmap MSIX vector base IOVA mapping */ + if (!rvu->msix_base_iova) + return; + cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST); + max_msix = cfg & 0xFFFFF; + dma_unmap_resource(rvu->dev, rvu->msix_base_iova, + max_msix * PCI_MSIX_ENTRY_SIZE, + DMA_BIDIRECTIONAL, 0); +} + +static int rvu_setup_hw_resources(struct rvu *rvu) +{ + struct rvu_hwinfo *hw = rvu->hw; + struct rvu_block *block; + int blkid, err; + u64 cfg; + + /* Get HW supported max RVU PF & VF count */ + cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST); + hw->total_pfs = (cfg >> 32) & 0xFF; + hw->total_vfs = (cfg >> 20) & 0xFFF; + hw->max_vfs_per_pf = (cfg >> 40) & 0xFF; + + /* Init NPA LF's bitmap */ + block = &hw->block[BLKADDR_NPA]; + if (!block->implemented) + goto nix; + cfg = rvu_read64(rvu, BLKADDR_NPA, NPA_AF_CONST); + block->lf.max = (cfg >> 16) & 0xFFF; + block->addr = BLKADDR_NPA; + block->type = BLKTYPE_NPA; + block->lfshift = 8; + block->lookup_reg = NPA_AF_RVU_LF_CFG_DEBUG; + block->pf_lfcnt_reg = RVU_PRIV_PFX_NPA_CFG; + block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NPA_CFG; + block->lfcfg_reg = NPA_PRIV_LFX_CFG; + block->msixcfg_reg = NPA_PRIV_LFX_INT_CFG; + block->lfreset_reg = NPA_AF_LF_RST; + sprintf(block->name, "NPA"); + err = rvu_alloc_bitmap(&block->lf); + if (err) + return err; + +nix: + /* Init NIX LF's bitmap */ + block = &hw->block[BLKADDR_NIX0]; + if (!block->implemented) + goto sso; + cfg = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_CONST2); + block->lf.max = cfg & 0xFFF; + block->addr = BLKADDR_NIX0; + block->type = BLKTYPE_NIX; + block->lfshift = 8; + block->lookup_reg = NIX_AF_RVU_LF_CFG_DEBUG; + block->pf_lfcnt_reg = RVU_PRIV_PFX_NIX0_CFG; + block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NIX0_CFG; + block->lfcfg_reg = NIX_PRIV_LFX_CFG; + block->msixcfg_reg = NIX_PRIV_LFX_INT_CFG; + block->lfreset_reg = NIX_AF_LF_RST; + sprintf(block->name, "NIX"); + err = rvu_alloc_bitmap(&block->lf); + if (err) + return err; + +sso: + /* Init SSO group's bitmap */ + block = &hw->block[BLKADDR_SSO]; + if (!block->implemented) + goto ssow; + cfg = rvu_read64(rvu, BLKADDR_SSO, SSO_AF_CONST); + block->lf.max = cfg & 0xFFFF; + block->addr = BLKADDR_SSO; + block->type = BLKTYPE_SSO; + block->multislot = true; + block->lfshift = 3; + block->lookup_reg = SSO_AF_RVU_LF_CFG_DEBUG; + block->pf_lfcnt_reg = RVU_PRIV_PFX_SSO_CFG; + block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSO_CFG; + block->lfcfg_reg = SSO_PRIV_LFX_HWGRP_CFG; + block->msixcfg_reg = SSO_PRIV_LFX_HWGRP_INT_CFG; + block->lfreset_reg = SSO_AF_LF_HWGRP_RST; + sprintf(block->name, "SSO GROUP"); + err = rvu_alloc_bitmap(&block->lf); + if (err) + 
return err; + +ssow: + /* Init SSO workslot's bitmap */ + block = &hw->block[BLKADDR_SSOW]; + if (!block->implemented) + goto tim; + block->lf.max = (cfg >> 56) & 0xFF; + block->addr = BLKADDR_SSOW; + block->type = BLKTYPE_SSOW; + block->multislot = true; + block->lfshift = 3; + block->lookup_reg = SSOW_AF_RVU_LF_HWS_CFG_DEBUG; + block->pf_lfcnt_reg = RVU_PRIV_PFX_SSOW_CFG; + block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSOW_CFG; + block->lfcfg_reg = SSOW_PRIV_LFX_HWS_CFG; + block->msixcfg_reg = SSOW_PRIV_LFX_HWS_INT_CFG; + block->lfreset_reg = SSOW_AF_LF_HWS_RST; + sprintf(block->name, "SSOWS"); + err = rvu_alloc_bitmap(&block->lf); + if (err) + return err; + +tim: + /* Init TIM LF's bitmap */ + block = &hw->block[BLKADDR_TIM]; + if (!block->implemented) + goto cpt; + cfg = rvu_read64(rvu, BLKADDR_TIM, TIM_AF_CONST); + block->lf.max = cfg & 0xFFFF; + block->addr = BLKADDR_TIM; + block->type = BLKTYPE_TIM; + block->multislot = true; + block->lfshift = 3; + block->lookup_reg = TIM_AF_RVU_LF_CFG_DEBUG; + block->pf_lfcnt_reg = RVU_PRIV_PFX_TIM_CFG; + block->vf_lfcnt_reg = RVU_PRIV_HWVFX_TIM_CFG; + block->lfcfg_reg = TIM_PRIV_LFX_CFG; + block->msixcfg_reg = TIM_PRIV_LFX_INT_CFG; + block->lfreset_reg = TIM_AF_LF_RST; + sprintf(block->name, "TIM"); + err = rvu_alloc_bitmap(&block->lf); + if (err) + return err; + +cpt: + /* Init CPT LF's bitmap */ + block = &hw->block[BLKADDR_CPT0]; + if (!block->implemented) + goto init; + cfg = rvu_read64(rvu, BLKADDR_CPT0, CPT_AF_CONSTANTS0); + block->lf.max = cfg & 0xFF; + block->addr = BLKADDR_CPT0; + block->type = BLKTYPE_CPT; + block->multislot = true; + block->lfshift = 3; + block->lookup_reg = CPT_AF_RVU_LF_CFG_DEBUG; + block->pf_lfcnt_reg = RVU_PRIV_PFX_CPT0_CFG; + block->vf_lfcnt_reg = RVU_PRIV_HWVFX_CPT0_CFG; + block->lfcfg_reg = CPT_PRIV_LFX_CFG; + block->msixcfg_reg = CPT_PRIV_LFX_INT_CFG; + block->lfreset_reg = CPT_AF_LF_RST; + sprintf(block->name, "CPT"); + err = rvu_alloc_bitmap(&block->lf); + if (err) + return err; + +init: + /* Allocate memory for PFVF data */ + rvu->pf = devm_kcalloc(rvu->dev, hw->total_pfs, + sizeof(struct rvu_pfvf), GFP_KERNEL); + if (!rvu->pf) + return -ENOMEM; + + rvu->hwvf = devm_kcalloc(rvu->dev, hw->total_vfs, + sizeof(struct rvu_pfvf), GFP_KERNEL); + if (!rvu->hwvf) + return -ENOMEM; + + spin_lock_init(&rvu->rsrc_lock); + + err = rvu_setup_msix_resources(rvu); + if (err) + return err; + + for (blkid = 0; blkid < BLK_COUNT; blkid++) { + block = &hw->block[blkid]; + if (!block->lf.bmap) + continue; + + /* Allocate memory for block LF/slot to pcifunc mapping info */ + block->fn_map = devm_kcalloc(rvu->dev, block->lf.max, + sizeof(u16), GFP_KERNEL); + if (!block->fn_map) + return -ENOMEM; + + /* Scan all blocks to check if low level firmware has + * already provisioned any of the resources to a PF/VF. + */ + rvu_scan_block(rvu, block); + } + + return 0; +} + +static int rvu_mbox_handler_READY(struct rvu *rvu, struct msg_req *req, + struct ready_msg_rsp *rsp) +{ + return 0; +} + +/* Get current count of a RVU block's LF/slots + * provisioned to a given RVU func. + */ +static u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blktype) +{ + switch (blktype) { + case BLKTYPE_NPA: + return pfvf->npalf ? 1 : 0; + case BLKTYPE_NIX: + return pfvf->nixlf ? 
1 : 0; + case BLKTYPE_SSO: + return pfvf->sso; + case BLKTYPE_SSOW: + return pfvf->ssow; + case BLKTYPE_TIM: + return pfvf->timlfs; + case BLKTYPE_CPT: + return pfvf->cptlfs; + } + return 0; +} + +static int rvu_lookup_rsrc(struct rvu *rvu, struct rvu_block *block, + int pcifunc, int slot) +{ + u64 val; + + val = ((u64)pcifunc << 24) | (slot << 16) | (1ULL << 13); + rvu_write64(rvu, block->addr, block->lookup_reg, val); + /* Wait for the lookup to finish */ + /* TODO: put some timeout here */ + while (rvu_read64(rvu, block->addr, block->lookup_reg) & (1ULL << 13)) + ; + + val = rvu_read64(rvu, block->addr, block->lookup_reg); + + /* Check LF valid bit */ + if (!(val & (1ULL << 12))) + return -1; + + return (val & 0xFFF); +} + +static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype) +{ + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); + struct rvu_hwinfo *hw = rvu->hw; + struct rvu_block *block; + int slot, lf, num_lfs; + int blkaddr; + + blkaddr = rvu_get_blkaddr(rvu, blktype, pcifunc); + if (blkaddr < 0) + return; + + block = &hw->block[blkaddr]; + + num_lfs = rvu_get_rsrc_mapcount(pfvf, block->type); + if (!num_lfs) + return; + + for (slot = 0; slot < num_lfs; slot++) { + lf = rvu_lookup_rsrc(rvu, block, pcifunc, slot); + if (lf < 0) /* This should never happen */ + continue; + + /* Disable the LF */ + rvu_write64(rvu, blkaddr, block->lfcfg_reg | + (lf << block->lfshift), 0x00ULL); + + /* Update SW maintained mapping info as well */ + rvu_update_rsrc_map(rvu, pfvf, block, + pcifunc, lf, false); + + /* Free the resource */ + rvu_free_rsrc(&block->lf, lf); + + /* Clear MSIX vector offset for this LF */ + rvu_clear_msix_offset(rvu, pfvf, block, lf); + } +} + +static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach, + u16 pcifunc) +{ + struct rvu_hwinfo *hw = rvu->hw; + bool is_pf, detach_all = true; + struct rvu_block *block; + int devnum, blkid; + + /* Check if this is for a RVU PF or VF */ + if (pcifunc & RVU_PFVF_FUNC_MASK) { + is_pf = false; + devnum = rvu_get_hwvf(rvu, pcifunc); + } else { + is_pf = true; + devnum = rvu_get_pf(pcifunc); + } + + spin_lock(&rvu->rsrc_lock); + + /* Check for partial resource detach */ + if (detach && detach->partial) + detach_all = false; + + /* Check for RVU block's LFs attached to this func, + * if so, detach them. 
+ */ + for (blkid = 0; blkid < BLK_COUNT; blkid++) { + block = &hw->block[blkid]; + if (!block->lf.bmap) + continue; + if (!detach_all && detach) { + if (blkid == BLKADDR_NPA && !detach->npalf) + continue; + else if ((blkid == BLKADDR_NIX0) && !detach->nixlf) + continue; + else if ((blkid == BLKADDR_SSO) && !detach->sso) + continue; + else if ((blkid == BLKADDR_SSOW) && !detach->ssow) + continue; + else if ((blkid == BLKADDR_TIM) && !detach->timlfs) + continue; + else if ((blkid == BLKADDR_CPT0) && !detach->cptlfs) + continue; + } + rvu_detach_block(rvu, pcifunc, block->type); + } + + spin_unlock(&rvu->rsrc_lock); + return 0; +} + +static int rvu_mbox_handler_DETACH_RESOURCES(struct rvu *rvu, + struct rsrc_detach *detach, + struct msg_rsp *rsp) +{ + return rvu_detach_rsrcs(rvu, detach, detach->hdr.pcifunc); +} + +static void rvu_attach_block(struct rvu *rvu, int pcifunc, + int blktype, int num_lfs) +{ + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); + struct rvu_hwinfo *hw = rvu->hw; + struct rvu_block *block; + int slot, lf; + int blkaddr; + u64 cfg; + + if (!num_lfs) + return; + + blkaddr = rvu_get_blkaddr(rvu, blktype, 0); + if (blkaddr < 0) + return; + + block = &hw->block[blkaddr]; + if (!block->lf.bmap) + return; + + for (slot = 0; slot < num_lfs; slot++) { + /* Allocate the resource */ + lf = rvu_alloc_rsrc(&block->lf); + if (lf < 0) + return; + + cfg = (1ULL << 63) | (pcifunc << 8) | slot; + rvu_write64(rvu, blkaddr, block->lfcfg_reg | + (lf << block->lfshift), cfg); + rvu_update_rsrc_map(rvu, pfvf, block, + pcifunc, lf, true); + + /* Set start MSIX vector for this LF within this PF/VF */ + rvu_set_msix_offset(rvu, pfvf, block, lf); + } +} + +static int rvu_check_rsrc_availability(struct rvu *rvu, + struct rsrc_attach *req, u16 pcifunc) +{ + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); + struct rvu_hwinfo *hw = rvu->hw; + struct rvu_block *block; + int free_lfs, mappedlfs; + + /* Only one NPA LF can be attached */ + if (req->npalf && !rvu_get_rsrc_mapcount(pfvf, BLKTYPE_NPA)) { + block = &hw->block[BLKADDR_NPA]; + free_lfs = rvu_rsrc_free_count(&block->lf); + if (!free_lfs) + goto fail; + } else if (req->npalf) { + dev_err(&rvu->pdev->dev, + "Func 0x%x: Invalid req, already has NPA\n", + pcifunc); + return -EINVAL; + } + + /* Only one NIX LF can be attached */ + if (req->nixlf && !rvu_get_rsrc_mapcount(pfvf, BLKTYPE_NIX)) { + block = &hw->block[BLKADDR_NIX0]; + free_lfs = rvu_rsrc_free_count(&block->lf); + if (!free_lfs) + goto fail; + } else if (req->nixlf) { + dev_err(&rvu->pdev->dev, + "Func 0x%x: Invalid req, already has NIX\n", + pcifunc); + return -EINVAL; + } + + if (req->sso) { + block = &hw->block[BLKADDR_SSO]; + /* Is request within limits ? 
 */
+		if (req->sso > block->lf.max) {
+			dev_err(&rvu->pdev->dev,
+				"Func 0x%x: Invalid SSO req, %d > max %d\n",
+				 pcifunc, req->sso, block->lf.max);
+			return -EINVAL;
+		}
+		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
+		free_lfs = rvu_rsrc_free_count(&block->lf);
+		/* Check if additional resources are available */
+		if (req->sso > mappedlfs &&
+		    ((req->sso - mappedlfs) > free_lfs))
+			goto fail;
+	}
+
+	if (req->ssow) {
+		block = &hw->block[BLKADDR_SSOW];
+		if (req->ssow > block->lf.max) {
+			dev_err(&rvu->pdev->dev,
+				"Func 0x%x: Invalid SSOW req, %d > max %d\n",
+				 pcifunc, req->ssow, block->lf.max);
+			return -EINVAL;
+		}
+		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
+		free_lfs = rvu_rsrc_free_count(&block->lf);
+		if (req->ssow > mappedlfs &&
+		    ((req->ssow - mappedlfs) > free_lfs))
+			goto fail;
+	}
+
+	if (req->timlfs) {
+		block = &hw->block[BLKADDR_TIM];
+		if (req->timlfs > block->lf.max) {
+			dev_err(&rvu->pdev->dev,
+				"Func 0x%x: Invalid TIMLF req, %d > max %d\n",
+				 pcifunc, req->timlfs, block->lf.max);
+			return -EINVAL;
+		}
+		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
+		free_lfs = rvu_rsrc_free_count(&block->lf);
+		if (req->timlfs > mappedlfs &&
+		    ((req->timlfs - mappedlfs) > free_lfs))
+			goto fail;
+	}
+
+	if (req->cptlfs) {
+		block = &hw->block[BLKADDR_CPT0];
+		if (req->cptlfs > block->lf.max) {
+			dev_err(&rvu->pdev->dev,
+				"Func 0x%x: Invalid CPTLF req, %d > max %d\n",
+				 pcifunc, req->cptlfs, block->lf.max);
+			return -EINVAL;
+		}
+		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
+		free_lfs = rvu_rsrc_free_count(&block->lf);
+		if (req->cptlfs > mappedlfs &&
+		    ((req->cptlfs - mappedlfs) > free_lfs))
+			goto fail;
+	}
+
+	return 0;
+
+fail:
+	dev_info(rvu->dev, "Request for %s failed\n", block->name);
+	return -ENOSPC;
+}
+
+static int rvu_mbox_handler_ATTACH_RESOURCES(struct rvu *rvu,
+					     struct rsrc_attach *attach,
+					     struct msg_rsp *rsp)
+{
+	u16 pcifunc = attach->hdr.pcifunc;
+	int devnum, err;
+	bool is_pf;
+
+	/* If first request, detach all existing attached resources */
+	if (!attach->modify)
+		rvu_detach_rsrcs(rvu, NULL, pcifunc);
+
+	/* Check if this is for an RVU PF or VF */
+	if (pcifunc & RVU_PFVF_FUNC_MASK) {
+		is_pf = false;
+		devnum = rvu_get_hwvf(rvu, pcifunc);
+	} else {
+		is_pf = true;
+		devnum = rvu_get_pf(pcifunc);
+	}
+
+	spin_lock(&rvu->rsrc_lock);
+
+	/* Check if the request can be accommodated */
+	err = rvu_check_rsrc_availability(rvu, attach, pcifunc);
+	if (err)
+		goto exit;
+
+	/* Now attach the requested resources */
+	if (attach->npalf)
+		rvu_attach_block(rvu, pcifunc, BLKTYPE_NPA, 1);
+
+	if (attach->nixlf)
+		rvu_attach_block(rvu, pcifunc, BLKTYPE_NIX, 1);
+
+	if (attach->sso) {
+		/* An RVU func doesn't know which exact LF or slot is attached
+		 * to it; it always sees them as slots 0, 1, 2. So for a
+		 * 'modify' request, simply detach all existing attached
+		 * LFs/slots and attach afresh.
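+		 *
+		 * Editor's example, illustrative: if two SSO LFs are already
+		 * attached and a 'modify' request asks for three, the two
+		 * existing LFs are detached and three fresh LFs attached;
+		 * the function still addresses them as slots 0-2.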
+ */ + if (attach->modify) + rvu_detach_block(rvu, pcifunc, BLKTYPE_SSO); + rvu_attach_block(rvu, pcifunc, BLKTYPE_SSO, attach->sso); + } + + if (attach->ssow) { + if (attach->modify) + rvu_detach_block(rvu, pcifunc, BLKTYPE_SSOW); + rvu_attach_block(rvu, pcifunc, BLKTYPE_SSOW, attach->ssow); + } + + if (attach->timlfs) { + if (attach->modify) + rvu_detach_block(rvu, pcifunc, BLKTYPE_TIM); + rvu_attach_block(rvu, pcifunc, BLKTYPE_TIM, attach->timlfs); + } + + if (attach->cptlfs) { + if (attach->modify) + rvu_detach_block(rvu, pcifunc, BLKTYPE_CPT); + rvu_attach_block(rvu, pcifunc, BLKTYPE_CPT, attach->cptlfs); + } + +exit: + spin_unlock(&rvu->rsrc_lock); + return err; +} + +static u16 rvu_get_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf, + int blkaddr, int lf) +{ + u16 vec; + + if (lf < 0) + return MSIX_VECTOR_INVALID; + + for (vec = 0; vec < pfvf->msix.max; vec++) { + if (pfvf->msix_lfmap[vec] == MSIX_BLKLF(blkaddr, lf)) + return vec; + } + return MSIX_VECTOR_INVALID; +} + +static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf, + struct rvu_block *block, int lf) +{ + u16 nvecs, vec, offset; + u64 cfg; + + cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg | + (lf << block->lfshift)); + nvecs = (cfg >> 12) & 0xFF; + + /* Check and alloc MSIX vectors, must be contiguous */ + if (!rvu_rsrc_check_contig(&pfvf->msix, nvecs)) + return; + + offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs); + + /* Config MSIX offset in LF */ + rvu_write64(rvu, block->addr, block->msixcfg_reg | + (lf << block->lfshift), (cfg & ~0x7FFULL) | offset); + + /* Update the bitmap as well */ + for (vec = 0; vec < nvecs; vec++) + pfvf->msix_lfmap[offset + vec] = MSIX_BLKLF(block->addr, lf); +} + +static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf, + struct rvu_block *block, int lf) +{ + u16 nvecs, vec, offset; + u64 cfg; + + cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg | + (lf << block->lfshift)); + nvecs = (cfg >> 12) & 0xFF; + + /* Clear MSIX offset in LF */ + rvu_write64(rvu, block->addr, block->msixcfg_reg | + (lf << block->lfshift), cfg & ~0x7FFULL); + + offset = rvu_get_msix_offset(rvu, pfvf, block->addr, lf); + + /* Update the mapping */ + for (vec = 0; vec < nvecs; vec++) + pfvf->msix_lfmap[offset + vec] = 0; + + /* Free the same in MSIX bitmap */ + rvu_free_rsrc_contig(&pfvf->msix, nvecs, offset); +} + +static int rvu_mbox_handler_MSIX_OFFSET(struct rvu *rvu, struct msg_req *req, + struct msix_offset_rsp *rsp) +{ + struct rvu_hwinfo *hw = rvu->hw; + u16 pcifunc = req->hdr.pcifunc; + struct rvu_pfvf *pfvf; + int lf, slot; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + if (!pfvf->msix.bmap) + return 0; + + /* Set MSIX offsets for each block's LFs attached to this PF/VF */ + lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NPA], pcifunc, 0); + rsp->npa_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NPA, lf); + + lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NIX0], pcifunc, 0); + rsp->nix_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NIX0, lf); + + rsp->sso = pfvf->sso; + for (slot = 0; slot < rsp->sso; slot++) { + lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSO], pcifunc, slot); + rsp->sso_msixoff[slot] = + rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSO, lf); + } + + rsp->ssow = pfvf->ssow; + for (slot = 0; slot < rsp->ssow; slot++) { + lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSOW], pcifunc, slot); + rsp->ssow_msixoff[slot] = + rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSOW, lf); + } + + rsp->timlfs = pfvf->timlfs; + for (slot = 0; slot < rsp->timlfs; slot++) { + lf = rvu_get_lf(rvu, 
&hw->block[BLKADDR_TIM], pcifunc, slot);
+		rsp->timlf_msixoff[slot] =
+			rvu_get_msix_offset(rvu, pfvf, BLKADDR_TIM, lf);
+	}
+
+	rsp->cptlfs = pfvf->cptlfs;
+	for (slot = 0; slot < rsp->cptlfs; slot++) {
+		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_CPT0], pcifunc, slot);
+		rsp->cptlf_msixoff[slot] =
+			rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT0, lf);
+	}
+	return 0;
+}
+
+static int rvu_process_mbox_msg(struct rvu *rvu, int devid,
+				struct mbox_msghdr *req)
+{
+	/* Check if valid, if not reply with an invalid msg */
+	if (req->sig != OTX2_MBOX_REQ_SIG)
+		goto bad_message;
+
+	switch (req->id) {
+#define M(_name, _id, _req_type, _rsp_type)				\
+	case _id: {							\
+		struct _rsp_type *rsp;					\
+		int err;						\
+									\
+		rsp = (struct _rsp_type *)otx2_mbox_alloc_msg(		\
+			&rvu->mbox, devid,				\
+			sizeof(struct _rsp_type));			\
+		if (rsp) {						\
+			rsp->hdr.id = _id;				\
+			rsp->hdr.sig = OTX2_MBOX_RSP_SIG;		\
+			rsp->hdr.pcifunc = req->pcifunc;		\
+			rsp->hdr.rc = 0;				\
+		}							\
+									\
+		err = rvu_mbox_handler_ ## _name(rvu,			\
+						 (struct _req_type *)req, \
+						 rsp);			\
+		if (rsp && err)						\
+			rsp->hdr.rc = err;				\
+									\
+		return rsp ? err : -ENOMEM;				\
+	}
+MBOX_MESSAGES
+#undef M
+		break;
+bad_message:
+	default:
+		otx2_reply_invalid_msg(&rvu->mbox, devid, req->pcifunc,
+				       req->id);
+		return -ENODEV;
+	}
+}
+
+static void rvu_mbox_handler(struct work_struct *work)
+{
+	struct rvu_work *mwork = container_of(work, struct rvu_work, work);
+	struct rvu *rvu = mwork->rvu;
+	struct otx2_mbox_dev *mdev;
+	struct mbox_hdr *req_hdr;
+	struct mbox_msghdr *msg;
+	struct otx2_mbox *mbox;
+	int offset, id, err;
+	u16 pf;
+
+	mbox = &rvu->mbox;
+	pf = mwork - rvu->mbox_wrk;
+	mdev = &mbox->dev[pf];
+
+	/* Process received mbox messages */
+	req_hdr = mdev->mbase + mbox->rx_start;
+	if (req_hdr->num_msgs == 0)
+		return;
+
+	offset = mbox->rx_start + ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
+
+	for (id = 0; id < req_hdr->num_msgs; id++) {
+		msg = mdev->mbase + offset;
+
+		/* Set which PF sent this message based on mbox IRQ */
+		msg->pcifunc &= ~(RVU_PFVF_PF_MASK << RVU_PFVF_PF_SHIFT);
+		msg->pcifunc |= (pf << RVU_PFVF_PF_SHIFT);
+		err = rvu_process_mbox_msg(rvu, pf, msg);
+		if (!err) {
+			offset = mbox->rx_start + msg->next_msgoff;
+			continue;
+		}
+
+		if (msg->pcifunc & RVU_PFVF_FUNC_MASK)
+			dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d:VF%d\n",
+				 err, otx2_mbox_id2name(msg->id), msg->id, pf,
+				 (msg->pcifunc & RVU_PFVF_FUNC_MASK) - 1);
+		else
+			dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d\n",
+				 err, otx2_mbox_id2name(msg->id), msg->id, pf);
+	}
+
+	/* Send mbox responses to PF */
+	otx2_mbox_msg_send(mbox, pf);
+}
+
+static int rvu_mbox_init(struct rvu *rvu)
+{
+	struct rvu_hwinfo *hw = rvu->hw;
+	void __iomem *hwbase = NULL;
+	struct rvu_work *mwork;
+	u64 bar4_addr;
+	int err, pf;
+
+	rvu->mbox_wq = alloc_workqueue("rvu_afpf_mailbox",
+				       WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
+				       hw->total_pfs);
+	if (!rvu->mbox_wq)
+		return -ENOMEM;
+
+	rvu->mbox_wrk = devm_kcalloc(rvu->dev, hw->total_pfs,
+				     sizeof(struct rvu_work), GFP_KERNEL);
+	if (!rvu->mbox_wrk) {
+		err = -ENOMEM;
+		goto exit;
+	}
+
+	/* Map mbox region shared with PFs */
+	bar4_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PF_BAR4_ADDR);
+	/* Mailbox is a reserved memory (in RAM) region shared between
+	 * RVU devices and shouldn't be mapped as device memory, to allow
+	 * unaligned accesses.
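+	 *
+	 * Editor's note, illustrative: ioremap_wc() maps the range as
+	 * normal write-combining memory; a plain ioremap() would map it
+	 * as device memory, where unaligned accesses fault on arm64.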
+ */ + hwbase = ioremap_wc(bar4_addr, MBOX_SIZE * hw->total_pfs); + if (!hwbase) { + dev_err(rvu->dev, "Unable to map mailbox region\n"); + err = -ENOMEM; + goto exit; + } + + err = otx2_mbox_init(&rvu->mbox, hwbase, rvu->pdev, rvu->afreg_base, + MBOX_DIR_AFPF, hw->total_pfs); + if (err) + goto exit; + + for (pf = 0; pf < hw->total_pfs; pf++) { + mwork = &rvu->mbox_wrk[pf]; + mwork->rvu = rvu; + INIT_WORK(&mwork->work, rvu_mbox_handler); + } + + return 0; +exit: + if (hwbase) + iounmap((void __iomem *)hwbase); + destroy_workqueue(rvu->mbox_wq); + return err; +} + +static void rvu_mbox_destroy(struct rvu *rvu) +{ + if (rvu->mbox_wq) { + flush_workqueue(rvu->mbox_wq); + destroy_workqueue(rvu->mbox_wq); + rvu->mbox_wq = NULL; + } + + if (rvu->mbox.hwbase) + iounmap((void __iomem *)rvu->mbox.hwbase); + + otx2_mbox_destroy(&rvu->mbox); +} + +static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq) +{ + struct rvu *rvu = (struct rvu *)rvu_irq; + struct otx2_mbox_dev *mdev; + struct otx2_mbox *mbox; + struct mbox_hdr *hdr; + u64 intr; + u8 pf; + + intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT); + /* Clear interrupts */ + rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT, intr); + + /* Sync with mbox memory region */ + smp_wmb(); + + for (pf = 0; pf < rvu->hw->total_pfs; pf++) { + if (intr & (1ULL << pf)) { + mbox = &rvu->mbox; + mdev = &mbox->dev[pf]; + hdr = mdev->mbase + mbox->rx_start; + if (hdr->num_msgs) + queue_work(rvu->mbox_wq, + &rvu->mbox_wrk[pf].work); + } + } + + return IRQ_HANDLED; +} + +static void rvu_enable_mbox_intr(struct rvu *rvu) +{ + struct rvu_hwinfo *hw = rvu->hw; + + /* Clear spurious irqs, if any */ + rvu_write64(rvu, BLKADDR_RVUM, + RVU_AF_PFAF_MBOX_INT, INTR_MASK(hw->total_pfs)); + + /* Enable mailbox interrupt for all PFs except PF0 i.e AF itself */ + rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1S, + INTR_MASK(hw->total_pfs) & ~1ULL); +} + +static void rvu_unregister_interrupts(struct rvu *rvu) +{ + int irq; + + /* Disable the Mbox interrupt */ + rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1C, + INTR_MASK(rvu->hw->total_pfs) & ~1ULL); + + for (irq = 0; irq < rvu->num_vec; irq++) { + if (rvu->irq_allocated[irq]) + free_irq(pci_irq_vector(rvu->pdev, irq), rvu); + } + + pci_free_irq_vectors(rvu->pdev); + rvu->num_vec = 0; +} + +static int rvu_register_interrupts(struct rvu *rvu) +{ + int ret; + + rvu->num_vec = pci_msix_vec_count(rvu->pdev); + + rvu->irq_name = devm_kmalloc_array(rvu->dev, rvu->num_vec, + NAME_SIZE, GFP_KERNEL); + if (!rvu->irq_name) + return -ENOMEM; + + rvu->irq_allocated = devm_kcalloc(rvu->dev, rvu->num_vec, + sizeof(bool), GFP_KERNEL); + if (!rvu->irq_allocated) + return -ENOMEM; + + /* Enable MSI-X */ + ret = pci_alloc_irq_vectors(rvu->pdev, rvu->num_vec, + rvu->num_vec, PCI_IRQ_MSIX); + if (ret < 0) { + dev_err(rvu->dev, + "RVUAF: Request for %d msix vectors failed, ret %d\n", + rvu->num_vec, ret); + return ret; + } + + /* Register mailbox interrupt handler */ + sprintf(&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], "RVUAF Mbox"); + ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_MBOX), + rvu_mbox_intr_handler, 0, + &rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], rvu); + if (ret) { + dev_err(rvu->dev, + "RVUAF: IRQ registration failed for mbox irq\n"); + goto fail; + } + + rvu->irq_allocated[RVU_AF_INT_VEC_MBOX] = true; + + /* Enable mailbox interrupts from all PFs */ + rvu_enable_mbox_intr(rvu); + + return 0; + +fail: + pci_free_irq_vectors(rvu->pdev); + return ret; +} + +static int 
rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct device *dev = &pdev->dev; + struct rvu *rvu; + int err; + + rvu = devm_kzalloc(dev, sizeof(*rvu), GFP_KERNEL); + if (!rvu) + return -ENOMEM; + + rvu->hw = devm_kzalloc(dev, sizeof(struct rvu_hwinfo), GFP_KERNEL); + if (!rvu->hw) { + devm_kfree(dev, rvu); + return -ENOMEM; + } + + pci_set_drvdata(pdev, rvu); + rvu->pdev = pdev; + rvu->dev = &pdev->dev; + + err = pci_enable_device(pdev); + if (err) { + dev_err(dev, "Failed to enable PCI device\n"); + goto err_freemem; + } + + err = pci_request_regions(pdev, DRV_NAME); + if (err) { + dev_err(dev, "PCI request regions failed 0x%x\n", err); + goto err_disable_device; + } + + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48)); + if (err) { + dev_err(dev, "Unable to set DMA mask\n"); + goto err_release_regions; + } + + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48)); + if (err) { + dev_err(dev, "Unable to set consistent DMA mask\n"); + goto err_release_regions; + } + + /* Map Admin function CSRs */ + rvu->afreg_base = pcim_iomap(pdev, PCI_AF_REG_BAR_NUM, 0); + rvu->pfreg_base = pcim_iomap(pdev, PCI_PF_REG_BAR_NUM, 0); + if (!rvu->afreg_base || !rvu->pfreg_base) { + dev_err(dev, "Unable to map admin function CSRs, aborting\n"); + err = -ENOMEM; + goto err_release_regions; + } + + /* Check which blocks the HW supports */ + rvu_check_block_implemented(rvu); + + rvu_reset_all_blocks(rvu); + + err = rvu_setup_hw_resources(rvu); + if (err) + goto err_release_regions; + + err = rvu_mbox_init(rvu); + if (err) + goto err_hwsetup; + + err = rvu_cgx_probe(rvu); + if (err) + goto err_mbox; + + err = rvu_register_interrupts(rvu); + if (err) + goto err_cgx; + + return 0; +err_cgx: + rvu_cgx_wq_destroy(rvu); +err_mbox: + rvu_mbox_destroy(rvu); +err_hwsetup: + rvu_reset_all_blocks(rvu); + rvu_free_hw_resources(rvu); +err_release_regions: + pci_release_regions(pdev); +err_disable_device: + pci_disable_device(pdev); +err_freemem: + pci_set_drvdata(pdev, NULL); + devm_kfree(&pdev->dev, rvu->hw); + devm_kfree(dev, rvu); + return err; +} + +static void rvu_remove(struct pci_dev *pdev) +{ + struct rvu *rvu = pci_get_drvdata(pdev); + + rvu_unregister_interrupts(rvu); + rvu_cgx_wq_destroy(rvu); + rvu_mbox_destroy(rvu); + rvu_reset_all_blocks(rvu); + rvu_free_hw_resources(rvu); + + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + + devm_kfree(&pdev->dev, rvu->hw); + devm_kfree(&pdev->dev, rvu); +} + +static struct pci_driver rvu_driver = { + .name = DRV_NAME, + .id_table = rvu_id_table, + .probe = rvu_probe, + .remove = rvu_remove, +}; + +static int __init rvu_init_module(void) +{ + int err; + + pr_info("%s: %s\n", DRV_NAME, DRV_STRING); + + err = pci_register_driver(&cgx_driver); + if (err < 0) + return err; + + err = pci_register_driver(&rvu_driver); + if (err < 0) + pci_unregister_driver(&cgx_driver); + + return err; +} + +static void __exit rvu_cleanup_module(void) +{ + pci_unregister_driver(&rvu_driver); + pci_unregister_driver(&cgx_driver); +} + +module_init(rvu_init_module); +module_exit(rvu_cleanup_module); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h new file mode 100644 index 000000000000..d169fa9eb45e --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h @@ -0,0 +1,158 @@ +/* SPDX-License-Identifier: GPL-2.0 + * Marvell OcteonTx2 RVU Admin Function driver + * + * Copyright (C) 2018 Marvell International Ltd. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef RVU_H +#define RVU_H + +#include "rvu_struct.h" +#include "mbox.h" + +/* PCI device IDs */ +#define PCI_DEVID_OCTEONTX2_RVU_AF 0xA065 + +/* PCI BAR nos */ +#define PCI_AF_REG_BAR_NUM 0 +#define PCI_PF_REG_BAR_NUM 2 +#define PCI_MBOX_BAR_NUM 4 + +#define NAME_SIZE 32 + +/* PF_FUNC */ +#define RVU_PFVF_PF_SHIFT 10 +#define RVU_PFVF_PF_MASK 0x3F +#define RVU_PFVF_FUNC_SHIFT 0 +#define RVU_PFVF_FUNC_MASK 0x3FF + +struct rvu_work { + struct work_struct work; + struct rvu *rvu; +}; + +struct rsrc_bmap { + unsigned long *bmap; /* Pointer to resource bitmap */ + u16 max; /* Max resource id or count */ +}; + +struct rvu_block { + struct rsrc_bmap lf; + u16 *fn_map; /* LF to pcifunc mapping */ + bool multislot; + bool implemented; + u8 addr; /* RVU_BLOCK_ADDR_E */ + u8 type; /* RVU_BLOCK_TYPE_E */ + u8 lfshift; + u64 lookup_reg; + u64 pf_lfcnt_reg; + u64 vf_lfcnt_reg; + u64 lfcfg_reg; + u64 msixcfg_reg; + u64 lfreset_reg; + unsigned char name[NAME_SIZE]; +}; + +/* Structure for per RVU func info ie PF/VF */ +struct rvu_pfvf { + bool npalf; /* Only one NPALF per RVU_FUNC */ + bool nixlf; /* Only one NIXLF per RVU_FUNC */ + u16 sso; + u16 ssow; + u16 cptlfs; + u16 timlfs; + + /* Block LF's MSIX vector info */ + struct rsrc_bmap msix; /* Bitmap for MSIX vector alloc */ +#define MSIX_BLKLF(blkaddr, lf) (((blkaddr) << 8) | ((lf) & 0xFF)) + u16 *msix_lfmap; /* Vector to block LF mapping */ +}; + +struct rvu_hwinfo { + u8 total_pfs; /* MAX RVU PFs HW supports */ + u16 total_vfs; /* Max RVU VFs HW supports */ + u16 max_vfs_per_pf; /* Max VFs that can be attached to a PF */ + + struct rvu_block block[BLK_COUNT]; /* Block info */ +}; + +struct rvu { + void __iomem *afreg_base; + void __iomem *pfreg_base; + struct pci_dev *pdev; + struct device *dev; + struct rvu_hwinfo *hw; + struct rvu_pfvf *pf; + struct rvu_pfvf *hwvf; + spinlock_t rsrc_lock; /* Serialize resource alloc/free */ + + /* Mbox */ + struct otx2_mbox mbox; + struct rvu_work *mbox_wrk; + struct workqueue_struct *mbox_wq; + + /* MSI-X */ + u16 num_vec; + char *irq_name; + bool *irq_allocated; + dma_addr_t msix_base_iova; + + /* CGX */ +#define PF_CGXMAP_BASE 1 /* PF 0 is reserved for RVU PF */ + u8 cgx_mapped_pfs; + u8 cgx_cnt; /* available cgx ports */ + u8 *pf2cgxlmac_map; /* pf to cgx_lmac map */ + u16 *cgxlmac2pf_map; /* bitmap of mapped pfs for + * every cgx lmac port + */ + void **cgx_idmap; /* cgx id to cgx data map table */ + struct work_struct cgx_evh_work; + struct workqueue_struct *cgx_evh_wq; + spinlock_t cgx_evq_lock; /* cgx event queue lock */ + struct list_head cgx_evq_head; /* cgx event queue head */ +}; + +static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val) +{ + writeq(val, rvu->afreg_base + ((block << 28) | offset)); +} + +static inline u64 rvu_read64(struct rvu *rvu, u64 block, u64 offset) +{ + return readq(rvu->afreg_base + ((block << 28) | offset)); +} + +static inline void rvupf_write64(struct rvu *rvu, u64 offset, u64 val) +{ + writeq(val, rvu->pfreg_base + offset); +} + +static inline u64 rvupf_read64(struct rvu *rvu, u64 offset) +{ + return readq(rvu->pfreg_base + offset); +} + +/* Function Prototypes + * RVU + */ + +int rvu_alloc_bitmap(struct rsrc_bmap *rsrc); +int rvu_alloc_rsrc(struct rsrc_bmap *rsrc); +void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id); +int rvu_rsrc_free_count(struct 
rsrc_bmap *rsrc); +int rvu_get_pf(u16 pcifunc); +struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc); +void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf); +bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr); +int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot); +int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc); +int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero); + +/* CGX APIs */ +int rvu_cgx_probe(struct rvu *rvu); +void rvu_cgx_wq_destroy(struct rvu *rvu); +#endif /* RVU_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c new file mode 100644 index 000000000000..5ecc22308b22 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c @@ -0,0 +1,194 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell OcteonTx2 RVU Admin Function driver + * + * Copyright (C) 2018 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/types.h> +#include <linux/module.h> +#include <linux/pci.h> + +#include "rvu.h" +#include "cgx.h" + +struct cgx_evq_entry { + struct list_head evq_node; + struct cgx_link_event link_event; +}; + +static inline u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id) +{ + return ((cgx_id & 0xF) << 4) | (lmac_id & 0xF); +} + +static void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu) +{ + if (cgx_id >= rvu->cgx_cnt) + return NULL; + + return rvu->cgx_idmap[cgx_id]; +} + +static int rvu_map_cgx_lmac_pf(struct rvu *rvu) +{ + int cgx_cnt = rvu->cgx_cnt; + int cgx, lmac_cnt, lmac; + int pf = PF_CGXMAP_BASE; + int size; + + if (!cgx_cnt) + return 0; + + if (cgx_cnt > 0xF || MAX_LMAC_PER_CGX > 0xF) + return -EINVAL; + + /* Alloc map table + * An additional entry is required since PF id starts from 1 and + * hence entry at offset 0 is invalid. 
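+ * For example, with cgx_cnt = 2 and MAX_LMAC_PER_CGX = 4 the table gets + * 9 entries: index 0 stays invalid and up to eight PFs (PF1..PF8) are + * mapped to (cgx, lmac) pairs.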
+ */ + size = (cgx_cnt * MAX_LMAC_PER_CGX + 1) * sizeof(u8); + rvu->pf2cgxlmac_map = devm_kzalloc(rvu->dev, size, GFP_KERNEL); + if (!rvu->pf2cgxlmac_map) + return -ENOMEM; + + /* Initialize offset 0 with an invalid cgx and lmac id */ + rvu->pf2cgxlmac_map[0] = 0xFF; + + /* Reverse map table */ + rvu->cgxlmac2pf_map = devm_kzalloc(rvu->dev, + cgx_cnt * MAX_LMAC_PER_CGX * sizeof(u16), + GFP_KERNEL); + if (!rvu->cgxlmac2pf_map) + return -ENOMEM; + + rvu->cgx_mapped_pfs = 0; + for (cgx = 0; cgx < cgx_cnt; cgx++) { + lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu)); + for (lmac = 0; lmac < lmac_cnt; lmac++, pf++) { + rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac); + rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf; + rvu->cgx_mapped_pfs++; + } + } + return 0; +} + +/* This is called from interrupt context and is expected to be atomic */ +static int cgx_lmac_postevent(struct cgx_link_event *event, void *data) +{ + struct cgx_evq_entry *qentry; + struct rvu *rvu = data; + + /* post event to the event queue */ + qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC); + if (!qentry) + return -ENOMEM; + qentry->link_event = *event; + spin_lock(&rvu->cgx_evq_lock); + list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head); + spin_unlock(&rvu->cgx_evq_lock); + + /* start worker to process the events */ + queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work); + + return 0; +} + +static void cgx_evhandler_task(struct work_struct *work) +{ + struct rvu *rvu = container_of(work, struct rvu, cgx_evh_work); + struct cgx_evq_entry *qentry; + struct cgx_link_event *event; + unsigned long flags; + + do { + /* Dequeue an event */ + spin_lock_irqsave(&rvu->cgx_evq_lock, flags); + qentry = list_first_entry_or_null(&rvu->cgx_evq_head, + struct cgx_evq_entry, + evq_node); + if (qentry) + list_del(&qentry->evq_node); + spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags); + if (!qentry) + break; /* nothing more to process */ + + event = &qentry->link_event; + + /* Do nothing for now */ + kfree(qentry); + } while (1); +} + +static void cgx_lmac_event_handler_init(struct rvu *rvu) +{ + struct cgx_event_cb cb; + int cgx, lmac, err; + void *cgxd; + + spin_lock_init(&rvu->cgx_evq_lock); + INIT_LIST_HEAD(&rvu->cgx_evq_head); + INIT_WORK(&rvu->cgx_evh_work, cgx_evhandler_task); + rvu->cgx_evh_wq = alloc_workqueue("rvu_evh_wq", 0, 0); + if (!rvu->cgx_evh_wq) { + dev_err(rvu->dev, "alloc workqueue failed\n"); + return; + } + + cb.notify_link_chg = cgx_lmac_postevent; /* link change callback */ + cb.data = rvu; + + for (cgx = 0; cgx < rvu->cgx_cnt; cgx++) { + cgxd = rvu_cgx_pdata(cgx, rvu); + for (lmac = 0; lmac < cgx_get_lmac_cnt(cgxd); lmac++) { + err = cgx_lmac_evh_register(&cb, cgxd, lmac); + if (err) + dev_err(rvu->dev, + "%d:%d handler register failed\n", + cgx, lmac); + } + } +} + +void rvu_cgx_wq_destroy(struct rvu *rvu) +{ + if (rvu->cgx_evh_wq) { + flush_workqueue(rvu->cgx_evh_wq); + destroy_workqueue(rvu->cgx_evh_wq); + rvu->cgx_evh_wq = NULL; + } +} + +int rvu_cgx_probe(struct rvu *rvu) +{ + int i, err; + + /* find available cgx ports */ + rvu->cgx_cnt = cgx_get_cgx_cnt(); + if (!rvu->cgx_cnt) { + dev_info(rvu->dev, "No CGX devices found!\n"); + return -ENODEV; + } + + rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt * sizeof(void *), + GFP_KERNEL); + if (!rvu->cgx_idmap) + return -ENOMEM; + + /* Initialize the cgxdata table */ + for (i = 0; i < rvu->cgx_cnt; i++) + rvu->cgx_idmap[i] = cgx_get_pdata(i); + + /* Map CGX LMAC interfaces to RVU PFs */ + err = rvu_map_cgx_lmac_pf(rvu); + if (err) + return err; + + /*
Register for CGX events */ + cgx_lmac_event_handler_init(rvu); + return 0; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h new file mode 100644 index 000000000000..d871a394e72b --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h @@ -0,0 +1,441 @@ +/* SPDX-License-Identifier: GPL-2.0 + * Marvell OcteonTx2 RVU Admin Function driver + * + * Copyright (C) 2018 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef RVU_REG_H +#define RVU_REG_H + +/* Admin function registers */ +#define RVU_AF_MSIXTR_BASE (0x10) +#define RVU_AF_ECO (0x20) +#define RVU_AF_BLK_RST (0x30) +#define RVU_AF_PF_BAR4_ADDR (0x40) +#define RVU_AF_RAS (0x100) +#define RVU_AF_RAS_W1S (0x108) +#define RVU_AF_RAS_ENA_W1S (0x110) +#define RVU_AF_RAS_ENA_W1C (0x118) +#define RVU_AF_GEN_INT (0x120) +#define RVU_AF_GEN_INT_W1S (0x128) +#define RVU_AF_GEN_INT_ENA_W1S (0x130) +#define RVU_AF_GEN_INT_ENA_W1C (0x138) +#define RVU_AF_AFPF_MBOX0 (0x02000) +#define RVU_AF_AFPF_MBOX1 (0x02008) +#define RVU_AF_AFPFX_MBOXX(a, b) (0x2000 | (a) << 4 | (b) << 3) +#define RVU_AF_PFME_STATUS (0x2800) +#define RVU_AF_PFTRPEND (0x2810) +#define RVU_AF_PFTRPEND_W1S (0x2820) +#define RVU_AF_PF_RST (0x2840) +#define RVU_AF_HWVF_RST (0x2850) +#define RVU_AF_PFAF_MBOX_INT (0x2880) +#define RVU_AF_PFAF_MBOX_INT_W1S (0x2888) +#define RVU_AF_PFAF_MBOX_INT_ENA_W1S (0x2890) +#define RVU_AF_PFAF_MBOX_INT_ENA_W1C (0x2898) +#define RVU_AF_PFFLR_INT (0x28a0) +#define RVU_AF_PFFLR_INT_W1S (0x28a8) +#define RVU_AF_PFFLR_INT_ENA_W1S (0x28b0) +#define RVU_AF_PFFLR_INT_ENA_W1C (0x28b8) +#define RVU_AF_PFME_INT (0x28c0) +#define RVU_AF_PFME_INT_W1S (0x28c8) +#define RVU_AF_PFME_INT_ENA_W1S (0x28d0) +#define RVU_AF_PFME_INT_ENA_W1C (0x28d8) + +/* Admin function's privileged PF/VF registers */ +#define RVU_PRIV_CONST (0x8000000) +#define RVU_PRIV_GEN_CFG (0x8000010) +#define RVU_PRIV_CLK_CFG (0x8000020) +#define RVU_PRIV_ACTIVE_PC (0x8000030) +#define RVU_PRIV_PFX_CFG(a) (0x8000100 | (a) << 16) +#define RVU_PRIV_PFX_MSIX_CFG(a) (0x8000110 | (a) << 16) +#define RVU_PRIV_PFX_ID_CFG(a) (0x8000120 | (a) << 16) +#define RVU_PRIV_PFX_INT_CFG(a) (0x8000200 | (a) << 16) +#define RVU_PRIV_PFX_NIX0_CFG (0x8000300) +#define RVU_PRIV_PFX_NPA_CFG (0x8000310) +#define RVU_PRIV_PFX_SSO_CFG (0x8000320) +#define RVU_PRIV_PFX_SSOW_CFG (0x8000330) +#define RVU_PRIV_PFX_TIM_CFG (0x8000340) +#define RVU_PRIV_PFX_CPT0_CFG (0x8000350) +#define RVU_PRIV_BLOCK_TYPEX_REV(a) (0x8000400 | (a) << 3) +#define RVU_PRIV_HWVFX_INT_CFG(a) (0x8001280 | (a) << 16) +#define RVU_PRIV_HWVFX_NIX0_CFG (0x8001300) +#define RVU_PRIV_HWVFX_NPA_CFG (0x8001310) +#define RVU_PRIV_HWVFX_SSO_CFG (0x8001320) +#define RVU_PRIV_HWVFX_SSOW_CFG (0x8001330) +#define RVU_PRIV_HWVFX_TIM_CFG (0x8001340) +#define RVU_PRIV_HWVFX_CPT0_CFG (0x8001350) + +/* RVU PF registers */ +#define RVU_PF_VFX_PFVF_MBOX0 (0x00000) +#define RVU_PF_VFX_PFVF_MBOX1 (0x00008) +#define RVU_PF_VFX_PFVF_MBOXX(a, b) (0x0 | (a) << 12 | (b) << 3) +#define RVU_PF_VF_BAR4_ADDR (0x10) +#define RVU_PF_BLOCK_ADDRX_DISC(a) (0x200 | (a) << 3) +#define RVU_PF_VFME_STATUSX(a) (0x800 | (a) << 3) +#define RVU_PF_VFTRPENDX(a) (0x820 | (a) << 3) +#define RVU_PF_VFTRPEND_W1SX(a) (0x840 | (a) << 3) +#define RVU_PF_VFPF_MBOX_INTX(a) (0x880 | (a) << 3) +#define RVU_PF_VFPF_MBOX_INT_W1SX(a) (0x8A0 | 
(a) << 3) +#define RVU_PF_VFPF_MBOX_INT_ENA_W1SX(a) (0x8C0 | (a) << 3) +#define RVU_PF_VFPF_MBOX_INT_ENA_W1CX(a) (0x8E0 | (a) << 3) +#define RVU_PF_VFFLR_INTX(a) (0x900 | (a) << 3) +#define RVU_PF_VFFLR_INT_W1SX(a) (0x920 | (a) << 3) +#define RVU_PF_VFFLR_INT_ENA_W1SX(a) (0x940 | (a) << 3) +#define RVU_PF_VFFLR_INT_ENA_W1CX(a) (0x960 | (a) << 3) +#define RVU_PF_VFME_INTX(a) (0x980 | (a) << 3) +#define RVU_PF_VFME_INT_W1SX(a) (0x9A0 | (a) << 3) +#define RVU_PF_VFME_INT_ENA_W1SX(a) (0x9C0 | (a) << 3) +#define RVU_PF_VFME_INT_ENA_W1CX(a) (0x9E0 | (a) << 3) +#define RVU_PF_PFAF_MBOX0 (0xC00) +#define RVU_PF_PFAF_MBOX1 (0xC08) +#define RVU_PF_PFAF_MBOXX(a) (0xC00 | (a) << 3) +#define RVU_PF_INT (0xc20) +#define RVU_PF_INT_W1S (0xc28) +#define RVU_PF_INT_ENA_W1S (0xc30) +#define RVU_PF_INT_ENA_W1C (0xc38) +#define RVU_PF_MSIX_VECX_ADDR(a) (0x000 | (a) << 4) +#define RVU_PF_MSIX_VECX_CTL(a) (0x008 | (a) << 4) +#define RVU_PF_MSIX_PBAX(a) (0xF0000 | (a) << 3) + +/* RVU VF registers */ +#define RVU_VF_VFPF_MBOX0 (0x00000) +#define RVU_VF_VFPF_MBOX1 (0x00008) + +/* NPA block's admin function registers */ +#define NPA_AF_BLK_RST (0x0000) +#define NPA_AF_CONST (0x0010) +#define NPA_AF_CONST1 (0x0018) +#define NPA_AF_LF_RST (0x0020) +#define NPA_AF_GEN_CFG (0x0030) +#define NPA_AF_NDC_CFG (0x0040) +#define NPA_AF_INP_CTL (0x00D0) +#define NPA_AF_ACTIVE_CYCLES_PC (0x00F0) +#define NPA_AF_AVG_DELAY (0x0100) +#define NPA_AF_GEN_INT (0x0140) +#define NPA_AF_GEN_INT_W1S (0x0148) +#define NPA_AF_GEN_INT_ENA_W1S (0x0150) +#define NPA_AF_GEN_INT_ENA_W1C (0x0158) +#define NPA_AF_RVU_INT (0x0160) +#define NPA_AF_RVU_INT_W1S (0x0168) +#define NPA_AF_RVU_INT_ENA_W1S (0x0170) +#define NPA_AF_RVU_INT_ENA_W1C (0x0178) +#define NPA_AF_ERR_INT (0x0180) +#define NPA_AF_ERR_INT_W1S (0x0188) +#define NPA_AF_ERR_INT_ENA_W1S (0x0190) +#define NPA_AF_ERR_INT_ENA_W1C (0x0198) +#define NPA_AF_RAS (0x01A0) +#define NPA_AF_RAS_W1S (0x01A8) +#define NPA_AF_RAS_ENA_W1S (0x01B0) +#define NPA_AF_RAS_ENA_W1C (0x01B8) +#define NPA_AF_BP_TEST (0x0200) +#define NPA_AF_ECO (0x0300) +#define NPA_AF_AQ_CFG (0x0600) +#define NPA_AF_AQ_BASE (0x0610) +#define NPA_AF_AQ_STATUS (0x0620) +#define NPA_AF_AQ_DOOR (0x0630) +#define NPA_AF_AQ_DONE_WAIT (0x0640) +#define NPA_AF_AQ_DONE (0x0650) +#define NPA_AF_AQ_DONE_ACK (0x0660) +#define NPA_AF_AQ_DONE_INT (0x0680) +#define NPA_AF_AQ_DONE_INT_W1S (0x0688) +#define NPA_AF_AQ_DONE_ENA_W1S (0x0690) +#define NPA_AF_AQ_DONE_ENA_W1C (0x0698) +#define NPA_AF_LFX_AURAS_CFG(a) (0x4000 | (a) << 18) +#define NPA_AF_LFX_LOC_AURAS_BASE(a) (0x4010 | (a) << 18) +#define NPA_AF_LFX_QINTS_CFG(a) (0x4100 | (a) << 18) +#define NPA_AF_LFX_QINTS_BASE(a) (0x4110 | (a) << 18) +#define NPA_PRIV_AF_INT_CFG (0x10000) +#define NPA_PRIV_LFX_CFG (0x10010) +#define NPA_PRIV_LFX_INT_CFG (0x10020) +#define NPA_AF_RVU_LF_CFG_DEBUG (0x10030) + +/* NIX block's admin function registers */ +#define NIX_AF_CFG (0x0000) +#define NIX_AF_STATUS (0x0010) +#define NIX_AF_NDC_CFG (0x0018) +#define NIX_AF_CONST (0x0020) +#define NIX_AF_CONST1 (0x0028) +#define NIX_AF_CONST2 (0x0030) +#define NIX_AF_CONST3 (0x0038) +#define NIX_AF_SQ_CONST (0x0040) +#define NIX_AF_CQ_CONST (0x0048) +#define NIX_AF_RQ_CONST (0x0050) +#define NIX_AF_PSE_CONST (0x0060) +#define NIX_AF_TL1_CONST (0x0070) +#define NIX_AF_TL2_CONST (0x0078) +#define NIX_AF_TL3_CONST (0x0080) +#define NIX_AF_TL4_CONST (0x0088) +#define NIX_AF_MDQ_CONST (0x0090) +#define NIX_AF_MC_MIRROR_CONST (0x0098) +#define NIX_AF_LSO_CFG (0x00A8) +#define NIX_AF_BLK_RST (0x00B0) +#define 
NIX_AF_TX_TSTMP_CFG (0x00C0) +#define NIX_AF_RX_CFG (0x00D0) +#define NIX_AF_AVG_DELAY (0x00E0) +#define NIX_AF_CINT_DELAY (0x00F0) +#define NIX_AF_RX_MCAST_BASE (0x0100) +#define NIX_AF_RX_MCAST_CFG (0x0110) +#define NIX_AF_RX_MCAST_BUF_BASE (0x0120) +#define NIX_AF_RX_MCAST_BUF_CFG (0x0130) +#define NIX_AF_RX_MIRROR_BUF_BASE (0x0140) +#define NIX_AF_RX_MIRROR_BUF_CFG (0x0148) +#define NIX_AF_LF_RST (0x0150) +#define NIX_AF_GEN_INT (0x0160) +#define NIX_AF_GEN_INT_W1S (0x0168) +#define NIX_AF_GEN_INT_ENA_W1S (0x0170) +#define NIX_AF_GEN_INT_ENA_W1C (0x0178) +#define NIX_AF_ERR_INT (0x0180) +#define NIX_AF_ERR_INT_W1S (0x0188) +#define NIX_AF_ERR_INT_ENA_W1S (0x0190) +#define NIX_AF_ERR_INT_ENA_W1C (0x0198) +#define NIX_AF_RAS (0x01A0) +#define NIX_AF_RAS_W1S (0x01A8) +#define NIX_AF_RAS_ENA_W1S (0x01B0) +#define NIX_AF_RAS_ENA_W1C (0x01B8) +#define NIX_AF_RVU_INT (0x01C0) +#define NIX_AF_RVU_INT_W1S (0x01C8) +#define NIX_AF_RVU_INT_ENA_W1S (0x01D0) +#define NIX_AF_RVU_INT_ENA_W1C (0x01D8) +#define NIX_AF_TCP_TIMER (0x01E0) +#define NIX_AF_RX_WQE_TAG_CTL (0x01F0) +#define NIX_AF_RX_DEF_OL2 (0x0200) +#define NIX_AF_RX_DEF_OIP4 (0x0210) +#define NIX_AF_RX_DEF_IIP4 (0x0220) +#define NIX_AF_RX_DEF_OIP6 (0x0230) +#define NIX_AF_RX_DEF_IIP6 (0x0240) +#define NIX_AF_RX_DEF_OTCP (0x0250) +#define NIX_AF_RX_DEF_ITCP (0x0260) +#define NIX_AF_RX_DEF_OUDP (0x0270) +#define NIX_AF_RX_DEF_IUDP (0x0280) +#define NIX_AF_RX_DEF_OSCTP (0x0290) +#define NIX_AF_RX_DEF_ISCTP (0x02A0) +#define NIX_AF_RX_DEF_IPSECX (0x02B0) +#define NIX_AF_RX_IPSEC_GEN_CFG (0x0300) +#define NIX_AF_RX_CPTX_INST_ADDR (0x0310) +#define NIX_AF_NDC_TX_SYNC (0x03F0) +#define NIX_AF_AQ_CFG (0x0400) +#define NIX_AF_AQ_BASE (0x0410) +#define NIX_AF_AQ_STATUS (0x0420) +#define NIX_AF_AQ_DOOR (0x0430) +#define NIX_AF_AQ_DONE_WAIT (0x0440) +#define NIX_AF_AQ_DONE (0x0450) +#define NIX_AF_AQ_DONE_ACK (0x0460) +#define NIX_AF_AQ_DONE_TIMER (0x0470) +#define NIX_AF_AQ_DONE_INT (0x0480) +#define NIX_AF_AQ_DONE_INT_W1S (0x0488) +#define NIX_AF_AQ_DONE_ENA_W1S (0x0490) +#define NIX_AF_AQ_DONE_ENA_W1C (0x0498) +#define NIX_AF_RX_LINKX_SLX_SPKT_CNT (0x0500) +#define NIX_AF_RX_LINKX_SLX_SXQE_CNT (0x0510) +#define NIX_AF_RX_MCAST_JOBSX_SW_CNT (0x0520) +#define NIX_AF_RX_MIRROR_JOBSX_SW_CNT (0x0530) +#define NIX_AF_RX_LINKX_CFG(a) (0x0540 | (a) << 16) +#define NIX_AF_RX_SW_SYNC (0x0550) +#define NIX_AF_RX_SW_SYNC_DONE (0x0560) +#define NIX_AF_SEB_ECO (0x0600) +#define NIX_AF_SEB_TEST_BP (0x0610) +#define NIX_AF_NORM_TX_FIFO_STATUS (0x0620) +#define NIX_AF_EXPR_TX_FIFO_STATUS (0x0630) +#define NIX_AF_SDP_TX_FIFO_STATUS (0x0640) +#define NIX_AF_TX_NPC_CAPTURE_CONFIG (0x0660) +#define NIX_AF_TX_NPC_CAPTURE_INFO (0x0670) + +#define NIX_AF_DEBUG_NPC_RESP_DATAX(a) (0x680 | (a) << 3) +#define NIX_AF_SMQX_CFG(a) (0x700 | (a) << 16) +#define NIX_AF_PSE_CHANNEL_LEVEL (0x800) +#define NIX_AF_PSE_SHAPER_CFG (0x810) +#define NIX_AF_TX_EXPR_CREDIT (0x830) +#define NIX_AF_MARK_FORMATX_CTL(a) (0x900 | (a) << 18) +#define NIX_AF_TX_LINKX_NORM_CREDIT(a) (0xA00 | (a) << 16) +#define NIX_AF_TX_LINKX_EXPR_CREDIT(a) (0xA10 | (a) << 16) +#define NIX_AF_TX_LINKX_SW_XOFF(a) (0xA20 | (a) << 16) +#define NIX_AF_TX_LINKX_HW_XOFF(a) (0xA30 | (a) << 16) +#define NIX_AF_SDP_LINK_CREDIT (0xa40) +#define NIX_AF_SDP_SW_XOFFX(a) (0xA60 | (a) << 3) +#define NIX_AF_SDP_HW_XOFFX(a) (0xAC0 | (a) << 3) +#define NIX_AF_TL4X_BP_STATUS(a) (0xB00 | (a) << 16) +#define NIX_AF_TL4X_SDP_LINK_CFG(a) (0xB10 | (a) << 16) +#define NIX_AF_TL1X_SCHEDULE(a) (0xC00 | (a) << 16) +#define 
NIX_AF_TL1X_SHAPE(a) (0xC10 | (a) << 16) +#define NIX_AF_TL1X_CIR(a) (0xC20 | (a) << 16) +#define NIX_AF_TL1X_SHAPE_STATE(a) (0xC50 | (a) << 16) +#define NIX_AF_TL1X_SW_XOFF(a) (0xC70 | (a) << 16) +#define NIX_AF_TL1X_TOPOLOGY(a) (0xC80 | (a) << 16) +#define NIX_AF_TL1X_GREEN(a) (0xC90 | (a) << 16) +#define NIX_AF_TL1X_YELLOW(a) (0xCA0 | (a) << 16) +#define NIX_AF_TL1X_RED(a) (0xCB0 | (a) << 16) +#define NIX_AF_TL1X_MD_DEBUG0(a) (0xCC0 | (a) << 16) +#define NIX_AF_TL1X_MD_DEBUG1(a) (0xCC8 | (a) << 16) +#define NIX_AF_TL1X_MD_DEBUG2(a) (0xCD0 | (a) << 16) +#define NIX_AF_TL1X_MD_DEBUG3(a) (0xCD8 | (a) << 16) +#define NIX_AF_TL1A_DEBUG (0xce0) +#define NIX_AF_TL1B_DEBUG (0xcf0) +#define NIX_AF_TL1_DEBUG_GREEN (0xd00) +#define NIX_AF_TL1_DEBUG_NODE (0xd10) +#define NIX_AF_TL1X_DROPPED_PACKETS(a) (0xD20 | (a) << 16) +#define NIX_AF_TL1X_DROPPED_BYTES(a) (0xD30 | (a) << 16) +#define NIX_AF_TL1X_RED_PACKETS(a) (0xD40 | (a) << 16) +#define NIX_AF_TL1X_RED_BYTES(a) (0xD50 | (a) << 16) +#define NIX_AF_TL1X_YELLOW_PACKETS(a) (0xD60 | (a) << 16) +#define NIX_AF_TL1X_YELLOW_BYTES(a) (0xD70 | (a) << 16) +#define NIX_AF_TL1X_GREEN_PACKETS(a) (0xD80 | (a) << 16) +#define NIX_AF_TL1X_GREEN_BYTES(a) (0xD90 | (a) << 16) +#define NIX_AF_TL2X_SCHEDULE(a) (0xE00 | (a) << 16) +#define NIX_AF_TL2X_SHAPE(a) (0xE10 | (a) << 16) +#define NIX_AF_TL2X_CIR(a) (0xE20 | (a) << 16) +#define NIX_AF_TL2X_PIR(a) (0xE30 | (a) << 16) +#define NIX_AF_TL2X_SCHED_STATE(a) (0xE40 | (a) << 16) +#define NIX_AF_TL2X_SHAPE_STATE(a) (0xE50 | (a) << 16) +#define NIX_AF_TL2X_POINTERS(a) (0xE60 | (a) << 16) +#define NIX_AF_TL2X_SW_XOFF(a) (0xE70 | (a) << 16) +#define NIX_AF_TL2X_TOPOLOGY(a) (0xE80 | (a) << 16) +#define NIX_AF_TL2X_PARENT(a) (0xE88 | (a) << 16) +#define NIX_AF_TL2X_GREEN(a) (0xE90 | (a) << 16) +#define NIX_AF_TL2X_YELLOW(a) (0xEA0 | (a) << 16) +#define NIX_AF_TL2X_RED(a) (0xEB0 | (a) << 16) +#define NIX_AF_TL2X_MD_DEBUG0(a) (0xEC0 | (a) << 16) +#define NIX_AF_TL2X_MD_DEBUG1(a) (0xEC8 | (a) << 16) +#define NIX_AF_TL2X_MD_DEBUG2(a) (0xED0 | (a) << 16) +#define NIX_AF_TL2X_MD_DEBUG3(a) (0xED8 | (a) << 16) +#define NIX_AF_TL2A_DEBUG (0xee0) +#define NIX_AF_TL2B_DEBUG (0xef0) +#define NIX_AF_TL3X_SCHEDULE(a) (0x1000 | (a) << 16) +#define NIX_AF_TL3X_SHAPE(a) (0x1010 | (a) << 16) +#define NIX_AF_TL3X_CIR(a) (0x1020 | (a) << 16) +#define NIX_AF_TL3X_PIR(a) (0x1030 | (a) << 16) +#define NIX_AF_TL3X_SCHED_STATE(a) (0x1040 | (a) << 16) +#define NIX_AF_TL3X_SHAPE_STATE(a) (0x1050 | (a) << 16) +#define NIX_AF_TL3X_POINTERS(a) (0x1060 | (a) << 16) +#define NIX_AF_TL3X_SW_XOFF(a) (0x1070 | (a) << 16) +#define NIX_AF_TL3X_TOPOLOGY(a) (0x1080 | (a) << 16) +#define NIX_AF_TL3X_PARENT(a) (0x1088 | (a) << 16) +#define NIX_AF_TL3X_GREEN(a) (0x1090 | (a) << 16) +#define NIX_AF_TL3X_YELLOW(a) (0x10A0 | (a) << 16) +#define NIX_AF_TL3X_RED(a) (0x10B0 | (a) << 16) +#define NIX_AF_TL3X_MD_DEBUG0(a) (0x10C0 | (a) << 16) +#define NIX_AF_TL3X_MD_DEBUG1(a) (0x10C8 | (a) << 16) +#define NIX_AF_TL3X_MD_DEBUG2(a) (0x10D0 | (a) << 16) +#define NIX_AF_TL3X_MD_DEBUG3(a) (0x10D8 | (a) << 16) +#define NIX_AF_TL3A_DEBUG (0x10e0) +#define NIX_AF_TL3B_DEBUG (0x10f0) +#define NIX_AF_TL4X_SCHEDULE(a) (0x1200 | (a) << 16) +#define NIX_AF_TL4X_SHAPE(a) (0x1210 | (a) << 16) +#define NIX_AF_TL4X_CIR(a) (0x1220 | (a) << 16) +#define NIX_AF_TL4X_PIR(a) (0x1230 | (a) << 16) +#define NIX_AF_TL4X_SCHED_STATE(a) (0x1240 | (a) << 16) +#define NIX_AF_TL4X_SHAPE_STATE(a) (0x1250 | (a) << 16) +#define NIX_AF_TL4X_POINTERS(a) (0x1260 | (a) << 16) +#define NIX_AF_TL4X_SW_XOFF(a) 
(0x1270 | (a) << 16) +#define NIX_AF_TL4X_TOPOLOGY(a) (0x1280 | (a) << 16) +#define NIX_AF_TL4X_PARENT(a) (0x1288 | (a) << 16) +#define NIX_AF_TL4X_GREEN(a) (0x1290 | (a) << 16) +#define NIX_AF_TL4X_YELLOW(a) (0x12A0 | (a) << 16) +#define NIX_AF_TL4X_RED(a) (0x12B0 | (a) << 16) +#define NIX_AF_TL4X_MD_DEBUG0(a) (0x12C0 | (a) << 16) +#define NIX_AF_TL4X_MD_DEBUG1(a) (0x12C8 | (a) << 16) +#define NIX_AF_TL4X_MD_DEBUG2(a) (0x12D0 | (a) << 16) +#define NIX_AF_TL4X_MD_DEBUG3(a) (0x12D8 | (a) << 16) +#define NIX_AF_TL4A_DEBUG (0x12e0) +#define NIX_AF_TL4B_DEBUG (0x12f0) +#define NIX_AF_MDQX_SCHEDULE(a) (0x1400 | (a) << 16) +#define NIX_AF_MDQX_SHAPE(a) (0x1410 | (a) << 16) +#define NIX_AF_MDQX_CIR(a) (0x1420 | (a) << 16) +#define NIX_AF_MDQX_PIR(a) (0x1430 | (a) << 16) +#define NIX_AF_MDQX_SCHED_STATE(a) (0x1440 | (a) << 16) +#define NIX_AF_MDQX_SHAPE_STATE(a) (0x1450 | (a) << 16) +#define NIX_AF_MDQX_POINTERS(a) (0x1460 | (a) << 16) +#define NIX_AF_MDQX_SW_XOFF(a) (0x1470 | (a) << 16) +#define NIX_AF_MDQX_PARENT(a) (0x1480 | (a) << 16) +#define NIX_AF_MDQX_MD_DEBUG(a) (0x14C0 | (a) << 16) +#define NIX_AF_MDQX_PTR_FIFO(a) (0x14D0 | (a) << 16) +#define NIX_AF_MDQA_DEBUG (0x14e0) +#define NIX_AF_MDQB_DEBUG (0x14f0) +#define NIX_AF_TL3_TL2X_CFG(a) (0x1600 | (a) << 18) +#define NIX_AF_TL3_TL2X_BP_STATUS(a) (0x1610 | (a) << 16) +#define NIX_AF_TL3_TL2X_LINKX_CFG(a, b) (0x1700 | (a) << 16 | (b) << 3) +#define NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(a, b) (0x1800 | (a) << 18 | (b) << 3) +#define NIX_AF_TX_MCASTX(a) (0x1900 | (a) << 15) +#define NIX_AF_TX_VTAG_DEFX_CTL(a) (0x1A00 | (a) << 16) +#define NIX_AF_TX_VTAG_DEFX_DATA(a) (0x1A10 | (a) << 16) +#define NIX_AF_RX_BPIDX_STATUS(a) (0x1A20 | (a) << 17) +#define NIX_AF_RX_CHANX_CFG(a) (0x1A30 | (a) << 15) +#define NIX_AF_CINT_TIMERX(a) (0x1A40 | (a) << 18) +#define NIX_AF_LSO_FORMATX_FIELDX(a, b) (0x1B00 | (a) << 16 | (b) << 3) +#define NIX_AF_LFX_CFG(a) (0x4000 | (a) << 17) +#define NIX_AF_LFX_SQS_CFG(a) (0x4020 | (a) << 17) +#define NIX_AF_LFX_TX_CFG2(a) (0x4028 | (a) << 17) +#define NIX_AF_LFX_SQS_BASE(a) (0x4030 | (a) << 17) +#define NIX_AF_LFX_RQS_CFG(a) (0x4040 | (a) << 17) +#define NIX_AF_LFX_RQS_BASE(a) (0x4050 | (a) << 17) +#define NIX_AF_LFX_CQS_CFG(a) (0x4060 | (a) << 17) +#define NIX_AF_LFX_CQS_BASE(a) (0x4070 | (a) << 17) +#define NIX_AF_LFX_TX_CFG(a) (0x4080 | (a) << 17) +#define NIX_AF_LFX_TX_PARSE_CFG(a) (0x4090 | (a) << 17) +#define NIX_AF_LFX_RX_CFG(a) (0x40A0 | (a) << 17) +#define NIX_AF_LFX_RSS_CFG(a) (0x40C0 | (a) << 17) +#define NIX_AF_LFX_RSS_BASE(a) (0x40D0 | (a) << 17) +#define NIX_AF_LFX_QINTS_CFG(a) (0x4100 | (a) << 17) +#define NIX_AF_LFX_QINTS_BASE(a) (0x4110 | (a) << 17) +#define NIX_AF_LFX_CINTS_CFG(a) (0x4120 | (a) << 17) +#define NIX_AF_LFX_CINTS_BASE(a) (0x4130 | (a) << 17) +#define NIX_AF_LFX_RX_IPSEC_CFG0(a) (0x4140 | (a) << 17) +#define NIX_AF_LFX_RX_IPSEC_CFG1(a) (0x4148 | (a) << 17) +#define NIX_AF_LFX_RX_IPSEC_DYNO_CFG(a) (0x4150 | (a) << 17) +#define NIX_AF_LFX_RX_IPSEC_DYNO_BASE(a) (0x4158 | (a) << 17) +#define NIX_AF_LFX_RX_IPSEC_SA_BASE(a) (0x4170 | (a) << 17) +#define NIX_AF_LFX_TX_STATUS(a) (0x4180 | (a) << 17) +#define NIX_AF_LFX_RX_VTAG_TYPEX(a, b) (0x4200 | (a) << 17 | (b) << 3) +#define NIX_AF_LFX_LOCKX(a, b) (0x4300 | (a) << 17 | (b) << 3) +#define NIX_AF_LFX_TX_STATX(a, b) (0x4400 | (a) << 17 | (b) << 3) +#define NIX_AF_LFX_RX_STATX(a, b) (0x4500 | (a) << 17 | (b) << 3) +#define NIX_AF_LFX_RSS_GRPX(a, b) (0x4600 | (a) << 17 | (b) << 3) +#define NIX_AF_RX_NPC_MC_RCV (0x4700) +#define NIX_AF_RX_NPC_MC_DROP 
(0x4710) +#define NIX_AF_RX_NPC_MIRROR_RCV (0x4720) +#define NIX_AF_RX_NPC_MIRROR_DROP (0x4730) +#define NIX_AF_RX_ACTIVE_CYCLES_PCX(a) (0x4800 | (a) << 16) + +#define NIX_PRIV_AF_INT_CFG (0x8000000) +#define NIX_PRIV_LFX_CFG (0x8000010) +#define NIX_PRIV_LFX_INT_CFG (0x8000020) +#define NIX_AF_RVU_LF_CFG_DEBUG (0x8000030) + +/* SSO */ +#define SSO_AF_CONST (0x1000) +#define SSO_AF_CONST1 (0x1008) +#define SSO_AF_BLK_RST (0x10f8) +#define SSO_AF_LF_HWGRP_RST (0x10e0) +#define SSO_AF_RVU_LF_CFG_DEBUG (0x3800) +#define SSO_PRIV_LFX_HWGRP_CFG (0x10000) +#define SSO_PRIV_LFX_HWGRP_INT_CFG (0x20000) + +/* SSOW */ +#define SSOW_AF_RVU_LF_HWS_CFG_DEBUG (0x0010) +#define SSOW_AF_LF_HWS_RST (0x0030) +#define SSOW_PRIV_LFX_HWS_CFG (0x1000) +#define SSOW_PRIV_LFX_HWS_INT_CFG (0x2000) + +/* TIM */ +#define TIM_AF_CONST (0x90) +#define TIM_PRIV_LFX_CFG (0x20000) +#define TIM_PRIV_LFX_INT_CFG (0x24000) +#define TIM_AF_RVU_LF_CFG_DEBUG (0x30000) +#define TIM_AF_BLK_RST (0x10) +#define TIM_AF_LF_RST (0x20) + +/* CPT */ +#define CPT_AF_CONSTANTS0 (0x0000) +#define CPT_PRIV_LFX_CFG (0x41000) +#define CPT_PRIV_LFX_INT_CFG (0x43000) +#define CPT_AF_RVU_LF_CFG_DEBUG (0x45000) +#define CPT_AF_LF_RST (0x44000) +#define CPT_AF_BLK_RST (0x46000) + +#define NDC_AF_BLK_RST (0x002F0) +#define NPC_AF_BLK_RST (0x00040) + +#endif /* RVU_REG_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h new file mode 100644 index 000000000000..92e05817ffe7 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: GPL-2.0 + * Marvell OcteonTx2 RVU Admin Function driver + * + * Copyright (C) 2018 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef RVU_STRUCT_H +#define RVU_STRUCT_H + +/* RVU Block Address Enumeration */ +enum rvu_block_addr_e { + BLKADDR_RVUM = 0x0ULL, + BLKADDR_LMT = 0x1ULL, + BLKADDR_MSIX = 0x2ULL, + BLKADDR_NPA = 0x3ULL, + BLKADDR_NIX0 = 0x4ULL, + BLKADDR_NIX1 = 0x5ULL, + BLKADDR_NPC = 0x6ULL, + BLKADDR_SSO = 0x7ULL, + BLKADDR_SSOW = 0x8ULL, + BLKADDR_TIM = 0x9ULL, + BLKADDR_CPT0 = 0xaULL, + BLKADDR_CPT1 = 0xbULL, + BLKADDR_NDC0 = 0xcULL, + BLKADDR_NDC1 = 0xdULL, + BLKADDR_NDC2 = 0xeULL, + BLK_COUNT = 0xfULL, +}; + +/* RVU Block Type Enumeration */ +enum rvu_block_type_e { + BLKTYPE_RVUM = 0x0, + BLKTYPE_MSIX = 0x1, + BLKTYPE_LMT = 0x2, + BLKTYPE_NIX = 0x3, + BLKTYPE_NPA = 0x4, + BLKTYPE_NPC = 0x5, + BLKTYPE_SSO = 0x6, + BLKTYPE_SSOW = 0x7, + BLKTYPE_TIM = 0x8, + BLKTYPE_CPT = 0x9, + BLKTYPE_NDC = 0xa, + BLKTYPE_MAX = 0xa, +}; + +/* RVU Admin function Interrupt Vector Enumeration */ +enum rvu_af_int_vec_e { + RVU_AF_INT_VEC_POISON = 0x0, + RVU_AF_INT_VEC_PFFLR = 0x1, + RVU_AF_INT_VEC_PFME = 0x2, + RVU_AF_INT_VEC_GEN = 0x3, + RVU_AF_INT_VEC_MBOX = 0x4, + RVU_AF_INT_VEC_CNT = 0x5, +}; + +/** + * RVU PF Interrupt Vector Enumeration + */ +enum rvu_pf_int_vec_e { + RVU_PF_INT_VEC_VFFLR0 = 0x0, + RVU_PF_INT_VEC_VFFLR1 = 0x1, + RVU_PF_INT_VEC_VFME0 = 0x2, + RVU_PF_INT_VEC_VFME1 = 0x3, + RVU_PF_INT_VEC_VFPF_MBOX0 = 0x4, + RVU_PF_INT_VEC_VFPF_MBOX1 = 0x5, + RVU_PF_INT_VEC_AFPF_MBOX = 0x6, + RVU_PF_INT_VEC_CNT = 0x7, +}; + +#endif /* RVU_STRUCT_H */ diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index 3a9730612a70..0bd4351b2a49 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -988,8 +988,8 @@ static int pxa168_init_phy(struct net_device *dev) cmd.base.phy_address = pep->phy_addr; cmd.base.speed = pep->phy_speed; cmd.base.duplex = pep->phy_duplex; - ethtool_convert_legacy_u32_to_link_mode(cmd.link_modes.advertising, - PHY_BASIC_FEATURES); + bitmap_copy(cmd.link_modes.advertising, PHY_BASIC_FEATURES, + __ETHTOOL_LINK_MODE_MASK_NBITS); cmd.base.autoneg = AUTONEG_ENABLE; if (cmd.base.speed != 0) @@ -1260,7 +1260,8 @@ static int pxa168_rx_poll(struct napi_struct *napi, int budget) return work_done; } -static int pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t +pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct pxa168_eth_private *pep = netdev_priv(dev); struct net_device_stats *stats = &dev->stats; diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 6e6abdc399de..7dbfdac4067a 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -243,11 +243,7 @@ static void mtk_phy_link_adjust(struct net_device *dev) if (dev->phydev->asym_pause) rmt_adv |= LPA_PAUSE_ASYM; - if (dev->phydev->advertising & ADVERTISED_Pause) - lcl_adv |= ADVERTISE_PAUSE_CAP; - if (dev->phydev->advertising & ADVERTISED_Asym_Pause) - lcl_adv |= ADVERTISE_PAUSE_ASYM; - + lcl_adv = ethtool_adv_to_lcl_adv_t(dev->phydev->advertising); flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv); if (flowctrl & FLOW_CTRL_TX) @@ -355,12 +351,8 @@ static int mtk_phy_connect(struct net_device *dev) dev->phydev->speed = 0; dev->phydev->duplex = 0; - if (of_phy_is_fixed_link(mac->of_node)) - dev->phydev->supported |= - SUPPORTED_Pause | SUPPORTED_Asym_Pause; - - dev->phydev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause | - SUPPORTED_Asym_Pause; + phy_set_max_speed(dev->phydev, 
SPEED_1000); + phy_support_asym_pause(dev->phydev); dev->phydev->advertising = dev->phydev->supported | ADVERTISED_Autoneg; phy_start_aneg(dev->phydev); @@ -405,7 +397,7 @@ static int mtk_mdio_init(struct mtk_eth *eth) eth->mii_bus->priv = eth; eth->mii_bus->parent = eth->dev; - snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%s", mii_np->name); + snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np); ret = of_mdiobus_register(eth->mii_bus, mii_np); err_put_node: diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c index 4bdf25059542..deef5a998985 100644 --- a/drivers/net/ethernet/mellanox/mlx4/alloc.c +++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c @@ -614,7 +614,7 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, int i; buf->direct.buf = NULL; - buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE; + buf->nbufs = DIV_ROUND_UP(size, PAGE_SIZE); buf->npages = buf->nbufs; buf->page_shift = PAGE_SHIFT; buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list), diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c index d25e16d2c319..109472d6b61f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c @@ -167,8 +167,13 @@ static void mlx4_en_get_profile(struct mlx4_en_dev *mdev) params->prof[i].rx_ppp = pfcrx; params->prof[i].tx_pause = !(pfcrx || pfctx); params->prof[i].tx_ppp = pfctx; - params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE; - params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE; + if (mlx4_low_memory_profile()) { + params->prof[i].tx_ring_size = MLX4_EN_MIN_TX_SIZE; + params->prof[i].rx_ring_size = MLX4_EN_MIN_RX_SIZE; + } else { + params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE; + params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE; + } params->prof[i].num_up = MLX4_EN_NUM_UP_LOW; params->prof[i].num_tx_rings_p_up = params->max_num_tx_rings_p_up; params->prof[i].tx_ring_num[TX] = params->max_num_tx_rings_p_up * diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c index 7262c6310650..4b4351141b94 100644 --- a/drivers/net/ethernet/mellanox/mlx4/icm.c +++ b/drivers/net/ethernet/mellanox/mlx4/icm.c @@ -406,7 +406,7 @@ int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table, obj_per_chunk = MLX4_TABLE_CHUNK_SIZE / obj_size; if (WARN_ON(!obj_per_chunk)) return -EINVAL; - num_icm = (nobj + obj_per_chunk - 1) / obj_per_chunk; + num_icm = DIV_ROUND_UP(nobj, obj_per_chunk); table->icm = kvcalloc(num_icm, sizeof(*table->icm), GFP_KERNEL); if (!table->icm) diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index c3228b89df46..485d856546c6 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -72,7 +72,7 @@ #define MLX4_EN_PAGE_SIZE (1 << MLX4_EN_PAGE_SHIFT) #define DEF_RX_RINGS 16 #define MAX_RX_RINGS 128 -#define MIN_RX_RINGS 4 +#define MIN_RX_RINGS 1 #define LOG_TXBB_SIZE 6 #define TXBB_SIZE BIT(LOG_TXBB_SIZE) #define HEADROOM (2048 / TXBB_SIZE + 1) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 0f189f873859..1d743bd5d212 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -205,18 +205,12 @@ struct mlx5e_umr_wqe { extern const char mlx5e_self_tests[][ETH_GSTRING_LEN]; -static const char 
mlx5e_priv_flags[][ETH_GSTRING_LEN] = { - "rx_cqe_moder", - "tx_cqe_moder", - "rx_cqe_compress", - "rx_striding_rq", -}; - enum mlx5e_priv_flag { MLX5E_PFLAG_RX_CQE_BASED_MODER = (1 << 0), MLX5E_PFLAG_TX_CQE_BASED_MODER = (1 << 1), MLX5E_PFLAG_RX_CQE_COMPRESS = (1 << 2), MLX5E_PFLAG_RX_STRIDING_RQ = (1 << 3), + MLX5E_PFLAG_RX_NO_CSUM_COMPLETE = (1 << 4), }; #define MLX5E_SET_PFLAG(params, pflag, enable) \ @@ -298,6 +292,7 @@ struct mlx5e_dcbx_dp { enum { MLX5E_RQ_STATE_ENABLED, MLX5E_RQ_STATE_AM, + MLX5E_RQ_STATE_NO_CSUM_COMPLETE, }; struct mlx5e_cq { @@ -906,10 +901,16 @@ void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev); int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb); /* common netdev helpers */ +void mlx5e_create_q_counters(struct mlx5e_priv *priv); +void mlx5e_destroy_q_counters(struct mlx5e_priv *priv); +int mlx5e_open_drop_rq(struct mlx5e_priv *priv, + struct mlx5e_rq *drop_rq); +void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq); + int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv); -int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv); -void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv); +int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc); +void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc); int mlx5e_create_direct_rqts(struct mlx5e_priv *priv); void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv); @@ -953,6 +954,8 @@ int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal); int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal); +u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv); +u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv); int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv, struct ethtool_ts_info *info); int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv, @@ -968,6 +971,9 @@ void mlx5e_destroy_netdev(struct mlx5e_priv *priv); void mlx5e_build_nic_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params, u16 max_channels, u16 mtu); +void mlx5e_build_rq_params(struct mlx5_core_dev *mdev, + struct mlx5e_params *params); +void mlx5e_build_rss_params(struct mlx5e_params *params); u8 mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev); void mlx5e_rx_dim_work(struct work_struct *work); void mlx5e_tx_dim_work(struct work_struct *work); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c index 45cdde694d20..8657e0f26995 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c @@ -543,8 +543,11 @@ static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv, rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1); if (IS_ERR(rule)) { err = PTR_ERR(rule); - netdev_err(priv->netdev, "%s: add rule(filter id=%d, rq idx=%d) failed, err=%d\n", - __func__, arfs_rule->filter_id, arfs_rule->rxq, err); + priv->channel_stats[arfs_rule->rxq].rq.arfs_err++; + mlx5e_dbg(HW, priv, + "%s: add rule(filter id=%d, rq idx=%d, ip proto=0x%x) failed,err=%d\n", + __func__, arfs_rule->filter_id, arfs_rule->rxq, + tuple->ip_proto, err); } out: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 98dd3e0ada72..c86fd770c463 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -135,6 +135,14 @@ void 
mlx5e_build_ptys2ethtool_map(void) ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT); } +static const char mlx5e_priv_flags[][ETH_GSTRING_LEN] = { + "rx_cqe_moder", + "tx_cqe_moder", + "rx_cqe_compress", + "rx_striding_rq", + "rx_no_csum_complete", +}; + int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset) { int i, num_stats = 0; @@ -852,18 +860,30 @@ out: return err; } +u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv) +{ + return sizeof(priv->channels.params.toeplitz_hash_key); +} + static u32 mlx5e_get_rxfh_key_size(struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); - return sizeof(priv->channels.params.toeplitz_hash_key); + return mlx5e_ethtool_get_rxfh_key_size(priv); } -static u32 mlx5e_get_rxfh_indir_size(struct net_device *netdev) +u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv) { return MLX5E_INDIR_RQT_SIZE; } +static u32 mlx5e_get_rxfh_indir_size(struct net_device *netdev) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + + return mlx5e_ethtool_get_rxfh_indir_size(priv); +} + static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) { @@ -1512,6 +1532,27 @@ static int set_pflag_rx_striding_rq(struct net_device *netdev, bool enable) return 0; } +static int set_pflag_rx_no_csum_complete(struct net_device *netdev, bool enable) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5e_channels *channels = &priv->channels; + struct mlx5e_channel *c; + int i; + + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) + return 0; + + for (i = 0; i < channels->num; i++) { + c = channels->c[i]; + if (enable) + __set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state); + else + __clear_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state); + } + + return 0; +} + static int mlx5e_handle_pflag(struct net_device *netdev, u32 wanted_flags, enum mlx5e_priv_flag flag, @@ -1563,6 +1604,12 @@ static int mlx5e_set_priv_flags(struct net_device *netdev, u32 pflags) err = mlx5e_handle_pflag(netdev, pflags, MLX5E_PFLAG_RX_STRIDING_RQ, set_pflag_rx_striding_rq); + if (err) + goto out; + + err = mlx5e_handle_pflag(netdev, pflags, + MLX5E_PFLAG_RX_NO_CSUM_COMPLETE, + set_pflag_rx_no_csum_complete); out: mutex_unlock(&priv->state_lock); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c index 41cde926cdab..c18dcebe1462 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c @@ -131,14 +131,14 @@ set_ip4(void *headers_c, void *headers_v, __be32 ip4src_m, if (ip4src_m) { memcpy(MLX5E_FTE_ADDR_OF(headers_v, src_ipv4_src_ipv6.ipv4_layout.ipv4), &ip4src_v, sizeof(ip4src_v)); - memset(MLX5E_FTE_ADDR_OF(headers_c, src_ipv4_src_ipv6.ipv4_layout.ipv4), - 0xff, sizeof(ip4src_m)); + memcpy(MLX5E_FTE_ADDR_OF(headers_c, src_ipv4_src_ipv6.ipv4_layout.ipv4), + &ip4src_m, sizeof(ip4src_m)); } if (ip4dst_m) { memcpy(MLX5E_FTE_ADDR_OF(headers_v, dst_ipv4_dst_ipv6.ipv4_layout.ipv4), &ip4dst_v, sizeof(ip4dst_v)); - memset(MLX5E_FTE_ADDR_OF(headers_c, dst_ipv4_dst_ipv6.ipv4_layout.ipv4), - 0xff, sizeof(ip4dst_m)); + memcpy(MLX5E_FTE_ADDR_OF(headers_c, dst_ipv4_dst_ipv6.ipv4_layout.ipv4), + &ip4dst_m, sizeof(ip4dst_m)); } MLX5E_FTE_SET(headers_c, ethertype, 0xffff); @@ -173,11 +173,11 @@ set_tcp(void *headers_c, void *headers_v, __be16 psrc_m, __be16 psrc_v, __be16 pdst_m, __be16 pdst_v) { if (psrc_m) { - MLX5E_FTE_SET(headers_c, tcp_sport, 0xffff); + MLX5E_FTE_SET(headers_c, tcp_sport, 
ntohs(psrc_m)); MLX5E_FTE_SET(headers_v, tcp_sport, ntohs(psrc_v)); } if (pdst_m) { - MLX5E_FTE_SET(headers_c, tcp_dport, 0xffff); + MLX5E_FTE_SET(headers_c, tcp_dport, ntohs(pdst_m)); MLX5E_FTE_SET(headers_v, tcp_dport, ntohs(pdst_v)); } @@ -190,12 +190,12 @@ set_udp(void *headers_c, void *headers_v, __be16 psrc_m, __be16 psrc_v, __be16 pdst_m, __be16 pdst_v) { if (psrc_m) { - MLX5E_FTE_SET(headers_c, udp_sport, 0xffff); + MLX5E_FTE_SET(headers_c, udp_sport, ntohs(psrc_m)); MLX5E_FTE_SET(headers_v, udp_sport, ntohs(psrc_v)); } if (pdst_m) { - MLX5E_FTE_SET(headers_c, udp_dport, 0xffff); + MLX5E_FTE_SET(headers_c, udp_dport, ntohs(pdst_m)); MLX5E_FTE_SET(headers_v, udp_dport, ntohs(pdst_v)); } @@ -508,26 +508,14 @@ static int validate_tcpudp4(struct ethtool_rx_flow_spec *fs) if (l4_mask->tos) return -EINVAL; - if (l4_mask->ip4src) { - if (!all_ones(l4_mask->ip4src)) - return -EINVAL; + if (l4_mask->ip4src) ntuples++; - } - if (l4_mask->ip4dst) { - if (!all_ones(l4_mask->ip4dst)) - return -EINVAL; + if (l4_mask->ip4dst) ntuples++; - } - if (l4_mask->psrc) { - if (!all_ones(l4_mask->psrc)) - return -EINVAL; + if (l4_mask->psrc) ntuples++; - } - if (l4_mask->pdst) { - if (!all_ones(l4_mask->pdst)) - return -EINVAL; + if (l4_mask->pdst) ntuples++; - } /* Flow is TCP/UDP */ return ++ntuples; } @@ -540,16 +528,10 @@ static int validate_ip4(struct ethtool_rx_flow_spec *fs) if (l3_mask->l4_4_bytes || l3_mask->tos || fs->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4) return -EINVAL; - if (l3_mask->ip4src) { - if (!all_ones(l3_mask->ip4src)) - return -EINVAL; + if (l3_mask->ip4src) ntuples++; - } - if (l3_mask->ip4dst) { - if (!all_ones(l3_mask->ip4dst)) - return -EINVAL; + if (l3_mask->ip4dst) ntuples++; - } if (l3_mask->proto) ntuples++; /* Flow is IPv4 */ @@ -588,16 +570,10 @@ static int validate_tcpudp6(struct ethtool_rx_flow_spec *fs) if (!ipv6_addr_any((struct in6_addr *)l4_mask->ip6dst)) ntuples++; - if (l4_mask->psrc) { - if (!all_ones(l4_mask->psrc)) - return -EINVAL; + if (l4_mask->psrc) ntuples++; - } - if (l4_mask->pdst) { - if (!all_ones(l4_mask->pdst)) - return -EINVAL; + if (l4_mask->pdst) ntuples++; - } /* Flow is TCP/UDP */ return ++ntuples; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index f291d1bf1558..bc034958c846 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -929,6 +929,9 @@ static int mlx5e_open_rq(struct mlx5e_channel *c, if (params->rx_dim_enabled) __set_bit(MLX5E_RQ_STATE_AM, &c->rq.state); + if (params->pflags & MLX5E_PFLAG_RX_NO_CSUM_COMPLETE) + __set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state); + return 0; err_destroy_rq: @@ -3049,8 +3052,8 @@ static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev, return mlx5e_alloc_cq_common(mdev, param, cq); } -static int mlx5e_open_drop_rq(struct mlx5e_priv *priv, - struct mlx5e_rq *drop_rq) +int mlx5e_open_drop_rq(struct mlx5e_priv *priv, + struct mlx5e_rq *drop_rq) { struct mlx5_core_dev *mdev = priv->mdev; struct mlx5e_cq_param cq_param = {}; @@ -3094,7 +3097,7 @@ err_free_cq: return err; } -static void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq) +void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq) { mlx5e_destroy_rq(drop_rq); mlx5e_free_rq(drop_rq); @@ -3175,7 +3178,7 @@ static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 rqtn, u32 *t MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8); } -int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv) +int 
mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc) { struct mlx5e_tir *tir; void *tirc; @@ -3202,7 +3205,7 @@ int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv) } } - if (!mlx5e_tunnel_inner_ft_supported(priv->mdev)) + if (!inner_ttc || !mlx5e_tunnel_inner_ft_supported(priv->mdev)) goto out; for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) { @@ -3273,14 +3276,14 @@ err_destroy_ch_tirs: return err; } -void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv) +void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc) { int i; for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[i]); - if (!mlx5e_tunnel_inner_ft_supported(priv->mdev)) + if (!inner_ttc || !mlx5e_tunnel_inner_ft_supported(priv->mdev)) return; for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) @@ -4480,6 +4483,31 @@ static u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeo return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]); } +void mlx5e_build_rq_params(struct mlx5_core_dev *mdev, + struct mlx5e_params *params) +{ + /* Prefer Striding RQ, unless any of the following holds: + * - Striding RQ configuration is not possible/supported. + * - Slow PCI heuristic. + * - Legacy RQ would use linear SKB while Striding RQ would use non-linear. + */ + if (!slow_pci_heuristic(mdev) && + mlx5e_striding_rq_possible(mdev, params) && + (mlx5e_rx_mpwqe_is_linear_skb(mdev, params) || + !mlx5e_rx_is_linear_skb(mdev, params))) + MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, true); + mlx5e_set_rq_type(mdev, params); + mlx5e_init_rq_type_params(mdev, params); +} + +void mlx5e_build_rss_params(struct mlx5e_params *params) +{ + params->rss_hfunc = ETH_RSS_HASH_XOR; + netdev_rss_key_fill(params->toeplitz_hash_key, sizeof(params->toeplitz_hash_key)); + mlx5e_build_default_indir_rqt(params->indirection_rqt, + MLX5E_INDIR_RQT_SIZE, params->num_channels); +} + void mlx5e_build_nic_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params, u16 max_channels, u16 mtu) @@ -4503,20 +4531,10 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev, params->rx_cqe_compress_def = slow_pci_heuristic(mdev); MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def); + MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE, false); /* RQ */ - /* Prefer Striding RQ, unless any of the following holds: - * - Striding RQ configuration is not possible/supported. - * - Slow PCI heuristic. - * - Legacy RQ would use linear SKB while Striding RQ would use non-linear. 
- */ - if (!slow_pci_heuristic(mdev) && - mlx5e_striding_rq_possible(mdev, params) && - (mlx5e_rx_mpwqe_is_linear_skb(mdev, params) || - !mlx5e_rx_is_linear_skb(mdev, params))) - MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, true); - mlx5e_set_rq_type(mdev, params); - mlx5e_init_rq_type_params(mdev, params); + mlx5e_build_rq_params(mdev, params); /* HW LRO */ @@ -4539,10 +4557,7 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev, params->tx_min_inline_mode = mlx5e_params_calculate_tx_min_inline(mdev); /* RSS */ - params->rss_hfunc = ETH_RSS_HASH_XOR; - netdev_rss_key_fill(params->toeplitz_hash_key, sizeof(params->toeplitz_hash_key)); - mlx5e_build_default_indir_rqt(params->indirection_rqt, - MLX5E_INDIR_RQT_SIZE, max_channels); + mlx5e_build_rss_params(params); } static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev, @@ -4707,7 +4722,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) mlx5e_tls_build_netdev(priv); } -static void mlx5e_create_q_counters(struct mlx5e_priv *priv) +void mlx5e_create_q_counters(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; int err; @@ -4725,7 +4740,7 @@ static void mlx5e_create_q_counters(struct mlx5e_priv *priv) } } -static void mlx5e_destroy_q_counters(struct mlx5e_priv *priv) +void mlx5e_destroy_q_counters(struct mlx5e_priv *priv) { if (priv->q_counter) mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter); @@ -4764,15 +4779,23 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv) struct mlx5_core_dev *mdev = priv->mdev; int err; + mlx5e_create_q_counters(priv); + + err = mlx5e_open_drop_rq(priv, &priv->drop_rq); + if (err) { + mlx5_core_err(mdev, "open drop rq failed, %d\n", err); + goto err_destroy_q_counters; + } + err = mlx5e_create_indirect_rqt(priv); if (err) - return err; + goto err_close_drop_rq; err = mlx5e_create_direct_rqts(priv); if (err) goto err_destroy_indirect_rqts; - err = mlx5e_create_indirect_tirs(priv); + err = mlx5e_create_indirect_tirs(priv, true); if (err) goto err_destroy_direct_rqts; @@ -4797,11 +4820,15 @@ err_destroy_flow_steering: err_destroy_direct_tirs: mlx5e_destroy_direct_tirs(priv); err_destroy_indirect_tirs: - mlx5e_destroy_indirect_tirs(priv); + mlx5e_destroy_indirect_tirs(priv, true); err_destroy_direct_rqts: mlx5e_destroy_direct_rqts(priv); err_destroy_indirect_rqts: mlx5e_destroy_rqt(priv, &priv->indir_rqt); +err_close_drop_rq: + mlx5e_close_drop_rq(&priv->drop_rq); +err_destroy_q_counters: + mlx5e_destroy_q_counters(priv); return err; } @@ -4810,9 +4837,11 @@ static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv) mlx5e_tc_nic_cleanup(priv); mlx5e_destroy_flow_steering(priv); mlx5e_destroy_direct_tirs(priv); - mlx5e_destroy_indirect_tirs(priv); + mlx5e_destroy_indirect_tirs(priv, true); mlx5e_destroy_direct_rqts(priv); mlx5e_destroy_rqt(priv, &priv->indir_rqt); + mlx5e_close_drop_rq(&priv->drop_rq); + mlx5e_destroy_q_counters(priv); } static int mlx5e_init_nic_tx(struct mlx5e_priv *priv) @@ -4956,7 +4985,6 @@ err_cleanup_nic: int mlx5e_attach_netdev(struct mlx5e_priv *priv) { - struct mlx5_core_dev *mdev = priv->mdev; const struct mlx5e_profile *profile; int err; @@ -4967,28 +4995,16 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv) if (err) goto out; - mlx5e_create_q_counters(priv); - - err = mlx5e_open_drop_rq(priv, &priv->drop_rq); - if (err) { - mlx5_core_err(mdev, "open drop rq failed, %d\n", err); - goto err_destroy_q_counters; - } - err = profile->init_rx(priv); if (err) - goto err_close_drop_rq; + goto err_cleanup_tx; if (profile->enable) 
profile->enable(priv); return 0; -err_close_drop_rq: - mlx5e_close_drop_rq(&priv->drop_rq); - -err_destroy_q_counters: - mlx5e_destroy_q_counters(priv); +err_cleanup_tx: profile->cleanup_tx(priv); out: @@ -5006,8 +5022,6 @@ void mlx5e_detach_netdev(struct mlx5e_priv *priv) flush_workqueue(priv->wq); profile->cleanup_rx(priv); - mlx5e_close_drop_rq(&priv->drop_rq); - mlx5e_destroy_q_counters(priv); profile->cleanup_tx(priv); cancel_delayed_work_sync(&priv->update_stats_work); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index c9cc9747d21d..9264c3332aa6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -46,8 +46,6 @@ #define MLX5E_REP_PARAMS_LOG_SQ_SIZE \ max(0x6, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE) -#define MLX5E_REP_PARAMS_LOG_RQ_SIZE \ - max(0x6, MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE) static const char mlx5e_rep_driver_name[] = "mlx5e_rep"; @@ -182,12 +180,108 @@ static int mlx5e_rep_get_sset_count(struct net_device *dev, int sset) } } +static void mlx5e_rep_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *param) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + + mlx5e_ethtool_get_ringparam(priv, param); +} + +static int mlx5e_rep_set_ringparam(struct net_device *dev, + struct ethtool_ringparam *param) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + + return mlx5e_ethtool_set_ringparam(priv, param); +} + +static int mlx5e_replace_rep_vport_rx_rule(struct mlx5e_priv *priv, + struct mlx5_flow_destination *dest) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5e_rep_priv *rpriv = priv->ppriv; + struct mlx5_eswitch_rep *rep = rpriv->rep; + struct mlx5_flow_handle *flow_rule; + + flow_rule = mlx5_eswitch_create_vport_rx_rule(esw, + rep->vport, + dest); + if (IS_ERR(flow_rule)) + return PTR_ERR(flow_rule); + + mlx5_del_flow_rules(rpriv->vport_rx_rule); + rpriv->vport_rx_rule = flow_rule; + return 0; +} + +static void mlx5e_rep_get_channels(struct net_device *dev, + struct ethtool_channels *ch) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + + mlx5e_ethtool_get_channels(priv, ch); +} + +static int mlx5e_rep_set_channels(struct net_device *dev, + struct ethtool_channels *ch) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + u16 curr_channels_amount = priv->channels.params.num_channels; + u32 new_channels_amount = ch->combined_count; + struct mlx5_flow_destination new_dest; + int err = 0; + + err = mlx5e_ethtool_set_channels(priv, ch); + if (err) + return err; + + if (curr_channels_amount == 1 && new_channels_amount > 1) { + new_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + new_dest.ft = priv->fs.ttc.ft.t; + } else if (new_channels_amount == 1 && curr_channels_amount > 1) { + new_dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR; + new_dest.tir_num = priv->direct_tir[0].tirn; + } else { + return 0; + } + + err = mlx5e_replace_rep_vport_rx_rule(priv, &new_dest); + if (err) { + netdev_warn(priv->netdev, "Failed to update vport rx rule when going from %d to %d channels\n", + curr_channels_amount, new_channels_amount); + return err; + } + + return 0; +} + +static u32 mlx5e_rep_get_rxfh_key_size(struct net_device *netdev) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + + return mlx5e_ethtool_get_rxfh_key_size(priv); +} + +static u32 mlx5e_rep_get_rxfh_indir_size(struct net_device *netdev) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + + return mlx5e_ethtool_get_rxfh_indir_size(priv); +} + static
const struct ethtool_ops mlx5e_rep_ethtool_ops = { .get_drvinfo = mlx5e_rep_get_drvinfo, .get_link = ethtool_op_get_link, .get_strings = mlx5e_rep_get_strings, .get_sset_count = mlx5e_rep_get_sset_count, .get_ethtool_stats = mlx5e_rep_get_ethtool_stats, + .get_ringparam = mlx5e_rep_get_ringparam, + .set_ringparam = mlx5e_rep_set_ringparam, + .get_channels = mlx5e_rep_get_channels, + .set_channels = mlx5e_rep_set_channels, + .get_rxfh_key_size = mlx5e_rep_get_rxfh_key_size, + .get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size, }; int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr) @@ -934,16 +1028,20 @@ static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev, params->hard_mtu = MLX5E_ETH_HARD_MTU; params->sw_mtu = mtu; params->log_sq_size = MLX5E_REP_PARAMS_LOG_SQ_SIZE; - params->rq_wq_type = MLX5_WQ_TYPE_CYCLIC; - params->log_rq_mtu_frames = MLX5E_REP_PARAMS_LOG_RQ_SIZE; + /* RQ */ + mlx5e_build_rq_params(mdev, params); + + /* CQ moderation params */ params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation); mlx5e_set_rx_cq_mode_params(params, cq_period_mode); params->num_tc = 1; - params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ; mlx5_query_min_inline(mdev, &params->tx_min_inline_mode); + + /* RSS */ + mlx5e_build_rss_params(params); } static void mlx5e_build_rep_netdev(struct net_device *netdev) @@ -963,6 +1061,16 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev) netdev->features |= NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_TC | NETIF_F_NETNS_LOCAL; netdev->hw_features |= NETIF_F_HW_TC; + netdev->hw_features |= NETIF_F_SG; + netdev->hw_features |= NETIF_F_IP_CSUM; + netdev->hw_features |= NETIF_F_IPV6_CSUM; + netdev->hw_features |= NETIF_F_GRO; + netdev->hw_features |= NETIF_F_TSO; + netdev->hw_features |= NETIF_F_TSO6; + netdev->hw_features |= NETIF_F_RXCSUM; + + netdev->features |= netdev->hw_features; + eth_hw_addr_random(netdev); netdev->min_mtu = ETH_MIN_MTU; @@ -986,7 +1094,7 @@ static void mlx5e_init_rep(struct mlx5_core_dev *mdev, INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work); - priv->channels.params.num_channels = profile->max_nch(mdev); + priv->channels.params.num_channels = 1; mlx5e_build_rep_params(mdev, &priv->channels.params, netdev->mtu); mlx5e_build_rep_netdev(netdev); @@ -994,39 +1102,98 @@ static void mlx5e_init_rep(struct mlx5_core_dev *mdev, mlx5e_timestamp_init(priv); } -static int mlx5e_init_rep_rx(struct mlx5e_priv *priv) +static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv) +{ + struct ttc_params ttc_params = {}; + int tt, err; + + priv->fs.ns = mlx5_get_flow_namespace(priv->mdev, + MLX5_FLOW_NAMESPACE_KERNEL); + + /* The inner_ttc in the ttc params is intentionally not set */ + ttc_params.any_tt_tirn = priv->direct_tir[0].tirn; + mlx5e_set_ttc_ft_params(&ttc_params); + for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) + ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn; + + err = mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc); + if (err) { + netdev_err(priv->netdev, "Failed to create rep ttc table, err=%d\n", err); + return err; + } + return 0; +} + +static int mlx5e_create_rep_vport_rx_rule(struct mlx5e_priv *priv) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct mlx5e_rep_priv *rpriv = priv->ppriv; struct mlx5_eswitch_rep *rep = rpriv->rep; struct mlx5_flow_handle *flow_rule; + struct mlx5_flow_destination dest; + + dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR; + dest.tir_num = priv->direct_tir[0].tirn; + flow_rule = mlx5_eswitch_create_vport_rx_rule(esw, + rep->vport, +
&dest); + if (IS_ERR(flow_rule)) + return PTR_ERR(flow_rule); + rpriv->vport_rx_rule = flow_rule; + return 0; +} + +static int mlx5e_init_rep_rx(struct mlx5e_priv *priv) +{ + struct mlx5_core_dev *mdev = priv->mdev; int err; mlx5e_init_l2_addr(priv); + err = mlx5e_open_drop_rq(priv, &priv->drop_rq); + if (err) { + mlx5_core_err(mdev, "open drop rq failed, %d\n", err); + return err; + } + + err = mlx5e_create_indirect_rqt(priv); + if (err) + goto err_close_drop_rq; + err = mlx5e_create_direct_rqts(priv); if (err) - return err; + goto err_destroy_indirect_rqts; - err = mlx5e_create_direct_tirs(priv); + err = mlx5e_create_indirect_tirs(priv, false); if (err) goto err_destroy_direct_rqts; - flow_rule = mlx5_eswitch_create_vport_rx_rule(esw, - rep->vport, - priv->direct_tir[0].tirn); - if (IS_ERR(flow_rule)) { - err = PTR_ERR(flow_rule); + err = mlx5e_create_direct_tirs(priv); + if (err) + goto err_destroy_indirect_tirs; + + err = mlx5e_create_rep_ttc_table(priv); + if (err) goto err_destroy_direct_tirs; - } - rpriv->vport_rx_rule = flow_rule; + + err = mlx5e_create_rep_vport_rx_rule(priv); + if (err) + goto err_destroy_ttc_table; return 0; +err_destroy_ttc_table: + mlx5e_destroy_ttc_table(priv, &priv->fs.ttc); err_destroy_direct_tirs: mlx5e_destroy_direct_tirs(priv); +err_destroy_indirect_tirs: + mlx5e_destroy_indirect_tirs(priv, false); err_destroy_direct_rqts: mlx5e_destroy_direct_rqts(priv); +err_destroy_indirect_rqts: + mlx5e_destroy_rqt(priv, &priv->indir_rqt); +err_close_drop_rq: + mlx5e_close_drop_rq(&priv->drop_rq); return err; } @@ -1035,8 +1202,12 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv) struct mlx5e_rep_priv *rpriv = priv->ppriv; mlx5_del_flow_rules(rpriv->vport_rx_rule); + mlx5e_destroy_ttc_table(priv, &priv->fs.ttc); mlx5e_destroy_direct_tirs(priv); + mlx5e_destroy_indirect_tirs(priv, false); mlx5e_destroy_direct_rqts(priv); + mlx5e_destroy_rqt(priv, &priv->indir_rqt); + mlx5e_close_drop_rq(&priv->drop_rq); } static int mlx5e_init_rep_tx(struct mlx5e_priv *priv) @@ -1051,12 +1222,6 @@ static int mlx5e_init_rep_tx(struct mlx5e_priv *priv) return 0; } -static int mlx5e_get_rep_max_num_channels(struct mlx5_core_dev *mdev) -{ -#define MLX5E_PORT_REPRESENTOR_NCH 1 - return MLX5E_PORT_REPRESENTOR_NCH; -} - static const struct mlx5e_profile mlx5e_rep_profile = { .init = mlx5e_init_rep, .init_rx = mlx5e_init_rep_rx, @@ -1064,10 +1229,10 @@ static const struct mlx5e_profile mlx5e_rep_profile = { .init_tx = mlx5e_init_rep_tx, .cleanup_tx = mlx5e_cleanup_nic_tx, .update_stats = mlx5e_rep_update_hw_counters, - .max_nch = mlx5e_get_rep_max_num_channels, + .max_nch = mlx5e_get_max_num_channels, .update_carrier = NULL, .rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep, - .rx_handlers.handle_rx_cqe_mpwqe = NULL /* Not supported */, + .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq, .max_tc = 1, }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 15d8ae28c040..f19067c94272 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -37,6 +37,7 @@ #include <net/busy_poll.h> #include <net/ip6_checksum.h> #include <net/page_pool.h> +#include <net/inet_ecn.h> #include "en.h" #include "en_tc.h" #include "eswitch.h" @@ -690,12 +691,29 @@ static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe, skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result), ht); } -static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth) 
+static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth, + __be16 *proto) { - __be16 ethertype = ((struct ethhdr *)skb->data)->h_proto; + *proto = ((struct ethhdr *)skb->data)->h_proto; + *proto = __vlan_get_protocol(skb, *proto, network_depth); + return (*proto == htons(ETH_P_IP) || *proto == htons(ETH_P_IPV6)); +} + +static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb) +{ + int network_depth = 0; + __be16 proto; + void *ip; + int rc; - ethertype = __vlan_get_protocol(skb, ethertype, network_depth); - return (ethertype == htons(ETH_P_IP) || ethertype == htons(ETH_P_IPV6)); + if (unlikely(!is_last_ethertype_ip(skb, &network_depth, &proto))) + return; + + ip = skb->data + network_depth; + rc = ((proto == htons(ETH_P_IP)) ? IP_ECN_set_ce((struct iphdr *)ip) : + IP6_ECN_set_ce(skb, (struct ipv6hdr *)ip)); + + rq->stats->ecn_mark += !!rc; } static __be32 mlx5e_get_fcs(struct sk_buff *skb) @@ -737,6 +755,14 @@ static __be32 mlx5e_get_fcs(struct sk_buff *skb) return fcs_bytes; } +static u8 get_ip_proto(struct sk_buff *skb, __be16 proto) +{ + void *ip_p = skb->data + sizeof(struct ethhdr); + + return (proto == htons(ETH_P_IP)) ? ((struct iphdr *)ip_p)->protocol : + ((struct ipv6hdr *)ip_p)->nexthdr; +} + static inline void mlx5e_handle_csum(struct net_device *netdev, struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq, @@ -745,6 +771,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, { struct mlx5e_rq_stats *stats = rq->stats; int network_depth = 0; + __be16 proto; if (unlikely(!(netdev->features & NETIF_F_RXCSUM))) goto csum_none; @@ -755,7 +782,13 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, return; } - if (likely(is_last_ethertype_ip(skb, &network_depth))) { + if (unlikely(test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state))) + goto csum_unnecessary; + + if (likely(is_last_ethertype_ip(skb, &network_depth, &proto))) { + if (unlikely(get_ip_proto(skb, proto) == IPPROTO_SCTP)) + goto csum_unnecessary; + skb->ip_summed = CHECKSUM_COMPLETE; skb->csum = csum_unfold((__force __sum16)cqe->check_sum); if (network_depth > ETH_HLEN) @@ -773,8 +806,10 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, return; } +csum_unnecessary: if (likely((cqe->hds_ip_ext & CQE_L3_OK) && - (cqe->hds_ip_ext & CQE_L4_OK))) { + ((cqe->hds_ip_ext & CQE_L4_OK) || + (get_cqe_l4_hdr_type(cqe) == CQE_L4_HDR_TYPE_NONE)))) { skb->ip_summed = CHECKSUM_UNNECESSARY; if (cqe_is_tunneled(cqe)) { skb->csum_level = 1; @@ -790,6 +825,8 @@ csum_none: stats->csum_none++; } +#define MLX5E_CE_BIT_MASK 0x80 + static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, u32 cqe_bcnt, struct mlx5e_rq *rq, @@ -834,6 +871,10 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, skb->mark = be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK; mlx5e_handle_csum(netdev, cqe, rq, skb, !!lro_num_seg); + /* checking CE bit in cqe - MSB in ml_path field */ + if (unlikely(cqe->ml_path & MLX5E_CE_BIT_MASK)) + mlx5e_enable_ecn(rq, skb); + skb->protocol = eth_type_trans(skb, netdev); } @@ -1230,8 +1271,8 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq, u32 cqe_bcnt, struct sk_buff *skb) { - struct mlx5e_rq_stats *stats = rq->stats; struct hwtstamp_config *tstamp; + struct mlx5e_rq_stats *stats; struct net_device *netdev; struct mlx5e_priv *priv; char *pseudo_header; @@ -1254,6 +1295,7 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq, priv = mlx5i_epriv(netdev); tstamp = &priv->tstamp; + stats = 
&priv->channel_stats[rq->ix].rq; g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3; dgid = skb->data + MLX5_IB_GRH_DGID_OFFSET; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 6839481f7697..b7d4896c7c7b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -53,6 +53,7 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_ecn_mark) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) }, @@ -92,6 +93,7 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_events) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_poll) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_arm) }, @@ -144,6 +146,7 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) s->rx_bytes += rq_stats->bytes; s->rx_lro_packets += rq_stats->lro_packets; s->rx_lro_bytes += rq_stats->lro_bytes; + s->rx_ecn_mark += rq_stats->ecn_mark; s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets; s->rx_csum_none += rq_stats->csum_none; s->rx_csum_complete += rq_stats->csum_complete; @@ -168,6 +171,7 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) s->rx_cache_busy += rq_stats->cache_busy; s->rx_cache_waive += rq_stats->cache_waive; s->rx_congst_umr += rq_stats->congst_umr; + s->rx_arfs_err += rq_stats->arfs_err; s->ch_events += ch_stats->events; s->ch_poll += ch_stats->poll; s->ch_arm += ch_stats->arm; @@ -1144,6 +1148,7 @@ static const struct counter_desc rq_stats_desc[] = { { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_redirect) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, ecn_mark) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) }, @@ -1158,6 +1163,7 @@ static const struct counter_desc rq_stats_desc[] = { { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_busy) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_waive) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_err) }, }; static const struct counter_desc sq_stats_desc[] = { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index a4c035aedd46..77f74ce11280 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -66,6 +66,7 @@ struct mlx5e_sw_stats { u64 tx_nop; u64 rx_lro_packets; u64 rx_lro_bytes; + u64 rx_ecn_mark; u64 rx_removed_vlan_packets; u64 rx_csum_unnecessary; u64 rx_csum_none; @@ -105,6 +106,7 @@ struct mlx5e_sw_stats { u64 rx_cache_busy; u64 rx_cache_waive; u64 rx_congst_umr; + u64 rx_arfs_err; u64 ch_events; 
u64 ch_poll; u64 ch_arm; @@ -184,6 +186,7 @@ struct mlx5e_rq_stats { u64 csum_none; u64 lro_packets; u64 lro_bytes; + u64 ecn_mark; u64 removed_vlan_packets; u64 xdp_drop; u64 xdp_redirect; @@ -200,6 +203,7 @@ struct mlx5e_rq_stats { u64 cache_busy; u64 cache_waive; u64 congst_umr; + u64 arfs_err; }; struct mlx5e_sq_stats { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 85796727093e..6de21d9f4fad 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -532,7 +532,8 @@ static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv, #define UNKNOWN_MATCH_PRIO 8 static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv, - struct mlx5_flow_spec *spec, u8 *match_prio) + struct mlx5_flow_spec *spec, u8 *match_prio, + struct netlink_ext_ack *extack) { void *headers_c, *headers_v; u8 prio_val, prio_mask = 0; @@ -540,8 +541,8 @@ static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv, #ifdef CONFIG_MLX5_CORE_EN_DCB if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_PCP) { - netdev_warn(priv->netdev, - "only PCP trust state supported for hairpin\n"); + NL_SET_ERR_MSG_MOD(extack, + "only PCP trust state supported for hairpin"); return -EOPNOTSUPP; } #endif @@ -557,8 +558,8 @@ static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv, if (!vlan_present || !prio_mask) { prio_val = UNKNOWN_MATCH_PRIO; } else if (prio_mask != 0x7) { - netdev_warn(priv->netdev, - "masked priority match not supported for hairpin\n"); + NL_SET_ERR_MSG_MOD(extack, + "masked priority match not supported for hairpin"); return -EOPNOTSUPP; } @@ -568,7 +569,8 @@ static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv, static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow, - struct mlx5e_tc_flow_parse_attr *parse_attr) + struct mlx5e_tc_flow_parse_attr *parse_attr, + struct netlink_ext_ack *extack) { int peer_ifindex = parse_attr->mirred_ifindex; struct mlx5_hairpin_params params; @@ -583,12 +585,13 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv, peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex); if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) { - netdev_warn(priv->netdev, "hairpin is not supported\n"); + NL_SET_ERR_MSG_MOD(extack, "hairpin is not supported"); return -EOPNOTSUPP; } peer_id = MLX5_CAP_GEN(peer_mdev, vhca_id); - err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio); + err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio, + extack); if (err) return err; hpe = mlx5e_hairpin_get(priv, peer_id, match_prio); @@ -677,7 +680,8 @@ static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv, static struct mlx5_flow_handle * mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv, struct mlx5e_tc_flow_parse_attr *parse_attr, - struct mlx5e_tc_flow *flow) + struct mlx5e_tc_flow *flow, + struct netlink_ext_ack *extack) { struct mlx5_nic_flow_attr *attr = flow->nic_attr; struct mlx5_core_dev *dev = priv->mdev; @@ -694,7 +698,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv, int err, dest_ix = 0; if (flow->flags & MLX5E_TC_FLOW_HAIRPIN) { - err = mlx5e_hairpin_flow_add(priv, flow, parse_attr); + err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack); if (err) { rule = ERR_PTR(err); goto err_add_hairpin_flow; @@ -753,6 +757,8 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv, MLX5E_TC_TABLE_NUM_GROUPS, MLX5E_TC_FT_LEVEL, 0); if (IS_ERR(priv->fs.tc.t)) { + 
NL_SET_ERR_MSG_MOD(extack, + "Failed to create tc offload table\n"); netdev_err(priv->netdev, "Failed to create tc offload table\n"); rule = ERR_CAST(priv->fs.tc.t); @@ -819,12 +825,14 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv, struct ip_tunnel_info *tun_info, struct net_device *mirred_dev, struct net_device **encap_dev, - struct mlx5e_tc_flow *flow); + struct mlx5e_tc_flow *flow, + struct netlink_ext_ack *extack); static struct mlx5_flow_handle * mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, struct mlx5e_tc_flow_parse_attr *parse_attr, - struct mlx5e_tc_flow *flow) + struct mlx5e_tc_flow *flow, + struct netlink_ext_ack *extack) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct mlx5_esw_flow_attr *attr = flow->esw_attr; @@ -838,7 +846,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, out_dev = __dev_get_by_index(dev_net(priv->netdev), attr->parse_attr->mirred_ifindex); err = mlx5e_attach_encap(priv, &parse_attr->tun_info, - out_dev, &encap_dev, flow); + out_dev, &encap_dev, flow, extack); if (err) { rule = ERR_PTR(err); if (err != -EAGAIN) @@ -1105,6 +1113,7 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec, struct tc_cls_flower_offload *f) { + struct netlink_ext_ack *extack = f->common.extack; void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers); void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, @@ -1133,6 +1142,8 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv, MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) parse_vxlan_attr(spec, f); else { + NL_SET_ERR_MSG_MOD(extack, + "port isn't an offloaded vxlan udp dport"); netdev_warn(priv->netdev, "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->dst)); return -EOPNOTSUPP; @@ -1149,6 +1160,8 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv, udp_sport, ntohs(key->src)); } else { /* udp dst port must be given */ vxlan_match_offload_err: + NL_SET_ERR_MSG_MOD(extack, + "IP tunnel decap offload supported only for vxlan, must set UDP dport"); netdev_warn(priv->netdev, "IP tunnel decap offload supported only for vxlan, must set UDP dport\n"); return -EOPNOTSUPP; @@ -1225,6 +1238,16 @@ vxlan_match_offload_err: MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit, mask->ttl); MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit, key->ttl); + + if (mask->ttl && + !MLX5_CAP_ESW_FLOWTABLE_FDB + (priv->mdev, + ft_field_support.outer_ipv4_ttl)) { + NL_SET_ERR_MSG_MOD(extack, + "Matching on TTL is not supported"); + return -EOPNOTSUPP; + } + } /* Enforce DMAC when offloading incoming tunneled flows. 
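/*
 * A minimal illustrative sketch, not part of the patch: the pattern the
 * en_tc.c hunks above and below apply throughout the file. Each flower
 * parse/offload helper now receives the classifier's struct netlink_ext_ack
 * (f->common.extack) and reports the failure reason through it, so userspace
 * tools such as tc can print it instead of the reason only reaching dmesg
 * via netdev_warn. The helper name and its arguments are hypothetical and
 * the usual mlx5 driver headers are assumed; the capability test mirrors
 * the TTL check in parse_tunnel_attr() above.
 */
static int mlx5e_demo_ttl_check(struct mlx5_core_dev *mdev, u8 ttl_mask,
				struct netlink_ext_ack *extack)
{
	if (ttl_mask &&
	    !MLX5_CAP_ESW_FLOWTABLE_FDB(mdev,
					ft_field_support.outer_ipv4_ttl)) {
		/* NL_SET_ERR_MSG_MOD takes a constant string; it is prefixed
		 * with KBUILD_MODNAME and needs no trailing newline.
		 */
		NL_SET_ERR_MSG_MOD(extack, "Matching on TTL is not supported");
		return -EOPNOTSUPP;
	}
	return 0;
}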
@@ -1247,6 +1270,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, struct tc_cls_flower_offload *f, u8 *match_level) { + struct netlink_ext_ack *extack = f->common.extack; void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers); void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, @@ -1277,6 +1301,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, BIT(FLOW_DISSECTOR_KEY_TCP) | BIT(FLOW_DISSECTOR_KEY_IP) | BIT(FLOW_DISSECTOR_KEY_ENC_IP))) { + NL_SET_ERR_MSG_MOD(extack, "Unsupported key"); netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n", f->dissector->used_keys); return -EOPNOTSUPP; @@ -1553,8 +1578,11 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, if (mask->ttl && !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, - ft_field_support.outer_ipv4_ttl)) + ft_field_support.outer_ipv4_ttl)) { + NL_SET_ERR_MSG_MOD(extack, + "Matching on TTL is not supported"); return -EOPNOTSUPP; + } if (mask->tos || mask->ttl) *match_level = MLX5_MATCH_L3; @@ -1596,6 +1624,8 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, udp_dport, ntohs(key->dst)); break; default: + NL_SET_ERR_MSG_MOD(extack, + "Only UDP and TCP transports are supported for L4 matching"); netdev_err(priv->netdev, "Only UDP and TCP transport are supported\n"); return -EINVAL; @@ -1632,6 +1662,7 @@ static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec, struct tc_cls_flower_offload *f) { + struct netlink_ext_ack *extack = f->common.extack; struct mlx5_core_dev *dev = priv->mdev; struct mlx5_eswitch *esw = dev->priv.eswitch; struct mlx5e_rep_priv *rpriv = priv->ppriv; @@ -1646,6 +1677,8 @@ static int parse_cls_flower(struct mlx5e_priv *priv, if (rep->vport != FDB_UPLINK_VPORT && (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE && esw->offloads.inline_mode < match_level)) { + NL_SET_ERR_MSG_MOD(extack, + "Flow is not offloaded due to min inline setting"); netdev_warn(priv->netdev, "Flow is not offloaded due to min inline setting, required %d actual %d\n", match_level, esw->offloads.inline_mode); @@ -1747,7 +1780,8 @@ static struct mlx5_fields fields[] = { */ static int offload_pedit_fields(struct pedit_headers *masks, struct pedit_headers *vals, - struct mlx5e_tc_flow_parse_attr *parse_attr) + struct mlx5e_tc_flow_parse_attr *parse_attr, + struct netlink_ext_ack *extack) { struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals; int i, action_size, nactions, max_actions, first, last, next_z; @@ -1786,11 +1820,15 @@ static int offload_pedit_fields(struct pedit_headers *masks, continue; if (s_mask && a_mask) { + NL_SET_ERR_MSG_MOD(extack, + "can't set and add to the same HW field"); printk(KERN_WARNING "mlx5: can't set and add to the same HW field (%x)\n", f->field); return -EOPNOTSUPP; } if (nactions == max_actions) { + NL_SET_ERR_MSG_MOD(extack, + "too many pedit actions, can't offload"); printk(KERN_WARNING "mlx5: parsed %d pedit actions, can't do more\n", nactions); return -EOPNOTSUPP; } @@ -1823,6 +1861,8 @@ static int offload_pedit_fields(struct pedit_headers *masks, next_z = find_next_zero_bit(&mask, field_bsize, first); last = find_last_bit(&mask, field_bsize); if (first < next_z && next_z < last) { + NL_SET_ERR_MSG_MOD(extack, + "rewrite of few sub-fields isn't supported"); printk(KERN_WARNING "mlx5: rewrite of few sub-fields (mask %lx) isn't offloaded\n", mask); return -EOPNOTSUPP; @@ -1881,7 +1921,8 @@ static const struct pedit_headers zero_masks = {}; static int parse_tc_pedit_action(struct mlx5e_priv *priv, const struct 
tc_action *a, int namespace, - struct mlx5e_tc_flow_parse_attr *parse_attr) + struct mlx5e_tc_flow_parse_attr *parse_attr, + struct netlink_ext_ack *extack) { struct pedit_headers masks[__PEDIT_CMD_MAX], vals[__PEDIT_CMD_MAX], *cmd_masks; int nkeys, i, err = -EOPNOTSUPP; @@ -1899,12 +1940,13 @@ static int parse_tc_pedit_action(struct mlx5e_priv *priv, err = -EOPNOTSUPP; /* can't be all optimistic */ if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK) { - netdev_warn(priv->netdev, "legacy pedit isn't offloaded\n"); + NL_SET_ERR_MSG_MOD(extack, + "legacy pedit isn't offloaded"); goto out_err; } if (cmd != TCA_PEDIT_KEY_EX_CMD_SET && cmd != TCA_PEDIT_KEY_EX_CMD_ADD) { - netdev_warn(priv->netdev, "pedit cmd %d isn't offloaded\n", cmd); + NL_SET_ERR_MSG_MOD(extack, "pedit cmd isn't offloaded"); goto out_err; } @@ -1921,13 +1963,15 @@ static int parse_tc_pedit_action(struct mlx5e_priv *priv, if (err) goto out_err; - err = offload_pedit_fields(masks, vals, parse_attr); + err = offload_pedit_fields(masks, vals, parse_attr, extack); if (err < 0) goto out_dealloc_parsed_actions; for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) { cmd_masks = &masks[cmd]; if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) { + NL_SET_ERR_MSG_MOD(extack, + "attempt to offload an unsupported field"); netdev_warn(priv->netdev, "attempt to offload an unsupported field (cmd %d)\n", cmd); print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS, 16, 1, cmd_masks, sizeof(zero_masks), true); @@ -1944,19 +1988,26 @@ out_err: return err; } -static bool csum_offload_supported(struct mlx5e_priv *priv, u32 action, u32 update_flags) +static bool csum_offload_supported(struct mlx5e_priv *priv, + u32 action, + u32 update_flags, + struct netlink_ext_ack *extack) { u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP | TCA_CSUM_UPDATE_FLAG_UDP; /* The HW recalcs checksums only if re-writing headers */ if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) { + NL_SET_ERR_MSG_MOD(extack, + "TC csum action is only offloaded with pedit"); netdev_warn(priv->netdev, "TC csum action is only offloaded with pedit\n"); return false; } if (update_flags & ~prot_flags) { + NL_SET_ERR_MSG_MOD(extack, + "can't offload TC csum action for some header/s"); netdev_warn(priv->netdev, "can't offload TC csum action for some header/s - flags %#x\n", update_flags); @@ -1967,7 +2018,8 @@ static bool csum_offload_supported(struct mlx5e_priv *priv, u32 action, u32 upda } static bool modify_header_match_supported(struct mlx5_flow_spec *spec, - struct tcf_exts *exts) + struct tcf_exts *exts, + struct netlink_ext_ack *extack) { const struct tc_action *a; bool modify_ip_header; @@ -2005,6 +2057,8 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec, ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol); if (modify_ip_header && ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) { + NL_SET_ERR_MSG_MOD(extack, + "can't offload re-write of non TCP/UDP"); pr_info("can't offload re-write of ip proto %d\n", ip_proto); return false; } @@ -2016,7 +2070,8 @@ out_ok: static bool actions_match_supported(struct mlx5e_priv *priv, struct tcf_exts *exts, struct mlx5e_tc_flow_parse_attr *parse_attr, - struct mlx5e_tc_flow *flow) + struct mlx5e_tc_flow *flow, + struct netlink_ext_ack *extack) { u32 actions; @@ -2030,7 +2085,8 @@ static bool actions_match_supported(struct mlx5e_priv *priv, return false; if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) - return modify_header_match_supported(&parse_attr->spec, 
exts); + return modify_header_match_supported(&parse_attr->spec, exts, + extack); return true; } @@ -2043,15 +2099,16 @@ static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv) fmdev = priv->mdev; pmdev = peer_priv->mdev; - mlx5_query_nic_vport_system_image_guid(fmdev, &fsystem_guid); - mlx5_query_nic_vport_system_image_guid(pmdev, &psystem_guid); + fsystem_guid = mlx5_query_nic_system_image_guid(fmdev); + psystem_guid = mlx5_query_nic_system_image_guid(pmdev); return (fsystem_guid == psystem_guid); } static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, struct mlx5e_tc_flow_parse_attr *parse_attr, - struct mlx5e_tc_flow *flow) + struct mlx5e_tc_flow *flow, + struct netlink_ext_ack *extack) { struct mlx5_nic_flow_attr *attr = flow->nic_attr; const struct tc_action *a; @@ -2075,7 +2132,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, if (is_tcf_pedit(a)) { err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_KERNEL, - parse_attr); + parse_attr, extack); if (err) return err; @@ -2086,7 +2143,8 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, if (is_tcf_csum(a)) { if (csum_offload_supported(priv, action, - tcf_csum_update_flags(a))) + tcf_csum_update_flags(a), + extack)) continue; return -EOPNOTSUPP; @@ -2102,6 +2160,8 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_COUNT; } else { + NL_SET_ERR_MSG_MOD(extack, + "device is not on same HW, can't offload"); netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n", peer_dev->name); return -EINVAL; @@ -2113,8 +2173,8 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, u32 mark = tcf_skbedit_mark(a); if (mark & ~MLX5E_TC_FLOW_ID_MASK) { - netdev_warn(priv->netdev, "Bad flow mark - only 16 bit is supported: 0x%x\n", - mark); + NL_SET_ERR_MSG_MOD(extack, + "Bad flow mark - only 16 bit is supported"); return -EINVAL; } @@ -2127,7 +2187,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, } attr->action = action; - if (!actions_match_supported(priv, exts, parse_attr, flow)) + if (!actions_match_supported(priv, exts, parse_attr, flow, extack)) return -EOPNOTSUPP; return 0; @@ -2529,7 +2589,8 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv, struct ip_tunnel_info *tun_info, struct net_device *mirred_dev, struct net_device **encap_dev, - struct mlx5e_tc_flow *flow) + struct mlx5e_tc_flow *flow, + struct netlink_ext_ack *extack) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; unsigned short family = ip_tunnel_info_af(tun_info); @@ -2547,6 +2608,8 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv, /* setting udp src port isn't supported */ if (memchr_inv(&key->tp_src, 0, sizeof(key->tp_src))) { vxlan_encap_offload_err: + NL_SET_ERR_MSG_MOD(extack, + "must set udp dst port and not set udp src port"); netdev_warn(priv->netdev, "must set udp dst port and not set udp src port\n"); return -EOPNOTSUPP; @@ -2556,6 +2619,8 @@ vxlan_encap_offload_err: MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) { tunnel_type = MLX5_HEADER_TYPE_VXLAN; } else { + NL_SET_ERR_MSG_MOD(extack, + "port isn't an offloaded vxlan udp dport"); netdev_warn(priv->netdev, "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->tp_dst)); return -EOPNOTSUPP; @@ -2660,7 +2725,8 @@ static int parse_tc_vlan_action(struct mlx5e_priv *priv, static int 
parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, struct mlx5e_tc_flow_parse_attr *parse_attr, - struct mlx5e_tc_flow *flow) + struct mlx5e_tc_flow *flow, + struct netlink_ext_ack *extack) { struct mlx5_esw_flow_attr *attr = flow->esw_attr; struct mlx5e_rep_priv *rpriv = priv->ppriv; @@ -2686,7 +2752,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, if (is_tcf_pedit(a)) { err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_FDB, - parse_attr); + parse_attr, extack); if (err) return err; @@ -2697,7 +2763,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, if (is_tcf_csum(a)) { if (csum_offload_supported(priv, action, - tcf_csum_update_flags(a))) + tcf_csum_update_flags(a), + extack)) continue; return -EOPNOTSUPP; @@ -2710,6 +2777,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, out_dev = tcf_mirred_dev(a); if (attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) { + NL_SET_ERR_MSG_MOD(extack, + "can't support more output ports, can't offload forwarding"); pr_err("can't support more than %d output ports, can't offload forwarding\n", attr->out_count); return -EOPNOTSUPP; @@ -2733,6 +2802,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, MLX5_FLOW_CONTEXT_ACTION_COUNT; /* attr->out_rep is resolved when we handle encap */ } else { + NL_SET_ERR_MSG_MOD(extack, + "devices are not on same switch HW, can't offload forwarding"); pr_err("devices %s %s not on same switch HW, can't offload forwarding\n", priv->netdev->name, out_dev->name); return -EINVAL; @@ -2769,10 +2840,12 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, } attr->action = action; - if (!actions_match_supported(priv, exts, parse_attr, flow)) + if (!actions_match_supported(priv, exts, parse_attr, flow, extack)) return -EOPNOTSUPP; if (attr->out_count > 1 && !mlx5_esw_has_fwd_fdb(priv->mdev)) { + NL_SET_ERR_MSG_MOD(extack, + "current firmware doesn't support split rule for port mirroring"); netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n"); return -EOPNOTSUPP; } @@ -2814,6 +2887,7 @@ static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv) int mlx5e_configure_flower(struct mlx5e_priv *priv, struct tc_cls_flower_offload *f, int flags) { + struct netlink_ext_ack *extack = f->common.extack; struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct mlx5e_tc_flow_parse_attr *parse_attr; struct rhashtable *tc_ht = get_tc_ht(priv); @@ -2825,6 +2899,8 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params); if (flow) { + NL_SET_ERR_MSG_MOD(extack, + "flow cookie already exists, ignoring"); netdev_warn_once(priv->netdev, "flow cookie %lx already exists, ignoring\n", f->cookie); return 0; } @@ -2853,15 +2929,19 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, goto err_free; if (flow->flags & MLX5E_TC_FLOW_ESWITCH) { - err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow); + err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow, + extack); if (err < 0) goto err_free; - flow->rule[0] = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow); + flow->rule[0] = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow, + extack); } else { - err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow); + err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow, + extack); if (err < 0) goto err_free; - flow->rule[0] = mlx5e_tc_add_nic_flow(priv, parse_attr, 
flow); + flow->rule[0] = mlx5e_tc_add_nic_flow(priv, parse_attr, flow, + extack); } if (IS_ERR(flow->rule[0])) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index c17bfcab517c..dfc642de4e6d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -230,7 +230,8 @@ mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *attr); struct mlx5_flow_handle * -mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn); +mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, + struct mlx5_flow_destination *dest); enum { SET_VLAN_STRIP = BIT(0), @@ -268,12 +269,15 @@ struct mlx5_esw_flow_attr { struct mlx5e_tc_flow_parse_attr *parse_attr; }; -int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode); +int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, + struct netlink_ext_ack *extack); int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode); -int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode); +int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode, + struct netlink_ext_ack *extack); int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode); int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode); -int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap); +int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap, + struct netlink_ext_ack *extack); int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap); void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 3028e8d90920..a35a2310f871 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -775,10 +775,10 @@ static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw) } struct mlx5_flow_handle * -mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn) +mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, + struct mlx5_flow_destination *dest) { struct mlx5_flow_act flow_act = {0}; - struct mlx5_flow_destination dest = {}; struct mlx5_flow_handle *flow_rule; struct mlx5_flow_spec *spec; void *misc; @@ -796,12 +796,10 @@ mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn) MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; - dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR; - dest.tir_num = tirn; flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec, - &flow_act, &dest, 1); + &flow_act, dest, 1); if (IS_ERR(flow_rule)) { esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule)); goto out; @@ -812,29 +810,35 @@ out: return flow_rule; } -static int esw_offloads_start(struct mlx5_eswitch *esw) +static int esw_offloads_start(struct mlx5_eswitch *esw, + struct netlink_ext_ack *extack) { int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs; if (esw->mode != SRIOV_LEGACY) { - esw_warn(esw->dev, "Can't set offloads mode, SRIOV legacy not enabled\n"); + NL_SET_ERR_MSG_MOD(extack, + "Can't set offloads mode, SRIOV legacy 
not enabled"); return -EINVAL; } mlx5_eswitch_disable_sriov(esw); err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS); if (err) { - esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err); + NL_SET_ERR_MSG_MOD(extack, + "Failed setting eswitch to offloads"); err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY); - if (err1) - esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err1); + if (err1) { + NL_SET_ERR_MSG_MOD(extack, + "Failed setting eswitch back to legacy"); + } } if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) { if (mlx5_eswitch_inline_mode_get(esw, num_vfs, &esw->offloads.inline_mode)) { esw->offloads.inline_mode = MLX5_INLINE_MODE_L2; - esw_warn(esw->dev, "Inline mode is different between vports\n"); + NL_SET_ERR_MSG_MOD(extack, + "Inline mode is different between vports"); } } return err; @@ -975,17 +979,20 @@ create_ft_err: return err; } -static int esw_offloads_stop(struct mlx5_eswitch *esw) +static int esw_offloads_stop(struct mlx5_eswitch *esw, + struct netlink_ext_ack *extack) { int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs; mlx5_eswitch_disable_sriov(esw); err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY); if (err) { - esw_warn(esw->dev, "Failed setting eswitch to legacy, err %d\n", err); + NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy"); err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS); - if (err1) - esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err); + if (err1) { + NL_SET_ERR_MSG_MOD(extack, + "Failed setting eswitch back to offloads"); + } } /* enable back PF RoCE */ @@ -1094,7 +1101,8 @@ static int mlx5_devlink_eswitch_check(struct devlink *devlink) return 0; } -int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode) +int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, + struct netlink_ext_ack *extack) { struct mlx5_core_dev *dev = devlink_priv(devlink); u16 cur_mlx5_mode, mlx5_mode = 0; @@ -1113,9 +1121,9 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode) return 0; if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) - return esw_offloads_start(dev->priv.eswitch); + return esw_offloads_start(dev->priv.eswitch, extack); else if (mode == DEVLINK_ESWITCH_MODE_LEGACY) - return esw_offloads_stop(dev->priv.eswitch); + return esw_offloads_stop(dev->priv.eswitch, extack); else return -EINVAL; } @@ -1132,7 +1140,8 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) return esw_mode_to_devlink(dev->priv.eswitch->mode, mode); } -int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode) +int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode, + struct netlink_ext_ack *extack) { struct mlx5_core_dev *dev = devlink_priv(devlink); struct mlx5_eswitch *esw = dev->priv.eswitch; @@ -1149,14 +1158,15 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode) return 0; /* fall through */ case MLX5_CAP_INLINE_MODE_L2: - esw_warn(dev, "Inline mode can't be set\n"); + NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set"); return -EOPNOTSUPP; case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT: break; } if (esw->offloads.num_flows > 0) { - esw_warn(dev, "Can't set inline mode when flows are configured\n"); + NL_SET_ERR_MSG_MOD(extack, + "Can't set inline mode when flows are configured"); return -EOPNOTSUPP; } @@ -1167,8 +1177,8 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode) for (vport = 1; vport < esw->enabled_vports; 
vport++) { err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode); if (err) { - esw_warn(dev, "Failed to set min inline on vport %d\n", - vport); + NL_SET_ERR_MSG_MOD(extack, + "Failed to set min inline on vport"); goto revert_inline_mode; } } @@ -1234,7 +1244,8 @@ out: return 0; } -int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap) +int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap, + struct netlink_ext_ack *extack) { struct mlx5_core_dev *dev = devlink_priv(devlink); struct mlx5_eswitch *esw = dev->priv.eswitch; @@ -1261,7 +1272,8 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap) return 0; if (esw->offloads.num_flows > 0) { - esw_warn(dev, "Can't set encapsulation when flows are configured\n"); + NL_SET_ERR_MSG_MOD(extack, + "Can't set encapsulation when flows are configured"); return -EOPNOTSUPP; } @@ -1270,7 +1282,8 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap) esw->offloads.encap = encap; err = esw_create_offloads_fast_fdb_table(esw); if (err) { - esw_warn(esw->dev, "Failed re-creating fast FDB table, err %d\n", err); + NL_SET_ERR_MSG_MOD(extack, + "Failed re-creating fast FDB table"); esw->offloads.encap = !encap; (void)esw_create_offloads_fast_fdb_table(esw); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index 32070e5d993d..a06f83c0c2b6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -36,6 +36,7 @@ #include <linux/refcount.h> #include <linux/mlx5/fs.h> #include <linux/rhashtable.h> +#include <linux/llist.h> enum fs_node_type { FS_TYPE_NAMESPACE, @@ -138,8 +139,9 @@ struct mlx5_fc_cache { }; struct mlx5_fc { - struct rb_node node; struct list_head list; + struct llist_node addlist; + struct llist_node dellist; /* last{packets,bytes} members are used when calculating the delta since * last reading @@ -148,7 +150,6 @@ struct mlx5_fc { u64 lastbytes; u32 id; - bool deleted; bool aging; struct mlx5_fc_cache cache ____cacheline_aligned_in_smp; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c index 58af6be13dfa..09206c4acd9a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c @@ -52,11 +52,13 @@ * access to counter list: * - create (user context) * - mlx5_fc_create() only adds to an addlist to be used by - * mlx5_fc_stats_query_work(). addlist is protected by a spinlock. + * mlx5_fc_stats_query_work(). addlist is a lockless single linked list + * that doesn't require any additional synchronization when adding single + * node. * - spawn thread to do the actual destroy * * - destroy (user context) - * - mark a counter as deleted + * - add a counter to lockless dellist * - spawn thread to do the actual del * * - dump (user context) @@ -71,36 +73,43 @@ * elapsed, the thread will actually query the hardware. 
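 *
 * A minimal usage sketch (illustrative only; error handling elided, and
 * mlx5_fc_query()'s out-parameters assumed from its declaration later in
 * this file): creating an aging counter only links it into the lockless
 * addlist, and the work item later splices it into the by-id counters
 * list; destroy likewise only links it into dellist, and the work item
 * unlinks and frees it:
 *
 *	struct mlx5_fc *fc = mlx5_fc_create(dev, true);
 *	u64 packets, bytes;
 *
 *	mlx5_fc_query(dev, fc, &packets, &bytes);
 *	mlx5_fc_destroy(dev, fc);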
*/ -static void mlx5_fc_stats_insert(struct rb_root *root, struct mlx5_fc *counter) +static struct list_head *mlx5_fc_counters_lookup_next(struct mlx5_core_dev *dev, + u32 id) { - struct rb_node **new = &root->rb_node; - struct rb_node *parent = NULL; - - while (*new) { - struct mlx5_fc *this = rb_entry(*new, struct mlx5_fc, node); - int result = counter->id - this->id; - - parent = *new; - if (result < 0) - new = &((*new)->rb_left); - else - new = &((*new)->rb_right); - } + struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; + unsigned long next_id = (unsigned long)id + 1; + struct mlx5_fc *counter; + + rcu_read_lock(); + /* skip counters that are in idr, but not yet in counters list */ + while ((counter = idr_get_next_ul(&fc_stats->counters_idr, + &next_id)) != NULL && + list_empty(&counter->list)) + next_id++; + rcu_read_unlock(); + + return counter ? &counter->list : &fc_stats->counters; +} - /* Add new node and rebalance tree. */ - rb_link_node(&counter->node, parent, new); - rb_insert_color(&counter->node, root); +static void mlx5_fc_stats_insert(struct mlx5_core_dev *dev, + struct mlx5_fc *counter) +{ + struct list_head *next = mlx5_fc_counters_lookup_next(dev, counter->id); + + list_add_tail(&counter->list, next); } -/* The function returns the last node that was queried so the caller +/* The function returns the last counter that was queried so the caller * function can continue calling it till all counters are queried. */ -static struct rb_node *mlx5_fc_stats_query(struct mlx5_core_dev *dev, +static struct mlx5_fc *mlx5_fc_stats_query(struct mlx5_core_dev *dev, struct mlx5_fc *first, u32 last_id) { + struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; + struct mlx5_fc *counter = NULL; struct mlx5_cmd_fc_bulk *b; - struct rb_node *node = NULL; + bool more = false; u32 afirst_id; int num; int err; @@ -130,14 +139,16 @@ static struct rb_node *mlx5_fc_stats_query(struct mlx5_core_dev *dev, goto out; } - for (node = &first->node; node; node = rb_next(node)) { - struct mlx5_fc *counter = rb_entry(node, struct mlx5_fc, node); + counter = first; + list_for_each_entry_from(counter, &fc_stats->counters, list) { struct mlx5_fc_cache *c = &counter->cache; u64 packets; u64 bytes; - if (counter->id > last_id) + if (counter->id > last_id) { + more = true; break; + } mlx5_cmd_fc_bulk_get(dev, b, counter->id, &packets, &bytes); @@ -153,7 +164,14 @@ static struct rb_node *mlx5_fc_stats_query(struct mlx5_core_dev *dev, out: mlx5_cmd_fc_bulk_free(b); - return node; + return more ? 
counter : NULL; +} + +static void mlx5_free_fc(struct mlx5_core_dev *dev, + struct mlx5_fc *counter) +{ + mlx5_cmd_fc_free(dev, counter->id); + kfree(counter); } static void mlx5_fc_stats_work(struct work_struct *work) @@ -161,52 +179,33 @@ static void mlx5_fc_stats_work(struct work_struct *work) struct mlx5_core_dev *dev = container_of(work, struct mlx5_core_dev, priv.fc_stats.work.work); struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; + struct llist_node *tmplist = llist_del_all(&fc_stats->addlist); + struct mlx5_fc *counter = NULL, *last = NULL, *tmp; unsigned long now = jiffies; - struct mlx5_fc *counter = NULL; - struct mlx5_fc *last = NULL; - struct rb_node *node; - LIST_HEAD(tmplist); - - spin_lock(&fc_stats->addlist_lock); - list_splice_tail_init(&fc_stats->addlist, &tmplist); - - if (!list_empty(&tmplist) || !RB_EMPTY_ROOT(&fc_stats->counters)) + if (tmplist || !list_empty(&fc_stats->counters)) queue_delayed_work(fc_stats->wq, &fc_stats->work, fc_stats->sampling_interval); - spin_unlock(&fc_stats->addlist_lock); - - list_for_each_entry(counter, &tmplist, list) - mlx5_fc_stats_insert(&fc_stats->counters, counter); - - node = rb_first(&fc_stats->counters); - while (node) { - counter = rb_entry(node, struct mlx5_fc, node); - - node = rb_next(node); - - if (counter->deleted) { - rb_erase(&counter->node, &fc_stats->counters); + llist_for_each_entry(counter, tmplist, addlist) + mlx5_fc_stats_insert(dev, counter); - mlx5_cmd_fc_free(dev, counter->id); - - kfree(counter); - continue; - } + tmplist = llist_del_all(&fc_stats->dellist); + llist_for_each_entry_safe(counter, tmp, tmplist, dellist) { + list_del(&counter->list); - last = counter; + mlx5_free_fc(dev, counter); } - if (time_before(now, fc_stats->next_query) || !last) + if (time_before(now, fc_stats->next_query) || + list_empty(&fc_stats->counters)) return; + last = list_last_entry(&fc_stats->counters, struct mlx5_fc, list); - node = rb_first(&fc_stats->counters); - while (node) { - counter = rb_entry(node, struct mlx5_fc, node); - - node = mlx5_fc_stats_query(dev, counter, last->id); - } + counter = list_first_entry(&fc_stats->counters, struct mlx5_fc, + list); + while (counter) + counter = mlx5_fc_stats_query(dev, counter, last->id); fc_stats->next_query = now + fc_stats->sampling_interval; } @@ -220,24 +219,38 @@ struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging) counter = kzalloc(sizeof(*counter), GFP_KERNEL); if (!counter) return ERR_PTR(-ENOMEM); + INIT_LIST_HEAD(&counter->list); err = mlx5_cmd_fc_alloc(dev, &counter->id); if (err) goto err_out; if (aging) { + u32 id = counter->id; + counter->cache.lastuse = jiffies; counter->aging = true; - spin_lock(&fc_stats->addlist_lock); - list_add(&counter->list, &fc_stats->addlist); - spin_unlock(&fc_stats->addlist_lock); + idr_preload(GFP_KERNEL); + spin_lock(&fc_stats->counters_idr_lock); + + err = idr_alloc_u32(&fc_stats->counters_idr, counter, &id, id, + GFP_NOWAIT); + + spin_unlock(&fc_stats->counters_idr_lock); + idr_preload_end(); + if (err) + goto err_out_alloc; + + llist_add(&counter->addlist, &fc_stats->addlist); mod_delayed_work(fc_stats->wq, &fc_stats->work, 0); } return counter; +err_out_alloc: + mlx5_cmd_fc_free(dev, counter->id); err_out: kfree(counter); @@ -253,13 +266,16 @@ void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter) return; if (counter->aging) { - counter->deleted = true; + spin_lock(&fc_stats->counters_idr_lock); + WARN_ON(!idr_remove(&fc_stats->counters_idr, counter->id)); + 
spin_unlock(&fc_stats->counters_idr_lock); + + llist_add(&counter->dellist, &fc_stats->dellist); mod_delayed_work(fc_stats->wq, &fc_stats->work, 0); return; } - mlx5_cmd_fc_free(dev, counter->id); - kfree(counter); + mlx5_free_fc(dev, counter); } EXPORT_SYMBOL(mlx5_fc_destroy); @@ -267,9 +283,11 @@ int mlx5_init_fc_stats(struct mlx5_core_dev *dev) { struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; - fc_stats->counters = RB_ROOT; - INIT_LIST_HEAD(&fc_stats->addlist); - spin_lock_init(&fc_stats->addlist_lock); + spin_lock_init(&fc_stats->counters_idr_lock); + idr_init(&fc_stats->counters_idr); + INIT_LIST_HEAD(&fc_stats->counters); + init_llist_head(&fc_stats->addlist); + init_llist_head(&fc_stats->dellist); fc_stats->wq = create_singlethread_workqueue("mlx5_fc"); if (!fc_stats->wq) @@ -284,34 +302,22 @@ int mlx5_init_fc_stats(struct mlx5_core_dev *dev) void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev) { struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; + struct llist_node *tmplist; struct mlx5_fc *counter; struct mlx5_fc *tmp; - struct rb_node *node; cancel_delayed_work_sync(&dev->priv.fc_stats.work); destroy_workqueue(dev->priv.fc_stats.wq); dev->priv.fc_stats.wq = NULL; - list_for_each_entry_safe(counter, tmp, &fc_stats->addlist, list) { - list_del(&counter->list); - - mlx5_cmd_fc_free(dev, counter->id); + idr_destroy(&fc_stats->counters_idr); - kfree(counter); - } + tmplist = llist_del_all(&fc_stats->addlist); + llist_for_each_entry_safe(counter, tmp, tmplist, addlist) + mlx5_free_fc(dev, counter); - node = rb_first(&fc_stats->counters); - while (node) { - counter = rb_entry(node, struct mlx5_fc, node); - - node = rb_next(node); - - rb_erase(&counter->node, &fc_stats->counters); - - mlx5_cmd_fc_free(dev, counter->id); - - kfree(counter); - } + list_for_each_entry_safe(counter, tmp, &fc_stats->counters, list) + mlx5_free_fc(dev, counter); } int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index 41ad24f0de2c..1ab6f7e3bec6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c @@ -250,7 +250,7 @@ int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev) if (ret) return ret; - force_state = MLX5_GET(teardown_hca_out, out, force_state); + force_state = MLX5_GET(teardown_hca_out, out, state); if (force_state == MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_FAIL) { mlx5_core_warn(dev, "teardown with force mode failed, doing normal teardown\n"); return -EIO; @@ -259,6 +259,54 @@ int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev) return 0; } +#define MLX5_FAST_TEARDOWN_WAIT_MS 3000 +int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev) +{ + unsigned long end, delay_ms = MLX5_FAST_TEARDOWN_WAIT_MS; + u32 out[MLX5_ST_SZ_DW(teardown_hca_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {0}; + int state; + int ret; + + if (!MLX5_CAP_GEN(dev, fast_teardown)) { + mlx5_core_dbg(dev, "fast teardown is not supported in the firmware\n"); + return -EOPNOTSUPP; + } + + MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA); + MLX5_SET(teardown_hca_in, in, profile, + MLX5_TEARDOWN_HCA_IN_PROFILE_PREPARE_FAST_TEARDOWN); + + ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + if (ret) + return ret; + + state = MLX5_GET(teardown_hca_out, out, state); + if (state == MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_FAIL) { + mlx5_core_warn(dev, "teardown with fast mode failed\n"); + return -EIO; + } + + 
mlx5_set_nic_state(dev, MLX5_NIC_IFC_DISABLED); + + /* Loop until device state turns to disable */ + end = jiffies + msecs_to_jiffies(delay_ms); + do { + if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED) + break; + + cond_resched(); + } while (!time_after(jiffies, end)); + + if (mlx5_get_nic_state(dev) != MLX5_NIC_IFC_DISABLED) { + dev_err(&dev->pdev->dev, "NIC IFC still %d after %lums.\n", + mlx5_get_nic_state(dev), delay_ms); + return -EIO; + } + + return 0; +} + enum mlxsw_reg_mcc_instruction { MLX5_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE = 0x01, MLX5_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE = 0x02, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 9f39aeca863f..43118de8ee99 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -59,22 +59,25 @@ enum { }; enum { - MLX5_NIC_IFC_FULL = 0, - MLX5_NIC_IFC_DISABLED = 1, - MLX5_NIC_IFC_NO_DRAM_NIC = 2, - MLX5_NIC_IFC_INVALID = 3 -}; - -enum { MLX5_DROP_NEW_HEALTH_WORK, MLX5_DROP_NEW_RECOVERY_WORK, }; -static u8 get_nic_state(struct mlx5_core_dev *dev) +u8 mlx5_get_nic_state(struct mlx5_core_dev *dev) { return (ioread32be(&dev->iseg->cmdq_addr_l_sz) >> 8) & 3; } +void mlx5_set_nic_state(struct mlx5_core_dev *dev, u8 state) +{ + u32 cur_cmdq_addr_l_sz; + + cur_cmdq_addr_l_sz = ioread32be(&dev->iseg->cmdq_addr_l_sz); + iowrite32be((cur_cmdq_addr_l_sz & 0xFFFFF000) | + state << MLX5_NIC_IFC_OFFSET, + &dev->iseg->cmdq_addr_l_sz); +} + static void trigger_cmd_completions(struct mlx5_core_dev *dev) { unsigned long flags; @@ -103,7 +106,7 @@ static int in_fatal(struct mlx5_core_dev *dev) struct mlx5_core_health *health = &dev->priv.health; struct health_buffer __iomem *h = health->health; - if (get_nic_state(dev) == MLX5_NIC_IFC_DISABLED) + if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED) return 1; if (ioread32be(&h->fw_ver) == 0xffffffff) @@ -133,7 +136,7 @@ unlock: static void mlx5_handle_bad_state(struct mlx5_core_dev *dev) { - u8 nic_interface = get_nic_state(dev); + u8 nic_interface = mlx5_get_nic_state(dev); switch (nic_interface) { case MLX5_NIC_IFC_FULL: @@ -168,7 +171,7 @@ static void health_recover(struct work_struct *work) priv = container_of(health, struct mlx5_priv, health); dev = container_of(priv, struct mlx5_core_dev, priv); - nic_state = get_nic_state(dev); + nic_state = mlx5_get_nic_state(dev); if (nic_state == MLX5_NIC_IFC_INVALID) { dev_err(&dev->pdev->dev, "health recovery flow aborted since the nic state is invalid\n"); return; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index e3797a44e074..299e2a897f7e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -45,6 +45,7 @@ static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu); static const struct net_device_ops mlx5i_netdev_ops = { .ndo_open = mlx5i_open, .ndo_stop = mlx5i_close, + .ndo_get_stats64 = mlx5i_get_stats, .ndo_init = mlx5i_dev_init, .ndo_uninit = mlx5i_dev_cleanup, .ndo_change_mtu = mlx5i_change_mtu, @@ -83,6 +84,7 @@ void mlx5i_init(struct mlx5_core_dev *mdev, priv->netdev = netdev; priv->profile = profile; priv->ppriv = ppriv; + priv->max_opened_tc = 1; mutex_init(&priv->state_lock); mlx5_query_port_max_mtu(mdev, &max_mtu, 1); @@ -114,6 +116,47 @@ static void mlx5i_cleanup(struct mlx5e_priv *priv) /* Do nothing .. 
*/ } +static void mlx5i_grp_sw_update_stats(struct mlx5e_priv *priv) +{ + struct mlx5e_sw_stats s = { 0 }; + int i, j; + + for (i = 0; i < priv->profile->max_nch(priv->mdev); i++) { + struct mlx5e_channel_stats *channel_stats; + struct mlx5e_rq_stats *rq_stats; + + channel_stats = &priv->channel_stats[i]; + rq_stats = &channel_stats->rq; + + s.rx_packets += rq_stats->packets; + s.rx_bytes += rq_stats->bytes; + + for (j = 0; j < priv->max_opened_tc; j++) { + struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j]; + + s.tx_packets += sq_stats->packets; + s.tx_bytes += sq_stats->bytes; + s.tx_queue_dropped += sq_stats->dropped; + } + } + + memcpy(&priv->stats.sw, &s, sizeof(s)); +} + +void mlx5i_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) +{ + struct mlx5e_priv *priv = mlx5i_epriv(dev); + struct mlx5e_sw_stats *sstats = &priv->stats.sw; + + mlx5i_grp_sw_update_stats(priv); + + stats->rx_packets = sstats->rx_packets; + stats->rx_bytes = sstats->rx_bytes; + stats->tx_packets = sstats->tx_packets; + stats->tx_bytes = sstats->tx_bytes; + stats->tx_dropped = sstats->tx_queue_dropped; +} + int mlx5i_init_underlay_qp(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; @@ -306,17 +349,26 @@ static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv) static int mlx5i_init_rx(struct mlx5e_priv *priv) { + struct mlx5_core_dev *mdev = priv->mdev; int err; + mlx5e_create_q_counters(priv); + + err = mlx5e_open_drop_rq(priv, &priv->drop_rq); + if (err) { + mlx5_core_err(mdev, "open drop rq failed, %d\n", err); + goto err_destroy_q_counters; + } + err = mlx5e_create_indirect_rqt(priv); if (err) - return err; + goto err_close_drop_rq; err = mlx5e_create_direct_rqts(priv); if (err) goto err_destroy_indirect_rqts; - err = mlx5e_create_indirect_tirs(priv); + err = mlx5e_create_indirect_tirs(priv, true); if (err) goto err_destroy_direct_rqts; @@ -333,11 +385,15 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv) err_destroy_direct_tirs: mlx5e_destroy_direct_tirs(priv); err_destroy_indirect_tirs: - mlx5e_destroy_indirect_tirs(priv); + mlx5e_destroy_indirect_tirs(priv, true); err_destroy_direct_rqts: mlx5e_destroy_direct_rqts(priv); err_destroy_indirect_rqts: mlx5e_destroy_rqt(priv, &priv->indir_rqt); +err_close_drop_rq: + mlx5e_close_drop_rq(&priv->drop_rq); +err_destroy_q_counters: + mlx5e_destroy_q_counters(priv); return err; } @@ -345,9 +401,11 @@ static void mlx5i_cleanup_rx(struct mlx5e_priv *priv) { mlx5i_destroy_flow_steering(priv); mlx5e_destroy_direct_tirs(priv); - mlx5e_destroy_indirect_tirs(priv); + mlx5e_destroy_indirect_tirs(priv, true); mlx5e_destroy_direct_rqts(priv); mlx5e_destroy_rqt(priv, &priv->indir_rqt); + mlx5e_close_drop_rq(&priv->drop_rq); + mlx5e_destroy_q_counters(priv); } static const struct mlx5e_profile mlx5i_nic_profile = { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h index 08eac92fc26c..2e7fb829e1b0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h @@ -121,6 +121,7 @@ static inline void mlx5i_sq_fetch_wqe(struct mlx5e_txqsq *sq, netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5_av *av, u32 dqpn, u32 dqkey); void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); +void mlx5i_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats); #endif /* CONFIG_MLX5_CORE_IPOIB */ #endif /* __MLX5E_IPOB_H__ */ diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c index 54a188f41f90..e3e8a5f1ac9b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c @@ -146,6 +146,7 @@ static const struct net_device_ops mlx5i_pkey_netdev_ops = { .ndo_open = mlx5i_pkey_open, .ndo_stop = mlx5i_pkey_close, .ndo_init = mlx5i_pkey_dev_init, + .ndo_get_stats64 = mlx5i_get_stats, .ndo_uninit = mlx5i_pkey_dev_cleanup, .ndo_change_mtu = mlx5i_pkey_change_mtu, .ndo_do_ioctl = mlx5i_pkey_ioctl, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c index 3f767cde4c1d..0d90b1b4a3d3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c @@ -111,10 +111,10 @@ static void mlx5_pps_out(struct work_struct *work) for (i = 0; i < clock->ptp_info.n_pins; i++) { u64 tstart; - write_lock_irqsave(&clock->lock, flags); + write_seqlock_irqsave(&clock->lock, flags); tstart = clock->pps_info.start[i]; clock->pps_info.start[i] = 0; - write_unlock_irqrestore(&clock->lock, flags); + write_sequnlock_irqrestore(&clock->lock, flags); if (!tstart) continue; @@ -132,10 +132,10 @@ static void mlx5_timestamp_overflow(struct work_struct *work) overflow_work); unsigned long flags; - write_lock_irqsave(&clock->lock, flags); + write_seqlock_irqsave(&clock->lock, flags); timecounter_read(&clock->tc); mlx5_update_clock_info_page(clock->mdev); - write_unlock_irqrestore(&clock->lock, flags); + write_sequnlock_irqrestore(&clock->lock, flags); schedule_delayed_work(&clock->overflow_work, clock->overflow_period); } @@ -147,10 +147,10 @@ static int mlx5_ptp_settime(struct ptp_clock_info *ptp, u64 ns = timespec64_to_ns(ts); unsigned long flags; - write_lock_irqsave(&clock->lock, flags); + write_seqlock_irqsave(&clock->lock, flags); timecounter_init(&clock->tc, &clock->cycles, ns); mlx5_update_clock_info_page(clock->mdev); - write_unlock_irqrestore(&clock->lock, flags); + write_sequnlock_irqrestore(&clock->lock, flags); return 0; } @@ -162,9 +162,9 @@ static int mlx5_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) u64 ns; unsigned long flags; - write_lock_irqsave(&clock->lock, flags); + write_seqlock_irqsave(&clock->lock, flags); ns = timecounter_read(&clock->tc); - write_unlock_irqrestore(&clock->lock, flags); + write_sequnlock_irqrestore(&clock->lock, flags); *ts = ns_to_timespec64(ns); @@ -177,10 +177,10 @@ static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) ptp_info); unsigned long flags; - write_lock_irqsave(&clock->lock, flags); + write_seqlock_irqsave(&clock->lock, flags); timecounter_adjtime(&clock->tc, delta); mlx5_update_clock_info_page(clock->mdev); - write_unlock_irqrestore(&clock->lock, flags); + write_sequnlock_irqrestore(&clock->lock, flags); return 0; } @@ -203,12 +203,12 @@ static int mlx5_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta) adj *= delta; diff = div_u64(adj, 1000000000ULL); - write_lock_irqsave(&clock->lock, flags); + write_seqlock_irqsave(&clock->lock, flags); timecounter_read(&clock->tc); clock->cycles.mult = neg_adj ? 
clock->nominal_c_mult - diff : clock->nominal_c_mult + diff; mlx5_update_clock_info_page(clock->mdev); - write_unlock_irqrestore(&clock->lock, flags); + write_sequnlock_irqrestore(&clock->lock, flags); return 0; } @@ -307,12 +307,12 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp, ts.tv_nsec = rq->perout.start.nsec; ns = timespec64_to_ns(&ts); cycles_now = mlx5_read_internal_timer(mdev); - write_lock_irqsave(&clock->lock, flags); + write_seqlock_irqsave(&clock->lock, flags); nsec_now = timecounter_cyc2time(&clock->tc, cycles_now); nsec_delta = ns - nsec_now; cycles_delta = div64_u64(nsec_delta << clock->cycles.shift, clock->cycles.mult); - write_unlock_irqrestore(&clock->lock, flags); + write_sequnlock_irqrestore(&clock->lock, flags); time_stamp = cycles_now + cycles_delta; field_select = MLX5_MTPPS_FS_PIN_MODE | MLX5_MTPPS_FS_PATTERN | @@ -471,14 +471,14 @@ void mlx5_pps_event(struct mlx5_core_dev *mdev, ts.tv_sec += 1; ts.tv_nsec = 0; ns = timespec64_to_ns(&ts); - write_lock_irqsave(&clock->lock, flags); + write_seqlock_irqsave(&clock->lock, flags); nsec_now = timecounter_cyc2time(&clock->tc, cycles_now); nsec_delta = ns - nsec_now; cycles_delta = div64_u64(nsec_delta << clock->cycles.shift, clock->cycles.mult); clock->pps_info.start[pin] = cycles_now + cycles_delta; schedule_work(&clock->pps_info.out_work); - write_unlock_irqrestore(&clock->lock, flags); + write_sequnlock_irqrestore(&clock->lock, flags); break; default: mlx5_core_err(mdev, " Unhandled event\n"); @@ -498,7 +498,7 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev) mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n"); return; } - rwlock_init(&clock->lock); + seqlock_init(&clock->lock); clock->cycles.read = read_internal_timer; clock->cycles.shift = MLX5_CYCLES_SHIFT; clock->cycles.mult = clocksource_khz2mult(dev_freq, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h index 02e2e4575e4f..263cb6e2aeee 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h @@ -46,11 +46,13 @@ static inline int mlx5_clock_get_ptp_index(struct mlx5_core_dev *mdev) static inline ktime_t mlx5_timecounter_cyc2time(struct mlx5_clock *clock, u64 timestamp) { + unsigned int seq; u64 nsec; - read_lock(&clock->lock); - nsec = timecounter_cyc2time(&clock->tc, timestamp); - read_unlock(&clock->lock); + do { + seq = read_seqbegin(&clock->lock); + nsec = timecounter_cyc2time(&clock->tc, timestamp); + } while (read_seqretry(&clock->lock, seq)); return ns_to_ktime(nsec); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index b5e9f664fc66..28132c7dc05f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1594,12 +1594,17 @@ static const struct pci_error_handlers mlx5_err_handler = { static int mlx5_try_fast_unload(struct mlx5_core_dev *dev) { - int ret; + bool fast_teardown = false, force_teardown = false; + int ret = 1; + + fast_teardown = MLX5_CAP_GEN(dev, fast_teardown); + force_teardown = MLX5_CAP_GEN(dev, force_teardown); + + mlx5_core_dbg(dev, "force teardown firmware support=%d\n", force_teardown); + mlx5_core_dbg(dev, "fast teardown firmware support=%d\n", fast_teardown); - if (!MLX5_CAP_GEN(dev, force_teardown)) { - mlx5_core_dbg(dev, "force teardown is not supported in the firmware\n"); + if (!fast_teardown && !force_teardown) return -EOPNOTSUPP; - } 
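For orientation, the reshaped mlx5_try_fast_unload() in this hunk attempts the fast teardown first and only falls back to the force variant when that fails, succeeding if either works. A compact sketch of that try-then-fallback control flow, with hypothetical stubs in place of the firmware commands:

    #include <errno.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for the fast and force teardown commands. */
    static int try_fast_teardown(void)  { return -EOPNOTSUPP; }
    static int try_force_teardown(void) { return 0; }

    /* Try the cheap path first; fall back to the heavier one; report
     * the last error only when both refuse. */
    static int try_unload(void)
    {
        int ret;

        ret = try_fast_teardown();
        if (!ret)
            return 0;

        ret = try_force_teardown();
        if (!ret)
            return 0;

        return ret;
    }

    int main(void)
    {
        printf("unload: %d\n", try_unload());
        return 0;
    }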
if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { mlx5_core_dbg(dev, "Device in internal error state, giving up\n"); @@ -1612,13 +1617,19 @@ static int mlx5_try_fast_unload(struct mlx5_core_dev *dev) mlx5_drain_health_wq(dev); mlx5_stop_health_poll(dev, false); + ret = mlx5_cmd_fast_teardown_hca(dev); + if (!ret) + goto succeed; + ret = mlx5_cmd_force_teardown_hca(dev); - if (ret) { - mlx5_core_dbg(dev, "Firmware couldn't do fast unload error: %d\n", ret); - mlx5_start_health_poll(dev); - return ret; - } + if (!ret) + goto succeed; + + mlx5_core_dbg(dev, "Firmware couldn't do fast unload error: %d\n", ret); + mlx5_start_health_poll(dev); + return ret; +succeed: mlx5_enter_error_state(dev, true); /* Some platforms requiring freeing the IRQ's in the shutdown diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index b4134fa0bba3..cc298527baf1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -95,6 +95,8 @@ int mlx5_query_board_id(struct mlx5_core_dev *dev); int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id); int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev); int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev); +int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev); + void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event, unsigned long param); void mlx5_core_page_fault(struct mlx5_core_dev *dev, @@ -214,4 +216,14 @@ int mlx5_lag_allow(struct mlx5_core_dev *dev); int mlx5_lag_forbid(struct mlx5_core_dev *dev); void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol); + +enum { + MLX5_NIC_IFC_FULL = 0, + MLX5_NIC_IFC_DISABLED = 1, + MLX5_NIC_IFC_NO_DRAM_NIC = 2, + MLX5_NIC_IFC_INVALID = 3 +}; + +u8 mlx5_get_nic_state(struct mlx5_core_dev *dev); +void mlx5_set_nic_state(struct mlx5_core_dev *dev, u8 state); #endif /* __MLX5_CORE_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/srq.c b/drivers/net/ethernet/mellanox/mlx5/core/srq.c index 23cc337a96c9..7e20666ce5ae 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/srq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/srq.c @@ -73,7 +73,7 @@ static int get_pas_size(struct mlx5_srq_attr *in) u32 rq_sz = 1 << (log_srq_size + 4 + log_rq_stride); u32 page_size = 1 << log_page_size; u32 rq_sz_po = rq_sz + (page_offset * po_quanta); - u32 rq_num_pas = (rq_sz_po + page_size - 1) / page_size; + u32 rq_num_pas = DIV_ROUND_UP(rq_sz_po, page_size); return rq_num_pas * sizeof(u64); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index b02af317c125..cfbea66b4879 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c @@ -1201,3 +1201,12 @@ int mlx5_nic_vport_unaffiliate_multiport(struct mlx5_core_dev *port_mdev) return err; } EXPORT_SYMBOL_GPL(mlx5_nic_vport_unaffiliate_multiport); + +u64 mlx5_query_nic_system_image_guid(struct mlx5_core_dev *mdev) +{ + if (!mdev->sys_image_guid) + mlx5_query_nic_vport_system_image_guid(mdev, &mdev->sys_image_guid); + + return mdev->sys_image_guid; +} +EXPORT_SYMBOL_GPL(mlx5_query_nic_system_image_guid); diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h index 83f452b7ccbb..bb99f6d41fe0 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h +++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h @@ -221,7 +221,7 @@ MLXSW_ITEM32(pci, 
eqe, event_type, 0x0C, 24, 8); MLXSW_ITEM32(pci, eqe, event_sub_type, 0x0C, 16, 8); /* pci_eqe_cqn - * Completion Queue that triggeret this EQE. + * Completion Queue that triggered this EQE. */ MLXSW_ITEM32(pci, eqe, cqn, 0x0C, 8, 7); diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 6e8b619b769b..32cb6718bb17 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -295,6 +295,7 @@ enum mlxsw_reg_sfd_rec_type { MLXSW_REG_SFD_REC_TYPE_UNICAST = 0x0, MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG = 0x1, MLXSW_REG_SFD_REC_TYPE_MULTICAST = 0x2, + MLXSW_REG_SFD_REC_TYPE_UNICAST_TUNNEL = 0xC, }; /* reg_sfd_rec_type @@ -525,6 +526,61 @@ mlxsw_reg_sfd_mc_pack(char *payload, int rec_index, mlxsw_reg_sfd_mc_mid_set(payload, rec_index, mid); } +/* reg_sfd_uc_tunnel_uip_msb + * When protocol is IPv4, the most significant byte of the underlay IPv4 + * destination IP. + * When protocol is IPv6, reserved. + * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, sfd, uc_tunnel_uip_msb, MLXSW_REG_SFD_BASE_LEN, 24, + 8, MLXSW_REG_SFD_REC_LEN, 0x08, false); + +/* reg_sfd_uc_tunnel_fid + * Filtering ID. + * Access: Index + */ +MLXSW_ITEM32_INDEXED(reg, sfd, uc_tunnel_fid, MLXSW_REG_SFD_BASE_LEN, 0, 16, + MLXSW_REG_SFD_REC_LEN, 0x08, false); + +enum mlxsw_reg_sfd_uc_tunnel_protocol { + MLXSW_REG_SFD_UC_TUNNEL_PROTOCOL_IPV4, + MLXSW_REG_SFD_UC_TUNNEL_PROTOCOL_IPV6, +}; + +/* reg_sfd_uc_tunnel_protocol + * IP protocol. + * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, sfd, uc_tunnel_protocol, MLXSW_REG_SFD_BASE_LEN, 27, + 1, MLXSW_REG_SFD_REC_LEN, 0x0C, false); + +/* reg_sfd_uc_tunnel_uip_lsb + * When protocol is IPv4, the least significant bytes of the underlay + * IPv4 destination IP. + * When protocol is IPv6, pointer to the underlay IPv6 destination IP + * which is configured by RIPS. + * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, sfd, uc_tunnel_uip_lsb, MLXSW_REG_SFD_BASE_LEN, 0, + 24, MLXSW_REG_SFD_REC_LEN, 0x0C, false); + +static inline void +mlxsw_reg_sfd_uc_tunnel_pack(char *payload, int rec_index, + enum mlxsw_reg_sfd_rec_policy policy, + const char *mac, u16 fid, + enum mlxsw_reg_sfd_rec_action action, u32 uip, + enum mlxsw_reg_sfd_uc_tunnel_protocol proto) +{ + mlxsw_reg_sfd_rec_pack(payload, rec_index, + MLXSW_REG_SFD_REC_TYPE_UNICAST_TUNNEL, mac, + action); + mlxsw_reg_sfd_rec_policy_set(payload, rec_index, policy); + mlxsw_reg_sfd_uc_tunnel_uip_msb_set(payload, rec_index, uip >> 24); + mlxsw_reg_sfd_uc_tunnel_uip_lsb_set(payload, rec_index, uip); + mlxsw_reg_sfd_uc_tunnel_fid_set(payload, rec_index, fid); + mlxsw_reg_sfd_uc_tunnel_protocol_set(payload, rec_index, proto); +} + /* SFN - Switch FDB Notification Register * ------------------------------------------- * The switch provides notifications on newly learned FDB entries and @@ -1069,6 +1125,8 @@ enum mlxsw_reg_sfdf_flush_type { MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID, MLXSW_REG_SFDF_FLUSH_PER_LAG, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID, + MLXSW_REG_SFDF_FLUSH_PER_NVE, + MLXSW_REG_SFDF_FLUSH_PER_NVE_AND_FID, }; /* reg_sfdf_flush_type @@ -1079,6 +1137,10 @@ enum mlxsw_reg_sfdf_flush_type { * 3 - All FID dynamic entries pointing to port are flushed. * 4 - All dynamic entries pointing to LAG are flushed. * 5 - All FID dynamic entries pointing to LAG are flushed. + * 6 - All entries of type "Unicast Tunnel" or "Multicast Tunnel" are + * flushed. + * 7 - All entries of type "Unicast Tunnel" or "Multicast Tunnel" are + * flushed, per FID. 
* Access: RW */ MLXSW_ITEM32(reg, sfdf, flush_type, 0x04, 28, 4); @@ -1315,12 +1377,19 @@ MLXSW_ITEM32(reg, slcr, type, 0x00, 0, 4); */ MLXSW_ITEM32(reg, slcr, lag_hash, 0x04, 0, 20); -static inline void mlxsw_reg_slcr_pack(char *payload, u16 lag_hash) +/* reg_slcr_seed + * LAG seed value. The seed is the same for all ports. + * Access: RW + */ +MLXSW_ITEM32(reg, slcr, seed, 0x08, 0, 32); + +static inline void mlxsw_reg_slcr_pack(char *payload, u16 lag_hash, u32 seed) { MLXSW_REG_ZERO(slcr, payload); mlxsw_reg_slcr_pp_set(payload, MLXSW_REG_SLCR_PP_GLOBAL); mlxsw_reg_slcr_type_set(payload, MLXSW_REG_SLCR_TYPE_CRC); mlxsw_reg_slcr_lag_hash_set(payload, lag_hash); + mlxsw_reg_slcr_seed_set(payload, seed); } /* SLCOR - Switch LAG Collector Register @@ -8279,6 +8348,508 @@ static inline void mlxsw_reg_mgpc_pack(char *payload, u32 counter_index, mlxsw_reg_mgpc_opcode_set(payload, opcode); } +/* MPRS - Monitoring Parsing State Register + * ---------------------------------------- + * The MPRS register is used for setting up the parsing for hash, + * policy-engine and routing. + */ +#define MLXSW_REG_MPRS_ID 0x9083 +#define MLXSW_REG_MPRS_LEN 0x14 + +MLXSW_REG_DEFINE(mprs, MLXSW_REG_MPRS_ID, MLXSW_REG_MPRS_LEN); + +/* reg_mprs_parsing_depth + * Minimum parsing depth. + * Need to enlarge parsing depth according to L3, MPLS, tunnels, ACL + * rules, traps, hash, etc. Default is 96 bytes. Reserved when SwitchX-2. + * Access: RW + */ +MLXSW_ITEM32(reg, mprs, parsing_depth, 0x00, 0, 16); + +/* reg_mprs_parsing_en + * Parsing enable. + * Bit 0 - Enable parsing of NVE of types VxLAN, VxLAN-GPE, GENEVE and + * NVGRE. Default is enabled. Reserved when SwitchX-2. + * Access: RW + */ +MLXSW_ITEM32(reg, mprs, parsing_en, 0x04, 0, 16); + +/* reg_mprs_vxlan_udp_dport + * VxLAN UDP destination port. + * Used for identifying VxLAN packets and for dport field in + * encapsulation. Default is 4789. + * Access: RW + */ +MLXSW_ITEM32(reg, mprs, vxlan_udp_dport, 0x10, 0, 16); + +static inline void mlxsw_reg_mprs_pack(char *payload, u16 parsing_depth, + u16 vxlan_udp_dport) +{ + MLXSW_REG_ZERO(mprs, payload); + mlxsw_reg_mprs_parsing_depth_set(payload, parsing_depth); + mlxsw_reg_mprs_parsing_en_set(payload, true); + mlxsw_reg_mprs_vxlan_udp_dport_set(payload, vxlan_udp_dport); +} + +/* TNGCR - Tunneling NVE General Configuration Register + * ---------------------------------------------------- + * The TNGCR register is used for setting up the NVE Tunneling configuration. + */ +#define MLXSW_REG_TNGCR_ID 0xA001 +#define MLXSW_REG_TNGCR_LEN 0x44 + +MLXSW_REG_DEFINE(tngcr, MLXSW_REG_TNGCR_ID, MLXSW_REG_TNGCR_LEN); + +enum mlxsw_reg_tngcr_type { + MLXSW_REG_TNGCR_TYPE_VXLAN, + MLXSW_REG_TNGCR_TYPE_VXLAN_GPE, + MLXSW_REG_TNGCR_TYPE_GENEVE, + MLXSW_REG_TNGCR_TYPE_NVGRE, +}; + +/* reg_tngcr_type + * Tunnel type for encapsulation and decapsulation. The types are mutually + * exclusive. + * Note: For Spectrum the NVE parsing must be enabled in MPRS. + * Access: RW + */ +MLXSW_ITEM32(reg, tngcr, type, 0x00, 0, 4); + +/* reg_tngcr_nve_valid + * The VTEP is valid. Allows adding FDB entries for tunnel encapsulation. + * Access: RW + */ +MLXSW_ITEM32(reg, tngcr, nve_valid, 0x04, 31, 1); + +/* reg_tngcr_nve_ttl_uc + * The TTL for NVE tunnel encapsulation underlay unicast packets. + * Access: RW + */ +MLXSW_ITEM32(reg, tngcr, nve_ttl_uc, 0x04, 0, 8); + +/* reg_tngcr_nve_ttl_mc + * The TTL for NVE tunnel encapsulation underlay multicast packets. 
+ * Access: RW + */ +MLXSW_ITEM32(reg, tngcr, nve_ttl_mc, 0x08, 0, 8); + +enum { + /* Do not copy flow label. Calculate flow label using nve_flh. */ + MLXSW_REG_TNGCR_FL_NO_COPY, + /* Copy flow label from inner packet if packet is IPv6 and + * encapsulation is by IPv6. Otherwise, calculate flow label using + * nve_flh. + */ + MLXSW_REG_TNGCR_FL_COPY, +}; + +/* reg_tngcr_nve_flc + * For NVE tunnel encapsulation: Flow label copy from inner packet. + * Access: RW + */ +MLXSW_ITEM32(reg, tngcr, nve_flc, 0x0C, 25, 1); + +enum { + /* Flow label is static. In Spectrum this means '0'. Spectrum-2 + * uses {nve_fl_prefix, nve_fl_suffix}. + */ + MLXSW_REG_TNGCR_FL_NO_HASH, + /* 8 LSBs of the flow label are calculated from ECMP hash of the + * inner packet. 12 MSBs are configured by nve_fl_prefix. + */ + MLXSW_REG_TNGCR_FL_HASH, +}; + +/* reg_tngcr_nve_flh + * NVE flow label hash. + * Access: RW + */ +MLXSW_ITEM32(reg, tngcr, nve_flh, 0x0C, 24, 1); + +/* reg_tngcr_nve_fl_prefix + * NVE flow label prefix. Constant 12 MSBs of the flow label. + * Access: RW + */ +MLXSW_ITEM32(reg, tngcr, nve_fl_prefix, 0x0C, 8, 12); + +/* reg_tngcr_nve_fl_suffix + * NVE flow label suffix. Constant 8 LSBs of the flow label. + * Reserved when nve_flh=1 and for Spectrum. + * Access: RW + */ +MLXSW_ITEM32(reg, tngcr, nve_fl_suffix, 0x0C, 0, 8); + +enum { + /* Source UDP port is fixed (default '0') */ + MLXSW_REG_TNGCR_UDP_SPORT_NO_HASH, + /* Source UDP port is calculated based on hash */ + MLXSW_REG_TNGCR_UDP_SPORT_HASH, +}; + +/* reg_tngcr_nve_udp_sport_type + * NVE UDP source port type. + * Spectrum uses LAG hash (SLCRv2). Spectrum-2 uses ECMP hash (RECRv2). + * When the source UDP port is calculated based on hash, then the 8 LSBs + * are calculated from the hash and the 8 MSBs are configured by + * nve_udp_sport_prefix. + * Access: RW + */ +MLXSW_ITEM32(reg, tngcr, nve_udp_sport_type, 0x10, 24, 1); + +/* reg_tngcr_nve_udp_sport_prefix + * NVE UDP source port prefix. Constant 8 MSBs of the UDP source port. + * Reserved when NVE type is NVGRE. + * Access: RW + */ +MLXSW_ITEM32(reg, tngcr, nve_udp_sport_prefix, 0x10, 8, 8); + +/* reg_tngcr_nve_group_size_mc + * The number of sequential linked lists of MC entries. The first linked + * list is configured by SFD.underlay_mc_ptr. + * Valid values: 1, 2, 4, 8, 16, 32, 64 + * The linked lists are configured by TNUMT. + * The hash is set by LAG hash. + * Access: RW + */ +MLXSW_ITEM32(reg, tngcr, nve_group_size_mc, 0x18, 0, 8); + +/* reg_tngcr_nve_group_size_flood + * The number of sequential linked lists of flooding entries. The first + * linked list is configured by SFMR.nve_tunnel_flood_ptr. + * Valid values: 1, 2, 4, 8, 16, 32, 64 + * The linked lists are configured by TNUMT. + * The hash is set by LAG hash. + * Access: RW + */ +MLXSW_ITEM32(reg, tngcr, nve_group_size_flood, 0x1C, 0, 8); + +/* reg_tngcr_learn_enable + * During decapsulation, whether to learn from NVE port. + * Reserved when Spectrum-2. See TNPC. + * Access: RW + */ +MLXSW_ITEM32(reg, tngcr, learn_enable, 0x20, 31, 1); + +/* reg_tngcr_underlay_virtual_router + * Underlay virtual router. + * Reserved when Spectrum-2. + * Access: RW + */ +MLXSW_ITEM32(reg, tngcr, underlay_virtual_router, 0x20, 0, 16); + +/* reg_tngcr_underlay_rif + * Underlay ingress router interface. RIF type should be loopback generic. + * Reserved when Spectrum. + * Access: RW + */ +MLXSW_ITEM32(reg, tngcr, underlay_rif, 0x24, 0, 16); + +/* reg_tngcr_usipv4 + * Underlay source IPv4 address of the NVE. 
+ * Access: RW + */ +MLXSW_ITEM32(reg, tngcr, usipv4, 0x28, 0, 32); + +/* reg_tngcr_usipv6 + * Underlay source IPv6 address of the NVE. For Spectrum, must not be + * modified under traffic of NVE tunneling encapsulation. + * Access: RW + */ +MLXSW_ITEM_BUF(reg, tngcr, usipv6, 0x30, 16); + +static inline void mlxsw_reg_tngcr_pack(char *payload, + enum mlxsw_reg_tngcr_type type, + bool valid, u8 ttl) +{ + MLXSW_REG_ZERO(tngcr, payload); + mlxsw_reg_tngcr_type_set(payload, type); + mlxsw_reg_tngcr_nve_valid_set(payload, valid); + mlxsw_reg_tngcr_nve_ttl_uc_set(payload, ttl); + mlxsw_reg_tngcr_nve_ttl_mc_set(payload, ttl); + mlxsw_reg_tngcr_nve_flc_set(payload, MLXSW_REG_TNGCR_FL_NO_COPY); + mlxsw_reg_tngcr_nve_flh_set(payload, 0); + mlxsw_reg_tngcr_nve_udp_sport_type_set(payload, + MLXSW_REG_TNGCR_UDP_SPORT_HASH); + mlxsw_reg_tngcr_nve_udp_sport_prefix_set(payload, 0); + mlxsw_reg_tngcr_nve_group_size_mc_set(payload, 1); + mlxsw_reg_tngcr_nve_group_size_flood_set(payload, 1); +} + +/* TNUMT - Tunneling NVE Underlay Multicast Table Register + * ------------------------------------------------------- + * The TNUMT register is for building the underlay MC table. It is used + * for MC, flooding and BC traffic into the NVE tunnel. + */ +#define MLXSW_REG_TNUMT_ID 0xA003 +#define MLXSW_REG_TNUMT_LEN 0x20 + +MLXSW_REG_DEFINE(tnumt, MLXSW_REG_TNUMT_ID, MLXSW_REG_TNUMT_LEN); + +enum mlxsw_reg_tnumt_record_type { + MLXSW_REG_TNUMT_RECORD_TYPE_IPV4, + MLXSW_REG_TNUMT_RECORD_TYPE_IPV6, + MLXSW_REG_TNUMT_RECORD_TYPE_LABEL, +}; + +/* reg_tnumt_record_type + * Record type. + * Access: RW + */ +MLXSW_ITEM32(reg, tnumt, record_type, 0x00, 28, 4); + +enum mlxsw_reg_tnumt_tunnel_port { + MLXSW_REG_TNUMT_TUNNEL_PORT_NVE, + MLXSW_REG_TNUMT_TUNNEL_PORT_VPLS, + MLXSW_REG_TNUMT_TUNNEL_FLEX_TUNNEL0, + MLXSW_REG_TNUMT_TUNNEL_FLEX_TUNNEL1, +}; + +/* reg_tnumt_tunnel_port + * Tunnel port. + * Access: RW + */ +MLXSW_ITEM32(reg, tnumt, tunnel_port, 0x00, 24, 4); + +/* reg_tnumt_underlay_mc_ptr + * Index to the underlay multicast table. + * For Spectrum the index is to the KVD linear. + * Access: Index + */ +MLXSW_ITEM32(reg, tnumt, underlay_mc_ptr, 0x00, 0, 24); + +/* reg_tnumt_vnext + * The next_underlay_mc_ptr is valid. + * Access: RW + */ +MLXSW_ITEM32(reg, tnumt, vnext, 0x04, 31, 1); + +/* reg_tnumt_next_underlay_mc_ptr + * The next index to the underlay multicast table. + * Access: RW + */ +MLXSW_ITEM32(reg, tnumt, next_underlay_mc_ptr, 0x04, 0, 24); + +/* reg_tnumt_record_size + * Number of IP addresses in the record. + * Range is 1..cap_max_nve_mc_entries_ipv{4,6} + * Access: RW + */ +MLXSW_ITEM32(reg, tnumt, record_size, 0x08, 0, 3); + +/* reg_tnumt_udip + * The underlay IPv4 addresses. udip[i] is reserved if i >= size + * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, tnumt, udip, 0x0C, 0, 32, 0x04, 0x00, false); + +/* reg_tnumt_udip_ptr + * The pointer to the underlay IPv6 addresses. udip_ptr[i] is reserved if + * i >= size. The IPv6 addresses are configured by RIPS. 
+ * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, tnumt, udip_ptr, 0x0C, 0, 24, 0x04, 0x00, false); + +static inline void mlxsw_reg_tnumt_pack(char *payload, + enum mlxsw_reg_tnumt_record_type type, + enum mlxsw_reg_tnumt_tunnel_port tport, + u32 underlay_mc_ptr, bool vnext, + u32 next_underlay_mc_ptr, + u8 record_size) +{ + MLXSW_REG_ZERO(tnumt, payload); + mlxsw_reg_tnumt_record_type_set(payload, type); + mlxsw_reg_tnumt_tunnel_port_set(payload, tport); + mlxsw_reg_tnumt_underlay_mc_ptr_set(payload, underlay_mc_ptr); + mlxsw_reg_tnumt_vnext_set(payload, vnext); + mlxsw_reg_tnumt_next_underlay_mc_ptr_set(payload, next_underlay_mc_ptr); + mlxsw_reg_tnumt_record_size_set(payload, record_size); +} + +/* TNQCR - Tunneling NVE QoS Configuration Register + * ------------------------------------------------ + * The TNQCR register configures how QoS is set in encapsulation into the + * underlay network. + */ +#define MLXSW_REG_TNQCR_ID 0xA010 +#define MLXSW_REG_TNQCR_LEN 0x0C + +MLXSW_REG_DEFINE(tnqcr, MLXSW_REG_TNQCR_ID, MLXSW_REG_TNQCR_LEN); + +/* reg_tnqcr_enc_set_dscp + * For encapsulation: How to set DSCP field: + * 0 - Copy the DSCP from the overlay (inner) IP header to the underlay + * (outer) IP header. If there is no IP header, use TNQDR.dscp + * 1 - Set the DSCP field as TNQDR.dscp + * Access: RW + */ +MLXSW_ITEM32(reg, tnqcr, enc_set_dscp, 0x04, 28, 1); + +static inline void mlxsw_reg_tnqcr_pack(char *payload) +{ + MLXSW_REG_ZERO(tnqcr, payload); + mlxsw_reg_tnqcr_enc_set_dscp_set(payload, 0); +} + +/* TNQDR - Tunneling NVE QoS Default Register + * ------------------------------------------ + * The TNQDR register configures the default QoS settings for NVE + * encapsulation. + */ +#define MLXSW_REG_TNQDR_ID 0xA011 +#define MLXSW_REG_TNQDR_LEN 0x08 + +MLXSW_REG_DEFINE(tnqdr, MLXSW_REG_TNQDR_ID, MLXSW_REG_TNQDR_LEN); + +/* reg_tnqdr_local_port + * Local port number (receive port). CPU port is supported. + * Access: Index + */ +MLXSW_ITEM32(reg, tnqdr, local_port, 0x00, 16, 8); + +/* reg_tnqdr_dscp + * For encapsulation, the default DSCP. + * Access: RW + */ +MLXSW_ITEM32(reg, tnqdr, dscp, 0x04, 0, 6); + +static inline void mlxsw_reg_tnqdr_pack(char *payload, u8 local_port) +{ + MLXSW_REG_ZERO(tnqdr, payload); + mlxsw_reg_tnqdr_local_port_set(payload, local_port); + mlxsw_reg_tnqdr_dscp_set(payload, 0); +} + +/* TNEEM - Tunneling NVE Encapsulation ECN Mapping Register + * -------------------------------------------------------- + * The TNEEM register maps ECN of the IP header at the ingress to the + * encapsulation to the ECN of the underlay network. + */ +#define MLXSW_REG_TNEEM_ID 0xA012 +#define MLXSW_REG_TNEEM_LEN 0x0C + +MLXSW_REG_DEFINE(tneem, MLXSW_REG_TNEEM_ID, MLXSW_REG_TNEEM_LEN); + +/* reg_tneem_overlay_ecn + * ECN of the IP header in the overlay network. + * Access: Index + */ +MLXSW_ITEM32(reg, tneem, overlay_ecn, 0x04, 24, 2); + +/* reg_tneem_underlay_ecn + * ECN of the IP header in the underlay network. + * Access: RW + */ +MLXSW_ITEM32(reg, tneem, underlay_ecn, 0x04, 16, 2); + +static inline void mlxsw_reg_tneem_pack(char *payload, u8 overlay_ecn, + u8 underlay_ecn) +{ + MLXSW_REG_ZERO(tneem, payload); + mlxsw_reg_tneem_overlay_ecn_set(payload, overlay_ecn); + mlxsw_reg_tneem_underlay_ecn_set(payload, underlay_ecn); +} + +/* TNDEM - Tunneling NVE Decapsulation ECN Mapping Register + * -------------------------------------------------------- + * The TNDEM register configures the actions that are done in the + * decapsulation. 
+ */ +#define MLXSW_REG_TNDEM_ID 0xA013 +#define MLXSW_REG_TNDEM_LEN 0x0C + +MLXSW_REG_DEFINE(tndem, MLXSW_REG_TNDEM_ID, MLXSW_REG_TNDEM_LEN); + +/* reg_tndem_underlay_ecn + * ECN field of the IP header in the underlay network. + * Access: Index + */ +MLXSW_ITEM32(reg, tndem, underlay_ecn, 0x04, 24, 2); + +/* reg_tndem_overlay_ecn + * ECN field of the IP header in the overlay network. + * Access: Index + */ +MLXSW_ITEM32(reg, tndem, overlay_ecn, 0x04, 16, 2); + +/* reg_tndem_eip_ecn + * Egress IP ECN. ECN field of the IP header of the packet which goes out + * from the decapsulation. + * Access: RW + */ +MLXSW_ITEM32(reg, tndem, eip_ecn, 0x04, 8, 2); + +/* reg_tndem_trap_en + * Trap enable: + * 0 - No trap due to decap ECN + * 1 - Trap enable with trap_id + * Access: RW + */ +MLXSW_ITEM32(reg, tndem, trap_en, 0x08, 28, 4); + +/* reg_tndem_trap_id + * Trap ID. Either DECAP_ECN0 or DECAP_ECN1. + * Reserved when trap_en is '0'. + * Access: RW + */ +MLXSW_ITEM32(reg, tndem, trap_id, 0x08, 0, 9); + +static inline void mlxsw_reg_tndem_pack(char *payload, u8 underlay_ecn, + u8 overlay_ecn, u8 ecn, bool trap_en, + u16 trap_id) +{ + MLXSW_REG_ZERO(tndem, payload); + mlxsw_reg_tndem_underlay_ecn_set(payload, underlay_ecn); + mlxsw_reg_tndem_overlay_ecn_set(payload, overlay_ecn); + mlxsw_reg_tndem_eip_ecn_set(payload, ecn); + mlxsw_reg_tndem_trap_en_set(payload, trap_en); + mlxsw_reg_tndem_trap_id_set(payload, trap_id); +} + +/* TNPC - Tunnel Port Configuration Register + * ----------------------------------------- + * The TNPC register is used for tunnel port configuration. + * Reserved when Spectrum. + */ +#define MLXSW_REG_TNPC_ID 0xA020 +#define MLXSW_REG_TNPC_LEN 0x18 + +MLXSW_REG_DEFINE(tnpc, MLXSW_REG_TNPC_ID, MLXSW_REG_TNPC_LEN); + +enum mlxsw_reg_tnpc_tunnel_port { + MLXSW_REG_TNPC_TUNNEL_PORT_NVE, + MLXSW_REG_TNPC_TUNNEL_PORT_VPLS, + MLXSW_REG_TNPC_TUNNEL_FLEX_TUNNEL0, + MLXSW_REG_TNPC_TUNNEL_FLEX_TUNNEL1, +}; + +/* reg_tnpc_tunnel_port + * Tunnel port. + * Access: Index + */ +MLXSW_ITEM32(reg, tnpc, tunnel_port, 0x00, 0, 4); + +/* reg_tnpc_learn_enable_v6 + * During IPv6 underlay decapsulation, whether to learn from tunnel port. + * Access: RW + */ +MLXSW_ITEM32(reg, tnpc, learn_enable_v6, 0x04, 1, 1); + +/* reg_tnpc_learn_enable_v4 + * During IPv4 underlay decapsulation, whether to learn from tunnel port. + * Access: RW + */ +MLXSW_ITEM32(reg, tnpc, learn_enable_v4, 0x04, 0, 1); + +static inline void mlxsw_reg_tnpc_pack(char *payload, + enum mlxsw_reg_tnpc_tunnel_port tport, + bool learn_enable) +{ + MLXSW_REG_ZERO(tnpc, payload); + mlxsw_reg_tnpc_tunnel_port_set(payload, tport); + mlxsw_reg_tnpc_learn_enable_v4_set(payload, learn_enable); + mlxsw_reg_tnpc_learn_enable_v6_set(payload, learn_enable); +} + /* TIGCR - Tunneling IPinIP General Configuration Register * ------------------------------------------------------- * The TIGCR register is used for setting up the IPinIP Tunnel configuration. @@ -8336,8 +8907,15 @@ MLXSW_ITEM32(reg, sbpr, dir, 0x00, 24, 2); */ MLXSW_ITEM32(reg, sbpr, pool, 0x00, 0, 4); +/* reg_sbpr_infi_size + * Size is infinite. + * Access: RW + */ +MLXSW_ITEM32(reg, sbpr, infi_size, 0x04, 31, 1); + /* reg_sbpr_size * Pool size in buffer cells. + * Reserved when infi_size = 1. 
* Access: RW */ MLXSW_ITEM32(reg, sbpr, size, 0x04, 0, 24); @@ -8355,13 +8933,15 @@ MLXSW_ITEM32(reg, sbpr, mode, 0x08, 0, 4); static inline void mlxsw_reg_sbpr_pack(char *payload, u8 pool, enum mlxsw_reg_sbxx_dir dir, - enum mlxsw_reg_sbpr_mode mode, u32 size) + enum mlxsw_reg_sbpr_mode mode, u32 size, + bool infi_size) { MLXSW_REG_ZERO(sbpr, payload); mlxsw_reg_sbpr_pool_set(payload, pool); mlxsw_reg_sbpr_dir_set(payload, dir); mlxsw_reg_sbpr_mode_set(payload, mode); mlxsw_reg_sbpr_size_set(payload, size); + mlxsw_reg_sbpr_infi_size_set(payload, infi_size); } /* SBCM - Shared Buffer Class Management Register @@ -8409,6 +8989,12 @@ MLXSW_ITEM32(reg, sbcm, min_buff, 0x18, 0, 24); #define MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN 1 #define MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX 14 +/* reg_sbcm_infi_max + * Max buffer is infinite. + * Access: RW + */ +MLXSW_ITEM32(reg, sbcm, infi_max, 0x1C, 31, 1); + /* reg_sbcm_max_buff * When the pool associated to the port-pg/tclass is configured to * static, Maximum buffer size for the limiter configured in cells. @@ -8418,6 +9004,7 @@ MLXSW_ITEM32(reg, sbcm, min_buff, 0x18, 0, 24); * 0: 0 * i: (1/128)*2^(i-1), for i=1..14 * 0xFF: Infinity + * Reserved when infi_max = 1. * Access: RW */ MLXSW_ITEM32(reg, sbcm, max_buff, 0x1C, 0, 24); @@ -8430,7 +9017,8 @@ MLXSW_ITEM32(reg, sbcm, pool, 0x24, 0, 4); static inline void mlxsw_reg_sbcm_pack(char *payload, u8 local_port, u8 pg_buff, enum mlxsw_reg_sbxx_dir dir, - u32 min_buff, u32 max_buff, u8 pool) + u32 min_buff, u32 max_buff, + bool infi_max, u8 pool) { MLXSW_REG_ZERO(sbcm, payload); mlxsw_reg_sbcm_local_port_set(payload, local_port); @@ -8438,6 +9026,7 @@ static inline void mlxsw_reg_sbcm_pack(char *payload, u8 local_port, u8 pg_buff, mlxsw_reg_sbcm_dir_set(payload, dir); mlxsw_reg_sbcm_min_buff_set(payload, min_buff); mlxsw_reg_sbcm_max_buff_set(payload, max_buff); + mlxsw_reg_sbcm_infi_max_set(payload, infi_max); mlxsw_reg_sbcm_pool_set(payload, pool); } @@ -8810,6 +9399,14 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(mcc), MLXSW_REG(mcda), MLXSW_REG(mgpc), + MLXSW_REG(mprs), + MLXSW_REG(tngcr), + MLXSW_REG(tnumt), + MLXSW_REG(tnqcr), + MLXSW_REG(tnqdr), + MLXSW_REG(tneem), + MLXSW_REG(tndem), + MLXSW_REG(tnpc), MLXSW_REG(tigcr), MLXSW_REG(sbpr), MLXSW_REG(sbcm), diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h index 79a31de7c825..99b341539870 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/resources.h +++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h @@ -46,6 +46,8 @@ enum mlxsw_res_id { MLXSW_RES_ID_MAX_RIFS, MLXSW_RES_ID_MC_ERIF_LIST_ENTRIES, MLXSW_RES_ID_MAX_LPM_TREES, + MLXSW_RES_ID_MAX_NVE_MC_ENTRIES_IPV4, + MLXSW_RES_ID_MAX_NVE_MC_ENTRIES_IPV6, /* Internal resources. * Determined by the SW, not queried from the HW. 
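Before the resource-table update below: mlxsw resource identifiers are plain 16-bit codes queried from firmware, kept in an array indexed by the enum extended above. A standalone sketch of that enum-indexed lookup, reusing the 0x2C30/0x2E02/0x2E03 codes from this hunk (the shortened names are illustrative, not driver identifiers):

    #include <stdint.h>
    #include <stdio.h>

    enum res_id {
        RES_MAX_LPM_TREES,
        RES_MAX_NVE_MC_IPV4,
        RES_MAX_NVE_MC_IPV6,
    };

    /* Enum-indexed table of firmware resource identifiers. */
    static const uint16_t res_ids[] = {
        [RES_MAX_LPM_TREES]   = 0x2C30,
        [RES_MAX_NVE_MC_IPV4] = 0x2E02,
        [RES_MAX_NVE_MC_IPV6] = 0x2E03,
    };

    int main(void)
    {
        printf("NVE IPv4 MC resource id: 0x%04x\n",
               (unsigned int)res_ids[RES_MAX_NVE_MC_IPV4]);
        return 0;
    }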
@@ -96,6 +98,8 @@ static u16 mlxsw_res_ids[] = { [MLXSW_RES_ID_MAX_RIFS] = 0x2C02, [MLXSW_RES_ID_MC_ERIF_LIST_ENTRIES] = 0x2C10, [MLXSW_RES_ID_MAX_LPM_TREES] = 0x2C30, + [MLXSW_RES_ID_MAX_NVE_MC_ENTRIES_IPV4] = 0x2E02, + [MLXSW_RES_ID_MAX_NVE_MC_ENTRIES_IPV6] = 0x2E03, }; struct mlxsw_res { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 30bb2c533cec..ed7e4c4e7403 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -21,6 +21,7 @@ #include <linux/dcbnl.h> #include <linux/inetdevice.h> #include <linux/netlink.h> +#include <linux/random.h> #include <net/switchdev.h> #include <net/pkt_cls.h> #include <net/tc_act/tc_mirred.h> @@ -331,7 +332,10 @@ static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp) return -EINVAL; } if (MLXSW_SP_FWREV_MINOR_TO_BRANCH(rev->minor) == - MLXSW_SP_FWREV_MINOR_TO_BRANCH(req_rev->minor)) + MLXSW_SP_FWREV_MINOR_TO_BRANCH(req_rev->minor) && + (rev->minor > req_rev->minor || + (rev->minor == req_rev->minor && + rev->subminor >= req_rev->subminor))) return 0; dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver\n", @@ -2804,6 +2808,13 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port) MLXSW_REG_QEEC_MAS_DIS); if (err) return err; + + err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, + MLXSW_REG_QEEC_HIERARCY_TC, + i + 8, i, + MLXSW_REG_QEEC_MAS_DIS); + if (err) + return err; } /* Map all priorities to traffic class 0. */ @@ -3459,6 +3470,7 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = { MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false), MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false), MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false), + MLXSW_SP_RXL_MARK(DECAP_ECN0, TRAP_TO_CPU, ROUTER_EXP, false), MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, ROUTER_EXP, false), MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, ROUTER_EXP, false), /* PKT Sample trap */ @@ -3472,6 +3484,8 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = { MLXSW_SP_RXL_MARK(RPF, TRAP_TO_CPU, RPF, false), MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false), MLXSW_SP_RXL_MR_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false), + /* NVE traps */ + MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, ARP, false), }; static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core) @@ -3656,8 +3670,10 @@ static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp) static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp) { char slcr_pl[MLXSW_REG_SLCR_LEN]; + u32 seed; int err; + get_random_bytes(&seed, sizeof(seed)); mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC | MLXSW_REG_SLCR_LAG_HASH_DMAC | MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE | @@ -3666,7 +3682,7 @@ static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp) MLXSW_REG_SLCR_LAG_HASH_DIP | MLXSW_REG_SLCR_LAG_HASH_SPORT | MLXSW_REG_SLCR_LAG_HASH_DPORT | - MLXSW_REG_SLCR_LAG_HASH_IPPROTO); + MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 3cdb7aca90b7..1f68ac2a20f4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -383,6 +383,17 @@ static inline void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port) #endif /* spectrum_router.c */ 
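The mlxsw_sp_l3proto/mlxsw_sp_l3addr pair introduced just below is a tagged union: the enum tells the caller which member of the union is meaningful. A minimal userspace sketch of how such a (proto, addr) tuple is typically consumed; the inet_ntop() plumbing is an assumption for illustration, not taken from the driver:

    #include <netinet/in.h>
    #include <arpa/inet.h>
    #include <stdio.h>

    enum l3proto { L3_PROTO_IPV4, L3_PROTO_IPV6 };

    union l3addr {
        struct in_addr addr4;
        struct in6_addr addr6;
    };

    /* The enum tag selects which union member is valid. */
    static void print_addr(enum l3proto proto, const union l3addr *addr)
    {
        char buf[INET6_ADDRSTRLEN];

        if (proto == L3_PROTO_IPV4)
            inet_ntop(AF_INET, &addr->addr4, buf, sizeof(buf));
        else
            inet_ntop(AF_INET6, &addr->addr6, buf, sizeof(buf));
        printf("%s\n", buf);
    }

    int main(void)
    {
        union l3addr a = { .addr4.s_addr = htonl(0xC0A80001) }; /* 192.168.0.1 */

        print_addr(L3_PROTO_IPV4, &a);
        return 0;
    }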
+enum mlxsw_sp_l3proto { + MLXSW_SP_L3_PROTO_IPV4, + MLXSW_SP_L3_PROTO_IPV6, +#define MLXSW_SP_L3_PROTO_MAX (MLXSW_SP_L3_PROTO_IPV6 + 1) +}; + +union mlxsw_sp_l3addr { + __be32 addr4; + struct in6_addr addr6; +}; + int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp); void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp); int mlxsw_sp_netdevice_router_port_event(struct net_device *dev); @@ -416,6 +427,10 @@ mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan); void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif); void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp, struct net_device *dev); +struct mlxsw_sp_rif *mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp, + const struct net_device *dev); +u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp); +struct mlxsw_sp_fid *mlxsw_sp_rif_fid(const struct mlxsw_sp_rif *rif); /* spectrum_kvdl.c */ enum mlxsw_sp_kvdl_entry_type { @@ -423,6 +438,7 @@ enum mlxsw_sp_kvdl_entry_type { MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET, MLXSW_SP_KVDL_ENTRY_TYPE_PBS, MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR, + MLXSW_SP_KVDL_ENTRY_TYPE_TNUMT, }; static inline unsigned int @@ -433,6 +449,7 @@ mlxsw_sp_kvdl_entry_size(enum mlxsw_sp_kvdl_entry_type type) case MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET: /* fall through */ case MLXSW_SP_KVDL_ENTRY_TYPE_PBS: /* fall through */ case MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR: /* fall through */ + case MLXSW_SP_KVDL_ENTRY_TYPE_TNUMT: /* fall through */ default: return 1; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c index 68c8b148bef2..8d14770766b4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c @@ -35,6 +35,7 @@ static const struct mlxsw_sp2_kvdl_part_info mlxsw_sp2_kvdl_parts_info[] = { MAX_KVD_ACTION_SETS), MLXSW_SP2_KVDL_PART_INFO(PBS, 0x24, KVD_SIZE, KVD_SIZE), MLXSW_SP2_KVDL_PART_INFO(MCRIGR, 0x26, KVD_SIZE, KVD_SIZE), + MLXSW_SP2_KVDL_PART_INFO(TNUMT, 0x29, KVD_SIZE, KVD_SIZE), }; #define MLXSW_SP2_KVDL_PARTS_INFO_LEN ARRAY_SIZE(mlxsw_sp2_kvdl_parts_info) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c index 3589432d1643..12c61e0cc570 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c @@ -25,28 +25,52 @@ struct mlxsw_cp_sb_occ { struct mlxsw_sp_sb_cm { u32 min_buff; u32 max_buff; - u8 pool; + u16 pool_index; struct mlxsw_cp_sb_occ occ; }; +#define MLXSW_SP_SB_INFI -1U + struct mlxsw_sp_sb_pm { u32 min_buff; u32 max_buff; struct mlxsw_cp_sb_occ occ; }; -#define MLXSW_SP_SB_POOL_COUNT 4 -#define MLXSW_SP_SB_TC_COUNT 8 +struct mlxsw_sp_sb_pool_des { + enum mlxsw_reg_sbxx_dir dir; + u8 pool; +}; + +/* Order ingress pools before egress pools. 
*/ +static const struct mlxsw_sp_sb_pool_des mlxsw_sp_sb_pool_dess[] = { + {MLXSW_REG_SBXX_DIR_INGRESS, 0}, + {MLXSW_REG_SBXX_DIR_INGRESS, 1}, + {MLXSW_REG_SBXX_DIR_INGRESS, 2}, + {MLXSW_REG_SBXX_DIR_INGRESS, 3}, + {MLXSW_REG_SBXX_DIR_EGRESS, 0}, + {MLXSW_REG_SBXX_DIR_EGRESS, 1}, + {MLXSW_REG_SBXX_DIR_EGRESS, 2}, + {MLXSW_REG_SBXX_DIR_EGRESS, 3}, + {MLXSW_REG_SBXX_DIR_EGRESS, 15}, +}; + +#define MLXSW_SP_SB_POOL_DESS_LEN ARRAY_SIZE(mlxsw_sp_sb_pool_dess) + +#define MLXSW_SP_SB_ING_TC_COUNT 8 +#define MLXSW_SP_SB_EG_TC_COUNT 16 struct mlxsw_sp_sb_port { - struct mlxsw_sp_sb_cm cms[2][MLXSW_SP_SB_TC_COUNT]; - struct mlxsw_sp_sb_pm pms[2][MLXSW_SP_SB_POOL_COUNT]; + struct mlxsw_sp_sb_cm ing_cms[MLXSW_SP_SB_ING_TC_COUNT]; + struct mlxsw_sp_sb_cm eg_cms[MLXSW_SP_SB_EG_TC_COUNT]; + struct mlxsw_sp_sb_pm pms[MLXSW_SP_SB_POOL_DESS_LEN]; }; struct mlxsw_sp_sb { - struct mlxsw_sp_sb_pr prs[2][MLXSW_SP_SB_POOL_COUNT]; + struct mlxsw_sp_sb_pr prs[MLXSW_SP_SB_POOL_DESS_LEN]; struct mlxsw_sp_sb_port *ports; u32 cell_size; + u64 sb_size; }; u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp, u32 cells) @@ -60,95 +84,122 @@ u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp, u32 bytes) } static struct mlxsw_sp_sb_pr *mlxsw_sp_sb_pr_get(struct mlxsw_sp *mlxsw_sp, - u8 pool, - enum mlxsw_reg_sbxx_dir dir) + u16 pool_index) { - return &mlxsw_sp->sb->prs[dir][pool]; + return &mlxsw_sp->sb->prs[pool_index]; +} + +static bool mlxsw_sp_sb_cm_exists(u8 pg_buff, enum mlxsw_reg_sbxx_dir dir) +{ + if (dir == MLXSW_REG_SBXX_DIR_INGRESS) + return pg_buff < MLXSW_SP_SB_ING_TC_COUNT; + else + return pg_buff < MLXSW_SP_SB_EG_TC_COUNT; } static struct mlxsw_sp_sb_cm *mlxsw_sp_sb_cm_get(struct mlxsw_sp *mlxsw_sp, u8 local_port, u8 pg_buff, enum mlxsw_reg_sbxx_dir dir) { - return &mlxsw_sp->sb->ports[local_port].cms[dir][pg_buff]; + struct mlxsw_sp_sb_port *sb_port = &mlxsw_sp->sb->ports[local_port]; + + WARN_ON(!mlxsw_sp_sb_cm_exists(pg_buff, dir)); + if (dir == MLXSW_REG_SBXX_DIR_INGRESS) + return &sb_port->ing_cms[pg_buff]; + else + return &sb_port->eg_cms[pg_buff]; } static struct mlxsw_sp_sb_pm *mlxsw_sp_sb_pm_get(struct mlxsw_sp *mlxsw_sp, - u8 local_port, u8 pool, - enum mlxsw_reg_sbxx_dir dir) + u8 local_port, u16 pool_index) { - return &mlxsw_sp->sb->ports[local_port].pms[dir][pool]; + return &mlxsw_sp->sb->ports[local_port].pms[pool_index]; } -static int mlxsw_sp_sb_pr_write(struct mlxsw_sp *mlxsw_sp, u8 pool, - enum mlxsw_reg_sbxx_dir dir, - enum mlxsw_reg_sbpr_mode mode, u32 size) +static int mlxsw_sp_sb_pr_write(struct mlxsw_sp *mlxsw_sp, u16 pool_index, + enum mlxsw_reg_sbpr_mode mode, + u32 size, bool infi_size) { + const struct mlxsw_sp_sb_pool_des *des = + &mlxsw_sp_sb_pool_dess[pool_index]; char sbpr_pl[MLXSW_REG_SBPR_LEN]; struct mlxsw_sp_sb_pr *pr; int err; - mlxsw_reg_sbpr_pack(sbpr_pl, pool, dir, mode, size); + mlxsw_reg_sbpr_pack(sbpr_pl, des->pool, des->dir, mode, + size, infi_size); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl); if (err) return err; - pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir); + if (infi_size) + size = mlxsw_sp_bytes_cells(mlxsw_sp, mlxsw_sp->sb->sb_size); + pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index); pr->mode = mode; pr->size = size; return 0; } static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port, - u8 pg_buff, enum mlxsw_reg_sbxx_dir dir, - u32 min_buff, u32 max_buff, u8 pool) + u8 pg_buff, u32 min_buff, u32 max_buff, + bool infi_max, u16 pool_index) { + const struct mlxsw_sp_sb_pool_des *des = + 
&mlxsw_sp_sb_pool_dess[pool_index]; char sbcm_pl[MLXSW_REG_SBCM_LEN]; + struct mlxsw_sp_sb_cm *cm; int err; - mlxsw_reg_sbcm_pack(sbcm_pl, local_port, pg_buff, dir, - min_buff, max_buff, pool); + mlxsw_reg_sbcm_pack(sbcm_pl, local_port, pg_buff, des->dir, + min_buff, max_buff, infi_max, des->pool); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbcm), sbcm_pl); if (err) return err; - if (pg_buff < MLXSW_SP_SB_TC_COUNT) { - struct mlxsw_sp_sb_cm *cm; - cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, pg_buff, dir); + if (mlxsw_sp_sb_cm_exists(pg_buff, des->dir)) { + if (infi_max) + max_buff = mlxsw_sp_bytes_cells(mlxsw_sp, + mlxsw_sp->sb->sb_size); + + cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, pg_buff, + des->dir); cm->min_buff = min_buff; cm->max_buff = max_buff; - cm->pool = pool; + cm->pool_index = pool_index; } return 0; } static int mlxsw_sp_sb_pm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port, - u8 pool, enum mlxsw_reg_sbxx_dir dir, - u32 min_buff, u32 max_buff) + u16 pool_index, u32 min_buff, u32 max_buff) { + const struct mlxsw_sp_sb_pool_des *des = + &mlxsw_sp_sb_pool_dess[pool_index]; char sbpm_pl[MLXSW_REG_SBPM_LEN]; struct mlxsw_sp_sb_pm *pm; int err; - mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, false, + mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir, false, min_buff, max_buff); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl); if (err) return err; - pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool, dir); + pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool_index); pm->min_buff = min_buff; pm->max_buff = max_buff; return 0; } static int mlxsw_sp_sb_pm_occ_clear(struct mlxsw_sp *mlxsw_sp, u8 local_port, - u8 pool, enum mlxsw_reg_sbxx_dir dir, - struct list_head *bulk_list) + u16 pool_index, struct list_head *bulk_list) { + const struct mlxsw_sp_sb_pool_des *des = + &mlxsw_sp_sb_pool_dess[pool_index]; char sbpm_pl[MLXSW_REG_SBPM_LEN]; - mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, true, 0, 0); + mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir, + true, 0, 0); return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl, bulk_list, NULL, 0); } @@ -163,14 +214,16 @@ static void mlxsw_sp_sb_pm_occ_query_cb(struct mlxsw_core *mlxsw_core, } static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u8 local_port, - u8 pool, enum mlxsw_reg_sbxx_dir dir, - struct list_head *bulk_list) + u16 pool_index, struct list_head *bulk_list) { + const struct mlxsw_sp_sb_pool_des *des = + &mlxsw_sp_sb_pool_dess[pool_index]; char sbpm_pl[MLXSW_REG_SBPM_LEN]; struct mlxsw_sp_sb_pm *pm; - pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool, dir); - mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, false, 0, 0); + pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool_index); + mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir, + false, 0, 0); return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl, bulk_list, mlxsw_sp_sb_pm_occ_query_cb, @@ -254,63 +307,54 @@ static void mlxsw_sp_sb_ports_fini(struct mlxsw_sp *mlxsw_sp) .size = _size, \ } -static const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs_ingress[] = { +static const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs[] = { + /* Ingress pools. 
*/ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_PR_INGRESS_SIZE), MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0), MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0), MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_PR_INGRESS_MNG_SIZE), -}; - -#define MLXSW_SP_SB_PRS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_prs_ingress) - -static const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs_egress[] = { + /* Egress pools. */ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_PR_EGRESS_SIZE), MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0), MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0), MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0), + MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, MLXSW_SP_SB_INFI), }; -#define MLXSW_SP_SB_PRS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_prs_egress) +#define MLXSW_SP_SB_PRS_LEN ARRAY_SIZE(mlxsw_sp_sb_prs) -static int __mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp, - enum mlxsw_reg_sbxx_dir dir, - const struct mlxsw_sp_sb_pr *prs, - size_t prs_len) +static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_sb_pr *prs, + size_t prs_len) { int i; int err; for (i = 0; i < prs_len; i++) { - u32 size = mlxsw_sp_bytes_cells(mlxsw_sp, prs[i].size); - - err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, dir, prs[i].mode, size); + u32 size = prs[i].size; + u32 size_cells; + + if (size == MLXSW_SP_SB_INFI) { + err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, prs[i].mode, + 0, true); + } else { + size_cells = mlxsw_sp_bytes_cells(mlxsw_sp, size); + err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, prs[i].mode, + size_cells, false); + } if (err) return err; } return 0; } -static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp) -{ - int err; - - err = __mlxsw_sp_sb_prs_init(mlxsw_sp, MLXSW_REG_SBXX_DIR_INGRESS, - mlxsw_sp_sb_prs_ingress, - MLXSW_SP_SB_PRS_INGRESS_LEN); - if (err) - return err; - return __mlxsw_sp_sb_prs_init(mlxsw_sp, MLXSW_REG_SBXX_DIR_EGRESS, - mlxsw_sp_sb_prs_egress, - MLXSW_SP_SB_PRS_EGRESS_LEN); -} - #define MLXSW_SP_SB_CM(_min_buff, _max_buff, _pool) \ { \ .min_buff = _min_buff, \ .max_buff = _max_buff, \ - .pool = _pool, \ + .pool_index = _pool, \ } static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_ingress[] = { @@ -329,38 +373,38 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_ingress[] = { #define MLXSW_SP_SB_CMS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms_ingress) static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_egress[] = { - MLXSW_SP_SB_CM(1500, 9, 0), - MLXSW_SP_SB_CM(1500, 9, 0), - MLXSW_SP_SB_CM(1500, 9, 0), - MLXSW_SP_SB_CM(1500, 9, 0), - MLXSW_SP_SB_CM(1500, 9, 0), - MLXSW_SP_SB_CM(1500, 9, 0), - MLXSW_SP_SB_CM(1500, 9, 0), - MLXSW_SP_SB_CM(1500, 9, 0), - MLXSW_SP_SB_CM(0, 140000, 15), - MLXSW_SP_SB_CM(0, 140000, 15), - MLXSW_SP_SB_CM(0, 140000, 15), - MLXSW_SP_SB_CM(0, 140000, 15), - MLXSW_SP_SB_CM(0, 140000, 15), - MLXSW_SP_SB_CM(0, 140000, 15), - MLXSW_SP_SB_CM(0, 140000, 15), - MLXSW_SP_SB_CM(0, 140000, 15), - MLXSW_SP_SB_CM(1, 0xff, 0), + MLXSW_SP_SB_CM(1500, 9, 4), + MLXSW_SP_SB_CM(1500, 9, 4), + MLXSW_SP_SB_CM(1500, 9, 4), + MLXSW_SP_SB_CM(1500, 9, 4), + MLXSW_SP_SB_CM(1500, 9, 4), + MLXSW_SP_SB_CM(1500, 9, 4), + MLXSW_SP_SB_CM(1500, 9, 4), + MLXSW_SP_SB_CM(1500, 9, 4), + MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8), + MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8), + MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8), + MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8), + MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8), + MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8), + MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8), + MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8), + MLXSW_SP_SB_CM(1, 0xff, 4), }; #define 
MLXSW_SP_SB_CMS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms_egress) -#define MLXSW_SP_CPU_PORT_SB_CM MLXSW_SP_SB_CM(0, 0, 0) +#define MLXSW_SP_CPU_PORT_SB_CM MLXSW_SP_SB_CM(0, 0, 4) static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = { MLXSW_SP_CPU_PORT_SB_CM, - MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0), - MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0), - MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0), - MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0), - MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0), + MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4), + MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4), + MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4), + MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4), + MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4), MLXSW_SP_CPU_PORT_SB_CM, - MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0), + MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4), MLXSW_SP_CPU_PORT_SB_CM, MLXSW_SP_CPU_PORT_SB_CM, MLXSW_SP_CPU_PORT_SB_CM, @@ -390,6 +434,14 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = { #define MLXSW_SP_CPU_PORT_SB_MCS_LEN \ ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms) +static bool +mlxsw_sp_sb_pool_is_static(struct mlxsw_sp *mlxsw_sp, u16 pool_index) +{ + struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index); + + return pr->mode == MLXSW_REG_SBPR_MODE_STATIC; +} + static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port, enum mlxsw_reg_sbxx_dir dir, const struct mlxsw_sp_sb_cm *cms, @@ -401,16 +453,29 @@ static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port, for (i = 0; i < cms_len; i++) { const struct mlxsw_sp_sb_cm *cm; u32 min_buff; + u32 max_buff; if (i == 8 && dir == MLXSW_REG_SBXX_DIR_INGRESS) continue; /* PG number 8 does not exist, skip it */ cm = &cms[i]; - /* All pools are initialized using dynamic thresholds, - * therefore 'max_buff' isn't specified in cells. - */ + if (WARN_ON(mlxsw_sp_sb_pool_dess[cm->pool_index].dir != dir)) + continue; + min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, cm->min_buff); - err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i, dir, - min_buff, cm->max_buff, cm->pool); + max_buff = cm->max_buff; + if (max_buff == MLXSW_SP_SB_INFI) { + err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i, + min_buff, 0, + true, cm->pool_index); + } else { + if (mlxsw_sp_sb_pool_is_static(mlxsw_sp, + cm->pool_index)) + max_buff = mlxsw_sp_bytes_cells(mlxsw_sp, + max_buff); + err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i, + min_buff, max_buff, + false, cm->pool_index); + } if (err) return err; } @@ -448,91 +513,74 @@ static int mlxsw_sp_cpu_port_sb_cms_init(struct mlxsw_sp *mlxsw_sp) .max_buff = _max_buff, \ } -static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms_ingress[] = { +static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms[] = { + /* Ingress pools. */ MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX), MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN), MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN), MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX), -}; - -#define MLXSW_SP_SB_PMS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms_ingress) - -static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms_egress[] = { + /* Egress pools. 
*/ MLXSW_SP_SB_PM(0, 7), MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN), MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN), MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN), + MLXSW_SP_SB_PM(10000, 90000), }; -#define MLXSW_SP_SB_PMS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms_egress) +#define MLXSW_SP_SB_PMS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms) -static int __mlxsw_sp_port_sb_pms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port, - enum mlxsw_reg_sbxx_dir dir, - const struct mlxsw_sp_sb_pm *pms, - size_t pms_len) +static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port) { + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; int i; int err; - for (i = 0; i < pms_len; i++) { - const struct mlxsw_sp_sb_pm *pm; + for (i = 0; i < MLXSW_SP_SB_PMS_LEN; i++) { + const struct mlxsw_sp_sb_pm *pm = &mlxsw_sp_sb_pms[i]; + u32 max_buff; + u32 min_buff; - pm = &pms[i]; - err = mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, i, dir, - pm->min_buff, pm->max_buff); + min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, pm->min_buff); + max_buff = pm->max_buff; + if (mlxsw_sp_sb_pool_is_static(mlxsw_sp, i)) + max_buff = mlxsw_sp_bytes_cells(mlxsw_sp, max_buff); + err = mlxsw_sp_sb_pm_write(mlxsw_sp, mlxsw_sp_port->local_port, + i, min_buff, max_buff); if (err) return err; } return 0; } -static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port) -{ - int err; - - err = __mlxsw_sp_port_sb_pms_init(mlxsw_sp_port->mlxsw_sp, - mlxsw_sp_port->local_port, - MLXSW_REG_SBXX_DIR_INGRESS, - mlxsw_sp_sb_pms_ingress, - MLXSW_SP_SB_PMS_INGRESS_LEN); - if (err) - return err; - return __mlxsw_sp_port_sb_pms_init(mlxsw_sp_port->mlxsw_sp, - mlxsw_sp_port->local_port, - MLXSW_REG_SBXX_DIR_EGRESS, - mlxsw_sp_sb_pms_egress, - MLXSW_SP_SB_PMS_EGRESS_LEN); -} - struct mlxsw_sp_sb_mm { u32 min_buff; u32 max_buff; - u8 pool; + u16 pool_index; }; #define MLXSW_SP_SB_MM(_min_buff, _max_buff, _pool) \ { \ .min_buff = _min_buff, \ .max_buff = _max_buff, \ - .pool = _pool, \ + .pool_index = _pool, \ } static const struct mlxsw_sp_sb_mm mlxsw_sp_sb_mms[] = { - MLXSW_SP_SB_MM(20000, 0xff, 0), - MLXSW_SP_SB_MM(20000, 0xff, 0), - MLXSW_SP_SB_MM(20000, 0xff, 0), - MLXSW_SP_SB_MM(20000, 0xff, 0), - MLXSW_SP_SB_MM(20000, 0xff, 0), - MLXSW_SP_SB_MM(20000, 0xff, 0), - MLXSW_SP_SB_MM(20000, 0xff, 0), - MLXSW_SP_SB_MM(20000, 0xff, 0), - MLXSW_SP_SB_MM(20000, 0xff, 0), - MLXSW_SP_SB_MM(20000, 0xff, 0), - MLXSW_SP_SB_MM(20000, 0xff, 0), - MLXSW_SP_SB_MM(20000, 0xff, 0), - MLXSW_SP_SB_MM(20000, 0xff, 0), - MLXSW_SP_SB_MM(20000, 0xff, 0), - MLXSW_SP_SB_MM(20000, 0xff, 0), + MLXSW_SP_SB_MM(0, 6, 4), + MLXSW_SP_SB_MM(0, 6, 4), + MLXSW_SP_SB_MM(0, 6, 4), + MLXSW_SP_SB_MM(0, 6, 4), + MLXSW_SP_SB_MM(0, 6, 4), + MLXSW_SP_SB_MM(0, 6, 4), + MLXSW_SP_SB_MM(0, 6, 4), + MLXSW_SP_SB_MM(0, 6, 4), + MLXSW_SP_SB_MM(0, 6, 4), + MLXSW_SP_SB_MM(0, 6, 4), + MLXSW_SP_SB_MM(0, 6, 4), + MLXSW_SP_SB_MM(0, 6, 4), + MLXSW_SP_SB_MM(0, 6, 4), + MLXSW_SP_SB_MM(0, 6, 4), + MLXSW_SP_SB_MM(0, 6, 4), }; #define MLXSW_SP_SB_MMS_LEN ARRAY_SIZE(mlxsw_sp_sb_mms) @@ -544,16 +592,18 @@ static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp) int err; for (i = 0; i < MLXSW_SP_SB_MMS_LEN; i++) { + const struct mlxsw_sp_sb_pool_des *des; const struct mlxsw_sp_sb_mm *mc; u32 min_buff; mc = &mlxsw_sp_sb_mms[i]; - /* All pools are initialized using dynamic thresholds, - * therefore 'max_buff' isn't specified in cells. 
+ des = &mlxsw_sp_sb_pool_dess[mc->pool_index]; + /* All pools used by sb_mm's are initialized using dynamic + * thresholds, therefore 'max_buff' isn't specified in cells. */ min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, mc->min_buff); mlxsw_reg_sbmm_pack(sbmm_pl, i, min_buff, mc->max_buff, - mc->pool); + des->pool); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbmm), sbmm_pl); if (err) return err; @@ -561,9 +611,24 @@ static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp) return 0; } +static void mlxsw_sp_pool_count(u16 *p_ingress_len, u16 *p_egress_len) +{ + int i; + + for (i = 0; i < MLXSW_SP_SB_POOL_DESS_LEN; ++i) + if (mlxsw_sp_sb_pool_dess[i].dir == MLXSW_REG_SBXX_DIR_EGRESS) + goto out; + WARN(1, "No egress pools\n"); + +out: + *p_ingress_len = i; + *p_egress_len = MLXSW_SP_SB_POOL_DESS_LEN - i; +} + int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp) { - u64 sb_size; + u16 ing_pool_count; + u16 eg_pool_count; int err; if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, CELL_SIZE)) @@ -571,17 +636,19 @@ int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp) if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_BUFFER_SIZE)) return -EIO; - sb_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE); mlxsw_sp->sb = kzalloc(sizeof(*mlxsw_sp->sb), GFP_KERNEL); if (!mlxsw_sp->sb) return -ENOMEM; mlxsw_sp->sb->cell_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, CELL_SIZE); + mlxsw_sp->sb->sb_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, + MAX_BUFFER_SIZE); err = mlxsw_sp_sb_ports_init(mlxsw_sp); if (err) goto err_sb_ports_init; - err = mlxsw_sp_sb_prs_init(mlxsw_sp); + err = mlxsw_sp_sb_prs_init(mlxsw_sp, mlxsw_sp_sb_prs, + MLXSW_SP_SB_PRS_LEN); if (err) goto err_sb_prs_init; err = mlxsw_sp_cpu_port_sb_cms_init(mlxsw_sp); @@ -590,11 +657,13 @@ int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp) err = mlxsw_sp_sb_mms_init(mlxsw_sp); if (err) goto err_sb_mms_init; - err = devlink_sb_register(priv_to_devlink(mlxsw_sp->core), 0, sb_size, - MLXSW_SP_SB_POOL_COUNT, - MLXSW_SP_SB_POOL_COUNT, - MLXSW_SP_SB_TC_COUNT, - MLXSW_SP_SB_TC_COUNT); + mlxsw_sp_pool_count(&ing_pool_count, &eg_pool_count); + err = devlink_sb_register(priv_to_devlink(mlxsw_sp->core), 0, + mlxsw_sp->sb->sb_size, + ing_pool_count, + eg_pool_count, + MLXSW_SP_SB_ING_TC_COUNT, + MLXSW_SP_SB_EG_TC_COUNT); if (err) goto err_devlink_sb_register; @@ -632,36 +701,15 @@ int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port) return err; } -static u8 pool_get(u16 pool_index) -{ - return pool_index % MLXSW_SP_SB_POOL_COUNT; -} - -static u16 pool_index_get(u8 pool, enum mlxsw_reg_sbxx_dir dir) -{ - u16 pool_index; - - pool_index = pool; - if (dir == MLXSW_REG_SBXX_DIR_EGRESS) - pool_index += MLXSW_SP_SB_POOL_COUNT; - return pool_index; -} - -static enum mlxsw_reg_sbxx_dir dir_get(u16 pool_index) -{ - return pool_index < MLXSW_SP_SB_POOL_COUNT ? 
- MLXSW_REG_SBXX_DIR_INGRESS : MLXSW_REG_SBXX_DIR_EGRESS; -} - int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core, unsigned int sb_index, u16 pool_index, struct devlink_sb_pool_info *pool_info) { + enum mlxsw_reg_sbxx_dir dir = mlxsw_sp_sb_pool_dess[pool_index].dir; struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); - u8 pool = pool_get(pool_index); - enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index); - struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir); + struct mlxsw_sp_sb_pr *pr; + pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index); pool_info->pool_type = (enum devlink_sb_pool_type) dir; pool_info->size = mlxsw_sp_cells_bytes(mlxsw_sp, pr->size); pool_info->threshold_type = (enum devlink_sb_threshold_type) pr->mode; @@ -674,34 +722,32 @@ int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core, { struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); u32 pool_size = mlxsw_sp_bytes_cells(mlxsw_sp, size); - u8 pool = pool_get(pool_index); - enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index); enum mlxsw_reg_sbpr_mode mode; if (size > MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE)) return -EINVAL; mode = (enum mlxsw_reg_sbpr_mode) threshold_type; - return mlxsw_sp_sb_pr_write(mlxsw_sp, pool, dir, mode, pool_size); + return mlxsw_sp_sb_pr_write(mlxsw_sp, pool_index, mode, + pool_size, false); } #define MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET (-2) /* 3->1, 16->14 */ -static u32 mlxsw_sp_sb_threshold_out(struct mlxsw_sp *mlxsw_sp, u8 pool, - enum mlxsw_reg_sbxx_dir dir, u32 max_buff) +static u32 mlxsw_sp_sb_threshold_out(struct mlxsw_sp *mlxsw_sp, u16 pool_index, + u32 max_buff) { - struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir); + struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index); if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC) return max_buff - MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET; return mlxsw_sp_cells_bytes(mlxsw_sp, max_buff); } -static int mlxsw_sp_sb_threshold_in(struct mlxsw_sp *mlxsw_sp, u8 pool, - enum mlxsw_reg_sbxx_dir dir, u32 threshold, - u32 *p_max_buff) +static int mlxsw_sp_sb_threshold_in(struct mlxsw_sp *mlxsw_sp, u16 pool_index, + u32 threshold, u32 *p_max_buff) { - struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir); + struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index); if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC) { int val; @@ -725,12 +771,10 @@ int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port, mlxsw_core_port_driver_priv(mlxsw_core_port); struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; u8 local_port = mlxsw_sp_port->local_port; - u8 pool = pool_get(pool_index); - enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index); struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, - pool, dir); + pool_index); - *p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, pool, dir, + *p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, pool_index, pm->max_buff); return 0; } @@ -743,17 +787,15 @@ int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port, mlxsw_core_port_driver_priv(mlxsw_core_port); struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; u8 local_port = mlxsw_sp_port->local_port; - u8 pool = pool_get(pool_index); - enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index); u32 max_buff; int err; - err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool, dir, + err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool_index, threshold, &max_buff); if (err) return err; - return mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, pool, dir, + return 
mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, pool_index, 0, max_buff); } @@ -771,9 +813,9 @@ int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port, struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, pg_buff, dir); - *p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, cm->pool, dir, + *p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, cm->pool_index, cm->max_buff); - *p_pool_index = pool_index_get(cm->pool, dir); + *p_pool_index = cm->pool_index; return 0; } @@ -788,24 +830,24 @@ int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port, u8 local_port = mlxsw_sp_port->local_port; u8 pg_buff = tc_index; enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type; - u8 pool = pool_get(pool_index); u32 max_buff; int err; - if (dir != dir_get(pool_index)) + if (dir != mlxsw_sp_sb_pool_dess[pool_index].dir) return -EINVAL; - err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool, dir, + err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool_index, threshold, &max_buff); if (err) return err; - return mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, pg_buff, dir, - 0, max_buff, pool); + return mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, pg_buff, + 0, max_buff, false, pool_index); } #define MASKED_COUNT_MAX \ - (MLXSW_REG_SBSR_REC_MAX_COUNT / (MLXSW_SP_SB_TC_COUNT * 2)) + (MLXSW_REG_SBSR_REC_MAX_COUNT / \ + (MLXSW_SP_SB_ING_TC_COUNT + MLXSW_SP_SB_EG_TC_COUNT)) struct mlxsw_sp_sb_sr_occ_query_cb_ctx { u8 masked_count; @@ -831,7 +873,7 @@ static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core, local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) { if (!mlxsw_sp->ports[local_port]) continue; - for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) { + for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++) { cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i, MLXSW_REG_SBXX_DIR_INGRESS); mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++, @@ -845,7 +887,7 @@ static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core, local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) { if (!mlxsw_sp->ports[local_port]) continue; - for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) { + for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++) { cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i, MLXSW_REG_SBXX_DIR_EGRESS); mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++, @@ -880,23 +922,17 @@ next_batch: local_port_1 = local_port; masked_count = 0; mlxsw_reg_sbsr_pack(sbsr_pl, false); - for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) { + for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++) mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1); + for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++) mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1); - } for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) { if (!mlxsw_sp->ports[local_port]) continue; mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1); mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1); - for (i = 0; i < MLXSW_SP_SB_POOL_COUNT; i++) { - err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i, - MLXSW_REG_SBXX_DIR_INGRESS, - &bulk_list); - if (err) - goto out; + for (i = 0; i < MLXSW_SP_SB_POOL_DESS_LEN; i++) { err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i, - MLXSW_REG_SBXX_DIR_EGRESS, &bulk_list); if (err) goto out; @@ -945,23 +981,17 @@ next_batch: local_port++; masked_count = 0; mlxsw_reg_sbsr_pack(sbsr_pl, true); - for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) { + for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++) mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1); + for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++) 
mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1); - } for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) { if (!mlxsw_sp->ports[local_port]) continue; mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1); mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1); - for (i = 0; i < MLXSW_SP_SB_POOL_COUNT; i++) { - err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i, - MLXSW_REG_SBXX_DIR_INGRESS, - &bulk_list); - if (err) - goto out; + for (i = 0; i < MLXSW_SP_SB_POOL_DESS_LEN; i++) { err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i, - MLXSW_REG_SBXX_DIR_EGRESS, &bulk_list); if (err) goto out; @@ -994,10 +1024,8 @@ int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port, mlxsw_core_port_driver_priv(mlxsw_core_port); struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; u8 local_port = mlxsw_sp_port->local_port; - u8 pool = pool_get(pool_index); - enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index); struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, - pool, dir); + pool_index); *p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.cur); *p_max = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.max); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h index 1a60391daafa..3dbafdeaab2b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h @@ -7,17 +7,6 @@ #include "spectrum.h" #include "reg.h" -enum mlxsw_sp_l3proto { - MLXSW_SP_L3_PROTO_IPV4, - MLXSW_SP_L3_PROTO_IPV6, -#define MLXSW_SP_L3_PROTO_MAX (MLXSW_SP_L3_PROTO_IPV6 + 1) -}; - -union mlxsw_sp_l3addr { - __be32 addr4; - struct in6_addr addr6; -}; - struct mlxsw_sp_rif_ipip_lb; struct mlxsw_sp_rif_ipip_lb_config { enum mlxsw_reg_ritr_loopback_ipip_type lb_ipipt; @@ -35,8 +24,6 @@ struct mlxsw_sp_neigh_entry; struct mlxsw_sp_nexthop; struct mlxsw_sp_ipip_entry; -struct mlxsw_sp_rif *mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp, - const struct net_device *dev); struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp, u16 rif_index); u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif); @@ -44,9 +31,7 @@ u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *rif); u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *rif); u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev); int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif); -u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp); const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif); -struct mlxsw_sp_fid *mlxsw_sp_rif_fid(const struct mlxsw_sp_rif *rif); int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_rif *rif, enum mlxsw_sp_rif_counter_dir dir, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index db715da7bab7..fa16ad2c6a50 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -17,7 +17,6 @@ #include <net/switchdev.h> #include "spectrum_span.h" -#include "spectrum_router.h" #include "spectrum_switchdev.h" #include "spectrum.h" #include "core.h" @@ -2289,7 +2288,7 @@ struct mlxsw_sp_switchdev_event_work { unsigned long event; }; -static void mlxsw_sp_switchdev_event_work(struct work_struct *work) +static void mlxsw_sp_switchdev_bridge_fdb_event_work(struct work_struct *work) { struct mlxsw_sp_switchdev_event_work 
*switchdev_work = container_of(work, struct mlxsw_sp_switchdev_event_work, work); @@ -2344,16 +2343,23 @@ static int mlxsw_sp_switchdev_event(struct notifier_block *unused, { struct net_device *dev = switchdev_notifier_info_to_dev(ptr); struct mlxsw_sp_switchdev_event_work *switchdev_work; - struct switchdev_notifier_fdb_info *fdb_info = ptr; + struct switchdev_notifier_fdb_info *fdb_info; + struct switchdev_notifier_info *info = ptr; + struct net_device *br_dev; - if (!mlxsw_sp_port_dev_lower_find_rcu(dev)) + /* Tunnel devices are not our uppers, so check their master instead */ + br_dev = netdev_master_upper_dev_get_rcu(dev); + if (!br_dev) + return NOTIFY_DONE; + if (!netif_is_bridge_master(br_dev)) + return NOTIFY_DONE; + if (!mlxsw_sp_port_dev_lower_find_rcu(br_dev)) return NOTIFY_DONE; switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC); if (!switchdev_work) return NOTIFY_BAD; - INIT_WORK(&switchdev_work->work, mlxsw_sp_switchdev_event_work); switchdev_work->dev = dev; switchdev_work->event = event; @@ -2362,6 +2368,11 @@ static int mlxsw_sp_switchdev_event(struct notifier_block *unused, case SWITCHDEV_FDB_DEL_TO_DEVICE: /* fall through */ case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */ case SWITCHDEV_FDB_DEL_TO_BRIDGE: + fdb_info = container_of(info, + struct switchdev_notifier_fdb_info, + info); + INIT_WORK(&switchdev_work->work, + mlxsw_sp_switchdev_bridge_fdb_event_work); memcpy(&switchdev_work->fdb_info, ptr, sizeof(switchdev_work->fdb_info)); switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC); diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h index 53020724c2f6..6f18f4d3322a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/trap.h +++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h @@ -24,6 +24,7 @@ enum { MLXSW_TRAP_ID_IGMP_V3_REPORT = 0x34, MLXSW_TRAP_ID_PKT_SAMPLE = 0x38, MLXSW_TRAP_ID_FID_MISS = 0x3D, + MLXSW_TRAP_ID_DECAP_ECN0 = 0x40, MLXSW_TRAP_ID_ARPBC = 0x50, MLXSW_TRAP_ID_ARPUC = 0x51, MLXSW_TRAP_ID_MTUERROR = 0x52, @@ -59,6 +60,7 @@ enum { MLXSW_TRAP_ID_IPV6_MC_LINK_LOCAL_DEST = 0x91, MLXSW_TRAP_ID_HOST_MISS_IPV6 = 0x92, MLXSW_TRAP_ID_IPIP_DECAP_ERROR = 0xB1, + MLXSW_TRAP_ID_NVE_ENCAP_ARP = 0xBD, MLXSW_TRAP_ID_ROUTER_ALERT_IPV4 = 0xD6, MLXSW_TRAP_ID_ROUTER_ALERT_IPV6 = 0xD7, MLXSW_TRAP_ID_ACL0 = 0x1C0, diff --git a/drivers/net/ethernet/micrel/ks8695net.c b/drivers/net/ethernet/micrel/ks8695net.c index bd51e057e915..b881f5d4a7f9 100644 --- a/drivers/net/ethernet/micrel/ks8695net.c +++ b/drivers/net/ethernet/micrel/ks8695net.c @@ -1164,7 +1164,7 @@ ks8695_timeout(struct net_device *ndev) * sk_buff and adds it to the TX ring. It then kicks the TX DMA * engine to ensure transmission begins. */ -static int +static netdev_tx_t ks8695_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct ks8695_priv *ksp = netdev_priv(ndev); diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c index 0e9719fbc624..35f8c9ef204d 100644 --- a/drivers/net/ethernet/micrel/ks8851_mll.c +++ b/drivers/net/ethernet/micrel/ks8851_mll.c @@ -1021,9 +1021,9 @@ static void ks_write_qmu(struct ks_net *ks, u8 *pdata, u16 len) * spin_lock_irqsave is required because tx and rx should be mutual exclusive. * So while tx is in-progress, prevent IRQ interrupt from happenning. 
*/ -static int ks_start_xmit(struct sk_buff *skb, struct net_device *netdev) +static netdev_tx_t ks_start_xmit(struct sk_buff *skb, struct net_device *netdev) { - int retv = NETDEV_TX_OK; + netdev_tx_t retv = NETDEV_TX_OK; struct ks_net *ks = netdev_priv(netdev); disable_irq(netdev->irq); diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c index 001b5f714c1b..867cddba840f 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c @@ -999,7 +999,6 @@ static int lan743x_phy_open(struct lan743x_adapter *adapter) struct phy_device *phydev; struct net_device *netdev; int ret = -EIO; - u32 mii_adv; netdev = adapter->netdev; phydev = phy_find_first(adapter->mdiobus); @@ -1013,13 +1012,11 @@ static int lan743x_phy_open(struct lan743x_adapter *adapter) goto return_error; /* MAC doesn't support 1000T Half */ - phydev->supported &= ~SUPPORTED_1000baseT_Half; + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT); /* support both flow controls */ + phy_support_asym_pause(phydev); phy->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX); - phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause); - mii_adv = (u32)mii_advertise_flowctrl(phy->fc_request_control); - phydev->advertising |= mii_adv_to_ethtool_adv_t(mii_adv); phy->fc_autoneg = phydev->autoneg; phy_start(phydev); diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c index ccdf9123f26f..b2109eca81fd 100644 --- a/drivers/net/ethernet/microchip/lan743x_ptp.c +++ b/drivers/net/ethernet/microchip/lan743x_ptp.c @@ -977,8 +977,8 @@ void lan743x_ptp_close(struct lan743x_adapter *adapter) lan743x_ptp_disable(adapter); } -void lan743x_ptp_set_sync_ts_insert(struct lan743x_adapter *adapter, - bool ts_insert_enable) +static void lan743x_ptp_set_sync_ts_insert(struct lan743x_adapter *adapter, + bool ts_insert_enable) { u32 ptp_tx_mod = lan743x_csr_read(adapter, PTP_TX_MOD); diff --git a/drivers/net/ethernet/mscc/Kconfig b/drivers/net/ethernet/mscc/Kconfig index 36c84625d54e..bcec0587cf61 100644 --- a/drivers/net/ethernet/mscc/Kconfig +++ b/drivers/net/ethernet/mscc/Kconfig @@ -23,6 +23,8 @@ config MSCC_OCELOT_SWITCH config MSCC_OCELOT_SWITCH_OCELOT tristate "Ocelot switch driver on Ocelot" depends on MSCC_OCELOT_SWITCH + depends on GENERIC_PHY + depends on OF_NET help This driver supports the Ocelot network switch device as present on the Ocelot SoCs. 
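[Editor's note] The mlxsw shared-buffer hunks above collapse the old (pool, direction) addressing into a single devlink pool_index that indexes a descriptor table: ingress pool descriptors come first, egress descriptors follow, and helpers like mlxsw_sp_pool_count() derive the per-direction counts from the table instead of a fixed MLXSW_SP_SB_POOL_COUNT. Below is a minimal standalone sketch of that scheme; the names and the 4+4 table are illustrative only (the patch's real mlxsw_sp_sb_pool_dess table has more entries), not the driver's actual definitions.

#include <linux/kernel.h>
#include <linux/types.h>

enum sbxx_dir { SBXX_DIR_INGRESS, SBXX_DIR_EGRESS };

struct sb_pool_des {
	enum sbxx_dir dir;
	u8 pool;	/* per-direction hardware pool number */
};

/* Ingress descriptors first, then egress; the array index is the
 * devlink-visible pool_index.
 */
static const struct sb_pool_des pool_dess[] = {
	{ SBXX_DIR_INGRESS, 0 }, { SBXX_DIR_INGRESS, 1 },
	{ SBXX_DIR_INGRESS, 2 }, { SBXX_DIR_INGRESS, 3 },
	{ SBXX_DIR_EGRESS,  0 }, { SBXX_DIR_EGRESS,  1 },
	{ SBXX_DIR_EGRESS,  2 }, { SBXX_DIR_EGRESS,  3 },
};

/* Mirrors mlxsw_sp_pool_count(): the ingress count is the index of the
 * first egress descriptor, the egress count is the remainder.  (The real
 * function WARNs if no egress pool exists at all.)
 */
static void pool_count(u16 *p_ingress, u16 *p_egress)
{
	u16 i;

	for (i = 0; i < ARRAY_SIZE(pool_dess); i++)
		if (pool_dess[i].dir == SBXX_DIR_EGRESS)
			break;
	*p_ingress = i;
	*p_egress = ARRAY_SIZE(pool_dess) - i;
}

With this layout the old pool_get()/pool_index_get()/dir_get() modular arithmetic becomes a plain table lookup (pool_dess[pool_index].dir and .pool), which is why those helpers are deleted above and callers such as mlxsw_sp_sb_pool_get() take the pool_index directly.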
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index 1a4f2bb48ead..9b8a17ee3cb3 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -472,6 +472,7 @@ static int ocelot_port_open(struct net_device *dev) { struct ocelot_port *port = netdev_priv(dev); struct ocelot *ocelot = port->ocelot; + enum phy_mode phy_mode; int err; /* Enable receiving frames on the port, and activate auto-learning of @@ -482,8 +483,21 @@ static int ocelot_port_open(struct net_device *dev) ANA_PORT_PORT_CFG_PORTID_VAL(port->chip_port), ANA_PORT_PORT_CFG, port->chip_port); + if (port->serdes) { + if (port->phy_mode == PHY_INTERFACE_MODE_SGMII) + phy_mode = PHY_MODE_SGMII; + else + phy_mode = PHY_MODE_QSGMII; + + err = phy_set_mode(port->serdes, phy_mode); + if (err) { + netdev_err(dev, "Could not set mode of SerDes\n"); + return err; + } + } + err = phy_connect_direct(dev, port->phy, &ocelot_port_adjust_link, - PHY_INTERFACE_MODE_NA); + port->phy_mode); if (err) { netdev_err(dev, "Could not attach to PHY\n"); return err; @@ -1606,7 +1620,7 @@ int ocelot_probe_port(struct ocelot *ocelot, u8 port, dev->ethtool_ops = &ocelot_ethtool_ops; dev->switchdev_ops = &ocelot_port_switchdev_ops; - dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; + dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXFCS; dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; memcpy(dev->dev_addr, ocelot->base_mac, ETH_ALEN); diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h index 616bec30dfa3..62c7c8eb00d9 100644 --- a/drivers/net/ethernet/mscc/ocelot.h +++ b/drivers/net/ethernet/mscc/ocelot.h @@ -11,12 +11,13 @@ #include <linux/bitops.h> #include <linux/etherdevice.h> #include <linux/if_vlan.h> +#include <linux/phy.h> +#include <linux/phy/phy.h> #include <linux/platform_device.h> #include <linux/regmap.h> #include "ocelot_ana.h" #include "ocelot_dev.h" -#include "ocelot_hsio.h" #include "ocelot_qsys.h" #include "ocelot_rew.h" #include "ocelot_sys.h" @@ -333,79 +334,6 @@ enum ocelot_reg { SYS_CM_DATA_RD, SYS_CM_OP, SYS_CM_DATA, - HSIO_PLL5G_CFG0 = HSIO << TARGET_OFFSET, - HSIO_PLL5G_CFG1, - HSIO_PLL5G_CFG2, - HSIO_PLL5G_CFG3, - HSIO_PLL5G_CFG4, - HSIO_PLL5G_CFG5, - HSIO_PLL5G_CFG6, - HSIO_PLL5G_STATUS0, - HSIO_PLL5G_STATUS1, - HSIO_PLL5G_BIST_CFG0, - HSIO_PLL5G_BIST_CFG1, - HSIO_PLL5G_BIST_CFG2, - HSIO_PLL5G_BIST_STAT0, - HSIO_PLL5G_BIST_STAT1, - HSIO_RCOMP_CFG0, - HSIO_RCOMP_STATUS, - HSIO_SYNC_ETH_CFG, - HSIO_SYNC_ETH_PLL_CFG, - HSIO_S1G_DES_CFG, - HSIO_S1G_IB_CFG, - HSIO_S1G_OB_CFG, - HSIO_S1G_SER_CFG, - HSIO_S1G_COMMON_CFG, - HSIO_S1G_PLL_CFG, - HSIO_S1G_PLL_STATUS, - HSIO_S1G_DFT_CFG0, - HSIO_S1G_DFT_CFG1, - HSIO_S1G_DFT_CFG2, - HSIO_S1G_TP_CFG, - HSIO_S1G_RC_PLL_BIST_CFG, - HSIO_S1G_MISC_CFG, - HSIO_S1G_DFT_STATUS, - HSIO_S1G_MISC_STATUS, - HSIO_MCB_S1G_ADDR_CFG, - HSIO_S6G_DIG_CFG, - HSIO_S6G_DFT_CFG0, - HSIO_S6G_DFT_CFG1, - HSIO_S6G_DFT_CFG2, - HSIO_S6G_TP_CFG0, - HSIO_S6G_TP_CFG1, - HSIO_S6G_RC_PLL_BIST_CFG, - HSIO_S6G_MISC_CFG, - HSIO_S6G_OB_ANEG_CFG, - HSIO_S6G_DFT_STATUS, - HSIO_S6G_ERR_CNT, - HSIO_S6G_MISC_STATUS, - HSIO_S6G_DES_CFG, - HSIO_S6G_IB_CFG, - HSIO_S6G_IB_CFG1, - HSIO_S6G_IB_CFG2, - HSIO_S6G_IB_CFG3, - HSIO_S6G_IB_CFG4, - HSIO_S6G_IB_CFG5, - HSIO_S6G_OB_CFG, - HSIO_S6G_OB_CFG1, - HSIO_S6G_SER_CFG, - HSIO_S6G_COMMON_CFG, - HSIO_S6G_PLL_CFG, - HSIO_S6G_ACJTAG_CFG, - HSIO_S6G_GP_CFG, - HSIO_S6G_IB_STATUS0, - HSIO_S6G_IB_STATUS1, - HSIO_S6G_ACJTAG_STATUS, - HSIO_S6G_PLL_STATUS, - HSIO_S6G_REVID, - HSIO_MCB_S6G_ADDR_CFG, - 
HSIO_HW_CFG, - HSIO_HW_QSGMII_CFG, - HSIO_HW_QSGMII_STAT, - HSIO_CLK_CFG, - HSIO_TEMP_SENSOR_CTRL, - HSIO_TEMP_SENSOR_CFG, - HSIO_TEMP_SENSOR_STAT, }; enum ocelot_regfield { @@ -527,6 +455,9 @@ struct ocelot_port { u8 vlan_aware; u64 *stats; + + phy_interface_t phy_mode; + struct phy *serdes; }; u32 __ocelot_read_ix(struct ocelot *ocelot, u32 reg, u32 offset); diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c index 3cdf63e35b53..4c23d18bbf44 100644 --- a/drivers/net/ethernet/mscc/ocelot_board.c +++ b/drivers/net/ethernet/mscc/ocelot_board.c @@ -6,9 +6,11 @@ */ #include <linux/interrupt.h> #include <linux/module.h> +#include <linux/of_net.h> #include <linux/netdevice.h> #include <linux/of_mdio.h> #include <linux/of_platform.h> +#include <linux/mfd/syscon.h> #include <linux/skbuff.h> #include "ocelot.h" @@ -126,11 +128,16 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg) len += sz; } while (len < buf_len); - /* Read the FCS and discard it */ + /* Read the FCS */ sz = ocelot_rx_frame_word(ocelot, grp, false, &val); /* Update the statistics if part of the FCS was read before */ len -= ETH_FCS_LEN - sz; + if (unlikely(dev->features & NETIF_F_RXFCS)) { + buf = (u32 *)skb_put(skb, ETH_FCS_LEN); + *buf = val; + } + if (sz < 0) { err = sz; break; @@ -168,6 +175,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev) struct device_node *np = pdev->dev.of_node; struct device_node *ports, *portnp; struct ocelot *ocelot; + struct regmap *hsio; u32 val; struct { @@ -179,7 +187,6 @@ static int mscc_ocelot_probe(struct platform_device *pdev) { QSYS, "qsys" }, { ANA, "ana" }, { QS, "qs" }, - { HSIO, "hsio" }, }; if (!np && !pdev->dev.platform_data) @@ -202,6 +209,14 @@ static int mscc_ocelot_probe(struct platform_device *pdev) ocelot->targets[res[i].id] = target; } + hsio = syscon_regmap_lookup_by_compatible("mscc,ocelot-hsio"); + if (IS_ERR(hsio)) { + dev_err(&pdev->dev, "missing hsio syscon\n"); + return PTR_ERR(hsio); + } + + ocelot->targets[HSIO] = hsio; + err = ocelot_chip_init(ocelot); if (err) return err; @@ -244,18 +259,11 @@ static int mscc_ocelot_probe(struct platform_device *pdev) INIT_LIST_HEAD(&ocelot->multicast); ocelot_init(ocelot); - ocelot_rmw(ocelot, HSIO_HW_CFG_DEV1G_4_MODE | - HSIO_HW_CFG_DEV1G_6_MODE | - HSIO_HW_CFG_DEV1G_9_MODE, - HSIO_HW_CFG_DEV1G_4_MODE | - HSIO_HW_CFG_DEV1G_6_MODE | - HSIO_HW_CFG_DEV1G_9_MODE, - HSIO_HW_CFG); - for_each_available_child_of_node(ports, portnp) { struct device_node *phy_node; struct phy_device *phy; struct resource *res; + struct phy *serdes; void __iomem *regs; char res_name[8]; u32 port; @@ -280,10 +288,43 @@ static int mscc_ocelot_probe(struct platform_device *pdev) continue; err = ocelot_probe_port(ocelot, port, regs, phy); - if (err) { - dev_err(&pdev->dev, "failed to probe ports\n"); + if (err) + return err; + + err = of_get_phy_mode(portnp); + if (err < 0) + ocelot->ports[port]->phy_mode = PHY_INTERFACE_MODE_NA; + else + ocelot->ports[port]->phy_mode = err; + + switch (ocelot->ports[port]->phy_mode) { + case PHY_INTERFACE_MODE_NA: + continue; + case PHY_INTERFACE_MODE_SGMII: + break; + case PHY_INTERFACE_MODE_QSGMII: + break; + default: + dev_err(ocelot->dev, + "invalid phy mode for port%d, (Q)SGMII only\n", + port); + return -EINVAL; + } + + serdes = devm_of_phy_get(ocelot->dev, portnp, NULL); + if (IS_ERR(serdes)) { + err = PTR_ERR(serdes); + if (err == -EPROBE_DEFER) + dev_dbg(ocelot->dev, "deferring probe\n"); + else + dev_err(ocelot->dev, + "missing SerDes phys for 
port%d\n", + port); + goto err_probe_ports; } + + ocelot->ports[port]->serdes = serdes; } register_netdevice_notifier(&ocelot_netdevice_nb); diff --git a/drivers/net/ethernet/mscc/ocelot_dev_gmii.h b/drivers/net/ethernet/mscc/ocelot_dev_gmii.h deleted file mode 100644 index 6aa40ea223a2..000000000000 --- a/drivers/net/ethernet/mscc/ocelot_dev_gmii.h +++ /dev/null @@ -1,154 +0,0 @@ -/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ -/* - * Microsemi Ocelot Switch driver - * - * Copyright (c) 2017 Microsemi Corporation - */ - -#ifndef _MSCC_OCELOT_DEV_GMII_H_ -#define _MSCC_OCELOT_DEV_GMII_H_ - -#define DEV_GMII_PORT_MODE_CLOCK_CFG 0x0 - -#define DEV_GMII_PORT_MODE_CLOCK_CFG_MAC_TX_RST BIT(5) -#define DEV_GMII_PORT_MODE_CLOCK_CFG_MAC_RX_RST BIT(4) -#define DEV_GMII_PORT_MODE_CLOCK_CFG_PORT_RST BIT(3) -#define DEV_GMII_PORT_MODE_CLOCK_CFG_PHY_RST BIT(2) -#define DEV_GMII_PORT_MODE_CLOCK_CFG_LINK_SPEED(x) ((x) & GENMASK(1, 0)) -#define DEV_GMII_PORT_MODE_CLOCK_CFG_LINK_SPEED_M GENMASK(1, 0) - -#define DEV_GMII_PORT_MODE_PORT_MISC 0x4 - -#define DEV_GMII_PORT_MODE_PORT_MISC_MPLS_RX_ENA BIT(5) -#define DEV_GMII_PORT_MODE_PORT_MISC_FWD_ERROR_ENA BIT(4) -#define DEV_GMII_PORT_MODE_PORT_MISC_FWD_PAUSE_ENA BIT(3) -#define DEV_GMII_PORT_MODE_PORT_MISC_FWD_CTRL_ENA BIT(2) -#define DEV_GMII_PORT_MODE_PORT_MISC_GMII_LOOP_ENA BIT(1) -#define DEV_GMII_PORT_MODE_PORT_MISC_DEV_LOOP_ENA BIT(0) - -#define DEV_GMII_PORT_MODE_EVENTS 0x8 - -#define DEV_GMII_PORT_MODE_EEE_CFG 0xc - -#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_ENA BIT(22) -#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_TIMER_AGE(x) (((x) << 15) & GENMASK(21, 15)) -#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_TIMER_AGE_M GENMASK(21, 15) -#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_TIMER_AGE_X(x) (((x) & GENMASK(21, 15)) >> 15) -#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_TIMER_WAKEUP(x) (((x) << 8) & GENMASK(14, 8)) -#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_TIMER_WAKEUP_M GENMASK(14, 8) -#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_TIMER_WAKEUP_X(x) (((x) & GENMASK(14, 8)) >> 8) -#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_TIMER_HOLDOFF(x) (((x) << 1) & GENMASK(7, 1)) -#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_TIMER_HOLDOFF_M GENMASK(7, 1) -#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_TIMER_HOLDOFF_X(x) (((x) & GENMASK(7, 1)) >> 1) -#define DEV_GMII_PORT_MODE_EEE_CFG_PORT_LPI BIT(0) - -#define DEV_GMII_PORT_MODE_RX_PATH_DELAY 0x10 - -#define DEV_GMII_PORT_MODE_TX_PATH_DELAY 0x14 - -#define DEV_GMII_PORT_MODE_PTP_PREDICT_CFG 0x18 - -#define DEV_GMII_MAC_CFG_STATUS_MAC_ENA_CFG 0x1c - -#define DEV_GMII_MAC_CFG_STATUS_MAC_ENA_CFG_RX_ENA BIT(4) -#define DEV_GMII_MAC_CFG_STATUS_MAC_ENA_CFG_TX_ENA BIT(0) - -#define DEV_GMII_MAC_CFG_STATUS_MAC_MODE_CFG 0x20 - -#define DEV_GMII_MAC_CFG_STATUS_MAC_MODE_CFG_FC_WORD_SYNC_ENA BIT(8) -#define DEV_GMII_MAC_CFG_STATUS_MAC_MODE_CFG_GIGA_MODE_ENA BIT(4) -#define DEV_GMII_MAC_CFG_STATUS_MAC_MODE_CFG_FDX_ENA BIT(0) - -#define DEV_GMII_MAC_CFG_STATUS_MAC_MAXLEN_CFG 0x24 - -#define DEV_GMII_MAC_CFG_STATUS_MAC_TAGS_CFG 0x28 - -#define DEV_GMII_MAC_CFG_STATUS_MAC_TAGS_CFG_TAG_ID(x) (((x) << 16) & GENMASK(31, 16)) -#define DEV_GMII_MAC_CFG_STATUS_MAC_TAGS_CFG_TAG_ID_M GENMASK(31, 16) -#define DEV_GMII_MAC_CFG_STATUS_MAC_TAGS_CFG_TAG_ID_X(x) (((x) & GENMASK(31, 16)) >> 16) -#define DEV_GMII_MAC_CFG_STATUS_MAC_TAGS_CFG_PB_ENA BIT(1) -#define DEV_GMII_MAC_CFG_STATUS_MAC_TAGS_CFG_VLAN_AWR_ENA BIT(0) -#define DEV_GMII_MAC_CFG_STATUS_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA BIT(2) - -#define DEV_GMII_MAC_CFG_STATUS_MAC_ADV_CHK_CFG 0x2c - -#define 
DEV_GMII_MAC_CFG_STATUS_MAC_ADV_CHK_CFG_LEN_DROP_ENA BIT(0) - -#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG 0x30 - -#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_RESTORE_OLD_IPG_CHECK BIT(17) -#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_REDUCED_TX_IFG BIT(16) -#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_TX_IFG(x) (((x) << 8) & GENMASK(12, 8)) -#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_TX_IFG_M GENMASK(12, 8) -#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_TX_IFG_X(x) (((x) & GENMASK(12, 8)) >> 8) -#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_RX_IFG2(x) (((x) << 4) & GENMASK(7, 4)) -#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_RX_IFG2_M GENMASK(7, 4) -#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_RX_IFG2_X(x) (((x) & GENMASK(7, 4)) >> 4) -#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_RX_IFG1(x) ((x) & GENMASK(3, 0)) -#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_RX_IFG1_M GENMASK(3, 0) - -#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG 0x34 - -#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_BYPASS_COL_SYNC BIT(26) -#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_OB_ENA BIT(25) -#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_WEXC_DIS BIT(24) -#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_SEED(x) (((x) << 16) & GENMASK(23, 16)) -#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_SEED_M GENMASK(23, 16) -#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_SEED_X(x) (((x) & GENMASK(23, 16)) >> 16) -#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_SEED_LOAD BIT(12) -#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_RETRY_AFTER_EXC_COL_ENA BIT(8) -#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_LATE_COL_POS(x) ((x) & GENMASK(6, 0)) -#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_LATE_COL_POS_M GENMASK(6, 0) - -#define DEV_GMII_MAC_CFG_STATUS_MAC_DBG_CFG 0x38 - -#define DEV_GMII_MAC_CFG_STATUS_MAC_DBG_CFG_TBI_MODE BIT(4) -#define DEV_GMII_MAC_CFG_STATUS_MAC_DBG_CFG_IFG_CRS_EXT_CHK_ENA BIT(0) - -#define DEV_GMII_MAC_CFG_STATUS_MAC_FC_MAC_LOW_CFG 0x3c - -#define DEV_GMII_MAC_CFG_STATUS_MAC_FC_MAC_HIGH_CFG 0x40 - -#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY 0x44 - -#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_RX_IPG_SHRINK_STICKY BIT(9) -#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_RX_PREAM_SHRINK_STICKY BIT(8) -#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_RX_CARRIER_EXT_STICKY BIT(7) -#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_RX_CARRIER_EXT_ERR_STICKY BIT(6) -#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_RX_JUNK_STICKY BIT(5) -#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_TX_RETRANSMIT_STICKY BIT(4) -#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_TX_JAM_STICKY BIT(3) -#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_TX_FIFO_OFLW_STICKY BIT(2) -#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_TX_FRM_LEN_OVR_STICKY BIT(1) -#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_TX_ABORT_STICKY BIT(0) - -#define DEV_GMII_MM_CONFIG_ENABLE_CONFIG 0x48 - -#define DEV_GMII_MM_CONFIG_ENABLE_CONFIG_MM_RX_ENA BIT(0) -#define DEV_GMII_MM_CONFIG_ENABLE_CONFIG_MM_TX_ENA BIT(4) -#define DEV_GMII_MM_CONFIG_ENABLE_CONFIG_KEEP_S_AFTER_D BIT(8) - -#define DEV_GMII_MM_CONFIG_VERIF_CONFIG 0x4c - -#define DEV_GMII_MM_CONFIG_VERIF_CONFIG_PRM_VERIFY_DIS BIT(0) -#define DEV_GMII_MM_CONFIG_VERIF_CONFIG_PRM_VERIFY_TIME(x) (((x) << 4) & GENMASK(11, 4)) -#define DEV_GMII_MM_CONFIG_VERIF_CONFIG_PRM_VERIFY_TIME_M GENMASK(11, 4) -#define DEV_GMII_MM_CONFIG_VERIF_CONFIG_PRM_VERIFY_TIME_X(x) (((x) & GENMASK(11, 4)) >> 4) -#define DEV_GMII_MM_CONFIG_VERIF_CONFIG_VERIF_TIMER_UNITS(x) (((x) << 12) & GENMASK(13, 12)) -#define DEV_GMII_MM_CONFIG_VERIF_CONFIG_VERIF_TIMER_UNITS_M GENMASK(13, 12) -#define 
DEV_GMII_MM_CONFIG_VERIF_CONFIG_VERIF_TIMER_UNITS_X(x) (((x) & GENMASK(13, 12)) >> 12) - -#define DEV_GMII_MM_STATISTICS_MM_STATUS 0x50 - -#define DEV_GMII_MM_STATISTICS_MM_STATUS_PRMPT_ACTIVE_STATUS BIT(0) -#define DEV_GMII_MM_STATISTICS_MM_STATUS_PRMPT_ACTIVE_STICKY BIT(4) -#define DEV_GMII_MM_STATISTICS_MM_STATUS_PRMPT_VERIFY_STATE(x) (((x) << 8) & GENMASK(10, 8)) -#define DEV_GMII_MM_STATISTICS_MM_STATUS_PRMPT_VERIFY_STATE_M GENMASK(10, 8) -#define DEV_GMII_MM_STATISTICS_MM_STATUS_PRMPT_VERIFY_STATE_X(x) (((x) & GENMASK(10, 8)) >> 8) -#define DEV_GMII_MM_STATISTICS_MM_STATUS_UNEXP_RX_PFRM_STICKY BIT(12) -#define DEV_GMII_MM_STATISTICS_MM_STATUS_UNEXP_TX_PFRM_STICKY BIT(16) -#define DEV_GMII_MM_STATISTICS_MM_STATUS_MM_RX_FRAME_STATUS BIT(20) -#define DEV_GMII_MM_STATISTICS_MM_STATUS_MM_TX_FRAME_STATUS BIT(24) -#define DEV_GMII_MM_STATISTICS_MM_STATUS_MM_TX_PRMPT_STATUS BIT(28) - -#endif diff --git a/drivers/net/ethernet/mscc/ocelot_regs.c b/drivers/net/ethernet/mscc/ocelot_regs.c index e334b406c40c..9271af18b93b 100644 --- a/drivers/net/ethernet/mscc/ocelot_regs.c +++ b/drivers/net/ethernet/mscc/ocelot_regs.c @@ -5,6 +5,7 @@ * Copyright (c) 2017 Microsemi Corporation */ #include "ocelot.h" +#include <soc/mscc/ocelot_hsio.h> static const u32 ocelot_ana_regmap[] = { REG(ANA_ADVLEARN, 0x009000), @@ -102,82 +103,6 @@ static const u32 ocelot_qs_regmap[] = { REG(QS_INH_DBG, 0x000048), }; -static const u32 ocelot_hsio_regmap[] = { - REG(HSIO_PLL5G_CFG0, 0x000000), - REG(HSIO_PLL5G_CFG1, 0x000004), - REG(HSIO_PLL5G_CFG2, 0x000008), - REG(HSIO_PLL5G_CFG3, 0x00000c), - REG(HSIO_PLL5G_CFG4, 0x000010), - REG(HSIO_PLL5G_CFG5, 0x000014), - REG(HSIO_PLL5G_CFG6, 0x000018), - REG(HSIO_PLL5G_STATUS0, 0x00001c), - REG(HSIO_PLL5G_STATUS1, 0x000020), - REG(HSIO_PLL5G_BIST_CFG0, 0x000024), - REG(HSIO_PLL5G_BIST_CFG1, 0x000028), - REG(HSIO_PLL5G_BIST_CFG2, 0x00002c), - REG(HSIO_PLL5G_BIST_STAT0, 0x000030), - REG(HSIO_PLL5G_BIST_STAT1, 0x000034), - REG(HSIO_RCOMP_CFG0, 0x000038), - REG(HSIO_RCOMP_STATUS, 0x00003c), - REG(HSIO_SYNC_ETH_CFG, 0x000040), - REG(HSIO_SYNC_ETH_PLL_CFG, 0x000048), - REG(HSIO_S1G_DES_CFG, 0x00004c), - REG(HSIO_S1G_IB_CFG, 0x000050), - REG(HSIO_S1G_OB_CFG, 0x000054), - REG(HSIO_S1G_SER_CFG, 0x000058), - REG(HSIO_S1G_COMMON_CFG, 0x00005c), - REG(HSIO_S1G_PLL_CFG, 0x000060), - REG(HSIO_S1G_PLL_STATUS, 0x000064), - REG(HSIO_S1G_DFT_CFG0, 0x000068), - REG(HSIO_S1G_DFT_CFG1, 0x00006c), - REG(HSIO_S1G_DFT_CFG2, 0x000070), - REG(HSIO_S1G_TP_CFG, 0x000074), - REG(HSIO_S1G_RC_PLL_BIST_CFG, 0x000078), - REG(HSIO_S1G_MISC_CFG, 0x00007c), - REG(HSIO_S1G_DFT_STATUS, 0x000080), - REG(HSIO_S1G_MISC_STATUS, 0x000084), - REG(HSIO_MCB_S1G_ADDR_CFG, 0x000088), - REG(HSIO_S6G_DIG_CFG, 0x00008c), - REG(HSIO_S6G_DFT_CFG0, 0x000090), - REG(HSIO_S6G_DFT_CFG1, 0x000094), - REG(HSIO_S6G_DFT_CFG2, 0x000098), - REG(HSIO_S6G_TP_CFG0, 0x00009c), - REG(HSIO_S6G_TP_CFG1, 0x0000a0), - REG(HSIO_S6G_RC_PLL_BIST_CFG, 0x0000a4), - REG(HSIO_S6G_MISC_CFG, 0x0000a8), - REG(HSIO_S6G_OB_ANEG_CFG, 0x0000ac), - REG(HSIO_S6G_DFT_STATUS, 0x0000b0), - REG(HSIO_S6G_ERR_CNT, 0x0000b4), - REG(HSIO_S6G_MISC_STATUS, 0x0000b8), - REG(HSIO_S6G_DES_CFG, 0x0000bc), - REG(HSIO_S6G_IB_CFG, 0x0000c0), - REG(HSIO_S6G_IB_CFG1, 0x0000c4), - REG(HSIO_S6G_IB_CFG2, 0x0000c8), - REG(HSIO_S6G_IB_CFG3, 0x0000cc), - REG(HSIO_S6G_IB_CFG4, 0x0000d0), - REG(HSIO_S6G_IB_CFG5, 0x0000d4), - REG(HSIO_S6G_OB_CFG, 0x0000d8), - REG(HSIO_S6G_OB_CFG1, 0x0000dc), - REG(HSIO_S6G_SER_CFG, 0x0000e0), - REG(HSIO_S6G_COMMON_CFG, 0x0000e4), - REG(HSIO_S6G_PLL_CFG, 
0x0000e8), - REG(HSIO_S6G_ACJTAG_CFG, 0x0000ec), - REG(HSIO_S6G_GP_CFG, 0x0000f0), - REG(HSIO_S6G_IB_STATUS0, 0x0000f4), - REG(HSIO_S6G_IB_STATUS1, 0x0000f8), - REG(HSIO_S6G_ACJTAG_STATUS, 0x0000fc), - REG(HSIO_S6G_PLL_STATUS, 0x000100), - REG(HSIO_S6G_REVID, 0x000104), - REG(HSIO_MCB_S6G_ADDR_CFG, 0x000108), - REG(HSIO_HW_CFG, 0x00010c), - REG(HSIO_HW_QSGMII_CFG, 0x000110), - REG(HSIO_HW_QSGMII_STAT, 0x000114), - REG(HSIO_CLK_CFG, 0x000118), - REG(HSIO_TEMP_SENSOR_CTRL, 0x00011c), - REG(HSIO_TEMP_SENSOR_CFG, 0x000120), - REG(HSIO_TEMP_SENSOR_STAT, 0x000124), -}; - static const u32 ocelot_qsys_regmap[] = { REG(QSYS_PORT_MODE, 0x011200), REG(QSYS_SWITCH_PORT_MODE, 0x011234), @@ -302,7 +227,6 @@ static const u32 ocelot_sys_regmap[] = { static const u32 *ocelot_regmap[] = { [ANA] = ocelot_ana_regmap, [QS] = ocelot_qs_regmap, - [HSIO] = ocelot_hsio_regmap, [QSYS] = ocelot_qsys_regmap, [REW] = ocelot_rew_regmap, [SYS] = ocelot_sys_regmap, @@ -453,9 +377,11 @@ static void ocelot_pll5_init(struct ocelot *ocelot) /* Configure PLL5. This will need a proper CCF driver * The values are coming from the VTSS API for Ocelot */ - ocelot_write(ocelot, HSIO_PLL5G_CFG4_IB_CTRL(0x7600) | - HSIO_PLL5G_CFG4_IB_BIAS_CTRL(0x8), HSIO_PLL5G_CFG4); - ocelot_write(ocelot, HSIO_PLL5G_CFG0_CORE_CLK_DIV(0x11) | + regmap_write(ocelot->targets[HSIO], HSIO_PLL5G_CFG4, + HSIO_PLL5G_CFG4_IB_CTRL(0x7600) | + HSIO_PLL5G_CFG4_IB_BIAS_CTRL(0x8)); + regmap_write(ocelot->targets[HSIO], HSIO_PLL5G_CFG0, + HSIO_PLL5G_CFG0_CORE_CLK_DIV(0x11) | HSIO_PLL5G_CFG0_CPU_CLK_DIV(2) | HSIO_PLL5G_CFG0_ENA_BIAS | HSIO_PLL5G_CFG0_ENA_VCO_BUF | @@ -465,13 +391,14 @@ static void ocelot_pll5_init(struct ocelot *ocelot) HSIO_PLL5G_CFG0_SELBGV820(4) | HSIO_PLL5G_CFG0_DIV4 | HSIO_PLL5G_CFG0_ENA_CLKTREE | - HSIO_PLL5G_CFG0_ENA_LANE, HSIO_PLL5G_CFG0); - ocelot_write(ocelot, HSIO_PLL5G_CFG2_EN_RESET_FRQ_DET | + HSIO_PLL5G_CFG0_ENA_LANE); + regmap_write(ocelot->targets[HSIO], HSIO_PLL5G_CFG2, + HSIO_PLL5G_CFG2_EN_RESET_FRQ_DET | HSIO_PLL5G_CFG2_EN_RESET_OVERRUN | HSIO_PLL5G_CFG2_GAIN_TEST(0x8) | HSIO_PLL5G_CFG2_ENA_AMPCTRL | HSIO_PLL5G_CFG2_PWD_AMPCTRL_N | - HSIO_PLL5G_CFG2_AMPC_SEL(0x10), HSIO_PLL5G_CFG2); + HSIO_PLL5G_CFG2_AMPC_SEL(0x10)); } int ocelot_chip_init(struct ocelot *ocelot) diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c index b8983e73265a..82be90075695 100644 --- a/drivers/net/ethernet/neterion/s2io.c +++ b/drivers/net/ethernet/neterion/s2io.c @@ -75,6 +75,7 @@ #include <linux/tcp.h> #include <linux/uaccess.h> #include <linux/io.h> +#include <linux/io-64-nonatomic-lo-hi.h> #include <linux/slab.h> #include <linux/prefetch.h> #include <net/tcp.h> @@ -491,7 +492,7 @@ static struct pci_driver s2io_driver = { }; /* A simplifier macro used both by init and free shared_mem Fns(). 
*/ -#define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each) +#define TXD_MEM_PAGE_CNT(len, per_each) DIV_ROUND_UP(len, per_each) /* netqueue manipulation helper functions */ static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp) @@ -3679,11 +3680,9 @@ static void restore_xmsi_data(struct s2io_nic *nic) writeq(nic->msix_info[i].data, &bar0->xmsi_data); val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6)); writeq(val64, &bar0->xmsi_access); - if (wait_for_msix_trans(nic, msix_index)) { + if (wait_for_msix_trans(nic, msix_index)) DBG_PRINT(ERR_DBG, "%s: index: %d failed\n", __func__, msix_index); - continue; - } } } diff --git a/drivers/net/ethernet/neterion/s2io.h b/drivers/net/ethernet/neterion/s2io.h index 1a24a7218794..0a921f30f98f 100644 --- a/drivers/net/ethernet/neterion/s2io.h +++ b/drivers/net/ethernet/neterion/s2io.h @@ -10,6 +10,7 @@ * system is licensed under the GPL. * See the file COPYING in this distribution for more information. ************************************************************************/ +#include <linux/io-64-nonatomic-lo-hi.h> #ifndef _S2IO_H #define _S2IO_H @@ -970,27 +971,6 @@ struct s2io_nic { #define RESET_ERROR 1 #define CMD_ERROR 2 -/* OS related system calls */ -#ifndef readq -static inline u64 readq(void __iomem *addr) -{ - u64 ret = 0; - ret = readl(addr + 4); - ret <<= 32; - ret |= readl(addr); - - return ret; -} -#endif - -#ifndef writeq -static inline void writeq(u64 val, void __iomem *addr) -{ - writel((u32) (val), addr); - writel((u32) (val >> 32), (addr + 4)); -} -#endif - /* * Some registers have to be written in a particular order to * expect correct hardware operation. The macro SPECIAL_REG_WRITE diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c index 398011c87643..4c1fb7e57888 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-config.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c @@ -13,6 +13,7 @@ ******************************************************************************/ #include <linux/vmalloc.h> #include <linux/etherdevice.h> +#include <linux/io-64-nonatomic-lo-hi.h> #include <linux/pci.h> #include <linux/slab.h> diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h index d743a37a3cee..e678ba379598 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-config.h +++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h @@ -2011,26 +2011,6 @@ enum vxge_hw_status vxge_hw_vpath_mtu_set( void vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp); -#ifndef readq -static inline u64 readq(void __iomem *addr) -{ - u64 ret = 0; - ret = readl(addr + 4); - ret <<= 32; - ret |= readl(addr); - - return ret; -} -#endif - -#ifndef writeq -static inline void writeq(u64 val, void __iomem *addr) -{ - writel((u32) (val), addr); - writel((u32) (val >> 32), (addr + 4)); -} -#endif - static inline void __vxge_hw_pio_mem_write32_upper(u32 val, void __iomem *addr) { writel(val, addr + 4); diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c index 0c3b5dea2858..f7a0d1d5885e 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c @@ -12,6 +12,7 @@ * Copyright(c) 2002-2010 Exar Corp. 
******************************************************************************/ #include <linux/etherdevice.h> +#include <linux/io-64-nonatomic-lo-hi.h> #include <linux/prefetch.h> #include "vxge-traffic.h" @@ -2261,7 +2262,7 @@ void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id) { struct __vxge_hw_device *hldev = vp->vpath->hldev; - if ((hldev->config.intr_mode == VXGE_HW_INTR_MODE_MSIX_ONE_SHOT)) + if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) __vxge_hw_pio_mem_write32_upper( (u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32), &hldev->common_reg->clr_msix_one_shot_vec[msix_id % 4]); diff --git a/drivers/net/ethernet/netronome/nfp/abm/ctrl.c b/drivers/net/ethernet/netronome/nfp/abm/ctrl.c index b157ccd8c80f..3c661f422688 100644 --- a/drivers/net/ethernet/netronome/nfp/abm/ctrl.c +++ b/drivers/net/ethernet/netronome/nfp/abm/ctrl.c @@ -1,36 +1,5 @@ -// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) -/* - * Copyright (C) 2018 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2018 Netronome Systems, Inc. 
*/ #include <linux/kernel.h> @@ -55,30 +24,21 @@ #define NFP_QMSTAT_DROP 16 #define NFP_QMSTAT_ECN 24 -static unsigned long long -nfp_abm_q_lvl_thrs(struct nfp_abm_link *alink, unsigned int queue) -{ - return alink->abm->q_lvls->addr + - (alink->queue_base + queue) * NFP_QLVL_STRIDE + NFP_QLVL_THRS; -} - static int nfp_abm_ctrl_stat(struct nfp_abm_link *alink, const struct nfp_rtsym *sym, unsigned int stride, unsigned int offset, unsigned int i, bool is_u64, u64 *res) { struct nfp_cpp *cpp = alink->abm->app->cpp; - u32 val32, mur; - u64 val, addr; + u64 val, sym_offset; + u32 val32; int err; - mur = NFP_CPP_ATOMIC_RD(sym->target, sym->domain); - - addr = sym->addr + (alink->queue_base + i) * stride + offset; + sym_offset = (alink->queue_base + i) * stride + offset; if (is_u64) - err = nfp_cpp_readq(cpp, mur, addr, &val); + err = __nfp_rtsym_readq(cpp, sym, 3, 0, sym_offset, &val); else - err = nfp_cpp_readl(cpp, mur, addr, &val32); + err = __nfp_rtsym_readl(cpp, sym, 3, 0, sym_offset, &val32); if (err) { nfp_err(cpp, "RED offload reading stat failed on vNIC %d queue %d\n", @@ -114,13 +74,12 @@ nfp_abm_ctrl_stat_all(struct nfp_abm_link *alink, const struct nfp_rtsym *sym, int nfp_abm_ctrl_set_q_lvl(struct nfp_abm_link *alink, unsigned int i, u32 val) { struct nfp_cpp *cpp = alink->abm->app->cpp; - u32 muw; + u64 sym_offset; int err; - muw = NFP_CPP_ATOMIC_WR(alink->abm->q_lvls->target, - alink->abm->q_lvls->domain); - - err = nfp_cpp_writel(cpp, muw, nfp_abm_q_lvl_thrs(alink, i), val); + sym_offset = (alink->queue_base + i) * NFP_QLVL_STRIDE + NFP_QLVL_THRS; + err = __nfp_rtsym_writel(cpp, alink->abm->q_lvls, 4, 0, + sym_offset, val); if (err) { nfp_err(cpp, "RED offload setting level failed on vNIC %d queue %d\n", alink->id, i); @@ -290,10 +249,10 @@ nfp_abm_ctrl_find_rtsym(struct nfp_pf *pf, const char *name, unsigned int size) nfp_err(pf->cpp, "Symbol '%s' not found\n", name); return ERR_PTR(-ENOENT); } - if (sym->size != size) { + if (nfp_rtsym_size(sym) != size) { nfp_err(pf->cpp, "Symbol '%s' wrong size: expected %u got %llu\n", - name, size, sym->size); + name, size, nfp_rtsym_size(sym)); return ERR_PTR(-EINVAL); } diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.c b/drivers/net/ethernet/netronome/nfp/abm/main.c index b84a6c2d387b..c0830c0c2c3f 100644 --- a/drivers/net/ethernet/netronome/nfp/abm/main.c +++ b/drivers/net/ethernet/netronome/nfp/abm/main.c @@ -1,36 +1,5 @@ -// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) -/* - * Copyright (C) 2018 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2018 Netronome Systems, Inc. */ #include <linux/bitfield.h> #include <linux/etherdevice.h> @@ -540,8 +509,9 @@ nfp_abm_vnic_set_mac(struct nfp_pf *pf, struct nfp_abm *abm, struct nfp_net *nn, { struct nfp_eth_table_port *eth_port = &pf->eth_tbl->ports[id]; u8 mac_addr[ETH_ALEN]; - const char *mac_str; - char name[32]; + struct nfp_nsp *nsp; + char hwinfo[32]; + int err; if (id > pf->eth_tbl->count) { nfp_warn(pf->cpp, "No entry for persistent MAC address\n"); @@ -549,22 +519,37 @@ nfp_abm_vnic_set_mac(struct nfp_pf *pf, struct nfp_abm *abm, struct nfp_net *nn, return; } - snprintf(name, sizeof(name), "eth%u.mac.pf%u", + snprintf(hwinfo, sizeof(hwinfo), "eth%u.mac.pf%u", eth_port->eth_index, abm->pf_id); - mac_str = nfp_hwinfo_lookup(pf->hwinfo, name); - if (!mac_str) { - nfp_warn(pf->cpp, "Can't lookup persistent MAC address (%s)\n", - name); + nsp = nfp_nsp_open(pf->cpp); + if (IS_ERR(nsp)) { + nfp_warn(pf->cpp, "Failed to access the NSP for persistent MAC address: %ld\n", + PTR_ERR(nsp)); + eth_hw_addr_random(nn->dp.netdev); + return; + } + + if (!nfp_nsp_has_hwinfo_lookup(nsp)) { + nfp_warn(pf->cpp, "NSP doesn't support PF MAC generation\n"); + eth_hw_addr_random(nn->dp.netdev); + return; + } + + err = nfp_nsp_hwinfo_lookup(nsp, hwinfo, sizeof(hwinfo)); + nfp_nsp_close(nsp); + if (err) { + nfp_warn(pf->cpp, "Reading persistent MAC address failed: %d\n", + err); eth_hw_addr_random(nn->dp.netdev); return; } - if (sscanf(mac_str, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx", + if (sscanf(hwinfo, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx", &mac_addr[0], &mac_addr[1], &mac_addr[2], &mac_addr[3], &mac_addr[4], &mac_addr[5]) != 6) { nfp_warn(pf->cpp, "Can't parse persistent MAC address (%s)\n", - mac_str); + hwinfo); eth_hw_addr_random(nn->dp.netdev); return; } diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.h b/drivers/net/ethernet/netronome/nfp/abm/main.h index 934a70835473..f907b7d98917 100644 --- a/drivers/net/ethernet/netronome/nfp/abm/main.h +++ b/drivers/net/ethernet/netronome/nfp/abm/main.h @@ -1,36 +1,5 @@ -/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */ -/* - * Copyright (C) 2018 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. 
Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2018 Netronome Systems, Inc. */ #ifndef __NFP_ABM_H__ #define __NFP_ABM_H__ 1 diff --git a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c index 2572a4b91c7c..9b6cfa697879 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017-2018 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2017-2018 Netronome Systems, Inc. 
*/ #include <linux/bpf.h> #include <linux/bitops.h> @@ -89,15 +59,32 @@ nfp_bpf_cmsg_alloc(struct nfp_app_bpf *bpf, unsigned int size) return skb; } +static unsigned int +nfp_bpf_cmsg_map_req_size(struct nfp_app_bpf *bpf, unsigned int n) +{ + unsigned int size; + + size = sizeof(struct cmsg_req_map_op); + size += (bpf->cmsg_key_sz + bpf->cmsg_val_sz) * n; + + return size; +} + static struct sk_buff * nfp_bpf_cmsg_map_req_alloc(struct nfp_app_bpf *bpf, unsigned int n) { + return nfp_bpf_cmsg_alloc(bpf, nfp_bpf_cmsg_map_req_size(bpf, n)); +} + +static unsigned int +nfp_bpf_cmsg_map_reply_size(struct nfp_app_bpf *bpf, unsigned int n) +{ unsigned int size; - size = sizeof(struct cmsg_req_map_op); - size += sizeof(struct cmsg_key_value_pair) * n; + size = sizeof(struct cmsg_reply_map_op); + size += (bpf->cmsg_key_sz + bpf->cmsg_val_sz) * n; - return nfp_bpf_cmsg_alloc(bpf, size); + return size; } static u8 nfp_bpf_cmsg_get_type(struct sk_buff *skb) @@ -338,6 +325,34 @@ void nfp_bpf_ctrl_free_map(struct nfp_app_bpf *bpf, struct nfp_bpf_map *nfp_map) dev_consume_skb_any(skb); } +static void * +nfp_bpf_ctrl_req_key(struct nfp_app_bpf *bpf, struct cmsg_req_map_op *req, + unsigned int n) +{ + return &req->data[bpf->cmsg_key_sz * n + bpf->cmsg_val_sz * n]; +} + +static void * +nfp_bpf_ctrl_req_val(struct nfp_app_bpf *bpf, struct cmsg_req_map_op *req, + unsigned int n) +{ + return &req->data[bpf->cmsg_key_sz * (n + 1) + bpf->cmsg_val_sz * n]; +} + +static void * +nfp_bpf_ctrl_reply_key(struct nfp_app_bpf *bpf, struct cmsg_reply_map_op *reply, + unsigned int n) +{ + return &reply->data[bpf->cmsg_key_sz * n + bpf->cmsg_val_sz * n]; +} + +static void * +nfp_bpf_ctrl_reply_val(struct nfp_app_bpf *bpf, struct cmsg_reply_map_op *reply, + unsigned int n) +{ + return &reply->data[bpf->cmsg_key_sz * (n + 1) + bpf->cmsg_val_sz * n]; +} + static int nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap, enum nfp_bpf_cmsg_type op, @@ -366,12 +381,13 @@ nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap, /* Copy inputs */ if (key) - memcpy(&req->elem[0].key, key, map->key_size); + memcpy(nfp_bpf_ctrl_req_key(bpf, req, 0), key, map->key_size); if (value) - memcpy(&req->elem[0].value, value, map->value_size); + memcpy(nfp_bpf_ctrl_req_val(bpf, req, 0), value, + map->value_size); skb = nfp_bpf_cmsg_communicate(bpf, skb, op, - sizeof(*reply) + sizeof(*reply->elem)); + nfp_bpf_cmsg_map_reply_size(bpf, 1)); if (IS_ERR(skb)) return PTR_ERR(skb); @@ -382,9 +398,11 @@ nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap, /* Copy outputs */ if (out_key) - memcpy(out_key, &reply->elem[0].key, map->key_size); + memcpy(out_key, nfp_bpf_ctrl_reply_key(bpf, reply, 0), + map->key_size); if (out_value) - memcpy(out_value, &reply->elem[0].value, map->value_size); + memcpy(out_value, nfp_bpf_ctrl_reply_val(bpf, reply, 0), + map->value_size); dev_consume_skb_any(skb); @@ -428,6 +446,13 @@ int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap, key, NULL, 0, next_key, NULL); } +unsigned int nfp_bpf_ctrl_cmsg_mtu(struct nfp_app_bpf *bpf) +{ + return max3((unsigned int)NFP_NET_DEFAULT_MTU, + nfp_bpf_cmsg_map_req_size(bpf, 1), + nfp_bpf_cmsg_map_reply_size(bpf, 1)); +} + void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb) { struct nfp_app_bpf *bpf = app->priv; diff --git a/drivers/net/ethernet/netronome/nfp/bpf/fw.h b/drivers/net/ethernet/netronome/nfp/bpf/fw.h index e4f9b7ec8528..721921bcf120 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/fw.h +++ b/drivers/net/ethernet/netronome/nfp/bpf/fw.h @@ -1,35 
+1,5 @@ -/* - * Copyright (C) 2017-2018 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2017-2018 Netronome Systems, Inc. */ #ifndef NFP_BPF_FW_H #define NFP_BPF_FW_H 1 @@ -52,6 +22,7 @@ enum bpf_cap_tlv_type { NFP_BPF_CAP_TYPE_RANDOM = 4, NFP_BPF_CAP_TYPE_QUEUE_SELECT = 5, NFP_BPF_CAP_TYPE_ADJUST_TAIL = 6, + NFP_BPF_CAP_TYPE_ABI_VERSION = 7, }; struct nfp_bpf_cap_tlv_func { @@ -98,6 +69,7 @@ enum nfp_bpf_cmsg_type { #define CMSG_TYPE_MAP_REPLY_BIT 7 #define __CMSG_REPLY(req) (BIT(CMSG_TYPE_MAP_REPLY_BIT) | (req)) +/* BPF ABIv2 fixed-length control message fields */ #define CMSG_MAP_KEY_LW 16 #define CMSG_MAP_VALUE_LW 16 @@ -147,24 +119,19 @@ struct cmsg_reply_map_free_tbl { __be32 count; }; -struct cmsg_key_value_pair { - __be32 key[CMSG_MAP_KEY_LW]; - __be32 value[CMSG_MAP_VALUE_LW]; -}; - struct cmsg_req_map_op { struct cmsg_hdr hdr; __be32 tid; __be32 count; __be32 flags; - struct cmsg_key_value_pair elem[0]; + u8 data[0]; }; struct cmsg_reply_map_op { struct cmsg_reply_map_simple reply_hdr; __be32 count; __be32 resv; - struct cmsg_key_value_pair elem[0]; + u8 data[0]; }; struct cmsg_bpf_event { diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index eff57f7d056a..97d33bb4d84d 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2016-2018 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. 
Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2016-2018 Netronome Systems, Inc. */ #define pr_fmt(fmt) "NFP net bpf: " fmt @@ -267,6 +237,38 @@ emit_br_bset(struct nfp_prog *nfp_prog, swreg src, u8 bit, u16 addr, u8 defer) } static void +__emit_br_alu(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi, + u8 defer, bool dst_lmextn, bool src_lmextn) +{ + u64 insn; + + insn = OP_BR_ALU_BASE | + FIELD_PREP(OP_BR_ALU_A_SRC, areg) | + FIELD_PREP(OP_BR_ALU_B_SRC, breg) | + FIELD_PREP(OP_BR_ALU_DEFBR, defer) | + FIELD_PREP(OP_BR_ALU_IMM_HI, imm_hi) | + FIELD_PREP(OP_BR_ALU_SRC_LMEXTN, src_lmextn) | + FIELD_PREP(OP_BR_ALU_DST_LMEXTN, dst_lmextn); + + nfp_prog_push(nfp_prog, insn); +} + +static void emit_rtn(struct nfp_prog *nfp_prog, swreg base, u8 defer) +{ + struct nfp_insn_ur_regs reg; + int err; + + err = swreg_to_unrestricted(reg_none(), base, reg_imm(0), &reg); + if (err) { + nfp_prog->error = err; + return; + } + + __emit_br_alu(nfp_prog, reg.areg, reg.breg, 0, defer, reg.dst_lmextn, + reg.src_lmextn); +} + +static void __emit_immed(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi, enum immed_width width, bool invert, enum immed_shift shift, bool wr_both, @@ -1137,7 +1139,7 @@ mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, unsigned int size, unsigned int ptr_off, u8 gpr, u8 ptr_gpr, bool clr_gpr, lmem_step step) { - s32 off = nfp_prog->stack_depth + meta->insn.off + ptr_off; + s32 off = nfp_prog->stack_frame_depth + meta->insn.off + ptr_off; bool first = true, last; bool needs_inc = false; swreg stack_off_reg; @@ -1146,7 +1148,8 @@ mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, bool lm3 = true; int ret; - if (meta->ptr_not_const) { + if (meta->ptr_not_const || + meta->flags & FLAG_INSN_PTR_CALLER_STACK_FRAME) { /* Use of the last encountered ptr_off is OK, they all have * the same alignment. Depend on low bits of value being * discarded when written to LMaddr register. 
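To make the stack handling in the mem_op_stack() hunk above concrete, here is a minimal standalone sketch in plain C, not driver code: the struct name frame_ctx, its field names and the 64-byte frame size are illustrative assumptions, while the offset arithmetic (stack_frame_depth + insn.off + ptr_off) and the fall-back condition mirror the patch.

#include <stdbool.h>
#include <stdio.h>

struct frame_ctx {
	unsigned int stack_frame_depth;	/* current frame size, rounded up to 4B */
	bool ptr_not_const;		/* pointer offset not known at JIT time */
	bool ptr_in_caller_frame;	/* stands in for FLAG_INSN_PTR_CALLER_STACK_FRAME */
};

/* Mirror of "off = stack_frame_depth + insn.off + ptr_off": BPF stack slots
 * sit below the top of the frame, so insn.off is typically negative and the
 * sum is an absolute byte offset into local memory (LM).
 */
static int stack_lm_offset(const struct frame_ctx *ctx, int insn_off,
			   int ptr_off)
{
	return (int)ctx->stack_frame_depth + insn_off + ptr_off;
}

/* An access cannot use a fixed, JIT-time LM offset when its pointer is
 * variable or reaches into the caller's stack frame; this mirrors the
 * "ptr_not_const || FLAG_INSN_PTR_CALLER_STACK_FRAME" test above.
 */
static bool needs_runtime_lm_addressing(const struct frame_ctx *ctx)
{
	return ctx->ptr_not_const || ctx->ptr_in_caller_frame;
}

int main(void)
{
	struct frame_ctx ctx = {
		.stack_frame_depth = 64,	/* hypothetical 64B frame */
		.ptr_not_const = false,
		.ptr_in_caller_frame = true,	/* e.g. subprog touching caller data */
	};

	/* A load at r10 - 8 in a 64B frame lands at LM byte 56. */
	printf("LM byte offset: %d\n", stack_lm_offset(&ctx, -8, 0));
	printf("run-time LM addressing needed: %s\n",
	       needs_runtime_lm_addressing(&ctx) ? "yes" : "no");
	return 0;
}

With these hypothetical values the access resolves to LM byte 56, but because the pointer crosses into the caller's frame the translation would take the dynamic LM-addressing path rather than a fixed offset.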
@@ -1695,7 +1698,7 @@ map_call_stack_common(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) s64 lm_off; /* We only have to reload LM0 if the key is not at start of stack */ - lm_off = nfp_prog->stack_depth; + lm_off = nfp_prog->stack_frame_depth; lm_off += meta->arg2.reg.var_off.value + meta->arg2.reg.off; load_lm_ptr = meta->arg2.var_off || lm_off; @@ -1808,10 +1811,10 @@ static int mov_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) swreg stack_depth_reg; stack_depth_reg = ur_load_imm_any(nfp_prog, - nfp_prog->stack_depth, + nfp_prog->stack_frame_depth, stack_imm(nfp_prog)); - emit_alu(nfp_prog, reg_both(dst), - stack_reg(nfp_prog), ALU_OP_ADD, stack_depth_reg); + emit_alu(nfp_prog, reg_both(dst), stack_reg(nfp_prog), + ALU_OP_ADD, stack_depth_reg); wrp_immed(nfp_prog, reg_both(dst + 1), 0); } else { wrp_reg_mov(nfp_prog, dst, src); @@ -3081,7 +3084,93 @@ static int jne_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) return wrp_test_reg(nfp_prog, meta, ALU_OP_XOR, BR_BNE); } -static int call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +static int +bpf_to_bpf_call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + u32 ret_tgt, stack_depth, offset_br; + swreg tmp_reg; + + stack_depth = round_up(nfp_prog->stack_frame_depth, STACK_FRAME_ALIGN); + /* Space for saving the return address is accounted for by the callee, + * so stack_depth can be zero for the main function. + */ + if (stack_depth) { + tmp_reg = ur_load_imm_any(nfp_prog, stack_depth, + stack_imm(nfp_prog)); + emit_alu(nfp_prog, stack_reg(nfp_prog), + stack_reg(nfp_prog), ALU_OP_ADD, tmp_reg); + emit_csr_wr(nfp_prog, stack_reg(nfp_prog), + NFP_CSR_ACT_LM_ADDR0); + } + + /* Two cases for jumping to the callee: + * + * - If the callee uses and needs to save R6~R9, then: + * 1. Put the start offset of the callee into imm_b(). This will + * require a fixup step, as we do not necessarily know this + * address yet. + * 2. Put the return address (used by the callee to return to the + * caller) into register ret_reg(). + * 3. (After defer slots are consumed) Jump to the subroutine that + * pushes the registers to the stack. + * The subroutine acts as a trampoline, and returns to the address in + * imm_b(), i.e. jumps to the callee. + * + * - If the callee does not need to save R6~R9, then just load the + * return address to the caller into ret_reg(), and jump to the + * callee directly. + * + * Using ret_reg() to pass the return address to the callee is set here + * as a convention. The callee can then push this address onto its + * stack frame in its prologue. The advantages of passing the return + * address through ret_reg(), instead of pushing it to the stack right + * here, are the following: + * - It looks cleaner. + * - If the called function is called multiple times, we get a lower + * program size. + * - We save the two no-op instructions that would otherwise have to + * be added just before the emit_br() when the stack depth is + * non-zero. + * - If we ever find a register to hold the return address during the + * whole execution of the callee, we will not have to push the return + * address to the stack for leaf functions. 
+ */ + if (!meta->jmp_dst) { + pr_err("BUG: BPF-to-BPF call has no destination recorded\n"); + return -ELOOP; + } + if (nfp_prog->subprog[meta->jmp_dst->subprog_idx].needs_reg_push) { + ret_tgt = nfp_prog_current_offset(nfp_prog) + 3; + emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, + RELO_BR_GO_CALL_PUSH_REGS); + offset_br = nfp_prog_current_offset(nfp_prog); + wrp_immed_relo(nfp_prog, imm_b(nfp_prog), 0, RELO_IMMED_REL); + } else { + ret_tgt = nfp_prog_current_offset(nfp_prog) + 2; + emit_br(nfp_prog, BR_UNC, meta->n + 1 + meta->insn.imm, 1); + offset_br = nfp_prog_current_offset(nfp_prog); + } + wrp_immed_relo(nfp_prog, ret_reg(nfp_prog), ret_tgt, RELO_IMMED_REL); + + if (!nfp_prog_confirm_current_offset(nfp_prog, ret_tgt)) + return -EINVAL; + + if (stack_depth) { + tmp_reg = ur_load_imm_any(nfp_prog, stack_depth, + stack_imm(nfp_prog)); + emit_alu(nfp_prog, stack_reg(nfp_prog), + stack_reg(nfp_prog), ALU_OP_SUB, tmp_reg); + emit_csr_wr(nfp_prog, stack_reg(nfp_prog), + NFP_CSR_ACT_LM_ADDR0); + wrp_nops(nfp_prog, 3); + } + + meta->num_insns_after_br = nfp_prog_current_offset(nfp_prog); + meta->num_insns_after_br -= offset_br; + + return 0; +} + +static int helper_call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { switch (meta->insn.imm) { case BPF_FUNC_xdp_adjust_head: @@ -3102,6 +3191,19 @@ static int call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) } } +static int call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + if (is_mbpf_pseudo_call(meta)) + return bpf_to_bpf_call(nfp_prog, meta); + else + return helper_call(nfp_prog, meta); +} + +static bool nfp_is_main_function(struct nfp_insn_meta *meta) +{ + return meta->subprog_idx == 0; +} + static int goto_out(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 0, RELO_BR_GO_OUT); @@ -3109,6 +3211,39 @@ static int goto_out(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) return 0; } +static int +nfp_subprog_epilogue(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + if (nfp_prog->subprog[meta->subprog_idx].needs_reg_push) { + /* Pop R6~R9 from the stack via the related subroutine. + * We loaded the return address to the caller into ret_reg(). + * This means that the subroutine does not come back here; we + * make it jump back to the subprogram caller directly! + */ + emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 1, + RELO_BR_GO_CALL_POP_REGS); + /* Pop return address from the stack. */ + wrp_mov(nfp_prog, ret_reg(nfp_prog), reg_lm(0, 0)); + } else { + /* Pop return address from the stack. */ + wrp_mov(nfp_prog, ret_reg(nfp_prog), reg_lm(0, 0)); + /* Jump back to caller if no callee-saved registers were used + * by the subprogram. 
+ */ + emit_rtn(nfp_prog, ret_reg(nfp_prog), 0); + } + + return 0; +} + +static int jmp_exit(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + if (nfp_is_main_function(meta)) + return goto_out(nfp_prog, meta); + else + return nfp_subprog_epilogue(nfp_prog, meta); +} + static const instr_cb_t instr_cb[256] = { [BPF_ALU64 | BPF_MOV | BPF_X] = mov_reg64, [BPF_ALU64 | BPF_MOV | BPF_K] = mov_imm64, @@ -3197,36 +3332,66 @@ static const instr_cb_t instr_cb[256] = { [BPF_JMP | BPF_JSET | BPF_X] = jset_reg, [BPF_JMP | BPF_JNE | BPF_X] = jne_reg, [BPF_JMP | BPF_CALL] = call, - [BPF_JMP | BPF_EXIT] = goto_out, + [BPF_JMP | BPF_EXIT] = jmp_exit, }; /* --- Assembler logic --- */ +static int +nfp_fixup_immed_relo(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, + struct nfp_insn_meta *jmp_dst, u32 br_idx) +{ + if (immed_get_value(nfp_prog->prog[br_idx + 1])) { + pr_err("BUG: failed to fix up callee register saving\n"); + return -EINVAL; + } + + immed_set_value(&nfp_prog->prog[br_idx + 1], jmp_dst->off); + + return 0; +} + static int nfp_fixup_branches(struct nfp_prog *nfp_prog) { struct nfp_insn_meta *meta, *jmp_dst; u32 idx, br_idx; + int err; list_for_each_entry(meta, &nfp_prog->insns, l) { if (meta->skip) continue; - if (meta->insn.code == (BPF_JMP | BPF_CALL)) - continue; if (BPF_CLASS(meta->insn.code) != BPF_JMP) continue; + if (meta->insn.code == (BPF_JMP | BPF_EXIT) && + !nfp_is_main_function(meta)) + continue; + if (is_mbpf_helper_call(meta)) + continue; if (list_is_last(&meta->l, &nfp_prog->insns)) br_idx = nfp_prog->last_bpf_off; else br_idx = list_next_entry(meta, l)->off - 1; + /* For a BPF-to-BPF function call, a stack adjustment sequence is + * generated after the return instruction. Therefore, we must + * subtract the length of this sequence to have br_idx pointing + * to where the "branch" NFP instruction is expected to be. + */ + if (is_mbpf_pseudo_call(meta)) + br_idx -= meta->num_insns_after_br; + if (!nfp_is_br(nfp_prog->prog[br_idx])) { pr_err("Fixup found block not ending in branch %d %02x %016llx!!\n", br_idx, meta->insn.code, nfp_prog->prog[br_idx]); return -ELOOP; } + + if (meta->insn.code == (BPF_JMP | BPF_EXIT)) + continue; + /* Leave special branches for later */ if (FIELD_GET(OP_RELO_TYPE, nfp_prog->prog[br_idx]) != - RELO_BR_REL) + RELO_BR_REL && !is_mbpf_pseudo_call(meta)) continue; if (!meta->jmp_dst) { @@ -3241,6 +3406,18 @@ static int nfp_fixup_branches(struct nfp_prog *nfp_prog) return -ELOOP; } + if (is_mbpf_pseudo_call(meta) && + nfp_prog->subprog[jmp_dst->subprog_idx].needs_reg_push) { + err = nfp_fixup_immed_relo(nfp_prog, meta, + jmp_dst, br_idx); + if (err) + return err; + } + + if (FIELD_GET(OP_RELO_TYPE, nfp_prog->prog[br_idx]) != + RELO_BR_REL) + continue; + for (idx = meta->off; idx <= br_idx; idx++) { if (!nfp_is_br(nfp_prog->prog[idx])) continue; @@ -3258,6 +3435,27 @@ static void nfp_intro(struct nfp_prog *nfp_prog) plen_reg(nfp_prog), ALU_OP_AND, pv_len(nfp_prog)); } +static void +nfp_subprog_prologue(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + /* Save return address into the stack. 
*/ + wrp_mov(nfp_prog, reg_lm(0, 0), ret_reg(nfp_prog)); +} + +static void +nfp_start_subprog(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + unsigned int depth = nfp_prog->subprog[meta->subprog_idx].stack_depth; + + nfp_prog->stack_frame_depth = round_up(depth, 4); + nfp_subprog_prologue(nfp_prog, meta); +} + +bool nfp_is_subprog_start(struct nfp_insn_meta *meta) +{ + return meta->flags & FLAG_INSN_IS_SUBPROG_START; +} + static void nfp_outro_tc_da(struct nfp_prog *nfp_prog) { /* TC direct-action mode: @@ -3348,6 +3546,67 @@ static void nfp_outro_xdp(struct nfp_prog *nfp_prog) emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16); } +static bool nfp_prog_needs_callee_reg_save(struct nfp_prog *nfp_prog) +{ + unsigned int idx; + + for (idx = 1; idx < nfp_prog->subprog_cnt; idx++) + if (nfp_prog->subprog[idx].needs_reg_push) + return true; + + return false; +} + +static void nfp_push_callee_registers(struct nfp_prog *nfp_prog) +{ + u8 reg; + + /* Subroutine: Save all callee saved registers (R6 ~ R9). + * imm_b() holds the return address. + */ + nfp_prog->tgt_call_push_regs = nfp_prog_current_offset(nfp_prog); + for (reg = BPF_REG_6; reg <= BPF_REG_9; reg++) { + u8 adj = (reg - BPF_REG_0) * 2; + u8 idx = (reg - BPF_REG_6) * 2; + + /* The first slot in the stack frame is used to push the return + * address in bpf_to_bpf_call(), start just after. + */ + wrp_mov(nfp_prog, reg_lm(0, 1 + idx), reg_b(adj)); + + if (reg == BPF_REG_8) + /* Prepare to jump back, last 3 insns use defer slots */ + emit_rtn(nfp_prog, imm_b(nfp_prog), 3); + + wrp_mov(nfp_prog, reg_lm(0, 1 + idx + 1), reg_b(adj + 1)); + } +} + +static void nfp_pop_callee_registers(struct nfp_prog *nfp_prog) +{ + u8 reg; + + /* Subroutine: Restore all callee saved registers (R6 ~ R9). + * ret_reg() holds the return address. + */ + nfp_prog->tgt_call_pop_regs = nfp_prog_current_offset(nfp_prog); + for (reg = BPF_REG_6; reg <= BPF_REG_9; reg++) { + u8 adj = (reg - BPF_REG_0) * 2; + u8 idx = (reg - BPF_REG_6) * 2; + + /* The first slot in the stack frame holds the return address, + * start popping just after that. + */ + wrp_mov(nfp_prog, reg_both(adj), reg_lm(0, 1 + idx)); + + if (reg == BPF_REG_8) + /* Prepare to jump back, last 3 insns use defer slots */ + emit_rtn(nfp_prog, ret_reg(nfp_prog), 3); + + wrp_mov(nfp_prog, reg_both(adj + 1), reg_lm(0, 1 + idx + 1)); + } +} + static void nfp_outro(struct nfp_prog *nfp_prog) { switch (nfp_prog->type) { @@ -3360,13 +3619,23 @@ static void nfp_outro(struct nfp_prog *nfp_prog) default: WARN_ON(1); } + + if (!nfp_prog_needs_callee_reg_save(nfp_prog)) + return; + + nfp_push_callee_registers(nfp_prog); + nfp_pop_callee_registers(nfp_prog); } static int nfp_translate(struct nfp_prog *nfp_prog) { struct nfp_insn_meta *meta; + unsigned int depth; int err; + depth = nfp_prog->subprog[0].stack_depth; + nfp_prog->stack_frame_depth = round_up(depth, 4); + nfp_intro(nfp_prog); if (nfp_prog->error) return nfp_prog->error; @@ -3376,6 +3645,12 @@ static int nfp_translate(struct nfp_prog *nfp_prog) meta->off = nfp_prog_current_offset(nfp_prog); + if (nfp_is_subprog_start(meta)) { + nfp_start_subprog(nfp_prog, meta); + if (nfp_prog->error) + return nfp_prog->error; + } + if (meta->skip) { nfp_prog->n_translated++; continue; @@ -4018,20 +4293,35 @@ void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt) /* Another pass to record jump information. 
*/ list_for_each_entry(meta, &nfp_prog->insns, l) { + struct nfp_insn_meta *dst_meta; u64 code = meta->insn.code; + unsigned int dst_idx; + bool pseudo_call; + + if (BPF_CLASS(code) != BPF_JMP) + continue; + if (BPF_OP(code) == BPF_EXIT) + continue; + if (is_mbpf_helper_call(meta)) + continue; - if (BPF_CLASS(code) == BPF_JMP && BPF_OP(code) != BPF_EXIT && - BPF_OP(code) != BPF_CALL) { - struct nfp_insn_meta *dst_meta; - unsigned short dst_indx; + /* If opcode is BPF_CALL at this point, this can only be a + * BPF-to-BPF call (a.k.a pseudo call). + */ + pseudo_call = BPF_OP(code) == BPF_CALL; - dst_indx = meta->n + 1 + meta->insn.off; - dst_meta = nfp_bpf_goto_meta(nfp_prog, meta, dst_indx, - cnt); + if (pseudo_call) + dst_idx = meta->n + 1 + meta->insn.imm; + else + dst_idx = meta->n + 1 + meta->insn.off; - meta->jmp_dst = dst_meta; - dst_meta->flags |= FLAG_INSN_IS_JUMP_DST; - } + dst_meta = nfp_bpf_goto_meta(nfp_prog, meta, dst_idx, cnt); + + if (pseudo_call) + dst_meta->flags |= FLAG_INSN_IS_SUBPROG_START; + + dst_meta->flags |= FLAG_INSN_IS_JUMP_DST; + meta->jmp_dst = dst_meta; } } @@ -4054,6 +4344,7 @@ void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv) for (i = 0; i < nfp_prog->prog_len; i++) { enum nfp_relo_type special; u32 val; + u16 off; special = FIELD_GET(OP_RELO_TYPE, prog[i]); switch (special) { @@ -4070,6 +4361,24 @@ void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv) br_set_offset(&prog[i], nfp_prog->tgt_abort + bv->start_off); break; + case RELO_BR_GO_CALL_PUSH_REGS: + if (!nfp_prog->tgt_call_push_regs) { + pr_err("BUG: failed to detect subprogram registers needs\n"); + err = -EINVAL; + goto err_free_prog; + } + off = nfp_prog->tgt_call_push_regs + bv->start_off; + br_set_offset(&prog[i], off); + break; + case RELO_BR_GO_CALL_POP_REGS: + if (!nfp_prog->tgt_call_pop_regs) { + pr_err("BUG: failed to detect subprogram registers needs\n"); + err = -EINVAL; + goto err_free_prog; + } + off = nfp_prog->tgt_call_pop_regs + bv->start_off; + br_set_offset(&prog[i], off); + break; case RELO_BR_NEXT_PKT: br_set_offset(&prog[i], bv->tgt_done); break; diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c index 970af07f4656..6243af0ab025 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017-2018 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2017-2018 Netronome Systems, Inc. */ #include <net/pkt_cls.h> @@ -54,11 +24,14 @@ const struct rhashtable_params nfp_bpf_maps_neutral_params = { static bool nfp_net_ebpf_capable(struct nfp_net *nn) { #ifdef __LITTLE_ENDIAN - if (nn->cap & NFP_NET_CFG_CTRL_BPF && - nn_readb(nn, NFP_NET_CFG_BPF_ABI) == NFP_NET_BPF_ABI) - return true; -#endif + struct nfp_app_bpf *bpf = nn->app->priv; + + return nn->cap & NFP_NET_CFG_CTRL_BPF && + bpf->abi_version && + nn_readb(nn, NFP_NET_CFG_BPF_ABI) == bpf->abi_version; +#else return false; +#endif } static int @@ -342,6 +315,26 @@ nfp_bpf_parse_cap_adjust_tail(struct nfp_app_bpf *bpf, void __iomem *value, return 0; } +static int +nfp_bpf_parse_cap_abi_version(struct nfp_app_bpf *bpf, void __iomem *value, + u32 length) +{ + if (length < 4) { + nfp_err(bpf->app->cpp, "truncated ABI version TLV: %d\n", + length); + return -EINVAL; + } + + bpf->abi_version = readl(value); + if (bpf->abi_version < 2 || bpf->abi_version > 3) { + nfp_warn(bpf->app->cpp, "unsupported BPF ABI version: %d\n", + bpf->abi_version); + bpf->abi_version = 0; + } + + return 0; +} + static int nfp_bpf_parse_capabilities(struct nfp_app *app) { struct nfp_cpp *cpp = app->pf->cpp; @@ -393,6 +386,11 @@ static int nfp_bpf_parse_capabilities(struct nfp_app *app) length)) goto err_release_free; break; + case NFP_BPF_CAP_TYPE_ABI_VERSION: + if (nfp_bpf_parse_cap_abi_version(app->priv, value, + length)) + goto err_release_free; + break; default: nfp_dbg(cpp, "unknown BPF capability: %d\n", type); break; @@ -414,6 +412,11 @@ err_release_free: return -EINVAL; } +static void nfp_bpf_init_capabilities(struct nfp_app_bpf *bpf) +{ + bpf->abi_version = 2; /* Original BPF ABI version */ +} + static int nfp_bpf_ndo_init(struct nfp_app *app, struct net_device *netdev) { struct nfp_app_bpf *bpf = app->priv; @@ -447,10 +450,21 @@ static int nfp_bpf_init(struct nfp_app *app) if (err) goto err_free_bpf; + nfp_bpf_init_capabilities(bpf); + err = nfp_bpf_parse_capabilities(app); if (err) goto err_free_neutral_maps; + if (bpf->abi_version < 3) { + bpf->cmsg_key_sz = CMSG_MAP_KEY_LW * 4; + bpf->cmsg_val_sz = CMSG_MAP_VALUE_LW * 4; + } else { + bpf->cmsg_key_sz = bpf->maps.max_key_sz; + bpf->cmsg_val_sz = bpf->maps.max_val_sz; + app->ctrl_mtu = nfp_bpf_ctrl_cmsg_mtu(bpf); + } + bpf->bpf_dev = bpf_offload_dev_create(); err = PTR_ERR_OR_ZERO(bpf->bpf_dev); if (err) @@ -465,11 +479,6 @@ err_free_bpf: return err; } -static void nfp_check_rhashtable_empty(void *ptr, void *arg) -{ - WARN_ON_ONCE(1); -} - static void nfp_bpf_clean(struct nfp_app *app) { struct nfp_app_bpf *bpf = app->priv; diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h index dbd00982fd2b..52457ae3b259 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.h +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2016-2018 Netronome Systems, Inc. 
- * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2016-2018 Netronome Systems, Inc. */ #ifndef __NFP_BPF_H__ #define __NFP_BPF_H__ 1 @@ -61,6 +31,8 @@ enum nfp_relo_type { /* internal jumps to parts of the outro */ RELO_BR_GO_OUT, RELO_BR_GO_ABORT, + RELO_BR_GO_CALL_PUSH_REGS, + RELO_BR_GO_CALL_POP_REGS, /* external jumps to fixed addresses */ RELO_BR_NEXT_PKT, RELO_BR_HELPER, @@ -104,6 +76,7 @@ enum pkt_vec { #define imma_a(np) reg_a(STATIC_REG_IMMA) #define imma_b(np) reg_b(STATIC_REG_IMMA) #define imm_both(np) reg_both(STATIC_REG_IMM) +#define ret_reg(np) imm_a(np) #define NFP_BPF_ABI_FLAGS reg_imm(0) #define NFP_BPF_ABI_FLAG_MARK 1 @@ -121,12 +94,17 @@ enum pkt_vec { * @cmsg_replies: received cmsg replies waiting to be consumed * @cmsg_wq: work queue for waiting for cmsg replies * + * @cmsg_key_sz: size of key in cmsg element array + * @cmsg_val_sz: size of value in cmsg element array + * * @map_list: list of offloaded maps * @maps_in_use: number of currently offloaded maps * @map_elems_in_use: number of elements allocated to offloaded maps * * @maps_neutral: hash table of offload-neutral maps (on pointer) * + * @abi_version: global BPF ABI version + * * @adjust_head: adjust head capability * @adjust_head.flags: extra flags for adjust head * @adjust_head.off_min: minimal packet offset within buffer required @@ -164,12 +142,17 @@ struct nfp_app_bpf { struct sk_buff_head cmsg_replies; struct wait_queue_head cmsg_wq; + unsigned int cmsg_key_sz; + unsigned int cmsg_val_sz; + struct list_head map_list; unsigned int maps_in_use; unsigned int map_elems_in_use; struct rhashtable maps_neutral; + u32 abi_version; + struct nfp_bpf_cap_adjust_head { u32 flags; int off_min; @@ -252,7 +235,9 @@ struct nfp_bpf_reg_state { bool var_off; }; -#define FLAG_INSN_IS_JUMP_DST BIT(0) +#define FLAG_INSN_IS_JUMP_DST BIT(0) +#define FLAG_INSN_IS_SUBPROG_START BIT(1) +#define FLAG_INSN_PTR_CALLER_STACK_FRAME BIT(2) /** * struct nfp_insn_meta - BPF instruction wrapper @@ -269,6 +254,7 @@ struct nfp_bpf_reg_state { * @xadd_maybe_16bit: 16bit immediate is possible * @jmp_dst: destination info for jump instructions * @jump_neg_op: jump 
instruction has inverted immediate, use ADD instead of SUB + * @num_insns_after_br: number of insns following a branch jump, used for fixup * @func_id: function id for call instructions * @arg1: arg1 for call instructions * @arg2: arg2 for call instructions @@ -279,6 +265,7 @@ struct nfp_bpf_reg_state { * @off: index of first generated machine instruction (in nfp_prog.prog) * @n: eBPF instruction number * @flags: eBPF instruction extra optimization flags + * @subprog_idx: index of subprogram to which the instruction belongs * @skip: skip this instruction (optimized out) * @double_cb: callback for second part of the instruction * @l: link on nfp_prog->insns list @@ -304,6 +291,7 @@ struct nfp_insn_meta { struct { struct nfp_insn_meta *jmp_dst; bool jump_neg_op; + u32 num_insns_after_br; /* only for BPF-to-BPF calls */ }; /* function calls */ struct { @@ -325,6 +313,7 @@ struct nfp_insn_meta { unsigned int off; unsigned short n; unsigned short flags; + unsigned short subprog_idx; bool skip; instr_cb_t double_cb; @@ -413,6 +402,34 @@ static inline bool is_mbpf_div(const struct nfp_insn_meta *meta) return is_mbpf_alu(meta) && mbpf_op(meta) == BPF_DIV; } +static inline bool is_mbpf_helper_call(const struct nfp_insn_meta *meta) +{ + struct bpf_insn insn = meta->insn; + + return insn.code == (BPF_JMP | BPF_CALL) && + insn.src_reg != BPF_PSEUDO_CALL; +} + +static inline bool is_mbpf_pseudo_call(const struct nfp_insn_meta *meta) +{ + struct bpf_insn insn = meta->insn; + + return insn.code == (BPF_JMP | BPF_CALL) && + insn.src_reg == BPF_PSEUDO_CALL; +} + +#define STACK_FRAME_ALIGN 64 + +/** + * struct nfp_bpf_subprog_info - nfp BPF sub-program (a.k.a. function) info + * @stack_depth: maximum stack depth used by this sub-program + * @needs_reg_push: whether sub-program uses callee-saved registers + */ +struct nfp_bpf_subprog_info { + u16 stack_depth; + u8 needs_reg_push : 1; +}; + /** * struct nfp_prog - nfp BPF program * @bpf: backpointer to the bpf app priv structure @@ -424,12 +441,16 @@ static inline bool is_mbpf_div(const struct nfp_insn_meta *meta) * @last_bpf_off: address of the last instruction translated from BPF * @tgt_out: jump target for normal exit * @tgt_abort: jump target for abort (e.g. access outside of packet buffer) + * @tgt_call_push_regs: jump target for subroutine for saving R6~R9 to stack + * @tgt_call_pop_regs: jump target for subroutine used for restoring R6~R9 * @n_translated: number of successfully translated instructions (for errors) * @error: error code if something went wrong - * @stack_depth: max stack depth from the verifier + * @stack_frame_depth: max stack depth for current frame * @adjust_head_location: if program has single adjust head call - the insn no. 
* @map_records_cnt: the number of map pointers recorded for this prog + * @subprog_cnt: number of sub-programs, including main function * @map_records: the map record pointers from bpf->maps_neutral + * @subprog: pointer to an array of objects holding info about sub-programs * @insns: list of BPF instruction wrappers (struct nfp_insn_meta) */ struct nfp_prog { @@ -446,15 +467,19 @@ struct nfp_prog { unsigned int last_bpf_off; unsigned int tgt_out; unsigned int tgt_abort; + unsigned int tgt_call_push_regs; + unsigned int tgt_call_pop_regs; unsigned int n_translated; int error; - unsigned int stack_depth; + unsigned int stack_frame_depth; unsigned int adjust_head_location; unsigned int map_records_cnt; + unsigned int subprog_cnt; struct nfp_bpf_neutral_map **map_records; + struct nfp_bpf_subprog_info *subprog; struct list_head insns; }; @@ -471,6 +496,7 @@ struct nfp_bpf_vnic { unsigned int tgt_done; }; +bool nfp_is_subprog_start(struct nfp_insn_meta *meta); void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt); int nfp_bpf_jit(struct nfp_prog *prog); bool nfp_bpf_supported_opcode(u8 code); @@ -492,6 +518,7 @@ nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv); +unsigned int nfp_bpf_ctrl_cmsg_mtu(struct nfp_app_bpf *bpf); long long int nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map); void diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c index 1ccd6371a15b..927e038d9f77 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2016-2018 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2016-2018 Netronome Systems, Inc. 
*/ /* * nfp_net_offload.c @@ -208,6 +178,8 @@ static void nfp_prog_free(struct nfp_prog *nfp_prog) { struct nfp_insn_meta *meta, *tmp; + kfree(nfp_prog->subprog); + list_for_each_entry_safe(meta, tmp, &nfp_prog->insns, l) { list_del(&meta->l); kfree(meta); @@ -250,18 +222,9 @@ err_free: static int nfp_bpf_translate(struct nfp_net *nn, struct bpf_prog *prog) { struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv; - unsigned int stack_size; unsigned int max_instr; int err; - stack_size = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64; - if (prog->aux->stack_depth > stack_size) { - nn_info(nn, "stack too large: program %dB > FW stack %dB\n", - prog->aux->stack_depth, stack_size); - return -EOPNOTSUPP; - } - nfp_prog->stack_depth = round_up(prog->aux->stack_depth, 4); - max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN); nfp_prog->__prog_alloc_len = max_instr * sizeof(u64); diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c index a6e9248669e1..193dd685b365 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c @@ -1,43 +1,15 @@ -/* - * Copyright (C) 2016-2018 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2016-2018 Netronome Systems, Inc. 
*/ #include <linux/bpf.h> #include <linux/bpf_verifier.h> #include <linux/kernel.h> +#include <linux/netdevice.h> #include <linux/pkt_cls.h> #include "../nfp_app.h" #include "../nfp_main.h" +#include "../nfp_net.h" #include "fw.h" #include "main.h" @@ -155,8 +127,9 @@ nfp_bpf_map_call_ok(const char *fname, struct bpf_verifier_env *env, } static int -nfp_bpf_check_call(struct nfp_prog *nfp_prog, struct bpf_verifier_env *env, - struct nfp_insn_meta *meta) +nfp_bpf_check_helper_call(struct nfp_prog *nfp_prog, + struct bpf_verifier_env *env, + struct nfp_insn_meta *meta) { const struct bpf_reg_state *reg1 = cur_regs(env) + BPF_REG_1; const struct bpf_reg_state *reg2 = cur_regs(env) + BPF_REG_2; @@ -333,6 +306,9 @@ nfp_bpf_check_stack_access(struct nfp_prog *nfp_prog, { s32 old_off, new_off; + if (reg->frameno != env->cur_state->curframe) + meta->flags |= FLAG_INSN_PTR_CALLER_STACK_FRAME; + if (!tnum_is_const(reg->var_off)) { pr_vlog(env, "variable ptr stack access\n"); return -EINVAL; } @@ -620,8 +596,8 @@ nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx) return -EINVAL; } - if (meta->insn.code == (BPF_JMP | BPF_CALL)) - return nfp_bpf_check_call(nfp_prog, env, meta); + if (is_mbpf_helper_call(meta)) + return nfp_bpf_check_helper_call(nfp_prog, env, meta); if (meta->insn.code == (BPF_JMP | BPF_EXIT)) return nfp_bpf_check_exit(nfp_prog, env); @@ -640,6 +616,131 @@ nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx) return 0; } +static int +nfp_assign_subprog_idx_and_regs(struct bpf_verifier_env *env, + struct nfp_prog *nfp_prog) +{ + struct nfp_insn_meta *meta; + int index = 0; + + list_for_each_entry(meta, &nfp_prog->insns, l) { + if (nfp_is_subprog_start(meta)) + index++; + meta->subprog_idx = index; + + if (meta->insn.dst_reg >= BPF_REG_6 && + meta->insn.dst_reg <= BPF_REG_9) + nfp_prog->subprog[index].needs_reg_push = 1; + } + + if (index + 1 != nfp_prog->subprog_cnt) { + pr_vlog(env, "BUG: number of processed BPF functions is not consistent (processed %d, expected %d)\n", + index + 1, nfp_prog->subprog_cnt); + return -EFAULT; + } + + return 0; +} + +static unsigned int +nfp_bpf_get_stack_usage(struct nfp_prog *nfp_prog, unsigned int cnt) +{ + struct nfp_insn_meta *meta = nfp_prog_first_meta(nfp_prog); + unsigned int max_depth = 0, depth = 0, frame = 0; + struct nfp_insn_meta *ret_insn[MAX_CALL_FRAMES]; + unsigned short frame_depths[MAX_CALL_FRAMES]; + unsigned short ret_prog[MAX_CALL_FRAMES]; + unsigned short idx = meta->subprog_idx; + + /* Inspired by check_max_stack_depth() from the kernel verifier. + * Starting from the main subprogram, walk all instructions and + * recursively walk all callees that a given subprogram can call. + * Since recursion is prevented by the kernel verifier, this algorithm + * only needs a local stack of MAX_CALL_FRAMES to remember callsites. + */ +process_subprog: + frame_depths[frame] = nfp_prog->subprog[idx].stack_depth; + frame_depths[frame] = round_up(frame_depths[frame], STACK_FRAME_ALIGN); + depth += frame_depths[frame]; + max_depth = max(max_depth, depth); + +continue_subprog: + for (; meta != nfp_prog_last_meta(nfp_prog) && meta->subprog_idx == idx; + meta = nfp_meta_next(meta)) { + if (!is_mbpf_pseudo_call(meta)) + continue; + + /* We found a call to a subprogram. Remember the instruction + * to return to and the subprog id. + */ + ret_insn[frame] = nfp_meta_next(meta); + ret_prog[frame] = idx; + + /* Find the callee and start processing it. 
*/ + meta = nfp_bpf_goto_meta(nfp_prog, meta, + meta->n + 1 + meta->insn.imm, cnt); + idx = meta->subprog_idx; + frame++; + goto process_subprog; + } + /* End of for() loop means the last instruction of the subprog was + * reached. If we popped all stack frames, return; otherwise, go on + * processing remaining instructions from the caller. + */ + if (frame == 0) + return max_depth; + + depth -= frame_depths[frame]; + frame--; + meta = ret_insn[frame]; + idx = ret_prog[frame]; + goto continue_subprog; +} + +static int nfp_bpf_finalize(struct bpf_verifier_env *env) +{ + unsigned int stack_size, stack_needed; + struct bpf_subprog_info *info; + struct nfp_prog *nfp_prog; + struct nfp_net *nn; + int i; + + nfp_prog = env->prog->aux->offload->dev_priv; + nfp_prog->subprog_cnt = env->subprog_cnt; + nfp_prog->subprog = kcalloc(nfp_prog->subprog_cnt, + sizeof(nfp_prog->subprog[0]), GFP_KERNEL); + if (!nfp_prog->subprog) + return -ENOMEM; + + nfp_assign_subprog_idx_and_regs(env, nfp_prog); + + info = env->subprog_info; + for (i = 0; i < nfp_prog->subprog_cnt; i++) { + nfp_prog->subprog[i].stack_depth = info[i].stack_depth; + + if (i == 0) + continue; + + /* Account for size of return address. */ + nfp_prog->subprog[i].stack_depth += REG_WIDTH; + /* Account for size of saved registers, if necessary. */ + if (nfp_prog->subprog[i].needs_reg_push) + nfp_prog->subprog[i].stack_depth += BPF_REG_SIZE * 4; + } + + nn = netdev_priv(env->prog->aux->offload->netdev); + stack_size = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64; + stack_needed = nfp_bpf_get_stack_usage(nfp_prog, env->prog->len); + if (stack_needed > stack_size) { + pr_vlog(env, "stack too large: program %dB > FW stack %dB\n", + stack_needed, stack_size); + return -EOPNOTSUPP; + } + + return 0; +} + const struct bpf_prog_offload_ops nfp_bpf_analyzer_ops = { - .insn_hook = nfp_verify_insn, + .insn_hook = nfp_verify_insn, + .finalize = nfp_bpf_finalize, }; diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c index 46ba0cf257c6..f2b1938236fe 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/action.c +++ b/drivers/net/ethernet/netronome/nfp/flower/action.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2017-2018 Netronome Systems, Inc. */ #include <linux/bitfield.h> #include <net/geneve.h> diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c index cb8565222621..4c5eaf36d5bb 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2015-2018 Netronome Systems, Inc. */ #include <linux/bitfield.h> #include <linux/netdevice.h> diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h index 325954b829c8..29d673aa5277 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2017-2018 Netronome Systems, Inc. */ #ifndef NFP_FLOWER_CMSG_H #define NFP_FLOWER_CMSG_H diff --git a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c index bf10598f66ae..81dcf5b318ba 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2018 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2018 Netronome Systems, Inc. */ #include "main.h" diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c index e57d23746585..3a54728d2ea6 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.c +++ b/drivers/net/ethernet/netronome/nfp/flower/main.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. 
Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2017-2018 Netronome Systems, Inc. */ #include <linux/etherdevice.h> #include <linux/lockdep.h> @@ -518,8 +488,8 @@ err_clear_nn: static int nfp_flower_init(struct nfp_app *app) { const struct nfp_pf *pf = app->pf; + u64 version, features, ctx_count; struct nfp_flower_priv *app_priv; - u64 version, features; int err; if (!pf->eth_tbl) { @@ -543,6 +513,16 @@ static int nfp_flower_init(struct nfp_app *app) return err; } + ctx_count = nfp_rtsym_read_le(app->pf->rtbl, "CONFIG_FC_HOST_CTX_COUNT", + &err); + if (err) { + nfp_warn(app->cpp, + "FlowerNIC: unsupported host context count: %d\n", + err); + err = 0; + ctx_count = BIT(17); + } + /* We need to ensure hardware has enough flower capabilities. */ if (version != NFP_FLOWER_ALLOWED_VER) { nfp_warn(app->cpp, "FlowerNIC: unsupported firmware version\n"); @@ -553,6 +533,7 @@ static int nfp_flower_init(struct nfp_app *app) if (!app_priv) return -ENOMEM; + app_priv->stats_ring_size = roundup_pow_of_two(ctx_count); app->priv = app_priv; app_priv->app = app; skb_queue_head_init(&app_priv->cmsg_skbs_high); @@ -563,7 +544,7 @@ static int nfp_flower_init(struct nfp_app *app) init_waitqueue_head(&app_priv->mtu_conf.wait_q); spin_lock_init(&app_priv->mtu_conf.lock); - err = nfp_flower_metadata_init(app); + err = nfp_flower_metadata_init(app, ctx_count); if (err) goto err_free_app_priv; diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index 81d941ab895c..90045bab95bf 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2017-2018 Netronome Systems, Inc. */ #ifndef __NFP_FLOWER_H__ #define __NFP_FLOWER_H__ 1 @@ -38,6 +8,7 @@ #include <linux/circ_buf.h> #include <linux/hashtable.h> +#include <linux/rhashtable.h> #include <linux/time64.h> #include <linux/types.h> #include <net/pkt_cls.h> @@ -50,10 +21,8 @@ struct net_device; struct nfp_app; #define NFP_FL_STATS_CTX_DONT_CARE cpu_to_be32(0xffffffff) -#define NFP_FL_STATS_ENTRY_RS BIT(20) -#define NFP_FL_STATS_ELEM_RS 4 -#define NFP_FL_REPEATED_HASH_MAX BIT(17) -#define NFP_FLOWER_HASH_BITS 19 +#define NFP_FL_STATS_ELEM_RS FIELD_SIZEOF(struct nfp_fl_stats_id, \ + init_unalloc) #define NFP_FLOWER_MASK_ENTRY_RS 256 #define NFP_FLOWER_MASK_ELEMENT_RS 1 #define NFP_FLOWER_MASK_HASH_BITS 10 @@ -138,7 +107,10 @@ struct nfp_fl_lag { * @stats_ids: List of free stats ids * @mask_ids: List of free mask ids * @mask_table: Hash table used to store masks + * @stats_ring_size: Maximum number of allowed stats ids * @flow_table: Hash table used to store flower rules + * @stats: Stored stats updates for flower rules + * @stats_lock: Lock for flower rule stats updates * @cmsg_work: Workqueue for control messages processing * @cmsg_skbs_high: List of higher priority skbs for control message * processing @@ -171,7 +143,10 @@ struct nfp_flower_priv { struct nfp_fl_stats_id stats_ids; struct nfp_fl_mask_id mask_ids; DECLARE_HASHTABLE(mask_table, NFP_FLOWER_MASK_HASH_BITS); - DECLARE_HASHTABLE(flow_table, NFP_FLOWER_HASH_BITS); + u32 stats_ring_size; + struct rhashtable flow_table; + struct nfp_fl_stats *stats; + spinlock_t stats_lock; /* lock stats */ struct work_struct cmsg_work; struct sk_buff_head cmsg_skbs_high; struct sk_buff_head cmsg_skbs_low; @@ -227,10 +202,8 @@ struct nfp_fl_stats { struct nfp_fl_payload { struct nfp_fl_rule_metadata meta; unsigned long tc_flower_cookie; - struct hlist_node link; + struct rhash_head fl_node; struct rcu_head rcu; - spinlock_t lock; /* lock stats */ - struct nfp_fl_stats stats; __be32 nfp_tun_ipv4_addr; struct net_device *ingress_dev; char *unmasked_data; @@ -239,6 +212,8 @@ struct nfp_fl_payload { bool ingress_offload; }; +extern const struct rhashtable_params nfp_flower_table_params; + struct nfp_fl_stats_frame { __be32 stats_con_id; __be32 pkt_count; @@ -246,7 +221,7 @@ struct nfp_fl_stats_frame { __be64 stats_cookie; }; -int nfp_flower_metadata_init(struct nfp_app *app); +int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count); void nfp_flower_metadata_cleanup(struct nfp_app *app); int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev, diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c index 17acb8cc6044..e54fb6034326 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/match.c +++ b/drivers/net/ethernet/netronome/nfp/flower/match.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017 Netronome Systems, Inc. 
- * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2017-2018 Netronome Systems, Inc. */ #include <linux/bitfield.h> #include <net/pkt_cls.h> diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c index c098730544b7..48729bf171e0 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c +++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2017-2018 Netronome Systems, Inc. 
*/ #include <linux/hash.h> #include <linux/hashtable.h> @@ -48,6 +18,12 @@ struct nfp_mask_id_table { u8 mask_id; }; +struct nfp_fl_flow_table_cmp_arg { + struct net_device *netdev; + unsigned long cookie; + __be32 host_ctx; +}; + static int nfp_release_stats_entry(struct nfp_app *app, u32 stats_context_id) { struct nfp_flower_priv *priv = app->priv; @@ -55,14 +31,14 @@ static int nfp_release_stats_entry(struct nfp_app *app, u32 stats_context_id) ring = &priv->stats_ids.free_list; /* Check if buffer is full. */ - if (!CIRC_SPACE(ring->head, ring->tail, NFP_FL_STATS_ENTRY_RS * - NFP_FL_STATS_ELEM_RS - + if (!CIRC_SPACE(ring->head, ring->tail, + priv->stats_ring_size * NFP_FL_STATS_ELEM_RS - NFP_FL_STATS_ELEM_RS + 1)) return -ENOBUFS; memcpy(&ring->buf[ring->head], &stats_context_id, NFP_FL_STATS_ELEM_RS); ring->head = (ring->head + NFP_FL_STATS_ELEM_RS) % - (NFP_FL_STATS_ENTRY_RS * NFP_FL_STATS_ELEM_RS); + (priv->stats_ring_size * NFP_FL_STATS_ELEM_RS); return 0; } @@ -74,7 +50,7 @@ static int nfp_get_stats_entry(struct nfp_app *app, u32 *stats_context_id) struct circ_buf *ring; ring = &priv->stats_ids.free_list; - freed_stats_id = NFP_FL_STATS_ENTRY_RS; + freed_stats_id = priv->stats_ring_size; /* Check for unallocated entries first. */ if (priv->stats_ids.init_unalloc > 0) { *stats_context_id = priv->stats_ids.init_unalloc - 1; @@ -92,7 +68,7 @@ static int nfp_get_stats_entry(struct nfp_app *app, u32 *stats_context_id) *stats_context_id = temp_stats_id; memcpy(&ring->buf[ring->tail], &freed_stats_id, NFP_FL_STATS_ELEM_RS); ring->tail = (ring->tail + NFP_FL_STATS_ELEM_RS) % - (NFP_FL_STATS_ENTRY_RS * NFP_FL_STATS_ELEM_RS); + (priv->stats_ring_size * NFP_FL_STATS_ELEM_RS); return 0; } @@ -102,56 +78,37 @@ struct nfp_fl_payload * nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie, struct net_device *netdev, __be32 host_ctx) { + struct nfp_fl_flow_table_cmp_arg flower_cmp_arg; struct nfp_flower_priv *priv = app->priv; - struct nfp_fl_payload *flower_entry; - hash_for_each_possible_rcu(priv->flow_table, flower_entry, link, - tc_flower_cookie) - if (flower_entry->tc_flower_cookie == tc_flower_cookie && - (!netdev || flower_entry->ingress_dev == netdev) && - (host_ctx == NFP_FL_STATS_CTX_DONT_CARE || - flower_entry->meta.host_ctx_id == host_ctx)) - return flower_entry; + flower_cmp_arg.netdev = netdev; + flower_cmp_arg.cookie = tc_flower_cookie; + flower_cmp_arg.host_ctx = host_ctx; - return NULL; -} - -static void -nfp_flower_update_stats(struct nfp_app *app, struct nfp_fl_stats_frame *stats) -{ - struct nfp_fl_payload *nfp_flow; - unsigned long flower_cookie; - - flower_cookie = be64_to_cpu(stats->stats_cookie); - - rcu_read_lock(); - nfp_flow = nfp_flower_search_fl_table(app, flower_cookie, NULL, - stats->stats_con_id); - if (!nfp_flow) - goto exit_rcu_unlock; - - spin_lock(&nfp_flow->lock); - nfp_flow->stats.pkts += be32_to_cpu(stats->pkt_count); - nfp_flow->stats.bytes += be64_to_cpu(stats->byte_count); - nfp_flow->stats.used = jiffies; - spin_unlock(&nfp_flow->lock); - -exit_rcu_unlock: - rcu_read_unlock(); + return rhashtable_lookup_fast(&priv->flow_table, &flower_cmp_arg, + nfp_flower_table_params); } void nfp_flower_rx_flow_stats(struct nfp_app *app, struct sk_buff *skb) { unsigned int msg_len = nfp_flower_cmsg_get_data_len(skb); - struct nfp_fl_stats_frame *stats_frame; + struct nfp_flower_priv *priv = app->priv; + struct nfp_fl_stats_frame *stats; unsigned char *msg; + u32 ctx_id; int i; msg = nfp_flower_cmsg_get_data(skb); - stats_frame = (struct 
nfp_fl_stats_frame *)msg; - for (i = 0; i < msg_len / sizeof(*stats_frame); i++) - nfp_flower_update_stats(app, stats_frame + i); + spin_lock(&priv->stats_lock); + for (i = 0; i < msg_len / sizeof(*stats); i++) { + stats = (struct nfp_fl_stats_frame *)msg + i; + ctx_id = be32_to_cpu(stats->stats_con_id); + priv->stats[ctx_id].pkts += be32_to_cpu(stats->pkt_count); + priv->stats[ctx_id].bytes += be64_to_cpu(stats->byte_count); + priv->stats[ctx_id].used = jiffies; + } + spin_unlock(&priv->stats_lock); } static int nfp_release_mask_id(struct nfp_app *app, u8 mask_id) @@ -345,9 +302,9 @@ int nfp_compile_flow_metadata(struct nfp_app *app, /* Update flow payload with mask ids. */ nfp_flow->unmasked_data[NFP_FL_MASK_ID_LOCATION] = new_mask_id; - nfp_flow->stats.pkts = 0; - nfp_flow->stats.bytes = 0; - nfp_flow->stats.used = jiffies; + priv->stats[stats_cxt].pkts = 0; + priv->stats[stats_cxt].bytes = 0; + priv->stats[stats_cxt].used = jiffies; check_entry = nfp_flower_search_fl_table(app, flow->cookie, netdev, NFP_FL_STATS_CTX_DONT_CARE); @@ -389,12 +346,56 @@ int nfp_modify_flow_metadata(struct nfp_app *app, return nfp_release_stats_entry(app, temp_ctx_id); } -int nfp_flower_metadata_init(struct nfp_app *app) +static int nfp_fl_obj_cmpfn(struct rhashtable_compare_arg *arg, + const void *obj) +{ + const struct nfp_fl_flow_table_cmp_arg *cmp_arg = arg->key; + const struct nfp_fl_payload *flow_entry = obj; + + if ((!cmp_arg->netdev || flow_entry->ingress_dev == cmp_arg->netdev) && + (cmp_arg->host_ctx == NFP_FL_STATS_CTX_DONT_CARE || + flow_entry->meta.host_ctx_id == cmp_arg->host_ctx)) + return flow_entry->tc_flower_cookie != cmp_arg->cookie; + + return 1; +} + +static u32 nfp_fl_obj_hashfn(const void *data, u32 len, u32 seed) +{ + const struct nfp_fl_payload *flower_entry = data; + + return jhash2((u32 *)&flower_entry->tc_flower_cookie, + sizeof(flower_entry->tc_flower_cookie) / sizeof(u32), + seed); +} + +static u32 nfp_fl_key_hashfn(const void *data, u32 len, u32 seed) +{ + const struct nfp_fl_flow_table_cmp_arg *cmp_arg = data; + + return jhash2((u32 *)&cmp_arg->cookie, + sizeof(cmp_arg->cookie) / sizeof(u32), seed); +} + +const struct rhashtable_params nfp_flower_table_params = { + .head_offset = offsetof(struct nfp_fl_payload, fl_node), + .hashfn = nfp_fl_key_hashfn, + .obj_cmpfn = nfp_fl_obj_cmpfn, + .obj_hashfn = nfp_fl_obj_hashfn, + .automatic_shrinking = true, +}; + +int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count) { struct nfp_flower_priv *priv = app->priv; + int err; hash_init(priv->mask_table); - hash_init(priv->flow_table); + + err = rhashtable_init(&priv->flow_table, &nfp_flower_table_params); + if (err) + return err; + get_random_bytes(&priv->mask_id_seed, sizeof(priv->mask_id_seed)); /* Init ring buffer and unallocated mask_ids. */ @@ -402,7 +403,7 @@ int nfp_flower_metadata_init(struct nfp_app *app) kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS, NFP_FLOWER_MASK_ELEMENT_RS, GFP_KERNEL); if (!priv->mask_ids.mask_id_free_list.buf) - return -ENOMEM; + goto err_free_flow_table; priv->mask_ids.init_unallocated = NFP_FLOWER_MASK_ENTRY_RS - 1; @@ -416,18 +417,29 @@ int nfp_flower_metadata_init(struct nfp_app *app) /* Init ring buffer and unallocated stats_ids. 
*/ priv->stats_ids.free_list.buf = vmalloc(array_size(NFP_FL_STATS_ELEM_RS, - NFP_FL_STATS_ENTRY_RS)); + priv->stats_ring_size)); if (!priv->stats_ids.free_list.buf) goto err_free_last_used; - priv->stats_ids.init_unalloc = NFP_FL_REPEATED_HASH_MAX; + priv->stats_ids.init_unalloc = host_ctx_count; + + priv->stats = kvmalloc_array(priv->stats_ring_size, + sizeof(struct nfp_fl_stats), GFP_KERNEL); + if (!priv->stats) + goto err_free_ring_buf; + + spin_lock_init(&priv->stats_lock); return 0; +err_free_ring_buf: + vfree(priv->stats_ids.free_list.buf); err_free_last_used: kfree(priv->mask_ids.last_used); err_free_mask_id: kfree(priv->mask_ids.mask_id_free_list.buf); +err_free_flow_table: + rhashtable_destroy(&priv->flow_table); return -ENOMEM; } @@ -438,6 +450,9 @@ void nfp_flower_metadata_cleanup(struct nfp_app *app) if (!priv) return; + rhashtable_free_and_destroy(&priv->flow_table, + nfp_check_rhashtable_empty, NULL); + kvfree(priv->stats); kfree(priv->mask_ids.mask_id_free_list.buf); kfree(priv->mask_ids.last_used); vfree(priv->stats_ids.free_list.buf); diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index bd19624f10cf..29c95423ab64 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2017-2018 Netronome Systems, Inc. 
*/ #include <linux/skbuff.h> #include <net/devlink.h> @@ -428,8 +398,6 @@ nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer, bool egress) flow_pay->nfp_tun_ipv4_addr = 0; flow_pay->meta.flags = 0; - spin_lock_init(&flow_pay->lock); - flow_pay->ingress_offload = !egress; return flow_pay; @@ -513,9 +481,12 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev, if (err) goto err_destroy_flow; - INIT_HLIST_NODE(&flow_pay->link); flow_pay->tc_flower_cookie = flow->cookie; - hash_add_rcu(priv->flow_table, &flow_pay->link, flow->cookie); + err = rhashtable_insert_fast(&priv->flow_table, &flow_pay->fl_node, + nfp_flower_table_params); + if (err) + goto err_destroy_flow; + port->tc_offload_cnt++; /* Deallocate flow payload when flower rule has been destroyed. */ @@ -550,6 +521,7 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev, struct tc_cls_flower_offload *flow, bool egress) { struct nfp_port *port = nfp_port_from_netdev(netdev); + struct nfp_flower_priv *priv = app->priv; struct nfp_fl_payload *nfp_flow; struct net_device *ingr_dev; int err; @@ -573,11 +545,13 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev, goto err_free_flow; err_free_flow: - hash_del_rcu(&nfp_flow->link); port->tc_offload_cnt--; kfree(nfp_flow->action_data); kfree(nfp_flow->mask_data); kfree(nfp_flow->unmasked_data); + WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table, + &nfp_flow->fl_node, + nfp_flower_table_params)); kfree_rcu(nfp_flow, rcu); return err; } @@ -598,8 +572,10 @@ static int nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev, struct tc_cls_flower_offload *flow, bool egress) { + struct nfp_flower_priv *priv = app->priv; struct nfp_fl_payload *nfp_flow; struct net_device *ingr_dev; + u32 ctx_id; ingr_dev = egress ? NULL : netdev; nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev, @@ -610,13 +586,16 @@ nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev, if (nfp_flow->ingress_offload && egress) return 0; - spin_lock_bh(&nfp_flow->lock); - tcf_exts_stats_update(flow->exts, nfp_flow->stats.bytes, - nfp_flow->stats.pkts, nfp_flow->stats.used); + ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id); + + spin_lock_bh(&priv->stats_lock); + tcf_exts_stats_update(flow->exts, priv->stats[ctx_id].bytes, + priv->stats[ctx_id].pkts, + priv->stats[ctx_id].used); - nfp_flow->stats.pkts = 0; - nfp_flow->stats.bytes = 0; - spin_unlock_bh(&nfp_flow->lock); + priv->stats[ctx_id].pkts = 0; + priv->stats[ctx_id].bytes = 0; + spin_unlock_bh(&priv->stats_lock); return 0; } diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c index 382bb93cb090..30c926c4bc47 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. 
Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2017-2018 Netronome Systems, Inc. */ #include <linux/etherdevice.h> #include <linux/inetdevice.h> diff --git a/drivers/net/ethernet/netronome/nfp/nfp_abi.h b/drivers/net/ethernet/netronome/nfp/nfp_abi.h index 8b56c27931bf..dd359a44adfb 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_abi.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_abi.h @@ -1,36 +1,5 @@ -/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */ -/* - * Copyright (C) 2018 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2018 Netronome Systems, Inc. */ #ifndef __NFP_ABI__ #define __NFP_ABI__ 1 diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.c b/drivers/net/ethernet/netronome/nfp/nfp_app.c index 8607d09ab732..68a0991aac22 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_app.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_app.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017-2018 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. 
You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2017-2018 Netronome Systems, Inc. */ #include <linux/bug.h> #include <linux/lockdep.h> @@ -60,6 +30,11 @@ static const struct nfp_app_type *apps[] = { #endif }; +void nfp_check_rhashtable_empty(void *ptr, void *arg) +{ + WARN_ON_ONCE(1); +} + struct nfp_app *nfp_app_from_netdev(struct net_device *netdev) { if (nfp_netdev_is_nfp_net(netdev)) { diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h index 4e1eb3395648..4d6ecf99b1cc 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_app.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2017-2018 Netronome Systems, Inc. 
*/ #ifndef _NFP_APP_H #define _NFP_APP_H 1 @@ -40,6 +10,8 @@ #include "nfp_net_repr.h" +#define NFP_APP_CTRL_MTU_MAX U32_MAX + struct bpf_prog; struct net_device; struct netdev_bpf; @@ -178,6 +150,7 @@ struct nfp_app_type { * @ctrl: pointer to ctrl vNIC struct * @reprs: array of pointers to representors * @type: pointer to const application ops and info + * @ctrl_mtu: MTU to set on the control vNIC (set in .init()) * @priv: app-specific priv data */ struct nfp_app { @@ -189,9 +162,11 @@ struct nfp_app { struct nfp_reprs __rcu *reprs[NFP_REPR_TYPE_MAX + 1]; const struct nfp_app_type *type; + unsigned int ctrl_mtu; void *priv; }; +void nfp_check_rhashtable_empty(void *ptr, void *arg); bool __nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb); bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c b/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c index e2dfe4f168bb..f119277fd66c 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2017-2018 Netronome Systems, Inc. */ #include "nfpcore/nfp_cpp.h" #include "nfpcore/nfp_nsp.h" diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.c b/drivers/net/ethernet/netronome/nfp/nfp_asm.c index cc6ace2be8a9..b04b83687fe2 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_asm.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2016-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. 
Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2016-2018 Netronome Systems, Inc. */ #include <linux/bitops.h> #include <linux/errno.h> diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.h b/drivers/net/ethernet/netronome/nfp/nfp_asm.h index fad0e62a910c..648c2810e5ba 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_asm.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.h @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2016-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2016-2018 Netronome Systems, Inc. 
*/ #ifndef __NFP_ASM_H__ #define __NFP_ASM_H__ 1 @@ -82,6 +52,15 @@ #define OP_BR_BIT_ADDR_LO OP_BR_ADDR_LO #define OP_BR_BIT_ADDR_HI OP_BR_ADDR_HI +#define OP_BR_ALU_BASE 0x0e800000000ULL +#define OP_BR_ALU_BASE_MASK 0x0ff80000000ULL +#define OP_BR_ALU_A_SRC 0x000000003ffULL +#define OP_BR_ALU_B_SRC 0x000000ffc00ULL +#define OP_BR_ALU_DEFBR 0x00000300000ULL +#define OP_BR_ALU_IMM_HI 0x0007fc00000ULL +#define OP_BR_ALU_SRC_LMEXTN 0x40000000000ULL +#define OP_BR_ALU_DST_LMEXTN 0x80000000000ULL + static inline bool nfp_is_br(u64 insn) { return (insn & OP_BR_BASE_MASK) == OP_BR_BASE || diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c index db463e20a876..107b048b33b4 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2017-2018 Netronome Systems, Inc. */ #include <linux/rtnetlink.h> #include <net/devlink.h> @@ -177,7 +147,8 @@ static int nfp_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) return nfp_app_eswitch_mode_get(pf->app, mode); } -static int nfp_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode) +static int nfp_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, + struct netlink_ext_ack *extack) { struct nfp_pf *pf = devlink_priv(devlink); int ret; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_hwmon.c b/drivers/net/ethernet/netronome/nfp/nfp_hwmon.c index f0dcf45aeec1..5cabb1aa9c0c 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_hwmon.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_hwmon.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. 
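The OP_BR_ALU_* additions in the nfp_asm.h hunk above are plain mask macros for the branch-ALU instruction word; elsewhere in this driver such masks are composed and picked apart with the linux/bitfield.h helpers. A hedged sketch assuming only the masks shown; encode_br_alu() and is_br_alu() are illustrative, modelled on the existing nfp_is_br():

#include <linux/bitfield.h>
#include <linux/types.h>

/* Illustrative: build a branch-ALU instruction word from its fields
 * and recognize one, using the masks introduced above.
 */
static u64 encode_br_alu(u16 a_src, u16 b_src, u8 defer, u16 imm_hi)
{
	u64 insn = OP_BR_ALU_BASE;

	insn |= FIELD_PREP(OP_BR_ALU_A_SRC, a_src);
	insn |= FIELD_PREP(OP_BR_ALU_B_SRC, b_src);
	insn |= FIELD_PREP(OP_BR_ALU_DEFBR, defer);
	insn |= FIELD_PREP(OP_BR_ALU_IMM_HI, imm_hi);

	return insn;
}

static bool is_br_alu(u64 insn)
{
	return (insn & OP_BR_ALU_BASE_MASK) == OP_BR_ALU_BASE;
}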
- * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2017 Netronome Systems, Inc. */ #include <linux/kernel.h> #include <linux/bitops.h> diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index 4a540c5e27fe..6c10e8d119e4 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2015-2018 Netronome Systems, Inc. 
*/ /* * nfp_main.c @@ -68,6 +38,10 @@ static const struct pci_device_id nfp_pci_device_ids[] = { PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID, PCI_ANY_ID, 0, }, + { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP5000, + PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID, + PCI_ANY_ID, 0, + }, { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP4000, PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID, PCI_ANY_ID, 0, @@ -112,23 +86,18 @@ nfp_pf_map_rtsym(struct nfp_pf *pf, const char *name, const char *sym_fmt, int nfp_mbox_cmd(struct nfp_pf *pf, u32 cmd, void *in_data, u64 in_length, void *out_data, u64 out_length) { - unsigned long long addr; unsigned long err_at; u64 max_data_sz; u32 val = 0; - u32 cpp_id; int n, err; if (!pf->mbox) return -EOPNOTSUPP; - cpp_id = NFP_CPP_ISLAND_ID(pf->mbox->target, NFP_CPP_ACTION_RW, 0, - pf->mbox->domain); - addr = pf->mbox->addr; - max_data_sz = pf->mbox->size - NFP_MBOX_SYM_MIN_SIZE; + max_data_sz = nfp_rtsym_size(pf->mbox) - NFP_MBOX_SYM_MIN_SIZE; /* Check if cmd field is clear */ - err = nfp_cpp_readl(pf->cpp, cpp_id, addr + NFP_MBOX_CMD, &val); + err = nfp_rtsym_readl(pf->cpp, pf->mbox, NFP_MBOX_CMD, &val); if (err || val) { nfp_warn(pf->cpp, "failed to issue command (%u): %u, err: %d\n", cmd, val, err); @@ -136,30 +105,29 @@ int nfp_mbox_cmd(struct nfp_pf *pf, u32 cmd, void *in_data, u64 in_length, } in_length = min(in_length, max_data_sz); - n = nfp_cpp_write(pf->cpp, cpp_id, addr + NFP_MBOX_DATA, - in_data, in_length); + n = nfp_rtsym_write(pf->cpp, pf->mbox, NFP_MBOX_DATA, in_data, + in_length); if (n != in_length) return -EIO; /* Write data_len and wipe reserved */ - err = nfp_cpp_writeq(pf->cpp, cpp_id, addr + NFP_MBOX_DATA_LEN, - in_length); + err = nfp_rtsym_writeq(pf->cpp, pf->mbox, NFP_MBOX_DATA_LEN, in_length); if (err) return err; /* Read back for ordering */ - err = nfp_cpp_readl(pf->cpp, cpp_id, addr + NFP_MBOX_DATA_LEN, &val); + err = nfp_rtsym_readl(pf->cpp, pf->mbox, NFP_MBOX_DATA_LEN, &val); if (err) return err; /* Write cmd and wipe return value */ - err = nfp_cpp_writeq(pf->cpp, cpp_id, addr + NFP_MBOX_CMD, cmd); + err = nfp_rtsym_writeq(pf->cpp, pf->mbox, NFP_MBOX_CMD, cmd); if (err) return err; err_at = jiffies + 5 * HZ; while (true) { /* Wait for command to go to 0 (NFP_MBOX_NO_CMD) */ - err = nfp_cpp_readl(pf->cpp, cpp_id, addr + NFP_MBOX_CMD, &val); + err = nfp_rtsym_readl(pf->cpp, pf->mbox, NFP_MBOX_CMD, &val); if (err) return err; if (!val) @@ -172,18 +140,18 @@ int nfp_mbox_cmd(struct nfp_pf *pf, u32 cmd, void *in_data, u64 in_length, } /* Copy output if any (could be error info, do it before reading ret) */ - err = nfp_cpp_readl(pf->cpp, cpp_id, addr + NFP_MBOX_DATA_LEN, &val); + err = nfp_rtsym_readl(pf->cpp, pf->mbox, NFP_MBOX_DATA_LEN, &val); if (err) return err; out_length = min_t(u32, val, min(out_length, max_data_sz)); - n = nfp_cpp_read(pf->cpp, cpp_id, addr + NFP_MBOX_DATA, - out_data, out_length); + n = nfp_rtsym_read(pf->cpp, pf->mbox, NFP_MBOX_DATA, + out_data, out_length); if (n != out_length) return -EIO; /* Check if there is an error */ - err = nfp_cpp_readl(pf->cpp, cpp_id, addr + NFP_MBOX_RET, &val); + err = nfp_rtsym_readl(pf->cpp, pf->mbox, NFP_MBOX_RET, &val); if (err) return err; if (val) @@ -441,8 +409,11 @@ nfp_fw_load(struct pci_dev *pdev, struct nfp_pf *pf, struct nfp_nsp *nsp) } fw = nfp_net_fw_find(pdev, pf); - if (!fw) + if (!fw) { + if (nfp_nsp_has_stored_fw_load(nsp)) + nfp_nsp_load_stored_fw(nsp); return 0; + } dev_info(&pdev->dev, "Soft-reset, loading FW image\n"); err = nfp_nsp_device_soft_reset(nsp); @@ -453,7 +424,6 @@ 
nfp_fw_load(struct pci_dev *pdev, struct nfp_pf *pf, struct nfp_nsp *nsp) } err = nfp_nsp_load_fw(nsp, fw); - if (err < 0) { dev_err(&pdev->dev, "FW loading failed: %d\n", err); goto exit_release_fw; @@ -566,9 +536,9 @@ static int nfp_pf_find_rtsyms(struct nfp_pf *pf) /* Optional per-PCI PF mailbox */ snprintf(pf_symbol, sizeof(pf_symbol), NFP_MBOX_SYM_NAME, pf_id); pf->mbox = nfp_rtsym_lookup(pf->rtbl, pf_symbol); - if (pf->mbox && pf->mbox->size < NFP_MBOX_SYM_MIN_SIZE) { + if (pf->mbox && nfp_rtsym_size(pf->mbox) < NFP_MBOX_SYM_MIN_SIZE) { nfp_err(pf->cpp, "PF mailbox symbol too small: %llu < %d\n", - pf->mbox->size, NFP_MBOX_SYM_MIN_SIZE); + nfp_rtsym_size(pf->mbox), NFP_MBOX_SYM_MIN_SIZE); return -EINVAL; } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h index 595b3dc280e3..a3613a2e0aa5 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2015-2018 Netronome Systems, Inc. */ /* * nfp_main.h diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h index 439e6ffe2f05..6f0c37d09256 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. 
Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2015-2018 Netronome Systems, Inc. */ /* * nfp_net.h diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index c6d29fdbb880..6bddfcfdec34 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2015-2018 Netronome Systems, Inc. 
*/ /* * nfp_net_common.c @@ -2094,10 +2064,10 @@ static void nfp_ctrl_poll(unsigned long arg) { struct nfp_net_r_vector *r_vec = (void *)arg; - spin_lock_bh(&r_vec->lock); + spin_lock(&r_vec->lock); nfp_net_tx_complete(r_vec->tx_ring, 0); __nfp_ctrl_tx_queued(r_vec); - spin_unlock_bh(&r_vec->lock); + spin_unlock(&r_vec->lock); if (nfp_ctrl_rx(r_vec)) { nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry); @@ -2187,9 +2157,13 @@ nfp_net_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring) tx_ring->size = array_size(tx_ring->cnt, sizeof(*tx_ring->txds)); tx_ring->txds = dma_zalloc_coherent(dp->dev, tx_ring->size, - &tx_ring->dma, GFP_KERNEL); - if (!tx_ring->txds) + &tx_ring->dma, + GFP_KERNEL | __GFP_NOWARN); + if (!tx_ring->txds) { + netdev_warn(dp->netdev, "failed to allocate TX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n", + tx_ring->cnt); goto err_alloc; + } tx_ring->txbufs = kvcalloc(tx_ring->cnt, sizeof(*tx_ring->txbufs), GFP_KERNEL); @@ -2341,9 +2315,13 @@ nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring) rx_ring->cnt = dp->rxd_cnt; rx_ring->size = array_size(rx_ring->cnt, sizeof(*rx_ring->rxds)); rx_ring->rxds = dma_zalloc_coherent(dp->dev, rx_ring->size, - &rx_ring->dma, GFP_KERNEL); - if (!rx_ring->rxds) + &rx_ring->dma, + GFP_KERNEL | __GFP_NOWARN); + if (!rx_ring->rxds) { + netdev_warn(dp->netdev, "failed to allocate RX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n", + rx_ring->cnt); goto err_alloc; + } rx_ring->rxbufs = kvcalloc(rx_ring->cnt, sizeof(*rx_ring->rxbufs), GFP_KERNEL); @@ -3159,6 +3137,7 @@ static void nfp_net_stat64(struct net_device *netdev, struct nfp_net *nn = netdev_priv(netdev); int r; + /* Collect software stats */ for (r = 0; r < nn->max_r_vecs; r++) { struct nfp_net_r_vector *r_vec = &nn->r_vecs[r]; u64 data[3]; @@ -3184,6 +3163,14 @@ static void nfp_net_stat64(struct net_device *netdev, stats->tx_bytes += data[1]; stats->tx_errors += data[2]; } + + /* Add in device stats */ + stats->multicast += nn_readq(nn, NFP_NET_CFG_STATS_RX_MC_FRAMES); + stats->rx_dropped += nn_readq(nn, NFP_NET_CFG_STATS_RX_DISCARDS); + stats->rx_errors += nn_readq(nn, NFP_NET_CFG_STATS_RX_ERRORS); + + stats->tx_dropped += nn_readq(nn, NFP_NET_CFG_STATS_TX_DISCARDS); + stats->tx_errors += nn_readq(nn, NFP_NET_CFG_STATS_TX_ERRORS); } static int nfp_net_set_features(struct net_device *netdev, @@ -3751,15 +3738,18 @@ static void nfp_net_netdev_init(struct nfp_net *nn) } if (nn->cap & NFP_NET_CFG_CTRL_RSS_ANY) netdev->hw_features |= NETIF_F_RXHASH; - if (nn->cap & NFP_NET_CFG_CTRL_VXLAN && - nn->cap & NFP_NET_CFG_CTRL_NVGRE) { + if (nn->cap & NFP_NET_CFG_CTRL_VXLAN) { if (nn->cap & NFP_NET_CFG_CTRL_LSO) - netdev->hw_features |= NETIF_F_GSO_GRE | - NETIF_F_GSO_UDP_TUNNEL; - nn->dp.ctrl |= NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE; - - netdev->hw_enc_features = netdev->hw_features; + netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL; + nn->dp.ctrl |= NFP_NET_CFG_CTRL_VXLAN; + } + if (nn->cap & NFP_NET_CFG_CTRL_NVGRE) { + if (nn->cap & NFP_NET_CFG_CTRL_LSO) + netdev->hw_features |= NETIF_F_GSO_GRE; + nn->dp.ctrl |= NFP_NET_CFG_CTRL_NVGRE; } + if (nn->cap & (NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE)) + netdev->hw_enc_features = netdev->hw_features; netdev->vlan_features = netdev->hw_features; @@ -3864,10 +3854,20 @@ int nfp_net_init(struct nfp_net *nn) return err; /* Set default MTU and Freelist buffer size */ - if (nn->max_mtu < 
NFP_NET_DEFAULT_MTU) + if (!nfp_net_is_data_vnic(nn) && nn->app->ctrl_mtu) { + if (nn->app->ctrl_mtu <= nn->max_mtu) { + nn->dp.mtu = nn->app->ctrl_mtu; + } else { + if (nn->app->ctrl_mtu != NFP_APP_CTRL_MTU_MAX) + nn_warn(nn, "app requested MTU above max supported %u > %u\n", + nn->app->ctrl_mtu, nn->max_mtu); + nn->dp.mtu = nn->max_mtu; + } + } else if (nn->max_mtu < NFP_NET_DEFAULT_MTU) { nn->dp.mtu = nn->max_mtu; - else + } else { nn->dp.mtu = NFP_NET_DEFAULT_MTU; + } nn->dp.fl_bufsz = nfp_net_calc_fl_bufsz(&nn->dp); if (nfp_app_ctrl_uses_data_vnics(nn->app)) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c index 1f9149bb2ae6..f2aaef976c7d 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2018 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2018 Netronome Systems, Inc. */ #include <linux/bitfield.h> #include <linux/device.h> @@ -113,6 +83,13 @@ int nfp_net_tlv_caps_parse(struct device *dev, u8 __iomem *ctrl_mem, caps->mbox_len = length; } break; + case NFP_NET_CFG_TLV_TYPE_EXPERIMENTAL0: + case NFP_NET_CFG_TLV_TYPE_EXPERIMENTAL1: + dev_warn(dev, + "experimental TLV type:%u offset:%u len:%u\n", + FIELD_GET(NFP_NET_CFG_TLV_HEADER_TYPE, hdr), + offset, length); + break; default: if (!FIELD_GET(NFP_NET_CFG_TLV_HEADER_REQUIRED, hdr)) break; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h index 44d3ea75d043..d7c8518ac952 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2018 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. 
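The nfp_net_init() hunk above replaces a two-way MTU default with a three-way choice: a control vNIC takes the app's requested ctrl_mtu when the device can honour it, an over-sized request falls back to max_mtu (silently when the app asked for the NFP_APP_CTRL_MTU_MAX "as large as possible" sentinel, with a warning otherwise), and data vNICs keep the old min(max_mtu, default) behaviour. The same decision restated as a standalone helper; mtu_select() is an illustrative name, not a driver function:

/* Restatement of the MTU choice in nfp_net_init() above.
 * ctrl_mtu == 0 means the app expressed no preference; U32_MAX
 * (NFP_APP_CTRL_MTU_MAX) means "as large as the device allows".
 */
static unsigned int mtu_select(bool data_vnic, unsigned int ctrl_mtu,
			       unsigned int max_mtu, unsigned int def_mtu)
{
	if (!data_vnic && ctrl_mtu) {
		if (ctrl_mtu <= max_mtu)
			return ctrl_mtu;
		return max_mtu;		/* clamp over-sized requests */
	}

	return max_mtu < def_mtu ? max_mtu : def_mtu;
}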
- * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2015-2018 Netronome Systems, Inc. */ /* * nfp_net_ctrl.h @@ -264,7 +234,6 @@ * %NFP_NET_CFG_BPF_ADDR: DMA address of the buffer with JITed BPF code */ #define NFP_NET_CFG_BPF_ABI 0x0080 -#define NFP_NET_BPF_ABI 2 #define NFP_NET_CFG_BPF_CAP 0x0081 #define NFP_NET_BPF_CAP_RELO (1 << 0) /* seamless reload */ #define NFP_NET_CFG_BPF_MAX_LEN 0x0082 @@ -489,12 +458,20 @@ * %NFP_NET_CFG_TLV_TYPE_MBOX: * Variable, mailbox area. Overwrites the default location which is * %NFP_NET_CFG_MBOX_BASE and length %NFP_NET_CFG_MBOX_VAL_MAX_SZ. + * + * %NFP_NET_CFG_TLV_TYPE_EXPERIMENTAL0: + * %NFP_NET_CFG_TLV_TYPE_EXPERIMENTAL1: + * Variable, experimental IDs. IDs designated for internal development and + * experiments before a stable TLV ID has been allocated to a feature. Should + * never be present in production firmware. */ #define NFP_NET_CFG_TLV_TYPE_UNKNOWN 0 #define NFP_NET_CFG_TLV_TYPE_RESERVED 1 #define NFP_NET_CFG_TLV_TYPE_END 2 #define NFP_NET_CFG_TLV_TYPE_ME_FREQ 3 #define NFP_NET_CFG_TLV_TYPE_MBOX 4 +#define NFP_NET_CFG_TLV_TYPE_EXPERIMENTAL0 5 +#define NFP_NET_CFG_TLV_TYPE_EXPERIMENTAL1 6 struct device; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c index bb8ed460086e..769ceef09756 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. 
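The experimental TLV documentation above follows the general shape of the config-BAR TLV area: a stream of 32-bit headers, each packing a REQUIRED bit, a type and a payload length, terminated by TYPE_END. A hedged sketch of the walk; NFP_NET_CFG_TLV_HEADER_LENGTH is assumed here by analogy with the _TYPE and _REQUIRED masks that do appear in the hunks, and the real parser is nfp_net_tlv_caps_parse():

#include <linux/bitfield.h>
#include <linux/errno.h>
#include <linux/io.h>

/* Sketch of a TLV walk over the config BAR. This version only skips
 * records, failing when it meets a record marked REQUIRED that it
 * does not handle, mirroring the default: case of
 * nfp_net_tlv_caps_parse() above.
 */
static int tlv_walk(u8 __iomem *data, u8 __iomem *end)
{
	while (data + 4 <= end) {
		u32 hdr = readl(data);
		u32 type = FIELD_GET(NFP_NET_CFG_TLV_HEADER_TYPE, hdr);
		u32 len = FIELD_GET(NFP_NET_CFG_TLV_HEADER_LENGTH, hdr);

		if (type == NFP_NET_CFG_TLV_TYPE_END)
			return 0;
		if (FIELD_GET(NFP_NET_CFG_TLV_HEADER_REQUIRED, hdr))
			return -EINVAL;	/* mandatory but unhandled */

		data += 4 + len;	/* 32-bit header, then payload */
	}

	return -EINVAL;			/* area ended without TYPE_END */
}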
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2017-2018 Netronome Systems, Inc. */ #include <linux/ethtool.h> #include <linux/vmalloc.h> @@ -188,25 +158,21 @@ nfp_net_dump_load_dumpspec(struct nfp_cpp *cpp, struct nfp_rtsym_table *rtbl) const struct nfp_rtsym *specsym; struct nfp_dumpspec *dumpspec; int bytes_read; - u32 cpp_id; + u64 sym_size; specsym = nfp_rtsym_lookup(rtbl, NFP_DUMP_SPEC_RTSYM); if (!specsym) return NULL; + sym_size = nfp_rtsym_size(specsym); /* expected size of this buffer is in the order of tens of kilobytes */ - dumpspec = vmalloc(sizeof(*dumpspec) + specsym->size); + dumpspec = vmalloc(sizeof(*dumpspec) + sym_size); if (!dumpspec) return NULL; + dumpspec->size = sym_size; - dumpspec->size = specsym->size; - - cpp_id = NFP_CPP_ISLAND_ID(specsym->target, NFP_CPP_ACTION_RW, 0, - specsym->domain); - - bytes_read = nfp_cpp_read(cpp, cpp_id, specsym->addr, dumpspec->data, - specsym->size); - if (bytes_read != specsym->size) { + bytes_read = nfp_rtsym_read(cpp, specsym, 0, dumpspec->data, sym_size); + if (bytes_read != sym_size) { vfree(dumpspec); nfp_warn(cpp, "Debug dump specification read failed.\n"); return NULL; @@ -266,7 +232,6 @@ nfp_calc_rtsym_dump_sz(struct nfp_pf *pf, struct nfp_dump_tl *spec) struct nfp_dumpspec_rtsym *spec_rtsym; const struct nfp_rtsym *sym; u32 tl_len, key_len; - u32 size; spec_rtsym = (struct nfp_dumpspec_rtsym *)spec; tl_len = be32_to_cpu(spec->length); @@ -278,13 +243,8 @@ nfp_calc_rtsym_dump_sz(struct nfp_pf *pf, struct nfp_dump_tl *spec) if (!sym) return nfp_dump_error_tlv_size(spec); - if (sym->type == NFP_RTSYM_TYPE_ABS) - size = sizeof(sym->addr); - else - size = sym->size; - return ALIGN8(offsetof(struct nfp_dump_rtsym, rtsym) + key_len + 1) + - ALIGN8(size); + ALIGN8(nfp_rtsym_size(sym)); } static int @@ -644,7 +604,6 @@ nfp_dump_single_rtsym(struct nfp_pf *pf, struct nfp_dumpspec_rtsym *spec, const struct nfp_rtsym *sym; u32 tl_len, key_len; int bytes_read; - u32 cpp_id; void *dest; int err; @@ -657,11 +616,7 @@ nfp_dump_single_rtsym(struct nfp_pf *pf, struct nfp_dumpspec_rtsym *spec, if (!sym) return nfp_dump_error_tlv(&spec->tl, -ENOENT, dump); - if (sym->type == NFP_RTSYM_TYPE_ABS) - sym_size = sizeof(sym->addr); - else - sym_size = sym->size; - + sym_size = nfp_rtsym_size(sym); header_size = ALIGN8(offsetof(struct nfp_dump_rtsym, rtsym) + key_len + 1); total_size = header_size + ALIGN8(sym_size); @@ -676,23 +631,20 @@ nfp_dump_single_rtsym(struct nfp_pf *pf, struct nfp_dumpspec_rtsym *spec, memcpy(dump_header->rtsym, spec->rtsym, key_len + 1); dump_header->cpp.dump_length = cpu_to_be32(sym_size); - if (sym->type == NFP_RTSYM_TYPE_ABS) { - *(u64 *)dest = sym->addr; - } else { + if (sym->type != NFP_RTSYM_TYPE_ABS) { cpp_params.target = sym->target; cpp_params.action = NFP_CPP_ACTION_RW; cpp_params.token = 0; cpp_params.island = sym->domain; - cpp_id = nfp_get_numeric_cpp_id(&cpp_params); dump_header->cpp.cpp_id = cpp_params; dump_header->cpp.offset = cpu_to_be32(sym->addr); - bytes_read = 
nfp_cpp_read(pf->cpp, cpp_id, sym->addr, dest, - sym_size); - if (bytes_read != sym_size) { - if (bytes_read >= 0) - bytes_read = -EIO; - dump_header->error = cpu_to_be32(bytes_read); - } + } + + bytes_read = nfp_rtsym_read(pf->cpp, sym, 0, dest, sym_size); + if (bytes_read != sym_size) { + if (bytes_read >= 0) + bytes_read = -EIO; + dump_header->error = cpu_to_be32(bytes_read); } return 0; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c index 099b63d67451..69b1c9b62e3d 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2015-2018 Netronome Systems, Inc. */ #include <linux/debugfs.h> #include <linux/module.h> #include <linux/rtnetlink.h> diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index 6a79c8e4a7a4..cb9c512abc76 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. 
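
The hunks above delete the same open-coded size computation from nfp_calc_rtsym_dump_sz() and nfp_dump_single_rtsym(), and route all three reads through the new nfp_rtsym_read(). The removed lines show exactly what the new nfp_rtsym_size() helper must return, so the logic can be restated as a standalone sketch; the struct is trimmed to the fields this code touches, and the enum matches the one introduced in nfp_nffw.h later in this patch.

#include <stdint.h>

enum nfp_rtsym_type {
	NFP_RTSYM_TYPE_NONE	= 0,
	NFP_RTSYM_TYPE_OBJECT	= 1,
	NFP_RTSYM_TYPE_FUNCTION	= 2,
	NFP_RTSYM_TYPE_ABS	= 3,
};

struct nfp_rtsym {	/* trimmed to the fields used here */
	uint64_t addr;
	uint64_t size;
	enum nfp_rtsym_type type;
};

static uint64_t rtsym_size(const struct nfp_rtsym *sym)
{
	/* An absolute symbol has no backing memory; its value lives in
	 * ->addr, so its readable "contents" are that one address word.
	 */
	if (sym->type == NFP_RTSYM_TYPE_ABS)
		return sizeof(sym->addr);
	return sym->size;
}

Since nfp_dump_single_rtsym() now calls nfp_rtsym_read() unconditionally, that helper evidently also absorbs the ABS special case the old code handled by hand (*(u64 *)dest = sym->addr), which is why the dump path can drop its cpp_id plumbing entirely.
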
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2015-2018 Netronome Systems, Inc. */ /* * nfp_net_ethtool.c diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c index 28516eecccc8..1e7d20468a34 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2015-2018 Netronome Systems, Inc. */ /* * nfp_net_main.c @@ -470,8 +440,8 @@ static void nfp_net_pci_unmap_mem(struct nfp_pf *pf) static int nfp_net_pci_map_mem(struct nfp_pf *pf) { + u32 min_size, cpp_id; u8 __iomem *mem; - u32 min_size; int err; min_size = pf->max_data_vnics * NFP_PF_CSR_SLICE_SIZE; @@ -519,9 +489,9 @@ static int nfp_net_pci_map_mem(struct nfp_pf *pf) pf->vfcfg_tbl2 = NULL; } - mem = nfp_cpp_map_area(pf->cpp, "net.qc", 0, 0, - NFP_PCIE_QUEUE(0), NFP_QCP_QUEUE_AREA_SZ, - &pf->qc_area); + cpp_id = NFP_CPP_ISLAND_ID(0, NFP_CPP_ACTION_RW, 0, 0); + mem = nfp_cpp_map_area(pf->cpp, "net.qc", cpp_id, NFP_PCIE_QUEUE(0), + NFP_QCP_QUEUE_AREA_SZ, &pf->qc_area); if (IS_ERR(mem)) { nfp_err(pf->cpp, "Failed to map Queue Controller area.\n"); err = PTR_ERR(mem); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c index 18a09cdcd9c6..c09b893c30dd 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017 Netronome Systems, Inc. 
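
The nfp_net_main.c hunk above is the caller side of an nfp_cpp_map_area() signature change (the helper side appears in nfp_cpplib.c further down): instead of passing domain and target separately, callers now pack the full CPP ID themselves. A minimal sketch of the new pattern; the macro body and action value below are assumed stand-ins for the real definitions in nfp_cpp.h, kept only so the fragment compiles on its own.

#include <stdint.h>

#define NFP_CPP_ACTION_RW	32	/* assumed value */
#define NFP_CPP_ISLAND_ID(target, action, token, island)		   \
	(((uint32_t)((target) & 0x7f) << 24) | (((action) & 0xff) << 8) | \
	 (((token) & 0xff) << 16) | ((uint32_t)(island) & 0xff))

static uint32_t qc_area_cpp_id(void)
{
	/* mirrors the new caller in nfp_net_pci_map_mem():
	 * target 0, read/write action, token 0, island 0
	 */
	return NFP_CPP_ISLAND_ID(0, NFP_CPP_ACTION_RW, 0, 0);
}

Pushing the packing out to callers means nfp_cpp_map_area() no longer hard-codes NFP_CPP_ACTION_RW, so a future caller can map an area with a different action or token without another signature change.
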
- * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2017-2018 Netronome Systems, Inc. */ #include <linux/etherdevice.h> #include <linux/io-64-nonatomic-hi-lo.h> diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h index 1bf2b18109ab..c412b94bfb97 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2017-2018 Netronome Systems, Inc. 
*/ #ifndef NFP_NET_REPR_H #define NFP_NET_REPR_H diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c index 8b1b962cf1d1..b6ec46ed0540 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2017 Netronome Systems, Inc. */ #include <linux/bitfield.h> #include <linux/errno.h> diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h index e9df9d1eab8e..c9f09c5bb5ee 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2017 Netronome Systems, Inc. */ #ifndef _NFP_NET_SRIOV_H_ #define _NFP_NET_SRIOV_H_ diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c index 68928c86b698..d2c1e9ea5668 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2015-2018 Netronome Systems, Inc. */ /* * nfp_netvf_main.c diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.c b/drivers/net/ethernet/netronome/nfp/nfp_port.c index 9c1298114c70..86bc149ca231 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_port.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_port.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2017-2018 Netronome Systems, Inc. */ #include <linux/lockdep.h> #include <linux/netdevice.h> diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.h b/drivers/net/ethernet/netronome/nfp/nfp_port.h index 51f10ae2d53e..b2479a2a49e5 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_port.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_port.h @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2017-2018 Netronome Systems, Inc. */ #ifndef _NFP_PORT_H_ #define _NFP_PORT_H_ diff --git a/drivers/net/ethernet/netronome/nfp/nfp_shared_buf.c b/drivers/net/ethernet/netronome/nfp/nfp_shared_buf.c index 0ecd83705368..814360ed3a20 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_shared_buf.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_shared_buf.c @@ -1,36 +1,5 @@ -// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) -/* - * Copyright (C) 2018 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. 
Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2018 Netronome Systems, Inc. */ #include <linux/kernel.h> #include <net/devlink.h> diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/crc32.h b/drivers/net/ethernet/netronome/nfp/nfpcore/crc32.h index 6cee6382deb4..afab6f0fc564 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/crc32.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/crc32.h @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2015-2017 Netronome Systems, Inc. */ #ifndef NFP_CRC32_H #define NFP_CRC32_H diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h index f44d0a857314..db94b0bddc92 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. 
- * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2015-2018 Netronome Systems, Inc. */ /* * nfp.h diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp6000.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp6000.h index 0e497a6154db..4a12133850f5 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp6000.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp6000.h @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2015-2017 Netronome Systems, Inc. */ #ifndef NFP6000_NFP6000_H #define NFP6000_NFP6000_H diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp_xpb.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp_xpb.h index 40fb19939505..9a86ec11c5ba 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp_xpb.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp_xpb.h @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. 
- * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2015-2017 Netronome Systems, Inc. */ /* * nfp_xpb.h diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c index c8d0b1016a64..85d46f206b3c 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2015-2018 Netronome Systems, Inc. */ /* * nfp6000_pcie.c @@ -138,6 +108,7 @@ /* The number of explicit BARs to reserve. * Minimum is 0, maximum is 4 on the NFP6000. + * The NFP3800 can have only one per PF. 
*/ #define NFP_PCIE_EXPLICIT_BARS 2 @@ -589,8 +560,8 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface) NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT3), }; char status_msg[196] = {}; + int i, err, bars_free; struct nfp_bar *bar; - int i, bars_free; int expl_groups; char *msg, *end; @@ -643,6 +614,8 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface) bar->iomem = ioremap_nocache(nfp_bar_resource_start(bar), nfp_bar_resource_len(bar)); if (bar->iomem) { + int pf; + msg += snprintf(msg, end - msg, "0.0: General/MSI-X SRAM, "); atomic_inc(&bar->refcnt); bars_free--; @@ -651,22 +624,40 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface) nfp->expl.data = bar->iomem + NFP_PCIE_SRAM + 0x1000; - if (nfp->pdev->device == PCI_DEVICE_ID_NETRONOME_NFP4000 || - nfp->pdev->device == PCI_DEVICE_ID_NETRONOME_NFP6000) { - nfp->iomem.csr = bar->iomem + NFP_PCIE_BAR(0); - } else { - int pf = nfp->pdev->devfn & 7; - + switch (nfp->pdev->device) { + case PCI_DEVICE_ID_NETRONOME_NFP3800: + pf = nfp->pdev->devfn & 7; nfp->iomem.csr = bar->iomem + NFP_PCIE_BAR(pf); + break; + case PCI_DEVICE_ID_NETRONOME_NFP4000: + case PCI_DEVICE_ID_NETRONOME_NFP5000: + case PCI_DEVICE_ID_NETRONOME_NFP6000: + nfp->iomem.csr = bar->iomem + NFP_PCIE_BAR(0); + break; + default: + dev_err(nfp->dev, "Unsupported device ID: %04hx!\n", + nfp->pdev->device); + err = -EINVAL; + goto err_unmap_bar0; } nfp->iomem.em = bar->iomem + NFP_PCIE_EM; } - if (nfp->pdev->device == PCI_DEVICE_ID_NETRONOME_NFP4000 || - nfp->pdev->device == PCI_DEVICE_ID_NETRONOME_NFP6000) - expl_groups = 4; - else + switch (nfp->pdev->device) { + case PCI_DEVICE_ID_NETRONOME_NFP3800: expl_groups = 1; + break; + case PCI_DEVICE_ID_NETRONOME_NFP4000: + case PCI_DEVICE_ID_NETRONOME_NFP5000: + case PCI_DEVICE_ID_NETRONOME_NFP6000: + expl_groups = 4; + break; + default: + dev_err(nfp->dev, "Unsupported device ID: %04hx!\n", + nfp->pdev->device); + err = -EINVAL; + goto err_unmap_bar0; + } /* Configure, and lock, BAR0.1 for PCIe XPB (MSI-X PBA) */ bar = &nfp->bar[1]; @@ -711,6 +702,11 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface) dev_info(nfp->dev, "%sfree: %d/%d\n", status_msg, bars_free, nfp->bars); return 0; + +err_unmap_bar0: + if (nfp->bar[0].iomem) + iounmap(nfp->bar[0].iomem); + return err; } static void disable_bars(struct nfp6000_pcie *nfp) @@ -1327,7 +1323,7 @@ struct nfp_cpp *nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev) /* Finished with card initialization. */ dev_info(&pdev->dev, - "Netronome Flow Processor NFP4000/NFP6000 PCIe Card Probe\n"); + "Netronome Flow Processor NFP4000/NFP5000/NFP6000 PCIe Card Probe\n"); pcie_print_link_status(pdev); nfp = kzalloc(sizeof(*nfp), GFP_KERNEL); diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h index 245d8aaaa97d..6d1bffa6eac6 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. 
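
The enable_bars() rework above turns two-device if/else chains into explicit switches over the PCI device ID, adds the NFP3800 and NFP5000, and gives unknown IDs a proper unwind path (err_unmap_bar0). Reduced to its essentials, with the non-NFP3800 ID values assumed for the sketch (only the NFP3800's is defined by this series):

#include <stdint.h>

#define PCI_DEVICE_ID_NETRONOME_NFP3800	0x3800	/* from this series */
#define PCI_DEVICE_ID_NETRONOME_NFP4000	0x4000	/* assumed here */
#define PCI_DEVICE_ID_NETRONOME_NFP5000	0x5000	/* assumed here */
#define PCI_DEVICE_ID_NETRONOME_NFP6000	0x6000	/* assumed here */

/* Explicit-BAR group count per device family: the NFP3800 has a single
 * group per PF (see the comment added above), the older parts have four.
 */
static int expl_groups_for(uint16_t device)
{
	switch (device) {
	case PCI_DEVICE_ID_NETRONOME_NFP3800:
		return 1;
	case PCI_DEVICE_ID_NETRONOME_NFP4000:
	case PCI_DEVICE_ID_NETRONOME_NFP5000:
	case PCI_DEVICE_ID_NETRONOME_NFP6000:
		return 4;
	default:
		return -1;	/* caller reports and unwinds */
	}
}

The same device split drives the CSR BAR choice in the hunk: on the NFP3800 each PF gets its own window, selected by the low bits of devfn (pf = devfn & 7), while NFP4000/5000/6000 PFs all use window 0.
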
- * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2015-2017 Netronome Systems, Inc. */ /* * nfp6000_pcie.h diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_arm.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_arm.h index 31fe92247f51..3d172e255693 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_arm.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_arm.h @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2015-2017 Netronome Systems, Inc. */ /* * nfp_arm.h diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h index c338d539fa96..2dd0f5842873 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. 
- * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2015-2018 Netronome Systems, Inc. */ /* * nfp_cpp.h @@ -56,9 +26,16 @@ dev_info(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args) #define nfp_dbg(cpp, fmt, args...) \ dev_dbg(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args) +#define nfp_printk(level, cpp, fmt, args...) \ + dev_printk(level, nfp_cpp_device(cpp)->parent, \ + NFP_SUBSYS ": " fmt, ## args) #define PCI_64BIT_BAR_COUNT 3 +/* NFP hardware vendor/device ids. + */ +#define PCI_DEVICE_ID_NETRONOME_NFP3800 0x3800 + #define NFP_CPP_NUM_TARGETS 16 /* Max size of area it should be safe to request */ #define NFP_CPP_SAFE_AREA_SIZE SZ_2M @@ -226,6 +203,7 @@ void nfp_cpp_free(struct nfp_cpp *cpp); u32 nfp_cpp_model(struct nfp_cpp *cpp); u16 nfp_cpp_interface(struct nfp_cpp *cpp); int nfp_cpp_serial(struct nfp_cpp *cpp, const u8 **serial); +unsigned int nfp_cpp_mu_locality_lsb(struct nfp_cpp *cpp); struct nfp_cpp_area *nfp_cpp_area_alloc_with_name(struct nfp_cpp *cpp, u32 cpp_id, @@ -286,8 +264,8 @@ int nfp_cpp_writeq(struct nfp_cpp *cpp, u32 cpp_id, unsigned long long address, u64 value); u8 __iomem * -nfp_cpp_map_area(struct nfp_cpp *cpp, const char *name, int domain, int target, - u64 addr, unsigned long size, struct nfp_cpp_area **area); +nfp_cpp_map_area(struct nfp_cpp *cpp, const char *name, u32 cpp_id, u64 addr, + unsigned long size, struct nfp_cpp_area **area); struct nfp_cpp_mutex; diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c index 73de57a09800..94994a939277 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. 
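
Besides the NFP3800 device ID and the nfp_cpp_mu_locality_lsb() accessor, the header gains nfp_printk(), which parameterizes the log level that the existing nfp_err()/nfp_warn()/nfp_info()/nfp_dbg() macros hard-code. A hypothetical caller, assuming NFP_SUBSYS is defined by the including file as the existing macros already require:

static void report_fw_check(struct nfp_cpp *cpp, bool fatal)
{
	/* pick severity at runtime instead of choosing a macro */
	nfp_printk(fatal ? KERN_ERR : KERN_INFO, cpp,
		   "firmware state check %s\n", fatal ? "failed" : "ok");
}
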
- * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2015-2018 Netronome Systems, Inc. */ /* * nfp_cppcore.c @@ -75,6 +45,7 @@ struct nfp_cpp_resource { * @interface: chip interface id we are using to reach it * @serial: chip serial number * @imb_cat_table: CPP Mapping Table + * @mu_locality_lsb: MU access type bit offset * * Following fields use explicit locking: * @resource_list: NFP CPP resource list @@ -100,6 +71,7 @@ struct nfp_cpp { wait_queue_head_t waitq; u32 imb_cat_table[16]; + unsigned int mu_locality_lsb; struct mutex area_cache_mutex; struct list_head area_cache_list; @@ -266,6 +238,34 @@ int nfp_cpp_serial(struct nfp_cpp *cpp, const u8 **serial) return sizeof(cpp->serial); } +#define NFP_IMB_TGTADDRESSMODECFG_MODE_of(_x) (((_x) >> 13) & 0x7) +#define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE BIT(12) +#define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE_32_BIT 0 +#define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE_40_BIT BIT(12) + +static int nfp_cpp_set_mu_locality_lsb(struct nfp_cpp *cpp) +{ + unsigned int mode, addr40; + u32 imbcppat; + int res; + + imbcppat = cpp->imb_cat_table[NFP_CPP_TARGET_MU]; + mode = NFP_IMB_TGTADDRESSMODECFG_MODE_of(imbcppat); + addr40 = !!(imbcppat & NFP_IMB_TGTADDRESSMODECFG_ADDRMODE); + + res = nfp_cppat_mu_locality_lsb(mode, addr40); + if (res < 0) + return res; + cpp->mu_locality_lsb = res; + + return 0; +} + +unsigned int nfp_cpp_mu_locality_lsb(struct nfp_cpp *cpp) +{ + return cpp->mu_locality_lsb; +} + /** * nfp_cpp_area_alloc_with_name() - allocate a new CPP area * @cpp: CPP device handle @@ -1241,6 +1241,12 @@ nfp_cpp_from_operations(const struct nfp_cpp_operations *ops, nfp_cpp_readl(cpp, arm, NFP_ARM_GCSR + NFP_ARM_GCSR_SOFTMODEL3, &mask[1]); + err = nfp_cpp_set_mu_locality_lsb(cpp); + if (err < 0) { + dev_err(parent, "Can't calculate MU locality bit offset\n"); + goto err_out; + } + dev_info(cpp->dev.parent, "Model: 0x%08x, SN: %pM, Ifc: 0x%04x\n", nfp_cpp_model(cpp), cpp->serial, nfp_cpp_interface(cpp)); diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c index 20bad05e2e92..3cfecf105bde 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. 
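
nfp_cpp_set_mu_locality_lsb() above computes the MU locality bit offset once, at nfp_cpp_from_operations() time, from the already-cached IMB table entry; the per-call XPB read it replaces is deleted from nfp_nffw.c further down. The decode, restated standalone: the field-extraction macros are taken from the hunk, but the body of cppat_mu_locality_lsb() is an assumed model of nfp_cppat_mu_locality_lsb() (the real mapping lives in the CPP address-translation code), included only to make the sketch complete.

#include <stdint.h>

#define NFP_IMB_TGTADDRESSMODECFG_MODE_of(_x)	(((_x) >> 13) & 0x7)
#define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE	(1u << 12)

/* Assumed: the MU access-type field sits near the top of the address,
 * so its LSB depends only on whether 40-bit addressing is configured.
 */
static int cppat_mu_locality_lsb(unsigned int mode, int addr40)
{
	if (mode > 3)
		return -1;
	return addr40 ? 38 : 30;
}

static int mu_locality_lsb(uint32_t imbcppat)
{
	unsigned int mode = NFP_IMB_TGTADDRESSMODECFG_MODE_of(imbcppat);
	int addr40 = !!(imbcppat & NFP_IMB_TGTADDRESSMODECFG_ADDRMODE);

	return cppat_mu_locality_lsb(mode, addr40);
}

Caching the result is what lets nfp_nffw_info_mip_first() (in the nfp_nffw.c hunk below) shrink to a plain accessor call before masking the access-type bits into the MIP offset.
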
- * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2015-2018 Netronome Systems, Inc. */ /* * nfp_cpplib.c @@ -294,8 +264,7 @@ exit_release: * nfp_cpp_map_area() - Helper function to map an area * @cpp: NFP CPP handler * @name: Name for the area - * @domain: CPP domain - * @target: CPP target + * @cpp_id: CPP ID for operation * @addr: CPP address * @size: Size of the area * @area: Area handle (output) @@ -306,15 +275,12 @@ exit_release: * Return: Pointer to memory mapped area or ERR_PTR */ u8 __iomem * -nfp_cpp_map_area(struct nfp_cpp *cpp, const char *name, int domain, int target, - u64 addr, unsigned long size, struct nfp_cpp_area **area) +nfp_cpp_map_area(struct nfp_cpp *cpp, const char *name, u32 cpp_id, u64 addr, + unsigned long size, struct nfp_cpp_area **area) { u8 __iomem *res; - u32 dest; - - dest = NFP_CPP_ISLAND_ID(target, NFP_CPP_ACTION_RW, 0, domain); - *area = nfp_cpp_area_alloc_acquire(cpp, name, dest, addr, size); + *area = nfp_cpp_area_alloc_acquire(cpp, name, cpp_id, addr, size); if (!*area) goto err_eio; diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c index 063a9a6243d6..f05dd34ab89f 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. 
Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2015-2017 Netronome Systems, Inc. */ /* Parse the hwinfo table that the ARM firmware builds in the ARM scratch SRAM * after chip reset. diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mip.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mip.c index 5f193fe2d69e..79e17943519e 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mip.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mip.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2015-2017 Netronome Systems, Inc. */ /* * nfp_mip.c diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c index c88bf673cb76..7bc17b94ac60 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. 
- * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2015-2018 Netronome Systems, Inc. */ #include <linux/delay.h> #include <linux/device.h> diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c index 40510860341b..d4e02542e2e9 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2015-2018 Netronome Systems, Inc. 
*/ /* * nfp_nffw.c @@ -156,29 +126,6 @@ static u64 nffw_fwinfo_mip_offset_get(const struct nffw_fwinfo *fi) return (mip_off_hi & 0xFF) << 32 | le32_to_cpu(fi->mip_offset_lo); } -#define NFP_IMB_TGTADDRESSMODECFG_MODE_of(_x) (((_x) >> 13) & 0x7) -#define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE BIT(12) -#define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE_32_BIT 0 -#define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE_40_BIT BIT(12) - -static int nfp_mip_mu_locality_lsb(struct nfp_cpp *cpp) -{ - unsigned int mode, addr40; - u32 xpbaddr, imbcppat; - int err; - - /* Hardcoded XPB IMB Base, island 0 */ - xpbaddr = 0x000a0000 + NFP_CPP_TARGET_MU * 4; - err = nfp_xpb_readl(cpp, xpbaddr, &imbcppat); - if (err < 0) - return err; - - mode = NFP_IMB_TGTADDRESSMODECFG_MODE_of(imbcppat); - addr40 = !!(imbcppat & NFP_IMB_TGTADDRESSMODECFG_ADDRMODE); - - return nfp_cppat_mu_locality_lsb(mode, addr40); -} - static unsigned int nffw_res_fwinfos(struct nfp_nffw_info_data *fwinf, struct nffw_fwinfo **arr) { @@ -304,14 +251,7 @@ int nfp_nffw_info_mip_first(struct nfp_nffw_info *state, u32 *cpp_id, u64 *off) *off = nffw_fwinfo_mip_offset_get(fwinfo); if (nffw_fwinfo_mip_mu_da_get(fwinfo)) { - int locality_off; - - if (NFP_CPP_ID_TARGET_of(*cpp_id) != NFP_CPP_TARGET_MU) - return 0; - - locality_off = nfp_mip_mu_locality_lsb(state->cpp); - if (locality_off < 0) - return locality_off; + int locality_off = nfp_cpp_mu_locality_lsb(state->cpp); *off &= ~(NFP_MU_ADDR_ACCESS_TYPE_MASK << locality_off); *off |= NFP_MU_ADDR_ACCESS_TYPE_DIRECT << locality_off; diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h index df599d5b6bb3..49a4d3f56b56 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2015-2018 Netronome Systems, Inc. 
*/ /* * nfp_nffw.h @@ -61,10 +31,12 @@ void nfp_mip_strtab(const struct nfp_mip *mip, u32 *addr, u32 *size); /* Implemented in nfp_rtsym.c */ -#define NFP_RTSYM_TYPE_NONE 0 -#define NFP_RTSYM_TYPE_OBJECT 1 -#define NFP_RTSYM_TYPE_FUNCTION 2 -#define NFP_RTSYM_TYPE_ABS 3 +enum nfp_rtsym_type { + NFP_RTSYM_TYPE_NONE = 0, + NFP_RTSYM_TYPE_OBJECT = 1, + NFP_RTSYM_TYPE_FUNCTION = 2, + NFP_RTSYM_TYPE_ABS = 3, +}; #define NFP_RTSYM_TARGET_NONE 0 #define NFP_RTSYM_TARGET_LMEM -1 @@ -83,7 +55,7 @@ struct nfp_rtsym { const char *name; u64 addr; u64 size; - int type; + enum nfp_rtsym_type type; int target; int domain; }; @@ -98,6 +70,32 @@ const struct nfp_rtsym *nfp_rtsym_get(struct nfp_rtsym_table *rtbl, int idx); const struct nfp_rtsym * nfp_rtsym_lookup(struct nfp_rtsym_table *rtbl, const char *name); +u64 nfp_rtsym_size(const struct nfp_rtsym *rtsym); +int __nfp_rtsym_read(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, + u8 action, u8 token, u64 off, void *buf, size_t len); +int nfp_rtsym_read(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, u64 off, + void *buf, size_t len); +int __nfp_rtsym_readl(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, + u8 action, u8 token, u64 off, u32 *value); +int nfp_rtsym_readl(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, u64 off, + u32 *value); +int __nfp_rtsym_readq(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, + u8 action, u8 token, u64 off, u64 *value); +int nfp_rtsym_readq(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, u64 off, + u64 *value); +int __nfp_rtsym_write(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, + u8 action, u8 token, u64 off, void *buf, size_t len); +int nfp_rtsym_write(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, u64 off, + void *buf, size_t len); +int __nfp_rtsym_writel(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, + u8 action, u8 token, u64 off, u32 value); +int nfp_rtsym_writel(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, u64 off, + u32 value); +int __nfp_rtsym_writeq(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, + u8 action, u8 token, u64 off, u64 value); +int nfp_rtsym_writeq(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, u64 off, + u64 value); + u64 nfp_rtsym_read_le(struct nfp_rtsym_table *rtbl, const char *name, int *error); int nfp_rtsym_write_le(struct nfp_rtsym_table *rtbl, const char *name, diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c index 2abee0fe3a7c..ce1577bbbd2a 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. 
Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2015-2018 Netronome Systems, Inc. */ /* * nfp_nsp.c @@ -87,6 +57,11 @@ #define NSP_CODE_MAJOR GENMASK(15, 12) #define NSP_CODE_MINOR GENMASK(11, 0) +#define NFP_FW_LOAD_RET_MAJOR GENMASK(15, 8) +#define NFP_FW_LOAD_RET_MINOR GENMASK(23, 16) + +#define NFP_HWINFO_LOOKUP_SIZE GENMASK(11, 0) + enum nfp_nsp_cmd { SPCODE_NOOP = 0, /* No operation */ SPCODE_SOFT_RESET = 1, /* Soft reset the NFP */ @@ -100,6 +75,8 @@ enum nfp_nsp_cmd { SPCODE_NSP_WRITE_FLASH = 11, /* Load and flash image from buffer */ SPCODE_NSP_SENSORS = 12, /* Read NSP sensor(s) */ SPCODE_NSP_IDENTIFY = 13, /* Read NSP version */ + SPCODE_FW_STORED = 16, /* If no FW loaded, load flash app FW */ + SPCODE_HWINFO_LOOKUP = 17, /* Lookup HWinfo with overwrites etc. */ }; static const struct { @@ -127,6 +104,40 @@ struct nfp_nsp { void *entries; }; +/** + * struct nfp_nsp_command_arg - NFP command argument structure + * @code: NFP SP Command Code + * @timeout_sec:Timeout value to wait for completion in seconds + * @option: NFP SP Command Argument + * @buff_cpp: NFP SP Buffer CPP Address info + * @buff_addr: NFP SP Buffer Host address + * @error_cb: Callback for interpreting option if error occurred + */ +struct nfp_nsp_command_arg { + u16 code; + unsigned int timeout_sec; + u32 option; + u32 buff_cpp; + u64 buff_addr; + void (*error_cb)(struct nfp_nsp *state, u32 ret_val); +}; + +/** + * struct nfp_nsp_command_buf_arg - NFP command with buffer argument structure + * @arg: NFP command argument structure + * @in_buf: Buffer with data for input + * @in_size: Size of @in_buf + * @out_buf: Buffer for output data + * @out_size: Size of @out_buf + */ +struct nfp_nsp_command_buf_arg { + struct nfp_nsp_command_arg arg; + const void *in_buf; + unsigned int in_size; + void *out_buf; + unsigned int out_size; +}; + struct nfp_cpp *nfp_nsp_cpp(struct nfp_nsp *state) { return state->cpp; @@ -291,11 +302,7 @@ nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg, u32 nsp_cpp, u64 addr, /** * __nfp_nsp_command() - Execute a command on the NFP Service Processor * @state: NFP SP state - * @code: NFP SP Command Code - * @option: NFP SP Command Argument - * @buff_cpp: NFP SP Buffer CPP Address info - * @buff_addr: NFP SP Buffer Host address - * @timeout_sec:Timeout value to wait for completion in seconds + * @arg: NFP command argument structure * * Return: 0 for success with no result * @@ -308,8 +315,7 @@ nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg, u32 nsp_cpp, u64 addr, * -ETIMEDOUT if the NSP took longer than @timeout_sec seconds to complete */ static int -__nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option, u32 buff_cpp, - u64 buff_addr, u32 timeout_sec) +__nfp_nsp_command(struct nfp_nsp *state, const struct nfp_nsp_command_arg *arg) { u64 reg, ret_val, nsp_base, 
nsp_buffer, nsp_status, nsp_command; struct nfp_cpp *cpp = state->cpp; @@ -326,22 +332,22 @@ __nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option, u32 buff_cpp, if (err) return err; - if (!FIELD_FIT(NSP_BUFFER_CPP, buff_cpp >> 8) || - !FIELD_FIT(NSP_BUFFER_ADDRESS, buff_addr)) { + if (!FIELD_FIT(NSP_BUFFER_CPP, arg->buff_cpp >> 8) || + !FIELD_FIT(NSP_BUFFER_ADDRESS, arg->buff_addr)) { nfp_err(cpp, "Host buffer out of reach %08x %016llx\n", - buff_cpp, buff_addr); + arg->buff_cpp, arg->buff_addr); return -EINVAL; } err = nfp_cpp_writeq(cpp, nsp_cpp, nsp_buffer, - FIELD_PREP(NSP_BUFFER_CPP, buff_cpp >> 8) | - FIELD_PREP(NSP_BUFFER_ADDRESS, buff_addr)); + FIELD_PREP(NSP_BUFFER_CPP, arg->buff_cpp >> 8) | + FIELD_PREP(NSP_BUFFER_ADDRESS, arg->buff_addr)); if (err < 0) return err; err = nfp_cpp_writeq(cpp, nsp_cpp, nsp_command, - FIELD_PREP(NSP_COMMAND_OPTION, option) | - FIELD_PREP(NSP_COMMAND_CODE, code) | + FIELD_PREP(NSP_COMMAND_OPTION, arg->option) | + FIELD_PREP(NSP_COMMAND_CODE, arg->code) | FIELD_PREP(NSP_COMMAND_START, 1)); if (err < 0) return err; @@ -351,16 +357,16 @@ __nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option, u32 buff_cpp, NSP_COMMAND_START, 0, NFP_NSP_TIMEOUT_DEFAULT); if (err) { nfp_err(cpp, "Error %d waiting for code 0x%04x to start\n", - err, code); + err, arg->code); return err; } /* Wait for NSP_STATUS_BUSY to go to 0 */ err = nfp_nsp_wait_reg(cpp, &reg, nsp_cpp, nsp_status, NSP_STATUS_BUSY, - 0, timeout_sec); + 0, arg->timeout_sec ?: NFP_NSP_TIMEOUT_DEFAULT); if (err) { nfp_err(cpp, "Error %d waiting for code 0x%04x to complete\n", - err, code); + err, arg->code); return err; } @@ -372,26 +378,28 @@ __nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option, u32 buff_cpp, err = FIELD_GET(NSP_STATUS_RESULT, reg); if (err) { nfp_warn(cpp, "Result (error) code set: %d (%d) command: %d\n", - -err, (int)ret_val, code); - nfp_nsp_print_extended_error(state, ret_val); + -err, (int)ret_val, arg->code); + if (arg->error_cb) + arg->error_cb(state, ret_val); + else + nfp_nsp_print_extended_error(state, ret_val); return -err; } return ret_val; } -static int -nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option, u32 buff_cpp, - u64 buff_addr) +static int nfp_nsp_command(struct nfp_nsp *state, u16 code) { - return __nfp_nsp_command(state, code, option, buff_cpp, buff_addr, - NFP_NSP_TIMEOUT_DEFAULT); + const struct nfp_nsp_command_arg arg = { + .code = code, + }; + + return __nfp_nsp_command(state, &arg); } static int -__nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option, - const void *in_buf, unsigned int in_size, void *out_buf, - unsigned int out_size, u32 timeout_sec) +nfp_nsp_command_buf(struct nfp_nsp *nsp, struct nfp_nsp_command_buf_arg *arg) { struct nfp_cpp *cpp = nsp->cpp; unsigned int max_size; @@ -401,7 +409,7 @@ __nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option, if (nsp->ver.minor < 13) { nfp_err(cpp, "NSP: Code 0x%04x with buffer not supported (ABI %hu.%hu)\n", - code, nsp->ver.major, nsp->ver.minor); + arg->arg.code, nsp->ver.major, nsp->ver.minor); return -EOPNOTSUPP; } @@ -412,10 +420,11 @@ __nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option, if (err < 0) return err; - max_size = max(in_size, out_size); + max_size = max(arg->in_size, arg->out_size); if (FIELD_GET(NSP_DFLT_BUFFER_SIZE_MB, reg) * SZ_1M < max_size) { nfp_err(cpp, "NSP: default buffer too small for command 0x%04x (%llu < %u)\n", - code, FIELD_GET(NSP_DFLT_BUFFER_SIZE_MB, reg) * SZ_1M, + arg->arg.code, + FIELD_GET(NSP_DFLT_BUFFER_SIZE_MB, 
reg) * SZ_1M, max_size); return -EINVAL; } @@ -430,27 +439,30 @@ __nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option, cpp_id = FIELD_GET(NSP_DFLT_BUFFER_CPP, reg) << 8; cpp_buf = FIELD_GET(NSP_DFLT_BUFFER_ADDRESS, reg); - if (in_buf && in_size) { - err = nfp_cpp_write(cpp, cpp_id, cpp_buf, in_buf, in_size); + if (arg->in_buf && arg->in_size) { + err = nfp_cpp_write(cpp, cpp_id, cpp_buf, + arg->in_buf, arg->in_size); if (err < 0) return err; } /* Zero out remaining part of the buffer */ - if (out_buf && out_size && out_size > in_size) { - memset(out_buf, 0, out_size - in_size); - err = nfp_cpp_write(cpp, cpp_id, cpp_buf + in_size, - out_buf, out_size - in_size); + if (arg->out_buf && arg->out_size && arg->out_size > arg->in_size) { + memset(arg->out_buf, 0, arg->out_size - arg->in_size); + err = nfp_cpp_write(cpp, cpp_id, cpp_buf + arg->in_size, + arg->out_buf, arg->out_size - arg->in_size); if (err < 0) return err; } - ret = __nfp_nsp_command(nsp, code, option, cpp_id, cpp_buf, - timeout_sec); + arg->arg.buff_cpp = cpp_id; + arg->arg.buff_addr = cpp_buf; + ret = __nfp_nsp_command(nsp, &arg->arg); if (ret < 0) return ret; - if (out_buf && out_size) { - err = nfp_cpp_read(cpp, cpp_id, cpp_buf, out_buf, out_size); + if (arg->out_buf && arg->out_size) { + err = nfp_cpp_read(cpp, cpp_id, cpp_buf, + arg->out_buf, arg->out_size); if (err < 0) return err; } @@ -458,16 +470,6 @@ __nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option, return ret; } -static int -nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option, - const void *in_buf, unsigned int in_size, void *out_buf, - unsigned int out_size) -{ - return __nfp_nsp_command_buf(nsp, code, option, in_buf, in_size, - out_buf, out_size, - NFP_NSP_TIMEOUT_DEFAULT); -} - int nfp_nsp_wait(struct nfp_nsp *state) { const unsigned long wait_until = jiffies + NFP_NSP_TIMEOUT_BOOT * HZ; @@ -479,7 +481,7 @@ int nfp_nsp_wait(struct nfp_nsp *state) for (;;) { const unsigned long start_time = jiffies; - err = nfp_nsp_command(state, SPCODE_NOOP, 0, 0, 0); + err = nfp_nsp_command(state, SPCODE_NOOP); if (err != -EAGAIN) break; @@ -501,53 +503,211 @@ int nfp_nsp_wait(struct nfp_nsp *state) int nfp_nsp_device_soft_reset(struct nfp_nsp *state) { - return nfp_nsp_command(state, SPCODE_SOFT_RESET, 0, 0, 0); + return nfp_nsp_command(state, SPCODE_SOFT_RESET); } int nfp_nsp_mac_reinit(struct nfp_nsp *state) { - return nfp_nsp_command(state, SPCODE_MAC_INIT, 0, 0, 0); + return nfp_nsp_command(state, SPCODE_MAC_INIT); +} + +static void nfp_nsp_load_fw_extended_msg(struct nfp_nsp *state, u32 ret_val) +{ + static const char * const major_msg[] = { + /* 0 */ "Firmware from driver loaded", + /* 1 */ "Firmware from flash loaded", + /* 2 */ "Firmware loading failure", + }; + static const char * const minor_msg[] = { + /* 0 */ "", + /* 1 */ "no named partition on flash", + /* 2 */ "error reading from flash", + /* 3 */ "can not deflate", + /* 4 */ "not a trusted file", + /* 5 */ "can not parse FW file", + /* 6 */ "MIP not found in FW file", + /* 7 */ "null firmware name in MIP", + /* 8 */ "FW version none", + /* 9 */ "FW build number none", + /* 10 */ "no FW selection policy HWInfo key found", + /* 11 */ "static FW selection policy", + /* 12 */ "FW version has precedence", + /* 13 */ "different FW application load requested", + /* 14 */ "development build", + }; + unsigned int major, minor; + const char *level; + + major = FIELD_GET(NFP_FW_LOAD_RET_MAJOR, ret_val); + minor = FIELD_GET(NFP_FW_LOAD_RET_MINOR, ret_val); + + if 
(!nfp_nsp_has_stored_fw_load(state)) + return; + + /* Lower the message level in legacy case */ + if (major == 0 && (minor == 0 || minor == 10)) + level = KERN_DEBUG; + else if (major == 2) + level = KERN_ERR; + else + level = KERN_INFO; + + if (major >= ARRAY_SIZE(major_msg)) + nfp_printk(level, state->cpp, "FW loading status: %x\n", + ret_val); + else if (minor >= ARRAY_SIZE(minor_msg)) + nfp_printk(level, state->cpp, "%s, reason code: %d\n", + major_msg[major], minor); + else + nfp_printk(level, state->cpp, "%s%c %s\n", + major_msg[major], minor ? ',' : '.', + minor_msg[minor]); } int nfp_nsp_load_fw(struct nfp_nsp *state, const struct firmware *fw) { - return nfp_nsp_command_buf(state, SPCODE_FW_LOAD, fw->size, fw->data, - fw->size, NULL, 0); + struct nfp_nsp_command_buf_arg load_fw = { + { + .code = SPCODE_FW_LOAD, + .option = fw->size, + .error_cb = nfp_nsp_load_fw_extended_msg, + }, + .in_buf = fw->data, + .in_size = fw->size, + }; + int ret; + + ret = nfp_nsp_command_buf(state, &load_fw); + if (ret < 0) + return ret; + + nfp_nsp_load_fw_extended_msg(state, ret); + return 0; } int nfp_nsp_write_flash(struct nfp_nsp *state, const struct firmware *fw) { - /* The flash time is specified to take a maximum of 70s so we add an - * additional factor to this spec time. - */ - u32 timeout_sec = 2.5 * 70; - - return __nfp_nsp_command_buf(state, SPCODE_NSP_WRITE_FLASH, fw->size, - fw->data, fw->size, NULL, 0, timeout_sec); + struct nfp_nsp_command_buf_arg write_flash = { + { + .code = SPCODE_NSP_WRITE_FLASH, + .option = fw->size, + /* The flash time is specified to take a maximum of 70s + * so we add an additional factor to this spec time. + */ + .timeout_sec = 2.5 * 70, + }, + .in_buf = fw->data, + .in_size = fw->size, + }; + + return nfp_nsp_command_buf(state, &write_flash); } int nfp_nsp_read_eth_table(struct nfp_nsp *state, void *buf, unsigned int size) { - return nfp_nsp_command_buf(state, SPCODE_ETH_RESCAN, size, NULL, 0, - buf, size); + struct nfp_nsp_command_buf_arg eth_rescan = { + { + .code = SPCODE_ETH_RESCAN, + .option = size, + }, + .out_buf = buf, + .out_size = size, + }; + + return nfp_nsp_command_buf(state, &eth_rescan); } int nfp_nsp_write_eth_table(struct nfp_nsp *state, const void *buf, unsigned int size) { - return nfp_nsp_command_buf(state, SPCODE_ETH_CONTROL, size, buf, size, - NULL, 0); + struct nfp_nsp_command_buf_arg eth_ctrl = { + { + .code = SPCODE_ETH_CONTROL, + .option = size, + }, + .in_buf = buf, + .in_size = size, + }; + + return nfp_nsp_command_buf(state, &eth_ctrl); } int nfp_nsp_read_identify(struct nfp_nsp *state, void *buf, unsigned int size) { - return nfp_nsp_command_buf(state, SPCODE_NSP_IDENTIFY, size, NULL, 0, - buf, size); + struct nfp_nsp_command_buf_arg identify = { + { + .code = SPCODE_NSP_IDENTIFY, + .option = size, + }, + .out_buf = buf, + .out_size = size, + }; + + return nfp_nsp_command_buf(state, &identify); } int nfp_nsp_read_sensors(struct nfp_nsp *state, unsigned int sensor_mask, void *buf, unsigned int size) { - return nfp_nsp_command_buf(state, SPCODE_NSP_SENSORS, sensor_mask, - NULL, 0, buf, size); + struct nfp_nsp_command_buf_arg sensors = { + { + .code = SPCODE_NSP_SENSORS, + .option = sensor_mask, + }, + .out_buf = buf, + .out_size = size, + }; + + return nfp_nsp_command_buf(state, &sensors); +} + +int nfp_nsp_load_stored_fw(struct nfp_nsp *state) +{ + const struct nfp_nsp_command_arg arg = { + .code = SPCODE_FW_STORED, + .error_cb = nfp_nsp_load_fw_extended_msg, + }; + int ret; + + ret = __nfp_nsp_command(state, &arg); + if (ret < 0) + 
return ret; + + nfp_nsp_load_fw_extended_msg(state, ret); + return 0; +} + +static int +__nfp_nsp_hwinfo_lookup(struct nfp_nsp *state, void *buf, unsigned int size) +{ + struct nfp_nsp_command_buf_arg hwinfo_lookup = { + { + .code = SPCODE_HWINFO_LOOKUP, + .option = size, + }, + .in_buf = buf, + .in_size = size, + .out_buf = buf, + .out_size = size, + }; + + return nfp_nsp_command_buf(state, &hwinfo_lookup); +} + +int nfp_nsp_hwinfo_lookup(struct nfp_nsp *state, void *buf, unsigned int size) +{ + int err; + + size = min_t(u32, size, NFP_HWINFO_LOOKUP_SIZE); + + err = __nfp_nsp_hwinfo_lookup(state, buf, size); + if (err) + return err; + + if (strnlen(buf, size) == size) { + nfp_err(state->cpp, "NSP HWinfo value not NULL-terminated\n"); + return -EINVAL; + } + + return 0; } diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h index f23d9e06f097..ff33ac54097a 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2015-2018 Netronome Systems, Inc. 
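With the nfp_nsp.c rework above, every service-processor command travels through struct nfp_nsp_command_arg, so per-command behaviour (timeout, error decoding) is data rather than extra function parameters. A sketch of what a new command wrapper looks like under this scheme; SPCODE_EXAMPLE and example_error_cb are hypothetical, the fields are the ones the patch introduces:

    static void example_error_cb(struct nfp_nsp *state, u32 ret_val)
    {
            /* decode command-specific bits of ret_val here */
    }

    static int nfp_nsp_example_cmd(struct nfp_nsp *state, u32 option)
    {
            const struct nfp_nsp_command_arg arg = {
                    .code        = SPCODE_EXAMPLE,  /* hypothetical command code */
                    .option      = option,
                    .timeout_sec = 60,   /* 0 falls back to NFP_NSP_TIMEOUT_DEFAULT */
                    .error_cb    = example_error_cb,
            };

            return __nfp_nsp_command(state, &arg);
    }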
*/ #ifndef NSP_NSP_H #define NSP_NSP_H 1 @@ -50,12 +20,24 @@ int nfp_nsp_device_soft_reset(struct nfp_nsp *state); int nfp_nsp_load_fw(struct nfp_nsp *state, const struct firmware *fw); int nfp_nsp_write_flash(struct nfp_nsp *state, const struct firmware *fw); int nfp_nsp_mac_reinit(struct nfp_nsp *state); +int nfp_nsp_load_stored_fw(struct nfp_nsp *state); +int nfp_nsp_hwinfo_lookup(struct nfp_nsp *state, void *buf, unsigned int size); static inline bool nfp_nsp_has_mac_reinit(struct nfp_nsp *state) { return nfp_nsp_get_abi_ver_minor(state) > 20; } +static inline bool nfp_nsp_has_stored_fw_load(struct nfp_nsp *state) +{ + return nfp_nsp_get_abi_ver_minor(state) > 23; +} + +static inline bool nfp_nsp_has_hwinfo_lookup(struct nfp_nsp *state) +{ + return nfp_nsp_get_abi_ver_minor(state) > 24; +} + enum nfp_eth_interface { NFP_INTERFACE_NONE = 0, NFP_INTERFACE_SFP = 1, diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_cmds.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_cmds.c index 5d362f87af08..0997d127144f 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_cmds.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_cmds.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2017 Netronome Systems, Inc. */ #include <linux/kernel.h> #include <linux/slab.h> diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c index 7ca589660e4d..802c9224bb32 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. 
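nfp_nsp.h above gates the two new commands purely on the NSP ABI minor version, mirroring the existing nfp_nsp_has_mac_reinit() check, and nfp_nsp_hwinfo_lookup() reuses one buffer for the key on input and the NUL-terminated value on output, clamped to NFP_HWINFO_LOOKUP_SIZE. A caller-side sketch under those assumptions (the HWinfo key is illustrative):

    char hwinfo[64];
    int err;

    if (nfp_nsp_has_stored_fw_load(nsp))            /* ABI minor > 23 */
            nfp_nsp_load_stored_fw(nsp);

    if (nfp_nsp_has_hwinfo_lookup(nsp)) {           /* ABI minor > 24 */
            snprintf(hwinfo, sizeof(hwinfo), "board.example"); /* illustrative key */
            err = nfp_nsp_hwinfo_lookup(nsp, hwinfo, sizeof(hwinfo));
            if (!err)
                    pr_info("HWinfo value: %s\n", hwinfo);
    }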
- * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2015-2017 Netronome Systems, Inc. */ /* Authors: David Brunecz <david.brunecz@netronome.com> * Jakub Kicinski <jakub.kicinski@netronome.com> diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c index d32af598da90..ce7492a6a98f 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2015-2018 Netronome Systems, Inc. */ /* * nfp_resource.c diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c index 9e34216578da..75f012444796 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. 
- * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2015-2018 Netronome Systems, Inc. */ /* * nfp_rtsym.c @@ -39,6 +9,8 @@ * Espen Skoglund <espen.skoglund@netronome.com> * Francois H. Theron <francois.theron@netronome.com> */ + +#include <asm/unaligned.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> @@ -233,6 +205,229 @@ nfp_rtsym_lookup(struct nfp_rtsym_table *rtbl, const char *name) return NULL; } +u64 nfp_rtsym_size(const struct nfp_rtsym *sym) +{ + switch (sym->type) { + case NFP_RTSYM_TYPE_NONE: + pr_err("rtsym '%s': type NONE\n", sym->name); + return 0; + default: + pr_warn("rtsym '%s': unknown type: %d\n", sym->name, sym->type); + /* fall through */ + case NFP_RTSYM_TYPE_OBJECT: + case NFP_RTSYM_TYPE_FUNCTION: + return sym->size; + case NFP_RTSYM_TYPE_ABS: + return sizeof(u64); + } +} + +static int +nfp_rtsym_to_dest(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, + u8 action, u8 token, u64 off, u32 *cpp_id, u64 *addr) +{ + if (sym->type != NFP_RTSYM_TYPE_OBJECT) { + nfp_err(cpp, "rtsym '%s': direct access to non-object rtsym\n", + sym->name); + return -EINVAL; + } + + *addr = sym->addr + off; + + if (sym->target == NFP_RTSYM_TARGET_EMU_CACHE) { + int locality_off = nfp_cpp_mu_locality_lsb(cpp); + + *addr &= ~(NFP_MU_ADDR_ACCESS_TYPE_MASK << locality_off); + *addr |= NFP_MU_ADDR_ACCESS_TYPE_DIRECT << locality_off; + + *cpp_id = NFP_CPP_ISLAND_ID(NFP_CPP_TARGET_MU, action, token, + sym->domain); + } else if (sym->target < 0) { + nfp_err(cpp, "rtsym '%s': unhandled target encoding: %d\n", + sym->name, sym->target); + return -EINVAL; + } else { + *cpp_id = NFP_CPP_ISLAND_ID(sym->target, action, token, + sym->domain); + } + + return 0; +} + +int __nfp_rtsym_read(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, + u8 action, u8 token, u64 off, void *buf, size_t len) +{ + u64 sym_size = nfp_rtsym_size(sym); + u32 cpp_id; + u64 addr; + int err; + + if (off > sym_size) { + nfp_err(cpp, "rtsym '%s': read out of bounds: off: %lld + len: %zd > size: %lld\n", + sym->name, off, len, sym_size); + return -ENXIO; + } + len = min_t(size_t, len, sym_size - off); + + if (sym->type == 
NFP_RTSYM_TYPE_ABS) { + u8 tmp[8]; + + put_unaligned_le64(sym->addr, tmp); + memcpy(buf, &tmp[off], len); + + return len; + } + + err = nfp_rtsym_to_dest(cpp, sym, action, token, off, &cpp_id, &addr); + if (err) + return err; + + return nfp_cpp_read(cpp, cpp_id, addr, buf, len); +} + +int nfp_rtsym_read(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, u64 off, + void *buf, size_t len) +{ + return __nfp_rtsym_read(cpp, sym, NFP_CPP_ACTION_RW, 0, off, buf, len); +} + +int __nfp_rtsym_readl(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, + u8 action, u8 token, u64 off, u32 *value) +{ + u32 cpp_id; + u64 addr; + int err; + + if (off + 4 > nfp_rtsym_size(sym)) { + nfp_err(cpp, "rtsym '%s': readl out of bounds: off: %lld + 4 > size: %lld\n", + sym->name, off, nfp_rtsym_size(sym)); + return -ENXIO; + } + + err = nfp_rtsym_to_dest(cpp, sym, action, token, off, &cpp_id, &addr); + if (err) + return err; + + return nfp_cpp_readl(cpp, cpp_id, addr, value); +} + +int nfp_rtsym_readl(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, u64 off, + u32 *value) +{ + return __nfp_rtsym_readl(cpp, sym, NFP_CPP_ACTION_RW, 0, off, value); +} + +int __nfp_rtsym_readq(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, + u8 action, u8 token, u64 off, u64 *value) +{ + u32 cpp_id; + u64 addr; + int err; + + if (off + 8 > nfp_rtsym_size(sym)) { + nfp_err(cpp, "rtsym '%s': readq out of bounds: off: %lld + 8 > size: %lld\n", + sym->name, off, nfp_rtsym_size(sym)); + return -ENXIO; + } + + if (sym->type == NFP_RTSYM_TYPE_ABS) { + *value = sym->addr; + return 0; + } + + err = nfp_rtsym_to_dest(cpp, sym, action, token, off, &cpp_id, &addr); + if (err) + return err; + + return nfp_cpp_readq(cpp, cpp_id, addr, value); +} + +int nfp_rtsym_readq(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, u64 off, + u64 *value) +{ + return __nfp_rtsym_readq(cpp, sym, NFP_CPP_ACTION_RW, 0, off, value); +} + +int __nfp_rtsym_write(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, + u8 action, u8 token, u64 off, void *buf, size_t len) +{ + u64 sym_size = nfp_rtsym_size(sym); + u32 cpp_id; + u64 addr; + int err; + + if (off > sym_size) { + nfp_err(cpp, "rtsym '%s': write out of bounds: off: %lld + len: %zd > size: %lld\n", + sym->name, off, len, sym_size); + return -ENXIO; + } + len = min_t(size_t, len, sym_size - off); + + err = nfp_rtsym_to_dest(cpp, sym, action, token, off, &cpp_id, &addr); + if (err) + return err; + + return nfp_cpp_write(cpp, cpp_id, addr, buf, len); +} + +int nfp_rtsym_write(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, u64 off, + void *buf, size_t len) +{ + return __nfp_rtsym_write(cpp, sym, NFP_CPP_ACTION_RW, 0, off, buf, len); +} + +int __nfp_rtsym_writel(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, + u8 action, u8 token, u64 off, u32 value) +{ + u32 cpp_id; + u64 addr; + int err; + + if (off + 4 > nfp_rtsym_size(sym)) { + nfp_err(cpp, "rtsym '%s': writel out of bounds: off: %lld + 4 > size: %lld\n", + sym->name, off, nfp_rtsym_size(sym)); + return -ENXIO; + } + + err = nfp_rtsym_to_dest(cpp, sym, action, token, off, &cpp_id, &addr); + if (err) + return err; + + return nfp_cpp_writel(cpp, cpp_id, addr, value); +} + +int nfp_rtsym_writel(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, u64 off, + u32 value) +{ + return __nfp_rtsym_writel(cpp, sym, NFP_CPP_ACTION_RW, 0, off, value); +} + +int __nfp_rtsym_writeq(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, + u8 action, u8 token, u64 off, u64 value) +{ + u32 cpp_id; + u64 addr; + int err; + + if (off + 8 > nfp_rtsym_size(sym)) { + nfp_err(cpp, "rtsym 
'%s': writeq out of bounds: off: %lld + 8 > size: %lld\n", + sym->name, off, nfp_rtsym_size(sym)); + return -ENXIO; + } + + err = nfp_rtsym_to_dest(cpp, sym, action, token, off, &cpp_id, &addr); + if (err) + return err; + + return nfp_cpp_writeq(cpp, cpp_id, addr, value); +} + +int nfp_rtsym_writeq(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, u64 off, + u64 value) +{ + return __nfp_rtsym_writeq(cpp, sym, NFP_CPP_ACTION_RW, 0, off, value); +} + /** * nfp_rtsym_read_le() - Read a simple unsigned scalar value from symbol * @rtbl: NFP RTsym table @@ -249,7 +444,7 @@ u64 nfp_rtsym_read_le(struct nfp_rtsym_table *rtbl, const char *name, int *error) { const struct nfp_rtsym *sym; - u32 val32, id; + u32 val32; u64 val; int err; @@ -259,20 +454,18 @@ u64 nfp_rtsym_read_le(struct nfp_rtsym_table *rtbl, const char *name, goto exit; } - id = NFP_CPP_ISLAND_ID(sym->target, NFP_CPP_ACTION_RW, 0, sym->domain); - - switch (sym->size) { + switch (nfp_rtsym_size(sym)) { case 4: - err = nfp_cpp_readl(rtbl->cpp, id, sym->addr, &val32); + err = nfp_rtsym_readl(rtbl->cpp, sym, 0, &val32); val = val32; break; case 8: - err = nfp_cpp_readq(rtbl->cpp, id, sym->addr, &val); + err = nfp_rtsym_readq(rtbl->cpp, sym, 0, &val); break; default: nfp_err(rtbl->cpp, - "rtsym '%s' unsupported or non-scalar size: %lld\n", - name, sym->size); + "rtsym '%s': unsupported or non-scalar size: %lld\n", + name, nfp_rtsym_size(sym)); err = -EINVAL; break; } @@ -303,25 +496,22 @@ int nfp_rtsym_write_le(struct nfp_rtsym_table *rtbl, const char *name, { const struct nfp_rtsym *sym; int err; - u32 id; sym = nfp_rtsym_lookup(rtbl, name); if (!sym) return -ENOENT; - id = NFP_CPP_ISLAND_ID(sym->target, NFP_CPP_ACTION_RW, 0, sym->domain); - - switch (sym->size) { + switch (nfp_rtsym_size(sym)) { case 4: - err = nfp_cpp_writel(rtbl->cpp, id, sym->addr, value); + err = nfp_rtsym_writel(rtbl->cpp, sym, 0, value); break; case 8: - err = nfp_cpp_writeq(rtbl->cpp, id, sym->addr, value); + err = nfp_rtsym_writeq(rtbl->cpp, sym, 0, value); break; default: nfp_err(rtbl->cpp, - "rtsym '%s' unsupported or non-scalar size: %lld\n", - name, sym->size); + "rtsym '%s': unsupported or non-scalar size: %lld\n", + name, nfp_rtsym_size(sym)); err = -EINVAL; break; } @@ -335,20 +525,29 @@ nfp_rtsym_map(struct nfp_rtsym_table *rtbl, const char *name, const char *id, { const struct nfp_rtsym *sym; u8 __iomem *mem; + u32 cpp_id; + u64 addr; + int err; sym = nfp_rtsym_lookup(rtbl, name); if (!sym) return (u8 __iomem *)ERR_PTR(-ENOENT); + err = nfp_rtsym_to_dest(rtbl->cpp, sym, NFP_CPP_ACTION_RW, 0, 0, + &cpp_id, &addr); + if (err) { + nfp_err(rtbl->cpp, "rtsym '%s': mapping failed\n", name); + return (u8 __iomem *)ERR_PTR(err); + } + if (sym->size < min_size) { - nfp_err(rtbl->cpp, "Symbol %s too small\n", name); + nfp_err(rtbl->cpp, "rtsym '%s': too small\n", name); return (u8 __iomem *)ERR_PTR(-EINVAL); } - mem = nfp_cpp_map_area(rtbl->cpp, id, sym->domain, sym->target, - sym->addr, sym->size, area); + mem = nfp_cpp_map_area(rtbl->cpp, id, cpp_id, addr, sym->size, area); if (IS_ERR(mem)) { - nfp_err(rtbl->cpp, "Failed to map symbol %s: %ld\n", + nfp_err(rtbl->cpp, "rtsym '%s': failed to map: %ld\n", name, PTR_ERR(mem)); return mem; } diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_target.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_target.c index 4ea1e585d945..79470f198a62 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_target.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_target.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 
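The nfp_rtsym_read_le()/nfp_rtsym_write_le() conversion above shows the intended pattern for the new accessors: look the symbol up once, then use the bounds-checked helpers instead of hand-building a CPP island ID. A sketch of reading a 64-bit counter this way (the symbol name is illustrative):

    const struct nfp_rtsym *sym;
    u64 count;
    int err;

    sym = nfp_rtsym_lookup(rtbl, "_example_pkt_count"); /* illustrative name */
    if (!sym)
            return -ENOENT;

    /* checked against nfp_rtsym_size(); ABS symbols are handled too */
    err = nfp_rtsym_readq(rtbl->cpp, sym, 0, &count);
    if (err)
            return err;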
2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2015-2018 Netronome Systems, Inc. */ /* * nfp_target.c @@ -39,7 +9,11 @@ * Francois H. Theron <francois.theron@netronome.com> */ +#define pr_fmt(fmt) "NFP target: " fmt + #include <linux/bitops.h> +#include <linux/kernel.h> +#include <linux/printk.h> #include "nfp_cpp.h" @@ -733,8 +707,10 @@ int nfp_target_cpp(u32 cpp_island_id, u64 cpp_island_address, u32 imb; int err; - if (target < 0 || target >= 16) + if (target < 0 || target >= 16) { + pr_err("Invalid CPP target: %d\n", target); return -EINVAL; + } if (island == 0) { /* Already translated */ @@ -753,8 +729,10 @@ int nfp_target_cpp(u32 cpp_island_id, u64 cpp_island_address, err = nfp_cppat_addr_encode(cpp_target_address, island, target, ((imb >> 13) & 7), ((imb >> 12) & 1), ((imb >> 6) & 0x3f), ((imb >> 0) & 0x3f)); - if (err) + if (err) { + pr_err("Can't encode CPP address: %d\n", err); return err; + } *cpp_target_id = NFP_CPP_ID(target, NFP_CPP_ID_ACTION_of(cpp_island_id), diff --git a/drivers/net/ethernet/netronome/nfp/nic/main.c b/drivers/net/ethernet/netronome/nfp/nic/main.c index d5b587fccaa3..aea8579206ee 100644 --- a/drivers/net/ethernet/netronome/nfp/nic/main.c +++ b/drivers/net/ethernet/netronome/nfp/nic/main.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. 
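nfp_target.c above also gains a pr_fmt() definition ahead of its includes, so the pr_err() diagnostics added in the same hunk are prefixed automatically. In effect (the console output shown is illustrative):

    #define pr_fmt(fmt) "NFP target: " fmt

    #include <linux/printk.h>

    pr_err("Invalid CPP target: %d\n", target);
    /* logs: "NFP target: Invalid CPP target: 16" */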
Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2017 Netronome Systems, Inc. */ #include "../nfpcore/nfp_cpp.h" #include "../nfpcore/nfp_nsp.h" diff --git a/drivers/net/ethernet/ni/Kconfig b/drivers/net/ethernet/ni/Kconfig index aa41e5f6e437..c73978474c4b 100644 --- a/drivers/net/ethernet/ni/Kconfig +++ b/drivers/net/ethernet/ni/Kconfig @@ -18,8 +18,9 @@ if NET_VENDOR_NI config NI_XGE_MANAGEMENT_ENET tristate "National Instruments XGE management enet support" - depends on ARCH_ZYNQ + depends on HAS_IOMEM && HAS_DMA select PHYLIB + select OF_MDIO if OF help Simple LAN device for debug or management purposes. Can support either 10G or 1G PHYs via SFP+ ports. diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c index 76efed058f33..0611f2335b4a 100644 --- a/drivers/net/ethernet/ni/nixge.c +++ b/drivers/net/ethernet/ni/nixge.c @@ -106,10 +106,10 @@ (NIXGE_JUMBO_MTU + NIXGE_HDR_SIZE + NIXGE_TRL_SIZE) struct nixge_hw_dma_bd { - u32 next; - u32 reserved1; - u32 phys; - u32 reserved2; + u32 next_lo; + u32 next_hi; + u32 phys_lo; + u32 phys_hi; u32 reserved3; u32 reserved4; u32 cntrl; @@ -119,11 +119,39 @@ struct nixge_hw_dma_bd { u32 app2; u32 app3; u32 app4; - u32 sw_id_offset; - u32 reserved5; + u32 sw_id_offset_lo; + u32 sw_id_offset_hi; u32 reserved6; }; +#ifdef CONFIG_PHYS_ADDR_T_64BIT +#define nixge_hw_dma_bd_set_addr(bd, field, addr) \ + do { \ + (bd)->field##_lo = lower_32_bits((addr)); \ + (bd)->field##_hi = upper_32_bits((addr)); \ + } while (0) +#else +#define nixge_hw_dma_bd_set_addr(bd, field, addr) \ + ((bd)->field##_lo = lower_32_bits((addr))) +#endif + +#define nixge_hw_dma_bd_set_phys(bd, addr) \ + nixge_hw_dma_bd_set_addr((bd), phys, (addr)) + +#define nixge_hw_dma_bd_set_next(bd, addr) \ + nixge_hw_dma_bd_set_addr((bd), next, (addr)) + +#define nixge_hw_dma_bd_set_offset(bd, addr) \ + nixge_hw_dma_bd_set_addr((bd), sw_id_offset, (addr)) + +#ifdef CONFIG_PHYS_ADDR_T_64BIT +#define nixge_hw_dma_bd_get_addr(bd, field) \ + (dma_addr_t)((((u64)(bd)->field##_hi) << 32) | ((bd)->field##_lo)) +#else +#define nixge_hw_dma_bd_get_addr(bd, field) \ + (dma_addr_t)((bd)->field##_lo) +#endif + struct nixge_tx_skb { struct sk_buff *skb; dma_addr_t mapping; @@ -176,6 +204,15 @@ static void nixge_dma_write_reg(struct nixge_priv *priv, off_t offset, u32 val) writel(val, priv->dma_regs + offset); } +static void nixge_dma_write_desc_reg(struct nixge_priv *priv, off_t offset, + dma_addr_t addr) +{ + writel(lower_32_bits(addr), priv->dma_regs + offset); +#ifdef CONFIG_PHYS_ADDR_T_64BIT + writel(upper_32_bits(addr), priv->dma_regs + offset + 4); +#endif +} + static u32 nixge_dma_read_reg(const struct nixge_priv *priv, off_t offset) { return readl(priv->dma_regs + offset); @@ -202,13 +239,22 @@ static u32 nixge_ctrl_read_reg(struct nixge_priv 
*priv, off_t offset) static void nixge_hw_dma_bd_release(struct net_device *ndev) { struct nixge_priv *priv = netdev_priv(ndev); + dma_addr_t phys_addr; + struct sk_buff *skb; int i; for (i = 0; i < RX_BD_NUM; i++) { - dma_unmap_single(ndev->dev.parent, priv->rx_bd_v[i].phys, - NIXGE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE); - dev_kfree_skb((struct sk_buff *) - (priv->rx_bd_v[i].sw_id_offset)); + phys_addr = nixge_hw_dma_bd_get_addr(&priv->rx_bd_v[i], + phys); + + dma_unmap_single(ndev->dev.parent, phys_addr, + NIXGE_MAX_JUMBO_FRAME_SIZE, + DMA_FROM_DEVICE); + + skb = (struct sk_buff *)(uintptr_t) + nixge_hw_dma_bd_get_addr(&priv->rx_bd_v[i], + sw_id_offset); + dev_kfree_skb(skb); } if (priv->rx_bd_v) @@ -231,6 +277,7 @@ static int nixge_hw_dma_bd_init(struct net_device *ndev) { struct nixge_priv *priv = netdev_priv(ndev); struct sk_buff *skb; + dma_addr_t phys; u32 cr; int i; @@ -259,27 +306,30 @@ static int nixge_hw_dma_bd_init(struct net_device *ndev) goto out; for (i = 0; i < TX_BD_NUM; i++) { - priv->tx_bd_v[i].next = priv->tx_bd_p + - sizeof(*priv->tx_bd_v) * - ((i + 1) % TX_BD_NUM); + nixge_hw_dma_bd_set_next(&priv->tx_bd_v[i], + priv->tx_bd_p + + sizeof(*priv->tx_bd_v) * + ((i + 1) % TX_BD_NUM)); } for (i = 0; i < RX_BD_NUM; i++) { - priv->rx_bd_v[i].next = priv->rx_bd_p + - sizeof(*priv->rx_bd_v) * - ((i + 1) % RX_BD_NUM); + nixge_hw_dma_bd_set_next(&priv->rx_bd_v[i], + priv->rx_bd_p + + sizeof(*priv->rx_bd_v) * + ((i + 1) % RX_BD_NUM)); skb = netdev_alloc_skb_ip_align(ndev, NIXGE_MAX_JUMBO_FRAME_SIZE); if (!skb) goto out; - priv->rx_bd_v[i].sw_id_offset = (u32)skb; - priv->rx_bd_v[i].phys = - dma_map_single(ndev->dev.parent, - skb->data, - NIXGE_MAX_JUMBO_FRAME_SIZE, - DMA_FROM_DEVICE); + nixge_hw_dma_bd_set_offset(&priv->rx_bd_v[i], (uintptr_t)skb); + phys = dma_map_single(ndev->dev.parent, skb->data, + NIXGE_MAX_JUMBO_FRAME_SIZE, + DMA_FROM_DEVICE); + + nixge_hw_dma_bd_set_phys(&priv->rx_bd_v[i], phys); + priv->rx_bd_v[i].cntrl = NIXGE_MAX_JUMBO_FRAME_SIZE; } @@ -312,18 +362,18 @@ static int nixge_hw_dma_bd_init(struct net_device *ndev) /* Populate the tail pointer and bring the Rx Axi DMA engine out of * halted state. This will make the Rx side ready for reception. */ - nixge_dma_write_reg(priv, XAXIDMA_RX_CDESC_OFFSET, priv->rx_bd_p); + nixge_dma_write_desc_reg(priv, XAXIDMA_RX_CDESC_OFFSET, priv->rx_bd_p); cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET); nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr | XAXIDMA_CR_RUNSTOP_MASK); - nixge_dma_write_reg(priv, XAXIDMA_RX_TDESC_OFFSET, priv->rx_bd_p + + nixge_dma_write_desc_reg(priv, XAXIDMA_RX_TDESC_OFFSET, priv->rx_bd_p + (sizeof(*priv->rx_bd_v) * (RX_BD_NUM - 1))); /* Write to the RS (Run-stop) bit in the Tx channel control register. * Tx channel is now ready to run. But only after we write to the * tail pointer register that the Tx channel will start transmitting. 
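The nixge descriptor rework above splits every DMA address into explicit _lo/_hi descriptor words so the ring layout no longer depends on sizeof(dma_addr_t). A compressed sketch of the round trip the new macros perform, assuming the CONFIG_PHYS_ADDR_T_64BIT branch:

    struct nixge_hw_dma_bd bd;
    dma_addr_t mapping = 0x00000001f0001000ULL;  /* example address */

    /* store: split into two 32-bit descriptor words */
    bd.phys_lo = lower_32_bits(mapping);         /* 0xf0001000 */
    bd.phys_hi = upper_32_bits(mapping);         /* 0x00000001 */

    /* load: reassemble for dma_unmap_single() and friends */
    mapping = (dma_addr_t)(((u64)bd.phys_hi << 32) | bd.phys_lo);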
*/ - nixge_dma_write_reg(priv, XAXIDMA_TX_CDESC_OFFSET, priv->tx_bd_p); + nixge_dma_write_desc_reg(priv, XAXIDMA_TX_CDESC_OFFSET, priv->tx_bd_p); cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET); nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET, cr | XAXIDMA_CR_RUNSTOP_MASK); @@ -451,7 +501,7 @@ static int nixge_start_xmit(struct sk_buff *skb, struct net_device *ndev) struct nixge_priv *priv = netdev_priv(ndev); struct nixge_hw_dma_bd *cur_p; struct nixge_tx_skb *tx_skb; - dma_addr_t tail_p; + dma_addr_t tail_p, cur_phys; skb_frag_t *frag; u32 num_frag; u32 ii; @@ -466,15 +516,16 @@ static int nixge_start_xmit(struct sk_buff *skb, struct net_device *ndev) return NETDEV_TX_OK; } - cur_p->phys = dma_map_single(ndev->dev.parent, skb->data, - skb_headlen(skb), DMA_TO_DEVICE); - if (dma_mapping_error(ndev->dev.parent, cur_p->phys)) + cur_phys = dma_map_single(ndev->dev.parent, skb->data, + skb_headlen(skb), DMA_TO_DEVICE); + if (dma_mapping_error(ndev->dev.parent, cur_phys)) goto drop; + nixge_hw_dma_bd_set_phys(cur_p, cur_phys); cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK; tx_skb->skb = NULL; - tx_skb->mapping = cur_p->phys; + tx_skb->mapping = cur_phys; tx_skb->size = skb_headlen(skb); tx_skb->mapped_as_page = false; @@ -485,16 +536,17 @@ static int nixge_start_xmit(struct sk_buff *skb, struct net_device *ndev) tx_skb = &priv->tx_skb[priv->tx_bd_tail]; frag = &skb_shinfo(skb)->frags[ii]; - cur_p->phys = skb_frag_dma_map(ndev->dev.parent, frag, 0, - skb_frag_size(frag), - DMA_TO_DEVICE); - if (dma_mapping_error(ndev->dev.parent, cur_p->phys)) + cur_phys = skb_frag_dma_map(ndev->dev.parent, frag, 0, + skb_frag_size(frag), + DMA_TO_DEVICE); + if (dma_mapping_error(ndev->dev.parent, cur_phys)) goto frag_err; + nixge_hw_dma_bd_set_phys(cur_p, cur_phys); cur_p->cntrl = skb_frag_size(frag); tx_skb->skb = NULL; - tx_skb->mapping = cur_p->phys; + tx_skb->mapping = cur_phys; tx_skb->size = skb_frag_size(frag); tx_skb->mapped_as_page = true; } @@ -506,7 +558,7 @@ static int nixge_start_xmit(struct sk_buff *skb, struct net_device *ndev) tail_p = priv->tx_bd_p + sizeof(*priv->tx_bd_v) * priv->tx_bd_tail; /* Start the transfer */ - nixge_dma_write_reg(priv, XAXIDMA_TX_TDESC_OFFSET, tail_p); + nixge_dma_write_desc_reg(priv, XAXIDMA_TX_TDESC_OFFSET, tail_p); ++priv->tx_bd_tail; priv->tx_bd_tail %= TX_BD_NUM; @@ -537,7 +589,7 @@ static int nixge_recv(struct net_device *ndev, int budget) struct nixge_priv *priv = netdev_priv(ndev); struct sk_buff *skb, *new_skb; struct nixge_hw_dma_bd *cur_p; - dma_addr_t tail_p = 0; + dma_addr_t tail_p = 0, cur_phys = 0; u32 packets = 0; u32 length = 0; u32 size = 0; @@ -549,13 +601,15 @@ static int nixge_recv(struct net_device *ndev, int budget) tail_p = priv->rx_bd_p + sizeof(*priv->rx_bd_v) * priv->rx_bd_ci; - skb = (struct sk_buff *)(cur_p->sw_id_offset); + skb = (struct sk_buff *)(uintptr_t) + nixge_hw_dma_bd_get_addr(cur_p, sw_id_offset); length = cur_p->status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK; if (length > NIXGE_MAX_JUMBO_FRAME_SIZE) length = NIXGE_MAX_JUMBO_FRAME_SIZE; - dma_unmap_single(ndev->dev.parent, cur_p->phys, + dma_unmap_single(ndev->dev.parent, + nixge_hw_dma_bd_get_addr(cur_p, phys), NIXGE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE); @@ -579,16 +633,17 @@ static int nixge_recv(struct net_device *ndev, int budget) if (!new_skb) return packets; - cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data, - NIXGE_MAX_JUMBO_FRAME_SIZE, - DMA_FROM_DEVICE); - if (dma_mapping_error(ndev->dev.parent, cur_p->phys)) { + cur_phys = 
dma_map_single(ndev->dev.parent, new_skb->data, + NIXGE_MAX_JUMBO_FRAME_SIZE, + DMA_FROM_DEVICE); + if (dma_mapping_error(ndev->dev.parent, cur_phys)) { /* FIXME: bail out and clean up */ netdev_err(ndev, "Failed to map ...\n"); } + nixge_hw_dma_bd_set_phys(cur_p, cur_phys); cur_p->cntrl = NIXGE_MAX_JUMBO_FRAME_SIZE; cur_p->status = 0; - cur_p->sw_id_offset = (u32)new_skb; + nixge_hw_dma_bd_set_offset(cur_p, (uintptr_t)new_skb); ++priv->rx_bd_ci; priv->rx_bd_ci %= RX_BD_NUM; @@ -599,7 +654,7 @@ static int nixge_recv(struct net_device *ndev, int budget) ndev->stats.rx_bytes += size; if (tail_p) - nixge_dma_write_reg(priv, XAXIDMA_RX_TDESC_OFFSET, tail_p); + nixge_dma_write_desc_reg(priv, XAXIDMA_RX_TDESC_OFFSET, tail_p); return packets; } @@ -637,6 +692,7 @@ static irqreturn_t nixge_tx_irq(int irq, void *_ndev) struct nixge_priv *priv = netdev_priv(_ndev); struct net_device *ndev = _ndev; unsigned int status; + dma_addr_t phys; u32 cr; status = nixge_dma_read_reg(priv, XAXIDMA_TX_SR_OFFSET); @@ -650,9 +706,11 @@ static irqreturn_t nixge_tx_irq(int irq, void *_ndev) return IRQ_NONE; } if (status & XAXIDMA_IRQ_ERROR_MASK) { + phys = nixge_hw_dma_bd_get_addr(&priv->tx_bd_v[priv->tx_bd_ci], + phys); + netdev_err(ndev, "DMA Tx error 0x%x\n", status); - netdev_err(ndev, "Current BD is at: 0x%x\n", - (priv->tx_bd_v[priv->tx_bd_ci]).phys); + netdev_err(ndev, "Current BD is at: 0x%llx\n", (u64)phys); cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET); /* Disable coalesce, delay timer and error interrupts */ @@ -678,6 +736,7 @@ static irqreturn_t nixge_rx_irq(int irq, void *_ndev) struct nixge_priv *priv = netdev_priv(_ndev); struct net_device *ndev = _ndev; unsigned int status; + dma_addr_t phys; u32 cr; status = nixge_dma_read_reg(priv, XAXIDMA_RX_SR_OFFSET); @@ -697,9 +756,10 @@ static irqreturn_t nixge_rx_irq(int irq, void *_ndev) return IRQ_NONE; } if (status & XAXIDMA_IRQ_ERROR_MASK) { + phys = nixge_hw_dma_bd_get_addr(&priv->rx_bd_v[priv->rx_bd_ci], + phys); netdev_err(ndev, "DMA Rx error 0x%x\n", status); - netdev_err(ndev, "Current BD is at: 0x%x\n", - (priv->rx_bd_v[priv->rx_bd_ci]).phys); + netdev_err(ndev, "Current BD is at: 0x%llx\n", (u64)phys); cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET); /* Disable coalesce, delay timer and error interrupts */ @@ -735,10 +795,10 @@ static void nixge_dma_err_handler(unsigned long data) tx_skb = &lp->tx_skb[i]; nixge_tx_skb_unmap(lp, tx_skb); - cur_p->phys = 0; + nixge_hw_dma_bd_set_phys(cur_p, 0); cur_p->cntrl = 0; cur_p->status = 0; - cur_p->sw_id_offset = 0; + nixge_hw_dma_bd_set_offset(cur_p, 0); } for (i = 0; i < RX_BD_NUM; i++) { @@ -779,18 +839,18 @@ static void nixge_dma_err_handler(unsigned long data) /* Populate the tail pointer and bring the Rx Axi DMA engine out of * halted state. This will make the Rx side ready for reception. */ - nixge_dma_write_reg(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p); + nixge_dma_write_desc_reg(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p); cr = nixge_dma_read_reg(lp, XAXIDMA_RX_CR_OFFSET); nixge_dma_write_reg(lp, XAXIDMA_RX_CR_OFFSET, cr | XAXIDMA_CR_RUNSTOP_MASK); - nixge_dma_write_reg(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p + + nixge_dma_write_desc_reg(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1))); /* Write to the RS (Run-stop) bit in the Tx channel control register. * Tx channel is now ready to run. 
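The Tx/Rx error interrupts above fetch the failing descriptor address through nixge_hw_dma_bd_get_addr() and print it with a u64 cast, since dma_addr_t may be 32 or 64 bits wide. An equivalent option, noted here for reference, is the kernel's dedicated specifier, which takes a pointer to the dma_addr_t and sizes itself correctly on either configuration:

    netdev_err(ndev, "Current BD is at: %pad\n", &phys);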
But only after we write to the * tail pointer register that the Tx channel will start transmitting */ - nixge_dma_write_reg(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p); + nixge_dma_write_desc_reg(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p); cr = nixge_dma_read_reg(lp, XAXIDMA_TX_CR_OFFSET); nixge_dma_write_reg(lp, XAXIDMA_TX_CR_OFFSET, cr | XAXIDMA_CR_RUNSTOP_MASK); diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index 08381ef8bdb4..8b23d2848457 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c @@ -797,8 +797,7 @@ static int lpc_mii_probe(struct net_device *ndev) return PTR_ERR(phydev); } - /* mask with MAC supported features */ - phydev->supported &= PHY_BASIC_FEATURES; + phy_set_max_speed(phydev, SPEED_100); phydev->advertising = phydev->supported; diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index a60e1c8d470a..5f0962d353ce 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -623,6 +623,7 @@ struct qed_hwfn { void *unzip_buf; struct dbg_tools_data dbg_info; + void *dbg_user_info; /* PWM region specific data */ u16 wid_count; diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index f1977aa440e5..dc1c1b616084 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -40,7 +40,6 @@ #include <linux/pci.h> #include <linux/slab.h> #include <linux/string.h> -#include <linux/bitops.h> #include "qed.h" #include "qed_cxt.h" #include "qed_dev_api.h" diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index f5459de6d60a..8e8fa823d611 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c @@ -262,8 +262,9 @@ qed_dcbx_get_app_protocol_type(struct qed_hwfn *p_hwfn, *type = DCBX_PROTOCOL_ROCE_V2; } else { *type = DCBX_MAX_PROTOCOL_TYPE; - DP_ERR(p_hwfn, "No action required, App TLV entry = 0x%x\n", - app_prio_bitmap); + DP_VERBOSE(p_hwfn, QED_MSG_DCB, + "No action required, App TLV entry = 0x%x\n", + app_prio_bitmap); return false; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c index 1aa9fc1c5890..78a638ec7c0a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_debug.c +++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c @@ -3454,7 +3454,8 @@ static u32 qed_grc_dump_iors(struct qed_hwfn *p_hwfn, addr = BYTES_TO_DWORDS(storm->sem_fast_mem_addr + SEM_FAST_REG_STORM_REG_FILE) + IOR_SET_OFFSET(set_id); - buf[strlen(buf) - 1] = '0' + set_id; + if (strlen(buf) > 0) + buf[strlen(buf) - 1] = '0' + set_id; offset += qed_grc_dump_mem(p_hwfn, p_ptt, dump_buf + offset, @@ -5563,35 +5564,6 @@ struct block_info { enum block_id id; }; -struct mcp_trace_format { - u32 data; -#define MCP_TRACE_FORMAT_MODULE_MASK 0x0000ffff -#define MCP_TRACE_FORMAT_MODULE_SHIFT 0 -#define MCP_TRACE_FORMAT_LEVEL_MASK 0x00030000 -#define MCP_TRACE_FORMAT_LEVEL_SHIFT 16 -#define MCP_TRACE_FORMAT_P1_SIZE_MASK 0x000c0000 -#define MCP_TRACE_FORMAT_P1_SIZE_SHIFT 18 -#define MCP_TRACE_FORMAT_P2_SIZE_MASK 0x00300000 -#define MCP_TRACE_FORMAT_P2_SIZE_SHIFT 20 -#define MCP_TRACE_FORMAT_P3_SIZE_MASK 0x00c00000 -#define MCP_TRACE_FORMAT_P3_SIZE_SHIFT 22 -#define MCP_TRACE_FORMAT_LEN_MASK 0xff000000 -#define MCP_TRACE_FORMAT_LEN_SHIFT 24 - - char *format_str; -}; - -/* Meta data structure, generated by a perl script during MFW build. 
therefore, - * the structs mcp_trace_meta and mcp_trace_format are duplicated in the perl - * script. - */ -struct mcp_trace_meta { - u32 modules_num; - char **modules; - u32 formats_num; - struct mcp_trace_format *formats; -}; - /* REG fifo element */ struct reg_fifo_element { u64 data; @@ -5714,6 +5686,20 @@ struct igu_fifo_addr_data { enum igu_fifo_addr_types type; }; +struct mcp_trace_meta { + u32 modules_num; + char **modules; + u32 formats_num; + struct mcp_trace_format *formats; + bool is_allocated; +}; + +/* Debug Tools user data */ +struct dbg_tools_user_data { + struct mcp_trace_meta mcp_trace_meta; + const u32 *mcp_trace_user_meta_buf; +}; + /******************************** Constants **********************************/ #define MAX_MSG_LEN 1024 @@ -6137,15 +6123,6 @@ static const struct igu_fifo_addr_data s_igu_fifo_addr_data[] = { /******************************** Variables **********************************/ -/* MCP Trace meta data array - used in case the dump doesn't contain the - * meta data (e.g. due to no NVRAM access). - */ -static struct user_dbg_array s_mcp_trace_meta_arr = { NULL, 0 }; - -/* Parsed MCP Trace meta data info, based on MCP trace meta array */ -static struct mcp_trace_meta s_mcp_trace_meta; -static bool s_mcp_trace_meta_valid; - /* Temporary buffer, used for print size calculations */ static char s_temp_buf[MAX_MSG_LEN]; @@ -6311,6 +6288,12 @@ static u32 qed_print_section_params(u32 *dump_buf, return dump_offset; } +static struct dbg_tools_user_data * +qed_dbg_get_user_data(struct qed_hwfn *p_hwfn) +{ + return (struct dbg_tools_user_data *)p_hwfn->dbg_user_info; +} + /* Parses the idle check rules and returns the number of characters printed. * In case of parsing error, returns 0. */ @@ -6570,43 +6553,26 @@ static enum dbg_status qed_parse_idle_chk_dump(u32 *dump_buf, return DBG_STATUS_OK; } -/* Frees the specified MCP Trace meta data */ -static void qed_mcp_trace_free_meta(struct qed_hwfn *p_hwfn, - struct mcp_trace_meta *meta) -{ - u32 i; - - s_mcp_trace_meta_valid = false; - - /* Release modules */ - if (meta->modules) { - for (i = 0; i < meta->modules_num; i++) - kfree(meta->modules[i]); - kfree(meta->modules); - } - - /* Release formats */ - if (meta->formats) { - for (i = 0; i < meta->formats_num; i++) - kfree(meta->formats[i].format_str); - kfree(meta->formats); - } -} - /* Allocates and fills MCP Trace meta data based on the specified meta data * dump buffer. * Returns debug status code. */ -static enum dbg_status qed_mcp_trace_alloc_meta(struct qed_hwfn *p_hwfn, - const u32 *meta_buf, - struct mcp_trace_meta *meta) +static enum dbg_status +qed_mcp_trace_alloc_meta_data(struct qed_hwfn *p_hwfn, + const u32 *meta_buf) { - u8 *meta_buf_bytes = (u8 *)meta_buf; + struct dbg_tools_user_data *dev_user_data; u32 offset = 0, signature, i; + struct mcp_trace_meta *meta; + u8 *meta_buf_bytes; + + dev_user_data = qed_dbg_get_user_data(p_hwfn); + meta = &dev_user_data->mcp_trace_meta; + meta_buf_bytes = (u8 *)meta_buf; /* Free the previous meta before loading a new one. */ - if (s_mcp_trace_meta_valid) - qed_mcp_trace_free_meta(p_hwfn, meta); + if (meta->is_allocated) + qed_mcp_trace_free_meta_data(p_hwfn); memset(meta, 0, sizeof(*meta)); @@ -6674,7 +6640,7 @@ static enum dbg_status qed_mcp_trace_alloc_meta(struct qed_hwfn *p_hwfn, format_len, format_ptr->format_str); } - s_mcp_trace_meta_valid = true; + meta->is_allocated = true; return DBG_STATUS_OK; } @@ -6687,21 +6653,26 @@ static enum dbg_status qed_mcp_trace_alloc_meta(struct qed_hwfn *p_hwfn, * buffer. 
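
/*
 * Sketch of the refactoring pattern applied throughout this file: the
 * file-scope parser state (s_mcp_trace_meta, s_mcp_trace_meta_arr) becomes
 * state hung off each hwfn, so several adapters can parse MCP traces
 * independently. The example_* names below are placeholders, not the qed
 * API.
 */
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/types.h>

struct example_user_data {
	bool meta_is_allocated;		/* replaces s_mcp_trace_meta_valid */
	const u32 *user_meta_buf;	/* replaces s_mcp_trace_meta_arr */
};

struct example_dev {
	void *dbg_user_info;
};

static int example_alloc_user_data(struct example_dev *dev)
{
	dev->dbg_user_info = kzalloc(sizeof(struct example_user_data),
				     GFP_KERNEL);
	return dev->dbg_user_info ? 0 : -ENOMEM;
}
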
* data_size - size in bytes of data to parse. * parsed_buf - destination buffer for parsed data. - * parsed_bytes - size of parsed data in bytes. + * parsed_results_bytes - size of parsed data in bytes. */ -static enum dbg_status qed_parse_mcp_trace_buf(u8 *trace_buf, +static enum dbg_status qed_parse_mcp_trace_buf(struct qed_hwfn *p_hwfn, + u8 *trace_buf, u32 trace_buf_size, u32 data_offset, u32 data_size, char *parsed_buf, - u32 *parsed_bytes) + u32 *parsed_results_bytes) { + struct dbg_tools_user_data *dev_user_data; + struct mcp_trace_meta *meta; u32 param_mask, param_shift; enum dbg_status status; - *parsed_bytes = 0; + dev_user_data = qed_dbg_get_user_data(p_hwfn); + meta = &dev_user_data->mcp_trace_meta; + *parsed_results_bytes = 0; - if (!s_mcp_trace_meta_valid) + if (!meta->is_allocated) return DBG_STATUS_MCP_TRACE_BAD_DATA; status = DBG_STATUS_OK; @@ -6723,7 +6694,7 @@ static enum dbg_status qed_parse_mcp_trace_buf(u8 *trace_buf, format_idx = header & MFW_TRACE_EVENTID_MASK; /* Skip message if its index doesn't exist in the meta data */ - if (format_idx >= s_mcp_trace_meta.formats_num) { + if (format_idx >= meta->formats_num) { u8 format_size = (u8)((header & MFW_TRACE_PRM_SIZE_MASK) >> MFW_TRACE_PRM_SIZE_SHIFT); @@ -6738,7 +6709,7 @@ static enum dbg_status qed_parse_mcp_trace_buf(u8 *trace_buf, continue; } - format_ptr = &s_mcp_trace_meta.formats[format_idx]; + format_ptr = &meta->formats[format_idx]; for (i = 0, param_mask = MCP_TRACE_FORMAT_P1_SIZE_MASK, @@ -6783,19 +6754,20 @@ static enum dbg_status qed_parse_mcp_trace_buf(u8 *trace_buf, return DBG_STATUS_MCP_TRACE_BAD_DATA; /* Print current message to results buffer */ - *parsed_bytes += - sprintf(qed_get_buf_ptr(parsed_buf, *parsed_bytes), + *parsed_results_bytes += + sprintf(qed_get_buf_ptr(parsed_buf, + *parsed_results_bytes), "%s %-8s: ", s_mcp_trace_level_str[format_level], - s_mcp_trace_meta.modules[format_module]); - *parsed_bytes += - sprintf(qed_get_buf_ptr(parsed_buf, *parsed_bytes), + meta->modules[format_module]); + *parsed_results_bytes += + sprintf(qed_get_buf_ptr(parsed_buf, *parsed_results_bytes), format_ptr->format_str, params[0], params[1], params[2]); } /* Add string NULL terminator */ - (*parsed_bytes)++; + (*parsed_results_bytes)++; return status; } @@ -6803,24 +6775,25 @@ static enum dbg_status qed_parse_mcp_trace_buf(u8 *trace_buf, /* Parses an MCP Trace dump buffer. * If result_buf is not NULL, the MCP Trace results are printed to it. * In any case, the required results buffer size is assigned to - * parsed_bytes. + * parsed_results_bytes. * The parsing status is returned. 
*/ static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn, u32 *dump_buf, - char *parsed_buf, - u32 *parsed_bytes) + char *results_buf, + u32 *parsed_results_bytes, + bool free_meta_data) { const char *section_name, *param_name, *param_str_val; u32 data_size, trace_data_dwords, trace_meta_dwords; - u32 offset, results_offset, parsed_buf_bytes; + u32 offset, results_offset, results_buf_bytes; u32 param_num_val, num_section_params; struct mcp_trace *trace; enum dbg_status status; const u32 *meta_buf; u8 *trace_buf; - *parsed_bytes = 0; + *parsed_results_bytes = 0; /* Read global_params section */ dump_buf += qed_read_section_hdr(dump_buf, @@ -6831,7 +6804,7 @@ static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn, /* Print global params */ dump_buf += qed_print_section_params(dump_buf, num_section_params, - parsed_buf, &results_offset); + results_buf, &results_offset); /* Read trace_data section */ dump_buf += qed_read_section_hdr(dump_buf, @@ -6846,6 +6819,9 @@ static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn, /* Prepare trace info */ trace = (struct mcp_trace *)dump_buf; + if (trace->signature != MFW_TRACE_SIGNATURE || !trace->size) + return DBG_STATUS_MCP_TRACE_BAD_DATA; + trace_buf = (u8 *)dump_buf + sizeof(*trace); offset = trace->trace_oldest; data_size = qed_cyclic_sub(trace->trace_prod, offset, trace->size); @@ -6865,31 +6841,39 @@ static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn, /* Choose meta data buffer */ if (!trace_meta_dwords) { /* Dump doesn't include meta data */ - if (!s_mcp_trace_meta_arr.ptr) + struct dbg_tools_user_data *dev_user_data = + qed_dbg_get_user_data(p_hwfn); + + if (!dev_user_data->mcp_trace_user_meta_buf) return DBG_STATUS_MCP_TRACE_NO_META; - meta_buf = s_mcp_trace_meta_arr.ptr; + + meta_buf = dev_user_data->mcp_trace_user_meta_buf; } else { /* Dump includes meta data */ meta_buf = dump_buf; } /* Allocate meta data memory */ - status = qed_mcp_trace_alloc_meta(p_hwfn, meta_buf, &s_mcp_trace_meta); + status = qed_mcp_trace_alloc_meta_data(p_hwfn, meta_buf); if (status != DBG_STATUS_OK) return status; - status = qed_parse_mcp_trace_buf(trace_buf, + status = qed_parse_mcp_trace_buf(p_hwfn, + trace_buf, trace->size, offset, data_size, - parsed_buf ? - parsed_buf + results_offset : + results_buf ? 
+ results_buf + results_offset : NULL, - &parsed_buf_bytes); + &results_buf_bytes); if (status != DBG_STATUS_OK) return status; - *parsed_bytes = results_offset + parsed_buf_bytes; + if (free_meta_data) + qed_mcp_trace_free_meta_data(p_hwfn); + + *parsed_results_bytes = results_offset + results_buf_bytes; return DBG_STATUS_OK; } @@ -7361,6 +7345,16 @@ enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr) return DBG_STATUS_OK; } +enum dbg_status qed_dbg_alloc_user_data(struct qed_hwfn *p_hwfn) +{ + p_hwfn->dbg_user_info = kzalloc(sizeof(struct dbg_tools_user_data), + GFP_KERNEL); + if (!p_hwfn->dbg_user_info) + return DBG_STATUS_VIRT_MEM_ALLOC_FAILED; + + return DBG_STATUS_OK; +} + const char *qed_dbg_get_status_str(enum dbg_status status) { return (status < @@ -7397,10 +7391,13 @@ enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn, num_errors, num_warnings); } -void qed_dbg_mcp_trace_set_meta_data(u32 *data, u32 size) +void qed_dbg_mcp_trace_set_meta_data(struct qed_hwfn *p_hwfn, + const u32 *meta_buf) { - s_mcp_trace_meta_arr.ptr = data; - s_mcp_trace_meta_arr.size_in_dwords = size; + struct dbg_tools_user_data *dev_user_data = + qed_dbg_get_user_data(p_hwfn); + + dev_user_data->mcp_trace_user_meta_buf = meta_buf; } enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn, @@ -7409,7 +7406,7 @@ enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn, u32 *results_buf_size) { return qed_parse_mcp_trace_dump(p_hwfn, - dump_buf, NULL, results_buf_size); + dump_buf, NULL, results_buf_size, true); } enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn, @@ -7421,20 +7418,61 @@ enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn, return qed_parse_mcp_trace_dump(p_hwfn, dump_buf, - results_buf, &parsed_buf_size); + results_buf, &parsed_buf_size, true); +} + +enum dbg_status qed_print_mcp_trace_results_cont(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + char *results_buf) +{ + u32 parsed_buf_size; + + return qed_parse_mcp_trace_dump(p_hwfn, dump_buf, results_buf, + &parsed_buf_size, false); } -enum dbg_status qed_print_mcp_trace_line(u8 *dump_buf, +enum dbg_status qed_print_mcp_trace_line(struct qed_hwfn *p_hwfn, + u8 *dump_buf, u32 num_dumped_bytes, char *results_buf) { - u32 parsed_bytes; + u32 parsed_results_bytes; - return qed_parse_mcp_trace_buf(dump_buf, + return qed_parse_mcp_trace_buf(p_hwfn, + dump_buf, num_dumped_bytes, 0, num_dumped_bytes, - results_buf, &parsed_bytes); + results_buf, &parsed_results_bytes); +} + +/* Frees the specified MCP Trace meta data */ +void qed_mcp_trace_free_meta_data(struct qed_hwfn *p_hwfn) +{ + struct dbg_tools_user_data *dev_user_data; + struct mcp_trace_meta *meta; + u32 i; + + dev_user_data = qed_dbg_get_user_data(p_hwfn); + meta = &dev_user_data->mcp_trace_meta; + if (!meta->is_allocated) + return; + + /* Release modules */ + if (meta->modules) { + for (i = 0; i < meta->modules_num; i++) + kfree(meta->modules[i]); + kfree(meta->modules); + } + + /* Release formats */ + if (meta->formats) { + for (i = 0; i < meta->formats_num; i++) + kfree(meta->formats[i].format_str); + kfree(meta->formats); + } + + meta->is_allocated = false; } enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn, diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 97f073fd3725..7ceb2b97538d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -144,6 +144,12 @@ static 
void qed_qm_info_free(struct qed_hwfn *p_hwfn) qm_info->wfq_data = NULL; } +static void qed_dbg_user_data_free(struct qed_hwfn *p_hwfn) +{ + kfree(p_hwfn->dbg_user_info); + p_hwfn->dbg_user_info = NULL; +} + void qed_resc_free(struct qed_dev *cdev) { int i; @@ -183,6 +189,7 @@ void qed_resc_free(struct qed_dev *cdev) qed_l2_free(p_hwfn); qed_dmae_info_free(p_hwfn); qed_dcbx_info_free(p_hwfn); + qed_dbg_user_data_free(p_hwfn); } } @@ -1083,6 +1090,10 @@ int qed_resc_alloc(struct qed_dev *cdev) rc = qed_dcbx_info_alloc(p_hwfn); if (rc) goto alloc_err; + + rc = qed_dbg_alloc_user_data(p_hwfn); + if (rc) + goto alloc_err; } cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL); @@ -2668,6 +2679,9 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) case NVM_CFG1_PORT_DRV_LINK_SPEED_10G: link->speed.forced_speed = 10000; break; + case NVM_CFG1_PORT_DRV_LINK_SPEED_20G: + link->speed.forced_speed = 20000; + break; case NVM_CFG1_PORT_DRV_LINK_SPEED_25G: link->speed.forced_speed = 25000; break; diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index a71382687ef2..f2dfc7a976c0 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -274,7 +274,8 @@ struct core_rx_start_ramrod_data { u8 mf_si_mcast_accept_all; struct core_rx_action_on_error action_on_error; u8 gsi_offload_flag; - u8 reserved[6]; + u8 wipe_inner_vlan_pri_en; + u8 reserved[5]; }; /* Ramrod data for rx queue stop ramrod */ @@ -351,7 +352,8 @@ struct core_tx_start_ramrod_data { __le16 pbl_size; __le16 qm_pq_id; u8 gsi_offload_flag; - u8 resrved[3]; + u8 vport_id; + u8 resrved[2]; }; /* Ramrod data for tx queue stop ramrod */ @@ -914,6 +916,16 @@ struct eth_rx_rate_limit { __le16 reserved1; }; +/* Update RSS indirection table entry command */ +struct eth_tstorm_rss_update_data { + u8 valid; + u8 vport_id; + u8 ind_table_index; + u8 reserved; + __le16 ind_table_value; + __le16 reserved1; +}; + struct eth_ustorm_per_pf_stat { struct regpair rcv_lb_ucast_bytes; struct regpair rcv_lb_mcast_bytes; @@ -1241,6 +1253,10 @@ struct rl_update_ramrod_data { u8 rl_id_first; u8 rl_id_last; u8 rl_dc_qcn_flg; + u8 dcqcn_reset_alpha_on_idle; + u8 rl_bc_stage_th; + u8 rl_timer_stage_th; + u8 reserved1; __le32 rl_bc_rate; __le16 rl_max_rate; __le16 rl_r_ai; @@ -1249,7 +1265,7 @@ struct rl_update_ramrod_data { __le32 dcqcn_k_us; __le32 dcqcn_timeuot_us; __le32 qcn_timeuot_us; - __le32 reserved[2]; + __le32 reserved2; }; /* Slowpath Element (SPQE) */ @@ -3322,6 +3338,25 @@ enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn, enum dbg_status qed_dbg_print_attn(struct qed_hwfn *p_hwfn, struct dbg_attn_block_result *results); +/******************************* Data Types **********************************/ + +struct mcp_trace_format { + u32 data; +#define MCP_TRACE_FORMAT_MODULE_MASK 0x0000ffff +#define MCP_TRACE_FORMAT_MODULE_SHIFT 0 +#define MCP_TRACE_FORMAT_LEVEL_MASK 0x00030000 +#define MCP_TRACE_FORMAT_LEVEL_SHIFT 16 +#define MCP_TRACE_FORMAT_P1_SIZE_MASK 0x000c0000 +#define MCP_TRACE_FORMAT_P1_SIZE_SHIFT 18 +#define MCP_TRACE_FORMAT_P2_SIZE_MASK 0x00300000 +#define MCP_TRACE_FORMAT_P2_SIZE_SHIFT 20 +#define MCP_TRACE_FORMAT_P3_SIZE_MASK 0x00c00000 +#define MCP_TRACE_FORMAT_P3_SIZE_SHIFT 22 +#define MCP_TRACE_FORMAT_LEN_MASK 0xff000000 +#define MCP_TRACE_FORMAT_LEN_SHIFT 24 + char *format_str; +}; + /******************************** Constants **********************************/ #define MAX_NAME_LEN 16 @@ -3337,6 
+3372,13 @@ enum dbg_status qed_dbg_print_attn(struct qed_hwfn *p_hwfn, enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr); /** + * @brief qed_dbg_alloc_user_data - Allocates user debug data. + * + * @param p_hwfn - HW device data + */ +enum dbg_status qed_dbg_alloc_user_data(struct qed_hwfn *p_hwfn); + +/** * @brief qed_dbg_get_status_str - Returns a string for the specified status. * * @param status - a debug status code. @@ -3381,8 +3423,7 @@ enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn, u32 *num_warnings); /** - * @brief qed_dbg_mcp_trace_set_meta_data - Sets a pointer to the MCP Trace - * meta data. + * @brief qed_dbg_mcp_trace_set_meta_data - Sets the MCP Trace meta data. * * Needed in case the MCP Trace dump doesn't contain the meta data (e.g. due to * no NVRAM access). @@ -3390,7 +3431,8 @@ enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn, * @param data - pointer to MCP Trace meta data * @param size - size of MCP Trace meta data in dwords */ -void qed_dbg_mcp_trace_set_meta_data(u32 *data, u32 size); +void qed_dbg_mcp_trace_set_meta_data(struct qed_hwfn *p_hwfn, + const u32 *meta_buf); /** * @brief qed_get_mcp_trace_results_buf_size - Returns the required buffer size @@ -3425,19 +3467,45 @@ enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn, char *results_buf); /** + * @brief qed_print_mcp_trace_results_cont - Prints MCP Trace results, and + * keeps the MCP trace meta data allocated, to support continuous MCP Trace + * parsing. After the continuous parsing ends, mcp_trace_free_meta_data should + * be called to free the meta data. + * + * @param p_hwfn - HW device data + * @param dump_buf - mcp trace dump buffer, starting from the header. + * @param results_buf - buffer for printing the mcp trace results. + * + * @return error if the parsing fails, ok otherwise. + */ +enum dbg_status qed_print_mcp_trace_results_cont(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + char *results_buf); + +/** * @brief print_mcp_trace_line - Prints MCP Trace results for a single line * + * @param p_hwfn - HW device data * @param dump_buf - mcp trace dump buffer, starting from the header. * @param num_dumped_bytes - number of bytes that were dumped. * @param results_buf - buffer for printing the mcp trace results. * * @return error if the parsing fails, ok otherwise. */ -enum dbg_status qed_print_mcp_trace_line(u8 *dump_buf, +enum dbg_status qed_print_mcp_trace_line(struct qed_hwfn *p_hwfn, + u8 *dump_buf, u32 num_dumped_bytes, char *results_buf); /** + * @brief mcp_trace_free_meta_data - Frees the MCP Trace meta data. + * Should be called after continuous MCP Trace parsing. + * + * @param p_hwfn - HW device data + */ +void qed_mcp_trace_free_meta_data(struct qed_hwfn *p_hwfn); + +/** * @brief qed_get_reg_fifo_results_buf_size - Returns the required buffer size * for reg_fifo results (in bytes). * @@ -4303,154 +4371,161 @@ void qed_set_rdma_error_level(struct qed_hwfn *p_hwfn, (IRO[29].base + ((pf_id) * IRO[29].m1)) #define ETH_RX_RATE_LIMIT_SIZE (IRO[29].size) +/* RSS indirection table entry update command per PF offset in TSTORM PF BAR0. + * Use eth_tstorm_rss_update_data for update. 
+ */ +#define TSTORM_ETH_RSS_UPDATE_OFFSET(pf_id) \ + (IRO[30].base + ((pf_id) * IRO[30].m1)) +#define TSTORM_ETH_RSS_UPDATE_SIZE (IRO[30].size) + /* Xstorm queue zone */ #define XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \ - (IRO[30].base + ((queue_id) * IRO[30].m1)) -#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[30].size) + (IRO[31].base + ((queue_id) * IRO[31].m1)) +#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[31].size) /* Ystorm cqe producer */ #define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) \ - (IRO[31].base + ((rss_id) * IRO[31].m1)) -#define YSTORM_TOE_CQ_PROD_SIZE (IRO[31].size) + (IRO[32].base + ((rss_id) * IRO[32].m1)) +#define YSTORM_TOE_CQ_PROD_SIZE (IRO[32].size) /* Ustorm cqe producer */ #define USTORM_TOE_CQ_PROD_OFFSET(rss_id) \ - (IRO[32].base + ((rss_id) * IRO[32].m1)) -#define USTORM_TOE_CQ_PROD_SIZE (IRO[32].size) + (IRO[33].base + ((rss_id) * IRO[33].m1)) +#define USTORM_TOE_CQ_PROD_SIZE (IRO[33].size) /* Ustorm grq producer */ #define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) \ - (IRO[33].base + ((pf_id) * IRO[33].m1)) -#define USTORM_TOE_GRQ_PROD_SIZE (IRO[33].size) + (IRO[34].base + ((pf_id) * IRO[34].m1)) +#define USTORM_TOE_GRQ_PROD_SIZE (IRO[34].size) /* Tstorm cmdq-cons of given command queue-id */ #define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) \ - (IRO[34].base + ((cmdq_queue_id) * IRO[34].m1)) -#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[34].size) + (IRO[35].base + ((cmdq_queue_id) * IRO[35].m1)) +#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[35].size) /* Tstorm (reflects M-Storm) bdq-external-producer of given function ID, * BDqueue-id. */ #define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \ - (IRO[35].base + ((func_id) * IRO[35].m1) + ((bdq_id) * IRO[35].m2)) -#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[35].size) + (IRO[36].base + ((func_id) * IRO[36].m1) + ((bdq_id) * IRO[36].m2)) +#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[36].size) /* Mstorm bdq-external-producer of given BDQ resource ID, BDqueue-id */ #define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \ - (IRO[36].base + ((func_id) * IRO[36].m1) + ((bdq_id) * IRO[36].m2)) -#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[36].size) + (IRO[37].base + ((func_id) * IRO[37].m1) + ((bdq_id) * IRO[37].m2)) +#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[37].size) /* Tstorm iSCSI RX stats */ #define TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \ - (IRO[37].base + ((pf_id) * IRO[37].m1)) -#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[37].size) + (IRO[38].base + ((pf_id) * IRO[38].m1)) +#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[38].size) /* Mstorm iSCSI RX stats */ #define MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \ - (IRO[38].base + ((pf_id) * IRO[38].m1)) -#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[38].size) + (IRO[39].base + ((pf_id) * IRO[39].m1)) +#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[39].size) /* Ustorm iSCSI RX stats */ #define USTORM_ISCSI_RX_STATS_OFFSET(pf_id) \ - (IRO[39].base + ((pf_id) * IRO[39].m1)) -#define USTORM_ISCSI_RX_STATS_SIZE (IRO[39].size) + (IRO[40].base + ((pf_id) * IRO[40].m1)) +#define USTORM_ISCSI_RX_STATS_SIZE (IRO[40].size) /* Xstorm iSCSI TX stats */ #define XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \ - (IRO[40].base + ((pf_id) * IRO[40].m1)) -#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[40].size) + (IRO[41].base + ((pf_id) * IRO[41].m1)) +#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[41].size) /* Ystorm iSCSI TX stats */ #define YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \ - (IRO[41].base + ((pf_id) * IRO[41].m1)) -#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[41].size) + (IRO[42].base + ((pf_id) * IRO[42].m1)) +#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[42].size) /* 
Pstorm iSCSI TX stats */ #define PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \ - (IRO[42].base + ((pf_id) * IRO[42].m1)) -#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[42].size) + (IRO[43].base + ((pf_id) * IRO[43].m1)) +#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[43].size) /* Tstorm FCoE RX stats */ #define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) \ - (IRO[43].base + ((pf_id) * IRO[43].m1)) -#define TSTORM_FCOE_RX_STATS_SIZE (IRO[43].size) + (IRO[44].base + ((pf_id) * IRO[44].m1)) +#define TSTORM_FCOE_RX_STATS_SIZE (IRO[44].size) /* Pstorm FCoE TX stats */ #define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) \ - (IRO[44].base + ((pf_id) * IRO[44].m1)) -#define PSTORM_FCOE_TX_STATS_SIZE (IRO[44].size) + (IRO[45].base + ((pf_id) * IRO[45].m1)) +#define PSTORM_FCOE_TX_STATS_SIZE (IRO[45].size) /* Pstorm RDMA queue statistics */ #define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \ - (IRO[45].base + ((rdma_stat_counter_id) * IRO[45].m1)) -#define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[45].size) + (IRO[46].base + ((rdma_stat_counter_id) * IRO[46].m1)) +#define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[46].size) /* Tstorm RDMA queue statistics */ #define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \ - (IRO[46].base + ((rdma_stat_counter_id) * IRO[46].m1)) -#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[46].size) + (IRO[47].base + ((rdma_stat_counter_id) * IRO[47].m1)) +#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[47].size) /* Xstorm error level for assert */ #define XSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ - (IRO[47].base + ((pf_id) * IRO[47].m1)) -#define XSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[47].size) + (IRO[48].base + ((pf_id) * IRO[48].m1)) +#define XSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[48].size) /* Ystorm error level for assert */ #define YSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ - (IRO[48].base + ((pf_id) * IRO[48].m1)) -#define YSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[48].size) + (IRO[49].base + ((pf_id) * IRO[49].m1)) +#define YSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[49].size) /* Pstorm error level for assert */ #define PSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ - (IRO[49].base + ((pf_id) * IRO[49].m1)) -#define PSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[49].size) + (IRO[50].base + ((pf_id) * IRO[50].m1)) +#define PSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[50].size) /* Tstorm error level for assert */ #define TSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ - (IRO[50].base + ((pf_id) * IRO[50].m1)) -#define TSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[50].size) + (IRO[51].base + ((pf_id) * IRO[51].m1)) +#define TSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[51].size) /* Mstorm error level for assert */ #define MSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ - (IRO[51].base + ((pf_id) * IRO[51].m1)) -#define MSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[51].size) + (IRO[52].base + ((pf_id) * IRO[52].m1)) +#define MSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[52].size) /* Ustorm error level for assert */ #define USTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ - (IRO[52].base + ((pf_id) * IRO[52].m1)) -#define USTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[52].size) + (IRO[53].base + ((pf_id) * IRO[53].m1)) +#define USTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[53].size) /* Xstorm iWARP rxmit stats */ #define XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) \ - (IRO[53].base + ((pf_id) * IRO[53].m1)) -#define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[53].size) + (IRO[54].base + ((pf_id) * IRO[54].m1)) +#define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[54].size) /* Tstorm RoCE Event Statistics */ #define TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) \ - (IRO[54].base + ((roce_pf_id) * IRO[54].m1)) -#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[54].size) + (IRO[55].base + ((roce_pf_id) * 
IRO[55].m1)) +#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[55].size) /* DCQCN Received Statistics */ #define YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id) \ - (IRO[55].base + ((roce_pf_id) * IRO[55].m1)) -#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE (IRO[55].size) + (IRO[56].base + ((roce_pf_id) * IRO[56].m1)) +#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE (IRO[56].size) /* RoCE Error Statistics */ #define YSTORM_ROCE_ERROR_STATS_OFFSET(roce_pf_id) \ - (IRO[56].base + ((roce_pf_id) * IRO[56].m1)) -#define YSTORM_ROCE_ERROR_STATS_SIZE (IRO[56].size) + (IRO[57].base + ((roce_pf_id) * IRO[57].m1)) +#define YSTORM_ROCE_ERROR_STATS_SIZE (IRO[57].size) /* DCQCN Sent Statistics */ #define PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) \ - (IRO[57].base + ((roce_pf_id) * IRO[57].m1)) -#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE (IRO[57].size) + (IRO[58].base + ((roce_pf_id) * IRO[58].m1)) +#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE (IRO[58].size) /* RoCE CQEs Statistics */ #define USTORM_ROCE_CQE_STATS_OFFSET(roce_pf_id) \ - (IRO[58].base + ((roce_pf_id) * IRO[58].m1)) -#define USTORM_ROCE_CQE_STATS_SIZE (IRO[58].size) + (IRO[59].base + ((roce_pf_id) * IRO[59].m1)) +#define USTORM_ROCE_CQE_STATS_SIZE (IRO[59].size) -static const struct iro iro_arr[59] = { +static const struct iro iro_arr[60] = { {0x0, 0x0, 0x0, 0x0, 0x8}, {0x4cb8, 0x88, 0x0, 0x0, 0x88}, {0x6530, 0x20, 0x0, 0x0, 0x20}, @@ -4461,14 +4536,14 @@ static const struct iro iro_arr[59] = { {0x84, 0x8, 0x0, 0x0, 0x2}, {0x4c48, 0x0, 0x0, 0x0, 0x78}, {0x3e38, 0x0, 0x0, 0x0, 0x78}, - {0x2b78, 0x0, 0x0, 0x0, 0x78}, + {0x3ef8, 0x0, 0x0, 0x0, 0x78}, {0x4c40, 0x0, 0x0, 0x0, 0x78}, {0x4998, 0x0, 0x0, 0x0, 0x78}, {0x7f50, 0x0, 0x0, 0x0, 0x78}, {0xa28, 0x8, 0x0, 0x0, 0x8}, {0x6210, 0x10, 0x0, 0x0, 0x10}, {0xb820, 0x30, 0x0, 0x0, 0x30}, - {0x96c0, 0x30, 0x0, 0x0, 0x30}, + {0xa990, 0x30, 0x0, 0x0, 0x30}, {0x4b68, 0x80, 0x0, 0x0, 0x40}, {0x1f8, 0x4, 0x0, 0x0, 0x4}, {0x53a8, 0x80, 0x4, 0x0, 0x4}, @@ -4476,11 +4551,12 @@ static const struct iro iro_arr[59] = { {0x4ba8, 0x80, 0x0, 0x0, 0x20}, {0x8158, 0x40, 0x0, 0x0, 0x30}, {0xe770, 0x60, 0x0, 0x0, 0x60}, - {0x2d10, 0x80, 0x0, 0x0, 0x38}, - {0xf2b8, 0x78, 0x0, 0x0, 0x78}, + {0x4090, 0x80, 0x0, 0x0, 0x38}, + {0xfea8, 0x78, 0x0, 0x0, 0x78}, {0x1f8, 0x4, 0x0, 0x0, 0x4}, {0xaf20, 0x0, 0x0, 0x0, 0xf0}, {0xb010, 0x8, 0x0, 0x0, 0x8}, + {0xc00, 0x8, 0x0, 0x0, 0x8}, {0x1f8, 0x8, 0x0, 0x0, 0x8}, {0xac0, 0x8, 0x0, 0x0, 0x8}, {0x2578, 0x8, 0x0, 0x0, 0x8}, @@ -4492,23 +4568,23 @@ static const struct iro iro_arr[59] = { {0x12908, 0x18, 0x0, 0x0, 0x10}, {0x11aa8, 0x40, 0x0, 0x0, 0x18}, {0xa588, 0x50, 0x0, 0x0, 0x20}, - {0x8700, 0x40, 0x0, 0x0, 0x28}, - {0x10300, 0x18, 0x0, 0x0, 0x10}, + {0x8f00, 0x40, 0x0, 0x0, 0x28}, + {0x10e30, 0x18, 0x0, 0x0, 0x10}, {0xde48, 0x48, 0x0, 0x0, 0x38}, - {0x10768, 0x20, 0x0, 0x0, 0x20}, - {0x2d48, 0x80, 0x0, 0x0, 0x10}, + {0x11298, 0x20, 0x0, 0x0, 0x20}, + {0x40c8, 0x80, 0x0, 0x0, 0x10}, {0x5048, 0x10, 0x0, 0x0, 0x10}, {0xc748, 0x8, 0x0, 0x0, 0x1}, - {0xa128, 0x8, 0x0, 0x0, 0x1}, - {0x10f00, 0x8, 0x0, 0x0, 0x1}, + {0xa928, 0x8, 0x0, 0x0, 0x1}, + {0x11a30, 0x8, 0x0, 0x0, 0x1}, {0xf030, 0x8, 0x0, 0x0, 0x1}, {0x13028, 0x8, 0x0, 0x0, 0x1}, {0x12c58, 0x8, 0x0, 0x0, 0x1}, {0xc9b8, 0x30, 0x0, 0x0, 0x10}, {0xed90, 0x28, 0x0, 0x0, 0x28}, - {0xa520, 0x18, 0x0, 0x0, 0x18}, - {0xa6a0, 0x8, 0x0, 0x0, 0x8}, - {0x13108, 0x8, 0x0, 0x0, 0x8}, + {0xad20, 0x18, 0x0, 0x0, 0x18}, + {0xaea0, 0x8, 0x0, 0x0, 0x8}, + {0x13c38, 0x8, 0x0, 0x0, 0x8}, {0x13c50, 0x18, 0x0, 0x0, 0x18}, }; @@ -5661,6 +5737,14 @@ enum 
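
/*
 * Sketch of how the renumbered IRO macros above resolve at runtime,
 * assuming the five-field layout suggested by the iro_arr initializers
 * (base, m1, m2, m3, size); the exact field widths are an assumption.
 */
#include <linux/types.h>

struct example_iro {
	u32 base;
	u16 m1;
	u16 m2;
	u16 m3;
	u16 size;
};

static u32 example_iro_offset(const struct example_iro *arr, int idx, u32 id)
{
	/* e.g. TSTORM_ETH_RSS_UPDATE_OFFSET(pf_id) is
	 * arr[30].base + pf_id * arr[30].m1
	 */
	return arr[idx].base + id * arr[idx].m1;
}
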
eth_filter_type { MAX_ETH_FILTER_TYPE }; +/* inner to inner vlan priority translation configurations */ +struct eth_in_to_in_pri_map_cfg { + u8 inner_vlan_pri_remap_en; + u8 reserved[7]; + u8 non_rdma_in_to_in_pri_map[8]; + u8 rdma_in_to_in_pri_map[8]; +}; + /* Eth IPv4 Fragment Type */ enum eth_ipv4_frag_type { ETH_IPV4_NOT_FRAG, @@ -6018,6 +6102,14 @@ struct tx_queue_update_ramrod_data { struct regpair reserved1[5]; }; +/* Inner to Inner VLAN priority map update mode */ +enum update_in_to_in_pri_map_mode_enum { + ETH_IN_TO_IN_PRI_MAP_UPDATE_DISABLED, + ETH_IN_TO_IN_PRI_MAP_UPDATE_NON_RDMA_TBL, + ETH_IN_TO_IN_PRI_MAP_UPDATE_RDMA_TBL, + MAX_UPDATE_IN_TO_IN_PRI_MAP_MODE_ENUM +}; + /* Ramrod data for vport update ramrod */ struct vport_filter_update_ramrod_data { struct eth_filter_cmd_header filter_cmd_hdr; @@ -6048,7 +6140,8 @@ struct vport_start_ramrod_data { u8 zero_placement_offset; u8 ctl_frame_mac_check_en; u8 ctl_frame_ethtype_check_en; - u8 reserved[1]; + u8 wipe_inner_vlan_pri_en; + struct eth_in_to_in_pri_map_cfg in_to_in_vlan_pri_map_cfg; }; /* Ramrod data for vport stop ramrod */ @@ -6100,7 +6193,9 @@ struct vport_update_ramrod_data_cmn { u8 update_ctl_frame_checks_en_flg; u8 ctl_frame_mac_check_en; u8 ctl_frame_ethtype_check_en; - u8 reserved[15]; + u8 update_in_to_in_pri_map_mode; + u8 in_to_in_pri_map[8]; + u8 reserved[6]; }; struct vport_update_ramrod_mcast { @@ -6929,11 +7024,6 @@ struct mstorm_rdma_task_st_ctx { struct regpair temp[4]; }; -/* The roce task context of Ustorm */ -struct ustorm_rdma_task_st_ctx { - struct regpair temp[2]; -}; - struct e4_ustorm_rdma_task_ag_ctx { u8 reserved; u8 state; @@ -7007,8 +7097,6 @@ struct e4_rdma_task_context { struct e4_mstorm_rdma_task_ag_ctx mstorm_ag_context; struct mstorm_rdma_task_st_ctx mstorm_st_context; struct rdif_task_context rdif_context; - struct ustorm_rdma_task_st_ctx ustorm_st_context; - struct regpair ustorm_st_padding[2]; struct e4_ustorm_rdma_task_ag_ctx ustorm_ag_context; }; @@ -7388,7 +7476,7 @@ struct e4_ustorm_rdma_conn_ag_ctx { #define E4_USTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK 0x1 #define E4_USTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT 7 u8 byte2; - u8 byte3; + u8 nvmf_only; __le16 conn_dpi; __le16 word1; __le32 cq_cons; @@ -7831,7 +7919,12 @@ struct roce_create_qp_req_ramrod_data { struct regpair qp_handle_for_cqe; struct regpair qp_handle_for_async; u8 stats_counter_id; - u8 reserved3[7]; + u8 reserved3[6]; + u8 flags2; +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_EDPM_MODE_MASK 0x1 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_EDPM_MODE_SHIFT 0 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_MASK 0x7F +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_SHIFT 1 __le16 regular_latency_phy_queue; __le16 dpi; }; @@ -7954,6 +8047,7 @@ enum roce_event_opcode { ROCE_EVENT_DESTROY_QP, ROCE_EVENT_CREATE_UD_QP, ROCE_EVENT_DESTROY_UD_QP, + ROCE_EVENT_FUNC_UPDATE, MAX_ROCE_EVENT_OPCODE }; @@ -7962,7 +8056,13 @@ struct roce_init_func_params { u8 ll2_queue_id; u8 cnp_vlan_priority; u8 cnp_dscp; - u8 reserved; + u8 flags; +#define ROCE_INIT_FUNC_PARAMS_DCQCN_NP_EN_MASK 0x1 +#define ROCE_INIT_FUNC_PARAMS_DCQCN_NP_EN_SHIFT 0 +#define ROCE_INIT_FUNC_PARAMS_DCQCN_RP_EN_MASK 0x1 +#define ROCE_INIT_FUNC_PARAMS_DCQCN_RP_EN_SHIFT 1 +#define ROCE_INIT_FUNC_PARAMS_RESERVED0_MASK 0x3F +#define ROCE_INIT_FUNC_PARAMS_RESERVED0_SHIFT 2 __le32 cnp_send_timeout; __le16 rl_offset; u8 rl_count_log; @@ -8109,9 +8209,24 @@ enum roce_ramrod_cmd_id { ROCE_RAMROD_DESTROY_QP, ROCE_RAMROD_CREATE_UD_QP, ROCE_RAMROD_DESTROY_UD_QP, + ROCE_RAMROD_FUNC_UPDATE, 
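
/*
 * The new flags fields above follow the HSI MASK/SHIFT convention; a
 * sketch of the generic helpers used with such pairs (the qed tree has
 * equivalents named GET_FIELD/SET_FIELD):
 */
#define EXAMPLE_GET_FIELD(value, name) \
	(((value) >> name##_SHIFT) & name##_MASK)

#define EXAMPLE_SET_FIELD(value, name, val)				\
	do {								\
		(value) &= ~((name##_MASK) << (name##_SHIFT));		\
		(value) |= (((val) & (name##_MASK)) << (name##_SHIFT));	\
	} while (0)
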
MAX_ROCE_RAMROD_CMD_ID }; +/* RoCE func init ramrod data */ +struct roce_update_func_params { + u8 cnp_vlan_priority; + u8 cnp_dscp; + __le16 flags; +#define ROCE_UPDATE_FUNC_PARAMS_DCQCN_NP_EN_MASK 0x1 +#define ROCE_UPDATE_FUNC_PARAMS_DCQCN_NP_EN_SHIFT 0 +#define ROCE_UPDATE_FUNC_PARAMS_DCQCN_RP_EN_MASK 0x1 +#define ROCE_UPDATE_FUNC_PARAMS_DCQCN_RP_EN_SHIFT 1 +#define ROCE_UPDATE_FUNC_PARAMS_RESERVED0_MASK 0x3FFF +#define ROCE_UPDATE_FUNC_PARAMS_RESERVED0_SHIFT 2 + __le32 cnp_send_timeout; +}; + struct e4_xstorm_roce_conn_ag_ctx_dq_ext_ld_part { u8 reserved0; u8 state; @@ -12161,7 +12276,7 @@ struct public_func { #define FUNC_MF_CFG_MAX_BW_DEFAULT 0x00640000 u32 status; -#define FUNC_STATUS_VLINK_DOWN 0x00000001 +#define FUNC_STATUS_VIRTUAL_LINK_UP 0x00000001 u32 mac_upper; #define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff @@ -12583,6 +12698,7 @@ struct public_drv_mb { #define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_MASK 0x0000FFFF #define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_OFFSET 0 #define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE 0x00000002 +#define DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK 0x00010000 u32 fw_mb_header; #define FW_MSG_CODE_MASK 0xffff0000 @@ -12635,6 +12751,7 @@ struct public_drv_mb { /* get MFW feature support response */ #define FW_MB_PARAM_FEATURE_SUPPORT_EEE 0x00000002 +#define FW_MB_PARAM_FEATURE_SUPPORT_VLINK 0x00010000 #define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR (1 << 0) @@ -13040,6 +13157,7 @@ struct nvm_cfg1_port { #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_OFFSET 0 #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G 0x1 #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G 0x2 +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G 0x4 #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G 0x8 #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G 0x10 #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G 0x20 @@ -13050,6 +13168,7 @@ struct nvm_cfg1_port { #define NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG 0x0 #define NVM_CFG1_PORT_DRV_LINK_SPEED_1G 0x1 #define NVM_CFG1_PORT_DRV_LINK_SPEED_10G 0x2 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_20G 0x3 #define NVM_CFG1_PORT_DRV_LINK_SPEED_25G 0x4 #define NVM_CFG1_PORT_DRV_LINK_SPEED_40G 0x5 #define NVM_CFG1_PORT_DRV_LINK_SPEED_50G 0x6 diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c index e860bdf0f752..beb8e5d6401a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c @@ -935,9 +935,8 @@ qed_iwarp_return_ep(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) } spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); - list_del(&ep->list_entry); - list_add_tail(&ep->list_entry, - &p_hwfn->p_rdma_info->iwarp.ep_free_list); + list_move_tail(&ep->list_entry, + &p_hwfn->p_rdma_info->iwarp.ep_free_list); spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); } @@ -2270,8 +2269,8 @@ static void qed_iwarp_process_pending_pkts(struct qed_hwfn *p_hwfn) if (rc == -EBUSY) break; - list_del(&mpa_buf->list_entry); - list_add_tail(&mpa_buf->list_entry, &iwarp_info->mpa_buf_list); + list_move_tail(&mpa_buf->list_entry, + &iwarp_info->mpa_buf_list); if (rc) { /* different error, don't continue */ DP_NOTICE(p_hwfn, "process pkts failed rc=%d\n", rc); diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index 14ac9cab2653..aa633381aa47 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -63,8 +63,8 @@ #include "qed_sp.h" #include "qed_rdma.h" -#define 
QED_LL2_RX_REGISTERED(ll2) ((ll2)->rx_queue.b_cb_registred) -#define QED_LL2_TX_REGISTERED(ll2) ((ll2)->tx_queue.b_cb_registred) +#define QED_LL2_RX_REGISTERED(ll2) ((ll2)->rx_queue.b_cb_registered) +#define QED_LL2_TX_REGISTERED(ll2) ((ll2)->tx_queue.b_cb_registered) #define QED_LL2_TX_SIZE (256) #define QED_LL2_RX_SIZE (4096) @@ -796,7 +796,18 @@ qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn, tx_pkt.vlan = p_buffer->vlan; tx_pkt.bd_flags = bd_flags; tx_pkt.l4_hdr_offset_w = l4_hdr_offset_w; - tx_pkt.tx_dest = p_ll2_conn->tx_dest; + switch (p_ll2_conn->tx_dest) { + case CORE_TX_DEST_NW: + tx_pkt.tx_dest = QED_LL2_TX_DEST_NW; + break; + case CORE_TX_DEST_LB: + tx_pkt.tx_dest = QED_LL2_TX_DEST_LB; + break; + case CORE_TX_DEST_DROP: + default: + tx_pkt.tx_dest = QED_LL2_TX_DEST_DROP; + break; + } tx_pkt.first_frag = first_frag; tx_pkt.first_frag_len = p_buffer->packet_length; tx_pkt.cookie = p_buffer; @@ -1404,7 +1415,7 @@ int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data) &p_hwfn->p_ll2_info[i], &p_ll2_info->rx_queue.rx_sb_index, &p_ll2_info->rx_queue.p_fw_cons); - p_ll2_info->rx_queue.b_cb_registred = true; + p_ll2_info->rx_queue.b_cb_registered = true; } if (data->input.tx_num_desc) { @@ -1413,7 +1424,7 @@ int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data) &p_hwfn->p_ll2_info[i], &p_ll2_info->tx_queue.tx_sb_index, &p_ll2_info->tx_queue.p_fw_cons); - p_ll2_info->tx_queue.b_cb_registred = true; + p_ll2_info->tx_queue.b_cb_registered = true; } *data->p_connection_handle = i; @@ -1929,7 +1940,7 @@ int qed_ll2_terminate_connection(void *cxt, u8 connection_handle) /* Stop Tx & Rx of connection, if needed */ if (QED_LL2_TX_REGISTERED(p_ll2_conn)) { - p_ll2_conn->tx_queue.b_cb_registred = false; + p_ll2_conn->tx_queue.b_cb_registered = false; smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */ rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn); if (rc) @@ -1940,7 +1951,7 @@ int qed_ll2_terminate_connection(void *cxt, u8 connection_handle) } if (QED_LL2_RX_REGISTERED(p_ll2_conn)) { - p_ll2_conn->rx_queue.b_cb_registred = false; + p_ll2_conn->rx_queue.b_cb_registered = false; smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */ rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn); if (rc) diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h index f65817012e97..1a5c1ae01474 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h @@ -79,7 +79,7 @@ struct qed_ll2_rx_queue { struct qed_chain rxq_chain; struct qed_chain rcq_chain; u8 rx_sb_index; - bool b_cb_registred; + bool b_cb_registered; __le16 *p_fw_cons; struct list_head active_descq; struct list_head free_descq; @@ -93,7 +93,7 @@ struct qed_ll2_tx_queue { spinlock_t lock; struct qed_chain txq_chain; u8 tx_sb_index; - bool b_cb_registred; + bool b_cb_registered; __le16 *p_fw_cons; struct list_head active_descq; struct list_head free_descq; diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 2094d86a7a08..75d217aaf8ce 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -1337,6 +1337,9 @@ static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params) if (params->adv_speeds & QED_LM_10000baseKR_Full_BIT) link_params->speed.advertised_speeds |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G; + if (params->adv_speeds & QED_LM_20000baseKR2_Full_BIT) + 
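
/*
 * Wiring in a new link speed means updating several mapping tables in
 * lock-step (set_link, advertised caps, supported caps, partner caps); a
 * sketch of the mapping direction shown here, with illustrative bit
 * defines rather than the real QED_LM_ and NVM_CFG1_ values:
 */
#include <linux/bits.h>
#include <linux/types.h>

#define EXAMPLE_LM_10000_BIT	BIT(0)
#define EXAMPLE_LM_20000_BIT	BIT(1)
#define EXAMPLE_NVM_CAP_10G	0x2
#define EXAMPLE_NVM_CAP_20G	0x4

static u32 example_qed_to_nvm_speeds(u32 adv_speeds)
{
	u32 nvm = 0;

	if (adv_speeds & EXAMPLE_LM_10000_BIT)
		nvm |= EXAMPLE_NVM_CAP_10G;
	if (adv_speeds & EXAMPLE_LM_20000_BIT)	/* the newly added 20G bit */
		nvm |= EXAMPLE_NVM_CAP_20G;
	return nvm;
}
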
link_params->speed.advertised_speeds |= + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G; if (params->adv_speeds & QED_LM_25000baseKR_Full_BIT) link_params->speed.advertised_speeds |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G; @@ -1503,6 +1506,9 @@ static void qed_fill_link(struct qed_hwfn *hwfn, NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) if_link->advertised_caps |= QED_LM_10000baseKR_Full_BIT; if (params.speed.advertised_speeds & + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G) + if_link->advertised_caps |= QED_LM_20000baseKR2_Full_BIT; + if (params.speed.advertised_speeds & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) if_link->advertised_caps |= QED_LM_25000baseKR_Full_BIT; if (params.speed.advertised_speeds & @@ -1523,6 +1529,9 @@ static void qed_fill_link(struct qed_hwfn *hwfn, NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) if_link->supported_caps |= QED_LM_10000baseKR_Full_BIT; if (link_caps.speed_capabilities & + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G) + if_link->supported_caps |= QED_LM_20000baseKR2_Full_BIT; + if (link_caps.speed_capabilities & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) if_link->supported_caps |= QED_LM_25000baseKR_Full_BIT; if (link_caps.speed_capabilities & @@ -1559,6 +1568,8 @@ static void qed_fill_link(struct qed_hwfn *hwfn, if_link->lp_caps |= QED_LM_1000baseT_Full_BIT; if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_10G) if_link->lp_caps |= QED_LM_10000baseKR_Full_BIT; + if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_20G) + if_link->lp_caps |= QED_LM_20000baseKR2_Full_BIT; if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_25G) if_link->lp_caps |= QED_LM_25000baseKR_Full_BIT; if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_40G) diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 58c7eb9d8e1b..b06e4cb72c6c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -1247,6 +1247,52 @@ static void qed_mcp_read_eee_config(struct qed_hwfn *p_hwfn, p_link->eee_lp_adv_caps |= QED_EEE_10G_ADV; } +static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct public_func *p_data, int pfid) +{ + u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, + PUBLIC_FUNC); + u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr); + u32 func_addr; + u32 i, size; + + func_addr = SECTION_ADDR(mfw_path_offsize, pfid); + memset(p_data, 0, sizeof(*p_data)); + + size = min_t(u32, sizeof(*p_data), QED_SECTION_SIZE(mfw_path_offsize)); + for (i = 0; i < size / sizeof(u32); i++) + ((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt, + func_addr + (i << 2)); + return size; +} + +static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn, + struct public_func *p_shmem_info) +{ + struct qed_mcp_function_info *p_info; + + p_info = &p_hwfn->mcp_info->func_info; + + p_info->bandwidth_min = QED_MFW_GET_FIELD(p_shmem_info->config, + FUNC_MF_CFG_MIN_BW); + if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) { + DP_INFO(p_hwfn, + "bandwidth minimum out of bounds [%02x]. Set to 1\n", + p_info->bandwidth_min); + p_info->bandwidth_min = 1; + } + + p_info->bandwidth_max = QED_MFW_GET_FIELD(p_shmem_info->config, + FUNC_MF_CFG_MAX_BW); + if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) { + DP_INFO(p_hwfn, + "bandwidth maximum out of bounds [%02x]. 
Set to 100\n", + p_info->bandwidth_max); + p_info->bandwidth_max = 100; + } +} + static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_reset) { @@ -1274,10 +1320,29 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn, goto out; } - if (p_hwfn->b_drv_link_init) - p_link->link_up = !!(status & LINK_STATUS_LINK_UP); - else + if (p_hwfn->b_drv_link_init) { + /* Link indication with modern MFW arrives as per-PF + * indication. + */ + if (p_hwfn->mcp_info->capabilities & + FW_MB_PARAM_FEATURE_SUPPORT_VLINK) { + struct public_func shmem_info; + + qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, + MCP_PF_ID(p_hwfn)); + p_link->link_up = !!(shmem_info.status & + FUNC_STATUS_VIRTUAL_LINK_UP); + qed_read_pf_bandwidth(p_hwfn, &shmem_info); + DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, + "Virtual link_up = %d\n", p_link->link_up); + } else { + p_link->link_up = !!(status & LINK_STATUS_LINK_UP); + DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, + "Physical link_up = %d\n", p_link->link_up); + } + } else { p_link->link_up = false; + } p_link->full_duplex = true; switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) { @@ -1504,53 +1569,6 @@ static void qed_mcp_send_protocol_stats(struct qed_hwfn *p_hwfn, qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); } -static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn, - struct public_func *p_shmem_info) -{ - struct qed_mcp_function_info *p_info; - - p_info = &p_hwfn->mcp_info->func_info; - - p_info->bandwidth_min = (p_shmem_info->config & - FUNC_MF_CFG_MIN_BW_MASK) >> - FUNC_MF_CFG_MIN_BW_SHIFT; - if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) { - DP_INFO(p_hwfn, - "bandwidth minimum out of bounds [%02x]. Set to 1\n", - p_info->bandwidth_min); - p_info->bandwidth_min = 1; - } - - p_info->bandwidth_max = (p_shmem_info->config & - FUNC_MF_CFG_MAX_BW_MASK) >> - FUNC_MF_CFG_MAX_BW_SHIFT; - if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) { - DP_INFO(p_hwfn, - "bandwidth maximum out of bounds [%02x]. 
Set to 100\n", - p_info->bandwidth_max); - p_info->bandwidth_max = 100; - } -} - -static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - struct public_func *p_data, int pfid) -{ - u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, - PUBLIC_FUNC); - u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr); - u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid); - u32 i, size; - - memset(p_data, 0, sizeof(*p_data)); - - size = min_t(u32, sizeof(*p_data), QED_SECTION_SIZE(mfw_path_offsize)); - for (i = 0; i < size / sizeof(u32); i++) - ((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt, - func_addr + (i << 2)); - return size; -} - static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct qed_mcp_function_info *p_info; @@ -3351,7 +3369,8 @@ int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 mcp_resp, mcp_param, features; - features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE; + features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE | + DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK; return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT, features, &mcp_resp, &mcp_param); diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.c b/drivers/net/ethernet/qlogic/qed/qed_ooo.c index 6172354b451c..ffac4ac87394 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ooo.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.c @@ -211,9 +211,8 @@ void qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn, if (!p_buffer) break; - list_del(&p_buffer->list_entry); - list_add_tail(&p_buffer->list_entry, - &p_ooo_info->free_buffers_list); + list_move_tail(&p_buffer->list_entry, + &p_ooo_info->free_buffers_list); } list_add_tail(&p_isle->list_entry, &p_ooo_info->free_isles_list); @@ -247,9 +246,8 @@ void qed_ooo_release_all_isles(struct qed_hwfn *p_hwfn, if (!p_buffer) break; - list_del(&p_buffer->list_entry); - list_add_tail(&p_buffer->list_entry, - &p_ooo_info->free_buffers_list); + list_move_tail(&p_buffer->list_entry, + &p_ooo_info->free_buffers_list); } list_add_tail(&p_isle->list_entry, &p_ooo_info->free_isles_list); @@ -353,11 +351,9 @@ void qed_ooo_delete_isles(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info, u32 cid, u8 drop_isle, u8 drop_size) { - struct qed_ooo_archipelago *p_archipelago = NULL; struct qed_ooo_isle *p_isle = NULL; u8 isle_idx; - p_archipelago = qed_ooo_seek_archipelago(p_hwfn, p_ooo_info, cid); for (isle_idx = 0; isle_idx < drop_size; isle_idx++) { p_isle = qed_ooo_seek_isle(p_hwfn, p_ooo_info, cid, drop_isle); if (!p_isle) { @@ -462,7 +458,6 @@ void qed_ooo_add_new_buffer(struct qed_hwfn *p_hwfn, void qed_ooo_join_isles(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info, u32 cid, u8 left_isle) { - struct qed_ooo_archipelago *p_archipelago = NULL; struct qed_ooo_isle *p_right_isle = NULL; struct qed_ooo_isle *p_left_isle = NULL; @@ -475,7 +470,6 @@ void qed_ooo_join_isles(struct qed_hwfn *p_hwfn, return; } - p_archipelago = qed_ooo_seek_archipelago(p_hwfn, p_ooo_info, cid); list_del(&p_right_isle->list_entry); p_ooo_info->cur_isles_number--; if (left_isle) { diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c index 1673fc90027f..c4a6274dd625 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_spq.c +++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c @@ -730,8 +730,7 @@ static int qed_spq_post_list(struct qed_hwfn *p_hwfn, !list_empty(head)) { struct qed_spq_entry *p_ent = list_first_entry(head, struct qed_spq_entry, list); - list_del(&p_ent->list); - 
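
/*
 * The list_move_tail() conversions in this series are mechanical: the
 * <linux/list.h> helper is exactly list_del() followed by list_add_tail(),
 * with the entry's pointers kept valid throughout. Sketch:
 */
#include <linux/list.h>

static void example_requeue_tail(struct list_head *entry,
				 struct list_head *dest)
{
	/* Equivalent to: list_del(entry); list_add_tail(entry, dest); */
	list_move_tail(entry, dest);
}
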
list_add_tail(&p_ent->list, &p_spq->completion_pending); + list_move_tail(&p_ent->list, &p_spq->completion_pending); p_spq->comp_sent_count++; rc = qed_spq_hw_post(p_hwfn, p_spq, p_ent); diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index 6a4d266fb8e2..de98a974673b 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -440,7 +440,7 @@ struct qede_fastpath { struct qede_tx_queue *txq; struct qede_tx_queue *xdp_tx; -#define VEC_NAME_SIZE (sizeof(((struct net_device *)0)->name) + 8) +#define VEC_NAME_SIZE (FIELD_SIZEOF(struct net_device, name) + 8) char name[VEC_NAME_SIZE]; }; diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index 19652cd27ca7..7ff50b4488f6 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -420,6 +420,7 @@ static const struct qede_link_mode_mapping qed_lm_map[] = { {QED_LM_1000baseT_Half_BIT, ETHTOOL_LINK_MODE_1000baseT_Half_BIT}, {QED_LM_1000baseT_Full_BIT, ETHTOOL_LINK_MODE_1000baseT_Full_BIT}, {QED_LM_10000baseKR_Full_BIT, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT}, + {QED_LM_20000baseKR2_Full_BIT, ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT}, {QED_LM_25000baseKR_Full_BIT, ETHTOOL_LINK_MODE_25000baseKR_Full_BIT}, {QED_LM_40000baseLR4_Full_BIT, ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT}, {QED_LM_50000baseKR2_Full_BIT, ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT}, @@ -535,6 +536,14 @@ static int qede_set_link_ksettings(struct net_device *dev, } params.adv_speeds = QED_LM_10000baseKR_Full_BIT; break; + case SPEED_20000: + if (!(current_link.supported_caps & + QED_LM_20000baseKR2_Full_BIT)) { + DP_INFO(edev, "20G speed not supported\n"); + return -EINVAL; + } + params.adv_speeds = QED_LM_20000baseKR2_Full_BIT; + break; case SPEED_25000: if (!(current_link.supported_caps & QED_LM_25000baseKR_Full_BIT)) { diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c index 77e386ebff09..f7c2f32237cb 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c @@ -904,13 +904,11 @@ static void qlcnic_sriov_pull_bc_msg(struct qlcnic_adapter *adapter, u32 *hdr, u32 *pay, u32 size) { struct qlcnic_hardware_context *ahw = adapter->ahw; - u32 fw_mbx; u8 i, max = 2, hdr_size, j; hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32)); max = (size / sizeof(u32)) + hdr_size; - fw_mbx = readl(QLCNIC_MBX_FW(ahw, 0)); for (i = 2, j = 0; j < hdr_size; i++, j++) *(hdr++) = readl(QLCNIC_MBX_FW(ahw, i)); for (; j < max; i++, j++) @@ -936,7 +934,7 @@ static int __qlcnic_sriov_issue_bc_post(struct qlcnic_vf_info *vf) static int qlcnic_sriov_issue_bc_post(struct qlcnic_bc_trans *trans, u8 type) { struct qlcnic_vf_info *vf = trans->vf; - u32 pay_size, hdr_size; + u32 pay_size; u32 *hdr, *pay; int ret; u8 pci_func = trans->func_id; @@ -947,14 +945,12 @@ static int qlcnic_sriov_issue_bc_post(struct qlcnic_bc_trans *trans, u8 type) if (type == QLC_BC_COMMAND) { hdr = (u32 *)(trans->req_hdr + trans->curr_req_frag); pay = (u32 *)(trans->req_pay + trans->curr_req_frag); - hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32)); pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size, trans->curr_req_frag); pay_size = (pay_size / sizeof(u32)); } else { hdr = (u32 *)(trans->rsp_hdr + trans->curr_rsp_frag); pay = (u32 *)(trans->rsp_pay + 
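
/*
 * FIELD_SIZEOF, adopted for VEC_NAME_SIZE above, yields the size of a
 * struct member without needing an instance; it expands roughly to:
 */
#define EXAMPLE_FIELD_SIZEOF(type, member) (sizeof(((type *)0)->member))
/* e.g. EXAMPLE_FIELD_SIZEOF(struct net_device, name) == IFNAMSIZ */
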
trans->curr_rsp_frag); pay_size = qlcnic_sriov_get_bc_paysize(trans->rsp_pay_size, trans->curr_rsp_frag); pay_size = (pay_size / sizeof(u32)); diff --git a/drivers/net/ethernet/qualcomm/qca_7k.c b/drivers/net/ethernet/qualcomm/qca_7k.c index 6c8543fb90c0..4292c89bd35c 100644 --- a/drivers/net/ethernet/qualcomm/qca_7k.c +++ b/drivers/net/ethernet/qualcomm/qca_7k.c @@ -81,8 +81,8 @@ qcaspi_read_register(struct qcaspi *qca, u16 reg, u16 *result) return ret; } -int -qcaspi_write_register(struct qcaspi *qca, u16 reg, u16 value) +static int +__qcaspi_write_register(struct qcaspi *qca, u16 reg, u16 value) { __be16 tx_data[2]; struct spi_transfer transfer[2]; @@ -117,3 +117,33 @@ qcaspi_write_register(struct qcaspi *qca, u16 reg, u16 value) return ret; } + +int +qcaspi_write_register(struct qcaspi *qca, u16 reg, u16 value, int retry) +{ + int ret, i = 0; + u16 confirmed; + + do { + ret = __qcaspi_write_register(qca, reg, value); + if (ret) + return ret; + + if (!retry) + return 0; + + ret = qcaspi_read_register(qca, reg, &confirmed); + if (ret) + return ret; + + ret = confirmed != value; + if (!ret) + return 0; + + i++; + qca->stats.write_verify_failed++; + + } while (i <= retry); + + return ret; +} diff --git a/drivers/net/ethernet/qualcomm/qca_7k.h b/drivers/net/ethernet/qualcomm/qca_7k.h index 27124c2bb77a..356de8ec5d48 100644 --- a/drivers/net/ethernet/qualcomm/qca_7k.h +++ b/drivers/net/ethernet/qualcomm/qca_7k.h @@ -66,6 +66,6 @@ void qcaspi_spi_error(struct qcaspi *qca); int qcaspi_read_register(struct qcaspi *qca, u16 reg, u16 *result); -int qcaspi_write_register(struct qcaspi *qca, u16 reg, u16 value); +int qcaspi_write_register(struct qcaspi *qca, u16 reg, u16 value, int retry); #endif /* _QCA_7K_H */ diff --git a/drivers/net/ethernet/qualcomm/qca_debug.c b/drivers/net/ethernet/qualcomm/qca_debug.c index 51d89c86e60f..a9f1bc013364 100644 --- a/drivers/net/ethernet/qualcomm/qca_debug.c +++ b/drivers/net/ethernet/qualcomm/qca_debug.c @@ -60,6 +60,7 @@ static const char qcaspi_gstrings_stats[][ETH_GSTRING_LEN] = { "Write buffer misses", "Transmit ring full", "SPI errors", + "Write verify errors", }; #ifdef CONFIG_DEBUG_FS diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c index 66b775d462fd..d5310504f436 100644 --- a/drivers/net/ethernet/qualcomm/qca_spi.c +++ b/drivers/net/ethernet/qualcomm/qca_spi.c @@ -69,6 +69,12 @@ static int qcaspi_pluggable = QCASPI_PLUGGABLE_MIN; module_param(qcaspi_pluggable, int, 0); MODULE_PARM_DESC(qcaspi_pluggable, "Pluggable SPI connection (yes/no)."); +#define QCASPI_WRITE_VERIFY_MIN 0 +#define QCASPI_WRITE_VERIFY_MAX 3 +static int wr_verify = QCASPI_WRITE_VERIFY_MIN; +module_param(wr_verify, int, 0); +MODULE_PARM_DESC(wr_verify, "SPI register write verify trials. 
Use 0-3."); + #define QCASPI_TX_TIMEOUT (1 * HZ) #define QCASPI_QCA7K_REBOOT_TIME_MS 1000 @@ -77,7 +83,7 @@ start_spi_intr_handling(struct qcaspi *qca, u16 *intr_cause) { *intr_cause = 0; - qcaspi_write_register(qca, SPI_REG_INTR_ENABLE, 0); + qcaspi_write_register(qca, SPI_REG_INTR_ENABLE, 0, wr_verify); qcaspi_read_register(qca, SPI_REG_INTR_CAUSE, intr_cause); netdev_dbg(qca->net_dev, "interrupts: 0x%04x\n", *intr_cause); } @@ -90,8 +96,8 @@ end_spi_intr_handling(struct qcaspi *qca, u16 intr_cause) SPI_INT_RDBUF_ERR | SPI_INT_WRBUF_ERR); - qcaspi_write_register(qca, SPI_REG_INTR_CAUSE, intr_cause); - qcaspi_write_register(qca, SPI_REG_INTR_ENABLE, intr_enable); + qcaspi_write_register(qca, SPI_REG_INTR_CAUSE, intr_cause, 0); + qcaspi_write_register(qca, SPI_REG_INTR_ENABLE, intr_enable, wr_verify); netdev_dbg(qca->net_dev, "acking int: 0x%04x\n", intr_cause); } @@ -239,7 +245,7 @@ qcaspi_tx_frame(struct qcaspi *qca, struct sk_buff *skb) len = skb->len; - qcaspi_write_register(qca, SPI_REG_BFR_SIZE, len); + qcaspi_write_register(qca, SPI_REG_BFR_SIZE, len, wr_verify); if (qca->legacy_mode) qcaspi_tx_cmd(qca, QCA7K_SPI_WRITE | QCA7K_SPI_EXTERNAL); @@ -345,6 +351,7 @@ qcaspi_receive(struct qcaspi *qca) /* Read the packet size. */ qcaspi_read_register(qca, SPI_REG_RDBUF_BYTE_AVA, &available); + netdev_dbg(net_dev, "qcaspi_receive: SPI_REG_RDBUF_BYTE_AVA: Value: %08x\n", available); @@ -353,7 +360,7 @@ qcaspi_receive(struct qcaspi *qca) return -1; } - qcaspi_write_register(qca, SPI_REG_BFR_SIZE, available); + qcaspi_write_register(qca, SPI_REG_BFR_SIZE, available, wr_verify); if (qca->legacy_mode) qcaspi_tx_cmd(qca, QCA7K_SPI_READ | QCA7K_SPI_EXTERNAL); @@ -524,7 +531,7 @@ qcaspi_qca7k_sync(struct qcaspi *qca, int event) netdev_dbg(qca->net_dev, "sync: resetting device.\n"); qcaspi_read_register(qca, SPI_REG_SPI_CONFIG, &spi_config); spi_config |= QCASPI_SLAVE_RESET_BIT; - qcaspi_write_register(qca, SPI_REG_SPI_CONFIG, spi_config); + qcaspi_write_register(qca, SPI_REG_SPI_CONFIG, spi_config, 0); qca->sync = QCASPI_SYNC_RESET; qca->stats.trig_reset++; @@ -684,7 +691,7 @@ qcaspi_netdev_close(struct net_device *dev) netif_stop_queue(dev); - qcaspi_write_register(qca, SPI_REG_INTR_ENABLE, 0); + qcaspi_write_register(qca, SPI_REG_INTR_ENABLE, 0, wr_verify); free_irq(qca->spi_dev->irq, qca); kthread_stop(qca->spi_thread); @@ -904,6 +911,13 @@ qca_spi_probe(struct spi_device *spi) return -EINVAL; } + if (wr_verify < QCASPI_WRITE_VERIFY_MIN || + wr_verify > QCASPI_WRITE_VERIFY_MAX) { + dev_err(&spi->dev, "Invalid write verify: %d\n", + wr_verify); + return -EINVAL; + } + dev_info(&spi->dev, "ver=%s, clkspeed=%d, burst_len=%d, pluggable=%d\n", QCASPI_DRV_VERSION, qcaspi_clkspeed, diff --git a/drivers/net/ethernet/qualcomm/qca_spi.h b/drivers/net/ethernet/qualcomm/qca_spi.h index fc0e98726b36..2d2c49726492 100644 --- a/drivers/net/ethernet/qualcomm/qca_spi.h +++ b/drivers/net/ethernet/qualcomm/qca_spi.h @@ -73,6 +73,7 @@ struct qcaspi_stats { u64 write_buf_miss; u64 ring_full; u64 spi_err; + u64 write_verify_failed; }; struct qcaspi { diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c index aa11b70b9ca4..04aa592f35c3 100644 --- a/drivers/net/ethernet/rdc/r6040.c +++ b/drivers/net/ethernet/rdc/r6040.c @@ -1024,16 +1024,8 @@ static int r6040_mii_probe(struct net_device *dev) return PTR_ERR(phydev); } - /* mask with MAC supported features */ - phydev->supported &= (SUPPORTED_10baseT_Half - | SUPPORTED_10baseT_Full - | SUPPORTED_100baseT_Half - | SUPPORTED_100baseT_Full - | 
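
/*
 * Sketch of the read-back verification scheme qca_spi gains above; the
 * example_* names are placeholders, and returning -EIO on a persistent
 * mismatch simplifies the driver's actual return value (it returns the
 * comparison result).
 */
#include <linux/errno.h>
#include <linux/types.h>

struct example_spi;				/* opaque handle (assumed) */
int example_write(struct example_spi *dev, u16 reg, u16 value);
int example_read(struct example_spi *dev, u16 reg, u16 *value);

static int example_write_verified(struct example_spi *dev, u16 reg,
				  u16 value, int retries)
{
	u16 readback;
	int ret, attempt = 0;

	do {
		ret = example_write(dev, reg, value);
		if (ret)
			return ret;
		if (!retries)			/* verification disabled */
			return 0;
		ret = example_read(dev, reg, &readback);
		if (ret)
			return ret;
		if (readback == value)
			return 0;
	} while (attempt++ < retries);

	return -EIO;				/* value never stuck */
}
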
SUPPORTED_Autoneg - | SUPPORTED_MII - | SUPPORTED_TP); - - phydev->advertising = phydev->supported; + phy_set_max_speed(phydev, SPEED_100); + lp->old_link = 0; lp->old_duplex = -1; diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 3a5e6160bf0d..b68e32186d67 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -77,8 +77,6 @@ static const int multicast_filter_limit = 32; #define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc)) #define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc)) -#define RTL8169_TX_TIMEOUT (6*HZ) - /* write/read MMIO register */ #define RTL_W8(tp, reg, val8) writeb((val8), tp->mmio_addr + (reg)) #define RTL_W16(tp, reg, val16) writew((val16), tp->mmio_addr + (reg)) @@ -1354,7 +1352,8 @@ static void rtl_irq_enable_all(struct rtl8169_private *tp) static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp) { rtl_irq_disable(tp); - rtl_ack_events(tp, RTL_EVENT_NAPI | tp->event_slow); + rtl_ack_events(tp, 0xffff); + /* PCI commit */ RTL_R8(tp, ChipCmd); } @@ -4048,23 +4047,11 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp) rtl_hw_phy_config(dev); if (tp->mac_version <= RTL_GIGA_MAC_VER_06) { - netif_dbg(tp, drv, dev, - "Set MAC Reg C+CR Offset 0x82h = 0x01h\n"); - RTL_W8(tp, 0x82, 0x01); - } - - pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40); - - if (tp->mac_version <= RTL_GIGA_MAC_VER_06) + pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40); pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08); - - if (tp->mac_version == RTL_GIGA_MAC_VER_02) { netif_dbg(tp, drv, dev, "Set MAC Reg C+CR Offset 0x82h = 0x01h\n"); RTL_W8(tp, 0x82, 0x01); - netif_dbg(tp, drv, dev, - "Set PHY Reg 0x0bh = 0x00h\n"); - rtl_writephy(tp, 0x0b, 0x0000); //w 0x0b 15 0 0 } /* We may have called phy_speed_down before */ @@ -7368,11 +7355,9 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) tp->cp_cmd = RTL_R16(tp, CPlusCmd); - if ((sizeof(dma_addr_t) > 4) && - (use_dac == 1 || (use_dac == -1 && pci_is_pcie(pdev) && - tp->mac_version >= RTL_GIGA_MAC_VER_18)) && - !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && - !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { + if (sizeof(dma_addr_t) > 4 && (use_dac == 1 || (use_dac == -1 && + tp->mac_version >= RTL_GIGA_MAC_VER_18)) && + !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { /* CPlusCmd Dual Access Cycle is only needed for non-PCIe */ if (!pci_is_pcie(pdev)) @@ -7388,14 +7373,12 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) rtl_init_rxcfg(tp); - rtl_irq_disable(tp); + rtl8169_irq_mask_and_ack(tp); rtl_hw_initialize(tp); rtl_hw_reset(tp); - rtl_ack_events(tp, 0xffff); - pci_set_master(pdev); rtl_init_mdio_ops(tp); @@ -7435,7 +7418,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dev->dev_addr[i] = RTL_R8(tp, MAC0 + i); dev->ethtool_ops = &rtl8169_ethtool_ops; - dev->watchdog_timeo = RTL8169_TX_TIMEOUT; netif_napi_add(dev, &tp->napi, rtl8169_poll, NAPI_POLL_WEIGHT); diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h index 9b6bf557a2f5..1c6e4df94f01 100644 --- a/drivers/net/ethernet/renesas/ravb.h +++ b/drivers/net/ethernet/renesas/ravb.h @@ -959,7 +959,10 @@ enum RAVB_QUEUE { #define RX_QUEUE_OFFSET 4 #define NUM_RX_QUEUE 2 #define NUM_TX_QUEUE 2 -#define NUM_TX_DESC 2 /* TX descriptors per packet */ + +/* TX descriptors per packet */ 
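/* Note: the per-generation split below mirrors the ravb_start_xmit()
 * changes in the ravb_main.c hunks that follow. R-Car Gen2 bounces the
 * first DPTR_ALIGN bytes of each frame through an aligned buffer and so
 * chains an FSTART/FEND descriptor pair per packet, while Gen3 maps the
 * whole skb with a single FSINGLE descriptor.
 */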
+#define NUM_TX_DESC_GEN2 2 +#define NUM_TX_DESC_GEN3 1 struct ravb_tstamp_skb { struct list_head list; @@ -1038,6 +1041,7 @@ struct ravb_private { unsigned no_avb_link:1; unsigned avb_link_active_low:1; unsigned wol_enabled:1; + int num_tx_desc; /* TX descriptors per packet */ }; static inline u32 ravb_read(struct net_device *ndev, enum ravb_reg reg) diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index d6f753925352..defed0d0c51d 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -182,6 +182,7 @@ static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only) { struct ravb_private *priv = netdev_priv(ndev); struct net_device_stats *stats = &priv->stats[q]; + int num_tx_desc = priv->num_tx_desc; struct ravb_tx_desc *desc; int free_num = 0; int entry; @@ -191,7 +192,7 @@ static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only) bool txed; entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] * - NUM_TX_DESC); + num_tx_desc); desc = &priv->tx_ring[q][entry]; txed = desc->die_dt == DT_FEMPTY; if (free_txed_only && !txed) @@ -200,12 +201,12 @@ static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only) dma_rmb(); size = le16_to_cpu(desc->ds_tagl) & TX_DS; /* Free the original skb. */ - if (priv->tx_skb[q][entry / NUM_TX_DESC]) { + if (priv->tx_skb[q][entry / num_tx_desc]) { dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr), size, DMA_TO_DEVICE); /* Last packet descriptor? */ - if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) { - entry /= NUM_TX_DESC; + if (entry % num_tx_desc == num_tx_desc - 1) { + entry /= num_tx_desc; dev_kfree_skb_any(priv->tx_skb[q][entry]); priv->tx_skb[q][entry] = NULL; if (txed) @@ -224,6 +225,7 @@ static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only) static void ravb_ring_free(struct net_device *ndev, int q) { struct ravb_private *priv = netdev_priv(ndev); + int num_tx_desc = priv->num_tx_desc; int ring_size; int i; @@ -249,7 +251,7 @@ static void ravb_ring_free(struct net_device *ndev, int q) ravb_tx_free(ndev, q, false); ring_size = sizeof(struct ravb_tx_desc) * - (priv->num_tx_ring[q] * NUM_TX_DESC + 1); + (priv->num_tx_ring[q] * num_tx_desc + 1); dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q], priv->tx_desc_dma[q]); priv->tx_ring[q] = NULL; @@ -278,12 +280,13 @@ static void ravb_ring_free(struct net_device *ndev, int q) static void ravb_ring_format(struct net_device *ndev, int q) { struct ravb_private *priv = netdev_priv(ndev); + int num_tx_desc = priv->num_tx_desc; struct ravb_ex_rx_desc *rx_desc; struct ravb_tx_desc *tx_desc; struct ravb_desc *desc; int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q]; int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] * - NUM_TX_DESC; + num_tx_desc; dma_addr_t dma_addr; int i; @@ -318,8 +321,10 @@ static void ravb_ring_format(struct net_device *ndev, int q) for (i = 0, tx_desc = priv->tx_ring[q]; i < priv->num_tx_ring[q]; i++, tx_desc++) { tx_desc->die_dt = DT_EEMPTY; - tx_desc++; - tx_desc->die_dt = DT_EEMPTY; + if (num_tx_desc > 1) { + tx_desc++; + tx_desc->die_dt = DT_EEMPTY; + } } tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]); tx_desc->die_dt = DT_LINKFIX; /* type */ @@ -339,6 +344,7 @@ static void ravb_ring_format(struct net_device *ndev, int q) static int ravb_ring_init(struct net_device *ndev, int q) { struct ravb_private *priv = netdev_priv(ndev); + int num_tx_desc = priv->num_tx_desc; struct sk_buff *skb; int 
ring_size; int i; @@ -362,11 +368,13 @@ static int ravb_ring_init(struct net_device *ndev, int q) priv->rx_skb[q][i] = skb; } - /* Allocate rings for the aligned buffers */ - priv->tx_align[q] = kmalloc(DPTR_ALIGN * priv->num_tx_ring[q] + - DPTR_ALIGN - 1, GFP_KERNEL); - if (!priv->tx_align[q]) - goto error; + if (num_tx_desc > 1) { + /* Allocate rings for the aligned buffers */ + priv->tx_align[q] = kmalloc(DPTR_ALIGN * priv->num_tx_ring[q] + + DPTR_ALIGN - 1, GFP_KERNEL); + if (!priv->tx_align[q]) + goto error; + } /* Allocate all RX descriptors. */ ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1); @@ -380,7 +388,7 @@ /* Allocate all TX descriptors. */ ring_size = sizeof(struct ravb_tx_desc) * - (priv->num_tx_ring[q] * NUM_TX_DESC + 1); + (priv->num_tx_ring[q] * num_tx_desc + 1); priv->tx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size, &priv->tx_desc_dma[q], GFP_KERNEL); @@ -1074,8 +1082,11 @@ static int ravb_phy_init(struct net_device *ndev) netdev_info(ndev, "limited PHY to 100Mbit/s\n"); } - /* 10BASE is not supported */ - phydev->supported &= ~PHY_10BT_FEATURES; + /* 10BASE, Pause and Asym Pause are not supported */ + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT); + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT); + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Pause_BIT); + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT); phy_attached_info(phydev); @@ -1485,6 +1496,7 @@ static void ravb_tx_timeout_work(struct work_struct *work) static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct ravb_private *priv = netdev_priv(ndev); + int num_tx_desc = priv->num_tx_desc; u16 q = skb_get_queue_mapping(skb); struct ravb_tstamp_skb *ts_skb; struct ravb_tx_desc *desc; @@ -1496,7 +1508,7 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev) spin_lock_irqsave(&priv->lock, flags); if (priv->cur_tx[q] - priv->dirty_tx[q] > (priv->num_tx_ring[q] - 1) * - NUM_TX_DESC) { + num_tx_desc) { netif_err(priv, tx_queued, ndev, "still transmitting with the full ring!\n"); netif_stop_subqueue(ndev, q); @@ -1507,41 +1519,55 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev) if (skb_put_padto(skb, ETH_ZLEN)) goto exit; - entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * NUM_TX_DESC); - priv->tx_skb[q][entry / NUM_TX_DESC] = skb; - - buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) + - entry / NUM_TX_DESC * DPTR_ALIGN; - len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data; - /* Zero length DMA descriptors are problematic as they seem to - * terminate DMA transfers. Avoid them by simply using a length of - * DPTR_ALIGN (4) when skb data is aligned to DPTR_ALIGN. - * - * As skb is guaranteed to have at least ETH_ZLEN (60) bytes of - * data by the call to skb_put_padto() above this is safe with - * respect to both the length of the first DMA descriptor (len) - * overflowing the available data and the length of the second DMA - * descriptor (skb->len - len) being negative. - */ - if (len == 0) - len = DPTR_ALIGN; + entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * num_tx_desc); + priv->tx_skb[q][entry / num_tx_desc] = skb; + + if (num_tx_desc > 1) { + buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) + + entry / num_tx_desc * DPTR_ALIGN; + len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data; + + /* Zero length DMA descriptors are problematic as they seem + * to terminate DMA transfers. 
Avoid them by simply using a + * length of DPTR_ALIGN (4) when skb data is aligned to + * DPTR_ALIGN. + * + * As skb is guaranteed to have at least ETH_ZLEN (60) + * bytes of data by the call to skb_put_padto() above this + * is safe with respect to both the length of the first DMA + * descriptor (len) overflowing the available data and the + * length of the second DMA descriptor (skb->len - len) + * being negative. + */ + if (len == 0) + len = DPTR_ALIGN; - memcpy(buffer, skb->data, len); - dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE); - if (dma_mapping_error(ndev->dev.parent, dma_addr)) - goto drop; + memcpy(buffer, skb->data, len); + dma_addr = dma_map_single(ndev->dev.parent, buffer, len, + DMA_TO_DEVICE); + if (dma_mapping_error(ndev->dev.parent, dma_addr)) + goto drop; - desc = &priv->tx_ring[q][entry]; - desc->ds_tagl = cpu_to_le16(len); - desc->dptr = cpu_to_le32(dma_addr); + desc = &priv->tx_ring[q][entry]; + desc->ds_tagl = cpu_to_le16(len); + desc->dptr = cpu_to_le32(dma_addr); - buffer = skb->data + len; - len = skb->len - len; - dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE); - if (dma_mapping_error(ndev->dev.parent, dma_addr)) - goto unmap; + buffer = skb->data + len; + len = skb->len - len; + dma_addr = dma_map_single(ndev->dev.parent, buffer, len, + DMA_TO_DEVICE); + if (dma_mapping_error(ndev->dev.parent, dma_addr)) + goto unmap; - desc++; + desc++; + } else { + desc = &priv->tx_ring[q][entry]; + len = skb->len; + dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len, + DMA_TO_DEVICE); + if (dma_mapping_error(ndev->dev.parent, dma_addr)) + goto drop; + } desc->ds_tagl = cpu_to_le16(len); desc->dptr = cpu_to_le32(dma_addr); @@ -1549,9 +1575,11 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev) if (q == RAVB_NC) { ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC); if (!ts_skb) { - desc--; - dma_unmap_single(ndev->dev.parent, dma_addr, len, - DMA_TO_DEVICE); + if (num_tx_desc > 1) { + desc--; + dma_unmap_single(ndev->dev.parent, dma_addr, + len, DMA_TO_DEVICE); + } goto unmap; } ts_skb->skb = skb; @@ -1568,15 +1596,18 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev) skb_tx_timestamp(skb); /* Descriptor type must be set after all the above writes */ dma_wmb(); - desc->die_dt = DT_FEND; - desc--; - desc->die_dt = DT_FSTART; - + if (num_tx_desc > 1) { + desc->die_dt = DT_FEND; + desc--; + desc->die_dt = DT_FSTART; + } else { + desc->die_dt = DT_FSINGLE; + } ravb_modify(ndev, TCCR, TCCR_TSRQ0 << q, TCCR_TSRQ0 << q); - priv->cur_tx[q] += NUM_TX_DESC; + priv->cur_tx[q] += num_tx_desc; if (priv->cur_tx[q] - priv->dirty_tx[q] > - (priv->num_tx_ring[q] - 1) * NUM_TX_DESC && + (priv->num_tx_ring[q] - 1) * num_tx_desc && !ravb_tx_free(ndev, q, true)) netif_stop_subqueue(ndev, q); @@ -1590,7 +1621,7 @@ unmap: le16_to_cpu(desc->ds_tagl), DMA_TO_DEVICE); drop: dev_kfree_skb_any(skb); - priv->tx_skb[q][entry / NUM_TX_DESC] = NULL; + priv->tx_skb[q][entry / num_tx_desc] = NULL; goto exit; } @@ -2076,6 +2107,9 @@ static int ravb_probe(struct platform_device *pdev) ndev->max_mtu = 2048 - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN); ndev->min_mtu = ETH_MIN_MTU; + priv->num_tx_desc = chip_id == RCAR_GEN2 ? 
+ NUM_TX_DESC_GEN2 : NUM_TX_DESC_GEN3; + /* Set function */ ndev->netdev_ops = &ravb_netdev_ops; ndev->ethtool_ops = &ravb_ethtool_ops; diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c index a9da1ad4b4f2..690aee88f0eb 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c @@ -298,8 +298,8 @@ static int sxgbe_init_phy(struct net_device *ndev) /* Stop Advertising 1000BASE Capability if interface is not GMII */ if ((phy_iface == PHY_INTERFACE_MODE_MII) || (phy_iface == PHY_INTERFACE_MODE_RMII)) - phydev->advertising &= ~(SUPPORTED_1000baseT_Half | - SUPPORTED_1000baseT_Full); + phy_set_max_speed(phydev, SPEED_100); + if (phydev->phy_id == 0) { phy_disconnect(phydev); return -ENODEV; } diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c index b1b53f6c452f..8355dfbb8ec3 100644 --- a/drivers/net/ethernet/smsc/smc911x.c +++ b/drivers/net/ethernet/smsc/smc911x.c @@ -513,7 +513,8 @@ static void smc911x_hardware_send_pkt(struct net_device *dev) * now, or set the card to generates an interrupt when ready * for the packet. */ -static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t +smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct smc911x_local *lp = netdev_priv(dev); unsigned int free; diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index b944828f9ea3..8d6cff8bd162 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c @@ -638,7 +638,8 @@ done: if (!THROTTLE_TX_PKTS) * now, or set the card to generates an interrupt when ready * for the packet. */ -static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t +smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct smc_local *lp = netdev_priv(dev); void __iomem *ioaddr = lp->base; diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index f0afb88d7bc2..99a5a8a7c777 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c @@ -1048,10 +1048,10 @@ static int smsc911x_mii_probe(struct net_device *dev) phy_attached_info(phydev); + phy_set_max_speed(phydev, SPEED_100); + /* mask with MAC supported features */ - phydev->supported &= (PHY_BASIC_FEATURES | SUPPORTED_Pause | - SUPPORTED_Asym_Pause); - phydev->advertising = phydev->supported; + phy_support_asym_pause(phydev); pdata->last_duplex = -1; pdata->last_carrier = -1; @@ -1786,7 +1786,8 @@ static int smsc911x_stop(struct net_device *dev) } /* Entry point for transmitting a packet */ -static int smsc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t +smsc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct smsc911x_data *pdata = netdev_priv(dev); unsigned int freespace; diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c index 2fa3c1d03abc..9b6366b20110 100644 --- a/drivers/net/ethernet/smsc/smsc9420.c +++ b/drivers/net/ethernet/smsc/smsc9420.c @@ -1135,10 +1135,10 @@ static int smsc9420_mii_probe(struct net_device *dev) return PTR_ERR(phydev); } + phy_set_max_speed(phydev, SPEED_100); + /* mask with MAC supported features */ - phydev->supported &= (PHY_BASIC_FEATURES | SUPPORTED_Pause | - SUPPORTED_Asym_Pause); - phydev->advertising = phydev->supported; + phy_support_asym_pause(phydev); 
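The conversions above replace open-coded SUPPORTED_*/advertising masks with the phylib helpers. A minimal probe sketch for a MAC limited to 100 Mbit/s that handles pause frames; the foo_* names and the priv layout are illustrative, only the phy_* calls are the real API:

	static int foo_mii_probe(struct net_device *ndev)
	{
		struct foo_priv *priv = netdev_priv(ndev);
		struct phy_device *phydev;

		phydev = phy_connect(ndev, priv->phy_name, foo_adjust_link,
				     PHY_INTERFACE_MODE_MII);
		if (IS_ERR(phydev))
			return PTR_ERR(phydev);

		/* drop all link modes above 100 Mbit/s instead of masking
		 * phydev->supported by hand
		 */
		phy_set_max_speed(phydev, SPEED_100);

		/* the MAC handles symmetric and asymmetric pause */
		phy_support_asym_pause(phydev);

		phy_attached_info(phydev);
		return 0;
	}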
phy_attached_info(phydev); diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c index f7ecceeb1e28..6732f5cbde08 100644 --- a/drivers/net/ethernet/socionext/sni_ave.c +++ b/drivers/net/ethernet/socionext/sni_ave.c @@ -461,16 +461,7 @@ static int ave_ethtool_set_pauseparam(struct net_device *ndev, priv->pause_rx = pause->rx_pause; priv->pause_tx = pause->tx_pause; - phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause); - if (pause->rx_pause) - phydev->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause; - if (pause->tx_pause) - phydev->advertising ^= ADVERTISED_Asym_Pause; - - if (pause->autoneg) { - if (netif_running(ndev)) - phy_start_aneg(phydev); - } + phy_set_asym_pause(phydev, pause->rx_pause, pause->tx_pause); return 0; } @@ -904,11 +895,11 @@ static void ave_rxfifo_reset(struct net_device *ndev) /* assert reset */ writel(AVE_GRR_RXFFR, priv->base + AVE_GRR); - usleep_range(40, 50); + udelay(50); /* negate reset */ writel(0, priv->base + AVE_GRR); - usleep_range(10, 20); + udelay(20); /* negate interrupt status */ writel(AVE_GI_RXOVF, priv->base + AVE_GISR); @@ -1125,11 +1116,8 @@ static void ave_phy_adjust_link(struct net_device *ndev) rmt_adv |= LPA_PAUSE_CAP; if (phydev->asym_pause) rmt_adv |= LPA_PAUSE_ASYM; - if (phydev->advertising & ADVERTISED_Pause) - lcl_adv |= ADVERTISE_PAUSE_CAP; - if (phydev->advertising & ADVERTISED_Asym_Pause) - lcl_adv |= ADVERTISE_PAUSE_ASYM; + lcl_adv = ethtool_adv_to_lcl_adv_t(phydev->advertising); cap = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv); if (cap & FLOW_CTRL_TX) txcr |= AVE_TXCR_FLOCTR; @@ -1223,11 +1211,10 @@ static int ave_init(struct net_device *ndev) phy_ethtool_get_wol(phydev, &wol); device_set_wakeup_capable(&ndev->dev, !!wol.supported); - if (!phy_interface_is_rgmii(phydev)) { - phydev->supported &= ~PHY_GBIT_FEATURES; - phydev->supported |= PHY_BASIC_FEATURES; - } - phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; + if (!phy_interface_is_rgmii(phydev)) + phy_set_max_speed(phydev, SPEED_100); + + phy_support_asym_pause(phydev); phy_attached_info(phydev); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h index 0a80fa25afe3..d6bb953685fa 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h @@ -119,11 +119,23 @@ #define XGMAC_MTL_TXQ_OPMODE(x) (0x00001100 + (0x80 * (x))) #define XGMAC_TQS GENMASK(25, 16) #define XGMAC_TQS_SHIFT 16 +#define XGMAC_Q2TCMAP GENMASK(10, 8) +#define XGMAC_Q2TCMAP_SHIFT 8 #define XGMAC_TTC GENMASK(6, 4) #define XGMAC_TTC_SHIFT 4 #define XGMAC_TXQEN GENMASK(3, 2) #define XGMAC_TXQEN_SHIFT 2 #define XGMAC_TSF BIT(1) +#define XGMAC_MTL_TCx_ETS_CONTROL(x) (0x00001110 + (0x80 * (x))) +#define XGMAC_MTL_TCx_QUANTUM_WEIGHT(x) (0x00001118 + (0x80 * (x))) +#define XGMAC_MTL_TCx_SENDSLOPE(x) (0x0000111c + (0x80 * (x))) +#define XGMAC_MTL_TCx_HICREDIT(x) (0x00001120 + (0x80 * (x))) +#define XGMAC_MTL_TCx_LOCREDIT(x) (0x00001124 + (0x80 * (x))) +#define XGMAC_CC BIT(3) +#define XGMAC_TSA GENMASK(1, 0) +#define XGMAC_SP (0x0 << 0) +#define XGMAC_CBS (0x1 << 0) +#define XGMAC_ETS (0x2 << 0) #define XGMAC_MTL_RXQ_OPMODE(x) (0x00001140 + (0x80 * (x))) #define XGMAC_RQS GENMASK(25, 16) #define XGMAC_RQS_SHIFT 16 diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c index d182f82f7b58..64b8cb88ea45 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c +++ 
b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c @@ -177,6 +177,23 @@ static void dwxgmac2_map_mtl_to_dma(struct mac_device_info *hw, u32 queue, writel(value, ioaddr + reg); } +static void dwxgmac2_config_cbs(struct mac_device_info *hw, + u32 send_slope, u32 idle_slope, + u32 high_credit, u32 low_credit, u32 queue) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + writel(send_slope, ioaddr + XGMAC_MTL_TCx_SENDSLOPE(queue)); + writel(idle_slope, ioaddr + XGMAC_MTL_TCx_QUANTUM_WEIGHT(queue)); + writel(high_credit, ioaddr + XGMAC_MTL_TCx_HICREDIT(queue)); + writel(low_credit, ioaddr + XGMAC_MTL_TCx_LOCREDIT(queue)); + + value = readl(ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue)); + value |= XGMAC_CC | XGMAC_CBS; + writel(value, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue)); +} + static int dwxgmac2_host_irq_status(struct mac_device_info *hw, struct stmmac_extra_stats *x) { @@ -316,7 +333,7 @@ const struct stmmac_ops dwxgmac210_ops = { .prog_mtl_tx_algorithms = dwxgmac2_prog_mtl_tx_algorithms, .set_mtl_tx_queue_weight = NULL, .map_mtl_to_dma = dwxgmac2_map_mtl_to_dma, - .config_cbs = NULL, + .config_cbs = dwxgmac2_config_cbs, .dump_regs = NULL, .host_irq_status = dwxgmac2_host_irq_status, .host_mtl_irq_status = dwxgmac2_host_mtl_irq_status, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c index 20909036e002..6c5092e7771c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c @@ -182,6 +182,9 @@ static void dwxgmac2_dma_tx_mode(void __iomem *ioaddr, int mode, value |= 0x7 << XGMAC_TTC_SHIFT; } + /* Use static TC to Queue mapping */ + value |= (channel << XGMAC_Q2TCMAP_SHIFT) & XGMAC_Q2TCMAP; + value &= ~XGMAC_TXQEN; if (qmode != MTL_QUEUE_AVB) value |= 0x2 << XGMAC_TXQEN_SHIFT; @@ -374,6 +377,21 @@ static void dwxgmac2_enable_tso(void __iomem *ioaddr, bool en, u32 chan) writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan)); } +static void dwxgmac2_qmode(void __iomem *ioaddr, u32 channel, u8 qmode) +{ + u32 value = readl(ioaddr + XGMAC_MTL_TXQ_OPMODE(channel)); + + value &= ~XGMAC_TXQEN; + if (qmode != MTL_QUEUE_AVB) { + value |= 0x2 << XGMAC_TXQEN_SHIFT; + writel(0, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(channel)); + } else { + value |= 0x1 << XGMAC_TXQEN_SHIFT; + } + + writel(value, ioaddr + XGMAC_MTL_TXQ_OPMODE(channel)); +} + static void dwxgmac2_set_bfsize(void __iomem *ioaddr, int bfsize, u32 chan) { u32 value; @@ -407,5 +425,6 @@ const struct stmmac_dma_ops dwxgmac210_dma_ops = { .set_rx_tail_ptr = dwxgmac2_set_rx_tail_ptr, .set_tx_tail_ptr = dwxgmac2_set_tx_tail_ptr, .enable_tso = dwxgmac2_enable_tso, + .qmode = dwxgmac2_qmode, .set_bfsize = dwxgmac2_set_bfsize, }; diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c index 357309a6d6a5..81b966a8261b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.c +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c @@ -133,7 +133,7 @@ static const struct stmmac_hwif_entry { .mac = &dwmac4_ops, .hwtimestamp = &stmmac_ptp, .mode = NULL, - .tc = NULL, + .tc = &dwmac510_tc_ops, .setup = dwmac4_setup, .quirks = stmmac_dwmac4_quirks, }, { @@ -150,7 +150,7 @@ static const struct stmmac_hwif_entry { .mac = &dwmac410_ops, .hwtimestamp = &stmmac_ptp, .mode = &dwmac4_ring_mode_ops, - .tc = NULL, + .tc = &dwmac510_tc_ops, .setup = dwmac4_setup, .quirks = NULL, }, { @@ -167,7 +167,7 @@ static const struct stmmac_hwif_entry { .mac = &dwmac410_ops, .hwtimestamp = &stmmac_ptp, .mode = 
&dwmac4_ring_mode_ops, - .tc = NULL, + .tc = &dwmac510_tc_ops, .setup = dwmac4_setup, .quirks = NULL, }, { @@ -201,7 +201,7 @@ static const struct stmmac_hwif_entry { .mac = &dwxgmac210_ops, .hwtimestamp = &stmmac_ptp, .mode = NULL, - .tc = NULL, + .tc = &dwmac510_tc_ops, .setup = dwxgmac2_setup, .quirks = NULL, }, diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 75896d6ba6e2..076a8be18d67 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -991,17 +991,20 @@ static int stmmac_init_phy(struct net_device *dev) if ((interface == PHY_INTERFACE_MODE_MII) || (interface == PHY_INTERFACE_MODE_RMII) || (max_speed < 1000 && max_speed > 0)) - phydev->advertising &= ~(SUPPORTED_1000baseT_Half | - SUPPORTED_1000baseT_Full); + phy_set_max_speed(phydev, SPEED_100); /* * Half-duplex mode not supported with multiqueue * half-duplex can only work with single queue */ - if (tx_cnt > 1) - phydev->supported &= ~(SUPPORTED_1000baseT_Half | - SUPPORTED_100baseT_Half | - SUPPORTED_10baseT_Half); + if (tx_cnt > 1) { + phy_remove_link_mode(phydev, + ETHTOOL_LINK_MODE_10baseT_Half_BIT); + phy_remove_link_mode(phydev, + ETHTOOL_LINK_MODE_100baseT_Half_BIT); + phy_remove_link_mode(phydev, + ETHTOOL_LINK_MODE_1000baseT_Half_BIT); + } /* * Broken HW is sometimes missing the pull-up resistor on the diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c index d42f47f6c632..644e42c181ee 100644 --- a/drivers/net/ethernet/sun/ldmvsw.c +++ b/drivers/net/ethernet/sun/ldmvsw.c @@ -113,7 +113,7 @@ static u16 vsw_select_queue(struct net_device *dev, struct sk_buff *skb, } /* Wrappers to common functions */ -static int vsw_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t vsw_start_xmit(struct sk_buff *skb, struct net_device *dev) { return sunvnet_start_xmit_common(skb, dev, vsw_tx_port_find); } diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c index f047b2797156..720b7ac77f3b 100644 --- a/drivers/net/ethernet/sun/sunbmac.c +++ b/drivers/net/ethernet/sun/sunbmac.c @@ -950,7 +950,8 @@ static void bigmac_tx_timeout(struct net_device *dev) } /* Put a packet on the wire. */ -static int bigmac_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t +bigmac_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct bigmac *bp = netdev_priv(dev); int len, entry; diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c index 06da2f59fcbf..863fd602fd33 100644 --- a/drivers/net/ethernet/sun/sunhme.c +++ b/drivers/net/ethernet/sun/sunhme.c @@ -2999,7 +2999,7 @@ static int happy_meal_pci_probe(struct pci_dev *pdev, /* Now make sure pci_dev cookie is there. */ #ifdef CONFIG_SPARC dp = pci_device_to_OF_node(pdev); - strcpy(prom_name, dp->name); + snprintf(prom_name, sizeof(prom_name), "%pOFn", dp); #else if (is_quattro_p(pdev)) strcpy(prom_name, "SUNW,qfe"); diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c index 7fe0d5e33922..1468fa0a54e9 100644 --- a/drivers/net/ethernet/sun/sunqe.c +++ b/drivers/net/ethernet/sun/sunqe.c @@ -570,7 +570,7 @@ out: } /* Get a packet queued to go onto the wire. 
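The recurring ndo_start_xmit signature change in this series tightens the return contract to netdev_tx_t. A condensed sketch of what the core expects from such a handler; the foo_* helpers are placeholders, not taken from any driver above:

	static netdev_tx_t foo_start_xmit(struct sk_buff *skb,
					  struct net_device *dev)
	{
		struct foo_priv *priv = netdev_priv(dev);

		if (!foo_tx_ring_has_room(priv)) {
			/* NETDEV_TX_BUSY tells the core to keep the skb and
			 * retry; stop the queue first so this path stays rare
			 */
			netif_stop_queue(dev);
			return NETDEV_TX_BUSY;
		}

		foo_hw_queue_frame(priv, skb);
		return NETDEV_TX_OK;
	}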
*/ -static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t qe_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct sunqe *qep = netdev_priv(dev); struct sunqe_buffers *qbufs = qep->buffers; diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c index 12539b357a78..590172818b92 100644 --- a/drivers/net/ethernet/sun/sunvnet.c +++ b/drivers/net/ethernet/sun/sunvnet.c @@ -247,7 +247,7 @@ static u16 vnet_select_queue(struct net_device *dev, struct sk_buff *skb, } /* Wrappers to common functions */ -static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t vnet_start_xmit(struct sk_buff *skb, struct net_device *dev) { return sunvnet_start_xmit_common(skb, dev, vnet_tx_port_find); } diff --git a/drivers/net/ethernet/sun/sunvnet_common.c b/drivers/net/ethernet/sun/sunvnet_common.c index d8f4c3f28150..baa3088b475c 100644 --- a/drivers/net/ethernet/sun/sunvnet_common.c +++ b/drivers/net/ethernet/sun/sunvnet_common.c @@ -1216,9 +1216,10 @@ static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, int ncookies) return skb; } -static int vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb, - struct vnet_port *(*vnet_tx_port) - (struct sk_buff *, struct net_device *)) +static netdev_tx_t +vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb, + struct vnet_port *(*vnet_tx_port) + (struct sk_buff *, struct net_device *)) { struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port); struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; @@ -1321,9 +1322,10 @@ out_dropped: return NETDEV_TX_OK; } -int sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev, - struct vnet_port *(*vnet_tx_port) - (struct sk_buff *, struct net_device *)) +netdev_tx_t +sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev, + struct vnet_port *(*vnet_tx_port) + (struct sk_buff *, struct net_device *)) { struct vnet_port *port = NULL; struct vio_dring_state *dr; diff --git a/drivers/net/ethernet/sun/sunvnet_common.h b/drivers/net/ethernet/sun/sunvnet_common.h index 1ea0b016580a..2b808d2482d6 100644 --- a/drivers/net/ethernet/sun/sunvnet_common.h +++ b/drivers/net/ethernet/sun/sunvnet_common.h @@ -136,9 +136,10 @@ int sunvnet_close_common(struct net_device *dev); void sunvnet_set_rx_mode_common(struct net_device *dev, struct vnet *vp); int sunvnet_set_mac_addr_common(struct net_device *dev, void *p); void sunvnet_tx_timeout_common(struct net_device *dev); -int sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev, - struct vnet_port *(*vnet_tx_port) - (struct sk_buff *, struct net_device *)); +netdev_tx_t +sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev, + struct vnet_port *(*vnet_tx_port) + (struct sk_buff *, struct net_device *)); #ifdef CONFIG_NET_POLL_CONTROLLER void sunvnet_poll_controller_common(struct net_device *dev, struct vnet *vp); #endif diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 832bce07c385..16dcbf36f8cc 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -484,13 +484,13 @@ enum { }; #define CPSW_STAT(m) CPSW_STATS, \ - sizeof(((struct cpsw_hw_stats *)0)->m), \ + FIELD_SIZEOF(struct cpsw_hw_stats, m), \ offsetof(struct cpsw_hw_stats, m) #define CPDMA_RX_STAT(m) CPDMA_RX_STATS, \ - sizeof(((struct cpdma_chan_stats *)0)->m), \ + FIELD_SIZEOF(struct cpdma_chan_stats, m), \ offsetof(struct cpdma_chan_stats, m) #define CPDMA_TX_STAT(m) 
CPDMA_TX_STATS, \ - sizeof(((struct cpdma_chan_stats *)0)->m), \ + FIELD_SIZEOF(struct cpdma_chan_stats, m), \ offsetof(struct cpdma_chan_stats, m) static const struct cpsw_stats cpsw_gstrings_stats[] = { diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index a1d335a3c5e4..1f612268c998 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -225,17 +225,6 @@ static int emac_arch_get_mac_addr(char *x, void __iomem *efuse_mac, u32 swap) return 0; } -static const char *netcp_node_name(struct device_node *node) -{ - const char *name; - - if (of_property_read_string(node, "label", &name) < 0) - name = node->name; - if (!name) - name = "unknown"; - return name; -} - /* Module management routines */ static int netcp_register_interface(struct netcp_intf *netcp) { @@ -267,8 +256,13 @@ static int netcp_module_probe(struct netcp_device *netcp_device, } for_each_available_child_of_node(devices, child) { - const char *name = netcp_node_name(child); + const char *name; + char node_name[32]; + if (of_property_read_string(child, "label", &name) < 0) { + snprintf(node_name, sizeof(node_name), "%pOFn", child); + name = node_name; + } if (!strcasecmp(module->name, name)) break; } @@ -2209,8 +2203,8 @@ static int netcp_probe(struct platform_device *pdev) for_each_available_child_of_node(interfaces, child) { ret = netcp_create_interface(netcp_device, child); if (ret) { - dev_err(dev, "could not create interface(%s)\n", - child->name); + dev_err(dev, "could not create interface(%pOFn)\n", - child); goto probe_quit_interface; } } diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c index 72b98e27c992..0397ccb6597e 100644 --- a/drivers/net/ethernet/ti/netcp_ethss.c +++ b/drivers/net/ethernet/ti/netcp_ethss.c @@ -3137,15 +3137,15 @@ static void init_secondary_ports(struct gbe_priv *gbe_dev, for_each_child_of_node(node, port) { slave = devm_kzalloc(dev, sizeof(*slave), GFP_KERNEL); if (!slave) { - dev_err(dev, "memory alloc failed for secondary port(%s), skipping...\n", - port->name); + dev_err(dev, "memory alloc failed for secondary port(%pOFn), skipping...\n", + port); continue; } if (init_slave(gbe_dev, slave, port)) { dev_err(dev, - "Failed to initialize secondary port(%s), skipping...\n", - port->name); + "Failed to initialize secondary port(%pOFn), skipping...\n", + port); devm_kfree(dev, slave); continue; } @@ -3239,8 +3239,8 @@ static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev, ret = of_address_to_resource(node, XGBE_SS_REG_INDEX, &res); if (ret) { dev_err(gbe_dev->dev, - "Can't xlate xgbe of node(%s) ss address at %d\n", - node->name, XGBE_SS_REG_INDEX); + "Can't xlate xgbe of node(%pOFn) ss address at %d\n", + node, XGBE_SS_REG_INDEX); return ret; } @@ -3254,8 +3254,8 @@ static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev, ret = of_address_to_resource(node, XGBE_SM_REG_INDEX, &res); if (ret) { dev_err(gbe_dev->dev, - "Can't xlate xgbe of node(%s) sm address at %d\n", - node->name, XGBE_SM_REG_INDEX); + "Can't xlate xgbe of node(%pOFn) sm address at %d\n", + node, XGBE_SM_REG_INDEX); return ret; } @@ -3269,8 +3269,8 @@ static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev, ret = of_address_to_resource(node, XGBE_SERDES_REG_INDEX, &res); if (ret) { dev_err(gbe_dev->dev, - "Can't xlate xgbe serdes of node(%s) address at %d\n", - node->name, XGBE_SERDES_REG_INDEX); + "Can't xlate xgbe serdes of node(%pOFn) address at %d\n", + node, XGBE_SERDES_REG_INDEX); return ret; } @@ 
-3347,8 +3347,8 @@ static int get_gbe_resource_version(struct gbe_priv *gbe_dev, ret = of_address_to_resource(node, GBE_SS_REG_INDEX, &res); if (ret) { dev_err(gbe_dev->dev, - "Can't translate of node(%s) of gbe ss address at %d\n", - node->name, GBE_SS_REG_INDEX); + "Can't translate of node(%pOFn) of gbe ss address at %d\n", + node, GBE_SS_REG_INDEX); return ret; } @@ -3372,8 +3372,8 @@ static int set_gbe_ethss14_priv(struct gbe_priv *gbe_dev, ret = of_address_to_resource(node, GBE_SGMII34_REG_INDEX, &res); if (ret) { dev_err(gbe_dev->dev, - "Can't translate of gbe node(%s) address at index %d\n", - node->name, GBE_SGMII34_REG_INDEX); + "Can't translate of gbe node(%pOFn) address at index %d\n", + node, GBE_SGMII34_REG_INDEX); return ret; } @@ -3388,8 +3388,8 @@ static int set_gbe_ethss14_priv(struct gbe_priv *gbe_dev, ret = of_address_to_resource(node, GBE_SM_REG_INDEX, &res); if (ret) { dev_err(gbe_dev->dev, - "Can't translate of gbe node(%s) address at index %d\n", - node->name, GBE_SM_REG_INDEX); + "Can't translate of gbe node(%pOFn) address at index %d\n", + node, GBE_SM_REG_INDEX); return ret; } @@ -3498,8 +3498,8 @@ static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev, ret = of_address_to_resource(node, GBENU_SM_REG_INDEX, &res); if (ret) { dev_err(gbe_dev->dev, - "Can't translate of gbenu node(%s) addr at index %d\n", - node->name, GBENU_SM_REG_INDEX); + "Can't translate of gbenu node(%pOFn) addr at index %d\n", + node, GBENU_SM_REG_INDEX); return ret; } @@ -3642,7 +3642,7 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev, ret = netcp_xgbe_serdes_init(gbe_dev->xgbe_serdes_regs, gbe_dev->ss_regs); } else { - dev_err(dev, "unknown GBE node(%s)\n", node->name); + dev_err(dev, "unknown GBE node(%pOFn)\n", node); ret = -ENODEV; } @@ -3667,8 +3667,8 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev, for_each_child_of_node(interfaces, interface) { ret = of_property_read_u32(interface, "slave-port", &slave_num); if (ret) { - dev_err(dev, "missing slave-port parameter, skipping interface configuration for %s\n", - interface->name); + dev_err(dev, "missing slave-port parameter, skipping interface configuration for %pOFn\n", + interface); continue; } gbe_dev->num_slaves++; diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c index 88d74aef218a..75237c81c63d 100644 --- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c +++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c @@ -845,9 +845,9 @@ static int gelic_card_kick_txdma(struct gelic_card *card, * @skb: packet to send out * @netdev: interface device structure * - * returns 0 on success, <0 on failure + * returns NETDEV_TX_OK on success, NETDEV_TX_BUSY on failure */ -int gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev) +netdev_tx_t gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev) { struct gelic_card *card = netdev_card(netdev); struct gelic_descr *descr; diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.h b/drivers/net/ethernet/toshiba/ps3_gelic_net.h index 003d0452d9cb..fbbf9b54b173 100644 --- a/drivers/net/ethernet/toshiba/ps3_gelic_net.h +++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.h @@ -370,7 +370,7 @@ void gelic_card_up(struct gelic_card *card); void gelic_card_down(struct gelic_card *card); int gelic_net_open(struct net_device *netdev); int gelic_net_stop(struct net_device *netdev); -int gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev); +netdev_tx_t gelic_net_xmit(struct sk_buff 
*skb, struct net_device *netdev); void gelic_net_set_multi(struct net_device *netdev); void gelic_net_tx_timeout(struct net_device *netdev); int gelic_net_setup_netdev(struct net_device *netdev, struct gelic_card *card); diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c index 302079e22b06..00ab417694ad 100644 --- a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c +++ b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c @@ -1094,7 +1094,7 @@ static int gelic_wl_get_encode(struct net_device *netdev, struct gelic_wl_info *wl = port_wl(netdev_priv(netdev)); struct iw_point *enc = &data->encoding; unsigned long irqflag; - unsigned int key_index, index_specified; + unsigned int key_index; int ret = 0; pr_debug("%s: <-\n", __func__); @@ -1105,13 +1105,10 @@ static int gelic_wl_get_encode(struct net_device *netdev, return -EINVAL; spin_lock_irqsave(&wl->lock, irqflag); - if (key_index) { - index_specified = 1; + if (key_index) key_index--; - } else { - index_specified = 0; + else key_index = wl->current_key; - } if (wl->group_cipher_method == GELIC_WL_CIPHER_WEP) { switch (wl->auth_method) { diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c index d925b8203996..23417266b7ec 100644 --- a/drivers/net/ethernet/toshiba/spider_net.c +++ b/drivers/net/ethernet/toshiba/spider_net.c @@ -880,9 +880,9 @@ out: * @skb: packet to send out * @netdev: interface device structure * - * returns 0 on success, !0 on failure + * returns NETDEV_TX_OK on success, NETDEV_TX_BUSY on failure */ -static int +static netdev_tx_t spider_net_xmit(struct sk_buff *skb, struct net_device *netdev) { int cnt; diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c index cce9c9ed46aa..6a71c2c0f17d 100644 --- a/drivers/net/ethernet/toshiba/tc35815.c +++ b/drivers/net/ethernet/toshiba/tc35815.c @@ -474,7 +474,8 @@ static void free_rxbuf_skb(struct pci_dev *hwdev, struct sk_buff *skb, dma_addr_ /* Index to functions, as function prototypes. */ static int tc35815_open(struct net_device *dev); -static int tc35815_send_packet(struct sk_buff *skb, struct net_device *dev); +static netdev_tx_t tc35815_send_packet(struct sk_buff *skb, + struct net_device *dev); static irqreturn_t tc35815_interrupt(int irq, void *dev_id); static int tc35815_rx(struct net_device *dev, int limit); static int tc35815_poll(struct napi_struct *napi, int budget); @@ -628,7 +629,7 @@ static int tc_mii_probe(struct net_device *dev) phy_attached_info(phydev); /* mask with MAC supported features */ - phydev->supported &= PHY_BASIC_FEATURES; + phy_set_max_speed(phydev, SPEED_100); dropmask = 0; if (options.speed == 10) dropmask |= SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full; @@ -1248,7 +1249,8 @@ tc35815_open(struct net_device *dev) * invariant will hold if you make sure that the netif_*_queue() * calls are done at the proper times. 
*/ -static int tc35815_send_packet(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t +tc35815_send_packet(struct sk_buff *skb, struct net_device *dev) { struct tc35815_local *lp = netdev_priv(dev); struct TxFD *txfd; diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index 60abc9250f56..2241f9897092 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -674,7 +674,8 @@ static inline int temac_check_tx_bd_space(struct temac_local *lp, int num_frag) return 0; } -static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev) +static netdev_tx_t +temac_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct temac_local *lp = netdev_priv(ndev); struct cdmac_bd *cur_p; diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index f24f48f33802..12a14609ec47 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -653,7 +653,8 @@ static inline int axienet_check_tx_bd_space(struct axienet_local *lp, * start the transmission. Additionally if checksum offloading is supported, * it populates AXI Stream Control fields with appropriate values. */ -static int axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev) +static netdev_tx_t +axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev) { u32 ii; u32 num_frag; diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index 42f1f518dad6..639e3e99af46 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -941,8 +941,7 @@ static int xemaclite_open(struct net_device *dev) } /* EmacLite doesn't support giga-bit speeds */ - lp->phy_dev->supported &= (PHY_BASIC_FEATURES); - lp->phy_dev->advertising = lp->phy_dev->supported; + phy_set_max_speed(lp->phy_dev, SPEED_100); /* Don't advertise 1000BASE-T Full/Half duplex speeds */ phy_write(lp->phy_dev, MII_CTRL1000, 0); @@ -1020,9 +1019,10 @@ static int xemaclite_close(struct net_device *dev) * deferred and the Tx queue is stopped so that the deferred socket buffer can * be transmitted when the Emaclite device is free to transmit data. * - * Return: 0, always. + * Return: NETDEV_TX_OK, always. */ -static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev) +static netdev_tx_t +xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev) { struct net_local *lp = netdev_priv(dev); struct sk_buff *new_skb; @@ -1044,7 +1044,7 @@ static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev) /* Take the time stamp now, since we can't do this in an ISR. 
*/ skb_tx_timestamp(new_skb); spin_unlock_irqrestore(&lp->reset_lock, flags); - return 0; + return NETDEV_TX_OK; } spin_unlock_irqrestore(&lp->reset_lock, flags); @@ -1053,7 +1053,7 @@ static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev) dev->stats.tx_bytes += len; dev_consume_skb_any(new_skb); - return 0; + return NETDEV_TX_OK; } /** diff --git a/drivers/net/fddi/skfp/ecm.c b/drivers/net/fddi/skfp/ecm.c index eee9ba91346a..c813e462183d 100644 --- a/drivers/net/fddi/skfp/ecm.c +++ b/drivers/net/fddi/skfp/ecm.c @@ -30,7 +30,6 @@ * * The following external HW dependent functions are referenced : * sm_pm_bypass_req() - * sm_pm_ls_latch() * sm_pm_get_ls() * * The following HW dependent events are required : @@ -356,8 +355,6 @@ static void ecm_fsm(struct s_smc *smc, int cmd) */ start_ecm_timer(smc,smc->s.ecm_check_poll,0) ; smc->e.ecm_line_state = TRUE ; /* flag to pcm: report Q/HLS */ - (void) sm_pm_ls_latch(smc,PA,1) ; /* enable line state latch */ - (void) sm_pm_ls_latch(smc,PB,1) ; /* enable line state latch */ ACTIONS_DONE() ; break ; case EC6_CHECK : diff --git a/drivers/net/fddi/skfp/h/cmtdef.h b/drivers/net/fddi/skfp/h/cmtdef.h index 5d6891154367..448d66c2e372 100644 --- a/drivers/net/fddi/skfp/h/cmtdef.h +++ b/drivers/net/fddi/skfp/h/cmtdef.h @@ -513,7 +513,6 @@ void pcm_status_state(struct s_smc *smc, int np, int *type, int *state, void plc_config_mux(struct s_smc *smc, int mux); void sm_lem_evaluate(struct s_smc *smc); void mac_update_counter(struct s_smc *smc); -void sm_pm_ls_latch(struct s_smc *smc, int phy, int on_off); void sm_ma_control(struct s_smc *smc, int mode); void sm_mac_check_beacon_claim(struct s_smc *smc); void config_mux(struct s_smc *smc, int mux); @@ -656,14 +655,6 @@ void dump_hex(char *p, int len); #ifndef PNMI_INIT #define PNMI_INIT(smc) /* Nothing */ #endif -#ifndef PNMI_GET_ID -#define PNMI_GET_ID( smc, ndis_oid, buf, len, BytesWritten, BytesNeeded ) \ - ( 1 ? (-1) : (-1) ) -#endif -#ifndef PNMI_SET_ID -#define PNMI_SET_ID( smc, ndis_oid, buf, len, BytesRead, BytesNeeded, \ - set_type) ( 1 ? (-1) : (-1) ) -#endif /* * SMT_PANIC defines diff --git a/drivers/net/fddi/skfp/pcmplc.c b/drivers/net/fddi/skfp/pcmplc.c index a9ecf923f63d..6ef44c480bd5 100644 --- a/drivers/net/fddi/skfp/pcmplc.c +++ b/drivers/net/fddi/skfp/pcmplc.c @@ -30,7 +30,6 @@ * The following external HW dependent functions are referenced : * sm_pm_control() * sm_ph_linestate() - * sm_pm_ls_latch() * * The following HW dependent events are required : * PC_QLS @@ -1248,16 +1247,6 @@ static void sm_ph_lem_stop(struct s_smc *smc, int np) CLEAR(PLC(np,PL_INTR_MASK),PL_LE_CTR) ; } -/* ARGSUSED */ -void sm_pm_ls_latch(struct s_smc *smc, int phy, int on_off) -/* int on_off; en- or disable ident. ls */ -{ - SK_UNUSED(smc) ; - - phy = phy ; on_off = on_off ; -} - - /* * PCM pseudo code * receive actions are called AFTER the bit n is received, diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 6acb6b5718b9..82eccc930c5c 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -69,6 +69,7 @@ struct geneve_dev { struct gro_cells gro_cells; bool collect_md; bool use_udp6_rx_checksums; + bool ttl_inherit; }; struct geneve_sock { @@ -843,7 +844,11 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, ttl = key->ttl; } else { tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, ip_hdr(skb), skb); - ttl = key->ttl ? : ip4_dst_hoplimit(&rt->dst); + if (geneve->ttl_inherit) + ttl = ip_tunnel_get_ttl(ip_hdr(skb), skb); + else + ttl = key->ttl; + ttl = ttl ? 
: ip4_dst_hoplimit(&rt->dst); } df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0; @@ -889,7 +894,11 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, } else { prio = ip_tunnel_ecn_encap(ip6_tclass(fl6.flowlabel), ip_hdr(skb), skb); - ttl = key->ttl ? : ip6_dst_hoplimit(dst); + if (geneve->ttl_inherit) + ttl = ip_tunnel_get_ttl(ip_hdr(skb), skb); + else + ttl = key->ttl; + ttl = ttl ? : ip6_dst_hoplimit(dst); } err = geneve_build_skb(dst, skb, info, xnet, sizeof(struct ipv6hdr)); if (unlikely(err)) @@ -1091,6 +1100,7 @@ static const struct nla_policy geneve_policy[IFLA_GENEVE_MAX + 1] = { [IFLA_GENEVE_UDP_CSUM] = { .type = NLA_U8 }, [IFLA_GENEVE_UDP_ZERO_CSUM6_TX] = { .type = NLA_U8 }, [IFLA_GENEVE_UDP_ZERO_CSUM6_RX] = { .type = NLA_U8 }, + [IFLA_GENEVE_TTL_INHERIT] = { .type = NLA_U8 }, }; static int geneve_validate(struct nlattr *tb[], struct nlattr *data[], @@ -1170,7 +1180,8 @@ static bool geneve_dst_addr_equal(struct ip_tunnel_info *a, static int geneve_configure(struct net *net, struct net_device *dev, struct netlink_ext_ack *extack, const struct ip_tunnel_info *info, - bool metadata, bool ipv6_rx_csum) + bool metadata, bool ipv6_rx_csum, + bool ttl_inherit) { struct geneve_net *gn = net_generic(net, geneve_net_id); struct geneve_dev *t, *geneve = netdev_priv(dev); @@ -1219,6 +1230,7 @@ static int geneve_configure(struct net *net, struct net_device *dev, geneve->info = *info; geneve->collect_md = metadata; geneve->use_udp6_rx_checksums = ipv6_rx_csum; + geneve->ttl_inherit = ttl_inherit; err = register_netdevice(dev); if (err) @@ -1237,7 +1249,8 @@ static void init_tnl_info(struct ip_tunnel_info *info, __u16 dst_port) static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack, struct ip_tunnel_info *info, bool *metadata, - bool *use_udp6_rx_checksums, bool changelink) + bool *use_udp6_rx_checksums, bool *ttl_inherit, + bool changelink) { int attrtype; @@ -1312,8 +1325,15 @@ static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[], info->key.tun_id = tunid; } - if (data[IFLA_GENEVE_TTL]) + if (data[IFLA_GENEVE_TTL_INHERIT]) { + if (nla_get_u8(data[IFLA_GENEVE_TTL_INHERIT])) + *ttl_inherit = true; + else + *ttl_inherit = false; + } else if (data[IFLA_GENEVE_TTL]) { info->key.ttl = nla_get_u8(data[IFLA_GENEVE_TTL]); + *ttl_inherit = false; + } if (data[IFLA_GENEVE_TOS]) info->key.tos = nla_get_u8(data[IFLA_GENEVE_TOS]); @@ -1438,17 +1458,18 @@ static int geneve_newlink(struct net *net, struct net_device *dev, { bool use_udp6_rx_checksums = false; struct ip_tunnel_info info; + bool ttl_inherit = false; bool metadata = false; int err; init_tnl_info(&info, GENEVE_UDP_PORT); err = geneve_nl2info(tb, data, extack, &info, &metadata, - &use_udp6_rx_checksums, false); + &use_udp6_rx_checksums, &ttl_inherit, false); if (err) return err; err = geneve_configure(net, dev, extack, &info, metadata, - use_udp6_rx_checksums); + use_udp6_rx_checksums, ttl_inherit); if (err) return err; @@ -1511,6 +1532,7 @@ static int geneve_changelink(struct net_device *dev, struct nlattr *tb[], struct ip_tunnel_info info; bool metadata; bool use_udp6_rx_checksums; + bool ttl_inherit; int err; /* If the geneve device is configured for metadata (or externally @@ -1523,8 +1545,9 @@ static int geneve_changelink(struct net_device *dev, struct nlattr *tb[], memcpy(&info, &geneve->info, sizeof(info)); metadata = geneve->collect_md; use_udp6_rx_checksums = geneve->use_udp6_rx_checksums; + ttl_inherit = geneve->ttl_inherit; err = 
geneve_nl2info(tb, data, extack, &info, &metadata, - &use_udp6_rx_checksums, true); + &use_udp6_rx_checksums, &ttl_inherit, true); if (err) return err; @@ -1537,6 +1560,7 @@ static int geneve_changelink(struct net_device *dev, struct nlattr *tb[], geneve->info = info; geneve->collect_md = metadata; geneve->use_udp6_rx_checksums = use_udp6_rx_checksums; + geneve->ttl_inherit = ttl_inherit; geneve_unquiesce(geneve, gs4, gs6); return 0; @@ -1562,6 +1586,7 @@ static size_t geneve_get_size(const struct net_device *dev) nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_UDP_CSUM */ nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_UDP_ZERO_CSUM6_TX */ nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_UDP_ZERO_CSUM6_RX */ + nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TTL_INHERIT */ 0; } @@ -1569,6 +1594,7 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev) { struct geneve_dev *geneve = netdev_priv(dev); struct ip_tunnel_info *info = &geneve->info; + bool ttl_inherit = geneve->ttl_inherit; bool metadata = geneve->collect_md; __u8 tmp_vni[3]; __u32 vni; @@ -1614,6 +1640,9 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev) goto nla_put_failure; #endif + if (nla_put_u8(skb, IFLA_GENEVE_TTL_INHERIT, ttl_inherit)) + goto nla_put_failure; + return 0; nla_put_failure: @@ -1650,7 +1679,7 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name, return dev; init_tnl_info(&info, dst_port); - err = geneve_configure(net, dev, NULL, &info, true, true); + err = geneve_configure(net, dev, NULL, &info, true, true, false); if (err) { free_netdev(dev); return ERR_PTR(err); diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index a32ded5b4f41..ef6f766f6389 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -185,7 +185,9 @@ struct rndis_device { /* Interface */ struct rndis_message; +struct ndis_offload_params; struct netvsc_device; +struct netvsc_channel; struct net_device_context; extern u32 netvsc_ring_bytes; @@ -203,10 +205,7 @@ void netvsc_linkstatus_callback(struct net_device *net, struct rndis_message *resp); int netvsc_recv_callback(struct net_device *net, struct netvsc_device *nvdev, - struct vmbus_channel *channel, - void *data, u32 len, - const struct ndis_tcp_ip_checksum_info *csum_info, - const struct ndis_pkt_8021q_info *vlan); + struct netvsc_channel *nvchan); void netvsc_channel_cb(void *context); int netvsc_poll(struct napi_struct *napi, int budget); @@ -220,9 +219,12 @@ void rndis_filter_device_remove(struct hv_device *dev, struct netvsc_device *nvdev); int rndis_filter_set_rss_param(struct rndis_device *rdev, const u8 *key); +int rndis_filter_set_offload_params(struct net_device *ndev, + struct netvsc_device *nvdev, + struct ndis_offload_params *req_offloads); int rndis_filter_receive(struct net_device *ndev, struct netvsc_device *net_dev, - struct vmbus_channel *channel, + struct netvsc_channel *nvchan, void *data, u32 buflen); int rndis_filter_set_device_mac(struct netvsc_device *ndev, @@ -524,6 +526,8 @@ struct nvsp_2_vsc_capability { u64 ieee8021q:1; u64 correlation_id:1; u64 teaming:1; + u64 vsubnetid:1; + u64 rsc:1; }; }; } __packed; @@ -826,7 +830,7 @@ struct nvsp_message { #define NETVSC_SUPPORTED_HW_FEATURES (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | \ NETIF_F_TSO | NETIF_F_IPV6_CSUM | \ - NETIF_F_TSO6) + NETIF_F_TSO6 | NETIF_F_LRO) #define VRSS_SEND_TAB_SIZE 16 /* must be power of 2 */ #define VRSS_CHANNEL_MAX 64 @@ -852,6 +856,18 @@ struct multi_recv_comp { u32 
next; /* next entry for writing */ }; +#define NVSP_RSC_MAX 562 /* Max #RSC frags in a vmbus xfer page pkt */ + +struct nvsc_rsc { + const struct ndis_pkt_8021q_info *vlan; + const struct ndis_tcp_ip_checksum_info *csum_info; + u8 is_last; /* last RNDIS msg in a vmtransfer_page */ + u32 cnt; /* #fragments in an RSC packet */ + u32 pktlen; /* Full packet length */ + void *data[NVSP_RSC_MAX]; + u32 len[NVSP_RSC_MAX]; +}; + struct netvsc_stats { u64 packets; u64 bytes; @@ -955,6 +971,7 @@ struct netvsc_channel { struct multi_send_data msd; struct multi_recv_comp mrc; atomic_t queue_sends; + struct nvsc_rsc rsc; struct netvsc_stats tx_stats; struct netvsc_stats rx_stats; @@ -1136,7 +1153,8 @@ struct rndis_oobd { /* Packet extension field contents associated with a Data message. */ struct rndis_per_packet_info { u32 size; - u32 type; + u32 type:31; + u32 internal:1; u32 ppi_offset; }; @@ -1157,6 +1175,25 @@ enum ndis_per_pkt_info_type { MAX_PER_PKT_INFO }; +enum rndis_per_pkt_info_internal_type { + RNDIS_PKTINFO_ID = 1, + /* Add more members here */ + + RNDIS_PKTINFO_MAX +}; + +#define RNDIS_PKTINFO_SUBALLOC BIT(0) +#define RNDIS_PKTINFO_1ST_FRAG BIT(1) +#define RNDIS_PKTINFO_LAST_FRAG BIT(2) + +#define RNDIS_PKTINFO_ID_V1 1 + +struct rndis_pktinfo_id { + u8 ver; + u8 flag; + u16 pkt_id; +}; + struct ndis_pkt_8021q_info { union { struct { diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index fe01e141c8f8..922054c1d544 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -542,6 +542,9 @@ static int negotiate_nvsp_ver(struct hv_device *device, init_packet->msg.v2_msg.send_ndis_config.capability.teaming = 1; } + if (nvsp_ver >= NVSP_PROTOCOL_VERSION_61) + init_packet->msg.v2_msg.send_ndis_config.capability.rsc = 1; + trace_nvsp_send(ndev, init_packet); ret = vmbus_sendpacket(device->channel, init_packet, @@ -1111,11 +1114,12 @@ static void enq_receive_complete(struct net_device *ndev, static int netvsc_receive(struct net_device *ndev, struct netvsc_device *net_device, - struct vmbus_channel *channel, + struct netvsc_channel *nvchan, const struct vmpacket_descriptor *desc, const struct nvsp_message *nvsp) { struct net_device_context *net_device_ctx = netdev_priv(ndev); + struct vmbus_channel *channel = nvchan->channel; const struct vmtransfer_page_packet_header *vmxferpage_packet = container_of(desc, const struct vmtransfer_page_packet_header, d); u16 q_idx = channel->offermsg.offer.sub_channel_index; @@ -1150,6 +1154,7 @@ static int netvsc_receive(struct net_device *ndev, int ret; if (unlikely(offset + buflen > net_device->recv_buf_size)) { + nvchan->rsc.cnt = 0; status = NVSP_STAT_FAIL; netif_err(net_device_ctx, rx_err, ndev, "Packet offset:%u + len:%u too big\n", @@ -1160,11 +1165,13 @@ static int netvsc_receive(struct net_device *ndev, data = recv_buf + offset; + nvchan->rsc.is_last = (i == count - 1); + trace_rndis_recv(ndev, q_idx, data); /* Pass it to the upper layer */ ret = rndis_filter_receive(ndev, net_device, - channel, data, buflen); + nvchan, data, buflen); if (unlikely(ret != NVSP_STAT_SUCCESS)) status = NVSP_STAT_FAIL; @@ -1223,12 +1230,13 @@ static void netvsc_receive_inband(struct net_device *ndev, } static int netvsc_process_raw_pkt(struct hv_device *device, - struct vmbus_channel *channel, + struct netvsc_channel *nvchan, struct netvsc_device *net_device, struct net_device *ndev, const struct vmpacket_descriptor *desc, int budget) { + struct vmbus_channel *channel = nvchan->channel; const struct nvsp_message *nvmsg = 
hv_pkt_data(desc); trace_nvsp_recv(ndev, channel, nvmsg); @@ -1240,7 +1248,7 @@ static int netvsc_process_raw_pkt(struct hv_device *device, break; case VM_PKT_DATA_USING_XFER_PAGES: - return netvsc_receive(ndev, net_device, channel, + return netvsc_receive(ndev, net_device, nvchan, desc, nvmsg); break; @@ -1284,7 +1292,7 @@ int netvsc_poll(struct napi_struct *napi, int budget) nvchan->desc = hv_pkt_iter_first(channel); while (nvchan->desc && work_done < budget) { - work_done += netvsc_process_raw_pkt(device, channel, net_device, + work_done += netvsc_process_raw_pkt(device, nvchan, net_device, ndev, nvchan->desc, budget); nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc); } diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 3af6d8d15233..9bcaf204a7d4 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -226,6 +226,7 @@ static inline void *init_ppi_data(struct rndis_message *msg, ppi->size = ppi_size; ppi->type = pkt_type; + ppi->internal = 0; ppi->ppi_offset = sizeof(struct rndis_per_packet_info); rndis_pkt->per_pkt_info_len += ppi_size; @@ -744,14 +745,16 @@ void netvsc_linkstatus_callback(struct net_device *net, } static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net, - struct napi_struct *napi, - const struct ndis_tcp_ip_checksum_info *csum_info, - const struct ndis_pkt_8021q_info *vlan, - void *data, u32 buflen) + struct netvsc_channel *nvchan) { + struct napi_struct *napi = &nvchan->napi; + const struct ndis_pkt_8021q_info *vlan = nvchan->rsc.vlan; + const struct ndis_tcp_ip_checksum_info *csum_info = + nvchan->rsc.csum_info; struct sk_buff *skb; + int i; - skb = napi_alloc_skb(napi, buflen); + skb = napi_alloc_skb(napi, nvchan->rsc.pktlen); if (!skb) return skb; @@ -759,7 +762,8 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net, * Copy to skb. This copy is needed here since the memory pointed by * hv_netvsc_packet cannot be deallocated */ - skb_put_data(skb, data, buflen); + for (i = 0; i < nvchan->rsc.cnt; i++) + skb_put_data(skb, nvchan->rsc.data[i], nvchan->rsc.len[i]); skb->protocol = eth_type_trans(skb, net); @@ -792,14 +796,11 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net, */ int netvsc_recv_callback(struct net_device *net, struct netvsc_device *net_device, - struct vmbus_channel *channel, - void *data, u32 len, - const struct ndis_tcp_ip_checksum_info *csum_info, - const struct ndis_pkt_8021q_info *vlan) + struct netvsc_channel *nvchan) { struct net_device_context *net_device_ctx = netdev_priv(net); + struct vmbus_channel *channel = nvchan->channel; u16 q_idx = channel->offermsg.offer.sub_channel_index; - struct netvsc_channel *nvchan = &net_device->chan_table[q_idx]; struct sk_buff *skb; struct netvsc_stats *rx_stats; @@ -807,8 +808,8 @@ int netvsc_recv_callback(struct net_device *net, return NVSP_STAT_FAIL; /* Allocate a skb - TODO direct I/O to pages? 
*/ - skb = netvsc_alloc_recv_skb(net, &nvchan->napi, - csum_info, vlan, data, len); + skb = netvsc_alloc_recv_skb(net, nvchan); + if (unlikely(!skb)) { ++net_device_ctx->eth_stats.rx_no_memory; rcu_read_unlock(); @@ -825,7 +826,7 @@ int netvsc_recv_callback(struct net_device *net, rx_stats = &nvchan->rx_stats; u64_stats_update_begin(&rx_stats->syncp); rx_stats->packets++; - rx_stats->bytes += len; + rx_stats->bytes += nvchan->rsc.pktlen; if (skb->pkt_type == PACKET_BROADCAST) ++rx_stats->broadcast; @@ -1006,6 +1007,8 @@ static void netvsc_init_settings(struct net_device *dev) ndc->speed = SPEED_UNKNOWN; ndc->duplex = DUPLEX_FULL; + + dev->features = NETIF_F_LRO; } static int netvsc_get_link_ksettings(struct net_device *dev, @@ -1562,26 +1565,6 @@ netvsc_set_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *info) return -EOPNOTSUPP; } -#ifdef CONFIG_NET_POLL_CONTROLLER -static void netvsc_poll_controller(struct net_device *dev) -{ - struct net_device_context *ndc = netdev_priv(dev); - struct netvsc_device *ndev; - int i; - - rcu_read_lock(); - ndev = rcu_dereference(ndc->nvdev); - if (ndev) { - for (i = 0; i < ndev->num_chn; i++) { - struct netvsc_channel *nvchan = &ndev->chan_table[i]; - - napi_schedule(&nvchan->napi); - } - } - rcu_read_unlock(); -} -#endif - static u32 netvsc_get_rxfh_key_size(struct net_device *dev) { return NETVSC_HASH_KEYLEN; @@ -1733,6 +1716,33 @@ static int netvsc_set_ringparam(struct net_device *ndev, return ret; } +static int netvsc_set_features(struct net_device *ndev, + netdev_features_t features) +{ + netdev_features_t change = features ^ ndev->features; + struct net_device_context *ndevctx = netdev_priv(ndev); + struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); + struct ndis_offload_params offloads; + + if (!nvdev || nvdev->destroy) + return -ENODEV; + + if (!(change & NETIF_F_LRO)) + return 0; + + memset(&offloads, 0, sizeof(struct ndis_offload_params)); + + if (features & NETIF_F_LRO) { + offloads.rsc_ip_v4 = NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED; + offloads.rsc_ip_v6 = NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED; + } else { + offloads.rsc_ip_v4 = NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED; + offloads.rsc_ip_v6 = NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED; + } + + return rndis_filter_set_offload_params(ndev, nvdev, &offloads); +} + static u32 netvsc_get_msglevel(struct net_device *ndev) { struct net_device_context *ndev_ctx = netdev_priv(ndev); @@ -1776,14 +1786,12 @@ static const struct net_device_ops device_ops = { .ndo_start_xmit = netvsc_start_xmit, .ndo_change_rx_flags = netvsc_change_rx_flags, .ndo_set_rx_mode = netvsc_set_rx_mode, + .ndo_set_features = netvsc_set_features, .ndo_change_mtu = netvsc_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = netvsc_set_mac_addr, .ndo_select_queue = netvsc_select_queue, .ndo_get_stats64 = netvsc_get_stats64, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = netvsc_poll_controller, -#endif }; /* diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 2a5209f23f29..8b537a049c1e 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -342,7 +342,8 @@ static void rndis_filter_receive_response(struct net_device *ndev, * Get the Per-Packet-Info with the specified type * return NULL if not found. 
*/ -static inline void *rndis_get_ppi(struct rndis_packet *rpkt, u32 type) +static inline void *rndis_get_ppi(struct rndis_packet *rpkt, + u32 type, u8 internal) { struct rndis_per_packet_info *ppi; int len; @@ -355,7 +356,7 @@ static inline void *rndis_get_ppi(struct rndis_packet *rpkt, u32 type) len = rpkt->per_pkt_info_len; while (len > 0) { - if (ppi->type == type) + if (ppi->type == type && ppi->internal == internal) return (void *)((ulong)ppi + ppi->ppi_offset); len -= ppi->size; ppi = (struct rndis_per_packet_info *)((ulong)ppi + ppi->size); @@ -364,17 +365,41 @@ static inline void *rndis_get_ppi(struct rndis_packet *rpkt, u32 type) return NULL; } +static inline +void rsc_add_data(struct netvsc_channel *nvchan, + const struct ndis_pkt_8021q_info *vlan, + const struct ndis_tcp_ip_checksum_info *csum_info, + void *data, u32 len) +{ + u32 cnt = nvchan->rsc.cnt; + + if (cnt) { + nvchan->rsc.pktlen += len; + } else { + nvchan->rsc.vlan = vlan; + nvchan->rsc.csum_info = csum_info; + nvchan->rsc.pktlen = len; + } + + nvchan->rsc.data[cnt] = data; + nvchan->rsc.len[cnt] = len; + nvchan->rsc.cnt++; +} + static int rndis_filter_receive_data(struct net_device *ndev, struct netvsc_device *nvdev, - struct vmbus_channel *channel, + struct netvsc_channel *nvchan, struct rndis_message *msg, u32 data_buflen) { struct rndis_packet *rndis_pkt = &msg->msg.pkt; const struct ndis_tcp_ip_checksum_info *csum_info; const struct ndis_pkt_8021q_info *vlan; + const struct rndis_pktinfo_id *pktinfo_id; u32 data_offset; void *data; + bool rsc_more = false; + int ret; /* Remove the rndis header and pass it back up the stack */ data_offset = RNDIS_HEADER_SIZE + rndis_pkt->data_offset; @@ -393,25 +418,59 @@ static int rndis_filter_receive_data(struct net_device *ndev, return NVSP_STAT_FAIL; } - vlan = rndis_get_ppi(rndis_pkt, IEEE_8021Q_INFO); + vlan = rndis_get_ppi(rndis_pkt, IEEE_8021Q_INFO, 0); + + csum_info = rndis_get_ppi(rndis_pkt, TCPIP_CHKSUM_PKTINFO, 0); - csum_info = rndis_get_ppi(rndis_pkt, TCPIP_CHKSUM_PKTINFO); + pktinfo_id = rndis_get_ppi(rndis_pkt, RNDIS_PKTINFO_ID, 1); data = (void *)msg + data_offset; - /* - * Remove the rndis trailer padding from rndis packet message + /* Identify RSC frags, drop erroneous packets */ + if (pktinfo_id && (pktinfo_id->flag & RNDIS_PKTINFO_SUBALLOC)) { + if (pktinfo_id->flag & RNDIS_PKTINFO_1ST_FRAG) + nvchan->rsc.cnt = 0; + else if (nvchan->rsc.cnt == 0) + goto drop; + + rsc_more = true; + + if (pktinfo_id->flag & RNDIS_PKTINFO_LAST_FRAG) + rsc_more = false; + + if (rsc_more && nvchan->rsc.is_last) + goto drop; + } else { + nvchan->rsc.cnt = 0; + } + + if (unlikely(nvchan->rsc.cnt >= NVSP_RSC_MAX)) + goto drop; + + /* Put data into per channel structure. 
+ * Also, remove the rndis trailer padding from rndis packet message * rndis_pkt->data_len tell us the real data length, we only copy * the data packet to the stack, without the rndis trailer padding */ - return netvsc_recv_callback(ndev, nvdev, channel, - data, rndis_pkt->data_len, - csum_info, vlan); + rsc_add_data(nvchan, vlan, csum_info, data, rndis_pkt->data_len); + + if (rsc_more) + return NVSP_STAT_SUCCESS; + + ret = netvsc_recv_callback(ndev, nvdev, nvchan); + nvchan->rsc.cnt = 0; + + return ret; + +drop: + /* Drop incomplete packet */ + nvchan->rsc.cnt = 0; + return NVSP_STAT_FAIL; } int rndis_filter_receive(struct net_device *ndev, struct netvsc_device *net_dev, - struct vmbus_channel *channel, + struct netvsc_channel *nvchan, void *data, u32 buflen) { struct net_device_context *net_device_ctx = netdev_priv(ndev); @@ -422,7 +481,7 @@ int rndis_filter_receive(struct net_device *ndev, switch (rndis_msg->ndis_msg_type) { case RNDIS_MSG_PACKET: - return rndis_filter_receive_data(ndev, net_dev, channel, + return rndis_filter_receive_data(ndev, net_dev, nvchan, rndis_msg, buflen); case RNDIS_MSG_INIT_C: case RNDIS_MSG_QUERY_C: @@ -657,7 +716,7 @@ cleanup: return ret; } -static int +int rndis_filter_set_offload_params(struct net_device *ndev, struct netvsc_device *nvdev, struct ndis_offload_params *req_offloads) @@ -1184,6 +1243,18 @@ static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device, } } + if (hwcaps.rsc.ip4 && hwcaps.rsc.ip6) { + net->hw_features |= NETIF_F_LRO; + + if (net->features & NETIF_F_LRO) { + offloads.rsc_ip_v4 = NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED; + offloads.rsc_ip_v6 = NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED; + } else { + offloads.rsc_ip_v4 = NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED; + offloads.rsc_ip_v6 = NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED; + } + } + /* In case some hw_features disappeared we need to remove them from * net->features list as they're no longer supported. 
*/ diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c index bf70ab892e69..51b5198d5943 100644 --- a/drivers/net/ieee802154/mac802154_hwsim.c +++ b/drivers/net/ieee802154/mac802154_hwsim.c @@ -37,8 +37,6 @@ MODULE_LICENSE("GPL"); static LIST_HEAD(hwsim_phys); static DEFINE_MUTEX(hwsim_phys_lock); -static LIST_HEAD(hwsim_ifup_phys); - static struct platform_device *mac802154hwsim_dev; /* MAC802154_HWSIM netlink family */ @@ -85,7 +83,6 @@ struct hwsim_phy { struct list_head edges; struct list_head list; - struct list_head list_ifup; }; static int hwsim_add_one(struct genl_info *info, struct device *dev, @@ -159,9 +156,6 @@ static int hwsim_hw_start(struct ieee802154_hw *hw) struct hwsim_phy *phy = hw->priv; phy->suspended = false; - list_add_rcu(&phy->list_ifup, &hwsim_ifup_phys); - synchronize_rcu(); - return 0; } @@ -170,8 +164,6 @@ static void hwsim_hw_stop(struct ieee802154_hw *hw) struct hwsim_phy *phy = hw->priv; phy->suspended = true; - list_del_rcu(&phy->list_ifup); - synchronize_rcu(); } static int diff --git a/drivers/net/ieee802154/mcr20a.c b/drivers/net/ieee802154/mcr20a.c index 04891429a554..44de81e5f140 100644 --- a/drivers/net/ieee802154/mcr20a.c +++ b/drivers/net/ieee802154/mcr20a.c @@ -132,11 +132,6 @@ static const struct reg_sequence mar20a_iar_overwrites[] = { }; #define MCR20A_VALID_CHANNELS (0x07FFF800) - -struct mcr20a_platform_data { - int rst_gpio; -}; - #define MCR20A_MAX_BUF (127) #define printdev(X) (&X->spi->dev) @@ -412,7 +407,6 @@ struct mcr20a_local { struct spi_device *spi; struct ieee802154_hw *hw; - struct mcr20a_platform_data *pdata; struct regmap *regmap_dar; struct regmap *regmap_iar; @@ -976,20 +970,6 @@ static irqreturn_t mcr20a_irq_isr(int irq, void *data) return IRQ_HANDLED; } -static int mcr20a_get_platform_data(struct spi_device *spi, - struct mcr20a_platform_data *pdata) -{ - int ret = 0; - - if (!spi->dev.of_node) - return -EINVAL; - - pdata->rst_gpio = of_get_named_gpio(spi->dev.of_node, "rst_b-gpio", 0); - dev_dbg(&spi->dev, "rst_b-gpio: %d\n", pdata->rst_gpio); - - return ret; -} - static void mcr20a_hw_setup(struct mcr20a_local *lp) { u8 i; @@ -1249,7 +1229,7 @@ mcr20a_probe(struct spi_device *spi) { struct ieee802154_hw *hw; struct mcr20a_local *lp; - struct mcr20a_platform_data *pdata; + struct gpio_desc *rst_b; int irq_type; int ret = -ENOMEM; @@ -1260,48 +1240,32 @@ mcr20a_probe(struct spi_device *spi) return -EINVAL; } - pdata = kmalloc(sizeof(*pdata), GFP_KERNEL); - if (!pdata) - return -ENOMEM; - - /* set mcr20a platform data */ - ret = mcr20a_get_platform_data(spi, pdata); - if (ret < 0) { - dev_crit(&spi->dev, "mcr20a_get_platform_data failed.\n"); - goto free_pdata; - } - - /* init reset gpio */ - if (gpio_is_valid(pdata->rst_gpio)) { - ret = devm_gpio_request_one(&spi->dev, pdata->rst_gpio, - GPIOF_OUT_INIT_HIGH, "reset"); - if (ret) - goto free_pdata; + rst_b = devm_gpiod_get(&spi->dev, "rst_b", GPIOD_OUT_HIGH); + if (IS_ERR(rst_b)) { + ret = PTR_ERR(rst_b); + if (ret != -EPROBE_DEFER) + dev_err(&spi->dev, "Failed to get 'rst_b' gpio: %d", ret); + return ret; } /* reset mcr20a */ - if (gpio_is_valid(pdata->rst_gpio)) { - usleep_range(10, 20); - gpio_set_value_cansleep(pdata->rst_gpio, 0); - usleep_range(10, 20); - gpio_set_value_cansleep(pdata->rst_gpio, 1); - usleep_range(120, 240); - } + usleep_range(10, 20); + gpiod_set_value_cansleep(rst_b, 1); + usleep_range(10, 20); + gpiod_set_value_cansleep(rst_b, 0); + usleep_range(120, 240); /* allocate ieee802154_hw and private data */ hw = 
ieee802154_alloc_hw(sizeof(*lp), &mcr20a_hw_ops); if (!hw) { dev_crit(&spi->dev, "ieee802154_alloc_hw failed\n"); - ret = -ENOMEM; - goto free_pdata; + return ret; } /* init mcr20a local data */ lp = hw->priv; lp->hw = hw; lp->spi = spi; - lp->spi->dev.platform_data = pdata; - lp->pdata = pdata; /* init ieee802154_hw */ hw->parent = &spi->dev; @@ -1370,8 +1334,6 @@ mcr20a_probe(struct spi_device *spi) free_dev: ieee802154_free_hw(lp->hw); -free_pdata: - kfree(pdata); return ret; } diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index 30612497643c..a7207fa7e451 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c @@ -59,12 +59,6 @@ #include <net/net_namespace.h> #include <linux/u64_stats_sync.h> -struct pcpu_lstats { - u64 packets; - u64 bytes; - struct u64_stats_sync syncp; -}; - /* The higher levels take care of making this non-reentrant (it's * called with bh's disabled). */ diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index 7de88b33d5b9..4bb90b6867a2 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -64,9 +64,9 @@ struct macsec_eth_header { #define MACSEC_NUM_AN 4 /* 2 bits for the association number */ -#define for_each_rxsc(secy, sc) \ +#define for_each_rxsc(secy, sc) \ for (sc = rcu_dereference_bh(secy->rx_sc); \ - sc; \ + sc; \ sc = rcu_dereference_bh(sc->next)) #define for_each_rxsc_rtnl(secy, sc) \ for (sc = rtnl_dereference(secy->rx_sc); \ @@ -1142,6 +1142,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) list_for_each_entry_rcu(macsec, &rxd->secys, secys) { struct macsec_rx_sc *sc = find_rx_sc(&macsec->secy, sci); + sc = sc ? macsec_rxsc_get(sc) : NULL; if (sc) { @@ -1584,7 +1585,6 @@ static struct macsec_rx_sa *get_rxsa_from_nl(struct net *net, return rx_sa; } - static const struct nla_policy macsec_genl_policy[NUM_MACSEC_ATTR] = { [MACSEC_ATTR_IFINDEX] = { .type = NLA_U32 }, [MACSEC_ATTR_RXSC_CONFIG] = { .type = NLA_NESTED }, @@ -2156,7 +2156,7 @@ static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info) } static int copy_tx_sa_stats(struct sk_buff *skb, - struct macsec_tx_sa_stats __percpu *pstats) + struct macsec_tx_sa_stats __percpu *pstats) { struct macsec_tx_sa_stats sum = {0, }; int cpu; @@ -2176,7 +2176,7 @@ static int copy_tx_sa_stats(struct sk_buff *skb, } static int copy_rx_sa_stats(struct sk_buff *skb, - struct macsec_rx_sa_stats __percpu *pstats) + struct macsec_rx_sa_stats __percpu *pstats) { struct macsec_rx_sa_stats sum = {0, }; int cpu; @@ -2202,7 +2202,7 @@ static int copy_rx_sa_stats(struct sk_buff *skb, } static int copy_rx_sc_stats(struct sk_buff *skb, - struct pcpu_rx_sc_stats __percpu *pstats) + struct pcpu_rx_sc_stats __percpu *pstats) { struct macsec_rx_sc_stats sum = {0, }; int cpu; @@ -2266,7 +2266,7 @@ static int copy_rx_sc_stats(struct sk_buff *skb, } static int copy_tx_sc_stats(struct sk_buff *skb, - struct pcpu_tx_sc_stats __percpu *pstats) + struct pcpu_tx_sc_stats __percpu *pstats) { struct macsec_tx_sc_stats sum = {0, }; int cpu; @@ -2306,7 +2306,7 @@ static int copy_tx_sc_stats(struct sk_buff *skb, } static int copy_secy_stats(struct sk_buff *skb, - struct pcpu_secy_stats __percpu *pstats) + struct pcpu_secy_stats __percpu *pstats) { struct macsec_dev_stats sum = {0, }; int cpu; @@ -2962,13 +2962,11 @@ static int macsec_get_iflink(const struct net_device *dev) return macsec_priv(dev)->real_dev->ifindex; } - static int macsec_get_nest_level(struct net_device *dev) { return macsec_priv(dev)->nest_level; } - static const struct net_device_ops 
macsec_netdev_ops = { .ndo_init = macsec_dev_init, .ndo_uninit = macsec_dev_uninit, diff --git a/drivers/net/net_failover.c b/drivers/net/net_failover.c index 7ae1856d1f18..e964d312f4ca 100644 --- a/drivers/net/net_failover.c +++ b/drivers/net/net_failover.c @@ -19,7 +19,6 @@ #include <linux/ethtool.h> #include <linux/module.h> #include <linux/slab.h> -#include <linux/netdevice.h> #include <linux/netpoll.h> #include <linux/rtnetlink.h> #include <linux/if_vlan.h> @@ -603,6 +602,9 @@ static int net_failover_slave_unregister(struct net_device *slave_dev, primary_dev = rtnl_dereference(nfo_info->primary_dev); standby_dev = rtnl_dereference(nfo_info->standby_dev); + if (WARN_ON_ONCE(slave_dev != primary_dev && slave_dev != standby_dev)) + return -ENODEV; + vlan_vids_del_by_dev(slave_dev, failover_dev); dev_uc_unsync(slave_dev, failover_dev); dev_mc_unsync(slave_dev, failover_dev); @@ -762,8 +764,10 @@ struct failover *net_failover_create(struct net_device *standby_dev) netif_carrier_off(failover_dev); failover = failover_register(failover_dev, &net_failover_ops); - if (IS_ERR(failover)) + if (IS_ERR(failover)) { + err = PTR_ERR(failover); goto err_failover_register; + } return failover; diff --git a/drivers/net/netdevsim/bpf.c b/drivers/net/netdevsim/bpf.c index 81444208b216..cb3518474f0e 100644 --- a/drivers/net/netdevsim/bpf.c +++ b/drivers/net/netdevsim/bpf.c @@ -86,8 +86,14 @@ nsim_bpf_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn) return 0; } +static int nsim_bpf_finalize(struct bpf_verifier_env *env) +{ + return 0; +} + static const struct bpf_prog_offload_ops nsim_bpf_analyzer_ops = { - .insn_hook = nsim_bpf_verify_insn, + .insn_hook = nsim_bpf_verify_insn, + .finalize = nsim_bpf_finalize, }; static bool nsim_xdp_offload_active(struct netdevsim *ns) diff --git a/drivers/net/nlmon.c b/drivers/net/nlmon.c index 4b22955de191..dd0db7534cb3 100644 --- a/drivers/net/nlmon.c +++ b/drivers/net/nlmon.c @@ -6,12 +6,6 @@ #include <linux/if_arp.h> #include <net/rtnetlink.h> -struct pcpu_lstats { - u64 packets; - u64 bytes; - struct u64_stats_sync syncp; -}; - static netdev_tx_t nlmon_xmit(struct sk_buff *skb, struct net_device *dev) { int len = skb->len; diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 82070792edbb..3d187cd50eb0 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -240,7 +240,7 @@ config AT803X_PHY config BCM63XX_PHY tristate "Broadcom 63xx SOCs internal PHY" - depends on BCM63XX + depends on BCM63XX || COMPILE_TEST select BCM_NET_PHYLIB ---help--- Currently supports the 6348 and 6358 PHYs. 
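The loopback and nlmon hunks above delete their duplicated private struct pcpu_lstats definitions, since an identical structure is now provided centrally via <linux/netdevice.h>. A minimal sketch of the transmit-side pattern both drivers keep using, assuming the shared definition (packets, bytes, and a u64_stats_sync member) and a dev->lstats per-cpu area allocated at device setup; demo_xmit is an illustrative name, not a kernel function:

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/u64_stats_sync.h>

static netdev_tx_t demo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* dev->lstats is assumed to have been allocated with
	 * alloc_percpu() in the device setup path, as loopback
	 * and nlmon do.
	 */
	struct pcpu_lstats *lstats = this_cpu_ptr(dev->lstats);
	int len = skb->len;	/* capture before the skb is freed */

	/* The seqcount only guards consistent 64-bit reads on 32-bit
	 * machines; a writer on its own CPU never contends.
	 */
	u64_stats_update_begin(&lstats->syncp);
	lstats->bytes += len;
	lstats->packets++;
	u64_stats_update_end(&lstats->syncp);

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}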
diff --git a/drivers/net/phy/aquantia.c b/drivers/net/phy/aquantia.c index 319edc9c8ec7..632472cab3bb 100644 --- a/drivers/net/phy/aquantia.c +++ b/drivers/net/phy/aquantia.c @@ -115,7 +115,7 @@ static struct phy_driver aquantia_driver[] = { .phy_id = PHY_ID_AQ1202, .phy_id_mask = 0xfffffff0, .name = "Aquantia AQ1202", - .features = PHY_AQUANTIA_FEATURES, + .features = PHY_10GBIT_FULL_FEATURES, .flags = PHY_HAS_INTERRUPT, .aneg_done = genphy_c45_aneg_done, .config_aneg = aquantia_config_aneg, @@ -127,7 +127,7 @@ static struct phy_driver aquantia_driver[] = { .phy_id = PHY_ID_AQ2104, .phy_id_mask = 0xfffffff0, .name = "Aquantia AQ2104", - .features = PHY_AQUANTIA_FEATURES, + .features = PHY_10GBIT_FULL_FEATURES, .flags = PHY_HAS_INTERRUPT, .aneg_done = genphy_c45_aneg_done, .config_aneg = aquantia_config_aneg, @@ -139,7 +139,7 @@ static struct phy_driver aquantia_driver[] = { .phy_id = PHY_ID_AQR105, .phy_id_mask = 0xfffffff0, .name = "Aquantia AQR105", - .features = PHY_AQUANTIA_FEATURES, + .features = PHY_10GBIT_FULL_FEATURES, .flags = PHY_HAS_INTERRUPT, .aneg_done = genphy_c45_aneg_done, .config_aneg = aquantia_config_aneg, @@ -151,7 +151,7 @@ static struct phy_driver aquantia_driver[] = { .phy_id = PHY_ID_AQR106, .phy_id_mask = 0xfffffff0, .name = "Aquantia AQR106", - .features = PHY_AQUANTIA_FEATURES, + .features = PHY_10GBIT_FULL_FEATURES, .flags = PHY_HAS_INTERRUPT, .aneg_done = genphy_c45_aneg_done, .config_aneg = aquantia_config_aneg, @@ -163,7 +163,7 @@ static struct phy_driver aquantia_driver[] = { .phy_id = PHY_ID_AQR107, .phy_id_mask = 0xfffffff0, .name = "Aquantia AQR107", - .features = PHY_AQUANTIA_FEATURES, + .features = PHY_10GBIT_FULL_FEATURES, .flags = PHY_HAS_INTERRUPT, .aneg_done = genphy_c45_aneg_done, .config_aneg = aquantia_config_aneg, @@ -175,7 +175,7 @@ static struct phy_driver aquantia_driver[] = { .phy_id = PHY_ID_AQR405, .phy_id_mask = 0xfffffff0, .name = "Aquantia AQR405", - .features = PHY_AQUANTIA_FEATURES, + .features = PHY_10GBIT_FULL_FEATURES, .flags = PHY_HAS_INTERRUPT, .aneg_done = genphy_c45_aneg_done, .config_aneg = aquantia_config_aneg, diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c index 411cf1072bae..e74a047a846e 100644 --- a/drivers/net/phy/at803x.c +++ b/drivers/net/phy/at803x.c @@ -357,7 +357,7 @@ static int at803x_aneg_done(struct phy_device *phydev) /* check if the SGMII link is OK. 
*/ if (!(phy_read(phydev, AT803X_PSSR) & AT803X_PSSR_MR_AN_COMPLETE)) { - pr_warn("803x_aneg_done: SGMII link is not ok\n"); + phydev_warn(phydev, "803x_aneg_done: SGMII link is not ok\n"); aneg_done = 0; } /* switch back to copper page */ diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c index cf14613745c9..d95bffdec4c1 100644 --- a/drivers/net/phy/bcm63xx.c +++ b/drivers/net/phy/bcm63xx.c @@ -42,6 +42,9 @@ static int bcm63xx_config_init(struct phy_device *phydev) { int reg, err; + /* ASYM_PAUSE bit is marked RO in datasheet, so don't cheat */ + phydev->supported |= SUPPORTED_Pause; + reg = phy_read(phydev, MII_BCM63XX_IR); if (reg < 0) return reg; @@ -65,8 +68,7 @@ static struct phy_driver bcm63xx_driver[] = { .phy_id = 0x00406000, .phy_id_mask = 0xfffffc00, .name = "Broadcom BCM63XX (1)", - /* ASYM_PAUSE bit is marked RO in datasheet, so don't cheat */ - .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause), + .features = PHY_BASIC_FEATURES, .flags = PHY_HAS_INTERRUPT | PHY_IS_INTERNAL, .config_init = bcm63xx_config_init, .ack_interrupt = bcm_phy_ack_intr, @@ -75,8 +77,7 @@ static struct phy_driver bcm63xx_driver[] = { /* same phy as above, with just a different OUI */ .phy_id = 0x002bdc00, .phy_id_mask = 0xfffffc00, - .name = "Broadcom BCM63XX (2)", - .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause), + .features = PHY_BASIC_FEATURES, .flags = PHY_HAS_INTERRUPT | PHY_IS_INTERNAL, .config_init = bcm63xx_config_init, .ack_interrupt = bcm_phy_ack_intr, diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index 29aa8d772b0c..edd4d44a386d 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c @@ -553,16 +553,17 @@ static void enable_status_frames(struct phy_device *phydev, bool on) mutex_unlock(&clock->extreg_lock); if (!phydev->attached_dev) { - pr_warn("expected to find an attached netdevice\n"); + phydev_warn(phydev, + "expected to find an attached netdevice\n"); return; } if (on) { if (dev_mc_add(phydev->attached_dev, status_frame_dst)) - pr_warn("failed to add mc address\n"); + phydev_warn(phydev, "failed to add mc address\n"); } else { if (dev_mc_del(phydev->attached_dev, status_frame_dst)) - pr_warn("failed to delete mc address\n"); + phydev_warn(phydev, "failed to delete mc address\n"); } } @@ -686,9 +687,9 @@ static void recalibrate(struct dp83640_clock *clock) * read out and correct offsets */ val = ext_read(master, PAGE4, PTP_STS); - pr_info("master PTP_STS 0x%04hx\n", val); + phydev_info(master, "master PTP_STS 0x%04hx\n", val); val = ext_read(master, PAGE4, PTP_ESTS); - pr_info("master PTP_ESTS 0x%04hx\n", val); + phydev_info(master, "master PTP_ESTS 0x%04hx\n", val); event_ts.ns_lo = ext_read(master, PAGE4, PTP_EDATA); event_ts.ns_hi = ext_read(master, PAGE4, PTP_EDATA); event_ts.sec_lo = ext_read(master, PAGE4, PTP_EDATA); @@ -698,15 +699,16 @@ static void recalibrate(struct dp83640_clock *clock) list_for_each(this, &clock->phylist) { tmp = list_entry(this, struct dp83640_private, list); val = ext_read(tmp->phydev, PAGE4, PTP_STS); - pr_info("slave PTP_STS 0x%04hx\n", val); + phydev_info(tmp->phydev, "slave PTP_STS 0x%04hx\n", val); val = ext_read(tmp->phydev, PAGE4, PTP_ESTS); - pr_info("slave PTP_ESTS 0x%04hx\n", val); + phydev_info(tmp->phydev, "slave PTP_ESTS 0x%04hx\n", val); event_ts.ns_lo = ext_read(tmp->phydev, PAGE4, PTP_EDATA); event_ts.ns_hi = ext_read(tmp->phydev, PAGE4, PTP_EDATA); event_ts.sec_lo = ext_read(tmp->phydev, PAGE4, PTP_EDATA); event_ts.sec_hi = ext_read(tmp->phydev, PAGE4, PTP_EDATA); diff = now - (s64) 
phy2txts(&event_ts); - pr_info("slave offset %lld nanoseconds\n", diff); + phydev_info(tmp->phydev, "slave offset %lld nanoseconds\n", + diff); diff += ADJTIME_FIX; ts = ns_to_timespec64(diff); tdr_write(0, tmp->phydev, &ts, PTP_STEP_CLK); diff --git a/drivers/net/phy/et1011c.c b/drivers/net/phy/et1011c.c index a9a4edfa23c8..565e49e7f76f 100644 --- a/drivers/net/phy/et1011c.c +++ b/drivers/net/phy/et1011c.c @@ -91,8 +91,7 @@ static struct phy_driver et1011c_driver[] = { { .phy_id = 0x0282f014, .name = "ET1011C", .phy_id_mask = 0xfffffff0, - .features = (PHY_BASIC_FEATURES | SUPPORTED_1000baseT_Full), - .flags = PHY_POLL, + .features = PHY_GBIT_FEATURES, .config_aneg = et1011c_config_aneg, .read_status = et1011c_read_status, } }; diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index f7c69ca34056..cbec296107bd 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -265,7 +265,7 @@ static int marvell_set_polarity(struct phy_device *phydev, int polarity) return err; } - return 0; + return val != reg; } static int marvell_set_downshift(struct phy_device *phydev, bool enable, @@ -287,12 +287,15 @@ static int marvell_set_downshift(struct phy_device *phydev, bool enable, static int marvell_config_aneg(struct phy_device *phydev) { + int changed = 0; int err; err = marvell_set_polarity(phydev, phydev->mdix_ctrl); if (err < 0) return err; + changed = err; + err = phy_write(phydev, MII_M1111_PHY_LED_CONTROL, MII_M1111_PHY_LED_DIRECT); if (err < 0) @@ -302,7 +305,7 @@ static int marvell_config_aneg(struct phy_device *phydev) if (err < 0) return err; - if (phydev->autoneg != AUTONEG_ENABLE) { + if (phydev->autoneg != AUTONEG_ENABLE || changed) { /* A write to speed/duplex bits (that is performed by * genphy_config_aneg() call above) must be followed by * a software reset. Otherwise, the write has no effect. @@ -350,42 +353,6 @@ static int m88e1101_config_aneg(struct phy_device *phydev) return marvell_config_aneg(phydev); } -static int m88e1111_config_aneg(struct phy_device *phydev) -{ - int err; - - /* The Marvell PHY has an errata which requires - * that certain registers get written in order - * to restart autonegotiation - */ - err = genphy_soft_reset(phydev); - - err = marvell_set_polarity(phydev, phydev->mdix_ctrl); - if (err < 0) - return err; - - err = phy_write(phydev, MII_M1111_PHY_LED_CONTROL, - MII_M1111_PHY_LED_DIRECT); - if (err < 0) - return err; - - err = genphy_config_aneg(phydev); - if (err < 0) - return err; - - if (phydev->autoneg != AUTONEG_ENABLE) { - /* A write to speed/duplex bits (that is performed by - * genphy_config_aneg() call above) must be followed by - * a software reset. Otherwise, the write has no effect. - */ - err = genphy_soft_reset(phydev); - if (err < 0) - return err; - } - - return 0; -} - #ifdef CONFIG_OF_MDIO /* Set and/or override some configuration registers based on the * marvell,reg-init property stored in the of_node for the phydev. 
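The marvell.c hunks above and below fold m88e1111_config_aneg() into the generic marvell_config_aneg(): marvell_set_polarity() now returns 1 when it actually rewrote the MDI/MDIX bits (0 if unchanged, a negative errno on failure), and the caller issues a software reset either when autonegotiation is disabled or when the polarity changed, because those register writes only take effect after a reset. A condensed sketch of the resulting control flow, under those assumptions; config_aneg_sketch is an illustrative name and the LED setup write is omitted for brevity:

static int config_aneg_sketch(struct phy_device *phydev)
{
	/* > 0 means the MDI/MDIX register was actually modified */
	int changed = marvell_set_polarity(phydev, phydev->mdix_ctrl);
	int err;

	if (changed < 0)
		return changed;

	err = genphy_config_aneg(phydev);
	if (err < 0)
		return err;

	/* Commit forced speed/duplex or a polarity change: the write
	 * has no effect until a software reset is performed.
	 */
	if (phydev->autoneg != AUTONEG_ENABLE || changed)
		return genphy_soft_reset(phydev);

	return 0;
}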
@@ -479,6 +446,7 @@ static int m88e1121_config_aneg_rgmii_delays(struct phy_device *phydev) static int m88e1121_config_aneg(struct phy_device *phydev) { + int changed = 0; int err = 0; if (phy_interface_is_rgmii(phydev)) { @@ -487,15 +455,26 @@ static int m88e1121_config_aneg(struct phy_device *phydev) return err; } - err = genphy_soft_reset(phydev); + err = marvell_set_polarity(phydev, phydev->mdix_ctrl); if (err < 0) return err; - err = marvell_set_polarity(phydev, phydev->mdix_ctrl); + changed = err; + + err = genphy_config_aneg(phydev); if (err < 0) return err; - return genphy_config_aneg(phydev); + if (phydev->autoneg != AUTONEG_ENABLE || changed) { + /* A software reset is used to ensure a "commit" of the + * changes is done. + */ + err = genphy_soft_reset(phydev); + if (err < 0) + return err; + } + + return 0; } static int m88e1318_config_aneg(struct phy_device *phydev) @@ -659,7 +638,7 @@ static void marvell_config_led(struct phy_device *phydev) err = phy_write_paged(phydev, MII_MARVELL_LED_PAGE, MII_PHY_LED_CTRL, def_config); if (err < 0) - pr_warn("Fail to config marvell phy LED.\n"); + phydev_warn(phydev, "Fail to config marvell phy LED.\n"); } static int marvell_config_init(struct phy_device *phydev) @@ -2067,7 +2046,7 @@ static struct phy_driver marvell_drivers[] = { .flags = PHY_HAS_INTERRUPT, .probe = marvell_probe, .config_init = &m88e1111_config_init, - .config_aneg = &m88e1111_config_aneg, + .config_aneg = &marvell_config_aneg, .read_status = &marvell_read_status, .ack_interrupt = &marvell_ack_interrupt, .config_intr = &marvell_config_intr, @@ -2222,7 +2201,7 @@ static struct phy_driver marvell_drivers[] = { .phy_id = MARVELL_PHY_ID_88E1510, .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1510", - .features = PHY_GBIT_FEATURES | SUPPORTED_FIBRE, + .features = PHY_GBIT_FIBRE_FEATURES, .flags = PHY_HAS_INTERRUPT, .probe = &m88e1510_probe, .config_init = &m88e1510_config_init, diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c index f77a2d9e7f9d..1c9d039eec63 100644 --- a/drivers/net/phy/marvell10g.c +++ b/drivers/net/phy/marvell10g.c @@ -337,9 +337,9 @@ static int mv3310_config_init(struct phy_device *phydev) } if (!ethtool_convert_link_mode_to_legacy_u32(&mask, supported)) - dev_warn(&phydev->mdio.dev, - "PHY supports (%*pb) more modes than phylib supports, some modes not supported.\n", - __ETHTOOL_LINK_MODE_MASK_NBITS, supported); + phydev_warn(phydev, + "PHY supports (%*pb) more modes than phylib supports, some modes not supported.\n", + __ETHTOOL_LINK_MODE_MASK_NBITS, supported); phydev->supported &= mask; phydev->advertising &= phydev->supported; @@ -535,16 +535,7 @@ static struct phy_driver mv3310_drivers[] = { .phy_id = 0x002b09aa, .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "mv88x3310", - .features = SUPPORTED_10baseT_Full | - SUPPORTED_10baseT_Half | - SUPPORTED_100baseT_Full | - SUPPORTED_100baseT_Half | - SUPPORTED_1000baseT_Full | - SUPPORTED_Autoneg | - SUPPORTED_TP | - SUPPORTED_FIBRE | - SUPPORTED_10000baseT_Full | - SUPPORTED_Backplane, + .features = PHY_10GBIT_FEATURES, .soft_reset = gen10g_no_soft_reset, .config_init = mv3310_config_init, .probe = mv3310_probe, diff --git a/drivers/net/phy/mdio-bcm-unimac.c b/drivers/net/phy/mdio-bcm-unimac.c index 8d370667fa1b..df75efa96a7d 100644 --- a/drivers/net/phy/mdio-bcm-unimac.c +++ b/drivers/net/phy/mdio-bcm-unimac.c @@ -16,6 +16,7 @@ #include <linux/module.h> #include <linux/io.h> #include <linux/delay.h> +#include <linux/clk.h> #include <linux/of.h> #include <linux/of_platform.h> 
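The mdio-bcm-unimac hunks that follow program the MDC divider from an optional "clock-frequency" property; the resulting MDIO clock is rate / (2 * (MDIO_CLK_DIV + 1)), so the driver computes div = rate / (2 * clk_freq) - 1. A standalone worked example of that arithmetic, with illustrative values only (250 MHz reference clock, 2.5 MHz MDC):

#include <stdio.h>

int main(void)
{
	unsigned long rate = 250000000;	/* reference clock in Hz */
	unsigned long mdc = 2500000;	/* desired MDC frequency in Hz */
	unsigned long div = rate / (2 * mdc) - 1;

	/* div = 49, and 250 MHz / (2 * (49 + 1)) = 2.5 MHz exactly */
	printf("div=%lu -> MDC=%lu Hz\n", div, rate / (2 * (div + 1)));
	return 0;
}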
@@ -45,6 +46,8 @@ struct unimac_mdio_priv { void __iomem *base; int (*wait_func) (void *wait_func_data); void *wait_func_data; + struct clk *clk; + u32 clk_freq; }; static inline u32 unimac_mdio_readl(struct unimac_mdio_priv *priv, u32 offset) @@ -189,6 +192,35 @@ static int unimac_mdio_reset(struct mii_bus *bus) return 0; } +static void unimac_mdio_clk_set(struct unimac_mdio_priv *priv) +{ + unsigned long rate; + u32 reg, div; + + /* Keep the hardware default values */ + if (!priv->clk_freq) + return; + + if (!priv->clk) + rate = 250000000; + else + rate = clk_get_rate(priv->clk); + + div = (rate / (2 * priv->clk_freq)) - 1; + if (div & ~MDIO_CLK_DIV_MASK) { + pr_warn("Incorrect MDIO clock frequency, ignoring\n"); + return; + } + + /* The MDIO clock is the reference clock (typically 250 MHz) divided by + * 2 x (MDIO_CLK_DIV + 1) + */ + reg = unimac_mdio_readl(priv, MDIO_CFG); + reg &= ~(MDIO_CLK_DIV_MASK << MDIO_CLK_DIV_SHIFT); + reg |= div << MDIO_CLK_DIV_SHIFT; + unimac_mdio_writel(priv, reg, MDIO_CFG); +} + static int unimac_mdio_probe(struct platform_device *pdev) { struct unimac_mdio_pdata *pdata = pdev->dev.platform_data; @@ -217,9 +249,26 @@ static int unimac_mdio_probe(struct platform_device *pdev) return -ENOMEM; } + priv->clk = devm_clk_get(&pdev->dev, NULL); + if (PTR_ERR(priv->clk) == -EPROBE_DEFER) + return PTR_ERR(priv->clk); + else + priv->clk = NULL; + + ret = clk_prepare_enable(priv->clk); + if (ret) + return ret; + + if (of_property_read_u32(np, "clock-frequency", &priv->clk_freq)) + priv->clk_freq = 0; + + unimac_mdio_clk_set(priv); + priv->mii_bus = mdiobus_alloc(); - if (!priv->mii_bus) - return -ENOMEM; + if (!priv->mii_bus) { + ret = -ENOMEM; + goto out_clk_disable; + } bus = priv->mii_bus; bus->priv = priv; @@ -253,6 +302,8 @@ static int unimac_mdio_probe(struct platform_device *pdev) out_mdio_free: mdiobus_free(bus); +out_clk_disable: + clk_disable_unprepare(priv->clk); return ret; } @@ -262,10 +313,37 @@ static int unimac_mdio_remove(struct platform_device *pdev) mdiobus_unregister(priv->mii_bus); mdiobus_free(priv->mii_bus); + clk_disable_unprepare(priv->clk); + + return 0; +} + +static int __maybe_unused unimac_mdio_suspend(struct device *d) +{ + struct unimac_mdio_priv *priv = dev_get_drvdata(d); + + clk_disable_unprepare(priv->clk); + + return 0; +} + +static int __maybe_unused unimac_mdio_resume(struct device *d) +{ + struct unimac_mdio_priv *priv = dev_get_drvdata(d); + int ret; + + ret = clk_prepare_enable(priv->clk); + if (ret) + return ret; + + unimac_mdio_clk_set(priv); return 0; } +static SIMPLE_DEV_PM_OPS(unimac_mdio_pm_ops, + unimac_mdio_suspend, unimac_mdio_resume); + static const struct of_device_id unimac_mdio_ids[] = { { .compatible = "brcm,genet-mdio-v5", }, { .compatible = "brcm,genet-mdio-v4", }, @@ -281,6 +359,7 @@ static struct platform_driver unimac_mdio_driver = { .driver = { .name = UNIMAC_MDIO_DRV_NAME, .of_match_table = unimac_mdio_ids, + .pm = &unimac_mdio_pm_ops, }, .probe = unimac_mdio_probe, .remove = unimac_mdio_remove, diff --git a/drivers/net/phy/mdio-thunder.c b/drivers/net/phy/mdio-thunder.c index 564616968cad..1546f6398831 100644 --- a/drivers/net/phy/mdio-thunder.c +++ b/drivers/net/phy/mdio-thunder.c @@ -73,8 +73,8 @@ static int thunder_mdiobus_pci_probe(struct pci_dev *pdev, err = of_address_to_resource(node, 0, &r); if (err) { dev_err(&pdev->dev, - "Couldn't translate address for \"%s\"\n", - node->name); + "Couldn't translate address for \"%pOFn\"\n", + node); break; } diff --git a/drivers/net/phy/mdio_bus.c
b/drivers/net/phy/mdio_bus.c index 98f4b1f706df..2e59a8419b17 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -38,7 +38,6 @@ #include <linux/phy.h> #include <linux/io.h> #include <linux/uaccess.h> -#include <linux/gpio/consumer.h> #include <asm/irq.h> diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c index 2d67937866a3..04b12e34da58 100644 --- a/drivers/net/phy/microchip.c +++ b/drivers/net/phy/microchip.c @@ -88,7 +88,7 @@ static int lan88xx_TR_reg_set(struct phy_device *phydev, u16 regaddr, /* Save current page */ save_page = phy_save_page(phydev); if (save_page < 0) { - pr_warn("Failed to get current page\n"); + phydev_warn(phydev, "Failed to get current page\n"); goto err; } @@ -98,14 +98,14 @@ static int lan88xx_TR_reg_set(struct phy_device *phydev, u16 regaddr, ret = __phy_write(phydev, LAN88XX_EXT_PAGE_TR_LOW_DATA, (data & 0xFFFF)); if (ret < 0) { - pr_warn("Failed to write TR low data\n"); + phydev_warn(phydev, "Failed to write TR low data\n"); goto err; } ret = __phy_write(phydev, LAN88XX_EXT_PAGE_TR_HIGH_DATA, (data & 0x00FF0000) >> 16); if (ret < 0) { - pr_warn("Failed to write TR high data\n"); + phydev_warn(phydev, "Failed to write TR high data\n"); goto err; } @@ -115,14 +115,15 @@ static int lan88xx_TR_reg_set(struct phy_device *phydev, u16 regaddr, ret = __phy_write(phydev, LAN88XX_EXT_PAGE_TR_CR, buf); if (ret < 0) { - pr_warn("Failed to write data in reg\n"); + phydev_warn(phydev, "Failed to write data in reg\n"); goto err; } usleep_range(1000, 2000);/* Wait for Data to be written */ val = __phy_read(phydev, LAN88XX_EXT_PAGE_TR_CR); if (!(val & 0x8000)) - pr_warn("TR Register[0x%X] configuration failed\n", regaddr); + phydev_warn(phydev, "TR Register[0x%X] configuration failed\n", + regaddr); err: return phy_restore_page(phydev, save_page, ret); } @@ -137,7 +138,7 @@ static void lan88xx_config_TR_regs(struct phy_device *phydev) */ err = lan88xx_TR_reg_set(phydev, 0x0F82, 0x12B00A); if (err < 0) - pr_warn("Failed to Set Register[0x0F82]\n"); + phydev_warn(phydev, "Failed to Set Register[0x0F82]\n"); /* Get access to Channel b'10, Node b'1101, Register 0x06. * Write 24-bit value 0xD2C46F to register. Setting SSTrKf1000Slv, @@ -145,7 +146,7 @@ static void lan88xx_config_TR_regs(struct phy_device *phydev) */ err = lan88xx_TR_reg_set(phydev, 0x168C, 0xD2C46F); if (err < 0) - pr_warn("Failed to Set Register[0x168C]\n"); + phydev_warn(phydev, "Failed to Set Register[0x168C]\n"); /* Get access to Channel b'10, Node b'1111, Register 0x11. * Write 24-bit value 0x620 to register. Setting rem_upd_done_thresh @@ -153,7 +154,7 @@ static void lan88xx_config_TR_regs(struct phy_device *phydev) */ err = lan88xx_TR_reg_set(phydev, 0x17A2, 0x620); if (err < 0) - pr_warn("Failed to Set Register[0x17A2]\n"); + phydev_warn(phydev, "Failed to Set Register[0x17A2]\n"); /* Get access to Channel b'10, Node b'1101, Register 0x10. * Write 24-bit value 0xEEFFDD to register. Setting @@ -162,7 +163,7 @@ static void lan88xx_config_TR_regs(struct phy_device *phydev) */ err = lan88xx_TR_reg_set(phydev, 0x16A0, 0xEEFFDD); if (err < 0) - pr_warn("Failed to Set Register[0x16A0]\n"); + phydev_warn(phydev, "Failed to Set Register[0x16A0]\n"); /* Get access to Channel b'10, Node b'1101, Register 0x13. * Write 24-bit value 0x071448 to register. 
Setting @@ -170,7 +171,7 @@ static void lan88xx_config_TR_regs(struct phy_device *phydev) */ err = lan88xx_TR_reg_set(phydev, 0x16A6, 0x071448); if (err < 0) - pr_warn("Failed to Set Register[0x16A6]\n"); + phydev_warn(phydev, "Failed to Set Register[0x16A6]\n"); /* Get access to Channel b'10, Node b'1101, Register 0x12. * Write 24-bit value 0x13132F to register. Setting @@ -178,7 +179,7 @@ static void lan88xx_config_TR_regs(struct phy_device *phydev) */ err = lan88xx_TR_reg_set(phydev, 0x16A4, 0x13132F); if (err < 0) - pr_warn("Failed to Set Register[0x16A4]\n"); + phydev_warn(phydev, "Failed to Set Register[0x16A4]\n"); /* Get access to Channel b'10, Node b'1101, Register 0x14. * Write 24-bit value 0x0 to register. Setting eee_3level_delay, @@ -186,7 +187,7 @@ static void lan88xx_config_TR_regs(struct phy_device *phydev) */ err = lan88xx_TR_reg_set(phydev, 0x16A8, 0x0); if (err < 0) - pr_warn("Failed to Set Register[0x16A8]\n"); + phydev_warn(phydev, "Failed to Set Register[0x16A8]\n"); /* Get access to Channel b'01, Node b'1111, Register 0x34. * Write 24-bit value 0x91B06C to register. Setting @@ -195,7 +196,7 @@ static void lan88xx_config_TR_regs(struct phy_device *phydev) */ err = lan88xx_TR_reg_set(phydev, 0x0FE8, 0x91B06C); if (err < 0) - pr_warn("Failed to Set Register[0x0FE8]\n"); + phydev_warn(phydev, "Failed to Set Register[0x0FE8]\n"); /* Get access to Channel b'01, Node b'1111, Register 0x3E. * Write 24-bit value 0xC0A028 to register. Setting @@ -204,7 +205,7 @@ static void lan88xx_config_TR_regs(struct phy_device *phydev) */ err = lan88xx_TR_reg_set(phydev, 0x0FFC, 0xC0A028); if (err < 0) - pr_warn("Failed to Set Register[0x0FFC]\n"); + phydev_warn(phydev, "Failed to Set Register[0x0FFC]\n"); /* Get access to Channel b'01, Node b'1111, Register 0x35. * Write 24-bit value 0x041600 to register. Setting @@ -213,14 +214,14 @@ static void lan88xx_config_TR_regs(struct phy_device *phydev) */ err = lan88xx_TR_reg_set(phydev, 0x0FEA, 0x041600); if (err < 0) - pr_warn("Failed to Set Register[0x0FEA]\n"); + phydev_warn(phydev, "Failed to Set Register[0x0FEA]\n"); /* Get access to Channel b'10, Node b'1101, Register 0x03. * Write 24-bit value 0x000004 to register. Setting TrFreeze bits. 
*/ err = lan88xx_TR_reg_set(phydev, 0x1686, 0x000004); if (err < 0) - pr_warn("Failed to Set Register[0x1686]\n"); + phydev_warn(phydev, "Failed to Set Register[0x1686]\n"); } static int lan88xx_probe(struct phy_device *phydev) diff --git a/drivers/net/phy/microchip_t1.c b/drivers/net/phy/microchip_t1.c index b1917dd1978a..c600a8509d60 100644 --- a/drivers/net/phy/microchip_t1.c +++ b/drivers/net/phy/microchip_t1.c @@ -46,7 +46,7 @@ static struct phy_driver microchip_t1_phy_driver[] = { .phy_id_mask = 0xfffffff0, .name = "Microchip LAN87xx T1", - .features = SUPPORTED_100baseT_Full, + .features = PHY_BASIC_T1_FEATURES, .flags = PHY_HAS_INTERRUPT, .config_init = genphy_config_init, diff --git a/drivers/net/phy/mscc.c b/drivers/net/phy/mscc.c index 84ca9ff40ae0..bffe077dc75f 100644 --- a/drivers/net/phy/mscc.c +++ b/drivers/net/phy/mscc.c @@ -6,6 +6,8 @@ * Copyright (c) 2016 Microsemi Corporation */ +#include <linux/firmware.h> +#include <linux/jiffies.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/mdio.h> @@ -32,6 +34,15 @@ enum rgmii_rx_clock_delay { #define DISABLE_HP_AUTO_MDIX_MASK 0x0080 #define DISABLE_PAIR_SWAP_CORR_MASK 0x0020 #define DISABLE_POLARITY_CORR_MASK 0x0010 +#define PARALLEL_DET_IGNORE_ADVERTISED 0x0008 + +#define MSCC_PHY_EXT_CNTL_STATUS 22 +#define SMI_BROADCAST_WR_EN 0x0001 + +#define MSCC_PHY_ERR_RX_CNT 19 +#define MSCC_PHY_ERR_FALSE_CARRIER_CNT 20 +#define MSCC_PHY_ERR_LINK_DISCONNECT_CNT 21 +#define ERR_CNT_MASK GENMASK(7, 0) #define MSCC_PHY_EXT_PHY_CNTL_1 23 #define MAC_IF_SELECTION_MASK 0x1800 @@ -39,7 +50,22 @@ enum rgmii_rx_clock_delay { #define MAC_IF_SELECTION_RMII 1 #define MAC_IF_SELECTION_RGMII 2 #define MAC_IF_SELECTION_POS 11 +#define VSC8584_MAC_IF_SELECTION_MASK 0x1000 +#define VSC8584_MAC_IF_SELECTION_SGMII 0 +#define VSC8584_MAC_IF_SELECTION_1000BASEX 1 +#define VSC8584_MAC_IF_SELECTION_POS 12 #define FAR_END_LOOPBACK_MODE_MASK 0x0008 +#define MEDIA_OP_MODE_MASK 0x0700 +#define MEDIA_OP_MODE_COPPER 0 +#define MEDIA_OP_MODE_SERDES 1 +#define MEDIA_OP_MODE_1000BASEX 2 +#define MEDIA_OP_MODE_100BASEFX 3 +#define MEDIA_OP_MODE_AMS_COPPER_SERDES 5 +#define MEDIA_OP_MODE_AMS_COPPER_1000BASEX 6 +#define MEDIA_OP_MODE_AMS_COPPER_100BASEFX 7 +#define MEDIA_OP_MODE_POS 8 + +#define MSCC_PHY_EXT_PHY_CNTL_2 24 #define MII_VSC85XX_INT_MASK 25 #define MII_VSC85XX_INT_MASK_MASK 0xa000 @@ -54,27 +80,48 @@ enum rgmii_rx_clock_delay { #define HP_AUTO_MDIX_X_OVER_IND_MASK 0x2000 #define MSCC_PHY_LED_MODE_SEL 29 -#define LED_1_MODE_SEL_MASK 0x00F0 -#define LED_0_MODE_SEL_MASK 0x000F -#define LED_1_MODE_SEL_POS 4 +#define LED_MODE_SEL_POS(x) ((x) * 4) +#define LED_MODE_SEL_MASK(x) (GENMASK(3, 0) << LED_MODE_SEL_POS(x)) +#define LED_MODE_SEL(x, mode) (((mode) << LED_MODE_SEL_POS(x)) & LED_MODE_SEL_MASK(x)) #define MSCC_EXT_PAGE_ACCESS 31 #define MSCC_PHY_PAGE_STANDARD 0x0000 /* Standard registers */ #define MSCC_PHY_PAGE_EXTENDED 0x0001 /* Extended registers */ #define MSCC_PHY_PAGE_EXTENDED_2 0x0002 /* Extended reg - page 2 */ +#define MSCC_PHY_PAGE_EXTENDED_3 0x0003 /* Extended reg - page 3 */ +#define MSCC_PHY_PAGE_EXTENDED_4 0x0004 /* Extended reg - page 4 */ +/* Extended reg - GPIO; this is a bank of registers that are shared for all PHYs + * in the same package. 
+ */ +#define MSCC_PHY_PAGE_EXTENDED_GPIO 0x0010 /* Extended reg - GPIO */ +#define MSCC_PHY_PAGE_TEST 0x2a30 /* Test reg */ +#define MSCC_PHY_PAGE_TR 0x52b5 /* Token ring registers */ /* Extended Page 1 Registers */ +#define MSCC_PHY_CU_MEDIA_CRC_VALID_CNT 18 +#define VALID_CRC_CNT_CRC_MASK GENMASK(13, 0) + #define MSCC_PHY_EXT_MODE_CNTL 19 #define FORCE_MDI_CROSSOVER_MASK 0x000C #define FORCE_MDI_CROSSOVER_MDIX 0x000C #define FORCE_MDI_CROSSOVER_MDI 0x0008 #define MSCC_PHY_ACTIPHY_CNTL 20 +#define PHY_ADDR_REVERSED 0x0200 #define DOWNSHIFT_CNTL_MASK 0x001C #define DOWNSHIFT_EN 0x0010 #define DOWNSHIFT_CNTL_POS 2 +#define MSCC_PHY_EXT_PHY_CNTL_4 23 +#define PHY_CNTL_4_ADDR_POS 11 + +#define MSCC_PHY_VERIPHY_CNTL_2 25 + +#define MSCC_PHY_VERIPHY_CNTL_3 26 + /* Extended Page 2 Registers */ +#define MSCC_PHY_CU_PMD_TX_CNTL 16 + #define MSCC_PHY_RGMII_CNTL 20 #define RGMII_RX_CLK_DELAY_MASK 0x0070 #define RGMII_RX_CLK_DELAY_POS 4 @@ -90,11 +137,90 @@ enum rgmii_rx_clock_delay { #define SECURE_ON_ENABLE 0x8000 #define SECURE_ON_PASSWD_LEN_4 0x4000 +/* Extended Page 3 Registers */ +#define MSCC_PHY_SERDES_TX_VALID_CNT 21 +#define MSCC_PHY_SERDES_TX_CRC_ERR_CNT 22 +#define MSCC_PHY_SERDES_RX_VALID_CNT 28 +#define MSCC_PHY_SERDES_RX_CRC_ERR_CNT 29 + +/* Extended page GPIO Registers */ +#define MSCC_DW8051_CNTL_STATUS 0 +#define MICRO_NSOFT_RESET 0x8000 +#define RUN_FROM_INT_ROM 0x4000 +#define AUTOINC_ADDR 0x2000 +#define PATCH_RAM_CLK 0x1000 +#define MICRO_PATCH_EN 0x0080 +#define DW8051_CLK_EN 0x0010 +#define MICRO_CLK_EN 0x0008 +#define MICRO_CLK_DIVIDE(x) ((x) >> 1) +#define MSCC_DW8051_VLD_MASK 0xf1ff + +/* x Address in range 1-4 */ +#define MSCC_TRAP_ROM_ADDR(x) ((x) * 2 + 1) +#define MSCC_PATCH_RAM_ADDR(x) (((x) + 1) * 2) +#define MSCC_INT_MEM_ADDR 11 + +#define MSCC_INT_MEM_CNTL 12 +#define READ_SFR 0x6000 +#define READ_PRAM 0x4000 +#define READ_ROM 0x2000 +#define READ_RAM 0x0000 +#define INT_MEM_WRITE_EN 0x1000 +#define EN_PATCH_RAM_TRAP_ADDR(x) (0x0100 << ((x) - 1)) +#define INT_MEM_DATA_M 0x00ff +#define INT_MEM_DATA(x) (INT_MEM_DATA_M & (x)) + +#define MSCC_PHY_PROC_CMD 18 +#define PROC_CMD_NCOMPLETED 0x8000 +#define PROC_CMD_FAILED 0x4000 +#define PROC_CMD_SGMII_PORT(x) ((x) << 8) +#define PROC_CMD_FIBER_PORT(x) (0x0100 << (x) % 4) +#define PROC_CMD_QSGMII_PORT 0x0c00 +#define PROC_CMD_RST_CONF_PORT 0x0080 +#define PROC_CMD_RECONF_PORT 0x0000 +#define PROC_CMD_READ_MOD_WRITE_PORT 0x0040 +#define PROC_CMD_WRITE 0x0040 +#define PROC_CMD_READ 0x0000 +#define PROC_CMD_FIBER_DISABLE 0x0020 +#define PROC_CMD_FIBER_100BASE_FX 0x0010 +#define PROC_CMD_FIBER_1000BASE_X 0x0000 +#define PROC_CMD_SGMII_MAC 0x0030 +#define PROC_CMD_QSGMII_MAC 0x0020 +#define PROC_CMD_NO_MAC_CONF 0x0000 +#define PROC_CMD_1588_DEFAULT_INIT 0x0010 +#define PROC_CMD_NOP 0x000f +#define PROC_CMD_PHY_INIT 0x000a +#define PROC_CMD_CRC16 0x0008 +#define PROC_CMD_FIBER_MEDIA_CONF 0x0001 +#define PROC_CMD_MCB_ACCESS_MAC_CONF 0x0000 +#define PROC_CMD_NCOMPLETED_TIMEOUT_MS 500 + +#define MSCC_PHY_MAC_CFG_FASTLINK 19 +#define MAC_CFG_MASK 0xc000 +#define MAC_CFG_SGMII 0x0000 +#define MAC_CFG_QSGMII 0x4000 + +/* Test page Registers */ +#define MSCC_PHY_TEST_PAGE_5 5 +#define MSCC_PHY_TEST_PAGE_8 8 +#define MSCC_PHY_TEST_PAGE_9 9 +#define MSCC_PHY_TEST_PAGE_20 20 +#define MSCC_PHY_TEST_PAGE_24 24 + +/* Token ring page Registers */ +#define MSCC_PHY_TR_CNTL 16 +#define TR_WRITE 0x8000 +#define TR_ADDR(x) (0x7fff & (x)) +#define MSCC_PHY_TR_LSB 17 +#define MSCC_PHY_TR_MSB 18 + /* Microsemi PHY ID's */ #define PHY_ID_VSC8530 
0x00070560 #define PHY_ID_VSC8531 0x00070570 #define PHY_ID_VSC8540 0x00070760 #define PHY_ID_VSC8541 0x00070770 +#define PHY_ID_VSC8574 0x000704a0 +#define PHY_ID_VSC8584 0x000707c0 #define MSCC_VDDMAC_1500 1500 #define MSCC_VDDMAC_1800 1800 @@ -103,16 +229,160 @@ enum rgmii_rx_clock_delay { #define DOWNSHIFT_COUNT_MAX 5 +#define MAX_LEDS 4 + +#define VSC8584_SUPP_LED_MODES (BIT(VSC8531_LINK_ACTIVITY) | \ + BIT(VSC8531_LINK_1000_ACTIVITY) | \ + BIT(VSC8531_LINK_100_ACTIVITY) | \ + BIT(VSC8531_LINK_10_ACTIVITY) | \ + BIT(VSC8531_LINK_100_1000_ACTIVITY) | \ + BIT(VSC8531_LINK_10_1000_ACTIVITY) | \ + BIT(VSC8531_LINK_10_100_ACTIVITY) | \ + BIT(VSC8584_LINK_100FX_1000X_ACTIVITY) | \ + BIT(VSC8531_DUPLEX_COLLISION) | \ + BIT(VSC8531_COLLISION) | \ + BIT(VSC8531_ACTIVITY) | \ + BIT(VSC8584_100FX_1000X_ACTIVITY) | \ + BIT(VSC8531_AUTONEG_FAULT) | \ + BIT(VSC8531_SERIAL_MODE) | \ + BIT(VSC8531_FORCE_LED_OFF) | \ + BIT(VSC8531_FORCE_LED_ON)) + +#define VSC85XX_SUPP_LED_MODES (BIT(VSC8531_LINK_ACTIVITY) | \ + BIT(VSC8531_LINK_1000_ACTIVITY) | \ + BIT(VSC8531_LINK_100_ACTIVITY) | \ + BIT(VSC8531_LINK_10_ACTIVITY) | \ + BIT(VSC8531_LINK_100_1000_ACTIVITY) | \ + BIT(VSC8531_LINK_10_1000_ACTIVITY) | \ + BIT(VSC8531_LINK_10_100_ACTIVITY) | \ + BIT(VSC8531_DUPLEX_COLLISION) | \ + BIT(VSC8531_COLLISION) | \ + BIT(VSC8531_ACTIVITY) | \ + BIT(VSC8531_AUTONEG_FAULT) | \ + BIT(VSC8531_SERIAL_MODE) | \ + BIT(VSC8531_FORCE_LED_OFF) | \ + BIT(VSC8531_FORCE_LED_ON)) + +#define MSCC_VSC8584_REVB_INT8051_FW "mscc_vsc8584_revb_int8051_fb48.bin" +#define MSCC_VSC8584_REVB_INT8051_FW_START_ADDR 0xe800 +#define MSCC_VSC8584_REVB_INT8051_FW_CRC 0xfb48 + +#define MSCC_VSC8574_REVB_INT8051_FW "mscc_vsc8574_revb_int8051_29e8.bin" +#define MSCC_VSC8574_REVB_INT8051_FW_START_ADDR 0x4000 +#define MSCC_VSC8574_REVB_INT8051_FW_CRC 0x29e8 + +#define VSC8584_REVB 0x0001 +#define MSCC_DEV_REV_MASK GENMASK(3, 0) + +struct reg_val { + u16 reg; + u32 val; +}; + +struct vsc85xx_hw_stat { + const char *string; + u8 reg; + u16 page; + u16 mask; +}; + +static const struct vsc85xx_hw_stat vsc85xx_hw_stats[] = { + { + .string = "phy_receive_errors", + .reg = MSCC_PHY_ERR_RX_CNT, + .page = MSCC_PHY_PAGE_STANDARD, + .mask = ERR_CNT_MASK, + }, { + .string = "phy_false_carrier", + .reg = MSCC_PHY_ERR_FALSE_CARRIER_CNT, + .page = MSCC_PHY_PAGE_STANDARD, + .mask = ERR_CNT_MASK, + }, { + .string = "phy_cu_media_link_disconnect", + .reg = MSCC_PHY_ERR_LINK_DISCONNECT_CNT, + .page = MSCC_PHY_PAGE_STANDARD, + .mask = ERR_CNT_MASK, + }, { + .string = "phy_cu_media_crc_good_count", + .reg = MSCC_PHY_CU_MEDIA_CRC_VALID_CNT, + .page = MSCC_PHY_PAGE_EXTENDED, + .mask = VALID_CRC_CNT_CRC_MASK, + }, { + .string = "phy_cu_media_crc_error_count", + .reg = MSCC_PHY_EXT_PHY_CNTL_4, + .page = MSCC_PHY_PAGE_EXTENDED, + .mask = ERR_CNT_MASK, + }, +}; + +static const struct vsc85xx_hw_stat vsc8584_hw_stats[] = { + { + .string = "phy_receive_errors", + .reg = MSCC_PHY_ERR_RX_CNT, + .page = MSCC_PHY_PAGE_STANDARD, + .mask = ERR_CNT_MASK, + }, { + .string = "phy_false_carrier", + .reg = MSCC_PHY_ERR_FALSE_CARRIER_CNT, + .page = MSCC_PHY_PAGE_STANDARD, + .mask = ERR_CNT_MASK, + }, { + .string = "phy_cu_media_link_disconnect", + .reg = MSCC_PHY_ERR_LINK_DISCONNECT_CNT, + .page = MSCC_PHY_PAGE_STANDARD, + .mask = ERR_CNT_MASK, + }, { + .string = "phy_cu_media_crc_good_count", + .reg = MSCC_PHY_CU_MEDIA_CRC_VALID_CNT, + .page = MSCC_PHY_PAGE_EXTENDED, + .mask = VALID_CRC_CNT_CRC_MASK, + }, { + .string = "phy_cu_media_crc_error_count", + .reg = MSCC_PHY_EXT_PHY_CNTL_4, + 
.page = MSCC_PHY_PAGE_EXTENDED, + .mask = ERR_CNT_MASK, + }, { + .string = "phy_serdes_tx_good_pkt_count", + .reg = MSCC_PHY_SERDES_TX_VALID_CNT, + .page = MSCC_PHY_PAGE_EXTENDED_3, + .mask = VALID_CRC_CNT_CRC_MASK, + }, { + .string = "phy_serdes_tx_bad_crc_count", + .reg = MSCC_PHY_SERDES_TX_CRC_ERR_CNT, + .page = MSCC_PHY_PAGE_EXTENDED_3, + .mask = ERR_CNT_MASK, + }, { + .string = "phy_serdes_rx_good_pkt_count", + .reg = MSCC_PHY_SERDES_RX_VALID_CNT, + .page = MSCC_PHY_PAGE_EXTENDED_3, + .mask = VALID_CRC_CNT_CRC_MASK, + }, { + .string = "phy_serdes_rx_bad_crc_count", + .reg = MSCC_PHY_SERDES_RX_CRC_ERR_CNT, + .page = MSCC_PHY_PAGE_EXTENDED_3, + .mask = ERR_CNT_MASK, + }, +}; + struct vsc8531_private { int rate_magic; - u8 led_0_mode; - u8 led_1_mode; + u16 supp_led_modes; + u32 leds_mode[MAX_LEDS]; + u8 nleds; + const struct vsc85xx_hw_stat *hw_stats; + u64 *stats; + int nstats; + bool pkg_init; + /* For multiple port PHYs; the MDIO address of the base PHY in the + * package. + */ + unsigned int base_addr; }; #ifdef CONFIG_OF_MDIO struct vsc8531_edge_rate_table { - u16 vddmac; - u8 slowdown[8]; + u32 vddmac; + u32 slowdown[8]; }; static const struct vsc8531_edge_rate_table edge_table[] = { @@ -123,12 +393,66 @@ static const struct vsc8531_edge_rate_table edge_table[] = { }; #endif /* CONFIG_OF_MDIO */ -static int vsc85xx_phy_page_set(struct phy_device *phydev, u16 page) +static int vsc85xx_phy_read_page(struct phy_device *phydev) { - int rc; + return __phy_read(phydev, MSCC_EXT_PAGE_ACCESS); +} - rc = phy_write(phydev, MSCC_EXT_PAGE_ACCESS, page); - return rc; +static int vsc85xx_phy_write_page(struct phy_device *phydev, int page) +{ + return __phy_write(phydev, MSCC_EXT_PAGE_ACCESS, page); +} + +static int vsc85xx_get_sset_count(struct phy_device *phydev) +{ + struct vsc8531_private *priv = phydev->priv; + + if (!priv) + return 0; + + return priv->nstats; +} + +static void vsc85xx_get_strings(struct phy_device *phydev, u8 *data) +{ + struct vsc8531_private *priv = phydev->priv; + int i; + + if (!priv) + return; + + for (i = 0; i < priv->nstats; i++) + strlcpy(data + i * ETH_GSTRING_LEN, priv->hw_stats[i].string, + ETH_GSTRING_LEN); +} + +static u64 vsc85xx_get_stat(struct phy_device *phydev, int i) +{ + struct vsc8531_private *priv = phydev->priv; + int val; + + val = phy_read_paged(phydev, priv->hw_stats[i].page, + priv->hw_stats[i].reg); + if (val < 0) + return U64_MAX; + + val = val & priv->hw_stats[i].mask; + priv->stats[i] += val; + + return priv->stats[i]; +} + +static void vsc85xx_get_stats(struct phy_device *phydev, + struct ethtool_stats *stats, u64 *data) +{ + struct vsc8531_private *priv = phydev->priv; + int i; + + if (!priv) + return; + + for (i = 0; i < priv->nstats; i++) + data[i] = vsc85xx_get_stat(phydev, i); } static int vsc85xx_led_cntl_set(struct phy_device *phydev, @@ -140,14 +464,8 @@ static int vsc85xx_led_cntl_set(struct phy_device *phydev, mutex_lock(&phydev->lock); reg_val = phy_read(phydev, MSCC_PHY_LED_MODE_SEL); - if (led_num) { - reg_val &= ~LED_1_MODE_SEL_MASK; - reg_val |= (((u16)mode << LED_1_MODE_SEL_POS) & - LED_1_MODE_SEL_MASK); - } else { - reg_val &= ~LED_0_MODE_SEL_MASK; - reg_val |= ((u16)mode & LED_0_MODE_SEL_MASK); - } + reg_val &= ~LED_MODE_SEL_MASK(led_num); + reg_val |= LED_MODE_SEL(led_num, (u16)mode); rc = phy_write(phydev, MSCC_PHY_LED_MODE_SEL, reg_val); mutex_unlock(&phydev->lock); @@ -173,7 +491,7 @@ static int vsc85xx_mdix_set(struct phy_device *phydev, u8 mdix) u16 reg_val; reg_val = phy_read(phydev, MSCC_PHY_BYPASS_CONTROL); - if 
((mdix == ETH_TP_MDI) || (mdix == ETH_TP_MDI_X)) { + if (mdix == ETH_TP_MDI || mdix == ETH_TP_MDI_X) { reg_val |= (DISABLE_PAIR_SWAP_CORR_MASK | DISABLE_POLARITY_CORR_MASK | DISABLE_HP_AUTO_MDIX_MASK); @@ -183,25 +501,20 @@ static int vsc85xx_mdix_set(struct phy_device *phydev, u8 mdix) DISABLE_HP_AUTO_MDIX_MASK); } rc = phy_write(phydev, MSCC_PHY_BYPASS_CONTROL, reg_val); - if (rc != 0) + if (rc) return rc; - rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_EXTENDED); - if (rc != 0) - return rc; + reg_val = 0; - reg_val = phy_read(phydev, MSCC_PHY_EXT_MODE_CNTL); - reg_val &= ~(FORCE_MDI_CROSSOVER_MASK); if (mdix == ETH_TP_MDI) - reg_val |= FORCE_MDI_CROSSOVER_MDI; + reg_val = FORCE_MDI_CROSSOVER_MDI; else if (mdix == ETH_TP_MDI_X) - reg_val |= FORCE_MDI_CROSSOVER_MDIX; - rc = phy_write(phydev, MSCC_PHY_EXT_MODE_CNTL, reg_val); - if (rc != 0) - return rc; + reg_val = FORCE_MDI_CROSSOVER_MDIX; - rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_STANDARD); - if (rc != 0) + rc = phy_modify_paged(phydev, MSCC_PHY_PAGE_EXTENDED, + MSCC_PHY_EXT_MODE_CNTL, FORCE_MDI_CROSSOVER_MASK, + reg_val); + if (rc < 0) return rc; return genphy_restart_aneg(phydev); @@ -209,30 +522,24 @@ static int vsc85xx_mdix_set(struct phy_device *phydev, u8 mdix) static int vsc85xx_downshift_get(struct phy_device *phydev, u8 *count) { - int rc; u16 reg_val; - rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_EXTENDED); - if (rc != 0) - goto out; + reg_val = phy_read_paged(phydev, MSCC_PHY_PAGE_EXTENDED, + MSCC_PHY_ACTIPHY_CNTL); + if (reg_val < 0) + return reg_val; - reg_val = phy_read(phydev, MSCC_PHY_ACTIPHY_CNTL); reg_val &= DOWNSHIFT_CNTL_MASK; if (!(reg_val & DOWNSHIFT_EN)) *count = DOWNSHIFT_DEV_DISABLE; else *count = ((reg_val & ~DOWNSHIFT_EN) >> DOWNSHIFT_CNTL_POS) + 2; - rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_STANDARD); -out: - return rc; + return 0; } static int vsc85xx_downshift_set(struct phy_device *phydev, u8 count) { - int rc; - u16 reg_val; - if (count == DOWNSHIFT_DEV_DEFAULT_COUNT) { /* Default downshift count 3 (i.e. 
Bit3:2 = 0b01) */ count = ((1 << DOWNSHIFT_CNTL_POS) | DOWNSHIFT_EN); @@ -244,21 +551,9 @@ static int vsc85xx_downshift_set(struct phy_device *phydev, u8 count) count = (((count - 2) << DOWNSHIFT_CNTL_POS) | DOWNSHIFT_EN); } - rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_EXTENDED); - if (rc != 0) - goto out; - - reg_val = phy_read(phydev, MSCC_PHY_ACTIPHY_CNTL); - reg_val &= ~(DOWNSHIFT_CNTL_MASK); - reg_val |= count; - rc = phy_write(phydev, MSCC_PHY_ACTIPHY_CNTL, reg_val); - if (rc != 0) - goto out; - - rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_STANDARD); - -out: - return rc; + return phy_modify_paged(phydev, MSCC_PHY_PAGE_EXTENDED, + MSCC_PHY_ACTIPHY_CNTL, DOWNSHIFT_CNTL_MASK, + count); } static int vsc85xx_wol_set(struct phy_device *phydev, @@ -272,46 +567,48 @@ static int vsc85xx_wol_set(struct phy_device *phydev, u8 *mac_addr = phydev->attached_dev->dev_addr; mutex_lock(&phydev->lock); - rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_EXTENDED_2); - if (rc != 0) + rc = phy_select_page(phydev, MSCC_PHY_PAGE_EXTENDED_2); + if (rc < 0) { + rc = phy_restore_page(phydev, rc, rc); goto out_unlock; + } if (wol->wolopts & WAKE_MAGIC) { /* Store the device address for the magic packet */ for (i = 0; i < ARRAY_SIZE(pwd); i++) pwd[i] = mac_addr[5 - (i * 2 + 1)] << 8 | mac_addr[5 - i * 2]; - phy_write(phydev, MSCC_PHY_WOL_LOWER_MAC_ADDR, pwd[0]); - phy_write(phydev, MSCC_PHY_WOL_MID_MAC_ADDR, pwd[1]); - phy_write(phydev, MSCC_PHY_WOL_UPPER_MAC_ADDR, pwd[2]); + __phy_write(phydev, MSCC_PHY_WOL_LOWER_MAC_ADDR, pwd[0]); + __phy_write(phydev, MSCC_PHY_WOL_MID_MAC_ADDR, pwd[1]); + __phy_write(phydev, MSCC_PHY_WOL_UPPER_MAC_ADDR, pwd[2]); } else { - phy_write(phydev, MSCC_PHY_WOL_LOWER_MAC_ADDR, 0); - phy_write(phydev, MSCC_PHY_WOL_MID_MAC_ADDR, 0); - phy_write(phydev, MSCC_PHY_WOL_UPPER_MAC_ADDR, 0); + __phy_write(phydev, MSCC_PHY_WOL_LOWER_MAC_ADDR, 0); + __phy_write(phydev, MSCC_PHY_WOL_MID_MAC_ADDR, 0); + __phy_write(phydev, MSCC_PHY_WOL_UPPER_MAC_ADDR, 0); } if (wol_conf->wolopts & WAKE_MAGICSECURE) { for (i = 0; i < ARRAY_SIZE(pwd); i++) pwd[i] = wol_conf->sopass[5 - (i * 2 + 1)] << 8 | wol_conf->sopass[5 - i * 2]; - phy_write(phydev, MSCC_PHY_WOL_LOWER_PASSWD, pwd[0]); - phy_write(phydev, MSCC_PHY_WOL_MID_PASSWD, pwd[1]); - phy_write(phydev, MSCC_PHY_WOL_UPPER_PASSWD, pwd[2]); + __phy_write(phydev, MSCC_PHY_WOL_LOWER_PASSWD, pwd[0]); + __phy_write(phydev, MSCC_PHY_WOL_MID_PASSWD, pwd[1]); + __phy_write(phydev, MSCC_PHY_WOL_UPPER_PASSWD, pwd[2]); } else { - phy_write(phydev, MSCC_PHY_WOL_LOWER_PASSWD, 0); - phy_write(phydev, MSCC_PHY_WOL_MID_PASSWD, 0); - phy_write(phydev, MSCC_PHY_WOL_UPPER_PASSWD, 0); + __phy_write(phydev, MSCC_PHY_WOL_LOWER_PASSWD, 0); + __phy_write(phydev, MSCC_PHY_WOL_MID_PASSWD, 0); + __phy_write(phydev, MSCC_PHY_WOL_UPPER_PASSWD, 0); } - reg_val = phy_read(phydev, MSCC_PHY_WOL_MAC_CONTROL); + reg_val = __phy_read(phydev, MSCC_PHY_WOL_MAC_CONTROL); if (wol_conf->wolopts & WAKE_MAGICSECURE) reg_val |= SECURE_ON_ENABLE; else reg_val &= ~SECURE_ON_ENABLE; - phy_write(phydev, MSCC_PHY_WOL_MAC_CONTROL, reg_val); + __phy_write(phydev, MSCC_PHY_WOL_MAC_CONTROL, reg_val); - rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_STANDARD); - if (rc != 0) + rc = phy_restore_page(phydev, rc, rc > 0 ? 
0 : rc); + if (rc < 0) goto out_unlock; if (wol->wolopts & WAKE_MAGIC) { @@ -319,14 +616,14 @@ static int vsc85xx_wol_set(struct phy_device *phydev, reg_val = phy_read(phydev, MII_VSC85XX_INT_MASK); reg_val |= MII_VSC85XX_INT_MASK_WOL; rc = phy_write(phydev, MII_VSC85XX_INT_MASK, reg_val); - if (rc != 0) + if (rc) goto out_unlock; } else { /* Disable the WOL interrupt */ reg_val = phy_read(phydev, MII_VSC85XX_INT_MASK); reg_val &= (~MII_VSC85XX_INT_MASK_WOL); rc = phy_write(phydev, MII_VSC85XX_INT_MASK, reg_val); - if (rc != 0) + if (rc) goto out_unlock; } /* Clear WOL interrupt status */ @@ -348,17 +645,17 @@ static void vsc85xx_wol_get(struct phy_device *phydev, struct ethtool_wolinfo *wol_conf = wol; mutex_lock(&phydev->lock); - rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_EXTENDED_2); - if (rc != 0) + rc = phy_select_page(phydev, MSCC_PHY_PAGE_EXTENDED_2); + if (rc < 0) goto out_unlock; - reg_val = phy_read(phydev, MSCC_PHY_WOL_MAC_CONTROL); + reg_val = __phy_read(phydev, MSCC_PHY_WOL_MAC_CONTROL); if (reg_val & SECURE_ON_ENABLE) wol_conf->wolopts |= WAKE_MAGICSECURE; if (wol_conf->wolopts & WAKE_MAGICSECURE) { - pwd[0] = phy_read(phydev, MSCC_PHY_WOL_LOWER_PASSWD); - pwd[1] = phy_read(phydev, MSCC_PHY_WOL_MID_PASSWD); - pwd[2] = phy_read(phydev, MSCC_PHY_WOL_UPPER_PASSWD); + pwd[0] = __phy_read(phydev, MSCC_PHY_WOL_LOWER_PASSWD); + pwd[1] = __phy_read(phydev, MSCC_PHY_WOL_MID_PASSWD); + pwd[2] = __phy_read(phydev, MSCC_PHY_WOL_UPPER_PASSWD); for (i = 0; i < ARRAY_SIZE(pwd); i++) { wol_conf->sopass[5 - i * 2] = pwd[i] & 0x00ff; wol_conf->sopass[5 - (i * 2 + 1)] = (pwd[i] & 0xff00) @@ -366,18 +663,16 @@ static void vsc85xx_wol_get(struct phy_device *phydev, } } - rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_STANDARD); - out_unlock: + phy_restore_page(phydev, rc, rc > 0 ? 
0 : rc); mutex_unlock(&phydev->lock); } #ifdef CONFIG_OF_MDIO static int vsc85xx_edge_rate_magic_get(struct phy_device *phydev) { - u8 sd; - u16 vdd; - int rc, i, j; + u32 vdd, sd; + int i, j; struct device *dev = &phydev->mdio.dev; struct device_node *of_node = dev->of_node; u8 sd_array_size = ARRAY_SIZE(edge_table[0].slowdown); @@ -385,12 +680,10 @@ static int vsc85xx_edge_rate_magic_get(struct phy_device *phydev) if (!of_node) return -ENODEV; - rc = of_property_read_u16(of_node, "vsc8531,vddmac", &vdd); - if (rc != 0) + if (of_property_read_u32(of_node, "vsc8531,vddmac", &vdd)) vdd = MSCC_VDDMAC_3300; - rc = of_property_read_u8(of_node, "vsc8531,edge-slowdown", &sd); - if (rc != 0) + if (of_property_read_u32(of_node, "vsc8531,edge-slowdown", &sd)) sd = 0; for (i = 0; i < ARRAY_SIZE(edge_table); i++) @@ -404,19 +697,20 @@ static int vsc85xx_edge_rate_magic_get(struct phy_device *phydev) static int vsc85xx_dt_led_mode_get(struct phy_device *phydev, char *led, - u8 default_mode) + u32 default_mode) { + struct vsc8531_private *priv = phydev->priv; struct device *dev = &phydev->mdio.dev; struct device_node *of_node = dev->of_node; - u8 led_mode; + u32 led_mode; int err; if (!of_node) return -ENODEV; led_mode = default_mode; - err = of_property_read_u8(of_node, led, &led_mode); - if (!err && (led_mode > 15 || led_mode == 7 || led_mode == 11)) { + err = of_property_read_u32(of_node, led, &led_mode); + if (!err && !(BIT(led_mode) & priv->supp_led_modes)) { phydev_err(phydev, "DT %s invalid\n", led); return -EINVAL; } @@ -438,24 +732,36 @@ static int vsc85xx_dt_led_mode_get(struct phy_device *phydev, } #endif /* CONFIG_OF_MDIO */ +static int vsc85xx_dt_led_modes_get(struct phy_device *phydev, + u32 *default_mode) +{ + struct vsc8531_private *priv = phydev->priv; + char led_dt_prop[28]; + int i, ret; + + for (i = 0; i < priv->nleds; i++) { + ret = sprintf(led_dt_prop, "vsc8531,led-%d-mode", i); + if (ret < 0) + return ret; + + ret = vsc85xx_dt_led_mode_get(phydev, led_dt_prop, + default_mode[i]); + if (ret < 0) + return ret; + priv->leds_mode[i] = ret; + } + + return 0; +} + static int vsc85xx_edge_rate_cntl_set(struct phy_device *phydev, u8 edge_rate) { int rc; - u16 reg_val; mutex_lock(&phydev->lock); - rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_EXTENDED_2); - if (rc != 0) - goto out_unlock; - reg_val = phy_read(phydev, MSCC_PHY_WOL_MAC_CONTROL); - reg_val &= ~(EDGE_RATE_CNTL_MASK); - reg_val |= (edge_rate << EDGE_RATE_CNTL_POS); - rc = phy_write(phydev, MSCC_PHY_WOL_MAC_CONTROL, reg_val); - if (rc != 0) - goto out_unlock; - rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_STANDARD); - -out_unlock: + rc = phy_modify_paged(phydev, MSCC_PHY_PAGE_EXTENDED_2, + MSCC_PHY_WOL_MAC_CONTROL, EDGE_RATE_CNTL_MASK, + edge_rate << EDGE_RATE_CNTL_POS); mutex_unlock(&phydev->lock); return rc; @@ -486,7 +792,7 @@ static int vsc85xx_mac_if_set(struct phy_device *phydev, goto out_unlock; } rc = phy_write(phydev, MSCC_PHY_EXT_PHY_CNTL_1, reg_val); - if (rc != 0) + if (rc) goto out_unlock; rc = genphy_soft_reset(phydev); @@ -504,17 +810,17 @@ static int vsc85xx_default_config(struct phy_device *phydev) phydev->mdix_ctrl = ETH_TP_MDI_AUTO; mutex_lock(&phydev->lock); - rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_EXTENDED_2); - if (rc != 0) + rc = phy_select_page(phydev, MSCC_PHY_PAGE_EXTENDED_2); + if (rc < 0) goto out_unlock; reg_val = phy_read(phydev, MSCC_PHY_RGMII_CNTL); reg_val &= ~(RGMII_RX_CLK_DELAY_MASK); reg_val |= (RGMII_RX_CLK_DELAY_1_1_NS << RGMII_RX_CLK_DELAY_POS); phy_write(phydev, 
MSCC_PHY_RGMII_CNTL, reg_val); - rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_STANDARD); out_unlock: + rc = phy_restore_page(phydev, rc, rc > 0 ? 0 : rc); mutex_unlock(&phydev->lock); return rc; @@ -543,9 +849,812 @@ static int vsc85xx_set_tunable(struct phy_device *phydev, } } +/* mdiobus lock should be locked when using this function */ +static void vsc85xx_tr_write(struct phy_device *phydev, u16 addr, u32 val) +{ + __phy_write(phydev, MSCC_PHY_TR_MSB, val >> 16); + __phy_write(phydev, MSCC_PHY_TR_LSB, val & GENMASK(15, 0)); + __phy_write(phydev, MSCC_PHY_TR_CNTL, TR_WRITE | TR_ADDR(addr)); +} + +static int vsc85xx_eee_init_seq_set(struct phy_device *phydev) +{ + const struct reg_val init_eee[] = { + {0x0f82, 0x0012b00a}, + {0x1686, 0x00000004}, + {0x168c, 0x00d2c46f}, + {0x17a2, 0x00000620}, + {0x16a0, 0x00eeffdd}, + {0x16a6, 0x00071448}, + {0x16a4, 0x0013132f}, + {0x16a8, 0x00000000}, + {0x0ffc, 0x00c0a028}, + {0x0fe8, 0x0091b06c}, + {0x0fea, 0x00041600}, + {0x0f80, 0x00000af4}, + {0x0fec, 0x00901809}, + {0x0fee, 0x0000a6a1}, + {0x0ffe, 0x00b01007}, + {0x16b0, 0x00eeff00}, + {0x16b2, 0x00007000}, + {0x16b4, 0x00000814}, + }; + unsigned int i; + int oldpage; + + mutex_lock(&phydev->lock); + oldpage = phy_select_page(phydev, MSCC_PHY_PAGE_TR); + if (oldpage < 0) + goto out_unlock; + + for (i = 0; i < ARRAY_SIZE(init_eee); i++) + vsc85xx_tr_write(phydev, init_eee[i].reg, init_eee[i].val); + +out_unlock: + oldpage = phy_restore_page(phydev, oldpage, oldpage); + mutex_unlock(&phydev->lock); + + return oldpage; +} + +/* phydev->bus->mdio_lock should be locked when using this function */ +static int phy_base_write(struct phy_device *phydev, u32 regnum, u16 val) +{ + struct vsc8531_private *priv = phydev->priv; + + if (unlikely(!mutex_is_locked(&phydev->mdio.bus->mdio_lock))) { + dev_err(&phydev->mdio.dev, "MDIO bus lock not held!\n"); + dump_stack(); + } + + return __mdiobus_write(phydev->mdio.bus, priv->base_addr, regnum, val); +} + +/* phydev->bus->mdio_lock should be locked when using this function */ +static int phy_base_read(struct phy_device *phydev, u32 regnum) +{ + struct vsc8531_private *priv = phydev->priv; + + if (unlikely(!mutex_is_locked(&phydev->mdio.bus->mdio_lock))) { + dev_err(&phydev->mdio.dev, "MDIO bus lock not held!\n"); + dump_stack(); + } + + return __mdiobus_read(phydev->mdio.bus, priv->base_addr, regnum); +} + +/* bus->mdio_lock should be locked when using this function */ +static void vsc8584_csr_write(struct phy_device *phydev, u16 addr, u32 val) +{ + phy_base_write(phydev, MSCC_PHY_TR_MSB, val >> 16); + phy_base_write(phydev, MSCC_PHY_TR_LSB, val & GENMASK(15, 0)); + phy_base_write(phydev, MSCC_PHY_TR_CNTL, TR_WRITE | TR_ADDR(addr)); +} + +/* bus->mdio_lock should be locked when using this function */ +static int vsc8584_cmd(struct phy_device *phydev, u16 val) +{ + unsigned long deadline; + u16 reg_val; + + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, + MSCC_PHY_PAGE_EXTENDED_GPIO); + + phy_base_write(phydev, MSCC_PHY_PROC_CMD, PROC_CMD_NCOMPLETED | val); + + deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS); + do { + reg_val = phy_base_read(phydev, MSCC_PHY_PROC_CMD); + } while (time_before(jiffies, deadline) && + (reg_val & PROC_CMD_NCOMPLETED) && + !(reg_val & PROC_CMD_FAILED)); + + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD); + + if (reg_val & PROC_CMD_FAILED) + return -EIO; + + if (reg_val & PROC_CMD_NCOMPLETED) + return -ETIMEDOUT; + + return 0; +} + +/* bus->mdio_lock should be locked when using this function 
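+ * (i.e. the same mdiobus lock that vsc8584_config_init() takes). The
+ * helpers below go through phy_base_write()/phy_base_read(), which are
+ * raw __mdiobus_write()/__mdiobus_read() accesses to the base PHY of
+ * the package, so the caller must serialize the whole sequence itself.
+ * A minimal sketch of the expected calling pattern (illustrative only,
+ * not a hunk from this patch):
+ *
+ *	mutex_lock(&phydev->mdio.bus->mdio_lock);
+ *	ret = vsc8584_micro_assert_reset(phydev);
+ *	if (!ret)
+ *		vsc8584_micro_deassert_reset(phydev, true);
+ *	mutex_unlock(&phydev->mdio.bus->mdio_lock);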
*/ +static int vsc8584_micro_deassert_reset(struct phy_device *phydev, + bool patch_en) +{ + u32 enable, release; + + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, + MSCC_PHY_PAGE_EXTENDED_GPIO); + + enable = RUN_FROM_INT_ROM | MICRO_CLK_EN | DW8051_CLK_EN; + release = MICRO_NSOFT_RESET | RUN_FROM_INT_ROM | DW8051_CLK_EN | + MICRO_CLK_EN; + + if (patch_en) { + enable |= MICRO_PATCH_EN; + release |= MICRO_PATCH_EN; + + /* Clear all patches */ + phy_base_write(phydev, MSCC_INT_MEM_CNTL, READ_RAM); + } + + /* Enable 8051 Micro clock; CLEAR/SET patch present; disable PRAM clock + * override and addr. auto-incr; operate at 125 MHz + */ + phy_base_write(phydev, MSCC_DW8051_CNTL_STATUS, enable); + /* Release 8051 Micro SW reset */ + phy_base_write(phydev, MSCC_DW8051_CNTL_STATUS, release); + + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD); + + return 0; +} + +/* bus->mdio_lock should be locked when using this function */ +static int vsc8584_micro_assert_reset(struct phy_device *phydev) +{ + int ret; + u16 reg; + + ret = vsc8584_cmd(phydev, PROC_CMD_NOP); + if (ret) + return ret; + + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, + MSCC_PHY_PAGE_EXTENDED_GPIO); + + reg = phy_base_read(phydev, MSCC_INT_MEM_CNTL); + reg &= ~EN_PATCH_RAM_TRAP_ADDR(4); + phy_base_write(phydev, MSCC_INT_MEM_CNTL, reg); + + phy_base_write(phydev, MSCC_TRAP_ROM_ADDR(4), 0x005b); + phy_base_write(phydev, MSCC_PATCH_RAM_ADDR(4), 0x005b); + + reg = phy_base_read(phydev, MSCC_INT_MEM_CNTL); + reg |= EN_PATCH_RAM_TRAP_ADDR(4); + phy_base_write(phydev, MSCC_INT_MEM_CNTL, reg); + + phy_base_write(phydev, MSCC_PHY_PROC_CMD, PROC_CMD_NOP); + + reg = phy_base_read(phydev, MSCC_DW8051_CNTL_STATUS); + reg &= ~MICRO_NSOFT_RESET; + phy_base_write(phydev, MSCC_DW8051_CNTL_STATUS, reg); + + phy_base_write(phydev, MSCC_PHY_PROC_CMD, PROC_CMD_MCB_ACCESS_MAC_CONF | + PROC_CMD_SGMII_PORT(0) | PROC_CMD_NO_MAC_CONF | + PROC_CMD_READ); + + reg = phy_base_read(phydev, MSCC_INT_MEM_CNTL); + reg &= ~EN_PATCH_RAM_TRAP_ADDR(4); + phy_base_write(phydev, MSCC_INT_MEM_CNTL, reg); + + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD); + + return 0; +} + +/* bus->mdio_lock should be locked when using this function */ +static int vsc8584_get_fw_crc(struct phy_device *phydev, u16 start, u16 size, + u16 *crc) +{ + int ret; + + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_EXTENDED); + + phy_base_write(phydev, MSCC_PHY_VERIPHY_CNTL_2, start); + phy_base_write(phydev, MSCC_PHY_VERIPHY_CNTL_3, size); + + /* Start Micro command */ + ret = vsc8584_cmd(phydev, PROC_CMD_CRC16); + if (ret) + goto out; + + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_EXTENDED); + + *crc = phy_base_read(phydev, MSCC_PHY_VERIPHY_CNTL_2); + +out: + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD); + + return ret; +} + +/* bus->mdio_lock should be locked when using this function */ +static int vsc8584_patch_fw(struct phy_device *phydev, + const struct firmware *fw) +{ + int i, ret; + + ret = vsc8584_micro_assert_reset(phydev); + if (ret) { + dev_err(&phydev->mdio.dev, + "%s: failed to assert reset of micro\n", __func__); + return ret; + } + + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, + MSCC_PHY_PAGE_EXTENDED_GPIO); + + /* Hold 8051 Micro in SW Reset, Enable auto incr address and patch clock + * Disable the 8051 Micro clock + */ + phy_base_write(phydev, MSCC_DW8051_CNTL_STATUS, RUN_FROM_INT_ROM | + AUTOINC_ADDR | PATCH_RAM_CLK | MICRO_CLK_EN | + MICRO_CLK_DIVIDE(2)); + phy_base_write(phydev, 
MSCC_INT_MEM_CNTL, READ_PRAM | INT_MEM_WRITE_EN | + INT_MEM_DATA(2)); + phy_base_write(phydev, MSCC_INT_MEM_ADDR, 0x0000); + + for (i = 0; i < fw->size; i++) + phy_base_write(phydev, MSCC_INT_MEM_CNTL, READ_PRAM | + INT_MEM_WRITE_EN | fw->data[i]); + + /* Clear internal memory access */ + phy_base_write(phydev, MSCC_INT_MEM_CNTL, READ_RAM); + + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD); + + return 0; +} + +/* bus->mdio_lock should be locked when using this function */ +static bool vsc8574_is_serdes_init(struct phy_device *phydev) +{ + u16 reg; + bool ret; + + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, + MSCC_PHY_PAGE_EXTENDED_GPIO); + + reg = phy_base_read(phydev, MSCC_TRAP_ROM_ADDR(1)); + if (reg != 0x3eb7) { + ret = false; + goto out; + } + + reg = phy_base_read(phydev, MSCC_PATCH_RAM_ADDR(1)); + if (reg != 0x4012) { + ret = false; + goto out; + } + + reg = phy_base_read(phydev, MSCC_INT_MEM_CNTL); + if (reg != EN_PATCH_RAM_TRAP_ADDR(1)) { + ret = false; + goto out; + } + + reg = phy_base_read(phydev, MSCC_DW8051_CNTL_STATUS); + if ((MICRO_NSOFT_RESET | RUN_FROM_INT_ROM | DW8051_CLK_EN | + MICRO_CLK_EN) != (reg & MSCC_DW8051_VLD_MASK)) { + ret = false; + goto out; + } + + ret = true; +out: + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD); + + return ret; +} + +/* bus->mdio_lock should be locked when using this function */ +static int vsc8574_config_pre_init(struct phy_device *phydev) +{ + const struct reg_val pre_init1[] = { + {0x0fae, 0x000401bd}, + {0x0fac, 0x000f000f}, + {0x17a0, 0x00a0f147}, + {0x0fe4, 0x00052f54}, + {0x1792, 0x0027303d}, + {0x07fe, 0x00000704}, + {0x0fe0, 0x00060150}, + {0x0f82, 0x0012b00a}, + {0x0f80, 0x00000d74}, + {0x02e0, 0x00000012}, + {0x03a2, 0x00050208}, + {0x03b2, 0x00009186}, + {0x0fb0, 0x000e3700}, + {0x1688, 0x00049f81}, + {0x0fd2, 0x0000ffff}, + {0x168a, 0x00039fa2}, + {0x1690, 0x0020640b}, + {0x0258, 0x00002220}, + {0x025a, 0x00002a20}, + {0x025c, 0x00003060}, + {0x025e, 0x00003fa0}, + {0x03a6, 0x0000e0f0}, + {0x0f92, 0x00001489}, + {0x16a2, 0x00007000}, + {0x16a6, 0x00071448}, + {0x16a0, 0x00eeffdd}, + {0x0fe8, 0x0091b06c}, + {0x0fea, 0x00041600}, + {0x16b0, 0x00eeff00}, + {0x16b2, 0x00007000}, + {0x16b4, 0x00000814}, + {0x0f90, 0x00688980}, + {0x03a4, 0x0000d8f0}, + {0x0fc0, 0x00000400}, + {0x07fa, 0x0050100f}, + {0x0796, 0x00000003}, + {0x07f8, 0x00c3ff98}, + {0x0fa4, 0x0018292a}, + {0x168c, 0x00d2c46f}, + {0x17a2, 0x00000620}, + {0x16a4, 0x0013132f}, + {0x16a8, 0x00000000}, + {0x0ffc, 0x00c0a028}, + {0x0fec, 0x00901c09}, + {0x0fee, 0x0004a6a1}, + {0x0ffe, 0x00b01807}, + }; + const struct reg_val pre_init2[] = { + {0x0486, 0x0008a518}, + {0x0488, 0x006dc696}, + {0x048a, 0x00000912}, + {0x048e, 0x00000db6}, + {0x049c, 0x00596596}, + {0x049e, 0x00000514}, + {0x04a2, 0x00410280}, + {0x04a4, 0x00000000}, + {0x04a6, 0x00000000}, + {0x04a8, 0x00000000}, + {0x04aa, 0x00000000}, + {0x04ae, 0x007df7dd}, + {0x04b0, 0x006d95d4}, + {0x04b2, 0x00492410}, + }; + struct device *dev = &phydev->mdio.dev; + const struct firmware *fw; + unsigned int i; + u16 crc, reg; + bool serdes_init; + int ret; + + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD); + + /* all writes below are broadcasted to all PHYs in the same package */ + reg = phy_base_read(phydev, MSCC_PHY_EXT_CNTL_STATUS); + reg |= SMI_BROADCAST_WR_EN; + phy_base_write(phydev, MSCC_PHY_EXT_CNTL_STATUS, reg); + + phy_base_write(phydev, MII_VSC85XX_INT_MASK, 0); + + /* The below register writes are tweaking analog and electrical + * 
configuration that were determined through characterization by PHY + * engineers. These don't mean anything more than "these are the best + * values". + */ + phy_base_write(phydev, MSCC_PHY_EXT_PHY_CNTL_2, 0x0040); + + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TEST); + + phy_base_write(phydev, MSCC_PHY_TEST_PAGE_20, 0x4320); + phy_base_write(phydev, MSCC_PHY_TEST_PAGE_24, 0x0c00); + phy_base_write(phydev, MSCC_PHY_TEST_PAGE_9, 0x18ca); + phy_base_write(phydev, MSCC_PHY_TEST_PAGE_5, 0x1b20); + + reg = phy_base_read(phydev, MSCC_PHY_TEST_PAGE_8); + reg |= 0x8000; + phy_base_write(phydev, MSCC_PHY_TEST_PAGE_8, reg); + + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TR); + + for (i = 0; i < ARRAY_SIZE(pre_init1); i++) + vsc8584_csr_write(phydev, pre_init1[i].reg, pre_init1[i].val); + + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_EXTENDED_2); + + phy_base_write(phydev, MSCC_PHY_CU_PMD_TX_CNTL, 0x028e); + + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TR); + + for (i = 0; i < ARRAY_SIZE(pre_init2); i++) + vsc8584_csr_write(phydev, pre_init2[i].reg, pre_init2[i].val); + + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TEST); + + reg = phy_base_read(phydev, MSCC_PHY_TEST_PAGE_8); + reg &= ~0x8000; + phy_base_write(phydev, MSCC_PHY_TEST_PAGE_8, reg); + + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD); + + /* end of write broadcasting */ + reg = phy_base_read(phydev, MSCC_PHY_EXT_CNTL_STATUS); + reg &= ~SMI_BROADCAST_WR_EN; + phy_base_write(phydev, MSCC_PHY_EXT_CNTL_STATUS, reg); + + ret = request_firmware(&fw, MSCC_VSC8574_REVB_INT8051_FW, dev); + if (ret) { + dev_err(dev, "failed to load firmware %s, ret: %d\n", + MSCC_VSC8574_REVB_INT8051_FW, ret); + return ret; + } + + /* Add one byte to size for the one added by the patch_fw function */ + ret = vsc8584_get_fw_crc(phydev, + MSCC_VSC8574_REVB_INT8051_FW_START_ADDR, + fw->size + 1, &crc); + if (ret) + goto out; + + if (crc == MSCC_VSC8574_REVB_INT8051_FW_CRC) { + serdes_init = vsc8574_is_serdes_init(phydev); + + if (!serdes_init) { + ret = vsc8584_micro_assert_reset(phydev); + if (ret) { + dev_err(dev, + "%s: failed to assert reset of micro\n", + __func__); + return ret; + } + } + } else { + dev_dbg(dev, "FW CRC is not the expected one, patching FW\n"); + + serdes_init = false; + + if (vsc8584_patch_fw(phydev, fw)) + dev_warn(dev, + "failed to patch FW, expect non-optimal device\n"); + } + + if (!serdes_init) { + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, + MSCC_PHY_PAGE_EXTENDED_GPIO); + + phy_base_write(phydev, MSCC_TRAP_ROM_ADDR(1), 0x3eb7); + phy_base_write(phydev, MSCC_PATCH_RAM_ADDR(1), 0x4012); + phy_base_write(phydev, MSCC_INT_MEM_CNTL, + EN_PATCH_RAM_TRAP_ADDR(1)); + + vsc8584_micro_deassert_reset(phydev, false); + + /* Add one byte to size for the one added by the patch_fw + * function + */ + ret = vsc8584_get_fw_crc(phydev, + MSCC_VSC8574_REVB_INT8051_FW_START_ADDR, + fw->size + 1, &crc); + if (ret) + goto out; + + if (crc != MSCC_VSC8574_REVB_INT8051_FW_CRC) + dev_warn(dev, + "FW CRC after patching is not the expected one, expect non-optimal device\n"); + } + + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, + MSCC_PHY_PAGE_EXTENDED_GPIO); + + ret = vsc8584_cmd(phydev, PROC_CMD_1588_DEFAULT_INIT | + PROC_CMD_PHY_INIT); + +out: + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD); + + release_firmware(fw); + + return ret; +} + +/* bus->mdio_lock should be locked when using this function */ +static int 
vsc8584_config_pre_init(struct phy_device *phydev) +{ + const struct reg_val pre_init1[] = { + {0x07fa, 0x0050100f}, + {0x1688, 0x00049f81}, + {0x0f90, 0x00688980}, + {0x03a4, 0x0000d8f0}, + {0x0fc0, 0x00000400}, + {0x0f82, 0x0012b002}, + {0x1686, 0x00000004}, + {0x168c, 0x00d2c46f}, + {0x17a2, 0x00000620}, + {0x16a0, 0x00eeffdd}, + {0x16a6, 0x00071448}, + {0x16a4, 0x0013132f}, + {0x16a8, 0x00000000}, + {0x0ffc, 0x00c0a028}, + {0x0fe8, 0x0091b06c}, + {0x0fea, 0x00041600}, + {0x0f80, 0x00fffaff}, + {0x0fec, 0x00901809}, + {0x0ffe, 0x00b01007}, + {0x16b0, 0x00eeff00}, + {0x16b2, 0x00007000}, + {0x16b4, 0x00000814}, + }; + const struct reg_val pre_init2[] = { + {0x0486, 0x0008a518}, + {0x0488, 0x006dc696}, + {0x048a, 0x00000912}, + }; + const struct firmware *fw; + struct device *dev = &phydev->mdio.dev; + unsigned int i; + u16 crc, reg; + int ret; + + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD); + + /* all writes below are broadcasted to all PHYs in the same package */ + reg = phy_base_read(phydev, MSCC_PHY_EXT_CNTL_STATUS); + reg |= SMI_BROADCAST_WR_EN; + phy_base_write(phydev, MSCC_PHY_EXT_CNTL_STATUS, reg); + + phy_base_write(phydev, MII_VSC85XX_INT_MASK, 0); + + reg = phy_base_read(phydev, MSCC_PHY_BYPASS_CONTROL); + reg |= PARALLEL_DET_IGNORE_ADVERTISED; + phy_base_write(phydev, MSCC_PHY_BYPASS_CONTROL, reg); + + /* The below register writes are tweaking analog and electrical + * configuration that were determined through characterization by PHY + * engineers. These don't mean anything more than "these are the best + * values". + */ + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_EXTENDED_3); + + phy_base_write(phydev, MSCC_PHY_SERDES_TX_CRC_ERR_CNT, 0x2000); + + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TEST); + + phy_base_write(phydev, MSCC_PHY_TEST_PAGE_5, 0x1f20); + + reg = phy_base_read(phydev, MSCC_PHY_TEST_PAGE_8); + reg |= 0x8000; + phy_base_write(phydev, MSCC_PHY_TEST_PAGE_8, reg); + + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TR); + + phy_base_write(phydev, MSCC_PHY_TR_CNTL, TR_WRITE | TR_ADDR(0x2fa4)); + + reg = phy_base_read(phydev, MSCC_PHY_TR_MSB); + reg &= ~0x007f; + reg |= 0x0019; + phy_base_write(phydev, MSCC_PHY_TR_MSB, reg); + + phy_base_write(phydev, MSCC_PHY_TR_CNTL, TR_WRITE | TR_ADDR(0x0fa4)); + + for (i = 0; i < ARRAY_SIZE(pre_init1); i++) + vsc8584_csr_write(phydev, pre_init1[i].reg, pre_init1[i].val); + + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_EXTENDED_2); + + phy_base_write(phydev, MSCC_PHY_CU_PMD_TX_CNTL, 0x028e); + + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TR); + + for (i = 0; i < ARRAY_SIZE(pre_init2); i++) + vsc8584_csr_write(phydev, pre_init2[i].reg, pre_init2[i].val); + + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TEST); + + reg = phy_base_read(phydev, MSCC_PHY_TEST_PAGE_8); + reg &= ~0x8000; + phy_base_write(phydev, MSCC_PHY_TEST_PAGE_8, reg); + + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD); + + /* end of write broadcasting */ + reg = phy_base_read(phydev, MSCC_PHY_EXT_CNTL_STATUS); + reg &= ~SMI_BROADCAST_WR_EN; + phy_base_write(phydev, MSCC_PHY_EXT_CNTL_STATUS, reg); + + ret = request_firmware(&fw, MSCC_VSC8584_REVB_INT8051_FW, dev); + if (ret) { + dev_err(dev, "failed to load firmware %s, ret: %d\n", + MSCC_VSC8584_REVB_INT8051_FW, ret); + return ret; + } + + /* Add one byte to size for the one added by the patch_fw function */ + ret = vsc8584_get_fw_crc(phydev, + 
MSCC_VSC8584_REVB_INT8051_FW_START_ADDR, + fw->size + 1, &crc); + if (ret) + goto out; + + if (crc != MSCC_VSC8584_REVB_INT8051_FW_CRC) { + dev_dbg(dev, "FW CRC is not the expected one, patching FW\n"); + if (vsc8584_patch_fw(phydev, fw)) + dev_warn(dev, + "failed to patch FW, expect non-optimal device\n"); + } + + vsc8584_micro_deassert_reset(phydev, false); + + /* Add one byte to size for the one added by the patch_fw function */ + ret = vsc8584_get_fw_crc(phydev, + MSCC_VSC8584_REVB_INT8051_FW_START_ADDR, + fw->size + 1, &crc); + if (ret) + goto out; + + if (crc != MSCC_VSC8584_REVB_INT8051_FW_CRC) + dev_warn(dev, + "FW CRC after patching is not the expected one, expect non-optimal device\n"); + + ret = vsc8584_micro_assert_reset(phydev); + if (ret) + goto out; + + vsc8584_micro_deassert_reset(phydev, true); + +out: + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD); + + release_firmware(fw); + + return ret; +} + +/* Check if one PHY has already done the init of the parts common to all PHYs + * in the Quad PHY package. + */ +static bool vsc8584_is_pkg_init(struct phy_device *phydev, bool reversed) +{ + struct mdio_device **map = phydev->mdio.bus->mdio_map; + struct vsc8531_private *vsc8531; + struct phy_device *phy; + int i, addr; + + /* VSC8584 is a Quad PHY */ + for (i = 0; i < 4; i++) { + vsc8531 = phydev->priv; + + if (reversed) + addr = vsc8531->base_addr - i; + else + addr = vsc8531->base_addr + i; + + phy = container_of(map[addr], struct phy_device, mdio); + + if ((phy->phy_id & phydev->drv->phy_id_mask) != + (phydev->drv->phy_id & phydev->drv->phy_id_mask)) + continue; + + vsc8531 = phy->priv; + + if (vsc8531 && vsc8531->pkg_init) + return true; + } + + return false; +} + +static int vsc8584_config_init(struct phy_device *phydev) +{ + struct vsc8531_private *vsc8531 = phydev->priv; + u16 addr, val; + int ret, i; + + phydev->mdix_ctrl = ETH_TP_MDI_AUTO; + + mutex_lock(&phydev->mdio.bus->mdio_lock); + + __mdiobus_write(phydev->mdio.bus, phydev->mdio.addr, + MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_EXTENDED); + addr = __mdiobus_read(phydev->mdio.bus, phydev->mdio.addr, + MSCC_PHY_EXT_PHY_CNTL_4); + addr >>= PHY_CNTL_4_ADDR_POS; + + val = __mdiobus_read(phydev->mdio.bus, phydev->mdio.addr, + MSCC_PHY_ACTIPHY_CNTL); + if (val & PHY_ADDR_REVERSED) + vsc8531->base_addr = phydev->mdio.addr + addr; + else + vsc8531->base_addr = phydev->mdio.addr - addr; + + /* Some parts of the init sequence are identical for every PHY in the + * package. Some parts modify the GPIO register bank, a set of + * registers that affects all PHYs, and a few reset the microprocessor + * common to all PHYs. The CRC check responsible for checking the + * firmware within the 8051 microprocessor can only be accessed via the + * PHY whose internal address in the package is 0. + * All PHYs' interrupt mask registers have to be zeroed before enabling + * any PHY's interrupt in this register. + * For all these reasons, the init sequence has to be done once and + * only once, regardless of which PHY in the package is initialized + * first, and this pre-init function has to perform the package-critical + * parts of the sequence for all PHYs. + */ + if (!vsc8584_is_pkg_init(phydev, val & PHY_ADDR_REVERSED ? 
1 : 0)) { + if ((phydev->phy_id & phydev->drv->phy_id_mask) == + (PHY_ID_VSC8574 & phydev->drv->phy_id_mask)) + ret = vsc8574_config_pre_init(phydev); + else if ((phydev->phy_id & phydev->drv->phy_id_mask) == + (PHY_ID_VSC8584 & phydev->drv->phy_id_mask)) + ret = vsc8584_config_pre_init(phydev); + else + ret = -EINVAL; + + if (ret) + goto err; + } + + vsc8531->pkg_init = true; + + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, + MSCC_PHY_PAGE_EXTENDED_GPIO); + + val = phy_base_read(phydev, MSCC_PHY_MAC_CFG_FASTLINK); + val &= ~MAC_CFG_MASK; + if (phydev->interface == PHY_INTERFACE_MODE_QSGMII) + val |= MAC_CFG_QSGMII; + else + val |= MAC_CFG_SGMII; + + ret = phy_base_write(phydev, MSCC_PHY_MAC_CFG_FASTLINK, val); + if (ret) + goto err; + + val = PROC_CMD_MCB_ACCESS_MAC_CONF | PROC_CMD_RST_CONF_PORT | + PROC_CMD_READ_MOD_WRITE_PORT; + if (phydev->interface == PHY_INTERFACE_MODE_QSGMII) + val |= PROC_CMD_QSGMII_MAC; + else + val |= PROC_CMD_SGMII_MAC; + + ret = vsc8584_cmd(phydev, val); + if (ret) + goto err; + + usleep_range(10000, 20000); + + /* Disable SerDes for 100Base-FX */ + ret = vsc8584_cmd(phydev, PROC_CMD_FIBER_MEDIA_CONF | + PROC_CMD_FIBER_PORT(addr) | PROC_CMD_FIBER_DISABLE | + PROC_CMD_READ_MOD_WRITE_PORT | + PROC_CMD_RST_CONF_PORT | PROC_CMD_FIBER_100BASE_FX); + if (ret) + goto err; + + /* Disable SerDes for 1000Base-X */ + ret = vsc8584_cmd(phydev, PROC_CMD_FIBER_MEDIA_CONF | + PROC_CMD_FIBER_PORT(addr) | PROC_CMD_FIBER_DISABLE | + PROC_CMD_READ_MOD_WRITE_PORT | + PROC_CMD_RST_CONF_PORT | PROC_CMD_FIBER_1000BASE_X); + if (ret) + goto err; + + mutex_unlock(&phydev->mdio.bus->mdio_lock); + + phy_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD); + + val = phy_read(phydev, MSCC_PHY_EXT_PHY_CNTL_1); + val &= ~(MEDIA_OP_MODE_MASK | VSC8584_MAC_IF_SELECTION_MASK); + val |= MEDIA_OP_MODE_COPPER | (VSC8584_MAC_IF_SELECTION_SGMII << + VSC8584_MAC_IF_SELECTION_POS); + ret = phy_write(phydev, MSCC_PHY_EXT_PHY_CNTL_1, val); + + ret = genphy_soft_reset(phydev); + if (ret) + return ret; + + for (i = 0; i < vsc8531->nleds; i++) { + ret = vsc85xx_led_cntl_set(phydev, i, vsc8531->leds_mode[i]); + if (ret) + return ret; + } + + return genphy_config_init(phydev); + +err: + mutex_unlock(&phydev->mdio.bus->mdio_lock); + return ret; +} + static int vsc85xx_config_init(struct phy_device *phydev) { - int rc; + int rc, i; struct vsc8531_private *vsc8531 = phydev->priv; rc = vsc85xx_default_config(phydev); @@ -560,17 +1669,27 @@ static int vsc85xx_config_init(struct phy_device *phydev) if (rc) return rc; - rc = vsc85xx_led_cntl_set(phydev, 1, vsc8531->led_1_mode); + rc = vsc85xx_eee_init_seq_set(phydev); if (rc) return rc; - rc = vsc85xx_led_cntl_set(phydev, 0, vsc8531->led_0_mode); - if (rc) - return rc; + for (i = 0; i < vsc8531->nleds; i++) { + rc = vsc85xx_led_cntl_set(phydev, i, vsc8531->leds_mode[i]); + if (rc) + return rc; + } - rc = genphy_config_init(phydev); + return genphy_config_init(phydev); +} - return rc; +static int vsc8584_did_interrupt(struct phy_device *phydev) +{ + int rc = 0; + + if (phydev->interrupts == PHY_INTERRUPT_ENABLED) + rc = phy_read(phydev, MII_VSC85XX_INT_STATUS); + + return (rc < 0) ? 
0 : rc & MII_VSC85XX_INT_MASK_MASK; } static int vsc85xx_ack_interrupt(struct phy_device *phydev) @@ -622,11 +1741,67 @@ static int vsc85xx_read_status(struct phy_device *phydev) return genphy_read_status(phydev); } +static int vsc8574_probe(struct phy_device *phydev) +{ + struct vsc8531_private *vsc8531; + u32 default_mode[4] = {VSC8531_LINK_1000_ACTIVITY, + VSC8531_LINK_100_ACTIVITY, VSC8531_LINK_ACTIVITY, + VSC8531_DUPLEX_COLLISION}; + + vsc8531 = devm_kzalloc(&phydev->mdio.dev, sizeof(*vsc8531), GFP_KERNEL); + if (!vsc8531) + return -ENOMEM; + + phydev->priv = vsc8531; + + vsc8531->nleds = 4; + vsc8531->supp_led_modes = VSC8584_SUPP_LED_MODES; + vsc8531->hw_stats = vsc8584_hw_stats; + vsc8531->nstats = ARRAY_SIZE(vsc8584_hw_stats); + vsc8531->stats = devm_kmalloc_array(&phydev->mdio.dev, vsc8531->nstats, + sizeof(u64), GFP_KERNEL); + if (!vsc8531->stats) + return -ENOMEM; + + return vsc85xx_dt_led_modes_get(phydev, default_mode); +} + +static int vsc8584_probe(struct phy_device *phydev) +{ + struct vsc8531_private *vsc8531; + u32 default_mode[4] = {VSC8531_LINK_1000_ACTIVITY, + VSC8531_LINK_100_ACTIVITY, VSC8531_LINK_ACTIVITY, + VSC8531_DUPLEX_COLLISION}; + + if ((phydev->phy_id & MSCC_DEV_REV_MASK) != VSC8584_REVB) { + dev_err(&phydev->mdio.dev, "Only VSC8584 revB is supported.\n"); + return -ENOTSUPP; + } + + vsc8531 = devm_kzalloc(&phydev->mdio.dev, sizeof(*vsc8531), GFP_KERNEL); + if (!vsc8531) + return -ENOMEM; + + phydev->priv = vsc8531; + + vsc8531->nleds = 4; + vsc8531->supp_led_modes = VSC8584_SUPP_LED_MODES; + vsc8531->hw_stats = vsc8584_hw_stats; + vsc8531->nstats = ARRAY_SIZE(vsc8584_hw_stats); + vsc8531->stats = devm_kmalloc_array(&phydev->mdio.dev, vsc8531->nstats, + sizeof(u64), GFP_KERNEL); + if (!vsc8531->stats) + return -ENOMEM; + + return vsc85xx_dt_led_modes_get(phydev, default_mode); +} + static int vsc85xx_probe(struct phy_device *phydev) { struct vsc8531_private *vsc8531; int rate_magic; - int led_mode; + u32 default_mode[2] = {VSC8531_LINK_1000_ACTIVITY, + VSC8531_LINK_100_ACTIVITY}; rate_magic = vsc85xx_edge_rate_magic_get(phydev); if (rate_magic < 0) @@ -639,21 +1814,16 @@ static int vsc85xx_probe(struct phy_device *phydev) phydev->priv = vsc8531; vsc8531->rate_magic = rate_magic; + vsc8531->nleds = 2; + vsc8531->supp_led_modes = VSC85XX_SUPP_LED_MODES; + vsc8531->hw_stats = vsc85xx_hw_stats; + vsc8531->nstats = ARRAY_SIZE(vsc85xx_hw_stats); + vsc8531->stats = devm_kmalloc_array(&phydev->mdio.dev, vsc8531->nstats, + sizeof(u64), GFP_KERNEL); + if (!vsc8531->stats) + return -ENOMEM; - /* LED[0] and LED[1] mode */ - led_mode = vsc85xx_dt_led_mode_get(phydev, "vsc8531,led-0-mode", - VSC8531_LINK_1000_ACTIVITY); - if (led_mode < 0) - return led_mode; - vsc8531->led_0_mode = led_mode; - - led_mode = vsc85xx_dt_led_mode_get(phydev, "vsc8531,led-1-mode", - VSC8531_LINK_100_ACTIVITY); - if (led_mode < 0) - return led_mode; - vsc8531->led_1_mode = led_mode; - - return 0; + return vsc85xx_dt_led_modes_get(phydev, default_mode); } /* Microsemi VSC85xx PHYs */ @@ -678,6 +1848,11 @@ static struct phy_driver vsc85xx_driver[] = { .get_wol = &vsc85xx_wol_get, .get_tunable = &vsc85xx_get_tunable, .set_tunable = &vsc85xx_set_tunable, + .read_page = &vsc85xx_phy_read_page, + .write_page = &vsc85xx_phy_write_page, + .get_sset_count = &vsc85xx_get_sset_count, + .get_strings = &vsc85xx_get_strings, + .get_stats = &vsc85xx_get_stats, }, { .phy_id = PHY_ID_VSC8531, @@ -699,6 +1874,11 @@ static struct phy_driver vsc85xx_driver[] = { .get_wol = &vsc85xx_wol_get, .get_tunable = 
&vsc85xx_get_tunable, .set_tunable = &vsc85xx_set_tunable, + .read_page = &vsc85xx_phy_read_page, + .write_page = &vsc85xx_phy_write_page, + .get_sset_count = &vsc85xx_get_sset_count, + .get_strings = &vsc85xx_get_strings, + .get_stats = &vsc85xx_get_stats, }, { .phy_id = PHY_ID_VSC8540, @@ -720,6 +1900,11 @@ static struct phy_driver vsc85xx_driver[] = { .get_wol = &vsc85xx_wol_get, .get_tunable = &vsc85xx_get_tunable, .set_tunable = &vsc85xx_set_tunable, + .read_page = &vsc85xx_phy_read_page, + .write_page = &vsc85xx_phy_write_page, + .get_sset_count = &vsc85xx_get_sset_count, + .get_strings = &vsc85xx_get_strings, + .get_stats = &vsc85xx_get_stats, }, { .phy_id = PHY_ID_VSC8541, @@ -741,6 +1926,63 @@ static struct phy_driver vsc85xx_driver[] = { .get_wol = &vsc85xx_wol_get, .get_tunable = &vsc85xx_get_tunable, .set_tunable = &vsc85xx_set_tunable, + .read_page = &vsc85xx_phy_read_page, + .write_page = &vsc85xx_phy_write_page, + .get_sset_count = &vsc85xx_get_sset_count, + .get_strings = &vsc85xx_get_strings, + .get_stats = &vsc85xx_get_stats, +}, +{ + .phy_id = PHY_ID_VSC8574, + .name = "Microsemi GE VSC8574 SyncE", + .phy_id_mask = 0xfffffff0, + .features = PHY_GBIT_FEATURES, + .flags = PHY_HAS_INTERRUPT, + .soft_reset = &genphy_soft_reset, + .config_init = &vsc8584_config_init, + .config_aneg = &vsc85xx_config_aneg, + .aneg_done = &genphy_aneg_done, + .read_status = &vsc85xx_read_status, + .ack_interrupt = &vsc85xx_ack_interrupt, + .config_intr = &vsc85xx_config_intr, + .did_interrupt = &vsc8584_did_interrupt, + .suspend = &genphy_suspend, + .resume = &genphy_resume, + .probe = &vsc8574_probe, + .set_wol = &vsc85xx_wol_set, + .get_wol = &vsc85xx_wol_get, + .get_tunable = &vsc85xx_get_tunable, + .set_tunable = &vsc85xx_set_tunable, + .read_page = &vsc85xx_phy_read_page, + .write_page = &vsc85xx_phy_write_page, + .get_sset_count = &vsc85xx_get_sset_count, + .get_strings = &vsc85xx_get_strings, + .get_stats = &vsc85xx_get_stats, +}, +{ + .phy_id = PHY_ID_VSC8584, + .name = "Microsemi GE VSC8584 SyncE", + .phy_id_mask = 0xfffffff0, + .features = PHY_GBIT_FEATURES, + .flags = PHY_HAS_INTERRUPT, + .soft_reset = &genphy_soft_reset, + .config_init = &vsc8584_config_init, + .config_aneg = &vsc85xx_config_aneg, + .aneg_done = &genphy_aneg_done, + .read_status = &vsc85xx_read_status, + .ack_interrupt = &vsc85xx_ack_interrupt, + .config_intr = &vsc85xx_config_intr, + .did_interrupt = &vsc8584_did_interrupt, + .suspend = &genphy_suspend, + .resume = &genphy_resume, + .probe = &vsc8584_probe, + .get_tunable = &vsc85xx_get_tunable, + .set_tunable = &vsc85xx_set_tunable, + .read_page = &vsc85xx_phy_read_page, + .write_page = &vsc85xx_phy_write_page, + .get_sset_count = &vsc85xx_get_sset_count, + .get_strings = &vsc85xx_get_strings, + .get_stats = &vsc85xx_get_stats, } }; @@ -752,6 +1994,8 @@ static struct mdio_device_id __maybe_unused vsc85xx_tbl[] = { { PHY_ID_VSC8531, 0xfffffff0, }, { PHY_ID_VSC8540, 0xfffffff0, }, { PHY_ID_VSC8541, 0xfffffff0, }, + { PHY_ID_VSC8574, 0xfffffff0, }, + { PHY_ID_VSC8584, 0xfffffff0, }, { } }; diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 1ee25877c4d1..14509a8903c6 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -537,7 +537,7 @@ out_unlock: mutex_unlock(&phydev->lock); if (trigger) - phy_trigger_machine(phydev, sync); + phy_trigger_machine(phydev); return err; } @@ -635,6 +635,13 @@ int phy_speed_up(struct phy_device *phydev) } EXPORT_SYMBOL_GPL(phy_speed_up); +static void phy_queue_state_machine(struct phy_device *phydev, + unsigned 
int secs) +{ + mod_delayed_work(system_power_efficient_wq, &phydev->state_queue, + secs * HZ); +} + /** * phy_start_machine - start PHY state machine tracking * @phydev: the phy_device struct @@ -647,7 +654,7 @@ EXPORT_SYMBOL_GPL(phy_speed_up); */ void phy_start_machine(struct phy_device *phydev) { - queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, HZ); + phy_queue_state_machine(phydev, 1); } EXPORT_SYMBOL_GPL(phy_start_machine); @@ -655,19 +662,14 @@ EXPORT_SYMBOL_GPL(phy_start_machine); * phy_trigger_machine - trigger the state machine to run * * @phydev: the phy_device struct - * @sync: indicate whether we should wait for the workqueue cancelation * * Description: There has been a change in state which requires that the * state machine runs. */ -void phy_trigger_machine(struct phy_device *phydev, bool sync) +void phy_trigger_machine(struct phy_device *phydev) { - if (sync) - cancel_delayed_work_sync(&phydev->state_queue); - else - cancel_delayed_work(&phydev->state_queue); - queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0); + phy_queue_state_machine(phydev, 0); } /** @@ -703,7 +705,7 @@ static void phy_error(struct phy_device *phydev) phydev->state = PHY_HALTED; mutex_unlock(&phydev->lock); - phy_trigger_machine(phydev, false); + phy_trigger_machine(phydev); } /** @@ -745,7 +747,7 @@ static irqreturn_t phy_change(struct phy_device *phydev) mutex_unlock(&phydev->lock); /* reschedule state queue work to run as soon as possible */ - phy_trigger_machine(phydev, true); + phy_trigger_machine(phydev); if (phy_interrupt_is_valid(phydev) && phy_clear_interrupt(phydev)) goto phy_err; @@ -861,6 +863,8 @@ void phy_stop(struct phy_device *phydev) out_unlock: mutex_unlock(&phydev->lock); + phy_state_machine(&phydev->state_queue.work); + /* Cannot call flush_scheduled_work() here as desired because * of rtnl_lock(), but PHY_HALTED shall guarantee phy_change() * will not reenable interrupts. @@ -909,7 +913,7 @@ void phy_start(struct phy_device *phydev) } mutex_unlock(&phydev->lock); - phy_trigger_machine(phydev, true); + phy_trigger_machine(phydev); } EXPORT_SYMBOL(phy_start); @@ -1121,11 +1125,14 @@ void phy_state_machine(struct work_struct *work) /* Only re-schedule a PHY state machine change if we are polling the * PHY, if PHY_IGNORE_INTERRUPT is set, then we will be moving - * between states from phy_mac_interrupt() + * between states from phy_mac_interrupt(). + * + * In state PHY_HALTED the PHY gets suspended, so rescheduling the + * state machine would be pointless and possibly error prone when + * called from phy_disconnect() synchronously. 
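+ * phy_stop() now runs phy_state_machine() synchronously after
+ * releasing phydev->lock (see the hunk above), so the transition to
+ * PHY_HALTED is still handled exactly once without re-arming the
+ * delayed work.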
*/ - if (phy_polling_mode(phydev)) - queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, - PHY_STATE_TIME * HZ); + if (phy_polling_mode(phydev) && old_state != PHY_HALTED) + phy_queue_state_machine(phydev, PHY_STATE_TIME); } /** diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 19ab8a7d1e48..43cb08dcce81 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -29,6 +29,7 @@ #include <linux/module.h> #include <linux/mii.h> #include <linux/ethtool.h> +#include <linux/bitmap.h> #include <linux/phy.h> #include <linux/phy_led_triggers.h> #include <linux/mdio.h> @@ -42,6 +43,149 @@ MODULE_DESCRIPTION("PHY library"); MODULE_AUTHOR("Andy Fleming"); MODULE_LICENSE("GPL"); +__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_basic_features) __ro_after_init; +EXPORT_SYMBOL_GPL(phy_basic_features); + +__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_basic_t1_features) __ro_after_init; +EXPORT_SYMBOL_GPL(phy_basic_t1_features); + +__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_features) __ro_after_init; +EXPORT_SYMBOL_GPL(phy_gbit_features); + +__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_fibre_features) __ro_after_init; +EXPORT_SYMBOL_GPL(phy_gbit_fibre_features); + +__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_all_ports_features) __ro_after_init; +EXPORT_SYMBOL_GPL(phy_gbit_all_ports_features); + +__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_features) __ro_after_init; +EXPORT_SYMBOL_GPL(phy_10gbit_features); + +static const int phy_basic_ports_array[] = { + ETHTOOL_LINK_MODE_Autoneg_BIT, + ETHTOOL_LINK_MODE_TP_BIT, + ETHTOOL_LINK_MODE_MII_BIT, +}; + +static const int phy_fibre_port_array[] = { + ETHTOOL_LINK_MODE_FIBRE_BIT, +}; + +static const int phy_all_ports_features_array[] = { + ETHTOOL_LINK_MODE_Autoneg_BIT, + ETHTOOL_LINK_MODE_TP_BIT, + ETHTOOL_LINK_MODE_MII_BIT, + ETHTOOL_LINK_MODE_FIBRE_BIT, + ETHTOOL_LINK_MODE_AUI_BIT, + ETHTOOL_LINK_MODE_BNC_BIT, + ETHTOOL_LINK_MODE_Backplane_BIT, +}; + +static const int phy_10_100_features_array[] = { + ETHTOOL_LINK_MODE_10baseT_Half_BIT, + ETHTOOL_LINK_MODE_10baseT_Full_BIT, + ETHTOOL_LINK_MODE_100baseT_Half_BIT, + ETHTOOL_LINK_MODE_100baseT_Full_BIT, +}; + +static const int phy_basic_t1_features_array[] = { + ETHTOOL_LINK_MODE_TP_BIT, + ETHTOOL_LINK_MODE_100baseT_Full_BIT, +}; + +static const int phy_gbit_features_array[] = { + ETHTOOL_LINK_MODE_1000baseT_Half_BIT, + ETHTOOL_LINK_MODE_1000baseT_Full_BIT, +}; + +static const int phy_10gbit_features_array[] = { + ETHTOOL_LINK_MODE_10000baseT_Full_BIT, +}; + +__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init; +EXPORT_SYMBOL_GPL(phy_10gbit_full_features); + +static const int phy_10gbit_full_features_array[] = { + ETHTOOL_LINK_MODE_10baseT_Full_BIT, + ETHTOOL_LINK_MODE_100baseT_Full_BIT, + ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + ETHTOOL_LINK_MODE_10000baseT_Full_BIT, +}; + +static void features_init(void) +{ + /* 10/100 half/full*/ + linkmode_set_bit_array(phy_basic_ports_array, + ARRAY_SIZE(phy_basic_ports_array), + phy_basic_features); + linkmode_set_bit_array(phy_10_100_features_array, + ARRAY_SIZE(phy_10_100_features_array), + phy_basic_features); + + /* 100 full, TP */ + linkmode_set_bit_array(phy_basic_t1_features_array, + ARRAY_SIZE(phy_basic_t1_features_array), + phy_basic_t1_features); + + /* 10/100 half/full + 1000 half/full */ + linkmode_set_bit_array(phy_basic_ports_array, + ARRAY_SIZE(phy_basic_ports_array), + phy_gbit_features); + linkmode_set_bit_array(phy_10_100_features_array, + ARRAY_SIZE(phy_10_100_features_array), + 
phy_gbit_features); + linkmode_set_bit_array(phy_gbit_features_array, + ARRAY_SIZE(phy_gbit_features_array), + phy_gbit_features); + + /* 10/100 half/full + 1000 half/full + fibre */ + linkmode_set_bit_array(phy_basic_ports_array, + ARRAY_SIZE(phy_basic_ports_array), + phy_gbit_fibre_features); + linkmode_set_bit_array(phy_10_100_features_array, + ARRAY_SIZE(phy_10_100_features_array), + phy_gbit_fibre_features); + linkmode_set_bit_array(phy_gbit_features_array, + ARRAY_SIZE(phy_gbit_features_array), + phy_gbit_fibre_features); + linkmode_set_bit_array(phy_fibre_port_array, + ARRAY_SIZE(phy_fibre_port_array), + phy_gbit_fibre_features); + + /* 10/100 half/full + 1000 half/full + TP/MII/FIBRE/AUI/BNC/Backplane */ + linkmode_set_bit_array(phy_all_ports_features_array, + ARRAY_SIZE(phy_all_ports_features_array), + phy_gbit_all_ports_features); + linkmode_set_bit_array(phy_10_100_features_array, + ARRAY_SIZE(phy_10_100_features_array), + phy_gbit_all_ports_features); + linkmode_set_bit_array(phy_gbit_features_array, + ARRAY_SIZE(phy_gbit_features_array), + phy_gbit_all_ports_features); + + /* 10/100 half/full + 1000 half/full + 10G full */ + linkmode_set_bit_array(phy_all_ports_features_array, + ARRAY_SIZE(phy_all_ports_features_array), + phy_10gbit_features); + linkmode_set_bit_array(phy_10_100_features_array, + ARRAY_SIZE(phy_10_100_features_array), + phy_10gbit_features); + linkmode_set_bit_array(phy_gbit_features_array, + ARRAY_SIZE(phy_gbit_features_array), + phy_10gbit_features); + linkmode_set_bit_array(phy_10gbit_features_array, + ARRAY_SIZE(phy_10gbit_features_array), + phy_10gbit_features); + + /* 10/100/1000/10G full */ + linkmode_set_bit_array(phy_all_ports_features_array, + ARRAY_SIZE(phy_all_ports_features_array), + phy_10gbit_full_features); + linkmode_set_bit_array(phy_10gbit_full_features_array, + ARRAY_SIZE(phy_10gbit_full_features_array), + phy_10gbit_full_features); +} + void phy_device_free(struct phy_device *phydev) { put_device(&phydev->mdio.dev); @@ -885,8 +1029,6 @@ int phy_init_hw(struct phy_device *phydev) if (phydev->drv->soft_reset) ret = phydev->drv->soft_reset(phydev); - else - ret = genphy_soft_reset(phydev); if (ret < 0) return ret; @@ -927,13 +1069,13 @@ void phy_attached_print(struct phy_device *phydev, const char *fmt, ...) if (!fmt) { - dev_info(&phydev->mdio.dev, ATTACHED_FMT "\n", + phydev_info(phydev, ATTACHED_FMT "\n", drv_name, phydev_name(phydev), irq_str); } else { va_list ap; - dev_info(&phydev->mdio.dev, ATTACHED_FMT, + phydev_info(phydev, ATTACHED_FMT, drv_name, phydev_name(phydev), irq_str); @@ -1771,6 +1913,124 @@ int phy_set_max_speed(struct phy_device *phydev, u32 max_speed) } EXPORT_SYMBOL(phy_set_max_speed); +/** + * phy_remove_link_mode - Remove a supported link mode + * @phydev: phy_device structure to remove link mode from + * @link_mode: Link mode to be removed + * + * Description: Some MACs don't support all link modes that the PHY + * does, e.g. a 1G MAC often does not support 1000Half. This helper + * removes such a link mode. + */ +void phy_remove_link_mode(struct phy_device *phydev, u32 link_mode) +{ + WARN_ON(link_mode > 31); + + phydev->supported &= ~BIT(link_mode); + phydev->advertising = phydev->supported; +} +EXPORT_SYMBOL(phy_remove_link_mode); + +/** + * phy_support_sym_pause - Enable support of symmetrical pause + * @phydev: target phy_device struct + * + * Description: Called by the MAC to indicate it supports symmetrical + * Pause, but not asym pause. 
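+ *
+ * A MAC driver would typically call this once after connecting its
+ * PHY, e.g. (hypothetical caller, names not from this patch):
+ *
+ *	phydev = phy_connect(ndev, bus_id, adjust_link, interface);
+ *	if (!IS_ERR(phydev))
+ *		phy_support_sym_pause(phydev);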
+ */ +void phy_support_sym_pause(struct phy_device *phydev) +{ + phydev->supported |= SUPPORTED_Pause; + phydev->advertising = phydev->supported; +} +EXPORT_SYMBOL(phy_support_sym_pause); + +/** + * phy_support_asym_pause - Enable support of asym pause + * @phydev: target phy_device struct + * + * Description: Called by the MAC to indicate it supports Asym Pause. + */ +void phy_support_asym_pause(struct phy_device *phydev) +{ + phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; + phydev->advertising = phydev->supported; +} +EXPORT_SYMBOL(phy_support_asym_pause); + +/** + * phy_set_sym_pause - Configure symmetric Pause + * @phydev: target phy_device struct + * @rx: Receiver Pause is supported + * @tx: Transmit Pause is supported + * @autoneg: Auto neg should be used + * + * Description: Configure advertised Pause support depending on whether + * receiver Pause and Pause autoneg are supported. Generally called + * from the set_pauseparam .ndo. + */ +void phy_set_sym_pause(struct phy_device *phydev, bool rx, bool tx, + bool autoneg) +{ + phydev->supported &= ~SUPPORTED_Pause; + + if (rx && tx && autoneg) + phydev->supported |= SUPPORTED_Pause; + + phydev->advertising = phydev->supported; +} +EXPORT_SYMBOL(phy_set_sym_pause); + +/** + * phy_set_asym_pause - Configure Pause and Asym Pause + * @phydev: target phy_device struct + * @rx: Receiver Pause is supported + * @tx: Transmit Pause is supported + * + * Description: Configure advertised Pause support depending on whether + * transmit and receiver Pause are supported. If there has been a + * change in advertising, trigger a new autoneg. Generally called from + * the set_pauseparam .ndo. + */ +void phy_set_asym_pause(struct phy_device *phydev, bool rx, bool tx) +{ + u16 oldadv = phydev->advertising; + u16 newadv = oldadv &= ~(SUPPORTED_Pause | SUPPORTED_Asym_Pause); + + if (rx) + newadv |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; + if (tx) + newadv ^= SUPPORTED_Asym_Pause; + + if (oldadv != newadv) { + phydev->advertising = newadv; + + if (phydev->autoneg) + phy_start_aneg(phydev); + } +} +EXPORT_SYMBOL(phy_set_asym_pause); + +/** + * phy_validate_pause - Test if the PHY/MAC support the pause configuration + * @phydev: phy_device struct + * @pp: requested pause configuration + * + * Description: Test if the PHY/MAC combination supports the Pause + * configuration the user is requesting. Returns true if it is + * supported, false otherwise. + */ +bool phy_validate_pause(struct phy_device *phydev, + struct ethtool_pauseparam *pp) +{ + if (!(phydev->supported & SUPPORTED_Pause) || + (!(phydev->supported & SUPPORTED_Asym_Pause) && + pp->rx_pause != pp->tx_pause)) + return false; + return true; +} +EXPORT_SYMBOL(phy_validate_pause); + static void of_set_phy_supported(struct phy_device *phydev) { struct device_node *node = phydev->mdio.dev.of_node; @@ -1826,6 +2086,7 @@ static int phy_probe(struct device *dev) struct phy_device *phydev = to_phy_device(dev); struct device_driver *drv = phydev->mdio.dev.driver; struct phy_driver *phydrv = to_phy_driver(drv); + u32 features; int err = 0; phydev->drv = phydrv; @@ -1846,7 +2107,8 @@ static int phy_probe(struct device *dev) * a controller will attach, and may modify one * or both of these values */ - phydev->supported = phydrv->features; + ethtool_convert_link_mode_to_legacy_u32(&features, phydrv->features); + phydev->supported = features; of_set_phy_supported(phydev); phydev->advertising = phydev->supported; @@ -1866,10 +2128,14 @@ static int phy_probe(struct device *dev) * (e.g. 
hardware erratum) where the driver wants to set only one * of these bits. */ - if (phydrv->features & (SUPPORTED_Pause | SUPPORTED_Asym_Pause)) { + if (test_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydrv->features) || + test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydrv->features)) { phydev->supported &= ~(SUPPORTED_Pause | SUPPORTED_Asym_Pause); - phydev->supported |= phydrv->features & - (SUPPORTED_Pause | SUPPORTED_Asym_Pause); + if (test_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydrv->features)) + phydev->supported |= SUPPORTED_Pause; + if (test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, + phydrv->features)) + phydev->supported |= SUPPORTED_Asym_Pause; } else { phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; } @@ -1982,9 +2248,7 @@ static struct phy_driver genphy_driver = { .name = "Generic PHY", .soft_reset = genphy_no_soft_reset, .config_init = genphy_config_init, - .features = PHY_GBIT_FEATURES | SUPPORTED_MII | - SUPPORTED_AUI | SUPPORTED_FIBRE | - SUPPORTED_BNC, + .features = PHY_GBIT_ALL_PORTS_FEATURES, .aneg_done = genphy_aneg_done, .suspend = genphy_suspend, .resume = genphy_resume, @@ -1999,6 +2263,8 @@ static int __init phy_init(void) if (rc) return rc; + features_init(); + rc = phy_driver_register(&genphy_10g_driver, THIS_MODULE); if (rc) goto err_10g; diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 7abca86c3aa9..9b8dd0d0ee42 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -68,33 +68,6 @@ struct phylink { struct sfp_bus *sfp_bus; }; -static inline void linkmode_zero(unsigned long *dst) -{ - bitmap_zero(dst, __ETHTOOL_LINK_MODE_MASK_NBITS); -} - -static inline void linkmode_copy(unsigned long *dst, const unsigned long *src) -{ - bitmap_copy(dst, src, __ETHTOOL_LINK_MODE_MASK_NBITS); -} - -static inline void linkmode_and(unsigned long *dst, const unsigned long *a, - const unsigned long *b) -{ - bitmap_and(dst, a, b, __ETHTOOL_LINK_MODE_MASK_NBITS); -} - -static inline void linkmode_or(unsigned long *dst, const unsigned long *a, - const unsigned long *b) -{ - bitmap_or(dst, a, b, __ETHTOOL_LINK_MODE_MASK_NBITS); -} - -static inline bool linkmode_empty(const unsigned long *src) -{ - return bitmap_empty(src, __ETHTOOL_LINK_MODE_MASK_NBITS); -} - /** * phylink_set_port_modes() - set the port type modes in the ethtool mask * @mask: ethtool link mode mask @@ -907,6 +880,9 @@ void phylink_start(struct phylink *pl) phylink_an_mode_str(pl->link_an_mode), phy_modes(pl->link_config.interface)); + /* Always set the carrier off */ + netif_carrier_off(pl->netdev); + /* Apply the link configuration to the MAC when starting. This allows * a fixed-link to start with the correct parameters, and also * ensures that we set the appropriate advertisement for Serdes links. 
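 * (The netif_carrier_off() added above guarantees that phylink starts
 * with the carrier in a known "off" state: if a driver had left the
 * carrier on, the resolver's first netif_carrier_on() would be a
 * no-op and the stack could transmit before the link is resolved.)
 */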
diff --git a/drivers/net/phy/ste10Xp.c b/drivers/net/phy/ste10Xp.c index fbd548a1ad84..2fe9a87b55b5 100644 --- a/drivers/net/phy/ste10Xp.c +++ b/drivers/net/phy/ste10Xp.c @@ -86,7 +86,7 @@ static struct phy_driver ste10xp_pdriver[] = { .phy_id = STE101P_PHY_ID, .phy_id_mask = 0xfffffff0, .name = "STe101p", - .features = PHY_BASIC_FEATURES | SUPPORTED_Pause, + .features = PHY_BASIC_FEATURES, .flags = PHY_HAS_INTERRUPT, .config_init = ste10Xp_config_init, .ack_interrupt = ste10Xp_ack_interrupt, @@ -97,7 +97,7 @@ static struct phy_driver ste10xp_pdriver[] = { .phy_id = STE100P_PHY_ID, .phy_id_mask = 0xffffffff, .name = "STe100p", - .features = PHY_BASIC_FEATURES | SUPPORTED_Pause, + .features = PHY_BASIC_FEATURES, .flags = PHY_HAS_INTERRUPT, .config_init = ste10Xp_config_init, .ack_interrupt = ste10Xp_ack_interrupt, diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index 02ad03a2fab7..500bc0027c1b 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -2400,7 +2400,7 @@ ppp_mp_reconstruct(struct ppp *ppp) if (ppp->mrru == 0) /* do nothing until mrru is set */ return NULL; - head = list->next; + head = __skb_peek(list); tail = NULL; skb_queue_walk_safe(list, p, tmp) { again: diff --git a/drivers/net/tap.c b/drivers/net/tap.c index f0f7cd977667..f03004f37eca 100644 --- a/drivers/net/tap.c +++ b/drivers/net/tap.c @@ -619,7 +619,7 @@ static inline struct sk_buff *tap_alloc_skb(struct sock *sk, size_t prepad, #define TAP_RESERVE HH_DATA_OFF(ETH_HLEN) /* Get packet from user space buffer */ -static ssize_t tap_get_user(struct tap_queue *q, struct msghdr *m, +static ssize_t tap_get_user(struct tap_queue *q, void *msg_control, struct iov_iter *from, int noblock) { int good_linear = SKB_MAX_HEAD(TAP_RESERVE); @@ -663,7 +663,7 @@ static ssize_t tap_get_user(struct tap_queue *q, struct msghdr *m, if (unlikely(len < ETH_HLEN)) goto err; - if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) { + if (msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) { struct iov_iter i; copylen = vnet_hdr.hdr_len ? 
@@ -724,11 +724,11 @@ static ssize_t tap_get_user(struct tap_queue *q, struct msghdr *m, tap = rcu_dereference(q->tap); /* copy skb_ubuf_info for callback when skb has no error */ if (zerocopy) { - skb_shinfo(skb)->destructor_arg = m->msg_control; + skb_shinfo(skb)->destructor_arg = msg_control; skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG; - } else if (m && m->msg_control) { - struct ubuf_info *uarg = m->msg_control; + } else if (msg_control) { + struct ubuf_info *uarg = msg_control; uarg->callback(uarg, false); } @@ -830,8 +830,7 @@ static ssize_t tap_do_read(struct tap_queue *q, ssize_t ret = 0; if (!iov_iter_count(to)) { - if (skb) - kfree_skb(skb); + kfree_skb(skb); return 0; } @@ -1146,11 +1145,87 @@ static const struct file_operations tap_fops = { #endif }; +static int tap_get_user_xdp(struct tap_queue *q, struct xdp_buff *xdp) +{ + struct tun_xdp_hdr *hdr = xdp->data_hard_start; + struct virtio_net_hdr *gso = &hdr->gso; + int buflen = hdr->buflen; + int vnet_hdr_len = 0; + struct tap_dev *tap; + struct sk_buff *skb; + int err, depth; + + if (q->flags & IFF_VNET_HDR) + vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz); + + skb = build_skb(xdp->data_hard_start, buflen); + if (!skb) { + err = -ENOMEM; + goto err; + } + + skb_reserve(skb, xdp->data - xdp->data_hard_start); + skb_put(skb, xdp->data_end - xdp->data); + + skb_set_network_header(skb, ETH_HLEN); + skb_reset_mac_header(skb); + skb->protocol = eth_hdr(skb)->h_proto; + + if (vnet_hdr_len) { + err = virtio_net_hdr_to_skb(skb, gso, tap_is_little_endian(q)); + if (err) + goto err_kfree; + } + + skb_probe_transport_header(skb, ETH_HLEN); + + /* Move network header to the right position for VLAN tagged packets */ + if ((skb->protocol == htons(ETH_P_8021Q) || + skb->protocol == htons(ETH_P_8021AD)) && + __vlan_get_protocol(skb, skb->protocol, &depth) != 0) + skb_set_network_header(skb, depth); + + rcu_read_lock(); + tap = rcu_dereference(q->tap); + if (tap) { + skb->dev = tap->dev; + dev_queue_xmit(skb); + } else { + kfree_skb(skb); + } + rcu_read_unlock(); + + return 0; + +err_kfree: + kfree_skb(skb); +err: + rcu_read_lock(); + tap = rcu_dereference(q->tap); + if (tap && tap->count_tx_dropped) + tap->count_tx_dropped(tap); + rcu_read_unlock(); + return err; +} + static int tap_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len) { struct tap_queue *q = container_of(sock, struct tap_queue, sock); - return tap_get_user(q, m, &m->msg_iter, m->msg_flags & MSG_DONTWAIT); + struct tun_msg_ctl *ctl = m->msg_control; + struct xdp_buff *xdp; + int i; + + if (ctl && (ctl->type == TUN_MSG_PTR)) { + for (i = 0; i < ctl->num; i++) { + xdp = &((struct xdp_buff *)ctl->ptr)[i]; + tap_get_user_xdp(q, xdp); + } + return 0; + } + + return tap_get_user(q, ctl ? 
ctl->ptr : NULL, &m->msg_iter, + m->msg_flags & MSG_DONTWAIT); } static int tap_recvmsg(struct socket *sock, struct msghdr *m, @@ -1160,8 +1235,7 @@ static int tap_recvmsg(struct socket *sock, struct msghdr *m, struct sk_buff *skb = m->msg_control; int ret; if (flags & ~(MSG_DONTWAIT|MSG_TRUNC)) { - if (skb) - kfree_skb(skb); + kfree_skb(skb); return -EINVAL; } ret = tap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT, skb); diff --git a/drivers/net/thunderbolt.c b/drivers/net/thunderbolt.c index e0d6760f3219..c48c3a1eb1f8 100644 --- a/drivers/net/thunderbolt.c +++ b/drivers/net/thunderbolt.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Networking over Thunderbolt cable using Apple ThunderboltIP protocol * @@ -5,10 +6,6 @@ * Authors: Amir Levy <amir.jer.levy@intel.com> * Michael Jamet <michael.jamet@intel.com> * Mika Westerberg <mika.westerberg@linux.intel.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #include <linux/atomic.h> diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 50e9cc19023a..92fa35abba92 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -113,7 +113,6 @@ do { \ } while (0) #endif -#define TUN_HEADROOM 256 #define TUN_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD) /* TUN device flags */ @@ -563,12 +562,11 @@ static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash) e->rps_rxhash = hash; } -/* We try to identify a flow through its rxhash first. The reason that +/* We try to identify a flow through its rxhash. The reason that * we do not check rxq no. is because some cards(e.g 82599), chooses * the rxq based on the txq where the last packet of the flow comes. As * the userspace application move between processors, we may get a - * different rxq no. here. If we could not get rxhash, then we would - * hope the rxq no. may help here. + * different rxq no. here. */ static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb) { @@ -579,18 +577,13 @@ static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb) numqueues = READ_ONCE(tun->numqueues); txq = __skb_get_hash_symmetric(skb); - if (txq) { - e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq); - if (e) { - tun_flow_save_rps_rxhash(e, txq); - txq = e->queue_index; - } else - /* use multiply and shift instead of expensive divide */ - txq = ((u64)txq * numqueues) >> 32; - } else if (likely(skb_rx_queue_recorded(skb))) { - txq = skb_get_rx_queue(skb); - while (unlikely(txq >= numqueues)) - txq -= numqueues; + e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq); + if (e) { + tun_flow_save_rps_rxhash(e, txq); + txq = e->queue_index; + } else { + /* use multiply and shift instead of expensive divide */ + txq = ((u64)txq * numqueues) >> 32; } return txq; @@ -870,6 +863,9 @@ static int tun_attach(struct tun_struct *tun, struct file *file, tun_napi_init(tun, tfile, napi, napi_frags); } + if (rtnl_dereference(tun->xdp_prog)) + sock_set_flag(&tfile->sk, SOCK_XDP); + tun_set_real_num_queues(tun); /* device is allowed to go away first, so no need to hold extra @@ -1045,16 +1041,13 @@ static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb) /* Select queue was not called for the skbuff, so we extract the * RPS hash and save it into the flow_table here. 
*/ + struct tun_flow_entry *e; __u32 rxhash; rxhash = __skb_get_hash_symmetric(skb); - if (rxhash) { - struct tun_flow_entry *e; - e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)], - rxhash); - if (e) - tun_flow_save_rps_rxhash(e, rxhash); - } + e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)], rxhash); + if (e) + tun_flow_save_rps_rxhash(e, rxhash); } #endif } @@ -1205,13 +1198,29 @@ static int tun_xdp_set(struct net_device *dev, struct bpf_prog *prog, struct netlink_ext_ack *extack) { struct tun_struct *tun = netdev_priv(dev); + struct tun_file *tfile; struct bpf_prog *old_prog; + int i; old_prog = rtnl_dereference(tun->xdp_prog); rcu_assign_pointer(tun->xdp_prog, prog); if (old_prog) bpf_prog_put(old_prog); + for (i = 0; i < tun->numqueues; i++) { + tfile = rtnl_dereference(tun->tfiles[i]); + if (prog) + sock_set_flag(&tfile->sk, SOCK_XDP); + else + sock_reset_flag(&tfile->sk, SOCK_XDP); + } + list_for_each_entry(tfile, &tun->disabled, next) { + if (prog) + sock_set_flag(&tfile->sk, SOCK_XDP); + else + sock_reset_flag(&tfile->sk, SOCK_XDP); + } + return 0; } @@ -1575,6 +1584,55 @@ static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile, return true; } +static struct sk_buff *__tun_build_skb(struct page_frag *alloc_frag, char *buf, + int buflen, int len, int pad) +{ + struct sk_buff *skb = build_skb(buf, buflen); + + if (!skb) + return ERR_PTR(-ENOMEM); + + skb_reserve(skb, pad); + skb_put(skb, len); + + get_page(alloc_frag->page); + alloc_frag->offset += buflen; + + return skb; +} + +static int tun_xdp_act(struct tun_struct *tun, struct bpf_prog *xdp_prog, + struct xdp_buff *xdp, u32 act) +{ + int err; + + switch (act) { + case XDP_REDIRECT: + err = xdp_do_redirect(tun->dev, xdp, xdp_prog); + if (err) + return err; + break; + case XDP_TX: + err = tun_xdp_tx(tun->dev, xdp); + if (err < 0) + return err; + break; + case XDP_PASS: + break; + default: + bpf_warn_invalid_xdp_action(act); + /* fall through */ + case XDP_ABORTED: + trace_xdp_exception(tun->dev, xdp_prog, act); + /* fall through */ + case XDP_DROP: + this_cpu_inc(tun->pcpu_stats->rx_dropped); + break; + } + + return act; +} + static struct sk_buff *tun_build_skb(struct tun_struct *tun, struct tun_file *tfile, struct iov_iter *from, @@ -1582,18 +1640,17 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, int len, int *skb_xdp) { struct page_frag *alloc_frag = &current->task_frag; - struct sk_buff *skb; struct bpf_prog *xdp_prog; int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); - unsigned int delta = 0; char *buf; size_t copied; - int err, pad = TUN_RX_PAD; + int pad = TUN_RX_PAD; + int err = 0; rcu_read_lock(); xdp_prog = rcu_dereference(tun->xdp_prog); if (xdp_prog) - pad += TUN_HEADROOM; + pad += XDP_PACKET_HEADROOM; buflen += SKB_DATA_ALIGN(len + pad); rcu_read_unlock(); @@ -1612,17 +1669,18 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, * of xdp_prog above, this should be rare and for simplicity * we do XDP on skb in case the headroom is not enough. 
*/ - if (hdr->gso_type || !xdp_prog) + if (hdr->gso_type || !xdp_prog) { *skb_xdp = 1; - else - *skb_xdp = 0; + return __tun_build_skb(alloc_frag, buf, buflen, len, pad); + } + + *skb_xdp = 0; local_bh_disable(); rcu_read_lock(); xdp_prog = rcu_dereference(tun->xdp_prog); - if (xdp_prog && !*skb_xdp) { + if (xdp_prog) { struct xdp_buff xdp; - void *orig_data; u32 act; xdp.data_hard_start = buf; @@ -1630,66 +1688,33 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, xdp_set_data_meta_invalid(&xdp); xdp.data_end = xdp.data + len; xdp.rxq = &tfile->xdp_rxq; - orig_data = xdp.data; - act = bpf_prog_run_xdp(xdp_prog, &xdp); - switch (act) { - case XDP_REDIRECT: - get_page(alloc_frag->page); - alloc_frag->offset += buflen; - err = xdp_do_redirect(tun->dev, &xdp, xdp_prog); - xdp_do_flush_map(); - if (err) - goto err_redirect; - rcu_read_unlock(); - local_bh_enable(); - return NULL; - case XDP_TX: + act = bpf_prog_run_xdp(xdp_prog, &xdp); + if (act == XDP_REDIRECT || act == XDP_TX) { get_page(alloc_frag->page); alloc_frag->offset += buflen; - if (tun_xdp_tx(tun->dev, &xdp) < 0) - goto err_redirect; - rcu_read_unlock(); - local_bh_enable(); - return NULL; - case XDP_PASS: - delta = orig_data - xdp.data; - len = xdp.data_end - xdp.data; - break; - default: - bpf_warn_invalid_xdp_action(act); - /* fall through */ - case XDP_ABORTED: - trace_xdp_exception(tun->dev, xdp_prog, act); - /* fall through */ - case XDP_DROP: - goto err_xdp; } - } + err = tun_xdp_act(tun, xdp_prog, &xdp, act); + if (err < 0) + goto err_xdp; + if (err == XDP_REDIRECT) + xdp_do_flush_map(); + if (err != XDP_PASS) + goto out; - skb = build_skb(buf, buflen); - if (!skb) { - rcu_read_unlock(); - local_bh_enable(); - return ERR_PTR(-ENOMEM); + pad = xdp.data - xdp.data_hard_start; + len = xdp.data_end - xdp.data; } - - skb_reserve(skb, pad - delta); - skb_put(skb, len); - get_page(alloc_frag->page); - alloc_frag->offset += buflen; - rcu_read_unlock(); local_bh_enable(); - return skb; + return __tun_build_skb(alloc_frag, buf, buflen, len, pad); -err_redirect: - put_page(alloc_frag->page); err_xdp: + put_page(alloc_frag->page); +out: rcu_read_unlock(); local_bh_enable(); - this_cpu_inc(tun->pcpu_stats->rx_dropped); return NULL; } @@ -2350,18 +2375,133 @@ static void tun_sock_write_space(struct sock *sk) kill_fasync(&tfile->fasync, SIGIO, POLL_OUT); } +static int tun_xdp_one(struct tun_struct *tun, + struct tun_file *tfile, + struct xdp_buff *xdp, int *flush) +{ + struct tun_xdp_hdr *hdr = xdp->data_hard_start; + struct virtio_net_hdr *gso = &hdr->gso; + struct tun_pcpu_stats *stats; + struct bpf_prog *xdp_prog; + struct sk_buff *skb = NULL; + u32 rxhash = 0, act; + int buflen = hdr->buflen; + int err = 0; + bool skb_xdp = false; + + xdp_prog = rcu_dereference(tun->xdp_prog); + if (xdp_prog) { + if (gso->gso_type) { + skb_xdp = true; + goto build; + } + xdp_set_data_meta_invalid(xdp); + xdp->rxq = &tfile->xdp_rxq; + + act = bpf_prog_run_xdp(xdp_prog, xdp); + err = tun_xdp_act(tun, xdp_prog, xdp, act); + if (err < 0) { + put_page(virt_to_head_page(xdp->data)); + return err; + } + + switch (err) { + case XDP_REDIRECT: + *flush = true; + /* fall through */ + case XDP_TX: + return 0; + case XDP_PASS: + break; + default: + put_page(virt_to_head_page(xdp->data)); + return 0; + } + } + +build: + skb = build_skb(xdp->data_hard_start, buflen); + if (!skb) { + err = -ENOMEM; + goto out; + } + + skb_reserve(skb, xdp->data - xdp->data_hard_start); + skb_put(skb, xdp->data_end - xdp->data); + + if (virtio_net_hdr_to_skb(skb, gso, 
tun_is_little_endian(tun))) { + this_cpu_inc(tun->pcpu_stats->rx_frame_errors); + kfree_skb(skb); + err = -EINVAL; + goto out; + } + + skb->protocol = eth_type_trans(skb, tun->dev); + skb_reset_network_header(skb); + skb_probe_transport_header(skb, 0); + + if (skb_xdp) { + err = do_xdp_generic(xdp_prog, skb); + if (err != XDP_PASS) + goto out; + } + + if (!rcu_dereference(tun->steering_prog)) + rxhash = __skb_get_hash_symmetric(skb); + + netif_receive_skb(skb); + + stats = get_cpu_ptr(tun->pcpu_stats); + u64_stats_update_begin(&stats->syncp); + stats->rx_packets++; + stats->rx_bytes += skb->len; + u64_stats_update_end(&stats->syncp); + put_cpu_ptr(stats); + + if (rxhash) + tun_flow_update(tun, rxhash, tfile); + +out: + return err; +} + static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len) { - int ret; + int ret, i; struct tun_file *tfile = container_of(sock, struct tun_file, socket); struct tun_struct *tun = tun_get(tfile); + struct tun_msg_ctl *ctl = m->msg_control; + struct xdp_buff *xdp; if (!tun) return -EBADFD; - ret = tun_get_user(tun, tfile, m->msg_control, &m->msg_iter, + if (ctl && (ctl->type == TUN_MSG_PTR)) { + int n = ctl->num; + int flush = 0; + + local_bh_disable(); + rcu_read_lock(); + + for (i = 0; i < n; i++) { + xdp = &((struct xdp_buff *)ctl->ptr)[i]; + tun_xdp_one(tun, tfile, xdp, &flush); + } + + if (flush) + xdp_do_flush_map(); + + rcu_read_unlock(); + local_bh_enable(); + + ret = total_len; + goto out; + } + + ret = tun_get_user(tun, tfile, ctl ? ctl->ptr : NULL, &m->msg_iter, m->msg_flags & MSG_DONTWAIT, m->msg_flags & MSG_MORE); +out: tun_put(tun); return ret; } diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 1eaec648bd1f..50c05d0f44cb 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -779,8 +779,7 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_ hrtimer_init(&ctx->tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); ctx->tx_timer.function = &cdc_ncm_tx_timer_cb; - ctx->bh.data = (unsigned long)dev; - ctx->bh.func = cdc_ncm_txpath_bh; + tasklet_init(&ctx->bh, cdc_ncm_txpath_bh, (unsigned long)dev); atomic_set(&ctx->stop, 0); spin_lock_init(&ctx->mtx); @@ -1601,11 +1600,8 @@ cdc_ncm_speed_change(struct usbnet *dev, static void cdc_ncm_status(struct usbnet *dev, struct urb *urb) { - struct cdc_ncm_ctx *ctx; struct usb_cdc_notification *event; - ctx = (struct cdc_ncm_ctx *)dev->data[0]; - if (urb->actual_length < sizeof(*event)) return; diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index c3c9ba44e2a1..be1917be28f2 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -1,18 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * Copyright (C) 2015 Microchip Technology - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see <http://www.gnu.org/licenses/>. 
*/ #include <linux/version.h> #include <linux/module.h> @@ -948,11 +936,9 @@ static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset, ret = lan78xx_read_raw_otp(dev, 0, 1, &sig); if (ret == 0) { - if (sig == OTP_INDICATOR_1) - offset = offset; - else if (sig == OTP_INDICATOR_2) + if (sig == OTP_INDICATOR_2) offset += 0x100; - else + else if (sig != OTP_INDICATOR_1) ret = -EINVAL; if (!ret) ret = lan78xx_read_raw_otp(dev, offset, length, data); @@ -1027,7 +1013,7 @@ done: static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata, int index, u8 addr[ETH_ALEN]) { - u32 temp; + u32 temp; if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) { temp = addr[3]; @@ -1838,8 +1824,7 @@ static int lan78xx_mdio_init(struct lan78xx_net *dev) node = of_get_child_by_name(dev->udev->dev.of_node, "mdio"); ret = of_mdiobus_register(dev->mdiobus, node); - if (node) - of_node_put(node); + of_node_put(node); if (ret) { netdev_err(dev->net, "can't register MDIO bus\n"); goto exit1; @@ -2169,7 +2154,7 @@ static int lan78xx_phy_init(struct lan78xx_net *dev) } /* MAC doesn't support 1000T Half */ - phydev->supported &= ~SUPPORTED_1000baseT_Half; + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT); /* support both flow controls */ dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX); @@ -2693,7 +2678,7 @@ static void lan78xx_terminate_urbs(struct lan78xx_net *dev) static int lan78xx_stop(struct net_device *net) { - struct lan78xx_net *dev = netdev_priv(net); + struct lan78xx_net *dev = netdev_priv(net); if (timer_pending(&dev->stat_monitor)) del_timer_sync(&dev->stat_monitor); @@ -2943,6 +2928,11 @@ static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf) int i; ret = lan78xx_get_endpoints(dev, intf); + if (ret) { + netdev_warn(dev->net, "lan78xx_get_endpoints failed: %d\n", + ret); + return ret; + } dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL); @@ -3071,7 +3061,7 @@ static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev, static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb) { - int status; + int status; if (test_bit(EVENT_RX_PAUSED, &dev->flags)) { skb_queue_tail(&dev->rxq_pause, skb); @@ -3338,9 +3328,9 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev) count = 0; length = 0; spin_lock_irqsave(&tqp->lock, flags); - for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) { + skb_queue_walk(tqp, skb) { if (skb_is_gso(skb)) { - if (pkt_cnt) { + if (!skb_queue_is_first(tqp, skb)) { /* handle previous packets first */ break; } @@ -3631,10 +3621,10 @@ static void intr_complete(struct urb *urb) static void lan78xx_disconnect(struct usb_interface *intf) { - struct lan78xx_net *dev; - struct usb_device *udev; - struct net_device *net; - struct phy_device *phydev; + struct lan78xx_net *dev; + struct usb_device *udev; + struct net_device *net; + struct phy_device *phydev; dev = usb_get_intfdata(intf); usb_set_intfdata(intf, NULL); @@ -3752,7 +3742,6 @@ static int lan78xx_probe(struct usb_interface *intf, ret = lan78xx_bind(dev, intf); if (ret < 0) goto out2; - strcpy(netdev->name, "eth%d"); if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len)) netdev->mtu = dev->hard_mtu - netdev->hard_header_len; diff --git a/drivers/net/usb/lan78xx.h b/drivers/net/usb/lan78xx.h index 25aa54611774..968e5e5faee0 100644 --- a/drivers/net/usb/lan78xx.h +++ b/drivers/net/usb/lan78xx.h @@ -1,18 +1,6 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * Copyright (C) 2015 Microchip Technology - * - * This program is free software; you can 
redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see <http://www.gnu.org/licenses/>. */ #ifndef _LAN78XX_H #define _LAN78XX_H diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 770aa624147f..504282af27e5 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -802,7 +802,7 @@ static void usbnet_terminate_urbs(struct usbnet *dev) int usbnet_stop (struct net_device *net) { struct usbnet *dev = netdev_priv(net); - struct driver_info *info = dev->driver_info; + const struct driver_info *info = dev->driver_info; int retval, pm, mpn; clear_bit(EVENT_DEV_OPEN, &dev->flags); @@ -865,7 +865,7 @@ int usbnet_open (struct net_device *net) { struct usbnet *dev = netdev_priv(net); int retval; - struct driver_info *info = dev->driver_info; + const struct driver_info *info = dev->driver_info; if ((retval = usb_autopm_get_interface(dev->intf)) < 0) { netif_info(dev, ifup, dev->net, @@ -1205,7 +1205,7 @@ fail_lowmem: } if (test_bit (EVENT_LINK_RESET, &dev->flags)) { - struct driver_info *info = dev->driver_info; + const struct driver_info *info = dev->driver_info; int retval = 0; clear_bit (EVENT_LINK_RESET, &dev->flags); @@ -1353,7 +1353,7 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb, unsigned int length; struct urb *urb = NULL; struct skb_data *entry; - struct driver_info *info = dev->driver_info; + const struct driver_info *info = dev->driver_info; unsigned long flags; int retval; @@ -1527,6 +1527,7 @@ static void usbnet_bh (struct timer_list *t) continue; case tx_done: kfree(entry->urb->sg); + /* fall through */ case rx_cleanup: usb_free_urb (entry->urb); dev_kfree_skb (skb); @@ -1646,7 +1647,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) struct usbnet *dev; struct net_device *net; struct usb_host_interface *interface; - struct driver_info *info; + const struct driver_info *info; struct usb_device *xdev; int status; const char *name; @@ -1662,7 +1663,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) } name = udev->dev.driver->name; - info = (struct driver_info *) prod->driver_info; + info = (const struct driver_info *) prod->driver_info; if (!info) { dev_dbg (&udev->dev, "blacklisted by %s\n", name); return -ENODEV; diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 41a00cd76955..224c56a4e2b1 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -24,6 +24,7 @@ #include <linux/filter.h> #include <linux/ptr_ring.h> #include <linux/bpf_trace.h> +#include <linux/net_tstamp.h> #define DRV_NAME "veth" #define DRV_VERSION "1.0" @@ -36,12 +37,6 @@ #define VETH_XDP_TX BIT(0) #define VETH_XDP_REDIR BIT(1) -struct pcpu_vstats { - u64 packets; - u64 bytes; - struct u64_stats_sync syncp; -}; - struct veth_rq { struct napi_struct xdp_napi; struct net_device *dev; @@ -114,6 +109,18 @@ static void veth_get_ethtool_stats(struct net_device *dev, data[0] = peer ? 
peer->ifindex : 0; } +static int veth_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + info->so_timestamping = + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE; + info->phc_index = -1; + + return 0; +} + static const struct ethtool_ops veth_ethtool_ops = { .get_drvinfo = veth_get_drvinfo, .get_link = ethtool_op_get_link, @@ -121,6 +128,7 @@ static const struct ethtool_ops veth_ethtool_ops = { .get_sset_count = veth_get_sset_count, .get_ethtool_stats = veth_get_ethtool_stats, .get_link_ksettings = veth_get_link_ksettings, + .get_ts_info = veth_get_ts_info, }; /* general routines */ @@ -201,8 +209,9 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev) skb_record_rx_queue(skb, rxq); } + skb_tx_timestamp(skb); if (likely(veth_forward_skb(rcv, skb, rq, rcv_xdp) == NET_RX_SUCCESS)) { - struct pcpu_vstats *stats = this_cpu_ptr(dev->vstats); + struct pcpu_lstats *stats = this_cpu_ptr(dev->lstats); u64_stats_update_begin(&stats->syncp); stats->bytes += length; @@ -221,7 +230,7 @@ drop: return NETDEV_TX_OK; } -static u64 veth_stats_one(struct pcpu_vstats *result, struct net_device *dev) +static u64 veth_stats_one(struct pcpu_lstats *result, struct net_device *dev) { struct veth_priv *priv = netdev_priv(dev); int cpu; @@ -229,7 +238,7 @@ static u64 veth_stats_one(struct pcpu_vstats *result, struct net_device *dev) result->packets = 0; result->bytes = 0; for_each_possible_cpu(cpu) { - struct pcpu_vstats *stats = per_cpu_ptr(dev->vstats, cpu); + struct pcpu_lstats *stats = per_cpu_ptr(dev->lstats, cpu); u64 packets, bytes; unsigned int start; @@ -249,7 +258,7 @@ static void veth_get_stats64(struct net_device *dev, { struct veth_priv *priv = netdev_priv(dev); struct net_device *peer; - struct pcpu_vstats one; + struct pcpu_lstats one; tot->tx_dropped = veth_stats_one(&one, dev); tot->tx_bytes = one.bytes; @@ -815,13 +824,13 @@ static int veth_dev_init(struct net_device *dev) { int err; - dev->vstats = netdev_alloc_pcpu_stats(struct pcpu_vstats); - if (!dev->vstats) + dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats); + if (!dev->lstats) return -ENOMEM; err = veth_alloc_queues(dev); if (err) { - free_percpu(dev->vstats); + free_percpu(dev->lstats); return err; } @@ -831,7 +840,7 @@ static int veth_dev_init(struct net_device *dev) static void veth_dev_free(struct net_device *dev) { veth_free_queues(dev); - free_percpu(dev->vstats); + free_percpu(dev->lstats); } #ifdef CONFIG_NET_POLL_CONTROLLER diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index dab504ec5e50..3f5aa59c37b7 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -2170,6 +2170,53 @@ static int virtnet_get_link_ksettings(struct net_device *dev, return 0; } +static int virtnet_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec) +{ + struct ethtool_coalesce ec_default = { + .cmd = ETHTOOL_SCOALESCE, + .rx_max_coalesced_frames = 1, + }; + struct virtnet_info *vi = netdev_priv(dev); + int i, napi_weight; + + if (ec->tx_max_coalesced_frames > 1) + return -EINVAL; + + ec_default.tx_max_coalesced_frames = ec->tx_max_coalesced_frames; + napi_weight = ec->tx_max_coalesced_frames ? 
NAPI_POLL_WEIGHT : 0; + + /* disallow changes to fields not explicitly tested above */ + if (memcmp(ec, &ec_default, sizeof(ec_default))) + return -EINVAL; + + if (napi_weight ^ vi->sq[0].napi.weight) { + if (dev->flags & IFF_UP) + return -EBUSY; + for (i = 0; i < vi->max_queue_pairs; i++) + vi->sq[i].napi.weight = napi_weight; + } + + return 0; +} + +static int virtnet_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec) +{ + struct ethtool_coalesce ec_default = { + .cmd = ETHTOOL_GCOALESCE, + .rx_max_coalesced_frames = 1, + }; + struct virtnet_info *vi = netdev_priv(dev); + + memcpy(ec, &ec_default, sizeof(ec_default)); + + if (vi->sq[0].napi.weight) + ec->tx_max_coalesced_frames = 1; + + return 0; +} + static void virtnet_init_settings(struct net_device *dev) { struct virtnet_info *vi = netdev_priv(dev); @@ -2208,6 +2255,8 @@ static const struct ethtool_ops virtnet_ethtool_ops = { .get_ts_info = ethtool_op_get_ts_info, .get_link_ksettings = virtnet_get_link_ksettings, .set_link_ksettings = virtnet_set_link_ksettings, + .set_coalesce = virtnet_set_coalesce, + .get_coalesce = virtnet_get_coalesce, }; static void virtnet_freeze_down(struct virtio_device *vdev) diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index f93547f257fb..69b7227c637e 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -1215,8 +1215,19 @@ static int vrf_add_fib_rules(const struct net_device *dev) goto ipmr_err; #endif +#if IS_ENABLED(CONFIG_IPV6_MROUTE_MULTIPLE_TABLES) + err = vrf_fib_rule(dev, RTNL_FAMILY_IP6MR, true); + if (err < 0) + goto ip6mr_err; +#endif + return 0; +#if IS_ENABLED(CONFIG_IPV6_MROUTE_MULTIPLE_TABLES) +ip6mr_err: + vrf_fib_rule(dev, RTNL_FAMILY_IPMR, false); +#endif + #if IS_ENABLED(CONFIG_IP_MROUTE_MULTIPLE_TABLES) ipmr_err: vrf_fib_rule(dev, AF_INET6, false); diff --git a/drivers/net/vsockmon.c b/drivers/net/vsockmon.c index c28bdce14fd5..7bad5c95551f 100644 --- a/drivers/net/vsockmon.c +++ b/drivers/net/vsockmon.c @@ -11,12 +11,6 @@ #define DEFAULT_MTU (VIRTIO_VSOCK_MAX_PKT_BUF_SIZE + \ sizeof(struct af_vsockmon_hdr)) -struct pcpu_lstats { - u64 rx_packets; - u64 rx_bytes; - struct u64_stats_sync syncp; -}; - static int vsockmon_dev_init(struct net_device *dev) { dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats); @@ -56,8 +50,8 @@ static netdev_tx_t vsockmon_xmit(struct sk_buff *skb, struct net_device *dev) struct pcpu_lstats *stats = this_cpu_ptr(dev->lstats); u64_stats_update_begin(&stats->syncp); - stats->rx_bytes += len; - stats->rx_packets++; + stats->bytes += len; + stats->packets++; u64_stats_update_end(&stats->syncp); dev_kfree_skb(skb); @@ -80,8 +74,8 @@ vsockmon_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) do { start = u64_stats_fetch_begin_irq(&vstats->syncp); - tbytes = vstats->rx_bytes; - tpackets = vstats->rx_packets; + tbytes = vstats->bytes; + tpackets = vstats->packets; } while (u64_stats_fetch_retry_irq(&vstats->syncp, start)); packets += tpackets; diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 2b8da2b7e721..fb0cdbba8d76 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -464,7 +464,7 @@ static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan, struct vxlan_fdb *f; f = __vxlan_find_mac(vxlan, mac, vni); - if (f) + if (f && f->used != jiffies) f->used = jiffies; return f; diff --git a/drivers/net/wan/c101.c b/drivers/net/wan/c101.c index 2371e078afbb..91dbbde3c35b 100644 --- a/drivers/net/wan/c101.c +++ b/drivers/net/wan/c101.c @@ -24,7 +24,6 @@ #include <linux/string.h> #include 
<linux/errno.h> #include <linux/init.h> -#include <linux/moduleparam.h> #include <linux/netdevice.h> #include <linux/hdlc.h> #include <linux/delay.h> diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c index 5f0366a125e2..8523ade16030 100644 --- a/drivers/net/wan/fsl_ucc_hdlc.c +++ b/drivers/net/wan/fsl_ucc_hdlc.c @@ -97,6 +97,12 @@ static int uhdlc_init(struct ucc_hdlc_private *priv) if (priv->tsa) { uf_info->tsa = 1; uf_info->ctsp = 1; + uf_info->cds = 1; + uf_info->ctss = 1; + } else { + uf_info->cds = 0; + uf_info->ctsp = 0; + uf_info->ctss = 0; } /* This sets HPM register in CMXUCR register which configures a @@ -265,7 +271,7 @@ static int uhdlc_init(struct ucc_hdlc_private *priv) iowrite16be(MAX_FRAME_LENGTH, &priv->ucc_pram->mflr); iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfthr); iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfcnt); - iowrite16be(DEFAULT_ADDR_MASK, &priv->ucc_pram->hmask); + iowrite16be(priv->hmask, &priv->ucc_pram->hmask); iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr1); iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr2); iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr3); @@ -375,6 +381,10 @@ static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev) dev->stats.tx_bytes += skb->len; break; + case ARPHRD_ETHER: + dev->stats.tx_bytes += skb->len; + break; + default: dev->stats.tx_dropped++; dev_kfree_skb(skb); @@ -512,6 +522,7 @@ static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit) break; case ARPHRD_PPP: + case ARPHRD_ETHER: length -= HDLC_CRC_SIZE; skb = dev_alloc_skb(length); @@ -780,6 +791,7 @@ static int ucc_hdlc_attach(struct net_device *dev, unsigned short encoding, if (parity != PARITY_NONE && parity != PARITY_CRC32_PR1_CCITT && + parity != PARITY_CRC16_PR0_CCITT && parity != PARITY_CRC16_PR1_CCITT) return -EINVAL; @@ -987,11 +999,17 @@ static const struct dev_pm_ops uhdlc_pm_ops = { #define HDLC_PM_OPS NULL #endif +static void uhdlc_tx_timeout(struct net_device *ndev) +{ + netdev_err(ndev, "%s\n", __func__); +} + static const struct net_device_ops uhdlc_ops = { .ndo_open = uhdlc_open, .ndo_stop = uhdlc_close, .ndo_start_xmit = hdlc_start_xmit, .ndo_do_ioctl = uhdlc_ioctl, + .ndo_tx_timeout = uhdlc_tx_timeout, }; static int ucc_hdlc_probe(struct platform_device *pdev) @@ -1015,7 +1033,7 @@ static int ucc_hdlc_probe(struct platform_device *pdev) } ucc_num = val - 1; - if ((ucc_num > 3) || (ucc_num < 0)) { + if (ucc_num > (UCC_MAX_NUM - 1) || ucc_num < 0) { dev_err(&pdev->dev, ": Invalid UCC num\n"); return -EINVAL; } @@ -1090,6 +1108,9 @@ static int ucc_hdlc_probe(struct platform_device *pdev) goto free_utdm; } + if (of_property_read_u16(np, "fsl,hmask", &uhdlc_priv->hmask)) + uhdlc_priv->hmask = DEFAULT_ADDR_MASK; + ret = uhdlc_init(uhdlc_priv); if (ret) { dev_err(&pdev->dev, "Failed to init uhdlc\n"); @@ -1107,6 +1128,7 @@ static int ucc_hdlc_probe(struct platform_device *pdev) hdlc = dev_to_hdlc(dev); dev->tx_queue_len = 16; dev->netdev_ops = &uhdlc_ops; + dev->watchdog_timeo = 2 * HZ; hdlc->attach = ucc_hdlc_attach; hdlc->xmit = ucc_hdlc_tx; netif_napi_add(dev, &uhdlc_priv->napi, ucc_hdlc_poll, 32); diff --git a/drivers/net/wan/fsl_ucc_hdlc.h b/drivers/net/wan/fsl_ucc_hdlc.h index c21134c1f180..b99fa2f1cd99 100644 --- a/drivers/net/wan/fsl_ucc_hdlc.h +++ b/drivers/net/wan/fsl_ucc_hdlc.h @@ -106,6 +106,7 @@ struct ucc_hdlc_private { unsigned short encoding; unsigned short parity; + unsigned short hmask; u32 clocking; spinlock_t lock; /* lock for Tx BD and Tx buffer */ #ifdef 
CONFIG_PM diff --git a/drivers/net/wan/pci200syn.c b/drivers/net/wan/pci200syn.c index 4e437c599e9a..1f8a3f77a7b9 100644 --- a/drivers/net/wan/pci200syn.c +++ b/drivers/net/wan/pci200syn.c @@ -27,7 +27,6 @@ #include <linux/errno.h> #include <linux/init.h> #include <linux/ioport.h> -#include <linux/moduleparam.h> #include <linux/netdevice.h> #include <linux/hdlc.h> #include <linux/pci.h> diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig index 54ff5930126c..6572a43590a8 100644 --- a/drivers/net/wireless/ath/ath10k/Kconfig +++ b/drivers/net/wireless/ath/ath10k/Kconfig @@ -42,7 +42,8 @@ config ATH10K_USB config ATH10K_SNOC tristate "Qualcomm ath10k SNOC support (EXPERIMENTAL)" - depends on ATH10K && ARCH_QCOM + depends on ATH10K + depends on ARCH_QCOM || COMPILE_TEST ---help--- This module adds support for integrated WCN3990 chip connected to system NOC(SNOC). Currently work in progress and will not diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c index c9bd0e2b5db7..4cd69aca75e2 100644 --- a/drivers/net/wireless/ath/ath10k/ahb.c +++ b/drivers/net/wireless/ath/ath10k/ahb.c @@ -655,10 +655,10 @@ static void ath10k_ahb_hif_stop(struct ath10k *ar) ath10k_ahb_irq_disable(ar); synchronize_irq(ar_ahb->irq); - ath10k_pci_flush(ar); - napi_synchronize(&ar->napi); napi_disable(&ar->napi); + + ath10k_pci_flush(ar); } static int ath10k_ahb_hif_power_up(struct ath10k *ar) @@ -750,7 +750,7 @@ static int ath10k_ahb_probe(struct platform_device *pdev) enum ath10k_hw_rev hw_rev; size_t size; int ret; - u32 chip_id; + struct ath10k_bus_params bus_params; of_id = of_match_device(ath10k_ahb_of_match, &pdev->dev); if (!of_id) { @@ -806,14 +806,15 @@ static int ath10k_ahb_probe(struct platform_device *pdev) ath10k_pci_ce_deinit(ar); - chip_id = ath10k_ahb_soc_read32(ar, SOC_CHIP_ID_ADDRESS); - if (chip_id == 0xffffffff) { + bus_params.dev_type = ATH10K_DEV_TYPE_LL; + bus_params.chip_id = ath10k_ahb_soc_read32(ar, SOC_CHIP_ID_ADDRESS); + if (bus_params.chip_id == 0xffffffff) { ath10k_err(ar, "failed to get chip id\n"); ret = -ENODEV; goto err_halt_device; } - ret = ath10k_core_register(ar, chip_id); + ret = ath10k_core_register(ar, &bus_params); if (ret) { ath10k_err(ar, "failed to register driver core: %d\n", ret); goto err_halt_device; diff --git a/drivers/net/wireless/ath/ath10k/bmi.c b/drivers/net/wireless/ath/ath10k/bmi.c index af4978d6a14b..1750b182209b 100644 --- a/drivers/net/wireless/ath/ath10k/bmi.c +++ b/drivers/net/wireless/ath/ath10k/bmi.c @@ -459,3 +459,26 @@ int ath10k_bmi_fast_download(struct ath10k *ar, return ret; } + +int ath10k_bmi_set_start(struct ath10k *ar, u32 address) +{ + struct bmi_cmd cmd; + u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.set_app_start); + int ret; + + if (ar->bmi.done_sent) { + ath10k_warn(ar, "bmi set start command disallowed\n"); + return -EBUSY; + } + + cmd.id = __cpu_to_le32(BMI_SET_APP_START); + cmd.set_app_start.addr = __cpu_to_le32(address); + + ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL); + if (ret) { + ath10k_warn(ar, "unable to set start to the device:%d\n", ret); + return ret; + } + + return 0; +} diff --git a/drivers/net/wireless/ath/ath10k/bmi.h b/drivers/net/wireless/ath/ath10k/bmi.h index 9a396817aa55..725c9afc63f2 100644 --- a/drivers/net/wireless/ath/ath10k/bmi.h +++ b/drivers/net/wireless/ath/ath10k/bmi.h @@ -86,6 +86,10 @@ enum bmi_cmd_id { #define BMI_PARAM_GET_FLASH_BOARD_ID 0x8000 #define BMI_PARAM_FLASH_SECTION_ALL 0x10000 +/* Dual-band Extended 
Board ID */ +#define BMI_PARAM_GET_EXT_BOARD_ID 0x40000 +#define ATH10K_BMI_EXT_BOARD_ID_SUPPORT 0x40000 + #define ATH10K_BMI_BOARD_ID_FROM_OTP_MASK 0x7c00 #define ATH10K_BMI_BOARD_ID_FROM_OTP_LSB 10 @@ -93,6 +97,7 @@ enum bmi_cmd_id { #define ATH10K_BMI_CHIP_ID_FROM_OTP_LSB 15 #define ATH10K_BMI_BOARD_ID_STATUS_MASK 0xff +#define ATH10K_BMI_EBOARD_ID_STATUS_MASK 0xff struct bmi_cmd { __le32 id; /* enum bmi_cmd_id */ @@ -190,6 +195,35 @@ struct bmi_target_info { u32 type; }; +struct bmi_segmented_file_header { + __le32 magic_num; + __le32 file_flags; + u8 data[]; +}; + +struct bmi_segmented_metadata { + __le32 addr; + __le32 length; + u8 data[]; +}; + +#define BMI_SGMTFILE_MAGIC_NUM 0x544d4753 /* "SGMT" */ +#define BMI_SGMTFILE_FLAG_COMPRESS 1 + +/* Special values for bmi_segmented_metadata.length (all have high bit set) */ + +/* end of segmented data */ +#define BMI_SGMTFILE_DONE 0xffffffff + +/* Board Data segment */ +#define BMI_SGMTFILE_BDDATA 0xfffffffe + +/* set beginning address */ +#define BMI_SGMTFILE_BEGINADDR 0xfffffffd + +/* immediate function execution */ +#define BMI_SGMTFILE_EXEC 0xfffffffc + /* in jiffies */ #define BMI_COMMUNICATION_TIMEOUT_HZ (3 * HZ) @@ -239,4 +273,6 @@ int ath10k_bmi_fast_download(struct ath10k *ar, u32 address, const void *buffer, u32 length); int ath10k_bmi_read_soc_reg(struct ath10k *ar, u32 address, u32 *reg_val); int ath10k_bmi_write_soc_reg(struct ath10k *ar, u32 address, u32 reg_val); +int ath10k_bmi_set_start(struct ath10k *ar, u32 address); + #endif /* _BMI_H_ */ diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c index 18c709c484e7..f6d3ecbdd3a3 100644 --- a/drivers/net/wireless/ath/ath10k/ce.c +++ b/drivers/net/wireless/ath/ath10k/ce.c @@ -1280,10 +1280,17 @@ static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state) int ath10k_ce_disable_interrupts(struct ath10k *ar) { + struct ath10k_ce *ce = ath10k_ce_priv(ar); + struct ath10k_ce_pipe *ce_state; + u32 ctrl_addr; int ce_id; for (ce_id = 0; ce_id < CE_COUNT; ce_id++) { - u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id); + ce_state = &ce->ce_states[ce_id]; + if (ce_state->attr_flags & CE_ATTR_POLL) + continue; + + ctrl_addr = ath10k_ce_base_address(ar, ce_id); ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr); ath10k_ce_error_intr_disable(ar, ctrl_addr); @@ -1300,11 +1307,14 @@ void ath10k_ce_enable_interrupts(struct ath10k *ar) int ce_id; struct ath10k_ce_pipe *ce_state; - /* Skip the last copy engine, CE7 the diagnostic window, as that - * uses polling and isn't initialized for interrupts. + /* Enable interrupts for copy engine that + * are not using polling mode. 
*/ - for (ce_id = 0; ce_id < CE_COUNT - 1; ce_id++) { + for (ce_id = 0; ce_id < CE_COUNT; ce_id++) { ce_state = &ce->ce_states[ce_id]; + if (ce_state->attr_flags & CE_ATTR_POLL) + continue; + ath10k_ce_per_engine_handler_adjust(ce_state); } } @@ -1416,10 +1426,8 @@ ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id, nentries = roundup_pow_of_two(nentries); - src_ring = kzalloc(sizeof(*src_ring) + - (nentries * - sizeof(*src_ring->per_transfer_context)), - GFP_KERNEL); + src_ring = kzalloc(struct_size(src_ring, per_transfer_context, + nentries), GFP_KERNEL); if (src_ring == NULL) return ERR_PTR(-ENOMEM); @@ -1476,10 +1484,8 @@ ath10k_ce_alloc_src_ring_64(struct ath10k *ar, unsigned int ce_id, nentries = roundup_pow_of_two(nentries); - src_ring = kzalloc(sizeof(*src_ring) + - (nentries * - sizeof(*src_ring->per_transfer_context)), - GFP_KERNEL); + src_ring = kzalloc(struct_size(src_ring, per_transfer_context, + nentries), GFP_KERNEL); if (!src_ring) return ERR_PTR(-ENOMEM); @@ -1534,10 +1540,8 @@ ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id, nentries = roundup_pow_of_two(attr->dest_nentries); - dest_ring = kzalloc(sizeof(*dest_ring) + - (nentries * - sizeof(*dest_ring->per_transfer_context)), - GFP_KERNEL); + dest_ring = kzalloc(struct_size(dest_ring, per_transfer_context, + nentries), GFP_KERNEL); if (dest_ring == NULL) return ERR_PTR(-ENOMEM); @@ -1580,10 +1584,8 @@ ath10k_ce_alloc_dest_ring_64(struct ath10k *ar, unsigned int ce_id, nentries = roundup_pow_of_two(attr->dest_nentries); - dest_ring = kzalloc(sizeof(*dest_ring) + - (nentries * - sizeof(*dest_ring->per_transfer_context)), - GFP_KERNEL); + dest_ring = kzalloc(struct_size(dest_ring, per_transfer_context, + nentries), GFP_KERNEL); if (!dest_ring) return ERR_PTR(-ENOMEM); diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h index b8fb5382dede..ead9987c3259 100644 --- a/drivers/net/wireless/ath/ath10k/ce.h +++ b/drivers/net/wireless/ath/ath10k/ce.h @@ -275,16 +275,19 @@ void ath10k_ce_free_rri(struct ath10k *ar); /* ce_attr.flags values */ /* Use NonSnooping PCIe accesses? */ -#define CE_ATTR_NO_SNOOP 1 +#define CE_ATTR_NO_SNOOP BIT(0) /* Byte swap data words */ -#define CE_ATTR_BYTE_SWAP_DATA 2 +#define CE_ATTR_BYTE_SWAP_DATA BIT(1) /* Swizzle descriptors? 
*/ -#define CE_ATTR_SWIZZLE_DESCRIPTORS 4 +#define CE_ATTR_SWIZZLE_DESCRIPTORS BIT(2) /* no interrupt on copy completion */ -#define CE_ATTR_DIS_INTR 8 +#define CE_ATTR_DIS_INTR BIT(3) + +/* no interrupt, only polling */ +#define CE_ATTR_POLL BIT(4) /* Attributes of an instance of a Copy Engine */ struct ce_attr { diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index c40cd129afe7..203f30992c26 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -19,6 +19,7 @@ #include <linux/module.h> #include <linux/firmware.h> #include <linux/of.h> +#include <linux/property.h> #include <linux/dmi.h> #include <linux/ctype.h> #include <asm/byteorder.h> @@ -63,6 +64,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { { .id = QCA988X_HW_2_0_VERSION, .dev_id = QCA988X_2_0_DEVICE_ID, + .bus = ATH10K_BUS_PCI, .name = "qca988x hw2.0", .patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR, .uart_pin = 7, @@ -84,13 +86,14 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .vht160_mcs_rx_highest = 0, .vht160_mcs_tx_highest = 0, .n_cipher_suites = 8, - .num_peers = TARGET_TLV_NUM_PEERS, .ast_skid_limit = 0x10, .num_wds_entries = 0x20, .target_64bit = false, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, .shadow_reg_support = false, .rri_on_ddr = false, + .hw_filter_reset_required = true, + .fw_diag_ce_download = false, }, { .id = QCA988X_HW_2_0_VERSION, @@ -116,7 +119,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .vht160_mcs_rx_highest = 0, .vht160_mcs_tx_highest = 0, .n_cipher_suites = 8, - .num_peers = TARGET_TLV_NUM_PEERS, .ast_skid_limit = 0x10, .num_wds_entries = 0x20, .target_64bit = false, @@ -124,10 +126,13 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .per_ce_irq = false, .shadow_reg_support = false, .rri_on_ddr = false, + .hw_filter_reset_required = true, + .fw_diag_ce_download = false, }, { .id = QCA9887_HW_1_0_VERSION, .dev_id = QCA9887_1_0_DEVICE_ID, + .bus = ATH10K_BUS_PCI, .name = "qca9887 hw1.0", .patch_load_addr = QCA9887_HW_1_0_PATCH_LOAD_ADDR, .uart_pin = 7, @@ -149,7 +154,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .vht160_mcs_rx_highest = 0, .vht160_mcs_tx_highest = 0, .n_cipher_suites = 8, - .num_peers = TARGET_TLV_NUM_PEERS, .ast_skid_limit = 0x10, .num_wds_entries = 0x20, .target_64bit = false, @@ -157,10 +161,13 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .per_ce_irq = false, .shadow_reg_support = false, .rri_on_ddr = false, + .hw_filter_reset_required = true, + .fw_diag_ce_download = false, }, { .id = QCA6174_HW_2_1_VERSION, .dev_id = QCA6164_2_1_DEVICE_ID, + .bus = ATH10K_BUS_PCI, .name = "qca6164 hw2.1", .patch_load_addr = QCA6174_HW_2_1_PATCH_LOAD_ADDR, .uart_pin = 6, @@ -181,7 +188,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .vht160_mcs_rx_highest = 0, .vht160_mcs_tx_highest = 0, .n_cipher_suites = 8, - .num_peers = TARGET_TLV_NUM_PEERS, .ast_skid_limit = 0x10, .num_wds_entries = 0x20, .target_64bit = false, @@ -189,10 +195,13 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .per_ce_irq = false, .shadow_reg_support = false, .rri_on_ddr = false, + .hw_filter_reset_required = true, + .fw_diag_ce_download = false, }, { .id = QCA6174_HW_2_1_VERSION, .dev_id = QCA6174_2_1_DEVICE_ID, + .bus = ATH10K_BUS_PCI, .name = "qca6174 hw2.1", .patch_load_addr = QCA6174_HW_2_1_PATCH_LOAD_ADDR, .uart_pin = 6, @@ -213,7 +222,6 @@ static const 
struct ath10k_hw_params ath10k_hw_params_list[] = { .vht160_mcs_rx_highest = 0, .vht160_mcs_tx_highest = 0, .n_cipher_suites = 8, - .num_peers = TARGET_TLV_NUM_PEERS, .ast_skid_limit = 0x10, .num_wds_entries = 0x20, .target_64bit = false, @@ -221,10 +229,13 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .per_ce_irq = false, .shadow_reg_support = false, .rri_on_ddr = false, + .hw_filter_reset_required = true, + .fw_diag_ce_download = false, }, { .id = QCA6174_HW_3_0_VERSION, .dev_id = QCA6174_2_1_DEVICE_ID, + .bus = ATH10K_BUS_PCI, .name = "qca6174 hw3.0", .patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR, .uart_pin = 6, @@ -245,7 +256,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .vht160_mcs_rx_highest = 0, .vht160_mcs_tx_highest = 0, .n_cipher_suites = 8, - .num_peers = TARGET_TLV_NUM_PEERS, .ast_skid_limit = 0x10, .num_wds_entries = 0x20, .target_64bit = false, @@ -253,10 +263,13 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .per_ce_irq = false, .shadow_reg_support = false, .rri_on_ddr = false, + .hw_filter_reset_required = true, + .fw_diag_ce_download = false, }, { .id = QCA6174_HW_3_2_VERSION, .dev_id = QCA6174_2_1_DEVICE_ID, + .bus = ATH10K_BUS_PCI, .name = "qca6174 hw3.2", .patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR, .uart_pin = 6, @@ -280,7 +293,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .vht160_mcs_rx_highest = 0, .vht160_mcs_tx_highest = 0, .n_cipher_suites = 8, - .num_peers = TARGET_TLV_NUM_PEERS, .ast_skid_limit = 0x10, .num_wds_entries = 0x20, .target_64bit = false, @@ -288,10 +300,13 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .per_ce_irq = false, .shadow_reg_support = false, .rri_on_ddr = false, + .hw_filter_reset_required = true, + .fw_diag_ce_download = true, }, { .id = QCA99X0_HW_2_0_DEV_VERSION, .dev_id = QCA99X0_2_0_DEVICE_ID, + .bus = ATH10K_BUS_PCI, .name = "qca99x0 hw2.0", .patch_load_addr = QCA99X0_HW_2_0_PATCH_LOAD_ADDR, .uart_pin = 7, @@ -318,7 +333,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .vht160_mcs_rx_highest = 0, .vht160_mcs_tx_highest = 0, .n_cipher_suites = 11, - .num_peers = TARGET_TLV_NUM_PEERS, .ast_skid_limit = 0x10, .num_wds_entries = 0x20, .target_64bit = false, @@ -326,10 +340,13 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .per_ce_irq = false, .shadow_reg_support = false, .rri_on_ddr = false, + .hw_filter_reset_required = true, + .fw_diag_ce_download = false, }, { .id = QCA9984_HW_1_0_DEV_VERSION, .dev_id = QCA9984_1_0_DEVICE_ID, + .bus = ATH10K_BUS_PCI, .name = "qca9984/qca9994 hw1.0", .patch_load_addr = QCA9984_HW_1_0_PATCH_LOAD_ADDR, .uart_pin = 7, @@ -346,8 +363,10 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .fw = { .dir = QCA9984_HW_1_0_FW_DIR, .board = QCA9984_HW_1_0_BOARD_DATA_FILE, + .eboard = QCA9984_HW_1_0_EBOARD_DATA_FILE, .board_size = QCA99X0_BOARD_DATA_SZ, .board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ, + .ext_board_size = QCA99X0_EXT_BOARD_DATA_SZ, }, .sw_decrypt_mcast_mgmt = true, .hw_ops = &qca99x0_ops, @@ -361,7 +380,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .vht160_mcs_rx_highest = 1560, .vht160_mcs_tx_highest = 1560, .n_cipher_suites = 11, - .num_peers = TARGET_TLV_NUM_PEERS, .ast_skid_limit = 0x10, .num_wds_entries = 0x20, .target_64bit = false, @@ -369,10 +387,13 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .per_ce_irq = false, .shadow_reg_support = false, .rri_on_ddr = false, + 
.hw_filter_reset_required = true, + .fw_diag_ce_download = false, }, { .id = QCA9888_HW_2_0_DEV_VERSION, .dev_id = QCA9888_2_0_DEVICE_ID, + .bus = ATH10K_BUS_PCI, .name = "qca9888 hw2.0", .patch_load_addr = QCA9888_HW_2_0_PATCH_LOAD_ADDR, .uart_pin = 7, @@ -403,7 +424,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .vht160_mcs_rx_highest = 780, .vht160_mcs_tx_highest = 780, .n_cipher_suites = 11, - .num_peers = TARGET_TLV_NUM_PEERS, .ast_skid_limit = 0x10, .num_wds_entries = 0x20, .target_64bit = false, @@ -411,10 +431,13 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .per_ce_irq = false, .shadow_reg_support = false, .rri_on_ddr = false, + .hw_filter_reset_required = true, + .fw_diag_ce_download = false, }, { .id = QCA9377_HW_1_0_DEV_VERSION, .dev_id = QCA9377_1_0_DEVICE_ID, + .bus = ATH10K_BUS_PCI, .name = "qca9377 hw1.0", .patch_load_addr = QCA9377_HW_1_0_PATCH_LOAD_ADDR, .uart_pin = 6, @@ -435,7 +458,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .vht160_mcs_rx_highest = 0, .vht160_mcs_tx_highest = 0, .n_cipher_suites = 8, - .num_peers = TARGET_TLV_NUM_PEERS, .ast_skid_limit = 0x10, .num_wds_entries = 0x20, .target_64bit = false, @@ -443,10 +465,13 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .per_ce_irq = false, .shadow_reg_support = false, .rri_on_ddr = false, + .hw_filter_reset_required = true, + .fw_diag_ce_download = false, }, { .id = QCA9377_HW_1_1_DEV_VERSION, .dev_id = QCA9377_1_0_DEVICE_ID, + .bus = ATH10K_BUS_PCI, .name = "qca9377 hw1.1", .patch_load_addr = QCA9377_HW_1_0_PATCH_LOAD_ADDR, .uart_pin = 6, @@ -469,7 +494,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .vht160_mcs_rx_highest = 0, .vht160_mcs_tx_highest = 0, .n_cipher_suites = 8, - .num_peers = TARGET_TLV_NUM_PEERS, .ast_skid_limit = 0x10, .num_wds_entries = 0x20, .target_64bit = false, @@ -477,10 +501,13 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .per_ce_irq = false, .shadow_reg_support = false, .rri_on_ddr = false, + .hw_filter_reset_required = true, + .fw_diag_ce_download = true, }, { .id = QCA4019_HW_1_0_DEV_VERSION, .dev_id = 0, + .bus = ATH10K_BUS_AHB, .name = "qca4019 hw1.0", .patch_load_addr = QCA4019_HW_1_0_PATCH_LOAD_ADDR, .uart_pin = 7, @@ -508,7 +535,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .vht160_mcs_rx_highest = 0, .vht160_mcs_tx_highest = 0, .n_cipher_suites = 11, - .num_peers = TARGET_TLV_NUM_PEERS, .ast_skid_limit = 0x10, .num_wds_entries = 0x20, .target_64bit = false, @@ -516,10 +542,13 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .per_ce_irq = false, .shadow_reg_support = false, .rri_on_ddr = false, + .hw_filter_reset_required = true, + .fw_diag_ce_download = false, }, { .id = WCN3990_HW_1_0_DEV_VERSION, .dev_id = 0, + .bus = ATH10K_BUS_PCI, .name = "wcn3990 hw1.0", .continuous_frag_desc = true, .tx_chain_mask = 0x7, @@ -539,6 +568,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .per_ce_irq = true, .shadow_reg_support = true, .rri_on_ddr = true, + .hw_filter_reset_required = false, + .fw_diag_ce_download = false, }, }; @@ -762,153 +793,11 @@ static int ath10k_push_board_ext_data(struct ath10k *ar, const void *data, return 0; } -static int ath10k_download_board_data(struct ath10k *ar, const void *data, - size_t data_len) -{ - u32 board_data_size = ar->hw_params.fw.board_size; - u32 address; - int ret; - - ret = ath10k_push_board_ext_data(ar, data, data_len); - if (ret) { - ath10k_err(ar, 
"could not push board ext data (%d)\n", ret); - goto exit; - } - - ret = ath10k_bmi_read32(ar, hi_board_data, &address); - if (ret) { - ath10k_err(ar, "could not read board data addr (%d)\n", ret); - goto exit; - } - - ret = ath10k_bmi_write_memory(ar, address, data, - min_t(u32, board_data_size, - data_len)); - if (ret) { - ath10k_err(ar, "could not write board data (%d)\n", ret); - goto exit; - } - - ret = ath10k_bmi_write32(ar, hi_board_data_initialized, 1); - if (ret) { - ath10k_err(ar, "could not write board data bit (%d)\n", ret); - goto exit; - } - -exit: - return ret; -} - -static int ath10k_download_cal_file(struct ath10k *ar, - const struct firmware *file) -{ - int ret; - - if (!file) - return -ENOENT; - - if (IS_ERR(file)) - return PTR_ERR(file); - - ret = ath10k_download_board_data(ar, file->data, file->size); - if (ret) { - ath10k_err(ar, "failed to download cal_file data: %d\n", ret); - return ret; - } - - ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cal file downloaded\n"); - - return 0; -} - -static int ath10k_download_cal_dt(struct ath10k *ar, const char *dt_name) -{ - struct device_node *node; - int data_len; - void *data; - int ret; - - node = ar->dev->of_node; - if (!node) - /* Device Tree is optional, don't print any warnings if - * there's no node for ath10k. - */ - return -ENOENT; - - if (!of_get_property(node, dt_name, &data_len)) { - /* The calibration data node is optional */ - return -ENOENT; - } - - if (data_len != ar->hw_params.cal_data_len) { - ath10k_warn(ar, "invalid calibration data length in DT: %d\n", - data_len); - ret = -EMSGSIZE; - goto out; - } - - data = kmalloc(data_len, GFP_KERNEL); - if (!data) { - ret = -ENOMEM; - goto out; - } - - ret = of_property_read_u8_array(node, dt_name, data, data_len); - if (ret) { - ath10k_warn(ar, "failed to read calibration data from DT: %d\n", - ret); - goto out_free; - } - - ret = ath10k_download_board_data(ar, data, data_len); - if (ret) { - ath10k_warn(ar, "failed to download calibration data from Device Tree: %d\n", - ret); - goto out_free; - } - - ret = 0; - -out_free: - kfree(data); - -out: - return ret; -} - -static int ath10k_download_cal_eeprom(struct ath10k *ar) -{ - size_t data_len; - void *data = NULL; - int ret; - - ret = ath10k_hif_fetch_cal_eeprom(ar, &data, &data_len); - if (ret) { - if (ret != -EOPNOTSUPP) - ath10k_warn(ar, "failed to read calibration data from EEPROM: %d\n", - ret); - goto out_free; - } - - ret = ath10k_download_board_data(ar, data, data_len); - if (ret) { - ath10k_warn(ar, "failed to download calibration data from EEPROM: %d\n", - ret); - goto out_free; - } - - ret = 0; - -out_free: - kfree(data); - - return ret; -} - static int ath10k_core_get_board_id_from_otp(struct ath10k *ar) { u32 result, address; u8 board_id, chip_id; + bool ext_bid_support; int ret, bmi_board_id_param; address = ar->hw_params.patch_load_addr; @@ -948,10 +837,13 @@ static int ath10k_core_get_board_id_from_otp(struct ath10k *ar) board_id = MS(result, ATH10K_BMI_BOARD_ID_FROM_OTP); chip_id = MS(result, ATH10K_BMI_CHIP_ID_FROM_OTP); + ext_bid_support = (result & ATH10K_BMI_EXT_BOARD_ID_SUPPORT); ath10k_dbg(ar, ATH10K_DBG_BOOT, - "boot get otp board id result 0x%08x board_id %d chip_id %d\n", - result, board_id, chip_id); + "boot get otp board id result 0x%08x board_id %d chip_id %d ext_bid_support %d\n", + result, board_id, chip_id, ext_bid_support); + + ar->id.ext_bid_supported = ext_bid_support; if ((result & ATH10K_BMI_BOARD_ID_STATUS_MASK) != 0 || (board_id == 0)) { @@ -1055,64 +947,6 @@ static int 
ath10k_core_check_dt(struct ath10k *ar) return 0; } -static int ath10k_download_and_run_otp(struct ath10k *ar) -{ - u32 result, address = ar->hw_params.patch_load_addr; - u32 bmi_otp_exe_param = ar->hw_params.otp_exe_param; - int ret; - - ret = ath10k_download_board_data(ar, - ar->running_fw->board_data, - ar->running_fw->board_len); - if (ret) { - ath10k_err(ar, "failed to download board data: %d\n", ret); - return ret; - } - - /* OTP is optional */ - - if (!ar->running_fw->fw_file.otp_data || - !ar->running_fw->fw_file.otp_len) { - ath10k_warn(ar, "Not running otp, calibration will be incorrect (otp-data %pK otp_len %zd)!\n", - ar->running_fw->fw_file.otp_data, - ar->running_fw->fw_file.otp_len); - return 0; - } - - ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot upload otp to 0x%x len %zd\n", - address, ar->running_fw->fw_file.otp_len); - - ret = ath10k_bmi_fast_download(ar, address, - ar->running_fw->fw_file.otp_data, - ar->running_fw->fw_file.otp_len); - if (ret) { - ath10k_err(ar, "could not write otp (%d)\n", ret); - return ret; - } - - /* As of now pre-cal is valid for 10_4 variants */ - if (ar->cal_mode == ATH10K_PRE_CAL_MODE_DT || - ar->cal_mode == ATH10K_PRE_CAL_MODE_FILE) - bmi_otp_exe_param = BMI_PARAM_FLASH_SECTION_ALL; - - ret = ath10k_bmi_execute(ar, address, bmi_otp_exe_param, &result); - if (ret) { - ath10k_err(ar, "could not execute otp (%d)\n", ret); - return ret; - } - - ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot otp execute result %d\n", result); - - if (!(skip_otp || test_bit(ATH10K_FW_FEATURE_IGNORE_OTP_RESULT, - ar->running_fw->fw_file.fw_features)) && - result != 0) { - ath10k_err(ar, "otp calibration failed: %d", result); - return -EINVAL; - } - - return 0; -} - static int ath10k_download_fw(struct ath10k *ar) { u32 address, data_len; @@ -1135,14 +969,24 @@ static int ath10k_download_fw(struct ath10k *ar) "boot uploading firmware image %pK len %d\n", data, data_len); - ret = ath10k_bmi_fast_download(ar, address, data, data_len); - if (ret) { - ath10k_err(ar, "failed to download firmware: %d\n", - ret); - return ret; + /* Check if device supports to download firmware via + * diag copy engine. Downloading firmware via diag CE + * greatly reduces the time to download firmware. 
+ */ + if (ar->hw_params.fw_diag_ce_download) { + ret = ath10k_hw_diag_fast_download(ar, address, + data, data_len); + if (ret == 0) + /* firmware upload via diag ce was successful */ + return 0; + + ath10k_warn(ar, + "failed to upload firmware via diag ce, trying BMI: %d", + ret); } - return ret; + return ath10k_bmi_fast_download(ar, address, + data, data_len); } static void ath10k_core_free_board_files(struct ath10k *ar) @@ -1150,9 +994,15 @@ static void ath10k_core_free_board_files(struct ath10k *ar) if (!IS_ERR(ar->normal_mode_fw.board)) release_firmware(ar->normal_mode_fw.board); + if (!IS_ERR(ar->normal_mode_fw.ext_board)) + release_firmware(ar->normal_mode_fw.ext_board); + ar->normal_mode_fw.board = NULL; ar->normal_mode_fw.board_data = NULL; ar->normal_mode_fw.board_len = 0; + ar->normal_mode_fw.ext_board = NULL; + ar->normal_mode_fw.ext_board_data = NULL; + ar->normal_mode_fw.ext_board_len = 0; } static void ath10k_core_free_firmware_files(struct ath10k *ar) @@ -1206,28 +1056,47 @@ success: return 0; } -static int ath10k_core_fetch_board_data_api_1(struct ath10k *ar) +static int ath10k_core_fetch_board_data_api_1(struct ath10k *ar, int bd_ie_type) { - if (!ar->hw_params.fw.board) { - ath10k_err(ar, "failed to find board file fw entry\n"); - return -EINVAL; - } + const struct firmware *fw; - ar->normal_mode_fw.board = ath10k_fetch_fw_file(ar, - ar->hw_params.fw.dir, - ar->hw_params.fw.board); - if (IS_ERR(ar->normal_mode_fw.board)) - return PTR_ERR(ar->normal_mode_fw.board); + if (bd_ie_type == ATH10K_BD_IE_BOARD) { + if (!ar->hw_params.fw.board) { + ath10k_err(ar, "failed to find board file fw entry\n"); + return -EINVAL; + } + + ar->normal_mode_fw.board = ath10k_fetch_fw_file(ar, + ar->hw_params.fw.dir, + ar->hw_params.fw.board); + if (IS_ERR(ar->normal_mode_fw.board)) + return PTR_ERR(ar->normal_mode_fw.board); + + ar->normal_mode_fw.board_data = ar->normal_mode_fw.board->data; + ar->normal_mode_fw.board_len = ar->normal_mode_fw.board->size; + } else if (bd_ie_type == ATH10K_BD_IE_BOARD_EXT) { + if (!ar->hw_params.fw.eboard) { + ath10k_err(ar, "failed to find eboard file fw entry\n"); + return -EINVAL; + } - ar->normal_mode_fw.board_data = ar->normal_mode_fw.board->data; - ar->normal_mode_fw.board_len = ar->normal_mode_fw.board->size; + fw = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, + ar->hw_params.fw.eboard); + ar->normal_mode_fw.ext_board = fw; + if (IS_ERR(ar->normal_mode_fw.ext_board)) + return PTR_ERR(ar->normal_mode_fw.ext_board); + + ar->normal_mode_fw.ext_board_data = ar->normal_mode_fw.ext_board->data; + ar->normal_mode_fw.ext_board_len = ar->normal_mode_fw.ext_board->size; + } return 0; } static int ath10k_core_parse_bd_ie_board(struct ath10k *ar, const void *buf, size_t buf_len, - const char *boardname) + const char *boardname, + int bd_ie_type) { const struct ath10k_fw_ie *hdr; bool name_match_found; @@ -1276,12 +1145,21 @@ static int ath10k_core_parse_bd_ie_board(struct ath10k *ar, /* no match found */ break; - ath10k_dbg(ar, ATH10K_DBG_BOOT, - "boot found board data for '%s'", - boardname); + if (bd_ie_type == ATH10K_BD_IE_BOARD) { + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "boot found board data for '%s'", + boardname); - ar->normal_mode_fw.board_data = board_ie_data; - ar->normal_mode_fw.board_len = board_ie_len; + ar->normal_mode_fw.board_data = board_ie_data; + ar->normal_mode_fw.board_len = board_ie_len; + } else if (bd_ie_type == ATH10K_BD_IE_BOARD_EXT) { + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "boot found eboard data for '%s'", + boardname); + + 
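The fw_diag_ce_download change in ath10k_download_fw() above treats the diag copy engine strictly as a fast path: on any error the driver logs a warning and falls back to the proven BMI transport, so the optimization can never make boot less reliable. A minimal user-space sketch of that try-fast-then-fall-back idiom; fast_download() and slow_download() are hypothetical stand-ins for the diag-CE and BMI paths:

    #include <stdio.h>
    #include <stddef.h>

    /* Hypothetical transports mirroring diag CE vs. BMI: the fast one
     * may fail, the slow one is the reliable baseline.
     */
    static int fast_download(const void *buf, size_t len)
    {
            (void)buf; (void)len;
            return -1;              /* pretend the fast path failed */
    }

    static int slow_download(const void *buf, size_t len)
    {
            (void)buf; (void)len;
            return 0;
    }

    static int download_fw(const void *buf, size_t len, int have_fast_path)
    {
            int ret;

            if (have_fast_path) {
                    ret = fast_download(buf, len);
                    if (ret == 0)
                            return 0;       /* fast path done, skip BMI */
                    fprintf(stderr, "fast path failed (%d), falling back\n",
                            ret);
            }
            /* Unconditional fallback: a fast-path error is never fatal. */
            return slow_download(buf, len);
    }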
ar->normal_mode_fw.ext_board_data = board_ie_data; + ar->normal_mode_fw.ext_board_len = board_ie_len; + } ret = 0; goto out; @@ -1331,7 +1209,18 @@ static int ath10k_core_search_bd(struct ath10k *ar, switch (ie_id) { case ATH10K_BD_IE_BOARD: ret = ath10k_core_parse_bd_ie_board(ar, data, ie_len, - boardname); + boardname, + ATH10K_BD_IE_BOARD); + if (ret == -ENOENT) + /* no match found, continue */ + break; + + /* either found or error, so stop searching */ + goto out; + case ATH10K_BD_IE_BOARD_EXT: + ret = ath10k_core_parse_bd_ie_board(ar, data, ie_len, + boardname, + ATH10K_BD_IE_BOARD_EXT); if (ret == -ENOENT) /* no match found, continue */ break; @@ -1361,9 +1250,11 @@ static int ath10k_core_fetch_board_data_api_n(struct ath10k *ar, const u8 *data; int ret; - ar->normal_mode_fw.board = ath10k_fetch_fw_file(ar, - ar->hw_params.fw.dir, - filename); + /* Skip if already fetched during board data download */ + if (!ar->normal_mode_fw.board) + ar->normal_mode_fw.board = ath10k_fetch_fw_file(ar, + ar->hw_params.fw.dir, + filename); if (IS_ERR(ar->normal_mode_fw.board)) return PTR_ERR(ar->normal_mode_fw.board); @@ -1451,23 +1342,49 @@ out: return 0; } -static int ath10k_core_fetch_board_file(struct ath10k *ar) +static int ath10k_core_create_eboard_name(struct ath10k *ar, char *name, + size_t name_len) +{ + if (ar->id.bmi_ids_valid) { + scnprintf(name, name_len, + "bus=%s,bmi-chip-id=%d,bmi-eboard-id=%d", + ath10k_bus_str(ar->hif.bus), + ar->id.bmi_chip_id, + ar->id.bmi_eboard_id); + + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot using eboard name '%s'\n", name); + return 0; + } + /* Fallback if returned board id is zero */ + return -1; +} + +static int ath10k_core_fetch_board_file(struct ath10k *ar, int bd_ie_type) { char boardname[100], fallback_boardname[100]; int ret; - ret = ath10k_core_create_board_name(ar, boardname, - sizeof(boardname), true); - if (ret) { - ath10k_err(ar, "failed to create board name: %d", ret); - return ret; - } + if (bd_ie_type == ATH10K_BD_IE_BOARD) { + ret = ath10k_core_create_board_name(ar, boardname, + sizeof(boardname), true); + if (ret) { + ath10k_err(ar, "failed to create board name: %d", ret); + return ret; + } - ret = ath10k_core_create_board_name(ar, fallback_boardname, - sizeof(boardname), false); - if (ret) { - ath10k_err(ar, "failed to create fallback board name: %d", ret); - return ret; + ret = ath10k_core_create_board_name(ar, fallback_boardname, + sizeof(boardname), false); + if (ret) { + ath10k_err(ar, "failed to create fallback board name: %d", ret); + return ret; + } + } else if (bd_ie_type == ATH10K_BD_IE_BOARD_EXT) { + ret = ath10k_core_create_eboard_name(ar, boardname, + sizeof(boardname)); + if (ret) { + ath10k_err(ar, "fallback to eboard.bin since board id 0"); + goto fallback; + } } ar->bd_api = 2; @@ -1477,8 +1394,9 @@ static int ath10k_core_fetch_board_file(struct ath10k *ar) if (!ret) goto success; +fallback: ar->bd_api = 1; - ret = ath10k_core_fetch_board_data_api_1(ar); + ret = ath10k_core_fetch_board_data_api_1(ar, bd_ie_type); if (ret) { ath10k_err(ar, "failed to fetch board-2.bin or board.bin from %s\n", ar->hw_params.fw.dir); @@ -1490,6 +1408,291 @@ success: return 0; } +static int ath10k_core_get_ext_board_id_from_otp(struct ath10k *ar) +{ + u32 result, address; + u8 ext_board_id; + int ret; + + address = ar->hw_params.patch_load_addr; + + if (!ar->normal_mode_fw.fw_file.otp_data || + !ar->normal_mode_fw.fw_file.otp_len) { + ath10k_warn(ar, + "failed to retrieve extended board id due to otp binary missing\n"); + return -ENODATA; + } + + 
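In ath10k_core_search_bd() above, -ENOENT from the per-IE parser means "no match in this IE, keep walking", while 0 or any other error terminates the scan; that convention is what lets the BOARD and BOARD_EXT cases share one loop. A standalone sketch of the idea, with struct ie_hdr as a simplified stand-in for struct ath10k_fw_ie:

    #include <errno.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Simplified stand-in for struct ath10k_fw_ie. */
    struct ie_hdr {
            uint32_t id;
            uint32_t len;
            uint8_t data[];
    };

    static int search_ies(const uint8_t *buf, size_t buf_len,
                          int (*match)(const struct ie_hdr *ie))
    {
            size_t off = 0;

            while (off + sizeof(struct ie_hdr) <= buf_len) {
                    const struct ie_hdr *ie = (const void *)(buf + off);
                    int ret;

                    if (off + sizeof(*ie) + ie->len > buf_len)
                            return -EINVAL;         /* truncated IE */

                    ret = match(ie);
                    if (ret != -ENOENT)
                            return ret;     /* found it, or a hard error */

                    /* -ENOENT: no match in this IE, keep scanning */
                    off += sizeof(*ie) + ie->len;
            }
            return -ENOENT;
    }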
ath10k_dbg(ar, ATH10K_DBG_BOOT, + "boot upload otp to 0x%x len %zd for ext board id\n", + address, ar->normal_mode_fw.fw_file.otp_len); + + ret = ath10k_bmi_fast_download(ar, address, + ar->normal_mode_fw.fw_file.otp_data, + ar->normal_mode_fw.fw_file.otp_len); + if (ret) { + ath10k_err(ar, "could not write otp for ext board id check: %d\n", + ret); + return ret; + } + + ret = ath10k_bmi_execute(ar, address, BMI_PARAM_GET_EXT_BOARD_ID, &result); + if (ret) { + ath10k_err(ar, "could not execute otp for ext board id check: %d\n", + ret); + return ret; + } + + if (!result) { + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "ext board id does not exist in otp, ignore it\n"); + return -EOPNOTSUPP; + } + + ext_board_id = result & ATH10K_BMI_EBOARD_ID_STATUS_MASK; + + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "boot get otp ext board id result 0x%08x ext_board_id %d\n", + result, ext_board_id); + + ar->id.bmi_eboard_id = ext_board_id; + + return 0; +} + +static int ath10k_download_board_data(struct ath10k *ar, const void *data, + size_t data_len) +{ + u32 board_data_size = ar->hw_params.fw.board_size; + u32 eboard_data_size = ar->hw_params.fw.ext_board_size; + u32 board_address; + u32 ext_board_address; + int ret; + + ret = ath10k_push_board_ext_data(ar, data, data_len); + if (ret) { + ath10k_err(ar, "could not push board ext data (%d)\n", ret); + goto exit; + } + + ret = ath10k_bmi_read32(ar, hi_board_data, &board_address); + if (ret) { + ath10k_err(ar, "could not read board data addr (%d)\n", ret); + goto exit; + } + + ret = ath10k_bmi_write_memory(ar, board_address, data, + min_t(u32, board_data_size, + data_len)); + if (ret) { + ath10k_err(ar, "could not write board data (%d)\n", ret); + goto exit; + } + + ret = ath10k_bmi_write32(ar, hi_board_data_initialized, 1); + if (ret) { + ath10k_err(ar, "could not write board data bit (%d)\n", ret); + goto exit; + } + + if (!ar->id.ext_bid_supported) + goto exit; + + /* Extended board data download */ + ret = ath10k_core_get_ext_board_id_from_otp(ar); + if (ret == -EOPNOTSUPP) { + /* Not fetching ext_board_data if ext board id is 0 */ + ath10k_dbg(ar, ATH10K_DBG_BOOT, "otp returned ext board id 0\n"); + return 0; + } else if (ret) { + ath10k_err(ar, "failed to get extended board id: %d\n", ret); + goto exit; + } + + ret = ath10k_core_fetch_board_file(ar, ATH10K_BD_IE_BOARD_EXT); + if (ret) + goto exit; + + if (ar->normal_mode_fw.ext_board_data) { + ext_board_address = board_address + EXT_BOARD_ADDRESS_OFFSET; + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "boot writing ext board data to addr 0x%x", + ext_board_address); + ret = ath10k_bmi_write_memory(ar, ext_board_address, + ar->normal_mode_fw.ext_board_data, + min_t(u32, eboard_data_size, data_len)); + if (ret) + ath10k_err(ar, "failed to write ext board data: %d\n", ret); + } + +exit: + return ret; +} + +static int ath10k_download_and_run_otp(struct ath10k *ar) +{ + u32 result, address = ar->hw_params.patch_load_addr; + u32 bmi_otp_exe_param = ar->hw_params.otp_exe_param; + int ret; + + ret = ath10k_download_board_data(ar, + ar->running_fw->board_data, + ar->running_fw->board_len); + if (ret) { + ath10k_err(ar, "failed to download board data: %d\n", ret); + return ret; + } + + /* OTP is optional */ + + if (!ar->running_fw->fw_file.otp_data || + !ar->running_fw->fw_file.otp_len) { + ath10k_warn(ar, "Not running otp, calibration will be incorrect (otp-data %pK otp_len %zd)!\n", + ar->running_fw->fw_file.otp_data, + ar->running_fw->fw_file.otp_len); + return 0; + } + + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot upload otp to 0x%x len 
%zd\n", + address, ar->running_fw->fw_file.otp_len); + + ret = ath10k_bmi_fast_download(ar, address, + ar->running_fw->fw_file.otp_data, + ar->running_fw->fw_file.otp_len); + if (ret) { + ath10k_err(ar, "could not write otp (%d)\n", ret); + return ret; + } + + /* As of now pre-cal is valid for 10_4 variants */ + if (ar->cal_mode == ATH10K_PRE_CAL_MODE_DT || + ar->cal_mode == ATH10K_PRE_CAL_MODE_FILE) + bmi_otp_exe_param = BMI_PARAM_FLASH_SECTION_ALL; + + ret = ath10k_bmi_execute(ar, address, bmi_otp_exe_param, &result); + if (ret) { + ath10k_err(ar, "could not execute otp (%d)\n", ret); + return ret; + } + + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot otp execute result %d\n", result); + + if (!(skip_otp || test_bit(ATH10K_FW_FEATURE_IGNORE_OTP_RESULT, + ar->running_fw->fw_file.fw_features)) && + result != 0) { + ath10k_err(ar, "otp calibration failed: %d", result); + return -EINVAL; + } + + return 0; +} + +static int ath10k_download_cal_file(struct ath10k *ar, + const struct firmware *file) +{ + int ret; + + if (!file) + return -ENOENT; + + if (IS_ERR(file)) + return PTR_ERR(file); + + ret = ath10k_download_board_data(ar, file->data, file->size); + if (ret) { + ath10k_err(ar, "failed to download cal_file data: %d\n", ret); + return ret; + } + + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cal file downloaded\n"); + + return 0; +} + +static int ath10k_download_cal_dt(struct ath10k *ar, const char *dt_name) +{ + struct device_node *node; + int data_len; + void *data; + int ret; + + node = ar->dev->of_node; + if (!node) + /* Device Tree is optional, don't print any warnings if + * there's no node for ath10k. + */ + return -ENOENT; + + if (!of_get_property(node, dt_name, &data_len)) { + /* The calibration data node is optional */ + return -ENOENT; + } + + if (data_len != ar->hw_params.cal_data_len) { + ath10k_warn(ar, "invalid calibration data length in DT: %d\n", + data_len); + ret = -EMSGSIZE; + goto out; + } + + data = kmalloc(data_len, GFP_KERNEL); + if (!data) { + ret = -ENOMEM; + goto out; + } + + ret = of_property_read_u8_array(node, dt_name, data, data_len); + if (ret) { + ath10k_warn(ar, "failed to read calibration data from DT: %d\n", + ret); + goto out_free; + } + + ret = ath10k_download_board_data(ar, data, data_len); + if (ret) { + ath10k_warn(ar, "failed to download calibration data from Device Tree: %d\n", + ret); + goto out_free; + } + + ret = 0; + +out_free: + kfree(data); + +out: + return ret; +} + +static int ath10k_download_cal_eeprom(struct ath10k *ar) +{ + size_t data_len; + void *data = NULL; + int ret; + + ret = ath10k_hif_fetch_cal_eeprom(ar, &data, &data_len); + if (ret) { + if (ret != -EOPNOTSUPP) + ath10k_warn(ar, "failed to read calibration data from EEPROM: %d\n", + ret); + goto out_free; + } + + ret = ath10k_download_board_data(ar, data, data_len); + if (ret) { + ath10k_warn(ar, "failed to download calibration data from EEPROM: %d\n", + ret); + goto out_free; + } + + ret = 0; + +out_free: + kfree(data); + + return ret; +} + int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name, struct ath10k_fw_file *fw_file) { @@ -1882,7 +2085,8 @@ static int ath10k_init_hw_params(struct ath10k *ar) for (i = 0; i < ARRAY_SIZE(ath10k_hw_params_list); i++) { hw_params = &ath10k_hw_params_list[i]; - if (hw_params->id == ar->target_version && + if (hw_params->bus == ar->hif.bus && + hw_params->id == ar->target_version && hw_params->dev_id == ar->dev_id) break; } @@ -1983,6 +2187,7 @@ static void ath10k_core_set_coverage_class_work(struct work_struct *work) static int 
ath10k_core_init_firmware_features(struct ath10k *ar) { struct ath10k_fw_file *fw_file = &ar->normal_mode_fw.fw_file; + int max_num_peers; if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, fw_file->fw_features) && !test_bit(ATH10K_FW_FEATURE_WMI_10X, fw_file->fw_features)) { @@ -2062,7 +2267,7 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar) switch (fw_file->wmi_op_version) { case ATH10K_FW_WMI_OP_VERSION_MAIN: - ar->max_num_peers = TARGET_NUM_PEERS; + max_num_peers = TARGET_NUM_PEERS; ar->max_num_stations = TARGET_NUM_STATIONS; ar->max_num_vdevs = TARGET_NUM_VDEVS; ar->htt.max_num_pending_tx = TARGET_NUM_MSDU_DESC; @@ -2074,10 +2279,10 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar) case ATH10K_FW_WMI_OP_VERSION_10_2: case ATH10K_FW_WMI_OP_VERSION_10_2_4: if (ath10k_peer_stats_enabled(ar)) { - ar->max_num_peers = TARGET_10X_TX_STATS_NUM_PEERS; + max_num_peers = TARGET_10X_TX_STATS_NUM_PEERS; ar->max_num_stations = TARGET_10X_TX_STATS_NUM_STATIONS; } else { - ar->max_num_peers = TARGET_10X_NUM_PEERS; + max_num_peers = TARGET_10X_NUM_PEERS; ar->max_num_stations = TARGET_10X_NUM_STATIONS; } ar->max_num_vdevs = TARGET_10X_NUM_VDEVS; @@ -2086,7 +2291,7 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar) ar->max_spatial_stream = WMI_MAX_SPATIAL_STREAM; break; case ATH10K_FW_WMI_OP_VERSION_TLV: - ar->max_num_peers = TARGET_TLV_NUM_PEERS; + max_num_peers = TARGET_TLV_NUM_PEERS; ar->max_num_stations = TARGET_TLV_NUM_STATIONS; ar->max_num_vdevs = TARGET_TLV_NUM_VDEVS; ar->max_num_tdls_vdevs = TARGET_TLV_NUM_TDLS_VDEVS; @@ -2098,7 +2303,7 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar) ar->wmi.mgmt_max_num_pending_tx = TARGET_TLV_MGMT_NUM_MSDU_DESC; break; case ATH10K_FW_WMI_OP_VERSION_10_4: - ar->max_num_peers = TARGET_10_4_NUM_PEERS; + max_num_peers = TARGET_10_4_NUM_PEERS; ar->max_num_stations = TARGET_10_4_NUM_STATIONS; ar->num_active_peers = TARGET_10_4_ACTIVE_PEERS; ar->max_num_vdevs = TARGET_10_4_NUM_VDEVS; @@ -2117,10 +2322,16 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar) break; case ATH10K_FW_WMI_OP_VERSION_UNSET: case ATH10K_FW_WMI_OP_VERSION_MAX: + default: WARN_ON(1); return -EINVAL; } + if (ar->hw_params.num_peers) + ar->max_num_peers = ar->hw_params.num_peers; + else + ar->max_num_peers = max_num_peers; + /* Backwards compatibility for firmwares without * ATH10K_FW_IE_HTT_OP_VERSION. */ @@ -2370,6 +2581,10 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode, ar->wmi.svc_map)) val |= WMI_10_4_TDLS_UAPSD_BUFFER_STA; + if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, + ar->wmi.svc_map)) + val |= WMI_10_4_TX_DATA_ACK_RSSI; + status = ath10k_mac_ext_resource_config(ar, val); if (status) { ath10k_err(ar, @@ -2405,7 +2620,8 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode, * possible to implicitly make it correct by creating a dummy vdev and * then deleting it. 
*/ - if (mode == ATH10K_FIRMWARE_MODE_NORMAL) { + if (ar->hw_params.hw_filter_reset_required && + mode == ATH10K_FIRMWARE_MODE_NORMAL) { status = ath10k_core_reset_rx_filter(ar); if (status) { ath10k_err(ar, @@ -2593,7 +2809,7 @@ static int ath10k_core_probe_fw(struct ath10k *ar) if (ret) ath10k_dbg(ar, ATH10K_DBG_BOOT, "DT bdf variant name not set.\n"); - ret = ath10k_core_fetch_board_file(ar); + ret = ath10k_core_fetch_board_file(ar, ATH10K_BD_IE_BOARD); if (ret) { ath10k_err(ar, "failed to fetch board file: %d\n", ret); goto err_free_firmware_files; @@ -2602,6 +2818,8 @@ static int ath10k_core_probe_fw(struct ath10k *ar) ath10k_debug_print_board_info(ar); } + device_get_mac_address(ar->dev, ar->mac_addr, sizeof(ar->mac_addr)); + ret = ath10k_core_init_firmware_features(ar); if (ret) { ath10k_err(ar, "fatal problem with firmware features: %d\n", @@ -2714,9 +2932,11 @@ err: return; } -int ath10k_core_register(struct ath10k *ar, u32 chip_id) +int ath10k_core_register(struct ath10k *ar, + const struct ath10k_bus_params *bus_params) { - ar->chip_id = chip_id; + ar->chip_id = bus_params->chip_id; + ar->dev_type = bus_params->dev_type; queue_work(ar->workqueue, &ar->register_work); return 0; diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 9feea02e7d37..c76af343db3d 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -92,14 +92,6 @@ struct ath10k; -enum ath10k_bus { - ATH10K_BUS_PCI, - ATH10K_BUS_AHB, - ATH10K_BUS_SDIO, - ATH10K_BUS_USB, - ATH10K_BUS_SNOC, -}; - static inline const char *ath10k_bus_str(enum ath10k_bus bus) { switch (bus) { @@ -461,6 +453,36 @@ struct ath10k_sta_tid_stats { unsigned long int rx_pkt_amsdu[ATH10K_AMSDU_SUBFRM_NUM_MAX]; }; +enum ath10k_counter_type { + ATH10K_COUNTER_TYPE_BYTES, + ATH10K_COUNTER_TYPE_PKTS, + ATH10K_COUNTER_TYPE_MAX, +}; + +enum ath10k_stats_type { + ATH10K_STATS_TYPE_SUCC, + ATH10K_STATS_TYPE_FAIL, + ATH10K_STATS_TYPE_RETRY, + ATH10K_STATS_TYPE_AMPDU, + ATH10K_STATS_TYPE_MAX, +}; + +struct ath10k_htt_data_stats { + u64 legacy[ATH10K_COUNTER_TYPE_MAX][ATH10K_LEGACY_NUM]; + u64 ht[ATH10K_COUNTER_TYPE_MAX][ATH10K_HT_MCS_NUM]; + u64 vht[ATH10K_COUNTER_TYPE_MAX][ATH10K_VHT_MCS_NUM]; + u64 bw[ATH10K_COUNTER_TYPE_MAX][ATH10K_BW_NUM]; + u64 nss[ATH10K_COUNTER_TYPE_MAX][ATH10K_NSS_NUM]; + u64 gi[ATH10K_COUNTER_TYPE_MAX][ATH10K_GI_NUM]; +}; + +struct ath10k_htt_tx_stats { + struct ath10k_htt_data_stats stats[ATH10K_STATS_TYPE_MAX]; + u64 tx_duration; + u64 ba_fails; + u64 ack_fails; +}; + struct ath10k_sta { struct ath10k_vif *arvif; @@ -474,6 +496,7 @@ struct ath10k_sta { struct work_struct update_wk; u64 rx_duration; + struct ath10k_htt_tx_stats *tx_stats; #ifdef CONFIG_MAC80211_DEBUGFS /* protected by conf_mutex */ @@ -482,6 +505,8 @@ struct ath10k_sta { /* Protected with ar->data_lock */ struct ath10k_sta_tid_stats tid_stats[IEEE80211_NUM_TIDS + 1]; #endif + /* Protected with ar->data_lock */ + u32 peer_ps_state; }; #define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5 * HZ) @@ -607,6 +632,7 @@ struct ath10k_debug { u32 reg_addr; u32 nf_cal_period; void *cal_data; + u32 enable_extd_tx_stats; }; enum ath10k_state { @@ -861,6 +887,9 @@ struct ath10k_fw_components { const struct firmware *board; const void *board_data; size_t board_len; + const struct firmware *ext_board; + const void *ext_board_data; + size_t ext_board_len; struct ath10k_fw_file fw_file; }; @@ -880,6 +909,16 @@ struct ath10k_per_peer_tx_stats { u32 reserved2; }; +enum ath10k_dev_type { + 
ATH10K_DEV_TYPE_LL, + ATH10K_DEV_TYPE_HL, +}; + +struct ath10k_bus_params { + u32 chip_id; + enum ath10k_dev_type dev_type; +}; + struct ath10k { struct ath_common ath_common; struct ieee80211_hw *hw; @@ -890,6 +929,7 @@ struct ath10k { enum ath10k_hw_rev hw_rev; u16 dev_id; u32 chip_id; + enum ath10k_dev_type dev_type; u32 target_version; u8 fw_version_major; u32 fw_version_minor; @@ -908,6 +948,8 @@ struct ath10k { u32 low_5ghz_chan; u32 high_5ghz_chan; bool ani_enabled; + /* protected by conf_mutex */ + u8 ps_state_enable; bool p2p; @@ -947,7 +989,9 @@ struct ath10k { bool bmi_ids_valid; u8 bmi_board_id; + u8 bmi_eboard_id; u8 bmi_chip_id; + bool ext_bid_supported; char bdf_ext[ATH10K_SMBIOS_BDF_EXT_STR_LENGTH]; } id; @@ -1003,6 +1047,7 @@ struct ath10k { struct completion install_key_done; + int last_wmi_vdev_start_status; struct completion vdev_setup_done; struct workqueue_struct *workqueue; @@ -1167,7 +1212,8 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode, const struct ath10k_fw_components *fw_components); int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt); void ath10k_core_stop(struct ath10k *ar); -int ath10k_core_register(struct ath10k *ar, u32 chip_id); +int ath10k_core_register(struct ath10k *ar, + const struct ath10k_bus_params *bus_params); void ath10k_core_unregister(struct ath10k *ar); #endif /* _CORE_H_ */ diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index 0baaad90b8d1..2c0cb6757fc6 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -2042,6 +2042,61 @@ static const struct file_operations fops_btcoex = { .open = simple_open }; +static ssize_t ath10k_write_enable_extd_tx_stats(struct file *file, + const char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + u32 filter; + int ret; + + if (kstrtouint_from_user(ubuf, count, 0, &filter)) + return -EINVAL; + + mutex_lock(&ar->conf_mutex); + + if (ar->state != ATH10K_STATE_ON) { + ar->debug.enable_extd_tx_stats = filter; + ret = count; + goto out; + } + + if (filter == ar->debug.enable_extd_tx_stats) { + ret = count; + goto out; + } + + ar->debug.enable_extd_tx_stats = filter; + ret = count; + +out: + mutex_unlock(&ar->conf_mutex); + return ret; +} + +static ssize_t ath10k_read_enable_extd_tx_stats(struct file *file, + char __user *ubuf, + size_t count, loff_t *ppos) + +{ + char buf[32]; + struct ath10k *ar = file->private_data; + int len = 0; + + mutex_lock(&ar->conf_mutex); + len = scnprintf(buf, sizeof(buf) - len, "%08x\n", + ar->debug.enable_extd_tx_stats); + mutex_unlock(&ar->conf_mutex); + + return simple_read_from_buffer(ubuf, count, ppos, buf, len); +} + +static const struct file_operations fops_enable_extd_tx_stats = { + .read = ath10k_read_enable_extd_tx_stats, + .write = ath10k_write_enable_extd_tx_stats, + .open = simple_open +}; + static ssize_t ath10k_write_peer_stats(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) @@ -2343,6 +2398,85 @@ static const struct file_operations fops_warm_hw_reset = { .llseek = default_llseek, }; +static void ath10k_peer_ps_state_disable(void *data, + struct ieee80211_sta *sta) +{ + struct ath10k *ar = data; + struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; + + spin_lock_bh(&ar->data_lock); + arsta->peer_ps_state = WMI_PEER_PS_STATE_DISABLED; + spin_unlock_bh(&ar->data_lock); +} + +static ssize_t ath10k_write_ps_state_enable(struct file *file, + const char __user *user_buf, 
+ size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + int ret; + u32 param; + u8 ps_state_enable; + + if (kstrtou8_from_user(user_buf, count, 0, &ps_state_enable)) + return -EINVAL; + + if (ps_state_enable > 1 || ps_state_enable < 0) + return -EINVAL; + + mutex_lock(&ar->conf_mutex); + + if (ar->ps_state_enable == ps_state_enable) { + ret = count; + goto exit; + } + + param = ar->wmi.pdev_param->peer_sta_ps_statechg_enable; + ret = ath10k_wmi_pdev_set_param(ar, param, ps_state_enable); + if (ret) { + ath10k_warn(ar, "failed to enable ps_state_enable: %d\n", + ret); + goto exit; + } + ar->ps_state_enable = ps_state_enable; + + if (!ar->ps_state_enable) + ieee80211_iterate_stations_atomic(ar->hw, + ath10k_peer_ps_state_disable, + ar); + + ret = count; + +exit: + mutex_unlock(&ar->conf_mutex); + + return ret; +} + +static ssize_t ath10k_read_ps_state_enable(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + int len = 0; + char buf[32]; + + mutex_lock(&ar->conf_mutex); + len = scnprintf(buf, sizeof(buf) - len, "%d\n", + ar->ps_state_enable); + mutex_unlock(&ar->conf_mutex); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static const struct file_operations fops_ps_state_enable = { + .read = ath10k_read_ps_state_enable, + .write = ath10k_write_ps_state_enable, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + int ath10k_debug_create(struct ath10k *ar) { ar->debug.cal_data = vzalloc(ATH10K_DEBUG_CAL_DATA_LEN); @@ -2454,10 +2588,15 @@ int ath10k_debug_register(struct ath10k *ar) debugfs_create_file("btcoex", 0644, ar->debug.debugfs_phy, ar, &fops_btcoex); - if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map)) + if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map)) { debugfs_create_file("peer_stats", 0644, ar->debug.debugfs_phy, ar, &fops_peer_stats); + debugfs_create_file("enable_extd_tx_stats", 0644, + ar->debug.debugfs_phy, ar, + &fops_enable_extd_tx_stats); + } + debugfs_create_file("fw_checksums", 0400, ar->debug.debugfs_phy, ar, &fops_fw_checksums); @@ -2474,6 +2613,9 @@ int ath10k_debug_register(struct ath10k *ar) debugfs_create_file("warm_hw_reset", 0600, ar->debug.debugfs_phy, ar, &fops_warm_hw_reset); + debugfs_create_file("ps_state_enable", 0600, ar->debug.debugfs_phy, ar, + &fops_ps_state_enable); + return 0; } diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h index 0afca5c106b6..3a6191cff2f9 100644 --- a/drivers/net/wireless/ath/ath10k/debug.h +++ b/drivers/net/wireless/ath/ath10k/debug.h @@ -128,6 +128,10 @@ static inline u32 ath10k_debug_get_fw_dbglog_level(struct ath10k *ar) return ar->debug.fw_dbglog_level; } +static inline int ath10k_debug_is_extd_tx_stats_enabled(struct ath10k *ar) +{ + return ar->debug.enable_extd_tx_stats; +} #else static inline int ath10k_debug_start(struct ath10k *ar) @@ -190,6 +194,11 @@ static inline u32 ath10k_debug_get_fw_dbglog_level(struct ath10k *ar) return 0; } +static inline int ath10k_debug_is_extd_tx_stats_enabled(struct ath10k *ar) +{ + return 0; +} + #define ATH10K_DFS_STAT_INC(ar, c) do { } while (0) #define ath10k_debug_get_et_strings NULL diff --git a/drivers/net/wireless/ath/ath10k/debugfs_sta.c b/drivers/net/wireless/ath/ath10k/debugfs_sta.c index a63c97e2c50c..b09cdc699c69 100644 --- a/drivers/net/wireless/ath/ath10k/debugfs_sta.c +++ b/drivers/net/wireless/ath/ath10k/debugfs_sta.c @@ -460,6 +460,33 @@ static const struct file_operations 
fops_peer_debug_trigger = { .llseek = default_llseek, }; +static ssize_t ath10k_dbg_sta_read_peer_ps_state(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_sta *sta = file->private_data; + struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; + struct ath10k *ar = arsta->arvif->ar; + char buf[20]; + int len = 0; + + spin_lock_bh(&ar->data_lock); + + len = scnprintf(buf, sizeof(buf) - len, "%d\n", + arsta->peer_ps_state); + + spin_unlock_bh(&ar->data_lock); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static const struct file_operations fops_peer_ps_state = { + .open = simple_open, + .read = ath10k_dbg_sta_read_peer_ps_state, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + static char *get_err_str(enum ath10k_pkt_rx_err i) { switch (i) { @@ -626,9 +653,105 @@ static const struct file_operations fops_tid_stats_dump = { .llseek = default_llseek, }; +static ssize_t ath10k_dbg_sta_dump_tx_stats(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_sta *sta = file->private_data; + struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; + struct ath10k *ar = arsta->arvif->ar; + struct ath10k_htt_data_stats *stats; + const char *str_name[ATH10K_STATS_TYPE_MAX] = {"succ", "fail", + "retry", "ampdu"}; + const char *str[ATH10K_COUNTER_TYPE_MAX] = {"bytes", "packets"}; + int len = 0, i, j, k, retval = 0; + const int size = 2 * 4096; + char *buf; + + buf = kzalloc(size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + mutex_lock(&ar->conf_mutex); + + spin_lock_bh(&ar->data_lock); + for (k = 0; k < ATH10K_STATS_TYPE_MAX; k++) { + for (j = 0; j < ATH10K_COUNTER_TYPE_MAX; j++) { + stats = &arsta->tx_stats->stats[k]; + len += scnprintf(buf + len, size - len, "%s_%s\n", + str_name[k], + str[j]); + len += scnprintf(buf + len, size - len, + " VHT MCS %s\n", + str[j]); + for (i = 0; i < ATH10K_VHT_MCS_NUM; i++) + len += scnprintf(buf + len, size - len, + " %llu ", + stats->vht[j][i]); + len += scnprintf(buf + len, size - len, "\n"); + len += scnprintf(buf + len, size - len, " HT MCS %s\n", + str[j]); + for (i = 0; i < ATH10K_HT_MCS_NUM; i++) + len += scnprintf(buf + len, size - len, + " %llu ", stats->ht[j][i]); + len += scnprintf(buf + len, size - len, "\n"); + len += scnprintf(buf + len, size - len, + " BW %s (20,40,80,160 MHz)\n", str[j]); + len += scnprintf(buf + len, size - len, + " %llu %llu %llu %llu\n", + stats->bw[j][0], stats->bw[j][1], + stats->bw[j][2], stats->bw[j][3]); + len += scnprintf(buf + len, size - len, + " NSS %s (1x1,2x2,3x3,4x4)\n", str[j]); + len += scnprintf(buf + len, size - len, + " %llu %llu %llu %llu\n", + stats->nss[j][0], stats->nss[j][1], + stats->nss[j][2], stats->nss[j][3]); + len += scnprintf(buf + len, size - len, + " GI %s (LGI,SGI)\n", + str[j]); + len += scnprintf(buf + len, size - len, " %llu %llu\n", + stats->gi[j][0], stats->gi[j][1]); + len += scnprintf(buf + len, size - len, + " legacy rate %s (1,2 ... 
Mbps)\n ", + str[j]); + for (i = 0; i < ATH10K_LEGACY_NUM; i++) + len += scnprintf(buf + len, size - len, "%llu ", + stats->legacy[j][i]); + len += scnprintf(buf + len, size - len, "\n"); + } + } + + len += scnprintf(buf + len, size - len, + "\nTX duration\n %llu usecs\n", + arsta->tx_stats->tx_duration); + len += scnprintf(buf + len, size - len, + "BA fails\n %llu\n", arsta->tx_stats->ba_fails); + len += scnprintf(buf + len, size - len, + "ack fails\n %llu\n", arsta->tx_stats->ack_fails); + spin_unlock_bh(&ar->data_lock); + + if (len > size) + len = size; + retval = simple_read_from_buffer(user_buf, count, ppos, buf, len); + kfree(buf); + + mutex_unlock(&ar->conf_mutex); + return retval; +} + +static const struct file_operations fops_tx_stats = { + .read = ath10k_dbg_sta_dump_tx_stats, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct dentry *dir) { + struct ath10k *ar = hw->priv; + debugfs_create_file("aggr_mode", 0644, dir, sta, &fops_aggr_mode); debugfs_create_file("addba", 0200, dir, sta, &fops_addba); debugfs_create_file("addba_resp", 0200, dir, sta, &fops_addba_resp); @@ -637,4 +760,11 @@ void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif, &fops_peer_debug_trigger); debugfs_create_file("dump_tid_stats", 0400, dir, sta, &fops_tid_stats_dump); + + if (ath10k_peer_stats_enabled(ar) && + ath10k_debug_is_extd_tx_stats_enabled(ar)) + debugfs_create_file("tx_stats", 0400, dir, sta, + &fops_tx_stats); + debugfs_create_file("peer_ps_state", 0400, dir, sta, + &fops_peer_ps_state); } diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c index 331b8d558791..28daed5981a1 100644 --- a/drivers/net/wireless/ath/ath10k/htc.c +++ b/drivers/net/wireless/ath/ath10k/htc.c @@ -53,7 +53,8 @@ static inline void ath10k_htc_restore_tx_skb(struct ath10k_htc *htc, { struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb); - dma_unmap_single(htc->ar->dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE); + if (htc->ar->dev_type != ATH10K_DEV_TYPE_HL) + dma_unmap_single(htc->ar->dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE); skb_pull(skb, sizeof(struct ath10k_htc_hdr)); } @@ -137,11 +138,14 @@ int ath10k_htc_send(struct ath10k_htc *htc, ath10k_htc_prepare_tx_skb(ep, skb); skb_cb->eid = eid; - skb_cb->paddr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE); - ret = dma_mapping_error(dev, skb_cb->paddr); - if (ret) { - ret = -EIO; - goto err_credits; + if (ar->dev_type != ATH10K_DEV_TYPE_HL) { + skb_cb->paddr = dma_map_single(dev, skb->data, skb->len, + DMA_TO_DEVICE); + ret = dma_mapping_error(dev, skb_cb->paddr); + if (ret) { + ret = -EIO; + goto err_credits; + } } sg_item.transfer_id = ep->eid; @@ -157,7 +161,8 @@ int ath10k_htc_send(struct ath10k_htc *htc, return 0; err_unmap: - dma_unmap_single(dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE); + if (ar->dev_type != ATH10K_DEV_TYPE_HL) + dma_unmap_single(dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE); err_credits: if (ep->tx_credit_flow_enabled) { spin_lock_bh(&htc->tx_lock); @@ -803,8 +808,11 @@ setup: ep->service_id, &ep->ul_pipe_id, &ep->dl_pipe_id); - if (status) + if (status) { + ath10k_warn(ar, "unsupported HTC service id: %d\n", + ep->service_id); return status; + } ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot htc service '%s' ul pipe %d dl pipe %d eid %d ready\n", @@ -838,6 +846,56 @@ struct sk_buff *ath10k_htc_alloc_skb(struct ath10k *ar, int size) return skb; } 
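The tx_stats dump above relies on the kernel's scnprintf(), which returns the number of bytes actually written (never more than the space left), so the repeated "buf + len, size - len" appends stay in bounds even after the buffer fills. A user-space sketch of the pattern with a stand-in scnprintf():

    #include <stdarg.h>
    #include <stdio.h>

    /* User-space stand-in for the kernel's scnprintf(): returns what was
     * actually written, never more than size - 1.
     */
    static int scnprintf(char *buf, size_t size, const char *fmt, ...)
    {
            va_list args;
            int n;

            if (size == 0)
                    return 0;
            va_start(args, fmt);
            n = vsnprintf(buf, size, fmt, args);
            va_end(args);
            if (n < 0)
                    return 0;
            return (size_t)n >= size ? (int)(size - 1) : n;
    }

    /* Appending with "buf + len, size - len" is safe even once the
     * buffer fills, because scnprintf() never over-reports.
     */
    static int dump_counters(char *buf, size_t size,
                             const unsigned long long *vals, int n)
    {
            int len = 0, i;

            for (i = 0; i < n; i++)
                    len += scnprintf(buf + len, size - len, "%llu ", vals[i]);
            len += scnprintf(buf + len, size - len, "\n");
            return len;
    }

With these semantics len can never exceed the buffer size, so the final "if (len > size) len = size;" clamp in the hunk above is only a belt-and-braces guard.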
+static void ath10k_htc_pktlog_process_rx(struct ath10k *ar, struct sk_buff *skb) +{ + trace_ath10k_htt_pktlog(ar, skb->data, skb->len); + dev_kfree_skb_any(skb); +} + +static int ath10k_htc_pktlog_connect(struct ath10k *ar) +{ + struct ath10k_htc_svc_conn_resp conn_resp; + struct ath10k_htc_svc_conn_req conn_req; + int status; + + memset(&conn_req, 0, sizeof(conn_req)); + memset(&conn_resp, 0, sizeof(conn_resp)); + + conn_req.ep_ops.ep_tx_complete = NULL; + conn_req.ep_ops.ep_rx_complete = ath10k_htc_pktlog_process_rx; + conn_req.ep_ops.ep_tx_credits = NULL; + + /* connect to control service */ + conn_req.service_id = ATH10K_HTC_SVC_ID_HTT_LOG_MSG; + status = ath10k_htc_connect_service(&ar->htc, &conn_req, &conn_resp); + if (status) { + ath10k_warn(ar, "failed to connect to PKTLOG service: %d\n", + status); + return status; + } + + return 0; +} + +static bool ath10k_htc_pktlog_svc_supported(struct ath10k *ar) +{ + u8 ul_pipe_id; + u8 dl_pipe_id; + int status; + + status = ath10k_hif_map_service_to_pipe(ar, ATH10K_HTC_SVC_ID_HTT_LOG_MSG, + &ul_pipe_id, + &dl_pipe_id); + if (status) { + ath10k_warn(ar, "unsupported HTC service id: %d\n", + ATH10K_HTC_SVC_ID_HTT_LOG_MSG); + + return false; + } + + return true; +} + int ath10k_htc_start(struct ath10k_htc *htc) { struct ath10k *ar = htc->ar; @@ -871,6 +929,14 @@ int ath10k_htc_start(struct ath10k_htc *htc) return status; } + if (ath10k_htc_pktlog_svc_supported(ar)) { + status = ath10k_htc_pktlog_connect(ar); + if (status) { + ath10k_err(ar, "failed to connect to pktlog: %d\n", status); + return status; + } + } + return 0; } diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h index 5d3ff80f3a1f..a76f7c9e2199 100644 --- a/drivers/net/wireless/ath/ath10k/htt.h +++ b/drivers/net/wireless/ath/ath10k/htt.h @@ -29,7 +29,6 @@ #include "htc.h" #include "hw.h" #include "rx_desc.h" -#include "hw.h" enum htt_dbg_stats_type { HTT_DBG_STATS_WAL_PDEV_TXRX = 1 << 0, @@ -577,6 +576,8 @@ struct htt_mgmt_tx_completion { #define HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES_MASK 0xFF000000 #define HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES_LSB 24 +#define HTT_TX_CMPL_FLAG_DATA_RSSI BIT(0) + struct htt_rx_indication_hdr { u8 info0; /* %HTT_RX_INDICATION_INFO0_ */ __le16 peer_id; @@ -719,6 +720,15 @@ struct htt_rx_indication { struct htt_rx_indication_mpdu_range mpdu_ranges[0]; } __packed; +/* High latency version of the RX indication */ +struct htt_rx_indication_hl { + struct htt_rx_indication_hdr hdr; + struct htt_rx_indication_ppdu ppdu; + struct htt_rx_indication_prefix prefix; + struct fw_rx_desc_hl fw_desc; + struct htt_rx_indication_mpdu_range mpdu_ranges[0]; +} __packed; + static inline struct htt_rx_indication_mpdu_range * htt_rx_ind_get_mpdu_ranges(struct htt_rx_indication *rx_ind) { @@ -731,6 +741,18 @@ static inline struct htt_rx_indication_mpdu_range * return ptr; } +static inline struct htt_rx_indication_mpdu_range * + htt_rx_ind_get_mpdu_ranges_hl(struct htt_rx_indication_hl *rx_ind) +{ + void *ptr = rx_ind; + + ptr += sizeof(rx_ind->hdr) + + sizeof(rx_ind->ppdu) + + sizeof(rx_ind->prefix) + + sizeof(rx_ind->fw_desc); + return ptr; +} + enum htt_rx_flush_mpdu_status { HTT_RX_FLUSH_MPDU_DISCARD = 0, HTT_RX_FLUSH_MPDU_REORDER = 1, @@ -840,7 +862,7 @@ struct htt_data_tx_completion { } __packed; } __packed; u8 num_msdus; - u8 rsvd0; + u8 flags2; /* HTT_TX_CMPL_FLAG_DATA_RSSI */ __le16 msdus[0]; /* variable length based on %num_msdus */ } __packed; @@ -1641,6 +1663,7 @@ struct htt_resp { struct htt_mgmt_tx_completion 
mgmt_tx_completion; struct htt_data_tx_completion data_tx_completion; struct htt_rx_indication rx_ind; + struct htt_rx_indication_hl rx_ind_hl; struct htt_rx_fragment_indication rx_frag_ind; struct htt_rx_peer_map peer_map; struct htt_rx_peer_unmap peer_unmap; @@ -1994,6 +2017,31 @@ struct htt_rx_desc { u8 msdu_payload[0]; }; +#define HTT_RX_DESC_HL_INFO_SEQ_NUM_MASK 0x00000fff +#define HTT_RX_DESC_HL_INFO_SEQ_NUM_LSB 0 +#define HTT_RX_DESC_HL_INFO_ENCRYPTED_MASK 0x00001000 +#define HTT_RX_DESC_HL_INFO_ENCRYPTED_LSB 12 +#define HTT_RX_DESC_HL_INFO_CHAN_INFO_PRESENT_MASK 0x00002000 +#define HTT_RX_DESC_HL_INFO_CHAN_INFO_PRESENT_LSB 13 +#define HTT_RX_DESC_HL_INFO_MCAST_BCAST_MASK 0x00008000 +#define HTT_RX_DESC_HL_INFO_MCAST_BCAST_LSB 15 +#define HTT_RX_DESC_HL_INFO_FRAGMENT_MASK 0x00010000 +#define HTT_RX_DESC_HL_INFO_FRAGMENT_LSB 16 +#define HTT_RX_DESC_HL_INFO_KEY_ID_OCT_MASK 0x01fe0000 +#define HTT_RX_DESC_HL_INFO_KEY_ID_OCT_LSB 17 + +struct htt_rx_desc_base_hl { + __le32 info; /* HTT_RX_DESC_HL_INFO_ */ +}; + +struct htt_rx_chan_info { + __le16 primary_chan_center_freq_mhz; + __le16 contig_chan1_center_freq_mhz; + __le16 contig_chan2_center_freq_mhz; + u8 phy_mode; + u8 reserved; +} __packed; + #define HTT_RX_DESC_ALIGN 8 #define HTT_MAC_ADDR_LEN 6 diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index 4d1cd90d6d27..f2405258a6d3 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -265,6 +265,9 @@ int ath10k_htt_rx_ring_refill(struct ath10k *ar) struct ath10k_htt *htt = &ar->htt; int ret; + if (ar->dev_type == ATH10K_DEV_TYPE_HL) + return 0; + spin_lock_bh(&htt->rx_ring.lock); ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level - htt->rx_ring.fill_cnt)); @@ -279,6 +282,9 @@ int ath10k_htt_rx_ring_refill(struct ath10k *ar) void ath10k_htt_rx_free(struct ath10k_htt *htt) { + if (htt->ar->dev_type == ATH10K_DEV_TYPE_HL) + return; + del_timer_sync(&htt->rx_ring.refill_retry_timer); skb_queue_purge(&htt->rx_msdus_q); @@ -570,6 +576,9 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt) size_t size; struct timer_list *timer = &htt->rx_ring.refill_retry_timer; + if (ar->dev_type == ATH10K_DEV_TYPE_HL) + return 0; + htt->rx_confused = false; /* XXX: The fill level could be changed during runtime in response to @@ -1176,11 +1185,11 @@ static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar, */ /* This probably shouldn't happen but warn just in case */ - if (unlikely(WARN_ON_ONCE(!is_first))) + if (WARN_ON_ONCE(!is_first)) return; /* This probably shouldn't happen but warn just in case */ - if (unlikely(WARN_ON_ONCE(!(is_first && is_last)))) + if (WARN_ON_ONCE(!(is_first && is_last))) return; skb_trim(msdu, msdu->len - FCS_LEN); @@ -1846,8 +1855,116 @@ static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt) return 0; } -static void ath10k_htt_rx_proc_rx_ind(struct ath10k_htt *htt, - struct htt_rx_indication *rx) +static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt, + struct htt_rx_indication_hl *rx, + struct sk_buff *skb) +{ + struct ath10k *ar = htt->ar; + struct ath10k_peer *peer; + struct htt_rx_indication_mpdu_range *mpdu_ranges; + struct fw_rx_desc_hl *fw_desc; + struct ieee80211_hdr *hdr; + struct ieee80211_rx_status *rx_status; + u16 peer_id; + u8 rx_desc_len; + int num_mpdu_ranges; + size_t tot_hdr_len; + struct ieee80211_channel *ch; + + peer_id = __le16_to_cpu(rx->hdr.peer_id); + + spin_lock_bh(&ar->data_lock); + peer = ath10k_peer_find_by_id(ar, peer_id); + 
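htt_rx_ind_get_mpdu_ranges_hl() a few hunks up locates the trailing MPDU-range array by summing the sizes of the fixed members in front of it, since the ranges begin immediately after the last fixed field. A compact sketch of that layout trick; hdr_a and hdr_b are simplified stand-ins for the real HTT headers:

    #include <stdint.h>

    /* Simplified stand-ins for the fixed headers that precede the
     * variable-length range array (cf. struct htt_rx_indication_hl).
     */
    struct hdr_a { uint32_t w[2]; };
    struct hdr_b { uint32_t w[4]; };

    struct range { uint16_t start; uint16_t count; };

    struct msg {
            struct hdr_a a;
            struct hdr_b b;
            /* struct range ranges[] follows in the wire format */
    };

    static struct range *msg_ranges(struct msg *m)
    {
            /* Step over every fixed member; the ranges start right
             * after, exactly as in htt_rx_ind_get_mpdu_ranges_hl().
             */
            return (struct range *)((char *)m +
                                    sizeof(m->a) + sizeof(m->b));
    }

Summing the member sizes explicitly mirrors the kernel helper, which must step past each fixed field rather than take sizeof() of a struct that also declares a zero-length trailer.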
spin_unlock_bh(&ar->data_lock); + if (!peer) + ath10k_warn(ar, "Got RX ind from invalid peer: %u\n", peer_id); + + num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1), + HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES); + mpdu_ranges = htt_rx_ind_get_mpdu_ranges_hl(rx); + fw_desc = &rx->fw_desc; + rx_desc_len = fw_desc->len; + + /* I have not yet seen any case where num_mpdu_ranges > 1. + * qcacld does not seem to handle that case either, so we introduce the + * same limitation here as well. + */ + if (num_mpdu_ranges > 1) + ath10k_warn(ar, + "Unsupported number of MPDU ranges: %d, ignoring all but the first\n", + num_mpdu_ranges); + + if (mpdu_ranges->mpdu_range_status != + HTT_RX_IND_MPDU_STATUS_OK) { + ath10k_warn(ar, "MPDU range status: %d\n", + mpdu_ranges->mpdu_range_status); + goto err; + } + + /* Strip off all headers preceding the MAC header before delivery to + * mac80211 + */ + tot_hdr_len = sizeof(struct htt_resp_hdr) + sizeof(rx->hdr) + + sizeof(rx->ppdu) + sizeof(rx->prefix) + + sizeof(rx->fw_desc) + + sizeof(*mpdu_ranges) * num_mpdu_ranges + rx_desc_len; + skb_pull(skb, tot_hdr_len); + + hdr = (struct ieee80211_hdr *)skb->data; + rx_status = IEEE80211_SKB_RXCB(skb); + rx_status->chains |= BIT(0); + rx_status->signal = ATH10K_DEFAULT_NOISE_FLOOR + + rx->ppdu.combined_rssi; + rx_status->flag &= ~RX_FLAG_NO_SIGNAL_VAL; + + spin_lock_bh(&ar->data_lock); + ch = ar->scan_channel; + if (!ch) + ch = ar->rx_channel; + if (!ch) + ch = ath10k_htt_rx_h_any_channel(ar); + if (!ch) + ch = ar->tgt_oper_chan; + spin_unlock_bh(&ar->data_lock); + + if (ch) { + rx_status->band = ch->band; + rx_status->freq = ch->center_freq; + } + if (rx->fw_desc.flags & FW_RX_DESC_FLAGS_LAST_MSDU) + rx_status->flag &= ~RX_FLAG_AMSDU_MORE; + else + rx_status->flag |= RX_FLAG_AMSDU_MORE; + + /* Not entirely sure about this, but all frames from the chipset have + * the protected flag set even though they have already been decrypted. + * Clearing this flag is necessary in order for mac80211 not to drop + * the frame. + * TODO: Verify this is always the case or find out a way to check + * if there has been hw decryption. + */ + if (ieee80211_has_protected(hdr->frame_control)) { + hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED); + rx_status->flag |= RX_FLAG_DECRYPTED | + RX_FLAG_IV_STRIPPED | + RX_FLAG_MMIC_STRIPPED; + } + + ieee80211_rx_ni(ar->hw, skb); + + /* We have delivered the skb to the upper layers (mac80211) so we + * must not free it.
+ */ + return false; +err: + /* Tell the caller that it must free the skb since we have not + * consumed it + */ + return true; +} + +static void ath10k_htt_rx_proc_rx_ind_ll(struct ath10k_htt *htt, + struct htt_rx_indication *rx) { struct ath10k *ar = htt->ar; struct htt_rx_indication_mpdu_range *mpdu_ranges; @@ -1884,7 +2001,9 @@ static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar, struct htt_resp *resp = (struct htt_resp *)skb->data; struct htt_tx_done tx_done = {}; int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS); - __le16 msdu_id; + __le16 msdu_id, *msdus; + bool rssi_enabled = false; + u8 msdu_count = 0; int i; switch (status) { @@ -1908,10 +2027,30 @@ static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar, ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n", resp->data_tx_completion.num_msdus); - for (i = 0; i < resp->data_tx_completion.num_msdus; i++) { - msdu_id = resp->data_tx_completion.msdus[i]; + msdu_count = resp->data_tx_completion.num_msdus; + + if (resp->data_tx_completion.flags2 & HTT_TX_CMPL_FLAG_DATA_RSSI) + rssi_enabled = true; + + for (i = 0; i < msdu_count; i++) { + msdus = resp->data_tx_completion.msdus; + msdu_id = msdus[i]; tx_done.msdu_id = __le16_to_cpu(msdu_id); + if (rssi_enabled) { + /* Total no of MSDUs should be even, + * if odd MSDUs are sent firmware fills + * last msdu id with 0xffff + */ + if (msdu_count & 0x01) { + msdu_id = msdus[msdu_count + i + 1]; + tx_done.ack_rssi = __le16_to_cpu(msdu_id); + } else { + msdu_id = msdus[msdu_count + i]; + tx_done.ack_rssi = __le16_to_cpu(msdu_id); + } + } + /* kfifo_put: In practice firmware shouldn't fire off per-CE * interrupt and main interrupt (MSI/-X range case) for the same * HTC service so it should be safe to use kfifo_put w/o lock. 
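The completion handler above pulls per-MSDU ACK RSSI out of the same 16-bit msdus[] array that carries the MSDU ids: the id block is padded with 0xffff to an even count, and the RSSI words follow it, which is what the odd/even indexing implements. A sketch of a parser for that layout, assuming the firmware format described in the hunk's comment:

    #include <stdint.h>

    /* msdus[] holds msdu_count ids, padded with 0xffff to an even
     * length, followed by one ACK RSSI word per MSDU.
     */
    static void parse_completion(const uint16_t *msdus, int msdu_count,
                                 void (*report)(uint16_t id, uint16_t rssi))
    {
            /* RSSI words start after the (even-padded) id block. */
            int rssi_off = msdu_count + (msdu_count & 1);
            int i;

            for (i = 0; i < msdu_count; i++)
                    report(msdus[i], msdus[rssi_off + i]);
    }

With an odd msdu_count the RSSI block starts one slot later, past the 0xffff pad, which reproduces the msdus[msdu_count + i + 1] case in the hunk above.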
@@ -2488,7 +2627,7 @@ void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) dev_kfree_skb_any(skb); } -static inline bool is_valid_legacy_rate(u8 rate) +static inline int ath10k_get_legacy_rate_idx(struct ath10k *ar, u8 rate) { static const u8 legacy_rates[] = {1, 2, 5, 11, 6, 9, 12, 18, 24, 36, 48, 54}; @@ -2496,10 +2635,116 @@ static inline bool is_valid_legacy_rate(u8 rate) for (i = 0; i < ARRAY_SIZE(legacy_rates); i++) { if (rate == legacy_rates[i]) - return true; + return i; } - return false; + ath10k_warn(ar, "Invalid legacy rate %hhd peer stats", rate); + return -EINVAL; +} + +static void +ath10k_accumulate_per_peer_tx_stats(struct ath10k *ar, + struct ath10k_sta *arsta, + struct ath10k_per_peer_tx_stats *pstats, + u8 legacy_rate_idx) +{ + struct rate_info *txrate = &arsta->txrate; + struct ath10k_htt_tx_stats *tx_stats; + int ht_idx, gi, mcs, bw, nss; + + if (!arsta->tx_stats) + return; + + tx_stats = arsta->tx_stats; + gi = (arsta->txrate.flags & RATE_INFO_FLAGS_SHORT_GI); + ht_idx = txrate->mcs + txrate->nss * 8; + mcs = txrate->mcs; + bw = txrate->bw; + nss = txrate->nss; + +#define STATS_OP_FMT(name) tx_stats->stats[ATH10K_STATS_TYPE_##name] + + if (txrate->flags == RATE_INFO_FLAGS_VHT_MCS) { + STATS_OP_FMT(SUCC).vht[0][mcs] += pstats->succ_bytes; + STATS_OP_FMT(SUCC).vht[1][mcs] += pstats->succ_pkts; + STATS_OP_FMT(FAIL).vht[0][mcs] += pstats->failed_bytes; + STATS_OP_FMT(FAIL).vht[1][mcs] += pstats->failed_pkts; + STATS_OP_FMT(RETRY).vht[0][mcs] += pstats->retry_bytes; + STATS_OP_FMT(RETRY).vht[1][mcs] += pstats->retry_pkts; + } else if (txrate->flags == RATE_INFO_FLAGS_MCS) { + STATS_OP_FMT(SUCC).ht[0][ht_idx] += pstats->succ_bytes; + STATS_OP_FMT(SUCC).ht[1][ht_idx] += pstats->succ_pkts; + STATS_OP_FMT(FAIL).ht[0][ht_idx] += pstats->failed_bytes; + STATS_OP_FMT(FAIL).ht[1][ht_idx] += pstats->failed_pkts; + STATS_OP_FMT(RETRY).ht[0][ht_idx] += pstats->retry_bytes; + STATS_OP_FMT(RETRY).ht[1][ht_idx] += pstats->retry_pkts; + } else { + mcs = legacy_rate_idx; + if (mcs < 0) + return; + + STATS_OP_FMT(SUCC).legacy[0][mcs] += pstats->succ_bytes; + STATS_OP_FMT(SUCC).legacy[1][mcs] += pstats->succ_pkts; + STATS_OP_FMT(FAIL).legacy[0][mcs] += pstats->failed_bytes; + STATS_OP_FMT(FAIL).legacy[1][mcs] += pstats->failed_pkts; + STATS_OP_FMT(RETRY).legacy[0][mcs] += pstats->retry_bytes; + STATS_OP_FMT(RETRY).legacy[1][mcs] += pstats->retry_pkts; + } + + if (ATH10K_HW_AMPDU(pstats->flags)) { + tx_stats->ba_fails += ATH10K_HW_BA_FAIL(pstats->flags); + + if (txrate->flags == RATE_INFO_FLAGS_MCS) { + STATS_OP_FMT(AMPDU).ht[0][ht_idx] += + pstats->succ_bytes + pstats->retry_bytes; + STATS_OP_FMT(AMPDU).ht[1][ht_idx] += + pstats->succ_pkts + pstats->retry_pkts; + } else { + STATS_OP_FMT(AMPDU).vht[0][mcs] += + pstats->succ_bytes + pstats->retry_bytes; + STATS_OP_FMT(AMPDU).vht[1][mcs] += + pstats->succ_pkts + pstats->retry_pkts; + } + STATS_OP_FMT(AMPDU).bw[0][bw] += + pstats->succ_bytes + pstats->retry_bytes; + STATS_OP_FMT(AMPDU).nss[0][nss] += + pstats->succ_bytes + pstats->retry_bytes; + STATS_OP_FMT(AMPDU).gi[0][gi] += + pstats->succ_bytes + pstats->retry_bytes; + STATS_OP_FMT(AMPDU).bw[1][bw] += + pstats->succ_pkts + pstats->retry_pkts; + STATS_OP_FMT(AMPDU).nss[1][nss] += + pstats->succ_pkts + pstats->retry_pkts; + STATS_OP_FMT(AMPDU).gi[1][gi] += + pstats->succ_pkts + pstats->retry_pkts; + } else { + tx_stats->ack_fails += + ATH10K_HW_BA_FAIL(pstats->flags); + } + + STATS_OP_FMT(SUCC).bw[0][bw] += pstats->succ_bytes; + STATS_OP_FMT(SUCC).nss[0][nss] += 
pstats->succ_bytes; + STATS_OP_FMT(SUCC).gi[0][gi] += pstats->succ_bytes; + + STATS_OP_FMT(SUCC).bw[1][bw] += pstats->succ_pkts; + STATS_OP_FMT(SUCC).nss[1][nss] += pstats->succ_pkts; + STATS_OP_FMT(SUCC).gi[1][gi] += pstats->succ_pkts; + + STATS_OP_FMT(FAIL).bw[0][bw] += pstats->failed_bytes; + STATS_OP_FMT(FAIL).nss[0][nss] += pstats->failed_bytes; + STATS_OP_FMT(FAIL).gi[0][gi] += pstats->failed_bytes; + + STATS_OP_FMT(FAIL).bw[1][bw] += pstats->failed_pkts; + STATS_OP_FMT(FAIL).nss[1][nss] += pstats->failed_pkts; + STATS_OP_FMT(FAIL).gi[1][gi] += pstats->failed_pkts; + + STATS_OP_FMT(RETRY).bw[0][bw] += pstats->retry_bytes; + STATS_OP_FMT(RETRY).nss[0][nss] += pstats->retry_bytes; + STATS_OP_FMT(RETRY).gi[0][gi] += pstats->retry_bytes; + + STATS_OP_FMT(RETRY).bw[1][bw] += pstats->retry_pkts; + STATS_OP_FMT(RETRY).nss[1][nss] += pstats->retry_pkts; + STATS_OP_FMT(RETRY).gi[1][gi] += pstats->retry_pkts; } static void @@ -2508,7 +2753,7 @@ ath10k_update_per_peer_tx_stats(struct ath10k *ar, struct ath10k_per_peer_tx_stats *peer_stats) { struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; - u8 rate = 0, sgi; + u8 rate = 0, rate_idx = 0, sgi; struct rate_info txrate; lockdep_assert_held(&ar->data_lock); @@ -2536,17 +2781,12 @@ ath10k_update_per_peer_tx_stats(struct ath10k *ar, if (txrate.flags == WMI_RATE_PREAMBLE_CCK || txrate.flags == WMI_RATE_PREAMBLE_OFDM) { rate = ATH10K_HW_LEGACY_RATE(peer_stats->ratecode); - - if (!is_valid_legacy_rate(rate)) { - ath10k_warn(ar, "Invalid legacy rate %hhd peer stats", - rate); - return; - } - /* This is hacky, FW sends CCK rate 5.5Mbps as 6 */ - rate *= 10; - if (rate == 60 && txrate.flags == WMI_RATE_PREAMBLE_CCK) - rate = rate - 5; + if (rate == 6 && txrate.flags == WMI_RATE_PREAMBLE_CCK) + rate = 5; + rate_idx = ath10k_get_legacy_rate_idx(ar, rate); + if (rate_idx < 0) + return; arsta->txrate.legacy = rate; } else if (txrate.flags == WMI_RATE_PREAMBLE_HT) { arsta->txrate.flags = RATE_INFO_FLAGS_MCS; @@ -2561,6 +2801,10 @@ ath10k_update_per_peer_tx_stats(struct ath10k *ar, arsta->txrate.nss = txrate.nss; arsta->txrate.bw = ath10k_bw_to_mac80211_bw(txrate.bw); + + if (ath10k_debug_is_extd_tx_stats_enabled(ar)) + ath10k_accumulate_per_peer_tx_stats(ar, arsta, peer_stats, + rate_idx); } static void ath10k_htt_fetch_peer_stats(struct ath10k *ar, @@ -2702,7 +2946,12 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) break; } case HTT_T2H_MSG_TYPE_RX_IND: - ath10k_htt_rx_proc_rx_ind(htt, &resp->rx_ind); + if (ar->dev_type == ATH10K_DEV_TYPE_HL) + return ath10k_htt_rx_proc_rx_ind_hl(htt, + &resp->rx_ind_hl, + skb); + else + ath10k_htt_rx_proc_rx_ind_ll(htt, &resp->rx_ind); break; case HTT_T2H_MSG_TYPE_PEER_MAP: { struct htt_peer_map_event ev = { @@ -2986,11 +3235,16 @@ static const struct ath10k_htt_rx_ops htt_rx_ops_64 = { .htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_64, }; +static const struct ath10k_htt_rx_ops htt_rx_ops_hl = { +}; + void ath10k_htt_set_rx_ops(struct ath10k_htt *htt) { struct ath10k *ar = htt->ar; - if (ar->hw_params.target_64bit) + if (ar->dev_type == ATH10K_DEV_TYPE_HL) + htt->rx_ops = &htt_rx_ops_hl; + else if (ar->hw_params.target_64bit) htt->rx_ops = &htt_rx_ops_64; else htt->rx_ops = &htt_rx_ops_32; diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c index 7cff0d52338f..ad05ab714c9b 100644 --- a/drivers/net/wireless/ath/ath10k/htt_tx.c +++ b/drivers/net/wireless/ath/ath10k/htt_tx.c @@ -495,6 +495,9 @@ int ath10k_htt_tx_start(struct ath10k_htt 
*htt) if (htt->tx_mem_allocated) return 0; + if (ar->dev_type == ATH10K_DEV_TYPE_HL) + return 0; + ret = ath10k_htt_tx_alloc_buf(htt); if (ret) goto free_idr_pending_tx; @@ -934,6 +937,57 @@ static int ath10k_htt_send_rx_ring_cfg_64(struct ath10k_htt *htt) return 0; } +static int ath10k_htt_send_rx_ring_cfg_hl(struct ath10k_htt *htt) +{ + struct ath10k *ar = htt->ar; + struct sk_buff *skb; + struct htt_cmd *cmd; + struct htt_rx_ring_setup_ring32 *ring; + const int num_rx_ring = 1; + u16 flags; + int len; + int ret; + + /* + * the HW expects the buffer to be an integral number of 4-byte + * "words" + */ + BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4)); + BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0); + + len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_32.hdr) + + (sizeof(*ring) * num_rx_ring); + skb = ath10k_htc_alloc_skb(ar, len); + if (!skb) + return -ENOMEM; + + skb_put(skb, len); + + cmd = (struct htt_cmd *)skb->data; + ring = &cmd->rx_setup_32.rings[0]; + + cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG; + cmd->rx_setup_32.hdr.num_rings = 1; + + flags = 0; + flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD; + flags |= HTT_RX_RING_FLAGS_UNICAST_RX; + flags |= HTT_RX_RING_FLAGS_MULTICAST_RX; + + memset(ring, 0, sizeof(*ring)); + ring->rx_ring_len = __cpu_to_le16(HTT_RX_RING_SIZE_MIN); + ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE); + ring->flags = __cpu_to_le16(flags); + + ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb); + if (ret) { + dev_kfree_skb_any(skb); + return ret; + } + + return 0; +} + int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt, u8 max_subfrms_ampdu, u8 max_subfrms_amsdu) @@ -1123,7 +1177,8 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) return 0; err_unmap_msdu: - dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); + if (ar->dev_type != ATH10K_DEV_TYPE_HL) + dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); err_free_txdesc: dev_kfree_skb_any(txdesc); err_free_msdu_id: @@ -1134,6 +1189,94 @@ err: return res; } +#define HTT_TX_HL_NEEDED_HEADROOM \ + (unsigned int)(sizeof(struct htt_cmd_hdr) + \ + sizeof(struct htt_data_tx_desc) + \ + sizeof(struct ath10k_htc_hdr)) + +static int ath10k_htt_tx_hl(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode, + struct sk_buff *msdu) +{ + struct ath10k *ar = htt->ar; + int res, data_len; + struct htt_cmd_hdr *cmd_hdr; + struct htt_data_tx_desc *tx_desc; + struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu); + struct sk_buff *tmp_skb; + bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET); + u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu); + u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth); + u8 flags0 = 0; + u16 flags1 = 0; + + data_len = msdu->len; + + switch (txmode) { + case ATH10K_HW_TXRX_RAW: + case ATH10K_HW_TXRX_NATIVE_WIFI: + flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT; + /* fall through */ + case ATH10K_HW_TXRX_ETHERNET: + flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE); + break; + case ATH10K_HW_TXRX_MGMT: + flags0 |= SM(ATH10K_HW_TXRX_MGMT, + HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE); + flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT; + break; + } + + if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) + flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT; + + flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID); + flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID); + if (msdu->ip_summed == CHECKSUM_PARTIAL && + !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) { + flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD; + flags1 |= 
HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD; + } + + /* Prepend the HTT header and TX desc struct to the data message + * and realloc the skb if it does not have enough headroom. + */ + if (skb_headroom(msdu) < HTT_TX_HL_NEEDED_HEADROOM) { + tmp_skb = msdu; + + ath10k_dbg(htt->ar, ATH10K_DBG_HTT, + "Not enough headroom in skb. Current headroom: %u, needed: %u. Reallocating...\n", + skb_headroom(msdu), HTT_TX_HL_NEEDED_HEADROOM); + msdu = skb_realloc_headroom(msdu, HTT_TX_HL_NEEDED_HEADROOM); + kfree_skb(tmp_skb); + if (!msdu) { + ath10k_warn(htt->ar, "htt hl tx: Unable to realloc skb!\n"); + res = -ENOMEM; + goto out; + } + } + + skb_push(msdu, sizeof(*cmd_hdr)); + skb_push(msdu, sizeof(*tx_desc)); + cmd_hdr = (struct htt_cmd_hdr *)msdu->data; + tx_desc = (struct htt_data_tx_desc *)(msdu->data + sizeof(*cmd_hdr)); + + cmd_hdr->msg_type = HTT_H2T_MSG_TYPE_TX_FRM; + tx_desc->flags0 = flags0; + tx_desc->flags1 = __cpu_to_le16(flags1); + tx_desc->len = __cpu_to_le16(data_len); + tx_desc->id = 0; + tx_desc->frags_paddr = 0; /* always zero */ + /* Initialize peer_id to INVALID_PEER because this is NOT + * Reinjection path + */ + tx_desc->peerid = __cpu_to_le32(HTT_INVALID_PEERID); + + res = ath10k_htc_send(&htt->ar->htc, htt->eid, msdu); + +out: + return res; +} + static int ath10k_htt_tx_32(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode, struct sk_buff *msdu) @@ -1561,11 +1704,19 @@ static const struct ath10k_htt_tx_ops htt_tx_ops_64 = { .htt_free_txbuff = ath10k_htt_tx_free_cont_txbuf_64, }; +static const struct ath10k_htt_tx_ops htt_tx_ops_hl = { + .htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_hl, + .htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_32, + .htt_tx = ath10k_htt_tx_hl, +}; + void ath10k_htt_set_tx_ops(struct ath10k_htt *htt) { struct ath10k *ar = htt->ar; - if (ar->hw_params.target_64bit) + if (ar->dev_type == ATH10K_DEV_TYPE_HL) + htt->tx_ops = &htt_tx_ops_hl; + else if (ar->hw_params.target_64bit) htt->tx_ops = &htt_tx_ops_64; else htt->tx_ops = &htt_tx_ops_32; diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c index 677535b3d207..af8ae8117c62 100644 --- a/drivers/net/wireless/ath/ath10k/hw.c +++ b/drivers/net/wireless/ath/ath10k/hw.c @@ -16,6 +16,7 @@ #include <linux/types.h> #include <linux/bitops.h> +#include <linux/bitfield.h> #include "core.h" #include "hw.h" #include "hif.h" @@ -918,6 +919,196 @@ static int ath10k_hw_qca6174_enable_pll_clock(struct ath10k *ar) return 0; } +/* Program CPU_ADDR_MSB to allow different memory + * region access. + */ +static void ath10k_hw_map_target_mem(struct ath10k *ar, u32 msb) +{ + u32 address = SOC_CORE_BASE_ADDRESS + FW_RAM_CONFIG_ADDRESS; + + ath10k_hif_write32(ar, address, msb); +} + +/* 1. Write to memory region of target, such as IRAM adn DRAM. + * 2. Target address( 0 ~ 00100000 & 0x00400000~0x00500000) + * can be written directly. See ath10k_pci_targ_cpu_to_ce_addr() too. + * 3. In order to access the region other than the above, + * we need to set the value of register CPU_ADDR_MSB. + * 4. Target memory access space is limited to 1M size. If the size is larger + * than 1M, need to split it and program CPU_ADDR_MSB accordingly. 
+ */ +static int ath10k_hw_diag_segment_msb_download(struct ath10k *ar, + const void *buffer, + u32 address, + u32 length) +{ + u32 addr = address & REGION_ACCESS_SIZE_MASK; + int ret, remain_size, size; + const u8 *buf; + + ath10k_hw_map_target_mem(ar, CPU_ADDR_MSB_REGION_VAL(address)); + + if (addr + length > REGION_ACCESS_SIZE_LIMIT) { + size = REGION_ACCESS_SIZE_LIMIT - addr; + remain_size = length - size; + + ret = ath10k_hif_diag_write(ar, address, buffer, size); + if (ret) { + ath10k_warn(ar, + "failed to download the first %d bytes segment to address:0x%x: %d\n", + size, address, ret); + goto done; + } + + /* Change msb to the next memory region*/ + ath10k_hw_map_target_mem(ar, + CPU_ADDR_MSB_REGION_VAL(address) + 1); + buf = buffer + size; + ret = ath10k_hif_diag_write(ar, + address & ~REGION_ACCESS_SIZE_MASK, + buf, remain_size); + if (ret) { + ath10k_warn(ar, + "failed to download the second %d bytes segment to address:0x%x: %d\n", + remain_size, + address & ~REGION_ACCESS_SIZE_MASK, + ret); + goto done; + } + } else { + ret = ath10k_hif_diag_write(ar, address, buffer, length); + if (ret) { + ath10k_warn(ar, + "failed to download the only %d bytes segment to address:0x%x: %d\n", + length, address, ret); + goto done; + } + } + +done: + /* Change msb to DRAM */ + ath10k_hw_map_target_mem(ar, + CPU_ADDR_MSB_REGION_VAL(DRAM_BASE_ADDRESS)); + return ret; +} + +static int ath10k_hw_diag_segment_download(struct ath10k *ar, + const void *buffer, + u32 address, + u32 length) +{ + if (address >= DRAM_BASE_ADDRESS + REGION_ACCESS_SIZE_LIMIT) + /* Needs to change MSB for memory write */ + return ath10k_hw_diag_segment_msb_download(ar, buffer, + address, length); + else + return ath10k_hif_diag_write(ar, address, buffer, length); +} + +int ath10k_hw_diag_fast_download(struct ath10k *ar, + u32 address, + const void *buffer, + u32 length) +{ + const u8 *buf = buffer; + bool sgmt_end = false; + u32 base_addr = 0; + u32 base_len = 0; + u32 left = 0; + struct bmi_segmented_file_header *hdr; + struct bmi_segmented_metadata *metadata; + int ret = 0; + + if (length < sizeof(*hdr)) + return -EINVAL; + + /* check firmware header. If it has no correct magic number + * or it's compressed, returns error. 
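+ * + * Editor's sketch of the image layout this parser assumes (field + * names are the ones dereferenced below, all fields little-endian): + * a struct bmi_segmented_file_header carrying magic_num, file_flags + * and data[], followed by struct bmi_segmented_metadata records of + * the form { addr, length, data[] }, terminated by a record whose + * length field holds the BMI_SGMTFILE_DONE marker.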
+ */ + hdr = (struct bmi_segmented_file_header *)buf; + if (__le32_to_cpu(hdr->magic_num) != BMI_SGMTFILE_MAGIC_NUM) { + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "Not a supported firmware, magic_num:0x%x\n", + hdr->magic_num); + return -EINVAL; + } + + if (hdr->file_flags != 0) { + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "Not a supported firmware, file_flags:0x%x\n", + hdr->file_flags); + return -EINVAL; + } + + metadata = (struct bmi_segmented_metadata *)hdr->data; + left = length - sizeof(*hdr); + + while (left > 0) { + if (left < sizeof(*metadata)) { + ath10k_warn(ar, "firmware segment is truncated: %d\n", + left); + ret = -EINVAL; + break; + } + base_addr = __le32_to_cpu(metadata->addr); + base_len = __le32_to_cpu(metadata->length); + buf = metadata->data; + left -= sizeof(*metadata); + + switch (base_len) { + case BMI_SGMTFILE_BEGINADDR: + /* base_addr is the start address to run */ + ret = ath10k_bmi_set_start(ar, base_addr); + base_len = 0; + break; + case BMI_SGMTFILE_DONE: + /* no more segment */ + base_len = 0; + sgmt_end = true; + ret = 0; + break; + case BMI_SGMTFILE_BDDATA: + case BMI_SGMTFILE_EXEC: + ath10k_warn(ar, + "firmware has unsupported segment:%d\n", + base_len); + ret = -EINVAL; + break; + default: + if (base_len > left) { + /* sanity check */ + ath10k_warn(ar, + "firmware has invalid segment length, %d > %d\n", + base_len, left); + ret = -EINVAL; + break; + } + + ret = ath10k_hw_diag_segment_download(ar, + buf, + base_addr, + base_len); + + if (ret) + ath10k_warn(ar, + "failed to download firmware via diag interface:%d\n", + ret); + break; + } + + if (ret || sgmt_end) + break; + + metadata = (struct bmi_segmented_metadata *)(buf + base_len); + left -= base_len; + } + + if (ret == 0) + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "boot firmware fast diag download successfully.\n"); + return ret; +} + const struct ath10k_hw_ops qca988x_ops = { .set_coverage_class = ath10k_hw_qca988x_set_coverage_class, }; diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index 977f79ebb4fd..1b5da272d18c 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h @@ -21,6 +21,14 @@ #include "targaddrs.h" +enum ath10k_bus { + ATH10K_BUS_PCI, + ATH10K_BUS_AHB, + ATH10K_BUS_SDIO, + ATH10K_BUS_USB, + ATH10K_BUS_SNOC, +}; + #define ATH10K_FW_DIR "ath10k" #define QCA988X_2_0_DEVICE_ID_UBNT (0x11ac) @@ -109,6 +117,7 @@ enum qca9377_chip_id_rev { #define QCA9984_HW_1_0_CHIP_ID_REV 0x0 #define QCA9984_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA9984/hw1.0" #define QCA9984_HW_1_0_BOARD_DATA_FILE "board.bin" +#define QCA9984_HW_1_0_EBOARD_DATA_FILE "eboard.bin" #define QCA9984_HW_1_0_PATCH_LOAD_ADDR 0x1234 /* QCA9888 2.0 defines */ @@ -221,6 +230,7 @@ enum ath10k_fw_htt_op_version { enum ath10k_bd_ie_type { /* contains sub IEs of enum ath10k_bd_ie_board_type */ ATH10K_BD_IE_BOARD = 0, + ATH10K_BD_IE_BOARD_EXT = 1, }; enum ath10k_bd_ie_board_type { @@ -389,6 +399,11 @@ extern const struct ath10k_hw_ce_regs qcax_ce_regs; void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey, u32 cc, u32 rcc, u32 cc_prev, u32 rcc_prev); +int ath10k_hw_diag_fast_download(struct ath10k *ar, + u32 address, + const void *buffer, + u32 length); + #define QCA_REV_988X(ar) ((ar)->hw_rev == ATH10K_HW_QCA988X) #define QCA_REV_9887(ar) ((ar)->hw_rev == ATH10K_HW_QCA9887) #define QCA_REV_6174(ar) ((ar)->hw_rev == ATH10K_HW_QCA6174) @@ -501,6 +516,7 @@ struct ath10k_hw_clk_params { struct ath10k_hw_params { u32 id; u16 dev_id; + enum ath10k_bus bus; const char *name; 
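+ + /* (editor's note) the new bus member above reflects that, with + * SDIO/USB/SNOC targets, a bare dev_id no longer identifies a + * hw_params entry on its own; presumably entries are now matched + * on (dev_id, bus), cf. enum ath10k_bus earlier in this header. */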
u32 patch_load_addr; int uart_pin; @@ -539,6 +555,8 @@ struct ath10k_hw_params { const char *dir; const char *board; size_t board_size; + const char *eboard; + size_t ext_board_size; size_t board_ext_size; } fw; @@ -589,6 +607,14 @@ struct ath10k_hw_params { /* Number of bytes to be the offset for each FFT sample */ int spectral_bin_offset; + + /* targets which require hw filter reset during boot up, + * to avoid them sending spurious ACKs. + */ + bool hw_filter_reset_required; + + /* targets that support fw download via the diag CE */ + bool fw_diag_ce_download; }; struct htt_rx_desc; @@ -1124,4 +1150,15 @@ ath10k_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw, #define RTC_SYNC_STATUS_PLL_CHANGING_MASK 0x00000020 /* qca6174 PLL offset/mask end */ +/* CPU_ADDR_MSB is a register, bits [3:0] specify which memory + * region is accessed. Each memory region is 1M in size. + * If host wants to access 0xX12345 at target, then CPU_ADDR_MSB[3:0] + * is 0xX. + * The following macros extract that region index (0xX) and define the + * size limit. + */ +#define CPU_ADDR_MSB_REGION_MASK GENMASK(23, 20) +#define CPU_ADDR_MSB_REGION_VAL(X) FIELD_GET(CPU_ADDR_MSB_REGION_MASK, X) +#define REGION_ACCESS_SIZE_LIMIT 0x100000 +#define REGION_ACCESS_SIZE_MASK (REGION_ACCESS_SIZE_LIMIT - 1) + #endif /* _HW_H_ */ diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 90f9372dec25..3933dd96da55 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -18,6 +18,7 @@ #include "mac.h" +#include <net/cfg80211.h> #include <net/mac80211.h> #include <linux/etherdevice.h> #include <linux/acpi.h> @@ -29,7 +30,6 @@ #include "htt.h" #include "txrx.h" #include "testmode.h" -#include "wmi.h" #include "wmi-tlv.h" #include "wmi-ops.h" #include "wow.h" @@ -156,6 +156,22 @@ u8 ath10k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband, return 0; } +static int ath10k_mac_get_rate_hw_value(int bitrate) +{ + int i; + u8 hw_value_prefix = 0; + + if (ath10k_mac_bitrate_is_cck(bitrate)) + hw_value_prefix = WMI_RATE_PREAMBLE_CCK << 6; + + for (i = 0; i < ARRAY_SIZE(ath10k_rates); i++) { + if (ath10k_rates[i].bitrate == bitrate) + return hw_value_prefix | ath10k_rates[i].hw_value; + } + + return -EINVAL; +} + static int ath10k_mac_get_max_vht_mcs_map(u16 mcs_map, int nss) { switch ((mcs_map >> (2 * nss)) & 0x3) { @@ -967,7 +983,7 @@ static inline int ath10k_vdev_setup_sync(struct ath10k *ar) if (time_left == 0) return -ETIMEDOUT; - return 0; + return ar->last_wmi_vdev_start_status; } static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id) @@ -5451,9 +5467,10 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw, struct cfg80211_chan_def def; u32 vdev_param, pdev_param, slottime, preamble; u16 bitrate, hw_value; - u8 rate; - int rateidx, ret = 0; + u8 rate, basic_rate_idx; + int rateidx, ret = 0, hw_rate_code; enum nl80211_band band; + const struct ieee80211_supported_band *sband; mutex_lock(&ar->conf_mutex); @@ -5659,6 +5676,30 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw, arvif->vdev_id, ret); } + if (changed & BSS_CHANGED_BASIC_RATES) { + if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) { + mutex_unlock(&ar->conf_mutex); + return; + } + + sband = ar->hw->wiphy->bands[def.chan->band]; + basic_rate_idx = ffs(vif->bss_conf.basic_rates) - 1; + bitrate = sband->bitrates[basic_rate_idx].bitrate; + + hw_rate_code = ath10k_mac_get_rate_hw_value(bitrate); + if (hw_rate_code < 0) { + ath10k_warn(ar, "bitrate not supported %d\n", bitrate); +
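/* editor's note: a negative hw_rate_code means the bitrate taken + * from the basic-rates bitmap has no entry in ath10k_rates[], so we + * bail out here rather than program a bogus management rate below. + */ +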
mutex_unlock(&ar->conf_mutex); + return; + } + + vdev_param = ar->wmi.vdev_param->mgmt_rate; + ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, + hw_rate_code); + if (ret) + ath10k_warn(ar, "failed to set mgmt tx rate %d\n", ret); + } + mutex_unlock(&ar->conf_mutex); } @@ -6215,6 +6256,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw, new_state == IEEE80211_STA_NONE) { memset(arsta, 0, sizeof(*arsta)); arsta->arvif = arvif; + arsta->peer_ps_state = WMI_PEER_PS_STATE_DISABLED; INIT_WORK(&arsta->update_wk, ath10k_sta_rc_update_wk); for (i = 0; i < ARRAY_SIZE(sta->txq); i++) @@ -6243,6 +6285,13 @@ static int ath10k_sta_state(struct ieee80211_hw *hw, ar->num_stations + 1, ar->max_num_stations, ar->num_peers + 1, ar->max_num_peers); + if (ath10k_debug_is_extd_tx_stats_enabled(ar)) { + arsta->tx_stats = kzalloc(sizeof(*arsta->tx_stats), + GFP_KERNEL); + if (!arsta->tx_stats) + goto exit; + } + num_tdls_stations = ath10k_mac_tdls_vif_stations_count(hw, vif); num_tdls_vifs = ath10k_mac_tdls_vifs_count(hw); @@ -6328,6 +6377,9 @@ static int ath10k_sta_state(struct ieee80211_hw *hw, "mac vdev %d peer delete %pM sta %pK (sta gone)\n", arvif->vdev_id, sta->addr, sta); + if (ath10k_debug_is_extd_tx_stats_enabled(ar)) + kfree(arsta->tx_stats); + if (sta->tdls) { ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta, @@ -6768,23 +6820,17 @@ static int ath10k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value) return -EOPNOTSUPP; } -static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - u32 queues, bool drop) +void ath10k_mac_wait_tx_complete(struct ath10k *ar) { - struct ath10k *ar = hw->priv; bool skip; long time_left; /* mac80211 doesn't care if we really xmit queued frames or not * we'll collect those frames either way if we stop/delete vdevs */ - if (drop) - return; - - mutex_lock(&ar->conf_mutex); if (ar->state == ATH10K_STATE_WEDGED) - goto skip; + return; time_left = wait_event_timeout(ar->htt.empty_tx_wq, ({ bool empty; @@ -6803,8 +6849,18 @@ static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, if (time_left == 0 || skip) ath10k_warn(ar, "failed to flush transmit queue (skip %i ar-state %i): %ld\n", skip, ar->state, time_left); +} -skip: +static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + u32 queues, bool drop) +{ + struct ath10k *ar = hw->priv; + + if (drop) + return; + + mutex_lock(&ar->conf_mutex); + ath10k_mac_wait_tx_complete(ar); mutex_unlock(&ar->conf_mutex); } @@ -8148,6 +8204,24 @@ static const struct ieee80211_iface_combination ath10k_10_4_if_comb[] = { }, }; +static const struct +ieee80211_iface_combination ath10k_10_4_bcn_int_if_comb[] = { + { + .limits = ath10k_10_4_if_limits, + .n_limits = ARRAY_SIZE(ath10k_10_4_if_limits), + .max_interfaces = 16, + .num_different_channels = 1, + .beacon_int_infra_match = true, + .beacon_int_min_gcd = 100, +#ifdef CONFIG_ATH10K_DFS_CERTIFIED + .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | + BIT(NL80211_CHAN_WIDTH_20) | + BIT(NL80211_CHAN_WIDTH_40) | + BIT(NL80211_CHAN_WIDTH_80), +#endif + }, +}; + static void ath10k_get_arvif_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { @@ -8310,6 +8384,10 @@ int ath10k_mac_register(struct ath10k *ar) void *channels; int ret; + if (!is_valid_ether_addr(ar->mac_addr)) { + ath10k_warn(ar, "invalid MAC address; choosing random\n"); + eth_random_addr(ar->mac_addr); + } SET_IEEE80211_PERM_ADDR(ar->hw, ar->mac_addr); SET_IEEE80211_DEV(ar->hw, ar->dev); @@ -8359,6 +8437,7 @@ int 
ath10k_mac_register(struct ath10k *ar) ar->hw->wiphy->bands[NL80211_BAND_5GHZ] = band; } + wiphy_read_of_freq_limits(ar->hw->wiphy); ath10k_mac_setup_ht_vht_cap(ar); ar->hw->wiphy->interface_modes = @@ -8463,6 +8542,10 @@ int ath10k_mac_register(struct ath10k *ar) wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_SET_SCAN_DWELL); + if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map)) + wiphy_ext_feature_set(ar->hw->wiphy, + NL80211_EXT_FEATURE_DATA_ACK_SIGNAL_SUPPORT); + /* * on LL hardware queues are managed entirely by the FW * so we only advertise to mac we can do the queues thing @@ -8506,6 +8589,13 @@ int ath10k_mac_register(struct ath10k *ar) ar->hw->wiphy->iface_combinations = ath10k_10_4_if_comb; ar->hw->wiphy->n_iface_combinations = ARRAY_SIZE(ath10k_10_4_if_comb); + if (test_bit(WMI_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT, + ar->wmi.svc_map)) { + ar->hw->wiphy->iface_combinations = + ath10k_10_4_bcn_int_if_comb; + ar->hw->wiphy->n_iface_combinations = + ARRAY_SIZE(ath10k_10_4_bcn_int_if_comb); + } break; case ATH10K_FW_WMI_OP_VERSION_UNSET: case ATH10K_FW_WMI_OP_VERSION_MAX: diff --git a/drivers/net/wireless/ath/ath10k/mac.h b/drivers/net/wireless/ath/ath10k/mac.h index 81f8d6c0af35..570493d2d648 100644 --- a/drivers/net/wireless/ath/ath10k/mac.h +++ b/drivers/net/wireless/ath/ath10k/mac.h @@ -82,6 +82,7 @@ struct ieee80211_txq *ath10k_mac_txq_lookup(struct ath10k *ar, u16 peer_id, u8 tid); int ath10k_mac_ext_resource_config(struct ath10k *ar, u32 val); +void ath10k_mac_wait_tx_complete(struct ath10k *ar); static inline void ath10k_tx_h_seq_no(struct ieee80211_vif *vif, struct sk_buff *skb) diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index af2cf55c4c1e..873dbb65439f 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -192,7 +192,7 @@ static struct ce_attr host_ce_config_wlan[] = { /* CE7: ce_diag, the Diagnostic Window */ { - .flags = CE_ATTR_FLAGS, + .flags = CE_ATTR_FLAGS | CE_ATTR_POLL, .src_nentries = 2, .src_sz_max = DIAG_TRANSFER_LIMIT, .dest_nentries = 2, @@ -870,6 +870,21 @@ static u32 ath10k_pci_qca988x_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr) return val; } +/* Refactor from ath10k_pci_qca988x_targ_cpu_to_ce_addr. + * Support to access target space below 1M for qca6174 and qca9377. + * If target space is below 1M, the bit[20] of converted CE addr is 0. + * Otherwise bit[20] of converted CE addr is 1. + */ +static u32 ath10k_pci_qca6174_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr) +{ + u32 val = 0, region = addr & 0xfffff; + + val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS) + & 0x7ff) << 21; + val |= ((addr >= 0x100000) ? 0x100000 : 0) | region; + return val; +} + static u32 ath10k_pci_qca99x0_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr) { u32 val = 0, region = addr & 0xfffff; @@ -931,6 +946,15 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, goto done; } + /* The address supplied by the caller is in the + * Target CPU virtual address space. + * + * In order to use this address with the diagnostic CE, + * convert it from Target CPU virtual address space + * to CE address space + */ + address = ath10k_pci_targ_cpu_to_ce_addr(ar, address); + remaining_bytes = nbytes; ce_data = ce_data_base; while (remaining_bytes) { @@ -942,16 +966,6 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, goto done; /* Request CE to send from Target(!) 
address to Host buffer */ - /* - * The address supplied by the caller is in the - * Target CPU virtual address space. - * - * In order to use this address with the diagnostic CE, - * convert it from Target CPU virtual address space - * to CE address space - */ - address = ath10k_pci_targ_cpu_to_ce_addr(ar, address); - ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)address, nbytes, 0, 0); if (ret) @@ -960,8 +974,10 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, i = 0; while (ath10k_ce_completed_send_next_nolock(ce_diag, NULL) != 0) { - mdelay(1); - if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) { + udelay(DIAG_ACCESS_CE_WAIT_US); + i += DIAG_ACCESS_CE_WAIT_US; + + if (i > DIAG_ACCESS_CE_TIMEOUT_US) { ret = -EBUSY; goto done; } @@ -972,9 +988,10 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, (void **)&buf, &completed_nbytes) != 0) { - mdelay(1); + udelay(DIAG_ACCESS_CE_WAIT_US); + i += DIAG_ACCESS_CE_WAIT_US; - if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) { + if (i > DIAG_ACCESS_CE_TIMEOUT_US) { ret = -EBUSY; goto done; } @@ -1119,9 +1136,10 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, i = 0; while (ath10k_ce_completed_send_next_nolock(ce_diag, NULL) != 0) { - mdelay(1); + udelay(DIAG_ACCESS_CE_WAIT_US); + i += DIAG_ACCESS_CE_WAIT_US; - if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) { + if (i > DIAG_ACCESS_CE_TIMEOUT_US) { ret = -EBUSY; goto done; } @@ -1132,9 +1150,10 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, (void **)&buf, &completed_nbytes) != 0) { - mdelay(1); + udelay(DIAG_ACCESS_CE_WAIT_US); + i += DIAG_ACCESS_CE_WAIT_US; - if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) { + if (i > DIAG_ACCESS_CE_TIMEOUT_US) { ret = -EBUSY; goto done; } @@ -1839,7 +1858,7 @@ int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id, } } - if (WARN_ON(!ul_set || !dl_set)) + if (!ul_set || !dl_set) return -ENOENT; return 0; @@ -2068,9 +2087,9 @@ static void ath10k_pci_hif_stop(struct ath10k *ar) ath10k_pci_irq_disable(ar); ath10k_pci_irq_sync(ar); - ath10k_pci_flush(ar); napi_synchronize(&ar->napi); napi_disable(&ar->napi); + ath10k_pci_flush(ar); spin_lock_irqsave(&ar_pci->ps_lock, flags); WARN_ON(ar_pci->ps_wake_refcount > 0); @@ -3482,7 +3501,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev, struct ath10k *ar; struct ath10k_pci *ar_pci; enum ath10k_hw_rev hw_rev; - u32 chip_id; + struct ath10k_bus_params bus_params; bool pci_ps; int (*pci_soft_reset)(struct ath10k *ar); int (*pci_hard_reset)(struct ath10k *ar); @@ -3510,7 +3529,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev, pci_ps = true; pci_soft_reset = ath10k_pci_warm_reset; pci_hard_reset = ath10k_pci_qca6174_chip_reset; - targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr; + targ_cpu_to_ce_addr = ath10k_pci_qca6174_targ_cpu_to_ce_addr; break; case QCA99X0_2_0_DEVICE_ID: hw_rev = ATH10K_HW_QCA99X0; @@ -3538,7 +3557,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev, pci_ps = true; pci_soft_reset = NULL; pci_hard_reset = ath10k_pci_qca6174_chip_reset; - targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr; + targ_cpu_to_ce_addr = ath10k_pci_qca6174_targ_cpu_to_ce_addr; break; default: WARN_ON(1); @@ -3618,19 +3637,20 @@ static int ath10k_pci_probe(struct pci_dev *pdev, goto err_free_irq; } - chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS); - if (chip_id == 0xffffffff) { + bus_params.dev_type = ATH10K_DEV_TYPE_LL; + bus_params.chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS); + if (bus_params.chip_id == 
0xffffffff) { ath10k_err(ar, "failed to get chip id\n"); goto err_free_irq; } - if (!ath10k_pci_chip_is_supported(pdev->device, chip_id)) { + if (!ath10k_pci_chip_is_supported(pdev->device, bus_params.chip_id)) { ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n", - pdev->device, chip_id); + pdev->device, bus_params.chip_id); goto err_free_irq; } - ret = ath10k_core_register(ar, chip_id); + ret = ath10k_core_register(ar, &bus_params); if (ret) { ath10k_err(ar, "failed to register driver core: %d\n", ret); goto err_free_irq; diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h index 0ed436657108..e8d86331c539 100644 --- a/drivers/net/wireless/ath/ath10k/pci.h +++ b/drivers/net/wireless/ath/ath10k/pci.h @@ -207,7 +207,8 @@ static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar) #define CDC_WAR_DATA_CE 4 /* Wait up to this many Ms for a Diagnostic Access CE operation to complete */ -#define DIAG_ACCESS_CE_TIMEOUT_MS 10 +#define DIAG_ACCESS_CE_TIMEOUT_US 10000 /* 10 ms */ +#define DIAG_ACCESS_CE_WAIT_US 50 void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value); void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val); diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h index ea4075d456fa..310674de3cb8 100644 --- a/drivers/net/wireless/ath/ath10k/rx_desc.h +++ b/drivers/net/wireless/ath/ath10k/rx_desc.h @@ -1277,4 +1277,19 @@ struct fw_rx_desc_base { u8 info0; } __packed; +#define FW_RX_DESC_FLAGS_FIRST_MSDU (1 << 0) +#define FW_RX_DESC_FLAGS_LAST_MSDU (1 << 1) +#define FW_RX_DESC_C3_FAILED (1 << 2) +#define FW_RX_DESC_C4_FAILED (1 << 3) +#define FW_RX_DESC_IPV6 (1 << 4) +#define FW_RX_DESC_TCP (1 << 5) +#define FW_RX_DESC_UDP (1 << 6) + +struct fw_rx_desc_hl { + u8 info0; + u8 version; + u8 len; + u8 flags; +} __packed; + #endif /* _RX_DESC_H_ */ diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c index 7f61591ce0de..983ecfef1d28 100644 --- a/drivers/net/wireless/ath/ath10k/sdio.c +++ b/drivers/net/wireless/ath/ath10k/sdio.c @@ -1941,7 +1941,8 @@ static int ath10k_sdio_probe(struct sdio_func *func, struct ath10k_sdio *ar_sdio; struct ath10k *ar; enum ath10k_hw_rev hw_rev; - u32 chip_id, dev_id_base; + u32 dev_id_base; + struct ath10k_bus_params bus_params; int ret, i; /* Assumption: All SDIO based chipsets (so far) are QCA6174 based. 
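The recurring change in the PCI, SDIO, USB and SNOC probe paths is that ath10k_core_register() now takes a struct ath10k_bus_params rather than a bare chip_id. A minimal sketch of the new call pattern from the editor, limited to the two fields actually visible in these hunks (dev_type and chip_id; SDIO and USB still have no way to read a real chip id, so they pass 0):

	struct ath10k_bus_params bus_params;

	bus_params.dev_type = ATH10K_DEV_TYPE_HL; /* high-latency bus */
	bus_params.chip_id = 0; /* not readable over SDIO/USB yet */

	ret = ath10k_core_register(ar, &bus_params);
	if (ret)
		ath10k_err(ar, "failed to register driver core: %d\n", ret);
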
@@ -2035,9 +2036,10 @@ static int ath10k_sdio_probe(struct sdio_func *func, goto err_free_wq; } + bus_params.dev_type = ATH10K_DEV_TYPE_HL; /* TODO: don't know yet how to get chip_id with SDIO */ - chip_id = 0; - ret = ath10k_core_register(ar, chip_id); + bus_params.chip_id = 0; + ret = ath10k_core_register(ar, &bus_params); if (ret) { ath10k_err(ar, "failed to register driver core: %d\n", ret); goto err_free_wq; diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c index fa1843a7e0fd..f7b5b855aab2 100644 --- a/drivers/net/wireless/ath/ath10k/snoc.c +++ b/drivers/net/wireless/ath/ath10k/snoc.c @@ -62,6 +62,7 @@ static void ath10k_snoc_htt_tx_cb(struct ath10k_ce_pipe *ce_state); static void ath10k_snoc_htc_rx_cb(struct ath10k_ce_pipe *ce_state); static void ath10k_snoc_htt_rx_cb(struct ath10k_ce_pipe *ce_state); static void ath10k_snoc_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state); +static void ath10k_snoc_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state); static const struct ath10k_snoc_drv_priv drv_priv = { .hw_rev = ATH10K_HW_WCN3990, @@ -171,7 +172,7 @@ static struct ce_attr host_ce_config_wlan[] = { .src_nentries = 0, .src_sz_max = 2048, .dest_nentries = 512, - .recv_cb = ath10k_snoc_htt_htc_rx_cb, + .recv_cb = ath10k_snoc_pktlog_rx_cb, }, }; @@ -436,6 +437,14 @@ static void ath10k_snoc_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state) ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler); } +/* Called by lower (CE) layer when data is received from the Target. + * WCN3990 firmware uses separate CE(CE11) to transfer pktlog data. + */ +static void ath10k_snoc_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state) +{ + ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler); +} + static void ath10k_snoc_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb) { skb_pull(skb, sizeof(struct ath10k_htc_hdr)); @@ -616,7 +625,7 @@ static int ath10k_snoc_hif_map_service_to_pipe(struct ath10k *ar, } } - if (WARN_ON(!ul_set || !dl_set)) + if (!ul_set || !dl_set) return -ENOENT; return 0; @@ -722,14 +731,15 @@ static void ath10k_snoc_buffer_cleanup(struct ath10k *ar) static void ath10k_snoc_hif_stop(struct ath10k *ar) { ath10k_snoc_irq_disable(ar); - ath10k_snoc_buffer_cleanup(ar); napi_synchronize(&ar->napi); napi_disable(&ar->napi); + ath10k_snoc_buffer_cleanup(ar); ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n"); } static int ath10k_snoc_hif_start(struct ath10k *ar) { + napi_enable(&ar->napi); ath10k_snoc_irq_enable(ar); ath10k_snoc_rx_post(ar); @@ -792,7 +802,6 @@ static int ath10k_snoc_hif_power_up(struct ath10k *ar) goto err_wlan_enable; } - napi_enable(&ar->napi); return 0; err_wlan_enable: @@ -1274,6 +1283,7 @@ static int ath10k_snoc_probe(struct platform_device *pdev) struct ath10k *ar; int ret; u32 i; + struct ath10k_bus_params bus_params; of_id = of_match_device(ath10k_snoc_dt_match, &pdev->dev); if (!of_id) { @@ -1341,7 +1351,9 @@ static int ath10k_snoc_probe(struct platform_device *pdev) goto err_free_irq; } - ret = ath10k_core_register(ar, drv_data->hw_rev); + bus_params.dev_type = ATH10K_DEV_TYPE_LL; + bus_params.chip_id = drv_data->hw_rev; + ret = ath10k_core_register(ar, &bus_params); if (ret) { ath10k_err(ar, "failed to register driver core: %d\n", ret); goto err_hw_power_off; diff --git a/drivers/net/wireless/ath/ath10k/targaddrs.h b/drivers/net/wireless/ath/ath10k/targaddrs.h index c2b5bad0459b..b11a1c3d87b4 100644 --- a/drivers/net/wireless/ath/ath10k/targaddrs.h +++ b/drivers/net/wireless/ath/ath10k/targaddrs.h @@ -484,6 
+484,10 @@ struct host_interest { #define QCA99X0_BOARD_DATA_SZ 12288 #define QCA99X0_BOARD_EXT_DATA_SZ 0 +/* Dual band extended board data */ +#define QCA99X0_EXT_BOARD_DATA_SZ 2048 +#define EXT_BOARD_ADDRESS_OFFSET 0x3000 + #define QCA4019_BOARD_DATA_SZ 12064 #define QCA4019_BOARD_EXT_DATA_SZ 0 diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c index cda164f6e9f6..23606b6972d0 100644 --- a/drivers/net/wireless/ath/ath10k/txrx.c +++ b/drivers/net/wireless/ath/ath10k/txrx.c @@ -95,7 +95,8 @@ int ath10k_txrx_tx_unref(struct ath10k_htt *htt, wake_up(&htt->empty_tx_wq); spin_unlock_bh(&htt->tx_lock); - dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); + if (ar->dev_type != ATH10K_DEV_TYPE_HL) + dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); ath10k_report_offchan_tx(htt->ar, msdu); diff --git a/drivers/net/wireless/ath/ath10k/usb.c b/drivers/net/wireless/ath/ath10k/usb.c index d4803ff5a78a..f731d35ee76d 100644 --- a/drivers/net/wireless/ath/ath10k/usb.c +++ b/drivers/net/wireless/ath/ath10k/usb.c @@ -983,7 +983,7 @@ static int ath10k_usb_probe(struct usb_interface *interface, struct usb_device *dev = interface_to_usbdev(interface); int ret, vendor_id, product_id; enum ath10k_hw_rev hw_rev; - u32 chip_id; + struct ath10k_bus_params bus_params; /* Assumption: All USB based chipsets (so far) are QCA9377 based. * If there will be newer chipsets that does not use the hw reg @@ -1016,9 +1016,10 @@ static int ath10k_usb_probe(struct usb_interface *interface, ar->id.vendor = vendor_id; ar->id.device = product_id; + bus_params.dev_type = ATH10K_DEV_TYPE_HL; /* TODO: don't know yet how to get chip_id with USB */ - chip_id = 0; - ret = ath10k_core_register(ar, chip_id); + bus_params.chip_id = 0; + ret = ath10k_core_register(ar, &bus_params); if (ret) { ath10k_warn(ar, "failed to register driver core: %d\n", ret); goto err; diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c index cdc1e64d52ad..731ceaed4d5a 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c @@ -19,7 +19,6 @@ #include "debug.h" #include "mac.h" #include "hw.h" -#include "mac.h" #include "wmi.h" #include "wmi-ops.h" #include "wmi-tlv.h" @@ -1569,7 +1568,10 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar) cfg->num_vdevs = __cpu_to_le32(TARGET_TLV_NUM_VDEVS); - cfg->num_peers = __cpu_to_le32(ar->hw_params.num_peers); + if (ar->hw_params.num_peers) + cfg->num_peers = __cpu_to_le32(ar->hw_params.num_peers); + else + cfg->num_peers = __cpu_to_le32(TARGET_TLV_NUM_PEERS); cfg->ast_skid_limit = __cpu_to_le32(ar->hw_params.ast_skid_limit); cfg->num_wds_entries = __cpu_to_le32(ar->hw_params.num_wds_entries); @@ -1582,7 +1584,10 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar) } cfg->num_peer_keys = __cpu_to_le32(2); - cfg->num_tids = __cpu_to_le32(TARGET_TLV_NUM_TIDS); + if (ar->hw_params.num_peers) + cfg->num_tids = __cpu_to_le32(ar->hw_params.num_peers * 2); + else + cfg->num_tids = __cpu_to_le32(TARGET_TLV_NUM_TIDS); cfg->tx_chain_mask = __cpu_to_le32(0x7); cfg->rx_chain_mask = __cpu_to_le32(0x7); cfg->rx_timeout_pri[0] = __cpu_to_le32(0x64); diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index fd612d2905b0..25e8fa789e8d 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -1307,7 +1307,8 @@ static struct wmi_pdev_param_map 
wmi_10_2_4_pdev_param_map = { .set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED, .set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED, .remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED, - .peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED, + .peer_sta_ps_statechg_enable = + WMI_10X_PDEV_PARAM_PEER_STA_PS_STATECHG_ENABLE, .igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED, .block_interbss = WMI_PDEV_PARAM_UNSUPPORTED, .set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, @@ -1869,6 +1870,12 @@ int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id) if (ret) dev_kfree_skb_any(skb); + if (ret == -EAGAIN) { + ath10k_warn(ar, "wmi command %d timeout, restarting hardware\n", + cmd_id); + queue_work(ar->workqueue, &ar->restart_work); + } + return ret; } @@ -2336,7 +2343,12 @@ static int wmi_process_mgmt_tx_comp(struct ath10k *ar, u32 desc_id, dma_unmap_single(ar->dev, pkt_addr->paddr, msdu->len, DMA_FROM_DEVICE); info = IEEE80211_SKB_CB(msdu); - info->flags |= status; + + if (status) + info->flags &= ~IEEE80211_TX_STAT_ACK; + else + info->flags |= IEEE80211_TX_STAT_ACK; + ieee80211_tx_status_irqsafe(ar->hw, msdu); ret = 0; @@ -2476,7 +2488,8 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb) status->freq, status->band, status->signal, status->rate_idx); - ieee80211_rx(ar->hw, skb); + ieee80211_rx_ni(ar->hw, skb); + return 0; } @@ -3236,18 +3249,31 @@ void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar, struct sk_buff *skb) { struct wmi_vdev_start_ev_arg arg = {}; int ret; + u32 status; ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_START_RESP_EVENTID\n"); + ar->last_wmi_vdev_start_status = 0; + ret = ath10k_wmi_pull_vdev_start(ar, skb, &arg); if (ret) { ath10k_warn(ar, "failed to parse vdev start event: %d\n", ret); - return; + ar->last_wmi_vdev_start_status = ret; + goto out; } - if (WARN_ON(__le32_to_cpu(arg.status))) - return; + status = __le32_to_cpu(arg.status); + if (WARN_ON_ONCE(status)) { + ath10k_warn(ar, "vdev-start-response reports status error: %d (%s)\n", + status, (status == WMI_VDEV_START_CHAN_INVALID) ? + "chan-invalid" : "unknown"); + /* Setup is done one way or another though, so we should still + * do the completion, so don't return here. 
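+ * The status recorded below in ar->last_wmi_vdev_start_status is + * what ath10k_vdev_setup_sync() now returns once this completion + * fires, so the caller observes the failure without a new event.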
+ */ + ar->last_wmi_vdev_start_status = -EINVAL; + } +out: complete(&ar->vdev_setup_done); } @@ -4774,6 +4800,13 @@ ath10k_wmi_tpc_final_get_rate(struct ath10k *ar, } } + if (pream == -1) { + ath10k_warn(ar, "unknown wmi tpc final index and frequency: %u, %u\n", + pream_idx, __le32_to_cpu(ev->chan_freq)); + tpc = 0; + goto out; + } + if (pream == 4) tpc = min_t(u8, ev->rates_array[rate_idx], ev->max_reg_allow_pow[ch]); @@ -5016,6 +5049,36 @@ ath10k_wmi_handle_tdls_peer_event(struct ath10k *ar, struct sk_buff *skb) } } +static void +ath10k_wmi_event_peer_sta_ps_state_chg(struct ath10k *ar, struct sk_buff *skb) +{ + struct wmi_peer_sta_ps_state_chg_event *ev; + struct ieee80211_sta *sta; + struct ath10k_sta *arsta; + u8 peer_addr[ETH_ALEN]; + + lockdep_assert_held(&ar->data_lock); + + ev = (struct wmi_peer_sta_ps_state_chg_event *)skb->data; + ether_addr_copy(peer_addr, ev->peer_macaddr.addr); + + rcu_read_lock(); + + sta = ieee80211_find_sta_by_ifaddr(ar->hw, peer_addr, NULL); + + if (!sta) { + ath10k_warn(ar, "failed to find station entry %pM\n", + peer_addr); + goto exit; + } + + arsta = (struct ath10k_sta *)sta->drv_priv; + arsta->peer_ps_state = __le32_to_cpu(ev->peer_ps_state); + +exit: + rcu_read_unlock(); +} + void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar, struct sk_buff *skb) { ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_FTM_INTG_EVENTID\n"); @@ -5449,7 +5512,8 @@ int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb) arg.mac_addr, __le32_to_cpu(arg.status)); - ether_addr_copy(ar->mac_addr, arg.mac_addr); + if (is_zero_ether_addr(ar->mac_addr)) + ether_addr_copy(ar->mac_addr, arg.mac_addr); complete(&ar->wmi.unified_ready); return 0; } @@ -5945,6 +6009,9 @@ static void ath10k_wmi_10_2_op_rx(struct ath10k *ar, struct sk_buff *skb) ath10k_dbg(ar, ATH10K_DBG_WMI, "received event id %d not implemented\n", id); break; + case WMI_10_2_PEER_STA_PS_STATECHG_EVENTID: + ath10k_wmi_event_peer_sta_ps_state_chg(ar, skb); + break; default: ath10k_warn(ar, "Unknown eventid: %d\n", id); break; @@ -6062,6 +6129,9 @@ static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb) case WMI_10_4_DFS_STATUS_CHECK_EVENTID: ath10k_wmi_event_dfs_status_check(ar, skb); break; + case WMI_10_4_PEER_STA_PS_STATECHG_EVENTID: + ath10k_wmi_event_peer_sta_ps_state_chg(ar, skb); + break; default: ath10k_warn(ar, "Unknown eventid: %d\n", id); break; diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index 36220258e3c7..f67c52757ea6 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -203,6 +203,8 @@ enum wmi_service { WMI_SERVICE_TPC_STATS_FINAL, WMI_SERVICE_RESET_CHIP, WMI_SERVICE_SPOOF_MAC_SUPPORT, + WMI_SERVICE_TX_DATA_ACK_RSSI, + WMI_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT, /* keep last */ WMI_SERVICE_MAX, @@ -350,6 +352,13 @@ enum wmi_10_4_service { WMI_10_4_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS, WMI_10_4_SERVICE_HOST_DFS_CHECK_SUPPORT, WMI_10_4_SERVICE_TPC_STATS_FINAL, + WMI_10_4_SERVICE_CFR_CAPTURE_SUPPORT, + WMI_10_4_SERVICE_TX_DATA_ACK_RSSI, + WMI_10_4_SERVICE_CFR_CAPTURE_IND_MSG_TYPE_LEGACY, + WMI_10_4_SERVICE_PER_PACKET_SW_ENCRYPT, + WMI_10_4_SERVICE_PEER_TID_CONFIGS_SUPPORT, + WMI_10_4_SERVICE_VDEV_BCN_RATE_CONTROL, + WMI_10_4_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT, }; static inline char *wmi_service_name(int service_id) @@ -463,6 +472,8 @@ static inline char *wmi_service_name(int service_id) SVCSTR(WMI_SERVICE_HOST_DFS_CHECK_SUPPORT); SVCSTR(WMI_SERVICE_TPC_STATS_FINAL); 
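/* editor's note: a new wmi_service value needs both an SVCSTR() * entry here and a matching SVCMAP() translation in the per-firmware * svc_map helpers; the two services added below are paired with * SVCMAP() lines in wmi_10_4_svc_map() further down. */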
SVCSTR(WMI_SERVICE_RESET_CHIP); + SVCSTR(WMI_SERVICE_TX_DATA_ACK_RSSI); + SVCSTR(WMI_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT); default: return NULL; } @@ -771,6 +782,10 @@ static inline void wmi_10_4_svc_map(const __le32 *in, unsigned long *out, WMI_SERVICE_HOST_DFS_CHECK_SUPPORT, len); SVCMAP(WMI_10_4_SERVICE_TPC_STATS_FINAL, WMI_SERVICE_TPC_STATS_FINAL, len); + SVCMAP(WMI_10_4_SERVICE_TX_DATA_ACK_RSSI, + WMI_SERVICE_TX_DATA_ACK_RSSI, len); + SVCMAP(WMI_10_4_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT, + WMI_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT, len); } #undef SVCMAP @@ -2924,6 +2939,7 @@ enum wmi_coex_version { * @WMI_10_4_TDLS_CONN_TRACKER_IN_HOST_MODE: TDLS connection tracker in host * enable/disable * @WMI_10_4_TDLS_EXPLICIT_MODE_ONLY:Explicit TDLS mode enable/disable + * @WMI_10_4_TX_DATA_ACK_RSSI: Enable DATA ACK RSSI if firmware is capable */ enum wmi_10_4_feature_mask { WMI_10_4_LTEU_SUPPORT = BIT(0), @@ -2939,6 +2955,7 @@ enum wmi_10_4_feature_mask { WMI_10_4_TDLS_UAPSD_SLEEP_STA = BIT(10), WMI_10_4_TDLS_CONN_TRACKER_IN_HOST_MODE = BIT(11), WMI_10_4_TDLS_EXPLICIT_MODE_ONLY = BIT(12), + WMI_10_4_TX_DATA_ACK_RSSI = BIT(16), }; @@ -4153,6 +4170,13 @@ enum wmi_tpc_pream_5ghz { WMI_TPC_PREAM_5GHZ_HTCUP, }; +#define WMI_PEER_PS_STATE_DISABLED 2 + +struct wmi_peer_sta_ps_state_chg_event { + struct wmi_mac_addr peer_macaddr; + __le32 peer_ps_state; +} __packed; + struct wmi_pdev_chanlist_update_event { /* number of channels */ __le32 num_chan; @@ -4958,10 +4982,15 @@ enum wmi_rate_preamble { #define ATH10K_HW_GI(flags) (((flags) >> 5) & 0x1) #define ATH10K_HW_RATECODE(rate, nss, preamble) \ (((preamble) << 6) | ((nss) << 4) | (rate)) +#define ATH10K_HW_AMPDU(flags) ((flags) & 0x1) +#define ATH10K_HW_BA_FAIL(flags) (((flags) >> 1) & 0x3) -#define VHT_MCS_NUM 10 -#define VHT_BW_NUM 4 -#define VHT_NSS_NUM 4 +#define ATH10K_VHT_MCS_NUM 10 +#define ATH10K_BW_NUM 4 +#define ATH10K_NSS_NUM 4 +#define ATH10K_LEGACY_NUM 12 +#define ATH10K_GI_NUM 2 +#define ATH10K_HT_MCS_NUM 32 /* Value to disable fixed rate setting */ #define WMI_FIXED_RATE_NONE (0xff) @@ -6642,11 +6671,17 @@ struct wmi_ch_info_ev_arg { __le32 rx_frame_count; }; +/* From 10.4 firmware, not sure all have the same values. 
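+ * Only WMI_VDEV_START_CHAN_INVALID is decoded by name in + * ath10k_wmi_event_vdev_start_resp() above; any other non-zero + * status is reported as "unknown".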
*/ +enum wmi_vdev_start_status { + WMI_VDEV_START_OK = 0, + WMI_VDEV_START_CHAN_INVALID, +}; + struct wmi_vdev_start_ev_arg { __le32 vdev_id; __le32 req_id; __le32 resp_type; /* %WMI_VDEV_RESP_ */ - __le32 status; + __le32 status; /* See wmi_vdev_start_status enum above */ }; struct wmi_peer_kick_ev_arg { diff --git a/drivers/net/wireless/ath/ath10k/wow.c b/drivers/net/wireless/ath/ath10k/wow.c index a6b179f88d36..af444dfecae9 100644 --- a/drivers/net/wireless/ath/ath10k/wow.c +++ b/drivers/net/wireless/ath/ath10k/wow.c @@ -374,6 +374,8 @@ int ath10k_wow_op_suspend(struct ieee80211_hw *hw, goto cleanup; } + ath10k_mac_wait_tx_complete(ar); + ret = ath10k_wow_enable(ar); if (ret) { ath10k_warn(ar, "failed to start wow: %d\n", ret); diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c index e01faf641288..94f70047d3fc 100644 --- a/drivers/net/wireless/ath/ath5k/debug.c +++ b/drivers/net/wireless/ath/ath5k/debug.c @@ -1028,8 +1028,6 @@ ath5k_debug_dump_bands(struct ath5k_hw *ah) if (likely(!(ah->debug.level & ATH5K_DEBUG_DUMPBANDS))) return; - BUG_ON(!ah->sbands); - for (b = 0; b < NUM_NL80211_BANDS; b++) { struct ieee80211_supported_band *band = &ah->sbands[b]; char bname[6]; diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c index 58fb227a849f..54132af70094 100644 --- a/drivers/net/wireless/ath/ath6kl/init.c +++ b/drivers/net/wireless/ath/ath6kl/init.c @@ -710,8 +710,8 @@ static bool check_device_tree(struct ath6kl *ar) for_each_compatible_node(node, NULL, "atheros,ath6kl") { board_id = of_get_property(node, board_id_prop, NULL); if (board_id == NULL) { - ath6kl_warn("No \"%s\" property on %s node.\n", - board_id_prop, node->name); + ath6kl_warn("No \"%s\" property on %pOFn node.\n", + board_id_prop, node); continue; } snprintf(board_filename, sizeof(board_filename), diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c index 0c61dbaa62a4..cb59016c723b 100644 --- a/drivers/net/wireless/ath/ath6kl/main.c +++ b/drivers/net/wireless/ath/ath6kl/main.c @@ -638,7 +638,7 @@ void ath6kl_connect_event(struct ath6kl_vif *vif, u16 channel, u8 *bssid, memcpy(vif->bssid, bssid, sizeof(vif->bssid)); vif->bss_ch = channel; - if ((vif->nw_type == INFRA_NETWORK)) { + if (vif->nw_type == INFRA_NETWORK) { ath6kl_wmi_listeninterval_cmd(ar->wmi, vif->fw_vif_idx, vif->listen_intvl_t, 0); ath6kl_check_ch_switch(ar, channel); diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c index ef2dd68d3f77..11d6f975c87d 100644 --- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c @@ -37,10 +37,6 @@ #define AR5008_11NG_HT_SS_SHIFT 12 #define AR5008_11NG_HT_DS_SHIFT 20 -static const int firstep_table[] = -/* level: 0 1 2 3 4 5 6 7 8 */ - { -4, -2, 0, 2, 4, 6, 8, 10, 12 }; /* lvl 0-8, default 2 */ - /* * register values to turn OFDM weak signal detection OFF */ diff --git a/drivers/net/wireless/ath/ath9k/common-debug.c b/drivers/net/wireless/ath/ath9k/common-debug.c index 239429f10378..53ca4b063eb9 100644 --- a/drivers/net/wireless/ath/ath9k/common-debug.c +++ b/drivers/net/wireless/ath/ath9k/common-debug.c @@ -144,6 +144,8 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf, RXS_ERR("BEACONS", rx_beacons); RXS_ERR("FRAGS", rx_frags); RXS_ERR("SPECTRAL", rx_spectral); + RXS_ERR("SPECTRAL SMPL GOOD", rx_spectral_sample_good); + RXS_ERR("SPECTRAL SMPL ERR", rx_spectral_sample_err); RXS_ERR("CRC 
ERR", crc_err); RXS_ERR("DECRYPT CRC ERR", decrypt_crc_err); diff --git a/drivers/net/wireless/ath/ath9k/common-debug.h b/drivers/net/wireless/ath/ath9k/common-debug.h index 3376990d3a24..2938b5b96b07 100644 --- a/drivers/net/wireless/ath/ath9k/common-debug.h +++ b/drivers/net/wireless/ath/ath9k/common-debug.h @@ -39,6 +39,8 @@ * @rx_beacons: No. of beacons received. * @rx_frags: No. of rx-fragements received. * @rx_spectral: No of spectral packets received. + * @rx_spectral_sample_good: No. of good spectral samples + * @rx_spectral_sample_err: No. of good spectral samples */ struct ath_rx_stats { u32 rx_pkts_all; @@ -58,6 +60,8 @@ struct ath_rx_stats { u32 rx_beacons; u32 rx_frags; u32 rx_spectral; + u32 rx_spectral_sample_good; + u32 rx_spectral_sample_err; }; #ifdef CONFIG_ATH9K_COMMON_DEBUG diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.c b/drivers/net/wireless/ath/ath9k/common-spectral.c index 440e16e641e4..6a43d26276e5 100644 --- a/drivers/net/wireless/ath/ath9k/common-spectral.c +++ b/drivers/net/wireless/ath/ath9k/common-spectral.c @@ -59,8 +59,7 @@ ath_cmn_max_idx_verify_ht20_fft(u8 *sample_end, int bytes_read) sample = sample_end - SPECTRAL_HT20_SAMPLE_LEN + 1; - max_index = spectral_max_index(mag_info->all_bins, - SPECTRAL_HT20_NUM_BINS); + max_index = spectral_max_index_ht20(mag_info->all_bins); max_magnitude = spectral_max_magnitude(mag_info->all_bins); max_exp = mag_info->max_exp & 0xf; @@ -72,7 +71,7 @@ ath_cmn_max_idx_verify_ht20_fft(u8 *sample_end, int bytes_read) if (bytes_read < SPECTRAL_HT20_SAMPLE_LEN && max_index < 1) return -1; - if (sample[max_index] != (max_magnitude >> max_exp)) + if ((sample[max_index] & 0xf8) != ((max_magnitude >> max_exp) & 0xf8)) return -1; else return 0; @@ -100,12 +99,10 @@ ath_cmn_max_idx_verify_ht20_40_fft(u8 *sample_end, int bytes_read) sample = sample_end - SPECTRAL_HT20_40_SAMPLE_LEN + 1; lower_mag = spectral_max_magnitude(mag_info->lower_bins); - lower_max_index = spectral_max_index(mag_info->lower_bins, - SPECTRAL_HT20_40_NUM_BINS); + lower_max_index = spectral_max_index_ht40(mag_info->lower_bins); upper_mag = spectral_max_magnitude(mag_info->upper_bins); - upper_max_index = spectral_max_index(mag_info->upper_bins, - SPECTRAL_HT20_40_NUM_BINS); + upper_max_index = spectral_max_index_ht40(mag_info->upper_bins); max_exp = mag_info->max_exp & 0xf; @@ -117,19 +114,10 @@ ath_cmn_max_idx_verify_ht20_40_fft(u8 *sample_end, int bytes_read) ((upper_max_index < 1) || (lower_max_index < 1))) return -1; - /* Some time hardware messes up the index and adds - * the index of the middle point (dc_pos). Try to fix it. 
- */ - if ((upper_max_index - dc_pos > 0) && - (sample[upper_max_index] == (upper_mag >> max_exp))) - upper_max_index -= dc_pos; - - if ((lower_max_index - dc_pos > 0) && - (sample[lower_max_index - dc_pos] == (lower_mag >> max_exp))) - lower_max_index -= dc_pos; - - if ((sample[upper_max_index + dc_pos] != (upper_mag >> max_exp)) || - (sample[lower_max_index] != (lower_mag >> max_exp))) + if (((sample[upper_max_index + dc_pos] & 0xf8) != + ((upper_mag >> max_exp) & 0xf8)) || + ((sample[lower_max_index] & 0xf8) != + ((lower_mag >> max_exp) & 0xf8))) return -1; else return 0; @@ -169,8 +157,7 @@ ath_cmn_process_ht20_fft(struct ath_rx_status *rs, magnitude = spectral_max_magnitude(mag_info->all_bins); fft_sample_20.max_magnitude = __cpu_to_be16(magnitude); - max_index = spectral_max_index(mag_info->all_bins, - SPECTRAL_HT20_NUM_BINS); + max_index = spectral_max_index_ht20(mag_info->all_bins); fft_sample_20.max_index = max_index; bitmap_w = spectral_bitmap_weight(mag_info->all_bins); @@ -188,7 +175,8 @@ ath_cmn_process_ht20_fft(struct ath_rx_status *rs, magnitude >> max_exp, max_index); - if (fft_sample_20.data[max_index] != (magnitude >> max_exp)) { + if ((fft_sample_20.data[max_index] & 0xf8) != + ((magnitude >> max_exp) & 0xf8)) { ath_dbg(common, SPECTRAL_SCAN, "Magnitude mismatch !\n"); ret = -1; } @@ -302,12 +290,10 @@ ath_cmn_process_ht20_40_fft(struct ath_rx_status *rs, upper_mag = spectral_max_magnitude(mag_info->upper_bins); fft_sample_40.upper_max_magnitude = __cpu_to_be16(upper_mag); - lower_max_index = spectral_max_index(mag_info->lower_bins, - SPECTRAL_HT20_40_NUM_BINS); + lower_max_index = spectral_max_index_ht40(mag_info->lower_bins); fft_sample_40.lower_max_index = lower_max_index; - upper_max_index = spectral_max_index(mag_info->upper_bins, - SPECTRAL_HT20_40_NUM_BINS); + upper_max_index = spectral_max_index_ht40(mag_info->upper_bins); fft_sample_40.upper_max_index = upper_max_index; lower_bitmap_w = spectral_bitmap_weight(mag_info->lower_bins); @@ -331,29 +317,13 @@ ath_cmn_process_ht20_40_fft(struct ath_rx_status *rs, upper_mag >> max_exp, upper_max_index); - /* Some time hardware messes up the index and adds - * the index of the middle point (dc_pos). Try to fix it. 
- */ - if ((upper_max_index - dc_pos > 0) && - (fft_sample_40.data[upper_max_index] == (upper_mag >> max_exp))) { - upper_max_index -= dc_pos; - fft_sample_40.upper_max_index = upper_max_index; - } - - if ((lower_max_index - dc_pos > 0) && - (fft_sample_40.data[lower_max_index - dc_pos] == - (lower_mag >> max_exp))) { - lower_max_index -= dc_pos; - fft_sample_40.lower_max_index = lower_max_index; - } - /* Check if we got the expected magnitude values at * the expected bins */ - if ((fft_sample_40.data[upper_max_index + dc_pos] - != (upper_mag >> max_exp)) || - (fft_sample_40.data[lower_max_index] - != (lower_mag >> max_exp))) { + if (((fft_sample_40.data[upper_max_index + dc_pos] & 0xf8) + != ((upper_mag >> max_exp) & 0xf8)) || + ((fft_sample_40.data[lower_max_index] & 0xf8) + != ((lower_mag >> max_exp) & 0xf8))) { ath_dbg(common, SPECTRAL_SCAN, "Magnitude mismatch !\n"); ret = -1; } @@ -411,7 +381,7 @@ ath_cmn_process_ht20_40_fft(struct ath_rx_status *rs, ath_dbg(common, SPECTRAL_SCAN, "Calculated new upper max 0x%X at %i\n", - tmp_mag, i); + tmp_mag, fft_sample_40.upper_max_index); } else for (i = dc_pos; i < SPECTRAL_HT20_40_NUM_BINS; i++) { if (fft_sample_40.data[i] == (upper_mag >> max_exp)) @@ -501,6 +471,7 @@ int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv, struct ieee80211_h u8 sample_buf[SPECTRAL_SAMPLE_MAX_LEN] = {0}; struct ath_hw *ah = spec_priv->ah; struct ath_common *common = ath9k_hw_common(spec_priv->ah); + struct ath_softc *sc = (struct ath_softc *)common->priv; u8 num_bins, *vdata = (u8 *)hdr; struct ath_radar_info *radar_info; int len = rs->rs_datalen; @@ -649,8 +620,13 @@ int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv, struct ieee80211_h sample_buf, sample_len, sample_bytes); - fft_handler(rs, spec_priv, sample_buf, - tsf, freq, chan_type); + ret = fft_handler(rs, spec_priv, sample_buf, + tsf, freq, chan_type); + + if (ret == 0) + RX_STAT_INC(rx_spectral_sample_good); + else + RX_STAT_INC(rx_spectral_sample_err); memset(sample_buf, 0, SPECTRAL_SAMPLE_MAX_LEN); @@ -665,6 +641,11 @@ int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv, struct ieee80211_h ret = fft_handler(rs, spec_priv, sample_start, tsf, freq, chan_type); + if (ret == 0) + RX_STAT_INC(rx_spectral_sample_good); + else + RX_STAT_INC(rx_spectral_sample_err); + /* Mix the received bins to the /dev/random * pool */ @@ -675,7 +656,7 @@ int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv, struct ieee80211_h * loop. */ if (len <= fft_len + 2) - break; + return 1; sample_start = &vdata[i + 1]; diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.h b/drivers/net/wireless/ath/ath9k/common-spectral.h index 303ab470ce34..011d8ab8b974 100644 --- a/drivers/net/wireless/ath/ath9k/common-spectral.h +++ b/drivers/net/wireless/ath/ath9k/common-spectral.h @@ -145,6 +145,23 @@ static inline u8 spectral_max_index(u8 *bins, int num_bins) return m; } +static inline u8 spectral_max_index_ht40(u8 *bins) +{ + u8 idx; + + idx = spectral_max_index(bins, SPECTRAL_HT20_40_NUM_BINS); + + /* positive values and zero are starting at the beginning + * of the data field. 
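+ * Editor's illustration, assuming SPECTRAL_HT20_40_NUM_BINS is 128: + * the modulo below folds an index from the upper half of the FFT + * back to a per-half position, e.g. idx 70 -> 70 % 64 = 6.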
+ */ + return idx % (SPECTRAL_HT20_40_NUM_BINS / 2); +} + +static inline u8 spectral_max_index_ht20(u8 *bins) +{ + return spectral_max_index(bins, SPECTRAL_HT20_NUM_BINS); +} + /* return the bitmap weight from the all/upper/lower bins */ static inline u8 spectral_bitmap_weight(u8 *bins) { diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c index 0a6eb8a8c1ed..c871b7ec5011 100644 --- a/drivers/net/wireless/ath/ath9k/debug.c +++ b/drivers/net/wireless/ath/ath9k/debug.c @@ -990,19 +990,6 @@ static int read_file_dump_nfcal(struct seq_file *file, void *data) return 0; } -static int open_file_dump_nfcal(struct inode *inode, struct file *f) -{ - return single_open(f, read_file_dump_nfcal, inode->i_private); -} - -static const struct file_operations fops_dump_nfcal = { - .read = seq_read, - .open = open_file_dump_nfcal, - .owner = THIS_MODULE, - .llseek = seq_lseek, - .release = single_release, -}; - #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT static ssize_t read_file_btcoex(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) diff --git a/drivers/net/wireless/ath/ath9k/debug_sta.c b/drivers/net/wireless/ath/ath9k/debug_sta.c index a6f45f1bb5bb..e8fcd3e1c470 100644 --- a/drivers/net/wireless/ath/ath9k/debug_sta.c +++ b/drivers/net/wireless/ath/ath9k/debug_sta.c @@ -116,7 +116,7 @@ void ath_debug_rate_stats(struct ath_softc *sc, if (rxs->rate_idx >= ARRAY_SIZE(rstats->ht_stats)) goto exit; - if ((rxs->bw == RATE_INFO_BW_40)) + if (rxs->bw == RATE_INFO_BW_40) rstats->ht_stats[rxs->rate_idx].ht40_cnt++; else rstats->ht_stats[rxs->rate_idx].ht20_cnt++; @@ -286,9 +286,25 @@ static ssize_t read_airtime(struct file *file, char __user *user_buf, return retval; } +static ssize_t +write_airtime_reset_stub(struct file *file, const char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct ath_node *an = file->private_data; + struct ath_airtime_stats *astats; + int i; + + astats = &an->airtime_stats; + astats->rx_airtime = 0; + astats->tx_airtime = 0; + for (i = 0; i < 4; i++) + an->airtime_deficit[i] = ATH_AIRTIME_QUANTUM; + return count; +} static const struct file_operations fops_airtime = { .read = read_airtime, + .write = write_airtime_reset_stub, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, @@ -304,5 +320,5 @@ void ath9k_sta_add_debugfs(struct ieee80211_hw *hw, debugfs_create_file("node_aggr", 0444, dir, an, &fops_node_aggr); debugfs_create_file("node_recv", 0444, dir, an, &fops_node_recv); - debugfs_create_file("airtime", 0444, dir, an, &fops_airtime); + debugfs_create_file("airtime", 0644, dir, an, &fops_airtime); } diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 1049773378f2..c85f613e8ceb 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -1251,8 +1251,6 @@ static int ath9k_add_interface(struct ieee80211_hw *hw, struct ath_vif *avp = (void *)vif->drv_priv; struct ath_node *an = &avp->mcast_node; - mutex_lock(&sc->mutex); - if (IS_ENABLED(CONFIG_ATH9K_TX99)) { if (sc->cur_chan->nvifs >= 1) { mutex_unlock(&sc->mutex); @@ -1261,6 +1259,8 @@ static int ath9k_add_interface(struct ieee80211_hw *hw, sc->tx99_vif = vif; } + mutex_lock(&sc->mutex); + ath_dbg(common, CONFIG, "Attach a VIF of type: %d\n", vif->type); sc->cur_chan->nvifs++; diff --git a/drivers/net/wireless/ath/ath9k/tx99.c b/drivers/net/wireless/ath/ath9k/tx99.c index ce50d8f5835e..95544ce05acf 100644 --- a/drivers/net/wireless/ath/ath9k/tx99.c +++ 
b/drivers/net/wireless/ath/ath9k/tx99.c @@ -56,11 +56,6 @@ static struct sk_buff *ath9k_build_tx99_skb(struct ath_softc *sc) struct sk_buff *skb; struct ath_vif *avp; - if (!sc->tx99_vif) - return NULL; - - avp = (struct ath_vif *)sc->tx99_vif->drv_priv; - skb = alloc_skb(len, GFP_KERNEL); if (!skb) return NULL; @@ -77,7 +72,10 @@ static struct sk_buff *ath9k_build_tx99_skb(struct ath_softc *sc) memcpy(hdr->addr2, hw->wiphy->perm_addr, ETH_ALEN); memcpy(hdr->addr3, hw->wiphy->perm_addr, ETH_ALEN); - hdr->seq_ctrl |= cpu_to_le16(avp->seq_no); + if (sc->tx99_vif) { + avp = (struct ath_vif *) sc->tx99_vif->drv_priv; + hdr->seq_ctrl |= cpu_to_le16(avp->seq_no); + } tx_info = IEEE80211_SKB_CB(skb); memset(tx_info, 0, sizeof(*tx_info)); diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c index 0cb5b58925dc..8c75651ede6c 100644 --- a/drivers/net/wireless/ath/carl9170/tx.c +++ b/drivers/net/wireless/ath/carl9170/tx.c @@ -246,8 +246,8 @@ static void carl9170_release_dev_space(struct ar9170 *ar, struct sk_buff *skb) * of available memory blocks, so the number can * never execeed the mem_blocks count. */ - if (unlikely(WARN_ON_ONCE(cookie == 0) || - WARN_ON_ONCE(cookie > ar->fw.mem_blocks))) + if (WARN_ON_ONCE(cookie == 0) || + WARN_ON_ONCE(cookie > ar->fw.mem_blocks)) return; atomic_add(DIV_ROUND_UP(skb->len, ar->fw.mem_block_size), diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c index 06cfe8d311f3..5ab3e31c9ffa 100644 --- a/drivers/net/wireless/ath/wcn36xx/dxe.c +++ b/drivers/net/wireless/ath/wcn36xx/dxe.c @@ -174,13 +174,12 @@ static int wcn36xx_dxe_init_descs(struct device *dev, struct wcn36xx_dxe_ch *wcn int i; size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc); - wcn_ch->cpu_addr = dma_alloc_coherent(dev, size, &wcn_ch->dma_addr, - GFP_KERNEL); + wcn_ch->cpu_addr = dma_zalloc_coherent(dev, size, + &wcn_ch->dma_addr, + GFP_KERNEL); if (!wcn_ch->cpu_addr) return -ENOMEM; - memset(wcn_ch->cpu_addr, 0, size); - cur_dxe = (struct wcn36xx_dxe_desc *)wcn_ch->cpu_addr; cur_ctl = wcn_ch->head_blk_ctl; @@ -628,13 +627,13 @@ int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn) 16 - (WCN36XX_BD_CHUNK_SIZE % 8); s = wcn->mgmt_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_H; - cpu_addr = dma_alloc_coherent(wcn->dev, s, &wcn->mgmt_mem_pool.phy_addr, - GFP_KERNEL); + cpu_addr = dma_zalloc_coherent(wcn->dev, s, + &wcn->mgmt_mem_pool.phy_addr, + GFP_KERNEL); if (!cpu_addr) goto out_err; wcn->mgmt_mem_pool.virt_addr = cpu_addr; - memset(cpu_addr, 0, s); /* Allocate BD headers for DATA frames */ @@ -643,13 +642,13 @@ int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn) 16 - (WCN36XX_BD_CHUNK_SIZE % 8); s = wcn->data_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_L; - cpu_addr = dma_alloc_coherent(wcn->dev, s, &wcn->data_mem_pool.phy_addr, - GFP_KERNEL); + cpu_addr = dma_zalloc_coherent(wcn->dev, s, + &wcn->data_mem_pool.phy_addr, + GFP_KERNEL); if (!cpu_addr) goto out_err; wcn->data_mem_pool.virt_addr = cpu_addr; - memset(cpu_addr, 0, s); return 0; diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c index 00098f24116d..1d2d698fb779 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.c +++ b/drivers/net/wireless/ath/wcn36xx/smd.c @@ -792,10 +792,10 @@ static int wcn36xx_smd_process_ptt_msg_rsp(void *buf, size_t len, rsp->header.len - sizeof(rsp->ptt_msg_resp_status)); if (rsp->header.len > 0) { - *p_ptt_rsp_msg = kmalloc(rsp->header.len, GFP_ATOMIC); + *p_ptt_rsp_msg = 
kmemdup(rsp->ptt_msg, rsp->header.len, + GFP_ATOMIC); if (!*p_ptt_rsp_msg) return -ENOMEM; - memcpy(*p_ptt_rsp_msg, rsp->ptt_msg, rsp->header.len); } return ret; } diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index f79c337105cb..d18e81fae5f1 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -48,9 +48,29 @@ static struct ieee80211_channel wil_60ghz_channels[] = { CHAN60G(1, 0), CHAN60G(2, 0), CHAN60G(3, 0), -/* channel 4 not supported yet */ + CHAN60G(4, 0), }; +static int wil_num_supported_channels(struct wil6210_priv *wil) +{ + int num_channels = ARRAY_SIZE(wil_60ghz_channels); + + if (!test_bit(WMI_FW_CAPABILITY_CHANNEL_4, wil->fw_capabilities)) + num_channels--; + + return num_channels; +} + +void update_supported_bands(struct wil6210_priv *wil) +{ + struct wiphy *wiphy = wil_to_wiphy(wil); + + wil_dbg_misc(wil, "update supported bands"); + + wiphy->bands[NL80211_BAND_60GHZ]->n_channels = + wil_num_supported_channels(wil); +} + /* Vendor id to be used in vendor specific command and events * to user space. * NOTE: The authoritative place for definition of QCA_NL80211_VENDOR_ID, @@ -199,7 +219,9 @@ wil_mgmt_stypes[NUM_NL80211_IFTYPES] = { .tx = BIT(IEEE80211_STYPE_ACTION >> 4) | BIT(IEEE80211_STYPE_PROBE_RESP >> 4) | BIT(IEEE80211_STYPE_ASSOC_RESP >> 4) | - BIT(IEEE80211_STYPE_DISASSOC >> 4), + BIT(IEEE80211_STYPE_DISASSOC >> 4) | + BIT(IEEE80211_STYPE_AUTH >> 4) | + BIT(IEEE80211_STYPE_REASSOC_RESP >> 4), .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | BIT(IEEE80211_STYPE_PROBE_REQ >> 4) | BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) | @@ -871,6 +893,26 @@ static void wil_print_crypto(struct wil6210_priv *wil, c->control_port_no_encrypt); } +static const char * +wil_get_auth_type_name(enum nl80211_auth_type auth_type) +{ + switch (auth_type) { + case NL80211_AUTHTYPE_OPEN_SYSTEM: + return "OPEN_SYSTEM"; + case NL80211_AUTHTYPE_SHARED_KEY: + return "SHARED_KEY"; + case NL80211_AUTHTYPE_FT: + return "FT"; + case NL80211_AUTHTYPE_NETWORK_EAP: + return "NETWORK_EAP"; + case NL80211_AUTHTYPE_SAE: + return "SAE"; + case NL80211_AUTHTYPE_AUTOMATIC: + return "AUTOMATIC"; + default: + return "unknown"; + } +} static void wil_print_connect_params(struct wil6210_priv *wil, struct cfg80211_connect_params *sme) { @@ -884,11 +926,73 @@ static void wil_print_connect_params(struct wil6210_priv *wil, if (sme->ssid) print_hex_dump(KERN_INFO, " SSID: ", DUMP_PREFIX_OFFSET, 16, 1, sme->ssid, sme->ssid_len, true); + if (sme->prev_bssid) + wil_info(wil, " Previous BSSID=%pM\n", sme->prev_bssid); + wil_info(wil, " Auth Type: %s\n", + wil_get_auth_type_name(sme->auth_type)); wil_info(wil, " Privacy: %s\n", sme->privacy ? 
"secure" : "open"); wil_info(wil, " PBSS: %d\n", sme->pbss); wil_print_crypto(wil, &sme->crypto); } +static int wil_ft_connect(struct wiphy *wiphy, + struct net_device *ndev, + struct cfg80211_connect_params *sme) +{ + struct wil6210_priv *wil = wiphy_to_wil(wiphy); + struct wil6210_vif *vif = ndev_to_vif(ndev); + struct wmi_ft_auth_cmd auth_cmd; + int rc; + + if (!test_bit(WMI_FW_CAPABILITY_FT_ROAMING, wil->fw_capabilities)) { + wil_err(wil, "FT: FW does not support FT roaming\n"); + return -EOPNOTSUPP; + } + + if (!sme->prev_bssid) { + wil_err(wil, "FT: prev_bssid was not set\n"); + return -EINVAL; + } + + if (ether_addr_equal(sme->prev_bssid, sme->bssid)) { + wil_err(wil, "FT: can not roam to same AP\n"); + return -EINVAL; + } + + if (!test_bit(wil_vif_fwconnected, vif->status)) { + wil_err(wil, "FT: roam while not connected\n"); + return -EINVAL; + } + + if (vif->privacy != sme->privacy) { + wil_err(wil, "FT: privacy mismatch, current (%d) roam (%d)\n", + vif->privacy, sme->privacy); + return -EINVAL; + } + + if (sme->pbss) { + wil_err(wil, "FT: roam is not valid for PBSS\n"); + return -EINVAL; + } + + memset(&auth_cmd, 0, sizeof(auth_cmd)); + auth_cmd.channel = sme->channel->hw_value - 1; + ether_addr_copy(auth_cmd.bssid, sme->bssid); + + wil_info(wil, "FT: roaming\n"); + + set_bit(wil_vif_ft_roam, vif->status); + rc = wmi_send(wil, WMI_FT_AUTH_CMDID, vif->mid, + &auth_cmd, sizeof(auth_cmd)); + if (rc == 0) + mod_timer(&vif->connect_timer, + jiffies + msecs_to_jiffies(5000)); + else + clear_bit(wil_vif_ft_roam, vif->status); + + return rc; +} + static int wil_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev, struct cfg80211_connect_params *sme) @@ -901,14 +1005,23 @@ static int wil_cfg80211_connect(struct wiphy *wiphy, const u8 *rsn_eid; int ch; int rc = 0; + bool is_ft_roam = false; + u8 network_type; enum ieee80211_bss_type bss_type = IEEE80211_BSS_TYPE_ESS; wil_dbg_misc(wil, "connect, mid=%d\n", vif->mid); wil_print_connect_params(wil, sme); - if (test_bit(wil_vif_fwconnecting, vif->status) || + if (sme->auth_type == NL80211_AUTHTYPE_FT) + is_ft_roam = true; + if (sme->auth_type == NL80211_AUTHTYPE_AUTOMATIC && test_bit(wil_vif_fwconnected, vif->status)) - return -EALREADY; + is_ft_roam = true; + + if (!is_ft_roam) + if (test_bit(wil_vif_fwconnecting, vif->status) || + test_bit(wil_vif_fwconnected, vif->status)) + return -EALREADY; if (sme->ie_len > WMI_MAX_IE_LEN) { wil_err(wil, "IE too large (%td bytes)\n", sme->ie_len); @@ -918,8 +1031,13 @@ static int wil_cfg80211_connect(struct wiphy *wiphy, rsn_eid = sme->ie ? 
cfg80211_find_ie(WLAN_EID_RSN, sme->ie, sme->ie_len) : NULL; - if (sme->privacy && !rsn_eid) + if (sme->privacy && !rsn_eid) { wil_info(wil, "WSC connection\n"); + if (is_ft_roam) { + wil_err(wil, "No WSC with FT roam\n"); + return -EINVAL; + } + } if (sme->pbss) bss_type = IEEE80211_BSS_TYPE_PBSS; @@ -941,6 +1059,45 @@ static int wil_cfg80211_connect(struct wiphy *wiphy, vif->privacy = sme->privacy; vif->pbss = sme->pbss; + rc = wmi_set_ie(vif, WMI_FRAME_ASSOC_REQ, sme->ie_len, sme->ie); + if (rc) + goto out; + + switch (bss->capability & WLAN_CAPABILITY_DMG_TYPE_MASK) { + case WLAN_CAPABILITY_DMG_TYPE_AP: + network_type = WMI_NETTYPE_INFRA; + break; + case WLAN_CAPABILITY_DMG_TYPE_PBSS: + network_type = WMI_NETTYPE_P2P; + break; + default: + wil_err(wil, "Unsupported BSS type, capability= 0x%04x\n", + bss->capability); + rc = -EINVAL; + goto out; + } + + ch = bss->channel->hw_value; + if (ch == 0) { + wil_err(wil, "BSS at unknown frequency %dMhz\n", + bss->channel->center_freq); + rc = -EOPNOTSUPP; + goto out; + } + + if (is_ft_roam) { + if (network_type != WMI_NETTYPE_INFRA) { + wil_err(wil, "FT: Unsupported BSS type, capability= 0x%04x\n", + bss->capability); + rc = -EINVAL; + goto out; + } + rc = wil_ft_connect(wiphy, ndev, sme); + if (rc == 0) + vif->bss = bss; + goto out; + } + if (vif->privacy) { /* For secure assoc, remove old keys */ rc = wmi_del_cipher_key(vif, 0, bss->bssid, @@ -957,28 +1114,9 @@ static int wil_cfg80211_connect(struct wiphy *wiphy, } } - /* WMI_SET_APPIE_CMD. ie may contain rsn info as well as other info - * elements. Send it also in case it's empty, to erase previously set - * ies in FW. - */ - rc = wmi_set_ie(vif, WMI_FRAME_ASSOC_REQ, sme->ie_len, sme->ie); - if (rc) - goto out; - /* WMI_CONNECT_CMD */ memset(&conn, 0, sizeof(conn)); - switch (bss->capability & WLAN_CAPABILITY_DMG_TYPE_MASK) { - case WLAN_CAPABILITY_DMG_TYPE_AP: - conn.network_type = WMI_NETTYPE_INFRA; - break; - case WLAN_CAPABILITY_DMG_TYPE_PBSS: - conn.network_type = WMI_NETTYPE_P2P; - break; - default: - wil_err(wil, "Unsupported BSS type, capability= 0x%04x\n", - bss->capability); - goto out; - } + conn.network_type = network_type; if (vif->privacy) { if (rsn_eid) { /* regular secure connection */ conn.dot11_auth_mode = WMI_AUTH11_SHARED; @@ -998,14 +1136,6 @@ static int wil_cfg80211_connect(struct wiphy *wiphy, conn.ssid_len = min_t(u8, ssid_eid[1], 32); memcpy(conn.ssid, ssid_eid+2, conn.ssid_len); - - ch = bss->channel->hw_value; - if (ch == 0) { - wil_err(wil, "BSS at unknown frequency %dMhz\n", - bss->channel->center_freq); - rc = -EOPNOTSUPP; - goto out; - } conn.channel = ch - 1; ether_addr_copy(conn.bssid, bss->bssid); @@ -1201,9 +1331,9 @@ wil_find_sta_by_key_usage(struct wil6210_priv *wil, u8 mid, return &wil->sta[cid]; } -static void wil_set_crypto_rx(u8 key_index, enum wmi_key_usage key_usage, - struct wil_sta_info *cs, - struct key_params *params) +void wil_set_crypto_rx(u8 key_index, enum wmi_key_usage key_usage, + struct wil_sta_info *cs, + struct key_params *params) { struct wil_tid_crypto_rx_single *cc; int tid; @@ -1286,13 +1416,19 @@ static int wil_cfg80211_add_key(struct wiphy *wiphy, params->seq_len, params->seq); if (IS_ERR(cs)) { - wil_err(wil, "Not connected, %pM %s[%d] PN %*phN\n", - mac_addr, key_usage_str[key_usage], key_index, - params->seq_len, params->seq); - return -EINVAL; + /* in FT, sta info may not be available as add_key may be + * sent by host before FW sends WMI_CONNECT_EVENT + */ + if (!test_bit(wil_vif_ft_roam, vif->status)) { + wil_err(wil, "Not 
connected, %pM %s[%d] PN %*phN\n", + mac_addr, key_usage_str[key_usage], key_index, + params->seq_len, params->seq); + return -EINVAL; + } } - wil_del_rx_key(key_index, key_usage, cs); + if (!IS_ERR(cs)) + wil_del_rx_key(key_index, key_usage, cs); if (params->seq && params->seq_len != IEEE80211_GCMP_PN_LEN) { wil_err(wil, @@ -1305,7 +1441,10 @@ static int wil_cfg80211_add_key(struct wiphy *wiphy, rc = wmi_add_cipher_key(vif, key_index, mac_addr, params->key_len, params->key, key_usage); - if (!rc) + if (!rc && !IS_ERR(cs)) + /* in FT set crypto will take place upon receiving + * WMI_RING_EN_EVENTID event + */ wil_set_crypto_rx(key_index, key_usage, cs, params); return rc; @@ -1468,21 +1607,36 @@ static void wil_print_bcon_data(struct cfg80211_beacon_data *b) } /* internal functions for device reset and starting AP */ -static int _wil_cfg80211_set_ies(struct wil6210_vif *vif, - struct cfg80211_beacon_data *bcon) +static u8 * +_wil_cfg80211_get_proberesp_ies(const u8 *proberesp, u16 proberesp_len, + u16 *ies_len) { - int rc; - u16 len = 0, proberesp_len = 0; - u8 *ies = NULL, *proberesp = NULL; + u8 *ies = NULL; - if (bcon->probe_resp) { + if (proberesp) { struct ieee80211_mgmt *f = - (struct ieee80211_mgmt *)bcon->probe_resp; + (struct ieee80211_mgmt *)proberesp; size_t hlen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable); - proberesp = f->u.probe_resp.variable; - proberesp_len = bcon->probe_resp_len - hlen; + + ies = f->u.probe_resp.variable; + if (ies_len) + *ies_len = proberesp_len - hlen; } + + return ies; +} + +static int _wil_cfg80211_set_ies(struct wil6210_vif *vif, + struct cfg80211_beacon_data *bcon) +{ + int rc; + u16 len = 0, proberesp_len = 0; + u8 *ies = NULL, *proberesp; + + proberesp = _wil_cfg80211_get_proberesp_ies(bcon->probe_resp, + bcon->probe_resp_len, + &proberesp_len); rc = _wil_cfg80211_merge_extra_ies(proberesp, proberesp_len, bcon->proberesp_ies, @@ -1526,6 +1680,9 @@ static int _wil_cfg80211_start_ap(struct wiphy *wiphy, struct wireless_dev *wdev = ndev->ieee80211_ptr; u8 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype); u8 is_go = (wdev->iftype == NL80211_IFTYPE_P2P_GO); + u16 proberesp_len = 0; + u8 *proberesp; + bool ft = false; if (pbss) wmi_nettype = WMI_NETTYPE_P2P; @@ -1538,6 +1695,25 @@ static int _wil_cfg80211_start_ap(struct wiphy *wiphy, wil_set_recovery_state(wil, fw_recovery_idle); + proberesp = _wil_cfg80211_get_proberesp_ies(bcon->probe_resp, + bcon->probe_resp_len, + &proberesp_len); + /* check that the probe response IEs has a MDE */ + if ((proberesp && proberesp_len > 0 && + cfg80211_find_ie(WLAN_EID_MOBILITY_DOMAIN, + proberesp, + proberesp_len))) + ft = true; + + if (ft) { + if (!test_bit(WMI_FW_CAPABILITY_FT_ROAMING, + wil->fw_capabilities)) { + wil_err(wil, "FW does not support FT roaming\n"); + return -ENOTSUPP; + } + set_bit(wil_vif_ft_roam, vif->status); + } + mutex_lock(&wil->mutex); if (!wil_has_other_active_ifaces(wil, ndev, true, false)) { @@ -1699,6 +1875,7 @@ static int wil_cfg80211_stop_ap(struct wiphy *wiphy, mutex_lock(&wil->mutex); wmi_pcp_stop(vif); + clear_bit(wil_vif_ft_roam, vif->status); if (last) __wil_down(wil); @@ -1718,8 +1895,9 @@ static int wil_cfg80211_add_station(struct wiphy *wiphy, struct wil6210_vif *vif = ndev_to_vif(dev); struct wil6210_priv *wil = wiphy_to_wil(wiphy); - wil_dbg_misc(wil, "add station %pM aid %d mid %d\n", - mac, params->aid, vif->mid); + wil_dbg_misc(wil, "add station %pM aid %d mid %d mask 0x%x set 0x%x\n", + mac, params->aid, vif->mid, + params->sta_flags_mask, params->sta_flags_set); 
if (!disable_ap_sme) { wil_err(wil, "not supported with AP SME enabled\n"); @@ -2040,6 +2218,54 @@ wil_cfg80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev, return 0; } +static int +wil_cfg80211_update_ft_ies(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_update_ft_ies_params *ftie) +{ + struct wil6210_priv *wil = wiphy_to_wil(wiphy); + struct wil6210_vif *vif = ndev_to_vif(dev); + struct cfg80211_bss *bss; + struct wmi_ft_reassoc_cmd reassoc; + int rc = 0; + + wil_dbg_misc(wil, "update ft ies, mid=%d\n", vif->mid); + wil_hex_dump_misc("FT IE ", DUMP_PREFIX_OFFSET, 16, 1, + ftie->ie, ftie->ie_len, true); + + if (!test_bit(WMI_FW_CAPABILITY_FT_ROAMING, wil->fw_capabilities)) { + wil_err(wil, "FW does not support FT roaming\n"); + return -EOPNOTSUPP; + } + + rc = wmi_update_ft_ies(vif, ftie->ie_len, ftie->ie); + if (rc) + return rc; + + if (!test_bit(wil_vif_ft_roam, vif->status)) + /* vif is not roaming */ + return 0; + + /* wil_vif_ft_roam is set. wil_cfg80211_update_ft_ies is used as + * a trigger for reassoc + */ + + bss = vif->bss; + if (!bss) { + wil_err(wil, "FT: bss is NULL\n"); + return -EINVAL; + } + + memset(&reassoc, 0, sizeof(reassoc)); + ether_addr_copy(reassoc.bssid, bss->bssid); + + rc = wmi_send(wil, WMI_FT_REASSOC_CMDID, vif->mid, + &reassoc, sizeof(reassoc)); + if (rc) + wil_err(wil, "FT: reassoc failed (%d)\n", rc); + + return rc; +} + static const struct cfg80211_ops wil_cfg80211_ops = { .add_virtual_intf = wil_cfg80211_add_iface, .del_virtual_intf = wil_cfg80211_del_iface, @@ -2075,6 +2301,7 @@ static const struct cfg80211_ops wil_cfg80211_ops = { .resume = wil_cfg80211_resume, .sched_scan_start = wil_cfg80211_sched_scan_start, .sched_scan_stop = wil_cfg80211_sched_scan_stop, + .update_ft_ies = wil_cfg80211_update_ft_ies, }; static void wil_wiphy_init(struct wiphy *wiphy) diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c index 51c3330bc316..66ffae2de86e 100644 --- a/drivers/net/wireless/ath/wil6210/debugfs.c +++ b/drivers/net/wireless/ath/wil6210/debugfs.c @@ -725,32 +725,6 @@ struct dentry *wil_debugfs_create_ioblob(const char *name, return debugfs_create_file(name, mode, parent, wil_blob, &fops_ioblob); } -/*---reset---*/ -static ssize_t wil_write_file_reset(struct file *file, const char __user *buf, - size_t len, loff_t *ppos) -{ - struct wil6210_priv *wil = file->private_data; - struct net_device *ndev = wil->main_ndev; - - /** - * BUG: - * this code does NOT sync device state with the rest of system - * use with care, debug only!!! - */ - rtnl_lock(); - dev_close(ndev); - ndev->flags &= ~IFF_UP; - rtnl_unlock(); - wil_reset(wil, true); - - return len; -} - -static const struct file_operations fops_reset = { - .write = wil_write_file_reset, - .open = simple_open, -}; - /*---write channel 1..4 to rxon for it, 0 to rxoff---*/ static ssize_t wil_write_file_rxon(struct file *file, const char __user *buf, size_t len, loff_t *ppos) @@ -1263,6 +1237,9 @@ static int wil_rx_buff_mgmt_debugfs_show(struct seq_file *s, void *data) int num_active; int num_free; + if (!rbm->buff_arr) + return -EINVAL; + seq_printf(s, " size = %zu\n", rbm->size); seq_printf(s, " free_list_empty_cnt = %lu\n", rbm->free_list_empty_cnt); @@ -1436,7 +1413,7 @@ static int wil_freq_debugfs_show(struct seq_file *s, void *data) { struct wil6210_priv *wil = s->private; struct wireless_dev *wdev = wil->main_ndev->ieee80211_ptr; - u16 freq = wdev->chandef.chan ? 
wdev->chandef.chan->center_freq : 0; + u32 freq = wdev->chandef.chan ? wdev->chandef.chan->center_freq : 0; seq_printf(s, "Freq = %d\n", freq); @@ -1695,6 +1672,7 @@ __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock) char *status = "unknown"; u8 aid = 0; u8 mid; + bool sta_connected = false; switch (p->status) { case wil_sta_unused: @@ -1709,8 +1687,20 @@ __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock) break; } mid = (p->status != wil_sta_unused) ? p->mid : U8_MAX; - seq_printf(s, "[%d] %pM %s MID %d AID %d\n", i, p->addr, status, - mid, aid); + if (mid < wil->max_vifs) { + struct wil6210_vif *vif = wil->vifs[mid]; + + if (vif->wdev.iftype == NL80211_IFTYPE_STATION && + p->status == wil_sta_connected) + sta_connected = true; + } + /* print roam counter only for connected stations */ + if (sta_connected) + seq_printf(s, "[%d] %pM connected (roam counter %d) MID %d AID %d\n", + i, p->addr, p->stats.ft_roams, mid, aid); + else + seq_printf(s, "[%d] %pM %s MID %d AID %d\n", i, + p->addr, status, mid, aid); if (p->status == wil_sta_connected) { spin_lock_bh(&p->tid_rx_lock); @@ -2451,7 +2441,6 @@ static const struct { {"desc", 0444, &fops_txdesc}, {"bf", 0444, &fops_bf}, {"mem_val", 0644, &fops_memread}, - {"reset", 0244, &fops_reset}, {"rxon", 0244, &fops_rxon}, {"tx_mgmt", 0244, &fops_txmgmt}, {"wmi_send", 0244, &fops_wmi}, diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index 7debed6bec06..398900a1c29e 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -223,6 +223,7 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) struct net_device *ndev = vif_to_ndev(vif); struct wireless_dev *wdev = vif_to_wdev(vif); struct wil_sta_info *sta = &wil->sta[cid]; + int min_ring_id = wil_get_min_tx_ring_id(wil); might_sleep(); wil_dbg_misc(wil, "disconnect_cid: CID %d, MID %d, status %d\n", @@ -273,7 +274,7 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) memset(sta->tid_crypto_rx, 0, sizeof(sta->tid_crypto_rx)); memset(&sta->group_crypto_rx, 0, sizeof(sta->group_crypto_rx)); /* release vrings */ - for (i = 0; i < ARRAY_SIZE(wil->ring_tx); i++) { + for (i = min_ring_id; i < ARRAY_SIZE(wil->ring_tx); i++) { if (wil->ring2cid_tid[i][0] == cid) wil_ring_fini_tx(wil, i); } @@ -360,6 +361,8 @@ static void _wil6210_disconnect(struct wil6210_vif *vif, const u8 *bssid, vif->bss = NULL; } clear_bit(wil_vif_fwconnecting, vif->status); + clear_bit(wil_vif_ft_roam, vif->status); + break; case NL80211_IFTYPE_AP: case NL80211_IFTYPE_P2P_GO: @@ -604,8 +607,10 @@ int wil_priv_init(struct wil6210_priv *wil) wil->sta[i].mid = U8_MAX; } - for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) + for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) { spin_lock_init(&wil->ring_tx_data[i].lock); + wil->ring2cid_tid[i][0] = WIL6210_MAX_CID; + } mutex_init(&wil->mutex); mutex_init(&wil->vif_mutex); @@ -653,8 +658,6 @@ int wil_priv_init(struct wil6210_priv *wil) /* edma configuration can be updated via debugfs before allocation */ wil->num_rx_status_rings = WIL_DEFAULT_NUM_RX_STATUS_RINGS; - wil->use_compressed_rx_status = true; - wil->use_rx_hw_reordering = true; wil->tx_status_ring_order = WIL_TX_SRING_SIZE_ORDER_DEFAULT; /* Rx status ring size should be bigger than the number of RX buffers @@ -1154,6 +1157,8 @@ void wil_refresh_fw_capabilities(struct wil6210_priv *wil) wil->max_agg_wsize = WIL_MAX_AGG_WSIZE; wil->max_ampdu_size = WIL_MAX_AMPDU_SIZE; } + + update_supported_bands(wil); } void wil_mbox_ring_le2cpus(struct 
wil6210_mbox_ring *r) diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c index 89119e7facd0..c8c6613371d1 100644 --- a/drivers/net/wireless/ath/wil6210/pcie_bus.c +++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c @@ -108,6 +108,7 @@ int wil_set_capabilities(struct wil6210_priv *wil) set_bit(hw_capa_no_flash, wil->hw_capa); wil->use_enhanced_dma_hw = true; wil->use_rx_hw_reordering = true; + wil->use_compressed_rx_status = true; wil_fw_name = ftm_mode ? WIL_FW_NAME_FTM_TALYN : WIL_FW_NAME_TALYN; if (wil_fw_verify_file_exists(wil, wil_fw_name)) diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c index 3a4194779ddf..75fe9323547c 100644 --- a/drivers/net/wireless/ath/wil6210/pm.c +++ b/drivers/net/wireless/ath/wil6210/pm.c @@ -190,7 +190,7 @@ out: static int wil_suspend_keep_radio_on(struct wil6210_priv *wil) { int rc = 0; - unsigned long start, data_comp_to; + unsigned long data_comp_to; wil_dbg_pm(wil, "suspend keep radio on\n"); @@ -232,7 +232,6 @@ static int wil_suspend_keep_radio_on(struct wil6210_priv *wil) } /* Wait for completion of the pending RX packets */ - start = jiffies; data_comp_to = jiffies + msecs_to_jiffies(WIL_DATA_COMPLETION_TO_MS); if (test_bit(wil_status_napi_en, wil->status)) { while (!wil->txrx_ops.is_rx_idle(wil)) { diff --git a/drivers/net/wireless/ath/wil6210/rx_reorder.c b/drivers/net/wireless/ath/wil6210/rx_reorder.c index b608aa16b4f1..983bd001b53b 100644 --- a/drivers/net/wireless/ath/wil6210/rx_reorder.c +++ b/drivers/net/wireless/ath/wil6210/rx_reorder.c @@ -382,11 +382,13 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) } /* apply */ - r = wil_tid_ampdu_rx_alloc(wil, agg_wsize, ssn); - spin_lock_bh(&sta->tid_rx_lock); - wil_tid_ampdu_rx_free(wil, sta->tid_rx[tid]); - sta->tid_rx[tid] = r; - spin_unlock_bh(&sta->tid_rx_lock); + if (!wil->use_rx_hw_reordering) { + r = wil_tid_ampdu_rx_alloc(wil, agg_wsize, ssn); + spin_lock_bh(&sta->tid_rx_lock); + wil_tid_ampdu_rx_free(wil, sta->tid_rx[tid]); + sta->tid_rx[tid] = r; + spin_unlock_bh(&sta->tid_rx_lock); + } out: return rc; diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c index 6a7943e487fb..cc5f263cc965 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c @@ -77,8 +77,9 @@ bool wil_is_tx_idle(struct wil6210_priv *wil) { int i; unsigned long data_comp_to; + int min_ring_id = wil_get_min_tx_ring_id(wil); - for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) { + for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) { struct wil_ring *vring = &wil->ring_tx[i]; int vring_index = vring - wil->ring_tx; struct wil_ring_tx_data *txdata = @@ -765,7 +766,14 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev) return; } - if (wdev->iftype == NL80211_IFTYPE_AP && !vif->ap_isolate) { + if (wdev->iftype == NL80211_IFTYPE_STATION) { + if (mcast && ether_addr_equal(eth->h_source, ndev->dev_addr)) { + /* mcast packet looped back to us */ + rc = GRO_DROP; + dev_kfree_skb(skb); + goto stats; + } + } else if (wdev->iftype == NL80211_IFTYPE_AP && !vif->ap_isolate) { if (mcast) { /* send multicast frames both to higher layers in * local net stack and back to the wireless medium @@ -1051,6 +1059,88 @@ static int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size, return rc; } +static int wil_tx_vring_modify(struct wil6210_vif *vif, int ring_id, int cid, + int tid) +{ + struct wil6210_priv *wil = vif_to_wil(vif); + int rc; + 
struct wmi_vring_cfg_cmd cmd = { + .action = cpu_to_le32(WMI_VRING_CMD_MODIFY), + .vring_cfg = { + .tx_sw_ring = { + .max_mpdu_size = + cpu_to_le16(wil_mtu2macbuf(mtu_max)), + .ring_size = 0, + }, + .ringid = ring_id, + .cidxtid = mk_cidxtid(cid, tid), + .encap_trans_type = WMI_VRING_ENC_TYPE_802_3, + .mac_ctrl = 0, + .to_resolution = 0, + .agg_max_wsize = 0, + .schd_params = { + .priority = cpu_to_le16(0), + .timeslot_us = cpu_to_le16(0xfff), + }, + }, + }; + struct { + struct wmi_cmd_hdr wmi; + struct wmi_vring_cfg_done_event cmd; + } __packed reply = { + .cmd = {.status = WMI_FW_STATUS_FAILURE}, + }; + struct wil_ring *vring = &wil->ring_tx[ring_id]; + struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_id]; + + wil_dbg_misc(wil, "vring_modify: ring %d cid %d tid %d\n", ring_id, + cid, tid); + lockdep_assert_held(&wil->mutex); + + if (!vring->va) { + wil_err(wil, "Tx ring [%d] not allocated\n", ring_id); + return -EINVAL; + } + + if (wil->ring2cid_tid[ring_id][0] != cid || + wil->ring2cid_tid[ring_id][1] != tid) { + wil_err(wil, "ring info does not match cid=%u tid=%u\n", + wil->ring2cid_tid[ring_id][0], + wil->ring2cid_tid[ring_id][1]); + } + + cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa); + + rc = wmi_call(wil, WMI_VRING_CFG_CMDID, vif->mid, &cmd, sizeof(cmd), + WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100); + if (rc) + goto fail; + + if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) { + wil_err(wil, "Tx modify failed, status 0x%02x\n", + reply.cmd.status); + rc = -EINVAL; + goto fail; + } + + /* set BA aggregation window size to 0 to force a new BA with the + * new AP + */ + txdata->agg_wsize = 0; + if (txdata->dot1x_open && agg_wsize >= 0) + wil_addba_tx_request(wil, ring_id, agg_wsize); + + return 0; +fail: + spin_lock_bh(&txdata->lock); + txdata->dot1x_open = false; + txdata->enabled = 0; + spin_unlock_bh(&txdata->lock); + wil->ring2cid_tid[ring_id][0] = WIL6210_MAX_CID; + wil->ring2cid_tid[ring_id][1] = 0; + return rc; +} + int wil_vring_init_bcast(struct wil6210_vif *vif, int id, int size) { struct wil6210_priv *wil = vif_to_wil(vif); @@ -1935,6 +2025,7 @@ static inline void __wil_update_net_queues(struct wil6210_priv *wil, bool check_stop) { int i; + int min_ring_id = wil_get_min_tx_ring_id(wil); if (unlikely(!vif)) return; @@ -1967,7 +2058,7 @@ static inline void __wil_update_net_queues(struct wil6210_priv *wil, return; /* check wake */ - for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) { + for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) { struct wil_ring *cur_ring = &wil->ring_tx[i]; struct wil_ring_tx_data *txdata = &wil->ring_tx_data[i]; @@ -2272,6 +2363,7 @@ void wil_init_txrx_ops_legacy_dma(struct wil6210_priv *wil) wil->txrx_ops.ring_init_bcast = wil_vring_init_bcast; wil->txrx_ops.tx_init = wil_tx_init; wil->txrx_ops.tx_fini = wil_tx_fini; + wil->txrx_ops.tx_ring_modify = wil_tx_vring_modify; /* RX ops */ wil->txrx_ops.rx_init = wil_rx_init; wil->txrx_ops.wmi_addba_rx_resp = wmi_addba_rx_resp; diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.c b/drivers/net/wireless/ath/wil6210/txrx_edma.c index bca61cb44c37..2bbae75b9a84 100644 --- a/drivers/net/wireless/ath/wil6210/txrx_edma.c +++ b/drivers/net/wireless/ath/wil6210/txrx_edma.c @@ -279,9 +279,6 @@ static void wil_move_all_rx_buff_to_free_list(struct wil6210_priv *wil, u16 buff_id; *d = *_d; - pa = wil_rx_desc_get_addr_edma(&d->dma); - dmalen = le16_to_cpu(d->dma.length); - dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE); /* Extract the SKB from the rx_buff management array */ 
buff_id = __le16_to_cpu(d->mac.buff_id); @@ -291,10 +288,15 @@ static void wil_move_all_rx_buff_to_free_list(struct wil6210_priv *wil, } skb = wil->rx_buff_mgmt.buff_arr[buff_id].skb; wil->rx_buff_mgmt.buff_arr[buff_id].skb = NULL; - if (unlikely(!skb)) + if (unlikely(!skb)) { wil_err(wil, "No Rx skb at buff_id %d\n", buff_id); - else + } else { + pa = wil_rx_desc_get_addr_edma(&d->dma); + dmalen = le16_to_cpu(d->dma.length); + dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE); + kfree_skb(skb); + } /* Move the buffer from the active to the free list */ list_move(&wil->rx_buff_mgmt.buff_arr[buff_id].list, @@ -745,6 +747,16 @@ static int wil_ring_init_tx_edma(struct wil6210_vif *vif, int ring_id, return rc; } +static int wil_tx_ring_modify_edma(struct wil6210_vif *vif, int ring_id, + int cid, int tid) +{ + struct wil6210_priv *wil = vif_to_wil(vif); + + wil_err(wil, "ring modify is not supported for EDMA\n"); + + return -EOPNOTSUPP; +} + /* This function is used only for RX SW reorder */ static int wil_check_bar(struct wil6210_priv *wil, void *msg, int cid, struct sk_buff *skb, struct wil_net_stats *stats) @@ -906,6 +918,9 @@ again: wil->rx_buff_mgmt.buff_arr[buff_id].skb = NULL; if (!skb) { wil_err(wil, "No Rx skb at buff_id %d\n", buff_id); + /* Move the buffer from the active list to the free list */ + list_move(&wil->rx_buff_mgmt.buff_arr[buff_id].list, + &wil->rx_buff_mgmt.free); goto again; } @@ -1595,6 +1610,7 @@ void wil_init_txrx_ops_edma(struct wil6210_priv *wil) wil->txrx_ops.tx_desc_map = wil_tx_desc_map_edma; wil->txrx_ops.tx_desc_unmap = wil_tx_desc_unmap_edma; wil->txrx_ops.tx_ring_tso = __wil_tx_ring_tso_edma; + wil->txrx_ops.tx_ring_modify = wil_tx_ring_modify_edma; /* RX ops */ wil->txrx_ops.rx_init = wil_rx_init_edma; wil->txrx_ops.wmi_addba_rx_resp = wmi_addba_rx_resp_edma; diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index 17c294b1ead1..abb82018d3b4 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -449,6 +449,15 @@ static inline void parse_cidxtid(u8 cidxtid, u8 *cid, u8 *tid) *tid = (cidxtid >> 4) & 0xf; } +/** + * wil_cid_valid - check cid is valid + * @cid: CID value + */ +static inline bool wil_cid_valid(u8 cid) +{ + return cid < WIL6210_MAX_CID; +} + struct wil6210_mbox_ring { u32 base; u16 entry_size; /* max. size of mbox entry, incl. 
all headers */ @@ -577,6 +586,7 @@ struct wil_net_stats { unsigned long rx_csum_err; u16 last_mcs_rx; u64 rx_per_mcs[WIL_MCS_MAX + 1]; + u32 ft_roams; /* relevant in STA mode */ }; /** @@ -599,6 +609,8 @@ struct wil_txrx_ops { struct wil_ctx *ctx); int (*tx_ring_tso)(struct wil6210_priv *wil, struct wil6210_vif *vif, struct wil_ring *ring, struct sk_buff *skb); + int (*tx_ring_modify)(struct wil6210_vif *vif, int ring_id, + int cid, int tid); irqreturn_t (*irq_tx)(int irq, void *cookie); /* RX ops */ int (*rx_init)(struct wil6210_priv *wil, u16 ring_size); @@ -821,6 +833,7 @@ extern u8 led_polarity; enum wil6210_vif_status { wil_vif_fwconnecting, wil_vif_fwconnected, + wil_vif_ft_roam, wil_vif_status_last /* keep last */ }; @@ -1204,6 +1217,7 @@ int wmi_add_cipher_key(struct wil6210_vif *vif, u8 key_index, int wmi_echo(struct wil6210_priv *wil); int wmi_set_ie(struct wil6210_vif *vif, u8 type, u16 ie_len, const void *ie); int wmi_rx_chain_add(struct wil6210_priv *wil, struct wil_ring *vring); +int wmi_update_ft_ies(struct wil6210_vif *vif, u16 ie_len, const void *ie); int wmi_rxon(struct wil6210_priv *wil, bool on); int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_m, u32 *t_r); int wmi_disconnect_sta(struct wil6210_vif *vif, const u8 *mac, @@ -1319,6 +1333,9 @@ void wil6210_unmask_irq_tx_edma(struct wil6210_priv *wil); void wil_rx_handle(struct wil6210_priv *wil, int *quota); void wil6210_unmask_irq_rx(struct wil6210_priv *wil); void wil6210_unmask_irq_rx_edma(struct wil6210_priv *wil); +void wil_set_crypto_rx(u8 key_index, enum wmi_key_usage key_usage, + struct wil_sta_info *cs, + struct key_params *params); int wil_iftype_nl2wmi(enum nl80211_iftype type); @@ -1370,4 +1387,6 @@ int wmi_addba_rx_resp_edma(struct wil6210_priv *wil, u8 mid, u8 cid, u8 tid, u8 token, u16 status, bool amsdu, u16 agg_wsize, u16 timeout); +void update_supported_bands(struct wil6210_priv *wil); + #endif /* __WIL6210_H__ */ diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index 42c02a20ec97..4859f0e43658 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -227,6 +227,14 @@ struct blink_on_off_time led_blink_time[] = { {WIL_LED_BLINK_ON_FAST_MS, WIL_LED_BLINK_OFF_FAST_MS}, }; +struct auth_no_hdr { + __le16 auth_alg; + __le16 auth_transaction; + __le16 status_code; + /* possibly followed by Challenge text */ + u8 variable[0]; +} __packed; + u8 led_polarity = LED_POLARITY_LOW_ACTIVE; /** @@ -468,6 +476,12 @@ static const char *cmdid2name(u16 cmdid) return "WMI_LINK_STATS_CMD"; case WMI_SW_TX_REQ_EXT_CMDID: return "WMI_SW_TX_REQ_EXT_CMDID"; + case WMI_FT_AUTH_CMDID: + return "WMI_FT_AUTH_CMD"; + case WMI_FT_REASSOC_CMDID: + return "WMI_FT_REASSOC_CMD"; + case WMI_UPDATE_FT_IES_CMDID: + return "WMI_UPDATE_FT_IES_CMD"; default: return "Untracked CMD"; } @@ -606,6 +620,12 @@ static const char *eventid2name(u16 eventid) return "WMI_LINK_STATS_CONFIG_DONE_EVENT"; case WMI_LINK_STATS_EVENTID: return "WMI_LINK_STATS_EVENT"; + case WMI_COMMAND_NOT_SUPPORTED_EVENTID: + return "WMI_COMMAND_NOT_SUPPORTED_EVENT"; + case WMI_FT_AUTH_STATUS_EVENTID: + return "WMI_FT_AUTH_STATUS_EVENT"; + case WMI_FT_REASSOC_STATUS_EVENTID: + return "WMI_FT_REASSOC_STATUS_EVENT"; default: return "Untracked EVENT"; } @@ -1156,6 +1176,9 @@ static void wmi_evt_ring_en(struct wil6210_vif *vif, int id, void *d, int len) struct wmi_ring_en_event *evt = d; u8 vri = evt->ring_index; struct wireless_dev *wdev = vif_to_wdev(vif); + struct wil_sta_info *sta; + 
u8 cid;
+	struct key_params params;
 
 	wil_dbg_wmi(wil, "Enable vring %d MID %d\n", vri, vif->mid);
 
@@ -1164,13 +1187,33 @@ static void wmi_evt_ring_en(struct wil6210_vif *vif, int id, void *d, int len)
 		return;
 	}
 
-	if (wdev->iftype != NL80211_IFTYPE_AP || !disable_ap_sme)
-		/* in AP mode with disable_ap_sme, this is done by
-		 * wil_cfg80211_change_station()
+	if (wdev->iftype != NL80211_IFTYPE_AP || !disable_ap_sme ||
+	    test_bit(wil_vif_ft_roam, vif->status))
+		/* in AP mode with disable_ap_sme and no FT roam,
+		 * this is done by wil_cfg80211_change_station()
 		 */
 		wil->ring_tx_data[vri].dot1x_open = true;
 	if (vri == vif->bcast_ring) /* no BA for bcast */
 		return;
+
+	cid = wil->ring2cid_tid[vri][0];
+	if (!wil_cid_valid(cid)) {
+		wil_err(wil, "invalid cid %d for vring %d\n", cid, vri);
+		return;
+	}
+
+	/* In FT mode the key arrives before the WMI_CONNECT_EVENT
+	 * from FW and is therefore not stored yet;
+	 * wil_set_crypto_rx is called here to reset the security PN
+	 */
+	sta = &wil->sta[cid];
+	if (test_bit(wil_vif_ft_roam, vif->status)) {
+		memset(&params, 0, sizeof(params));
+		wil_set_crypto_rx(0, WMI_KEY_USE_PAIRWISE, sta, &params);
+		if (wdev->iftype != NL80211_IFTYPE_AP)
+			clear_bit(wil_vif_ft_roam, vif->status);
+	}
+
 	if (agg_wsize >= 0)
 		wil_addba_tx_request(wil, vri, agg_wsize);
 }
@@ -1462,6 +1505,271 @@ wmi_evt_link_stats(struct wil6210_vif *vif, int id, void *d, int len)
 }
 
 /**
+ * find cid and ringid for the station vif
+ *
+ * return an error if a non-station interface is used or no ring was found
+ */
+static int wil_find_cid_ringid_sta(struct wil6210_priv *wil,
+				   struct wil6210_vif *vif,
+				   int *cid,
+				   int *ringid)
+{
+	struct wil_ring *ring;
+	struct wil_ring_tx_data *txdata;
+	int min_ring_id = wil_get_min_tx_ring_id(wil);
+	int i;
+	u8 lcid;
+
+	if (!(vif->wdev.iftype == NL80211_IFTYPE_STATION ||
+	      vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT)) {
+		wil_err(wil, "invalid interface type %d\n", vif->wdev.iftype);
+		return -EINVAL;
+	}
+
+	/* In the STA mode, it is expected to have only one ring
+	 * for the AP we are connected to.
+	 * find it and return the cid associated with it.
+	 */
+	for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) {
+		ring = &wil->ring_tx[i];
+		txdata = &wil->ring_tx_data[i];
+		if (!ring->va || !txdata->enabled || txdata->mid != vif->mid)
+			continue;
+
+		lcid = wil->ring2cid_tid[i][0];
+		if (lcid >= WIL6210_MAX_CID) /* skip BCAST */
+			continue;
+
+		wil_dbg_wmi(wil, "find sta -> ringid %d cid %d\n", i, lcid);
+		*cid = lcid;
+		*ringid = i;
+		return 0;
+	}
+
+	wil_dbg_wmi(wil, "find sta cid while no rings active?\n");
+
+	return -ENOENT;
+}
+
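The two handlers that follow implement the driver half of the FT exchange: wmi_evt_auth_status() validates the firmware's authentication response and hands the FT IEs to user space, and wmi_evt_reassoc_status() later completes the roam. The cfg80211 calls they are built around, reduced to a sketch; the function names and parameters here are illustrative, not patch code:

#include <net/cfg80211.h>

/* Step 1: FT auth succeeded over the air; ask the supplicant for
 * reassociation IEs (it answers via NL80211_CMD_UPDATE_FT_IES).
 */
static void ft_auth_done(struct net_device *ndev, const u8 *target_ap,
			 const u8 *ies, size_t ies_len)
{
	struct cfg80211_ft_event_params ft = {
		.target_ap = target_ap,	/* BSSID of the roam target */
		.ies = ies,		/* IEs from the auth frame */
		.ies_len = ies_len,
	};

	cfg80211_ft_event(ndev, &ft);
}

/* Step 2: reassociation succeeded; report the finished roam.
 * cfg80211 consumes the bss reference passed in info.bss.
 */
static void ft_roam_done(struct net_device *ndev, struct cfg80211_bss *bss,
			 const u8 *resp_ie, size_t resp_ie_len)
{
	struct cfg80211_roam_info info = {
		.bss = bss,
		.resp_ie = resp_ie,
		.resp_ie_len = resp_ie_len,
	};

	cfg80211_roamed(ndev, &info, GFP_KERNEL);
}

Splitting the roam into these two user-space round trips is what lets wpa_supplicant compute the FT key hierarchy between authentication and reassociation.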
+static void
+wmi_evt_auth_status(struct wil6210_vif *vif, int id, void *d, int len)
+{
+	struct wil6210_priv *wil = vif_to_wil(vif);
+	struct net_device *ndev = vif_to_ndev(vif);
+	struct wmi_ft_auth_status_event *data = d;
+	int ie_len = len - offsetof(struct wmi_ft_auth_status_event, ie_info);
+	int rc, cid = 0, ringid = 0;
+	struct cfg80211_ft_event_params ft;
+	u16 d_len;
+	/* auth_alg(u16) + auth_transaction(u16) + status_code(u16) */
+	const size_t auth_ie_offset = sizeof(u16) * 3;
+	struct auth_no_hdr *auth = (struct auth_no_hdr *)data->ie_info;
+
+	/* check the status */
+	if (ie_len >= 0 && data->status != WMI_FW_STATUS_SUCCESS) {
+		wil_err(wil, "FT: auth failed. status %d\n", data->status);
+		goto fail;
+	}
+
+	if (ie_len < auth_ie_offset) {
+		wil_err(wil, "FT: auth event too short, len %d\n", len);
+		goto fail;
+	}
+
+	d_len = le16_to_cpu(data->ie_len);
+	if (d_len != ie_len) {
+		wil_err(wil,
+			"FT: auth ie length mismatch, d_len %d should be %d\n",
+			d_len, ie_len);
+		goto fail;
+	}
+
+	if (!test_bit(wil_vif_ft_roam, vif->status)) {
+		wil_err(wil, "FT: Not in roaming state\n");
+		goto fail;
+	}
+
+	if (le16_to_cpu(auth->auth_transaction) != 2) {
+		wil_err(wil, "FT: auth error. auth_transaction %d\n",
+			le16_to_cpu(auth->auth_transaction));
+		goto fail;
+	}
+
+	if (le16_to_cpu(auth->auth_alg) != WLAN_AUTH_FT) {
+		wil_err(wil, "FT: auth error. auth_alg %d\n",
+			le16_to_cpu(auth->auth_alg));
+		goto fail;
+	}
+
+	wil_dbg_wmi(wil, "FT: auth to %pM successful\n", data->mac_addr);
+	wil_hex_dump_wmi("FT Auth ies : ", DUMP_PREFIX_OFFSET, 16, 1,
+			 data->ie_info, d_len, true);
+
+	/* find cid and ringid */
+	rc = wil_find_cid_ringid_sta(wil, vif, &cid, &ringid);
+	if (rc) {
+		wil_err(wil, "No valid cid found\n");
+		goto fail;
+	}
+
+	if (vif->privacy) {
+		/* For secure assoc, remove old keys */
+		rc = wmi_del_cipher_key(vif, 0, wil->sta[cid].addr,
+					WMI_KEY_USE_PAIRWISE);
+		if (rc) {
+			wil_err(wil, "WMI_DELETE_CIPHER_KEY_CMD(PTK) failed\n");
+			goto fail;
+		}
+		rc = wmi_del_cipher_key(vif, 0, wil->sta[cid].addr,
+					WMI_KEY_USE_RX_GROUP);
+		if (rc) {
+			wil_err(wil, "WMI_DELETE_CIPHER_KEY_CMD(GTK) failed\n");
+			goto fail;
+		}
+	}
+
+	memset(&ft, 0, sizeof(ft));
+	ft.ies = data->ie_info + auth_ie_offset;
+	ft.ies_len = d_len - auth_ie_offset;
+	ft.target_ap = data->mac_addr;
+	cfg80211_ft_event(ndev, &ft);
+
+	return;
+
+fail:
+	wil6210_disconnect(vif, NULL, WLAN_REASON_PREV_AUTH_NOT_VALID, false);
+}
+
+static void
+wmi_evt_reassoc_status(struct wil6210_vif *vif, int id, void *d, int len)
+{
+	struct wil6210_priv *wil = vif_to_wil(vif);
+	struct net_device *ndev = vif_to_ndev(vif);
+	struct wiphy *wiphy = wil_to_wiphy(wil);
+	struct wmi_ft_reassoc_status_event *data = d;
+	int ies_len = len - offsetof(struct wmi_ft_reassoc_status_event,
+				     ie_info);
+	int rc = -ENOENT, cid = 0, ringid = 0;
+	int ch; /* channel number (primary) */
+	size_t assoc_req_ie_len = 0, assoc_resp_ie_len = 0;
+	u8 *assoc_req_ie = NULL, *assoc_resp_ie = NULL;
+	/* capinfo(u16) + listen_interval(u16) + current_ap mac addr + IEs */
+	const size_t assoc_req_ie_offset = sizeof(u16) * 2 + ETH_ALEN;
+	/* capinfo(u16) + status_code(u16) + associd(u16) + IEs */
+	const size_t assoc_resp_ie_offset = sizeof(u16) * 3;
+	u16 d_len;
+	int freq;
+	struct cfg80211_roam_info info;
+
+	if (ies_len < 0) {
+		wil_err(wil, "ft reassoc event too short, len %d\n", len);
+		goto fail;
+	}
+
+	wil_dbg_wmi(wil, "Reassoc Status event: status=%d, aid=%d",
+		    data->status, data->aid);
+	wil_dbg_wmi(wil, "    mac_addr=%pM, beacon_ie_len=%d",
+		    data->mac_addr, data->beacon_ie_len);
+	wil_dbg_wmi(wil, "    reassoc_req_ie_len=%d, reassoc_resp_ie_len=%d",
+		    le16_to_cpu(data->reassoc_req_ie_len),
+		    le16_to_cpu(data->reassoc_resp_ie_len));
+
+	d_len = le16_to_cpu(data->beacon_ie_len) +
+		le16_to_cpu(data->reassoc_req_ie_len) +
+		le16_to_cpu(data->reassoc_resp_ie_len);
+	if (d_len != ies_len) {
+		wil_err(wil,
+			"ft reassoc ie length mismatch, d_len %d should be %d\n",
+			d_len, ies_len);
+		goto fail;
+	}
+
+	/* check the status */
+	if (data->status != WMI_FW_STATUS_SUCCESS) {
+		wil_err(wil, "ft reassoc failed. 
status %d\n", data->status); + goto fail; + } + + /* find cid and ringid */ + rc = wil_find_cid_ringid_sta(wil, vif, &cid, &ringid); + if (rc) { + wil_err(wil, "No valid cid found\n"); + goto fail; + } + + ch = data->channel + 1; + wil_info(wil, "FT: Roam %pM channel [%d] cid %d aid %d\n", + data->mac_addr, ch, cid, data->aid); + + wil_hex_dump_wmi("reassoc AI : ", DUMP_PREFIX_OFFSET, 16, 1, + data->ie_info, len - sizeof(*data), true); + + /* figure out IE's */ + if (le16_to_cpu(data->reassoc_req_ie_len) > assoc_req_ie_offset) { + assoc_req_ie = &data->ie_info[assoc_req_ie_offset]; + assoc_req_ie_len = le16_to_cpu(data->reassoc_req_ie_len) - + assoc_req_ie_offset; + } + if (le16_to_cpu(data->reassoc_resp_ie_len) <= assoc_resp_ie_offset) { + wil_err(wil, "FT: reassoc resp ie len is too short, len %d\n", + le16_to_cpu(data->reassoc_resp_ie_len)); + goto fail; + } + + assoc_resp_ie = &data->ie_info[le16_to_cpu(data->reassoc_req_ie_len) + + assoc_resp_ie_offset]; + assoc_resp_ie_len = le16_to_cpu(data->reassoc_resp_ie_len) - + assoc_resp_ie_offset; + + if (test_bit(wil_status_resetting, wil->status) || + !test_bit(wil_status_fwready, wil->status)) { + wil_err(wil, "FT: status_resetting, cancel reassoc event\n"); + /* no need for cleanup, wil_reset will do that */ + return; + } + + mutex_lock(&wil->mutex); + + /* ring modify to set the ring for the roamed AP settings */ + wil_dbg_wmi(wil, + "ft modify tx config for connection CID %d ring %d\n", + cid, ringid); + + rc = wil->txrx_ops.tx_ring_modify(vif, ringid, cid, 0); + if (rc) { + wil_err(wil, "modify TX for CID %d MID %d ring %d failed (%d)\n", + cid, vif->mid, ringid, rc); + mutex_unlock(&wil->mutex); + goto fail; + } + + /* Update the driver STA members with the new bss */ + wil->sta[cid].aid = data->aid; + wil->sta[cid].stats.ft_roams++; + ether_addr_copy(wil->sta[cid].addr, vif->bss->bssid); + mutex_unlock(&wil->mutex); + del_timer_sync(&vif->connect_timer); + + cfg80211_ref_bss(wiphy, vif->bss); + freq = ieee80211_channel_to_frequency(ch, NL80211_BAND_60GHZ); + + memset(&info, 0, sizeof(info)); + info.channel = ieee80211_get_channel(wiphy, freq); + info.bss = vif->bss; + info.req_ie = assoc_req_ie; + info.req_ie_len = assoc_req_ie_len; + info.resp_ie = assoc_resp_ie; + info.resp_ie_len = assoc_resp_ie_len; + cfg80211_roamed(ndev, &info, GFP_KERNEL); + vif->bss = NULL; + + return; + +fail: + wil6210_disconnect(vif, NULL, WLAN_REASON_PREV_AUTH_NOT_VALID, false); +} + +/** * Some events are ignored for purpose; and need not be interpreted as * "unhandled events" */ @@ -1492,6 +1800,8 @@ static const struct { {WMI_DATA_PORT_OPEN_EVENTID, wmi_evt_ignore}, {WMI_SCHED_SCAN_RESULT_EVENTID, wmi_evt_sched_scan_result}, {WMI_LINK_STATS_EVENTID, wmi_evt_link_stats}, + {WMI_FT_AUTH_STATUS_EVENTID, wmi_evt_auth_status}, + {WMI_FT_REASSOC_STATUS_EVENTID, wmi_evt_reassoc_status}, }; /* @@ -2086,6 +2396,40 @@ out: return rc; } +int wmi_update_ft_ies(struct wil6210_vif *vif, u16 ie_len, const void *ie) +{ + struct wil6210_priv *wil = vif_to_wil(vif); + u16 len; + struct wmi_update_ft_ies_cmd *cmd; + int rc; + + if (!ie) + ie_len = 0; + + len = sizeof(struct wmi_update_ft_ies_cmd) + ie_len; + if (len < ie_len) { + wil_err(wil, "wraparound. 
ie len %d\n", ie_len); + return -EINVAL; + } + + cmd = kzalloc(len, GFP_KERNEL); + if (!cmd) { + rc = -ENOMEM; + goto out; + } + + cmd->ie_len = cpu_to_le16(ie_len); + memcpy(cmd->ie_info, ie, ie_len); + rc = wmi_send(wil, WMI_UPDATE_FT_IES_CMDID, vif->mid, cmd, len); + kfree(cmd); + +out: + if (rc) + wil_err(wil, "update ft ies failed : %d\n", rc); + + return rc; +} + /** * wmi_rxon - turn radio on/off * @on: turn on if true, off otherwise diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h index 139acb2caf92..b668758da994 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.h +++ b/drivers/net/wireless/ath/wil6210/wmi.h @@ -103,6 +103,7 @@ enum wmi_fw_capability { WMI_FW_CAPABILITY_AMSDU = 23, WMI_FW_CAPABILITY_RAW_MODE = 24, WMI_FW_CAPABILITY_TX_REQ_EXT = 25, + WMI_FW_CAPABILITY_CHANNEL_4 = 26, WMI_FW_CAPABILITY_MAX, }; @@ -2369,6 +2370,7 @@ struct wmi_ft_reassoc_status_event { __le16 beacon_ie_len; __le16 reassoc_req_ie_len; __le16 reassoc_resp_ie_len; + u8 reserved[4]; u8 ie_info[0]; } __packed; diff --git a/drivers/net/wireless/broadcom/b43/b43.h b/drivers/net/wireless/broadcom/b43/b43.h index b77d1a904f7e..9fc7c088a539 100644 --- a/drivers/net/wireless/broadcom/b43/b43.h +++ b/drivers/net/wireless/broadcom/b43/b43.h @@ -909,7 +909,7 @@ struct b43_wl { /* Set this if we call ieee80211_register_hw() and check if we call * ieee80211_unregister_hw(). */ - bool hw_registred; + bool hw_registered; /* We can only have one operating interface (802.11 core) * at a time. General information about this interface follows. diff --git a/drivers/net/wireless/broadcom/b43/dma.c b/drivers/net/wireless/broadcom/b43/dma.c index d46d57b989ae..dfc4c34298d4 100644 --- a/drivers/net/wireless/broadcom/b43/dma.c +++ b/drivers/net/wireless/broadcom/b43/dma.c @@ -1432,7 +1432,7 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb) goto out; } - if (unlikely(WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME))) { + if (WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME)) { /* If we get here, we have a real error with the queue * full, but queues not stopped. */ b43err(dev->wl, "DMA queue overflow\n"); diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c index b37e7391f55d..74be3c809225 100644 --- a/drivers/net/wireless/broadcom/b43/main.c +++ b/drivers/net/wireless/broadcom/b43/main.c @@ -2611,7 +2611,7 @@ start_ieee80211: err = ieee80211_register_hw(wl->hw); if (err) goto err_one_core_detach; - wl->hw_registred = true; + wl->hw_registered = true; b43_leds_register(wl->current_dev); /* Register HW RNG driver */ @@ -5493,13 +5493,11 @@ err_powerdown: static void b43_one_core_detach(struct b43_bus_dev *dev) { struct b43_wldev *wldev; - struct b43_wl *wl; /* Do not cancel ieee80211-workqueue based work here. * See comment in b43_remove(). 
*/ wldev = b43_bus_get_wldev(dev); - wl = wldev->wl; b43_debugfs_remove_device(wldev); b43_wireless_core_detach(wldev); list_del(&wldev->list); @@ -5610,7 +5608,7 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev) wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); - wl->hw_registred = false; + wl->hw_registered = false; hw->max_rates = 2; SET_IEEE80211_DEV(hw, dev->dev); if (is_valid_ether_addr(sprom->et1mac)) @@ -5693,7 +5691,7 @@ static void b43_bcma_remove(struct bcma_device *core) B43_WARN_ON(!wl); if (!wldev->fw.ucode.data) return; /* NULL if firmware never loaded */ - if (wl->current_dev == wldev && wl->hw_registred) { + if (wl->current_dev == wldev && wl->hw_registered) { b43_leds_stop(wldev); ieee80211_unregister_hw(wl->hw); } @@ -5776,7 +5774,7 @@ static void b43_ssb_remove(struct ssb_device *sdev) B43_WARN_ON(!wl); if (!wldev->fw.ucode.data) return; /* NULL if firmware never loaded */ - if (wl->current_dev == wldev && wl->hw_registred) { + if (wl->current_dev == wldev && wl->hw_registered) { b43_leds_stop(wldev); ieee80211_unregister_hw(wl->hw); } diff --git a/drivers/net/wireless/broadcom/b43legacy/dma.c b/drivers/net/wireless/broadcom/b43legacy/dma.c index 2f0c64cef65f..1b1da7d83652 100644 --- a/drivers/net/wireless/broadcom/b43legacy/dma.c +++ b/drivers/net/wireless/broadcom/b43legacy/dma.c @@ -1149,7 +1149,7 @@ int b43legacy_dma_tx(struct b43legacy_wldev *dev, return -ENOSPC; } - if (unlikely(WARN_ON(free_slots(ring) < SLOTS_PER_PACKET))) { + if (WARN_ON(free_slots(ring) < SLOTS_PER_PACKET)) { /* If we get here, we have a real error with the queue * full, but queues not stopped. */ b43legacyerr(dev->wl, "DMA queue overflow\n"); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c index d2f788d88668..3e37c8cf82c6 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c @@ -576,7 +576,7 @@ int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev, if (pktq->qlen == 1) err = brcmf_sdiod_skbuff_read(sdiodev, sdiodev->func2, addr, - pktq->next); + __skb_peek(pktq)); else if (!sdiodev->sg_support) { glom_skb = brcmu_pkt_buf_get_skb(totlen); if (!glom_skb) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index 5444e6213d45..230a378c26fc 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -1649,6 +1649,14 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme) case WLAN_AKM_SUITE_PSK: val = WPA2_AUTH_PSK; break; + case WLAN_AKM_SUITE_FT_8021X: + val = WPA2_AUTH_UNSPECIFIED | WPA2_AUTH_FT; + if (sme->want_1x) + profile->use_fwsup = BRCMF_PROFILE_FWSUP_1X; + break; + case WLAN_AKM_SUITE_FT_PSK: + val = WPA2_AUTH_PSK | WPA2_AUTH_FT; + break; default: brcmf_err("invalid cipher group (%d)\n", sme->crypto.cipher_group); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c index cd3651069d0c..94044a7a6021 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c @@ -296,9 +296,7 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp) /* Replace all newline/linefeed characters with space * character */ - ptr = clmver; - while ((ptr = strnchr(ptr, '\n', 
sizeof(buf))) != NULL) - *ptr = ' '; + strreplace(clmver, '\n', ' '); brcmf_dbg(INFO, "CLM version = %s\n", clmver); } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c index 8347da632a5b..4c5a3995dc35 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c @@ -178,7 +178,7 @@ static void brcmf_feat_iovar_data_set(struct brcmf_if *ifp, ifp->fwil_fwerr = false; } -#define MAX_CAPS_BUFFER_SIZE 512 +#define MAX_CAPS_BUFFER_SIZE 768 static void brcmf_feat_firmware_capabilities(struct brcmf_if *ifp) { char caps[MAX_CAPS_BUFFER_SIZE]; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c index 3e9c4f2f5dd1..456a1bf008b3 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c @@ -74,7 +74,7 @@ #define P2P_AF_MAX_WAIT_TIME msecs_to_jiffies(2000) #define P2P_INVALID_CHANNEL -1 #define P2P_CHANNEL_SYNC_RETRY 5 -#define P2P_AF_FRM_SCAN_MAX_WAIT msecs_to_jiffies(1500) +#define P2P_AF_FRM_SCAN_MAX_WAIT msecs_to_jiffies(450) #define P2P_DEFAULT_SLEEP_TIME_VSDB 200 /* WiFi P2P Public Action Frame OUI Subtypes */ @@ -1134,7 +1134,6 @@ static s32 brcmf_p2p_af_searching_channel(struct brcmf_p2p_info *p2p) { struct afx_hdl *afx_hdl = &p2p->afx_hdl; struct brcmf_cfg80211_vif *pri_vif; - unsigned long duration; s32 retry; brcmf_dbg(TRACE, "Enter\n"); @@ -1150,7 +1149,6 @@ static s32 brcmf_p2p_af_searching_channel(struct brcmf_p2p_info *p2p) * pending action frame tx is cancelled. */ retry = 0; - duration = msecs_to_jiffies(P2P_AF_FRM_SCAN_MAX_WAIT); while ((retry < P2P_CHANNEL_SYNC_RETRY) && (afx_hdl->peer_chan == P2P_INVALID_CHANNEL)) { afx_hdl->is_listen = false; @@ -1158,7 +1156,8 @@ static s32 brcmf_p2p_af_searching_channel(struct brcmf_p2p_info *p2p) retry); /* search peer on peer's listen channel */ schedule_work(&afx_hdl->afx_work); - wait_for_completion_timeout(&afx_hdl->act_frm_scan, duration); + wait_for_completion_timeout(&afx_hdl->act_frm_scan, + P2P_AF_FRM_SCAN_MAX_WAIT); if ((afx_hdl->peer_chan != P2P_INVALID_CHANNEL) || (!test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL, &p2p->status))) @@ -1171,7 +1170,7 @@ static s32 brcmf_p2p_af_searching_channel(struct brcmf_p2p_info *p2p) afx_hdl->is_listen = true; schedule_work(&afx_hdl->afx_work); wait_for_completion_timeout(&afx_hdl->act_frm_scan, - duration); + P2P_AF_FRM_SCAN_MAX_WAIT); } if ((afx_hdl->peer_chan != P2P_INVALID_CHANNEL) || (!test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL, @@ -1458,10 +1457,12 @@ int brcmf_p2p_notify_action_tx_complete(struct brcmf_if *ifp, return 0; if (e->event_code == BRCMF_E_ACTION_FRAME_COMPLETE) { - if (e->status == BRCMF_E_STATUS_SUCCESS) + if (e->status == BRCMF_E_STATUS_SUCCESS) { set_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED, &p2p->status); - else { + if (!p2p->wait_for_offchan_complete) + complete(&p2p->send_af_done); + } else { set_bit(BRCMF_P2P_STATUS_ACTION_TX_NOACK, &p2p->status); /* If there is no ack, we don't need to wait for * WLC_E_ACTION_FRAME_OFFCHAN_COMPLETE event @@ -1512,6 +1513,17 @@ static s32 brcmf_p2p_tx_action_frame(struct brcmf_p2p_info *p2p, p2p->af_sent_channel = le32_to_cpu(af_params->channel); p2p->af_tx_sent_jiffies = jiffies; + if (test_bit(BRCMF_P2P_STATUS_DISCOVER_LISTEN, &p2p->status) && + p2p->af_sent_channel == + 
ieee80211_frequency_to_channel(p2p->remain_on_channel.center_freq))
+		p2p->wait_for_offchan_complete = false;
+	else
+		p2p->wait_for_offchan_complete = true;
+
+	brcmf_dbg(TRACE, "Waiting for %s tx completion event\n",
+		  (p2p->wait_for_offchan_complete) ?
+		   "off-channel" : "on-channel");
+
 	timeout = wait_for_completion_timeout(&p2p->send_af_done,
 					      P2P_AF_MAX_WAIT_TIME);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h
index 0e8b34d2d85c..39f0d0218088 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h
@@ -124,6 +124,7 @@ struct afx_hdl {
  * @gon_req_action: about to send go negotiation request frame.
  * @block_gon_req_tx: drop tx go negotiation request frame.
  * @p2pdev_dynamically: is p2p device if created by module param or supplicant.
+ * @wait_for_offchan_complete: wait for off-channel tx completion event.
  */
 struct brcmf_p2p_info {
 	struct brcmf_cfg80211_info *cfg;
@@ -144,6 +145,7 @@ struct brcmf_p2p_info {
 	bool gon_req_action;
 	bool block_gon_req_tx;
 	bool p2pdev_dynamically;
+	bool wait_for_offchan_complete;
 };
 
 s32 brcmf_p2p_attach(struct brcmf_cfg80211_info *cfg, bool p2pdev_forced);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index 4fffa6988087..5dea569d63ed 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -2017,6 +2017,7 @@ static const struct dev_pm_ops brcmf_pciedrvr_pm = {
 
 static const struct pci_device_id brcmf_pcie_devid_table[] = {
 	BRCMF_PCIE_DEVICE(BRCM_PCIE_4350_DEVICE_ID),
+	BRCMF_PCIE_DEVICE_SUB(0x4355, BRCM_PCIE_VENDOR_ID_BROADCOM, 0x4355),
 	BRCMF_PCIE_DEVICE(BRCM_PCIE_4356_DEVICE_ID),
 	BRCMF_PCIE_DEVICE(BRCM_PCIE_43567_DEVICE_ID),
 	BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_DEVICE_ID),
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index a907d7b065fa..b2e1ab5adb64 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -1463,7 +1463,7 @@ static u8 brcmf_sdio_rxglom(struct brcmf_sdio *bus, u8 rxseq)
 	struct sk_buff *pfirst, *pnext;
 
 	int errcode;
-	u8 doff, sfdoff;
+	u8 doff;
 
 	struct brcmf_sdio_hdrinfo rd_new;
 
@@ -1597,7 +1597,6 @@ static u8 brcmf_sdio_rxglom(struct brcmf_sdio *bus, u8 rxseq)
 
 		/* Remove superframe header, remember offset */
 		skb_pull(pfirst, rd_new.dat_offset);
-		sfdoff = rd_new.dat_offset;
 		num = 0;
 
 		/* Validate all the subframe headers */
@@ -2189,7 +2188,7 @@ brcmf_sdio_txpkt_prep(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
 	 * length of the chain (including padding)
 	 */
 	if (bus->txglom)
-		brcmf_sdio_update_hwhdr(pktq->next->data, total_len);
+		brcmf_sdio_update_hwhdr(__skb_peek(pktq)->data, total_len);
 	return 0;
 }
 
@@ -3405,7 +3404,6 @@ static int brcmf_sdio_bus_preinit(struct device *dev)
 	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
 	struct brcmf_sdio *bus = sdiodev->bus;
 	struct brcmf_core *core = bus->sdio_core;
-	uint pad_size;
 	u32 value;
 	int err;
 
@@ -3448,7 +3446,6 @@ static int brcmf_sdio_bus_preinit(struct device *dev)
 	if (sdiodev->sg_support) {
 		bus->txglom = false;
 		value = 1;
-		pad_size = bus->sdiodev->func2->cur_blksize << 1;
 		err = brcmf_iovar_data_set(bus->sdiodev->dev, "bus:rxglom",
 					   &value, sizeof(u32));
 		if (err < 0) {
diff --git
a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/debug.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/debug.c index 2fe1f6863278..3bd54f125776 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/debug.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/debug.c @@ -62,8 +62,7 @@ int brcms_debugfs_attach(struct brcms_pub *drvr) void brcms_debugfs_detach(struct brcms_pub *drvr) { - if (!IS_ERR_OR_NULL(drvr->dbgfs_dir)) - debugfs_remove_recursive(drvr->dbgfs_dir); + debugfs_remove_recursive(drvr->dbgfs_dir); } struct dentry *brcms_debugfs_get_devdir(struct brcms_pub *drvr) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c index ecc89e718b9c..6255fb6d97a7 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c @@ -1578,10 +1578,10 @@ int brcms_ucode_init_buf(struct brcms_info *wl, void **pbuf, u32 idx) if (le32_to_cpu(hdr->idx) == idx) { pdata = wl->fw.fw_bin[i]->data + le32_to_cpu(hdr->offset); - *pbuf = kmemdup(pdata, len, GFP_KERNEL); + *pbuf = kvmalloc(len, GFP_KERNEL); if (*pbuf == NULL) goto fail; - + memcpy(*pbuf, pdata, len); return 0; } } @@ -1629,7 +1629,7 @@ int brcms_ucode_init_uint(struct brcms_info *wl, size_t *n_bytes, u32 idx) */ void brcms_ucode_free_buf(void *p) { - kfree(p); + kvfree(p); } /* diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c index bedec1606caa..a57f2711f3c0 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c @@ -25453,12 +25453,12 @@ void wlc_phy_cal_perical_nphy_run(struct brcms_phy *pi, u8 caltype) (pi->cal_type_override == PHY_PERICAL_FULL) ? 
true : false; - if ((pi->mphase_cal_phase_id > MPHASE_CAL_STATE_INIT)) { + if (pi->mphase_cal_phase_id > MPHASE_CAL_STATE_INIT) { if (pi->nphy_txiqlocal_chanspec != pi->radio_chanspec) wlc_phy_cal_perical_mphase_restart(pi); } - if ((pi->mphase_cal_phase_id == MPHASE_CAL_STATE_RXCAL)) + if (pi->mphase_cal_phase_id == MPHASE_CAL_STATE_RXCAL) wlapi_bmac_write_shm(pi->sh->physhim, M_CTS_DURATION, 10000); wlapi_suspend_mac_and_wait(pi->sh->physhim); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c b/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c index d8b79cb72b58..e7584b842dce 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c @@ -77,6 +77,8 @@ static u16 d11ac_bw(enum brcmu_chan_bw bw) return BRCMU_CHSPEC_D11AC_BW_40; case BRCMU_CHAN_BW_80: return BRCMU_CHSPEC_D11AC_BW_80; + case BRCMU_CHAN_BW_160: + return BRCMU_CHSPEC_D11AC_BW_160; default: WARN_ON(1); } @@ -190,8 +192,38 @@ static void brcmu_d11ac_decchspec(struct brcmu_chan *ch) break; } break; - case BRCMU_CHSPEC_D11AC_BW_8080: case BRCMU_CHSPEC_D11AC_BW_160: + switch (ch->sb) { + case BRCMU_CHAN_SB_LLL: + ch->control_ch_num -= CH_70MHZ_APART; + break; + case BRCMU_CHAN_SB_LLU: + ch->control_ch_num -= CH_50MHZ_APART; + break; + case BRCMU_CHAN_SB_LUL: + ch->control_ch_num -= CH_30MHZ_APART; + break; + case BRCMU_CHAN_SB_LUU: + ch->control_ch_num -= CH_10MHZ_APART; + break; + case BRCMU_CHAN_SB_ULL: + ch->control_ch_num += CH_10MHZ_APART; + break; + case BRCMU_CHAN_SB_ULU: + ch->control_ch_num += CH_30MHZ_APART; + break; + case BRCMU_CHAN_SB_UUL: + ch->control_ch_num += CH_50MHZ_APART; + break; + case BRCMU_CHAN_SB_UUU: + ch->control_ch_num += CH_70MHZ_APART; + break; + default: + WARN_ON_ONCE(1); + break; + } + break; + case BRCMU_CHSPEC_D11AC_BW_8080: default: WARN_ON_ONCE(1); break; diff --git a/drivers/net/wireless/broadcom/brcm80211/include/brcmu_wifi.h b/drivers/net/wireless/broadcom/brcm80211/include/brcmu_wifi.h index 7b9a77981df1..dddebaa60352 100644 --- a/drivers/net/wireless/broadcom/brcm80211/include/brcmu_wifi.h +++ b/drivers/net/wireless/broadcom/brcm80211/include/brcmu_wifi.h @@ -29,6 +29,8 @@ #define CH_UPPER_SB 0x01 #define CH_LOWER_SB 0x02 #define CH_EWA_VALID 0x04 +#define CH_70MHZ_APART 14 +#define CH_50MHZ_APART 10 #define CH_30MHZ_APART 6 #define CH_20MHZ_APART 4 #define CH_10MHZ_APART 2 @@ -237,6 +239,7 @@ static inline bool ac_bitmap_tst(u8 bitmap, int prec) #define WPA2_AUTH_RESERVED4 0x0400 #define WPA2_AUTH_RESERVED5 0x0800 #define WPA2_AUTH_1X_SHA256 0x1000 /* 1X with SHA256 key derivation */ +#define WPA2_AUTH_FT 0x4000 /* Fast BSS Transition */ #define WPA2_AUTH_PSK_SHA256 0x8000 /* PSK with SHA256 key derivation */ #define DOT11_DEFAULT_RTS_LEN 2347 diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c index 9644e7b93645..bbdca13c5a9f 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c @@ -5652,7 +5652,7 @@ static void ipw_merge_adhoc_network(struct work_struct *work) } mutex_lock(&priv->mutex); - if ((priv->ieee->iw_mode == IW_MODE_ADHOC)) { + if (priv->ieee->iw_mode == IW_MODE_ADHOC) { IPW_DEBUG_MERGE("remove network %*pE\n", priv->essid_len, priv->essid); ipw_remove_current_network(priv); diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/1000.c b/drivers/net/wireless/intel/iwlwifi/cfg/1000.c index 497fd766d87c..76b5ddb20248 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/1000.c +++ 
b/drivers/net/wireless/intel/iwlwifi/cfg/1000.c @@ -12,10 +12,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/2000.c b/drivers/net/wireless/intel/iwlwifi/cfg/2000.c index fedb108db68f..e7e45846dd07 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/2000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/2000.c @@ -12,10 +12,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c index 91ca77c7571c..da5d5f9b2573 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c @@ -56,7 +56,7 @@ #include "iwl-config.h" /* Highest firmware API version supported */ -#define IWL_22000_UCODE_API_MAX 38 +#define IWL_22000_UCODE_API_MAX 41 /* Lowest firmware API version supported */ #define IWL_22000_UCODE_API_MIN 39 @@ -77,10 +77,13 @@ #define IWL_22000_HR_FW_PRE "iwlwifi-Qu-a0-hr-a0-" #define IWL_22000_HR_CDB_FW_PRE "iwlwifi-QuIcp-z0-hrcdb-a0-" #define IWL_22000_HR_A_F0_FW_PRE "iwlwifi-QuQnj-f0-hr-a0-" -#define IWL_22000_HR_B_FW_PRE "iwlwifi-Qu-b0-hr-b0-" +#define IWL_22000_HR_B_F0_FW_PRE "iwlwifi-Qu-b0-hr-b0-" +#define IWL_22000_QU_B_HR_B_FW_PRE "iwlwifi-Qu-b0-hr-b0-" +#define IWL_22000_HR_B_FW_PRE "iwlwifi-QuQnj-b0-hr-b0-" #define IWL_22000_JF_B0_FW_PRE "iwlwifi-QuQnj-a0-jf-b0-" #define IWL_22000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-" #define IWL_22000_SU_Z0_FW_PRE "iwlwifi-su-z0-" +#define IWL_QU_B_JF_B_FW_PRE "iwlwifi-Qu-b0-jf-b0-" #define IWL_22000_HR_MODULE_FIRMWARE(api) \ IWL_22000_HR_FW_PRE __stringify(api) ".ucode" @@ -88,7 +91,11 @@ IWL_22000_JF_FW_PRE __stringify(api) ".ucode" #define IWL_22000_HR_A_F0_QNJ_MODULE_FIRMWARE(api) \ IWL_22000_HR_A_F0_FW_PRE __stringify(api) ".ucode" -#define IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(api) \ +#define IWL_22000_HR_B_F0_QNJ_MODULE_FIRMWARE(api) \ + IWL_22000_HR_B_F0_FW_PRE __stringify(api) ".ucode" +#define IWL_22000_QU_B_HR_B_MODULE_FIRMWARE(api) \ + IWL_22000_QU_B_HR_B_FW_PRE __stringify(api) ".ucode" +#define IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(api) \ IWL_22000_HR_B_FW_PRE __stringify(api) ".ucode" #define IWL_22000_JF_B0_QNJ_MODULE_FIRMWARE(api) \ IWL_22000_JF_B0_FW_PRE __stringify(api) ".ucode" @@ -96,6 +103,8 @@ IWL_22000_HR_A0_FW_PRE __stringify(api) ".ucode" #define IWL_22000_SU_Z0_MODULE_FIRMWARE(api) \ IWL_22000_SU_Z0_FW_PRE __stringify(api) ".ucode" +#define IWL_QU_B_JF_B_MODULE_FIRMWARE(api) \ + IWL_QU_B_JF_B_FW_PRE __stringify(api) ".ucode" #define NVM_HW_SECTION_NUM_FAMILY_22000 10 @@ -134,7 +143,7 @@ static const struct iwl_ht_params iwl_22000_ht_params = { .ucode_api_min = IWL_22000_UCODE_API_MIN, \ .led_mode = IWL_LED_RF_STATE, \ .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_22000, \ - .non_shared_ant = ANT_A, \ + 
.non_shared_ant = ANT_B, \ .dccm_offset = IWL_22000_DCCM_OFFSET, \ .dccm_len = IWL_22000_DCCM_LEN, \ .dccm2_offset = IWL_22000_DCCM2_OFFSET, \ @@ -155,7 +164,9 @@ static const struct iwl_ht_params iwl_22000_ht_params = { .gen2 = true, \ .nvm_type = IWL_NVM_EXT, \ .dbgc_supported = true, \ - .min_umac_error_event_table = 0x400000 + .min_umac_error_event_table = 0x400000, \ + .d3_debug_data_base_addr = 0x401000, \ + .d3_debug_data_length = 60 * 1024 #define IWL_DEVICE_22500 \ IWL_DEVICE_22000_COMMON, \ @@ -190,7 +201,54 @@ const struct iwl_cfg iwl22000_2ac_cfg_jf = { const struct iwl_cfg iwl22000_2ax_cfg_hr = { .name = "Intel(R) Dual Band Wireless AX 22000", - .fw_name_pre = IWL_22000_HR_FW_PRE, + .fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE, + IWL_DEVICE_22500, + /* + * This device doesn't support receiving BlockAck with a large bitmap + * so we need to restrict the size of transmitted aggregation to the + * HT size; mac80211 would otherwise pick the HE max (256) by default. + */ + .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, +}; + +/* + * All JF radio modules are part of the 9000 series, but the MAC part + * looks more like 22000. That's why this device is here, but called + * 9560 nevertheless. + */ +const struct iwl_cfg iwl9461_2ac_cfg_qu_b0_jf_b0 = { + .name = "Intel(R) Wireless-AC 9461", + .fw_name_pre = IWL_QU_B_JF_B_FW_PRE, + IWL_DEVICE_22500, +}; + +const struct iwl_cfg iwl9462_2ac_cfg_qu_b0_jf_b0 = { + .name = "Intel(R) Wireless-AC 9462", + .fw_name_pre = IWL_QU_B_JF_B_FW_PRE, + IWL_DEVICE_22500, +}; + +const struct iwl_cfg iwl9560_2ac_cfg_qu_b0_jf_b0 = { + .name = "Intel(R) Wireless-AC 9560", + .fw_name_pre = IWL_QU_B_JF_B_FW_PRE, + IWL_DEVICE_22500, +}; + +const struct iwl_cfg killer1550i_2ac_cfg_qu_b0_jf_b0 = { + .name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)", + .fw_name_pre = IWL_QU_B_JF_B_FW_PRE, + IWL_DEVICE_22500, +}; + +const struct iwl_cfg killer1550s_2ac_cfg_qu_b0_jf_b0 = { + .name = "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)", + .fw_name_pre = IWL_QU_B_JF_B_FW_PRE, + IWL_DEVICE_22500, +}; + +const struct iwl_cfg iwl22000_2ax_cfg_jf = { + .name = "Intel(R) Dual Band Wireless AX 22000", + .fw_name_pre = IWL_QU_B_JF_B_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap @@ -264,7 +322,10 @@ const struct iwl_cfg iwl22560_2ax_cfg_su_cdb = { MODULE_FIRMWARE(IWL_22000_HR_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_22000_JF_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_22000_HR_A_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_22000_HR_B_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_22000_QU_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_22000_JF_B0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_22000_SU_Z0_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_QU_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/5000.c b/drivers/net/wireless/intel/iwlwifi/cfg/5000.c index 36151e61a26f..575a7022d045 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/5000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/5000.c @@ -12,10 +12,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. 
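
The 22000.c hunks above compose firmware file names from a device-specific prefix plus the stringified ucode API version; with IWL_22000_UCODE_API_MAX now 41, IWL_QU_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX) expands to "iwlwifi-Qu-b0-jf-b0-41.ucode", the file the driver requests from userspace. A minimal stand-alone sketch of the __stringify() composition (the macro names below are illustrative, not from the patch):

#include <linux/stringify.h>

#define FW_PRE "iwlwifi-Qu-b0-jf-b0-"
#define FW_API_MAX 41
/* __stringify() expands its argument first, so the literal "41" is
 * pasted into the name rather than the token "FW_API_MAX".
 */
#define FW_NAME(api) FW_PRE __stringify(api) ".ucode"

static const char *fw_name = FW_NAME(FW_API_MAX);
/* fw_name == "iwlwifi-Qu-b0-jf-b0-41.ucode" */
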
* - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/6000.c b/drivers/net/wireless/intel/iwlwifi/cfg/6000.c index b5d8274761d8..30e62a7c9d52 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/6000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/6000.c @@ -12,10 +12,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/7000.c b/drivers/net/wireless/intel/iwlwifi/cfg/7000.c index a62c8346f13a..c973bfaa3414 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/7000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/7000.c @@ -19,11 +19,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c index c46fa712985b..348c40fcddcb 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c @@ -19,11 +19,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. 
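
The brcmsmac brcms_ucode_init_buf() hunk earlier replaces kmemdup() with kvmalloc() plus memcpy(): ucode sections can be large enough that a physically contiguous kmalloc() allocation fails on a fragmented system, while kvmalloc() transparently falls back to vmalloc(). The buffer must then be freed with kvfree(), which is what the brcms_ucode_free_buf() hunk switches to. A minimal sketch of the pairing, not the driver's exact code:

#include <linux/mm.h>		/* kvmalloc(), kvfree() */
#include <linux/string.h>	/* memcpy() */

static void *copy_fw_section(const void *src, size_t len)
{
	/* May be vmalloc-backed if len is large or memory is fragmented. */
	void *buf = kvmalloc(len, GFP_KERNEL);

	if (!buf)
		return NULL;
	memcpy(buf, src, len);
	return buf;
}

static void free_fw_section(void *buf)
{
	/* Correct for both kmalloc()- and vmalloc()-backed buffers. */
	kvfree(buf);
}
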
* diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c index 24b2f7cbb308..d55fd23cafe6 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c @@ -57,7 +57,7 @@ #include "fw/file.h" /* Highest firmware API version supported */ -#define IWL9000_UCODE_API_MAX 38 +#define IWL9000_UCODE_API_MAX 41 /* Lowest firmware API version supported */ #define IWL9000_UCODE_API_MIN 30 @@ -155,7 +155,9 @@ static const struct iwl_tt_params iwl9000_tt_params = { .nvm_type = IWL_NVM_EXT, \ .dbgc_supported = true, \ .min_umac_error_event_table = 0x800000, \ - .csr = &iwl_csr_v1 + .csr = &iwl_csr_v1, \ + .d3_debug_data_base_addr = 0x401000, \ + .d3_debug_data_length = 92 * 1024 const struct iwl_cfg iwl9160_2ac_cfg = { .name = "Intel(R) Dual Band Wireless AC 9160", diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/agn.h b/drivers/net/wireless/intel/iwlwifi/dvm/agn.h index b79e38734f2f..431e13c6ee35 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/agn.h +++ b/drivers/net/wireless/intel/iwlwifi/dvm/agn.h @@ -16,11 +16,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/calib.c b/drivers/net/wireless/intel/iwlwifi/dvm/calib.c index c96f9b1d948a..588b15697710 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/calib.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/calib.c @@ -16,11 +16,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/calib.h b/drivers/net/wireless/intel/iwlwifi/dvm/calib.h index 099e3ce80ffc..c43ba94bfa8b 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/calib.h +++ b/drivers/net/wireless/intel/iwlwifi/dvm/calib.h @@ -16,11 +16,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/commands.h b/drivers/net/wireless/intel/iwlwifi/dvm/commands.h index f89736d60a3d..0f4be4be181c 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/commands.h +++ b/drivers/net/wireless/intel/iwlwifi/dvm/commands.h @@ -16,11 +16,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
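
In the brcmutil d11.c hunk earlier, the new BRCMU_CHSPEC_D11AC_BW_160 case recovers the 20 MHz control channel from the 160 MHz center channel and the sideband field. The CH_xxMHZ_APART constants are channel-number deltas; adjacent channel numbers are 5 MHz apart, so a 70 MHz offset is CH_70MHZ_APART = 14, and the eight 20 MHz subchannels of a 160 MHz channel sit at +/-10, +/-30, +/-50 and +/-70 MHz from the center. An illustrative reformulation of that switch as a table (not driver code):

/* Sideband index 0..7 corresponds to LLL..UUU in the switch above. */
static const int sb_off_160[8] = {
	-14, -10, -6, -2,	/* LLL LLU LUL LUU: -70/-50/-30/-10 MHz */
	  2,   6, 10, 14,	/* ULL ULU UUL UUU: +10/+30/+50/+70 MHz */
};

static int control_ch_160(int center_ch, int sb)
{
	/* e.g. center channel 50, sideband LLL: 50 - 14 = 36 */
	return center_ch + sb_off_160[sb];
}
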
* - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c index 096a07c5a33f..3d2e44a642de 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c @@ -13,11 +13,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/dev.h b/drivers/net/wireless/intel/iwlwifi/dvm/dev.h index cceb4cd8e501..c5b8376d827f 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/dev.h +++ b/drivers/net/wireless/intel/iwlwifi/dvm/dev.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/devices.c b/drivers/net/wireless/intel/iwlwifi/dvm/devices.c index f21732ec3b25..3dd7d8c45dab 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/devices.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/devices.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/led.c b/drivers/net/wireless/intel/iwlwifi/dvm/led.c index 1bbd17ada974..04c236e9399b 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/led.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/led.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/led.h b/drivers/net/wireless/intel/iwlwifi/dvm/led.h index 75f74edd018f..8f93a3246dee 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/led.h +++ b/drivers/net/wireless/intel/iwlwifi/dvm/led.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. 
* - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/lib.c b/drivers/net/wireless/intel/iwlwifi/dvm/lib.c index 2b6ffbc46fa5..b2f172d4f78a 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/lib.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/lib.c @@ -13,11 +13,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c index 82caae02dd09..49b71dbf8490 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c @@ -14,10 +14,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c index 030482b357a3..1088ff036e13 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c @@ -2,6 +2,7 @@ * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2015 Intel Deutschland GmbH + * Copyright (C) 2018 Intel Corporation * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. @@ -15,10 +16,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * @@ -1651,7 +1648,6 @@ static void iwl_dump_nic_error_log(struct iwl_priv *priv) priv->status, table.valid); } - trace_iwlwifi_dev_ucode_error(trans->dev, &table, 0, table.brd_ver); IWL_ERR(priv, "0x%08X | %-28s\n", table.error_id, desc_lookup(table.error_id)); IWL_ERR(priv, "0x%08X | uPc\n", table.pc); diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/power.c b/drivers/net/wireless/intel/iwlwifi/dvm/power.c index 0ad557c89514..8c25e3aefb2b 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/power.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/power.c @@ -14,10 +14,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. 
* - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/power.h b/drivers/net/wireless/intel/iwlwifi/dvm/power.h index 2fd9b43adafd..a04fd4d375c6 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/power.h +++ b/drivers/net/wireless/intel/iwlwifi/dvm/power.h @@ -14,10 +14,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c index 98050d7be411..ef4b9de256f7 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.h b/drivers/net/wireless/intel/iwlwifi/dvm/rs.h index 50c1e951dd2d..b2df3a8cc464 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/rs.h +++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c index c942830af2b5..6f17a5e24e82 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c @@ -15,10 +15,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c b/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c index 8f3e5586eda9..eee1d48d453a 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c @@ -12,10 +12,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. 
* - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/scan.c b/drivers/net/wireless/intel/iwlwifi/dvm/scan.c index 17e6a32384d3..8d7aafb4d9e9 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/scan.c @@ -13,11 +13,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/sta.c b/drivers/net/wireless/intel/iwlwifi/dvm/sta.c index de6ec9b7ace4..b1792de09594 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/sta.c @@ -14,10 +14,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tt.c b/drivers/net/wireless/intel/iwlwifi/dvm/tt.c index 6524533d723c..4de2727ac63e 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/tt.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/tt.c @@ -14,10 +14,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tt.h b/drivers/net/wireless/intel/iwlwifi/dvm/tt.h index d324e9be9cbf..6388c09603c6 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/tt.h +++ b/drivers/net/wireless/intel/iwlwifi/dvm/tt.h @@ -14,10 +14,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c index fb40ddfced99..4ff323a3a4e5 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c @@ -13,11 +13,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
* - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c b/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c index d6013bfe991c..3bf57085b976 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c @@ -14,11 +14,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c index 75cae54ea7de..32d000cffe9f 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c @@ -16,9 +16,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h index cb5f32c1d705..2439e98431ee 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h @@ -16,9 +16,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h b/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h index 87c1ddea75ae..68060085010f 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h @@ -8,6 +8,7 @@ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,6 +31,7 @@ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -203,6 +205,7 @@ enum iwl_bt_activity_grading { BT_ON_NO_CONNECTION = 1, BT_LOW_TRAFFIC = 2, BT_HIGH_TRAFFIC = 3, + BT_VERY_HIGH_TRAFFIC = 4, BT_MAX_AG, }; /* BT_COEX_BT_ACTIVITY_GRADING_API_E_VER_1 */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h index 6dad748e5cdc..8b4922bbe139 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h @@ -436,7 +436,8 @@ enum iwl_legacy_cmds { /** * @REDUCE_TX_POWER_CMD: - * &struct iwl_dev_tx_power_cmd_v3 or &struct iwl_dev_tx_power_cmd + * &struct iwl_dev_tx_power_cmd_v3 or &struct iwl_dev_tx_power_cmd_v4 + * or &struct iwl_dev_tx_power_cmd */ REDUCE_TX_POWER_CMD = 0x9f, diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h index 57f4bc242023..6fae02fa4cad 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h @@ -8,6 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,6 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -374,7 +376,7 @@ enum iwl_wowlan_wakeup_reason { }; /* WOWLAN_WAKE_UP_REASON_API_E_VER_2 */ -struct iwl_wowlan_gtk_status { +struct iwl_wowlan_gtk_status_v1 { u8 key_index; u8 reserved[3]; u8 decrypt_key[16]; @@ -382,9 +384,84 @@ struct iwl_wowlan_gtk_status { struct iwl_wowlan_rsc_tsc_params_cmd rsc; } __packed; /* WOWLAN_GTK_MATERIAL_VER_1 */ +#define WOWLAN_KEY_MAX_SIZE 32 +#define WOWLAN_GTK_KEYS_NUM 2 +#define WOWLAN_IGTK_KEYS_NUM 2 + +/** + * struct iwl_wowlan_gtk_status - GTK status + * @key: GTK material + * @key_len: GTK length, if set to 0, the key is not available + * @key_flags: information about the key: + * bits[0:1]: key index assigned by the AP + * bits[2:6]: GTK index of the key in the internal DB + * bit[7]: Set iff this is the currently used GTK + * @reserved: padding + * @tkip_mic_key: TKIP RX MIC key + * @rsc: TSC RSC counters + */ +struct iwl_wowlan_gtk_status { + u8 key[WOWLAN_KEY_MAX_SIZE]; + u8 key_len; + u8 key_flags; + u8 reserved[2]; + u8 tkip_mic_key[8]; + struct iwl_wowlan_rsc_tsc_params_cmd rsc; +} __packed; /* WOWLAN_GTK_MATERIAL_VER_2 */ + +#define IWL_WOWLAN_GTK_IDX_MASK (BIT(0) | BIT(1)) + +/** + * struct iwl_wowlan_igtk_status - IGTK status + * @key: IGTK material + * @ipn: the IGTK packet number (replay counter) + * @key_len: IGTK length, if set to 0, the key is not available + * @key_flags: information about the key: + * bits[0]: key index assigned by the AP (0: index 4, 1: index 5) + * bits[1:5]: IGTK index of the key in the internal DB + * bit[6]: Set iff this is the currently used IGTK + */ +struct iwl_wowlan_igtk_status { + u8 key[WOWLAN_KEY_MAX_SIZE]; + u8 ipn[6]; + u8 key_len; + u8 key_flags; +} __packed; /* WOWLAN_IGTK_MATERIAL_VER_1 */ + +/** + * struct iwl_wowlan_status_v6 - WoWLAN 
status + * @gtk: GTK data + * @replay_ctr: GTK rekey replay counter + * @pattern_number: number of the matched pattern + * @non_qos_seq_ctr: non-QoS sequence counter to use next + * @qos_seq_ctr: QoS sequence counters to use next + * @wakeup_reasons: wakeup reasons, see &enum iwl_wowlan_wakeup_reason + * @num_of_gtk_rekeys: number of GTK rekeys + * @transmitted_ndps: number of transmitted neighbor discovery packets + * @received_beacons: number of received beacons + * @wake_packet_length: wakeup packet length + * @wake_packet_bufsize: wakeup packet buffer size + * @wake_packet: wakeup packet + */ +struct iwl_wowlan_status_v6 { + struct iwl_wowlan_gtk_status_v1 gtk; + __le64 replay_ctr; + __le16 pattern_number; + __le16 non_qos_seq_ctr; + __le16 qos_seq_ctr[8]; + __le32 wakeup_reasons; + __le32 num_of_gtk_rekeys; + __le32 transmitted_ndps; + __le32 received_beacons; + __le32 wake_packet_length; + __le32 wake_packet_bufsize; + u8 wake_packet[]; /* can be truncated from _length to _bufsize */ +} __packed; /* WOWLAN_STATUSES_API_S_VER_6 */ + /** * struct iwl_wowlan_status - WoWLAN status * @gtk: GTK data + * @igtk: IGTK data * @replay_ctr: GTK rekey replay counter * @pattern_number: number of the matched pattern * @non_qos_seq_ctr: non-QoS sequence counter to use next @@ -398,7 +475,8 @@ struct iwl_wowlan_gtk_status { * @wake_packet: wakeup packet */ struct iwl_wowlan_status { - struct iwl_wowlan_gtk_status gtk; + struct iwl_wowlan_gtk_status gtk[WOWLAN_GTK_KEYS_NUM]; + struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM]; __le64 replay_ctr; __le16 pattern_number; __le16 non_qos_seq_ctr; @@ -410,7 +488,12 @@ struct iwl_wowlan_status { __le32 wake_packet_length; __le32 wake_packet_bufsize; u8 wake_packet[]; /* can be truncated from _length to _bufsize */ -} __packed; /* WOWLAN_STATUSES_API_S_VER_6 */ +} __packed; /* WOWLAN_STATUSES_API_S_VER_7 */ + +static inline u8 iwlmvm_wowlan_gtk_idx(struct iwl_wowlan_gtk_status *gtk) +{ + return gtk->key_flags & IWL_WOWLAN_GTK_IDX_MASK; +} #define IWL_WOWLAN_TCP_MAX_PACKET_LEN 64 #define IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN 128 diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h index 59b3c6e8f37b..eff3249af48a 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h @@ -100,6 +100,11 @@ enum iwl_data_path_subcmd_ids { TLC_MNG_CONFIG_CMD = 0xF, /** + * @HE_AIR_SNIFFER_CONFIG_CMD: &struct iwl_he_monitor_cmd + */ + HE_AIR_SNIFFER_CONFIG_CMD = 0x13, + + /** * @TLC_MNG_UPDATE_NOTIF: &struct iwl_tlc_update_notif */ TLC_MNG_UPDATE_NOTIF = 0xF7, diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h index 106782341544..dc1fa377087a 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h @@ -8,6 +8,7 @@ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright (C) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,6 +31,7 @@ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. 
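
Of the key_flags layout documented in the new iwl_wowlan_gtk_status above, the patch itself only adds an accessor for bits [0:1] (iwlmvm_wowlan_gtk_idx()). A sketch of decoding the other documented bits; the two extra masks and both helper names are assumptions made here for illustration, not part of the patch:

#include <linux/bits.h>		/* BIT(), GENMASK() */
#include <linux/types.h>	/* u8, bool */

#define WOWLAN_GTK_DB_IDX_MASK	GENMASK(6, 2)	/* bits[2:6], assumed name */
#define WOWLAN_GTK_CURRENT	BIT(7)		/* bit[7], assumed name */

static inline u8 wowlan_gtk_db_idx(u8 key_flags)
{
	return (key_flags & WOWLAN_GTK_DB_IDX_MASK) >> 2;
}

static inline bool wowlan_gtk_is_current(u8 key_flags)
{
	return key_flags & WOWLAN_GTK_CURRENT;
}
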
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright (C) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -336,6 +338,9 @@ struct iwl_dbg_mem_access_rsp { #define CONT_REC_COMMAND_SIZE 80 #define ENABLE_CONT_RECORDING 0x15 #define DISABLE_CONT_RECORDING 0x16 +#define BUFFER_ALLOCATION 0x27 +#define START_DEBUG_RECORDING 0x29 +#define STOP_DEBUG_RECORDING 0x2A /* * struct iwl_continuous_record_mode - recording mode @@ -353,4 +358,31 @@ struct iwl_continuous_record_cmd { sizeof(struct iwl_continuous_record_mode)]; } __packed; +/* maximum fragments to be allocated per target of allocationId */ +#define IWL_BUFFER_LOCATION_MAX_FRAGS 2 + +/** + * struct iwl_fragment_data - single fragment structure + * @address: 64bit start address + * @size: size in bytes + */ +struct iwl_fragment_data { + __le64 address; + __le32 size; +} __packed; /* FRAGMENT_STRUCTURE_API_S_VER_1 */ + +/** + * struct iwl_buffer_allocation_cmd - buffer allocation command structure + * @allocation_id: id of the allocation + * @buffer_location: location of the buffer + * @num_frags: number of fragments + * @fragments: memory fragments + */ +struct iwl_buffer_allocation_cmd { + __le32 allocation_id; + __le32 buffer_location; + __le32 num_frags; + struct iwl_fragment_data fragments[IWL_BUFFER_LOCATION_MAX_FRAGS]; +} __packed; /* BUFFER_ALLOCATION_CMD_API_S_VER_1 */ + #endif /* __iwl_fw_api_debug_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h index 17c7ef1662a9..ca49db786ed6 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h @@ -8,6 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,6 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. 
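
The new BUFFER_ALLOCATION command above describes a debug buffer as up to IWL_BUFFER_LOCATION_MAX_FRAGS DMA fragments. A sketch of populating one, assuming the caller has already DMA-mapped the two fragments; the allocation and location IDs are placeholders, not values from the patch:

/* Illustrative only; uses struct iwl_buffer_allocation_cmd from above. */
static void fill_buffer_allocation(struct iwl_buffer_allocation_cmd *cmd,
				   dma_addr_t frag0, u32 len0,
				   dma_addr_t frag1, u32 len1)
{
	cmd->allocation_id = cpu_to_le32(1);	/* placeholder */
	cmd->buffer_location = cpu_to_le32(0);	/* placeholder */
	cmd->num_frags = cpu_to_le32(2);
	cmd->fragments[0].address = cpu_to_le64(frag0);
	cmd->fragments[0].size = cpu_to_le32(len0);
	cmd->fragments[1].address = cpu_to_le64(frag1);
	cmd->fragments[1].size = cpu_to_le32(len1);
}
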
* * Redistribution and use in source and binary forms, with or without @@ -72,11 +74,58 @@ enum iwl_mac_conf_subcmd_ids { */ LOW_LATENCY_CMD = 0x3, /** + * @PROBE_RESPONSE_DATA_NOTIF: &struct iwl_probe_resp_data_notif + */ + PROBE_RESPONSE_DATA_NOTIF = 0xFC, + + /** * @CHANNEL_SWITCH_NOA_NOTIF: &struct iwl_channel_switch_noa_notif */ CHANNEL_SWITCH_NOA_NOTIF = 0xFF, }; +#define IWL_P2P_NOA_DESC_COUNT (2) + +/** + * struct iwl_p2p_noa_attr - NOA attr contained in probe resp FW notification + * + * @id: attribute id + * @len_low: length low half + * @len_high: length high half + * @idx: instance of NoA timing + * @ctwin: GO's ct window and power save capability + * @desc: NoA descriptor + * @reserved: reserved for alignment purposes + */ +struct iwl_p2p_noa_attr { + u8 id; + u8 len_low; + u8 len_high; + u8 idx; + u8 ctwin; + struct ieee80211_p2p_noa_desc desc[IWL_P2P_NOA_DESC_COUNT]; + u8 reserved; +} __packed; + +#define IWL_PROBE_RESP_DATA_NO_CSA (0xff) + +/** + * struct iwl_probe_resp_data_notif - notification with NOA and CSA counter + * + * @mac_id: the mac which should send the probe response + * @noa_active: notifies if the noa attribute should be handled + * @noa_attr: P2P NOA attribute + * @csa_counter: current csa counter + * @reserved: reserved for alignment purposes + */ +struct iwl_probe_resp_data_notif { + __le32 mac_id; + __le32 noa_active; + struct iwl_p2p_noa_attr noa_attr; + u8 csa_counter; + u8 reserved[3]; +} __packed; /* PROBE_RESPONSE_DATA_NTFY_API_S_VER_1 */ + /** * struct iwl_channel_switch_noa_notif - Channel switch NOA notification * diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h index 55594c93b014..1dd23f846fb9 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h @@ -578,4 +578,18 @@ struct iwl_he_sta_context_cmd { struct iwl_he_backoff_conf trig_based_txf[AC_NUM]; } __packed; /* STA_CONTEXT_DOT11AX_API_S */ +/** + * struct iwl_he_monitor_cmd - configure air sniffer for HE + * @bssid: the BSSID to sniff for + * @reserved1: reserved for dword alignment + * @aid: the AID to track on for HE MU + * @reserved2: reserved for future use + */ +struct iwl_he_monitor_cmd { + u8 bssid[6]; + __le16 reserved1; + __le16 aid; + u8 reserved2[6]; +} __packed; /* HE_AIR_SNIFFER_CONFIG_CMD_API_S_VER_1 */ + #endif /* __iwl_fw_api_mac_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h index 6c5338364794..93b392f0c6a4 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h @@ -165,7 +165,7 @@ struct iwl_nvm_access_resp { */ struct iwl_nvm_get_info { __le32 reserved; -} __packed; /* GRP_REGULATORY_NVM_GET_INFO_CMD_S_VER_1 */ +} __packed; /* REGULATORY_NVM_GET_INFO_CMD_API_S_VER_1 */ /** * enum iwl_nvm_info_general_flags - flags in NVM_GET_INFO resp @@ -180,14 +180,14 @@ enum iwl_nvm_info_general_flags { * @flags: bit 0: 1 - empty, 0 - non-empty * @nvm_version: nvm version * @board_type: board type - * @reserved: reserved + * @n_hw_addrs: number of reserved MAC addresses */ struct iwl_nvm_get_info_general { __le32 flags; __le16 nvm_version; u8 board_type; - u8 reserved; -} __packed; /* GRP_REGULATORY_NVM_GET_INFO_GENERAL_S_VER_1 */ + u8 n_hw_addrs; +} __packed; /* REGULATORY_NVM_GET_INFO_GENERAL_S_VER_2 */ /** * enum iwl_nvm_mac_sku_flags - flags in &iwl_nvm_get_info_sku @@ -231,7 +231,7 @@ struct 
iwl_nvm_get_info_phy { __le32 tx_chains; __le32 rx_chains; -} __packed; /* GRP_REGULATORY_NVM_GET_INFO_PHY_SKU_SECTION_S_VER_1 */ +} __packed; /* REGULATORY_NVM_GET_INFO_PHY_SKU_SECTION_S_VER_1 */ #define IWL_NUM_CHANNELS (51) @@ -245,7 +245,7 @@ struct iwl_nvm_get_info_regulatory { __le32 lar_enabled; __le16 channel_profile[IWL_NUM_CHANNELS]; __le16 reserved; -} __packed; /* GRP_REGULATORY_NVM_GET_INFO_REGULATORY_S_VER_1 */ +} __packed; /* REGULATORY_NVM_GET_INFO_REGULATORY_S_VER_1 */ /** * struct iwl_nvm_get_info_rsp - response to get NVM data @@ -259,7 +259,7 @@ struct iwl_nvm_get_info_rsp { struct iwl_nvm_get_info_sku mac_sku; struct iwl_nvm_get_info_phy phy_sku; struct iwl_nvm_get_info_regulatory regulatory; -} __packed; /* GRP_REGULATORY_NVM_GET_INFO_CMD_RSP_S_VER_2 */ +} __packed; /* REGULATORY_NVM_GET_INFO_RSP_API_S_VER_3 */ /** * struct iwl_nvm_access_complete_cmd - NVM_ACCESS commands are completed @@ -270,22 +270,6 @@ struct iwl_nvm_access_complete_cmd { } __packed; /* NVM_ACCESS_COMPLETE_CMD_API_S_VER_1 */ /** - * struct iwl_mcc_update_cmd_v1 - Request the device to update geographic - * regulatory profile according to the given MCC (Mobile Country Code). - * The MCC is two letter-code, ascii upper case[A-Z] or '00' for world domain. - * 'ZZ' MCC will be used to switch to NVM default profile; in this case, the - * MCC in the cmd response will be the relevant MCC in the NVM. - * @mcc: given mobile country code - * @source_id: the source from where we got the MCC, see iwl_mcc_source - * @reserved: reserved for alignment - */ -struct iwl_mcc_update_cmd_v1 { - __le16 mcc; - u8 source_id; - u8 reserved; -} __packed; /* LAR_UPDATE_MCC_CMD_API_S_VER_1 */ - -/** * struct iwl_mcc_update_cmd - Request the device to update geographic * regulatory profile according to the given MCC (Mobile Country Code). * The MCC is two letter-code, ascii upper case[A-Z] or '00' for world domain. @@ -306,7 +290,18 @@ struct iwl_mcc_update_cmd { } __packed; /* LAR_UPDATE_MCC_CMD_API_S_VER_2 */ /** - * struct iwl_mcc_update_resp_v1 - response to MCC_UPDATE_CMD. + * enum iwl_geo_information - geographic information. + * @GEO_NO_INFO: no special info for this geo profile. + * @GEO_WMM_ETSI_5GHZ_INFO: this geo profile limits the WMM params + * for the 5 GHz band. + */ +enum iwl_geo_information { + GEO_NO_INFO = 0, + GEO_WMM_ETSI_5GHZ_INFO = BIT(0), +}; + +/** + * struct iwl_mcc_update_resp_v3 - response to MCC_UPDATE_CMD. * Contains the new channel control profile map, if changed, and the new MCC * (mobile country code). * The new MCC may be different than what was requested in MCC_UPDATE_CMD. @@ -314,30 +309,23 @@ struct iwl_mcc_update_cmd { * @mcc: the new applied MCC * @cap: capabilities for all channels which matches the MCC * @source_id: the MCC source, see iwl_mcc_source - * @n_channels: number of channels in @channels_data (may be 14, 39, 50 or 51 - * channels, depending on platform) + * @time: time elapsed from the MCC test start (in units of 30 seconds) + * @geo_info: geographic specific profile information + * see &enum iwl_geo_information. + * @n_channels: number of channels in @channels_data. * @channels: channel control data map, DWORD for each channel. Only the first * 16bits are used. */ -struct iwl_mcc_update_resp_v1 { +struct iwl_mcc_update_resp_v3 { __le32 status; __le16 mcc; u8 cap; u8 source_id; + __le16 time; + __le16 geo_info; __le32 n_channels; __le32 channels[0]; -} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_1 */ - -/** - * enum iwl_geo_information - geographic information. 
- * @GEO_NO_INFO: no special info for this geo profile. - * @GEO_WMM_ETSI_5GHZ_INFO: this geo profile limits the WMM params - * for the 5 GHz band. - */ -enum iwl_geo_information { - GEO_NO_INFO = 0, - GEO_WMM_ETSI_5GHZ_INFO = BIT(0), -}; +} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_3 */ /** * struct iwl_mcc_update_resp - response to MCC_UPDATE_CMD. @@ -347,25 +335,26 @@ enum iwl_geo_information { * @status: see &enum iwl_mcc_update_status * @mcc: the new applied MCC * @cap: capabilities for all channels which matches the MCC - * @source_id: the MCC source, see iwl_mcc_source - * @time: time elapsed from the MCC test start (in 30 seconds TU) + * @time: time elapsed from the MCC test start (in units of 30 seconds) * @geo_info: geographic specific profile information * see &enum iwl_geo_information. - * @n_channels: number of channels in @channels_data (may be 14, 39, 50 or 51 - * channels, depending on platform) + * @source_id: the MCC source, see iwl_mcc_source + * @reserved: for four bytes alignment. + * @n_channels: number of channels in @channels_data. * @channels: channel control data map, DWORD for each channel. Only the first * 16bits are used. */ struct iwl_mcc_update_resp { __le32 status; __le16 mcc; - u8 cap; - u8 source_id; + __le16 cap; __le16 time; __le16 geo_info; + u8 source_id; + u8 reserved[3]; __le32 n_channels; __le32 channels[0]; -} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_3 */ +} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_4 */ /** * struct iwl_mcc_chub_notif - chub notifies of mcc change diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h index a3c77e01863b..286a22da232d 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h @@ -8,6 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH + * Copyright (C) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,6 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH + * Copyright (C) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -316,7 +318,9 @@ enum iwl_dev_tx_power_cmd_mode { IWL_TX_POWER_MODE_SET_DEVICE = 1, IWL_TX_POWER_MODE_SET_CHAINS = 2, IWL_TX_POWER_MODE_SET_ACK = 3, -}; /* TX_POWER_REDUCED_FLAGS_TYPE_API_E_VER_4 */; + IWL_TX_POWER_MODE_SET_SAR_TIMER = 4, + IWL_TX_POWER_MODE_SET_SAR_TIMER_DEFAULT_TABLE = 5, +}; /* TX_POWER_REDUCED_FLAGS_TYPE_API_E_VER_5 */; #define IWL_NUM_CHAIN_LIMITS 2 #define IWL_NUM_SUB_BANDS 5 @@ -350,13 +354,35 @@ struct iwl_dev_tx_power_cmd_v3 { * reduction. * @reserved: reserved (padding) */ -struct iwl_dev_tx_power_cmd { +struct iwl_dev_tx_power_cmd_v4 { /* v4 is just an extension of v3 - keep this here */ struct iwl_dev_tx_power_cmd_v3 v3; u8 enable_ack_reduction; u8 reserved[3]; } __packed; /* TX_REDUCED_POWER_API_S_VER_4 */ +/** + * struct iwl_dev_tx_power_cmd - TX power reduction command + * @v3: version 3 of the command, embedded here for easier software handling + * @enable_ack_reduction: enable or disable close range ack TX power + * reduction. 
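
Both MCC_UPDATE response layouts above end in a flexible channel map: n_channels DWORDs starting at channels[0], of which only the low 16 bits of each entry carry channel control flags. A sketch of walking it; real driver code would first validate the received packet length against n_channels:

/* Illustrative only; uses struct iwl_mcc_update_resp from above. */
static void walk_mcc_channels(const struct iwl_mcc_update_resp *resp)
{
	u32 i, n = le32_to_cpu(resp->n_channels);

	for (i = 0; i < n; i++) {
		u16 ch_flags = le32_to_cpu(resp->channels[i]) & 0xffff;

		(void)ch_flags;	/* ... interpret flags for channel i ... */
	}
}
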
+ * @per_chain_restriction_changed: whether per_chain_restriction has changed + * since the last command. used if set_mode is + * IWL_TX_POWER_MODE_SET_SAR_TIMER. + * note: if not changed, the command is used for keep alive only. + * @reserved: reserved (padding) + * @timer_period: timer in milliseconds. if it expires, FW will change to default + * BIOS values. relevant if set_mode is IWL_TX_POWER_MODE_SET_SAR_TIMER + */ +struct iwl_dev_tx_power_cmd { + /* v5 is just an extension of v3 - keep this here */ + struct iwl_dev_tx_power_cmd_v3 v3; + u8 enable_ack_reduction; + u8 per_chain_restriction_changed; + u8 reserved[2]; + __le32 timer_period; +} __packed; /* TX_REDUCED_POWER_API_S_VER_5 */ + #define IWL_NUM_GEO_PROFILES 3 /** diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h index 087fae91baef..9eddc4dc2ae6 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h @@ -66,12 +66,24 @@ /** * enum iwl_tlc_mng_cfg_flags_enum - options for TLC config flags - * @IWL_TLC_MNG_CFG_FLAGS_STBC_MSK: enable STBC + * @IWL_TLC_MNG_CFG_FLAGS_STBC_MSK: enable STBC. For HE this enables STBC for + * bandwidths <= 80MHz * @IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK: enable LDPC + * @IWL_TLC_MNG_CFG_FLAGS_HE_STBC_160MHZ_MSK: enable STBC in HE at 160MHz + * bandwidth + * @IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_1_MSK: enable HE Dual Carrier Modulation + * for BPSK (MCS 0) with 1 spatial + * stream + * @IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_2_MSK: enable HE Dual Carrier Modulation + * for BPSK (MCS 0) with 2 spatial + * streams */ enum iwl_tlc_mng_cfg_flags { - IWL_TLC_MNG_CFG_FLAGS_STBC_MSK = BIT(0), - IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK = BIT(1), + IWL_TLC_MNG_CFG_FLAGS_STBC_MSK = BIT(0), + IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK = BIT(1), + IWL_TLC_MNG_CFG_FLAGS_HE_STBC_160MHZ_MSK = BIT(2), + IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_1_MSK = BIT(3), + IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_2_MSK = BIT(4), }; /** @@ -217,66 +229,6 @@ struct iwl_tlc_update_notif { __le32 amsdu_enabled; } __packed; /* TLC_MNG_UPDATE_NTFY_API_S_VER_2 */ -/** - * enum iwl_tlc_debug_flags - debug options - * @IWL_TLC_DEBUG_FIXED_RATE: set fixed rate for rate scaling - * @IWL_TLC_DEBUG_STATS_TH: threshold for sending statistics to the driver, in - * frames - * @IWL_TLC_DEBUG_STATS_TIME_TH: threshold for sending statistics to the - * driver, in msec - * @IWL_TLC_DEBUG_AGG_TIME_LIM: time limit for a BA session - * @IWL_TLC_DEBUG_AGG_DIS_START_TH: frame with try-count greater than this - * threshold should not start an aggregation session - * @IWL_TLC_DEBUG_AGG_FRAME_CNT_LIM: set max number of frames in an aggregation - * @IWL_TLC_DEBUG_RENEW_ADDBA_DELAY: delay between retries of ADD BA - * @IWL_TLC_DEBUG_START_AC_RATE_IDX: frames per second to start a BA session - * @IWL_TLC_DEBUG_NO_FAR_RANGE_TWEAK: disable BW scaling - */ -enum iwl_tlc_debug_flags { - IWL_TLC_DEBUG_FIXED_RATE, - IWL_TLC_DEBUG_STATS_TH, - IWL_TLC_DEBUG_STATS_TIME_TH, - IWL_TLC_DEBUG_AGG_TIME_LIM, - IWL_TLC_DEBUG_AGG_DIS_START_TH, - IWL_TLC_DEBUG_AGG_FRAME_CNT_LIM, - IWL_TLC_DEBUG_RENEW_ADDBA_DELAY, - IWL_TLC_DEBUG_START_AC_RATE_IDX, - IWL_TLC_DEBUG_NO_FAR_RANGE_TWEAK, -}; /* TLC_MNG_DEBUG_FLAGS_API_E_VER_1 */ - -/** - * struct iwl_dhc_tlc_dbg - fixed debug config - * @sta_id: bit 0 - enable/disable, bits 1 - 7 hold station id - * @reserved1: reserved - * @flags: bitmap of %IWL_TLC_DEBUG_\* - * @fixed_rate: rate value - * @stats_threshold: if number of tx-ed frames is greater, send statistics - * @time_threshold: 
statistics threshold in usec - * @agg_time_lim: max agg time - * @agg_dis_start_threshold: frames with try-cont greater than this count will - * not be aggregated - * @agg_frame_count_lim: agg size - * @addba_retry_delay: delay between retries of ADD BA - * @start_ac_rate_idx: frames per second to start a BA session - * @no_far_range_tweak: disable BW scaling - * @reserved2: reserved - */ -struct iwl_dhc_tlc_cmd { - u8 sta_id; - u8 reserved1[3]; - __le32 flags; - __le32 fixed_rate; - __le16 stats_threshold; - __le16 time_threshold; - __le16 agg_time_lim; - __le16 agg_dis_start_threshold; - __le16 agg_frame_count_lim; - __le16 addba_retry_delay; - u8 start_ac_rate_idx[IEEE80211_NUM_ACS]; - u8 no_far_range_tweak; - u8 reserved2[3]; -} __packed; - /* * These serve as indexes into * struct iwl_rate_info fw_rate_idx_to_plcp[IWL_RATE_COUNT]; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h index 2f599353c885..0537496b6eb1 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h @@ -362,18 +362,49 @@ enum iwl_rx_he_phy { /* 6 bits reserved */ IWL_RX_HE_PHY_DELIM_EOF = BIT(31), - /* second dword - MU data */ - IWL_RX_HE_PHY_SIGB_COMPRESSION = BIT_ULL(32 + 0), - IWL_RX_HE_PHY_SIBG_SYM_OR_USER_NUM_MASK = 0x1e00000000ULL, + /* second dword - common data */ IWL_RX_HE_PHY_HE_LTF_NUM_MASK = 0xe000000000ULL, IWL_RX_HE_PHY_RU_ALLOC_SEC80 = BIT_ULL(32 + 8), /* trigger encoded */ IWL_RX_HE_PHY_RU_ALLOC_MASK = 0xfe0000000000ULL, - IWL_RX_HE_PHY_SIGB_MCS_MASK = 0xf000000000000ULL, - /* 1 bit reserved */ - IWL_RX_HE_PHY_SIGB_DCM = BIT_ULL(32 + 21), - IWL_RX_HE_PHY_PREAMBLE_PUNC_TYPE_MASK = 0xc0000000000000ULL, - /* 8 bits reserved */ + IWL_RX_HE_PHY_INFO_TYPE_MASK = 0xf000000000000000ULL, + IWL_RX_HE_PHY_INFO_TYPE_SU = 0x0, /* TSF low valid (first DW) */ + IWL_RX_HE_PHY_INFO_TYPE_MU = 0x1, /* TSF low/high valid (both DWs) */ + IWL_RX_HE_PHY_INFO_TYPE_MU_EXT_INFO = 0x2, /* same + SIGB-common0/1/2 valid */ + IWL_RX_HE_PHY_INFO_TYPE_TB = 0x3, /* TSF low/high valid (both DWs) */ + + /* second dword - MU data */ + IWL_RX_HE_PHY_MU_SIGB_COMPRESSION = BIT_ULL(32 + 0), + IWL_RX_HE_PHY_MU_SIBG_SYM_OR_USER_NUM_MASK = 0x1e00000000ULL, + IWL_RX_HE_PHY_MU_SIGB_MCS_MASK = 0xf000000000000ULL, + IWL_RX_HE_PHY_MU_SIGB_DCM = BIT_ULL(32 + 21), + IWL_RX_HE_PHY_MU_PREAMBLE_PUNC_TYPE_MASK = 0xc0000000000000ULL, + + /* second dword - TB data */ + IWL_RX_HE_PHY_TB_PILOT_TYPE = BIT_ULL(32 + 0), + IWL_RX_HE_PHY_TB_LOW_SS_MASK = 0xe00000000ULL +}; + +enum iwl_rx_he_sigb_common0 { + /* the a1/a2/... 
is what the PHY/firmware calls the values */ + IWL_RX_HE_SIGB_COMMON0_CH1_RU0 = 0x000000ff, /* a1 */ + IWL_RX_HE_SIGB_COMMON0_CH1_RU2 = 0x0000ff00, /* a2 */ + IWL_RX_HE_SIGB_COMMON0_CH2_RU0 = 0x00ff0000, /* b1 */ + IWL_RX_HE_SIGB_COMMON0_CH2_RU2 = 0xff000000, /* b2 */ +}; + +enum iwl_rx_he_sigb_common1 { + IWL_RX_HE_SIGB_COMMON1_CH1_RU1 = 0x000000ff, /* c1 */ + IWL_RX_HE_SIGB_COMMON1_CH1_RU3 = 0x0000ff00, /* c2 */ + IWL_RX_HE_SIGB_COMMON1_CH2_RU1 = 0x00ff0000, /* d1 */ + IWL_RX_HE_SIGB_COMMON1_CH2_RU3 = 0xff000000, /* d2 */ +}; + +enum iwl_rx_he_sigb_common2 { + IWL_RX_HE_SIGB_COMMON2_CH1_CTR_RU = 0x0001, + IWL_RX_HE_SIGB_COMMON2_CH2_CTR_RU = 0x0002, + IWL_RX_HE_SIGB_COMMON2_CH1_CRC_OK = 0x0004, + IWL_RX_HE_SIGB_COMMON2_CH2_CRC_OK = 0x0008, }; /** @@ -381,15 +412,31 @@ enum iwl_rx_he_phy { */ struct iwl_rx_mpdu_desc_v1 { /* DW7 - carries rss_hash only when rpa_en == 1 */ - /** - * @rss_hash: RSS hash value - */ - __le32 rss_hash; + union { + /** + * @rss_hash: RSS hash value + */ + __le32 rss_hash; + + /** + * @sigb_common0: for HE sniffer, HE-SIG-B common part 0 + */ + __le32 sigb_common0; + }; + /* DW8 - carries filter_match only when rpa_en == 1 */ - /** - * @filter_match: filter match value - */ - __le32 filter_match; + union { + /** + * @filter_match: filter match value + */ + __le32 filter_match; + + /** + * @sigb_common1: for HE sniffer, HE-SIG-B common part 1 + */ + __le32 sigb_common1; + }; + /* DW9 */ /** * @rate_n_flags: RX rate/flags encoding @@ -439,15 +486,30 @@ struct iwl_rx_mpdu_desc_v1 { */ struct iwl_rx_mpdu_desc_v3 { /* DW7 - carries filter_match only when rpa_en == 1 */ - /** - * @filter_match: filter match value - */ - __le32 filter_match; + union { + /** + * @filter_match: filter match value + */ + __le32 filter_match; + + /** + * @sigb_common0: for HE sniffer, HE-SIG-B common part 0 + */ + __le32 sigb_common0; + }; + /* DW8 - carries rss_hash only when rpa_en == 1 */ - /** - * @rss_hash: RSS hash value - */ - __le32 rss_hash; + union { + /** + * @rss_hash: RSS hash value + */ + __le32 rss_hash; + + /** + * @sigb_common1: for HE sniffer, HE-SIG-B common part 1 + */ + __le32 sigb_common1; + }; /* DW9 */ /** * @partial_hash: 31:0 ip/tcp header hash @@ -543,10 +605,18 @@ struct iwl_rx_mpdu_desc { * @raw_csum: raw checksum (alledgedly unreliable) */ __le16 raw_csum; - /** - * @l3l4_flags: &enum iwl_rx_l3l4_flags - */ - __le16 l3l4_flags; + + union { + /** + * @l3l4_flags: &enum iwl_rx_l3l4_flags + */ + __le16 l3l4_flags; + + /** + * @sigb_common2: for HE sniffer, HE-SIG-B common part 2 + */ + __le16 sigb_common2; + }; /* DW5 */ /** * @status: &enum iwl_rx_mpdu_status @@ -574,6 +644,69 @@ struct iwl_rx_mpdu_desc { #define IWL_RX_DESC_SIZE_V1 offsetofend(struct iwl_rx_mpdu_desc, v1) +#define IWL_CD_STTS_OPTIMIZED_POS 0 +#define IWL_CD_STTS_OPTIMIZED_MSK 0x01 +#define IWL_CD_STTS_TRANSFER_STATUS_POS 1 +#define IWL_CD_STTS_TRANSFER_STATUS_MSK 0x0E +#define IWL_CD_STTS_WIFI_STATUS_POS 4 +#define IWL_CD_STTS_WIFI_STATUS_MSK 0xF0 + +/** + * enum iwl_completion_desc_transfer_status - transfer status (bits 1-3) + * @IWL_CD_STTS_UNUSED: unused + * @IWL_CD_STTS_UNUSED_2: unused + * @IWL_CD_STTS_END_TRANSFER: successful transfer complete. + * In sniffer mode, when split is used, set in last CD completion. (RX) + * @IWL_CD_STTS_OVERFLOW: In sniffer mode, when using split - used for + * all CD completion. (RX) + * @IWL_CD_STTS_ABORTED: CR abort / close flow. 
(RX) + * @IWL_CD_STTS_ERROR: general error (RX) + */ +enum iwl_completion_desc_transfer_status { + IWL_CD_STTS_UNUSED, + IWL_CD_STTS_UNUSED_2, + IWL_CD_STTS_END_TRANSFER, + IWL_CD_STTS_OVERFLOW, + IWL_CD_STTS_ABORTED, + IWL_CD_STTS_ERROR, +}; + +/** + * enum iwl_completion_desc_wifi_status - wifi status (bits 4-7) + * @IWL_CD_STTS_VALID: the packet is valid (RX) + * @IWL_CD_STTS_FCS_ERR: frame check sequence error (RX) + * @IWL_CD_STTS_SEC_KEY_ERR: error handling the security key of rx (RX) + * @IWL_CD_STTS_DECRYPTION_ERR: error decrypting the frame (RX) + * @IWL_CD_STTS_DUP: duplicate packet (RX) + * @IWL_CD_STTS_ICV_MIC_ERR: MIC error (RX) + * @IWL_CD_STTS_INTERNAL_SNAP_ERR: problems removing the snap (RX) + * @IWL_CD_STTS_SEC_PORT_FAIL: security port fail (RX) + * @IWL_CD_STTS_BA_OLD_SN: block ack received old SN (RX) + * @IWL_CD_STTS_QOS_NULL: QoS null packet (RX) + * @IWL_CD_STTS_MAC_HDR_ERR: MAC header conversion error (RX) + * @IWL_CD_STTS_MAX_RETRANS: reached max number of retransmissions (TX) + * @IWL_CD_STTS_EX_LIFETIME: exceeded lifetime (TX) + * @IWL_CD_STTS_NOT_USED: completed but not used (RX) + * @IWL_CD_STTS_REPLAY_ERR: pn check failed, replay error (RX) + */ +enum iwl_completion_desc_wifi_status { + IWL_CD_STTS_VALID, + IWL_CD_STTS_FCS_ERR, + IWL_CD_STTS_SEC_KEY_ERR, + IWL_CD_STTS_DECRYPTION_ERR, + IWL_CD_STTS_DUP, + IWL_CD_STTS_ICV_MIC_ERR, + IWL_CD_STTS_INTERNAL_SNAP_ERR, + IWL_CD_STTS_SEC_PORT_FAIL, + IWL_CD_STTS_BA_OLD_SN, + IWL_CD_STTS_QOS_NULL, + IWL_CD_STTS_MAC_HDR_ERR, + IWL_CD_STTS_MAX_RETRANS, + IWL_CD_STTS_EX_LIFETIME, + IWL_CD_STTS_NOT_USED, + IWL_CD_STTS_REPLAY_ERR, +}; + struct iwl_frame_release { u8 baid; u8 reserved; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h index a17c4a79b8d4..18741889ec30 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h @@ -262,6 +262,7 @@ enum iwl_scan_channel_flags { IWL_SCAN_CHANNEL_FLAG_EBS = BIT(0), IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE = BIT(1), IWL_SCAN_CHANNEL_FLAG_CACHE_ADD = BIT(2), + IWL_SCAN_CHANNEL_FLAG_EBS_FRAG = BIT(3), }; /* struct iwl_scan_channel_opt - CHANNEL_OPTIMIZATION_API_S @@ -595,9 +596,12 @@ enum iwl_umac_scan_general_flags { * enum iwl_umac_scan_general_flags2 - UMAC scan general flags #2 * @IWL_UMAC_SCAN_GEN_FLAGS2_NOTIF_PER_CHNL: Whether to send a complete * notification per channel or not. + * @IWL_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER: Whether to allow channel + * reorder optimization or not. */ enum iwl_umac_scan_general_flags2 { - IWL_UMAC_SCAN_GEN_FLAGS2_NOTIF_PER_CHNL = BIT(0), + IWL_UMAC_SCAN_GEN_FLAGS2_NOTIF_PER_CHNL = BIT(0), + IWL_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER = BIT(1), }; /** diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h index dc40cbd52f92..450227f81706 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h @@ -8,6 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,6 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
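[Editorial sketch, not part of the patch: the IWL_CD_STTS_* masks added to rx.h above pack three fields into the completion descriptor's status byte: an "optimized" flag in bit 0, the transfer status in bits 1-3 and the wifi status in bits 4-7. A minimal example of how a consumer might unpack them; the helper names are illustrative only.]

        static inline u8 iwl_cd_transfer_status(u8 status)
        {
                return (status & IWL_CD_STTS_TRANSFER_STATUS_MSK) >>
                       IWL_CD_STTS_TRANSFER_STATUS_POS;
        }

        static inline u8 iwl_cd_wifi_status(u8 status)
        {
                return (status & IWL_CD_STTS_WIFI_STATUS_MSK) >>
                       IWL_CD_STTS_WIFI_STATUS_POS;
        }

        /* e.g. iwl_cd_wifi_status(st) == IWL_CD_STTS_DUP marks a duplicate frame */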
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -391,7 +393,7 @@ enum iwl_sta_type { * @tfd_queue_msk: tfd queues used by this station. * Obsolete for new TX API (9 and above). * @rx_ba_window: aggregation window size - * @sp_length: the size of the SP as it appears in the WME IE + * @sp_length: the size of the SP in actual number of frames * @uapsd_acs: 4 LS bits are trigger enabled ACs, 4 MS bits are the deliver * enabled ACs. * diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h index 514b86123d3d..358bdf051e83 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h @@ -186,7 +186,7 @@ enum iwl_tx_cmd_sec_ctrl { /* * TID for non QoS frames - to be written in tid_tspec */ -#define IWL_TID_NON_QOS IWL_MAX_TID_COUNT +#define IWL_TID_NON_QOS 0 /* * Limits on the retransmissions - to be written in {data,rts}_retry_limit @@ -747,9 +747,9 @@ enum iwl_mvm_ba_resp_flags { * @tfd_cnt: number of TFD-Q elements * @ra_tid_cnt: number of RATID-Q elements * @tfd: array of TFD queue status updates. See &iwl_mvm_compressed_ba_tfd - * for details. + * for details. Length in @tfd_cnt. * @ra_tid: array of RA-TID queue status updates. For debug purposes only. See - * &iwl_mvm_compressed_ba_ratid for more details. + * &iwl_mvm_compressed_ba_ratid for more details. Length in @ra_tid_cnt. */ struct iwl_mvm_compressed_ba_notif { __le32 flags; @@ -766,7 +766,7 @@ struct iwl_mvm_compressed_ba_notif { __le32 tx_rate; __le16 tfd_cnt; __le16 ra_tid_cnt; - struct iwl_mvm_compressed_ba_tfd tfd[1]; + struct iwl_mvm_compressed_ba_tfd tfd[0]; struct iwl_mvm_compressed_ba_ratid ra_tid[0]; } __packed; /* COMPRESSED_BA_RES_API_S_VER_4 */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c index a31a42e673c4..f44c716b1130 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c @@ -19,9 +19,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; - * * The full GNU General Public License is included in this distribution * in the file called COPYING.
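[Editorial sketch, not part of the patch: the tfd[1] -> tfd[0] change in iwl_mvm_compressed_ba_notif above turns the trailing array into a true variable-length member, so sizeof() no longer counts a phantom element. Because ra_tid[] is itself variable length and sits after tfd[], its real start must be computed from tfd_cnt; a consumer might look like the following, assuming only the structures from that hunk.]

        static void iwl_walk_ba_notif(struct iwl_mvm_compressed_ba_notif *ba)
        {
                u16 tfd_cnt = le16_to_cpu(ba->tfd_cnt);
                u16 ra_tid_cnt = le16_to_cpu(ba->ra_tid_cnt);
                /* ra_tid[] only begins after the tfd_cnt entries of tfd[] */
                struct iwl_mvm_compressed_ba_ratid *ra_tid =
                        (void *)&ba->tfd[tfd_cnt];
                int i;

                for (i = 0; i < tfd_cnt; i++) {
                        /* each ba->tfd[i] names one reclaimed queue/TFD index */
                }
                for (i = 0; i < ra_tid_cnt; i++) {
                        /* ra_tid[i] is debug-only, per the comment above */
                }
        }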
* @@ -243,7 +240,7 @@ static void iwl_fw_dump_fifos(struct iwl_fw_runtime *fwrt, if (!iwl_trans_grab_nic_access(fwrt->trans, &flags)) return; - if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RXF)) { + if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_RXF)) { /* Pull RXF1 */ iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->lmac[0].rxfifo1_size, 0, 0); @@ -257,7 +254,7 @@ static void iwl_fw_dump_fifos(struct iwl_fw_runtime *fwrt, LMAC2_PRPH_OFFSET, 2); } - if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_TXF)) { + if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_TXF)) { /* Pull TXF data from LMAC1 */ for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries; i++) { /* Mark the number of TXF we're pulling now */ @@ -282,7 +279,7 @@ static void iwl_fw_dump_fifos(struct iwl_fw_runtime *fwrt, } } - if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_INTERNAL_TXF) && + if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_INTERNAL_TXF) && fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) { /* Pull UMAC internal TXF data from all TXFs */ @@ -458,8 +455,8 @@ static const struct iwl_prph_range iwl_prph_dump_addr_9000[] = { { .start = 0x00a02400, .end = 0x00a02758 }, }; -static void _iwl_read_prph_block(struct iwl_trans *trans, u32 start, - u32 len_bytes, __le32 *data) +static void iwl_read_prph_block(struct iwl_trans *trans, u32 start, + u32 len_bytes, __le32 *data) { u32 i; @@ -467,21 +464,6 @@ static void _iwl_read_prph_block(struct iwl_trans *trans, u32 start, *data++ = cpu_to_le32(iwl_read_prph_no_grab(trans, start + i)); } -static bool iwl_read_prph_block(struct iwl_trans *trans, u32 start, - u32 len_bytes, __le32 *data) -{ - unsigned long flags; - bool success = false; - - if (iwl_trans_grab_nic_access(trans, &flags)) { - success = true; - _iwl_read_prph_block(trans, start, len_bytes, data); - iwl_trans_release_nic_access(trans, &flags); - } - - return success; -} - static void iwl_dump_prph(struct iwl_trans *trans, struct iwl_fw_error_dump_data **data, const struct iwl_prph_range *iwl_prph_dump_addr, @@ -507,11 +489,11 @@ static void iwl_dump_prph(struct iwl_trans *trans, prph = (void *)(*data)->data; prph->prph_start = cpu_to_le32(iwl_prph_dump_addr[i].start); - _iwl_read_prph_block(trans, iwl_prph_dump_addr[i].start, - /* our range is inclusive, hence + 4 */ - iwl_prph_dump_addr[i].end - - iwl_prph_dump_addr[i].start + 4, - (void *)prph->data); + iwl_read_prph_block(trans, iwl_prph_dump_addr[i].start, + /* our range is inclusive, hence + 4 */ + iwl_prph_dump_addr[i].end - + iwl_prph_dump_addr[i].start + 4, + (void *)prph->data); *data = iwl_fw_error_next_data(*data); } @@ -556,42 +538,130 @@ static struct scatterlist *alloc_sgtable(int size) return table; } -void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt) +static int iwl_fw_get_prph_len(struct iwl_fw_runtime *fwrt) +{ + u32 prph_len = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr_comm); + i++) { + /* The range includes both boundaries */ + int num_bytes_in_chunk = + iwl_prph_dump_addr_comm[i].end - + iwl_prph_dump_addr_comm[i].start + 4; + + prph_len += sizeof(struct iwl_fw_error_dump_data) + + sizeof(struct iwl_fw_error_dump_prph) + + num_bytes_in_chunk; + } + + if (fwrt->trans->cfg->mq_rx_supported) { + for (i = 0; i < + ARRAY_SIZE(iwl_prph_dump_addr_9000); i++) { + /* The range includes both boundaries */ + int num_bytes_in_chunk = + iwl_prph_dump_addr_9000[i].end - + iwl_prph_dump_addr_9000[i].start + 4; + + prph_len += sizeof(struct iwl_fw_error_dump_data) + + sizeof(struct 
iwl_fw_error_dump_prph) + + num_bytes_in_chunk; + } + } + return prph_len; +} + +static void iwl_fw_dump_mem(struct iwl_fw_runtime *fwrt, + struct iwl_fw_error_dump_data **dump_data, + u32 len, u32 ofs, u32 type) +{ + struct iwl_fw_error_dump_mem *dump_mem; + + if (!len) + return; + + (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); + (*dump_data)->len = cpu_to_le32(len + sizeof(*dump_mem)); + dump_mem = (void *)(*dump_data)->data; + dump_mem->type = cpu_to_le32(type); + dump_mem->offset = cpu_to_le32(ofs); + iwl_trans_read_mem_bytes(fwrt->trans, ofs, dump_mem->data, len); + *dump_data = iwl_fw_error_next_data(*dump_data); + + IWL_DEBUG_INFO(fwrt, "WRT memory dump. Type=%u\n", dump_mem->type); +} + +#define ADD_LEN(len, item_len, const_len) \ + do {size_t item = item_len; len += (!!item) * const_len + item; } \ + while (0) + +static int iwl_fw_fifo_len(struct iwl_fw_runtime *fwrt, + struct iwl_fwrt_shared_mem_cfg *mem_cfg) +{ + size_t hdr_len = sizeof(struct iwl_fw_error_dump_data) + + sizeof(struct iwl_fw_error_dump_fifo); + u32 fifo_len = 0; + int i; + + if (!(fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_RXF))) + goto dump_txf; + + /* Count RXF2 size */ + ADD_LEN(fifo_len, mem_cfg->rxfifo2_size, hdr_len); + + /* Count RXF1 sizes */ + for (i = 0; i < mem_cfg->num_lmacs; i++) + ADD_LEN(fifo_len, mem_cfg->lmac[i].rxfifo1_size, hdr_len); + +dump_txf: + if (!(fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_TXF))) + goto dump_internal_txf; + + /* Count TXF sizes */ + for (i = 0; i < mem_cfg->num_lmacs; i++) { + int j; + + for (j = 0; j < mem_cfg->num_txfifo_entries; j++) + ADD_LEN(fifo_len, mem_cfg->lmac[i].txfifo_size[j], + hdr_len); + } + +dump_internal_txf: + if (!((fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_INTERNAL_TXF)) && + fw_has_capa(&fwrt->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG))) + goto out; + + for (i = 0; i < ARRAY_SIZE(mem_cfg->internal_txfifo_size); i++) + ADD_LEN(fifo_len, mem_cfg->internal_txfifo_size[i], hdr_len); + +out: + return fifo_len; +} + +static struct iwl_fw_error_dump_file * +_iwl_fw_error_dump(struct iwl_fw_runtime *fwrt, + struct iwl_fw_dump_ptrs *fw_error_dump) { struct iwl_fw_error_dump_file *dump_file; struct iwl_fw_error_dump_data *dump_data; struct iwl_fw_error_dump_info *dump_info; - struct iwl_fw_error_dump_mem *dump_mem; struct iwl_fw_error_dump_smem_cfg *dump_smem_cfg; struct iwl_fw_error_dump_trigger_desc *dump_trig; - struct iwl_fw_dump_ptrs *fw_error_dump; - struct scatterlist *sg_dump_data; u32 sram_len, sram_ofs; - const struct iwl_fw_dbg_mem_seg_tlv *fw_dbg_mem = fwrt->fw->dbg_mem_tlv; + const struct iwl_fw_dbg_mem_seg_tlv *fw_mem = fwrt->fw->dbg.mem_tlv; struct iwl_fwrt_shared_mem_cfg *mem_cfg = &fwrt->smem_cfg; - u32 file_len, fifo_data_len = 0, prph_len = 0, radio_len = 0; - u32 smem_len = fwrt->fw->n_dbg_mem_tlv ? 0 : fwrt->trans->cfg->smem_len; - u32 sram2_len = fwrt->fw->n_dbg_mem_tlv ? + u32 file_len, fifo_len = 0, prph_len = 0, radio_len = 0; + u32 smem_len = fwrt->fw->dbg.n_mem_tlv ? 0 : fwrt->trans->cfg->smem_len; + u32 sram2_len = fwrt->fw->dbg.n_mem_tlv ? 
0 : fwrt->trans->cfg->dccm2_len; bool monitor_dump_only = false; int i; - IWL_DEBUG_INFO(fwrt, "WRT dump start\n"); - - /* there's no point in fw dump if the bus is dead */ - if (test_bit(STATUS_TRANS_DEAD, &fwrt->trans->status)) { - IWL_ERR(fwrt, "Skip fw error dump since bus is dead\n"); - goto out; - } - if (fwrt->dump.trig && fwrt->dump.trig->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY) monitor_dump_only = true; - fw_error_dump = kzalloc(sizeof(*fw_error_dump), GFP_KERNEL); - if (!fw_error_dump) - goto out; - /* SRAM - include stack CCM if driver knows the values for it */ if (!fwrt->trans->cfg->dccm_offset || !fwrt->trans->cfg->dccm_len) { const struct fw_img *img; @@ -606,138 +676,43 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt) /* reading RXF/TXF sizes */ if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) { - fifo_data_len = 0; - - if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RXF)) { - - /* Count RXF2 size */ - if (mem_cfg->rxfifo2_size) { - /* Add header info */ - fifo_data_len += - mem_cfg->rxfifo2_size + - sizeof(*dump_data) + - sizeof(struct iwl_fw_error_dump_fifo); - } - - /* Count RXF1 sizes */ - for (i = 0; i < mem_cfg->num_lmacs; i++) { - if (!mem_cfg->lmac[i].rxfifo1_size) - continue; - - /* Add header info */ - fifo_data_len += - mem_cfg->lmac[i].rxfifo1_size + - sizeof(*dump_data) + - sizeof(struct iwl_fw_error_dump_fifo); - } - } - - if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_TXF)) { - size_t fifo_const_len = sizeof(*dump_data) + - sizeof(struct iwl_fw_error_dump_fifo); - - /* Count TXF sizes */ - for (i = 0; i < mem_cfg->num_lmacs; i++) { - int j; - - for (j = 0; j < mem_cfg->num_txfifo_entries; - j++) { - if (!mem_cfg->lmac[i].txfifo_size[j]) - continue; - - /* Add header info */ - fifo_data_len += - fifo_const_len + - mem_cfg->lmac[i].txfifo_size[j]; - } - } - } - - if ((fwrt->fw->dbg_dump_mask & - BIT(IWL_FW_ERROR_DUMP_INTERNAL_TXF)) && - fw_has_capa(&fwrt->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) { - for (i = 0; - i < ARRAY_SIZE(mem_cfg->internal_txfifo_size); - i++) { - if (!mem_cfg->internal_txfifo_size[i]) - continue; - - /* Add header info */ - fifo_data_len += - mem_cfg->internal_txfifo_size[i] + - sizeof(*dump_data) + - sizeof(struct iwl_fw_error_dump_fifo); - } - } + fifo_len = iwl_fw_fifo_len(fwrt, mem_cfg); /* Make room for PRPH registers */ if (!fwrt->trans->cfg->gen2 && - fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PRPH)) { - for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr_comm); - i++) { - /* The range includes both boundaries */ - int num_bytes_in_chunk = - iwl_prph_dump_addr_comm[i].end - - iwl_prph_dump_addr_comm[i].start + 4; - - prph_len += sizeof(*dump_data) + - sizeof(struct iwl_fw_error_dump_prph) + - num_bytes_in_chunk; - } - } - - if (!fwrt->trans->cfg->gen2 && - fwrt->trans->cfg->mq_rx_supported && - fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PRPH)) { - for (i = 0; i < - ARRAY_SIZE(iwl_prph_dump_addr_9000); i++) { - /* The range includes both boundaries */ - int num_bytes_in_chunk = - iwl_prph_dump_addr_9000[i].end - - iwl_prph_dump_addr_9000[i].start + 4; - - prph_len += sizeof(*dump_data) + - sizeof(struct iwl_fw_error_dump_prph) + - num_bytes_in_chunk; - } - } + fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_PRPH)) + prph_len += iwl_fw_get_prph_len(fwrt); if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000 && - fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RADIO_REG)) + fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_RADIO_REG)) radio_len = sizeof(*dump_data) + 
RADIO_REG_MAX_READ; } - file_len = sizeof(*dump_file) + - fifo_data_len + - prph_len + - radio_len; + file_len = sizeof(*dump_file) + fifo_len + prph_len + radio_len; - if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_DEV_FW_INFO)) + if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_DEV_FW_INFO)) file_len += sizeof(*dump_data) + sizeof(*dump_info); - if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM_CFG)) + if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM_CFG)) file_len += sizeof(*dump_data) + sizeof(*dump_smem_cfg); - if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) { - /* Make room for the SMEM, if it exists */ - if (smem_len) - file_len += sizeof(*dump_data) + sizeof(*dump_mem) + - smem_len; - - /* Make room for the secondary SRAM, if it exists */ - if (sram2_len) - file_len += sizeof(*dump_data) + sizeof(*dump_mem) + - sram2_len; - - /* Make room for MEM segments */ - for (i = 0; i < fwrt->fw->n_dbg_mem_tlv; i++) { - file_len += sizeof(*dump_data) + sizeof(*dump_mem) + - le32_to_cpu(fw_dbg_mem[i].len); - } + if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) { + size_t hdr_len = sizeof(*dump_data) + + sizeof(struct iwl_fw_error_dump_mem); + + /* Dump SRAM only if no mem_tlvs */ + if (!fwrt->fw->dbg.n_mem_tlv) + ADD_LEN(file_len, sram_len, hdr_len); + + /* Make room for all mem types that exist */ + ADD_LEN(file_len, smem_len, hdr_len); + ADD_LEN(file_len, sram2_len, hdr_len); + + for (i = 0; i < fwrt->fw->dbg.n_mem_tlv; i++) + ADD_LEN(file_len, le32_to_cpu(fw_mem[i].len), hdr_len); } /* Make room for fw's virtual image pages, if it exists */ - if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING) && + if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING) && !fwrt->trans->cfg->gen2 && fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size && fwrt->fw_paging_db[0].fw_paging_block) @@ -746,33 +721,32 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt) sizeof(struct iwl_fw_error_dump_paging) + PAGING_BLOCK_SIZE); + if (iwl_fw_dbg_is_d3_debug_enabled(fwrt) && fwrt->dump.d3_debug_data) { + file_len += sizeof(*dump_data) + + fwrt->trans->cfg->d3_debug_data_length * 2; + } + /* If we only want a monitor dump, reset the file length */ if (monitor_dump_only) { file_len = sizeof(*dump_file) + sizeof(*dump_data) * 2 + sizeof(*dump_info) + sizeof(*dump_smem_cfg); } - if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_ERROR_INFO) && + if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_ERROR_INFO) && fwrt->dump.desc) file_len += sizeof(*dump_data) + sizeof(*dump_trig) + fwrt->dump.desc->len; - if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM) && - !fwrt->fw->n_dbg_mem_tlv) - file_len += sizeof(*dump_data) + sram_len + sizeof(*dump_mem); - dump_file = vzalloc(file_len); - if (!dump_file) { - kfree(fw_error_dump); - goto out; - } + if (!dump_file) + return NULL; fw_error_dump->fwrt_ptr = dump_file; dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER); dump_data = (void *)dump_file->data; - if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_DEV_FW_INFO)) { + if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_DEV_FW_INFO)) { dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO); dump_data->len = cpu_to_le32(sizeof(*dump_info)); dump_info = (void *)dump_data->data; @@ -793,7 +767,7 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt) dump_data = iwl_fw_error_next_data(dump_data); } - if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM_CFG)) { + if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM_CFG)) { /* Dump shared memory 
configuration */ dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_CFG); dump_data->len = cpu_to_le32(sizeof(*dump_smem_cfg)); @@ -824,13 +798,13 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt) } /* We only dump the FIFOs if the FW is in error state */ - if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) { + if (fifo_len) { iwl_fw_dump_fifos(fwrt, &dump_data); if (radio_len) iwl_read_radio_regs(fwrt, &dump_data); } - if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_ERROR_INFO) && + if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_ERROR_INFO) && fwrt->dump.desc) { dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO); dump_data->len = cpu_to_le32(sizeof(*dump_trig) + @@ -844,89 +818,54 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt) /* In case we only want monitor dump, skip to dump transport data */ if (monitor_dump_only) - goto dump_trans_data; - - if (!fwrt->fw->n_dbg_mem_tlv && - fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) { - dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); - dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem)); - dump_mem = (void *)dump_data->data; - dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM); - dump_mem->offset = cpu_to_le32(sram_ofs); - iwl_trans_read_mem_bytes(fwrt->trans, sram_ofs, dump_mem->data, - sram_len); - dump_data = iwl_fw_error_next_data(dump_data); - } + goto out; + + if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) { + const struct iwl_fw_dbg_mem_seg_tlv *fw_dbg_mem = + fwrt->fw->dbg.mem_tlv; + + if (!fwrt->fw->dbg.n_mem_tlv) + iwl_fw_dump_mem(fwrt, &dump_data, sram_len, sram_ofs, + IWL_FW_ERROR_DUMP_MEM_SRAM); - for (i = 0; i < fwrt->fw->n_dbg_mem_tlv; i++) { - u32 len = le32_to_cpu(fw_dbg_mem[i].len); - u32 ofs = le32_to_cpu(fw_dbg_mem[i].ofs); - bool success; - - if (!(fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM))) - break; - - dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); - dump_data->len = cpu_to_le32(len + sizeof(*dump_mem)); - dump_mem = (void *)dump_data->data; - dump_mem->type = fw_dbg_mem[i].data_type; - dump_mem->offset = cpu_to_le32(ofs); - - IWL_DEBUG_INFO(fwrt, "WRT memory dump. Type=%u\n", - dump_mem->type); - - switch (dump_mem->type & cpu_to_le32(FW_DBG_MEM_TYPE_MASK)) { - case cpu_to_le32(FW_DBG_MEM_TYPE_REGULAR): - iwl_trans_read_mem_bytes(fwrt->trans, ofs, - dump_mem->data, - len); - success = true; - break; - case cpu_to_le32(FW_DBG_MEM_TYPE_PRPH): - success = iwl_read_prph_block(fwrt->trans, ofs, len, - (void *)dump_mem->data); - break; - default: - /* - * shouldn't get here, we ignored this kind - * of TLV earlier during the TLV parsing?!
- */ - WARN_ON(1); - success = false; + for (i = 0; i < fwrt->fw->dbg.n_mem_tlv; i++) { + u32 len = le32_to_cpu(fw_dbg_mem[i].len); + u32 ofs = le32_to_cpu(fw_dbg_mem[i].ofs); + + iwl_fw_dump_mem(fwrt, &dump_data, len, ofs, + le32_to_cpu(fw_dbg_mem[i].data_type)); } - if (success) - dump_data = iwl_fw_error_next_data(dump_data); - } + iwl_fw_dump_mem(fwrt, &dump_data, smem_len, + fwrt->trans->cfg->smem_offset, + IWL_FW_ERROR_DUMP_MEM_SMEM); - if (smem_len && fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) { - IWL_DEBUG_INFO(fwrt, "WRT SMEM dump\n"); - dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); - dump_data->len = cpu_to_le32(smem_len + sizeof(*dump_mem)); - dump_mem = (void *)dump_data->data; - dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SMEM); - dump_mem->offset = cpu_to_le32(fwrt->trans->cfg->smem_offset); - iwl_trans_read_mem_bytes(fwrt->trans, - fwrt->trans->cfg->smem_offset, - dump_mem->data, smem_len); - dump_data = iwl_fw_error_next_data(dump_data); + iwl_fw_dump_mem(fwrt, &dump_data, sram2_len, + fwrt->trans->cfg->dccm2_offset, + IWL_FW_ERROR_DUMP_MEM_SRAM); } - if (sram2_len && fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) { - IWL_DEBUG_INFO(fwrt, "WRT SRAM dump\n"); - dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); - dump_data->len = cpu_to_le32(sram2_len + sizeof(*dump_mem)); - dump_mem = (void *)dump_data->data; - dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM); - dump_mem->offset = cpu_to_le32(fwrt->trans->cfg->dccm2_offset); - iwl_trans_read_mem_bytes(fwrt->trans, - fwrt->trans->cfg->dccm2_offset, - dump_mem->data, sram2_len); + if (iwl_fw_dbg_is_d3_debug_enabled(fwrt) && fwrt->dump.d3_debug_data) { + u32 addr = fwrt->trans->cfg->d3_debug_data_base_addr; + size_t data_size = fwrt->trans->cfg->d3_debug_data_length; + + dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_D3_DEBUG_DATA); + dump_data->len = cpu_to_le32(data_size * 2); + + memcpy(dump_data->data, fwrt->dump.d3_debug_data, data_size); + + kfree(fwrt->dump.d3_debug_data); + fwrt->dump.d3_debug_data = NULL; + + iwl_trans_read_mem_bytes(fwrt->trans, addr, + dump_data->data + data_size, + data_size); + dump_data = iwl_fw_error_next_data(dump_data); } /* Dump fw's virtual image */ - if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING) && + if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING) && !fwrt->trans->cfg->gen2 && fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size && fwrt->fw_paging_db[0].fw_paging_block) { @@ -962,13 +901,44 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt) ARRAY_SIZE(iwl_prph_dump_addr_9000)); } -dump_trans_data: +out: + dump_file->file_len = cpu_to_le32(file_len); + return dump_file; +} + +void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt) +{ + struct iwl_fw_dump_ptrs *fw_error_dump; + struct iwl_fw_error_dump_file *dump_file; + struct scatterlist *sg_dump_data; + u32 file_len; + + IWL_DEBUG_INFO(fwrt, "WRT dump start\n"); + + /* there's no point in fw dump if the bus is dead */ + if (test_bit(STATUS_TRANS_DEAD, &fwrt->trans->status)) { + IWL_ERR(fwrt, "Skip fw error dump since bus is dead\n"); + goto out; + } + + fw_error_dump = kzalloc(sizeof(*fw_error_dump), GFP_KERNEL); + if (!fw_error_dump) + goto out; + + dump_file = _iwl_fw_error_dump(fwrt, fw_error_dump); + if (!dump_file) { + kfree(fw_error_dump); + goto out; + } + fw_error_dump->trans_ptr = iwl_trans_dump_data(fwrt->trans, fwrt->dump.trig); + file_len = le32_to_cpu(dump_file->file_len); fw_error_dump->fwrt_len = file_len; - if (fw_error_dump->trans_ptr) + if 
(fw_error_dump->trans_ptr) { file_len += fw_error_dump->trans_ptr->len; - dump_file->file_len = cpu_to_le32(file_len); + dump_file->file_len = cpu_to_le32(file_len); + } sg_dump_data = alloc_sgtable(file_len); if (sg_dump_data) { @@ -1003,20 +973,39 @@ const struct iwl_fw_dump_desc iwl_dump_desc_assert = { }; IWL_EXPORT_SYMBOL(iwl_dump_desc_assert); -int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt, - const struct iwl_fw_dump_desc *desc, - const struct iwl_fw_dbg_trigger_tlv *trigger) +void iwl_fw_alive_error_dump(struct iwl_fw_runtime *fwrt) { - unsigned int delay = 0; + struct iwl_fw_dump_desc *iwl_dump_desc_no_alive = + kmalloc(sizeof(*iwl_dump_desc_no_alive), GFP_KERNEL); + + if (!iwl_dump_desc_no_alive) + return; - if (trigger) - delay = msecs_to_jiffies(le32_to_cpu(trigger->stop_delay)); + iwl_dump_desc_no_alive->trig_desc.type = + cpu_to_le32(FW_DBG_TRIGGER_NO_ALIVE); + iwl_dump_desc_no_alive->len = 0; + if (WARN_ON(fwrt->dump.desc)) + iwl_fw_free_dump_desc(fwrt); + + IWL_WARN(fwrt, "Collecting data: trigger %d fired.\n", + FW_DBG_TRIGGER_NO_ALIVE); + + fwrt->dump.desc = iwl_dump_desc_no_alive; + iwl_fw_error_dump(fwrt); + clear_bit(IWL_FWRT_STATUS_WAIT_ALIVE, &fwrt->status); +} +IWL_EXPORT_SYMBOL(iwl_fw_alive_error_dump); + +int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt, + const struct iwl_fw_dump_desc *desc, void *trigger, + unsigned int delay) +{ /* * If the loading of the FW completed successfully, the next step is to * get the SMEM config data. Thus, if fwrt->smem_cfg.num_lmacs is non * zero, the FW was already loaded successfully. If the state is "NO_FW" - * in such a case - WARN and exit, since FW may be dead. Otherwise, we + * in such a case - exit, since FW may be dead. Otherwise, we * can try to collect the data, since FW might just not be fully * loaded (no "ALIVE" yet), and the debug data is accessible. * @@ -1024,12 +1013,12 @@ int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt, * config. In such a case, due to HW access problems, we might * collect garbage.
*/ - if (WARN((fwrt->trans->state == IWL_TRANS_NO_FW) && - fwrt->smem_cfg.num_lmacs, - "Can't collect dbg data when FW isn't alive\n")) + if (fwrt->trans->state == IWL_TRANS_NO_FW && + fwrt->smem_cfg.num_lmacs) return -EIO; - if (test_and_set_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status)) + if (test_and_set_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status) || + test_bit(IWL_FWRT_STATUS_WAIT_ALIVE, &fwrt->status)) return -EBUSY; if (WARN_ON(fwrt->dump.desc)) @@ -1050,25 +1039,38 @@ IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_desc); int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt, enum iwl_fw_dbg_trigger trig, const char *str, size_t len, - const struct iwl_fw_dbg_trigger_tlv *trigger) + struct iwl_fw_dbg_trigger_tlv *trigger) { struct iwl_fw_dump_desc *desc; + unsigned int delay = 0; - if (trigger && trigger->flags & IWL_FW_DBG_FORCE_RESTART) { - IWL_WARN(fwrt, "Force restart: trigger %d fired.\n", trig); - iwl_force_nmi(fwrt->trans); - return 0; + if (trigger) { + u16 occurrences = le16_to_cpu(trigger->occurrences) - 1; + + if (!le16_to_cpu(trigger->occurrences)) + return 0; + + if (trigger->flags & IWL_FW_DBG_FORCE_RESTART) { + IWL_WARN(fwrt, "Force restart: trigger %d fired.\n", + trig); + iwl_force_nmi(fwrt->trans); + return 0; + } + + trigger->occurrences = cpu_to_le16(occurrences); + delay = le16_to_cpu(trigger->trig_dis_ms); } desc = kzalloc(sizeof(*desc) + len, GFP_ATOMIC); if (!desc) return -ENOMEM; + desc->len = len; desc->trig_desc.type = cpu_to_le32(trig); memcpy(desc->trig_desc.data, str, len); - return iwl_fw_dbg_collect_desc(fwrt, desc, trigger); + return iwl_fw_dbg_collect_desc(fwrt, desc, trigger, delay); } IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect); @@ -1076,13 +1078,9 @@ int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt, struct iwl_fw_dbg_trigger_tlv *trigger, const char *fmt, ...) 
{ - u16 occurrences = le16_to_cpu(trigger->occurrences); int ret, len = 0; char buf[64]; - if (!occurrences) - return 0; - if (fmt) { va_list ap; @@ -1105,7 +1103,6 @@ int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt, if (ret) return ret; - trigger->occurrences = cpu_to_le16(occurrences - 1); return 0; } IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_trig); @@ -1116,29 +1113,26 @@ int iwl_fw_start_dbg_conf(struct iwl_fw_runtime *fwrt, u8 conf_id) int ret; int i; - if (WARN_ONCE(conf_id >= ARRAY_SIZE(fwrt->fw->dbg_conf_tlv), + if (WARN_ONCE(conf_id >= ARRAY_SIZE(fwrt->fw->dbg.conf_tlv), "Invalid configuration %d\n", conf_id)) return -EINVAL; /* EARLY START - firmware's configuration is hard coded */ - if ((!fwrt->fw->dbg_conf_tlv[conf_id] || - !fwrt->fw->dbg_conf_tlv[conf_id]->num_of_hcmds) && + if ((!fwrt->fw->dbg.conf_tlv[conf_id] || + !fwrt->fw->dbg.conf_tlv[conf_id]->num_of_hcmds) && conf_id == FW_DBG_START_FROM_ALIVE) return 0; - if (!fwrt->fw->dbg_conf_tlv[conf_id]) + if (!fwrt->fw->dbg.conf_tlv[conf_id]) return -EINVAL; if (fwrt->dump.conf != FW_DBG_INVALID) IWL_WARN(fwrt, "FW already configured (%d) - re-configuring\n", fwrt->dump.conf); - /* start default config marker cmd for syncing logs */ - iwl_fw_trigger_timestamp(fwrt, 1); - /* Send all HCMDs for configuring the FW debug */ - ptr = (void *)&fwrt->fw->dbg_conf_tlv[conf_id]->hcmd; - for (i = 0; i < fwrt->fw->dbg_conf_tlv[conf_id]->num_of_hcmds; i++) { + ptr = (void *)&fwrt->fw->dbg.conf_tlv[conf_id]->hcmd; + for (i = 0; i < fwrt->fw->dbg.conf_tlv[conf_id]->num_of_hcmds; i++) { struct iwl_fw_dbg_conf_hcmd *cmd = (void *)ptr; struct iwl_host_cmd hcmd = { .id = cmd->id, @@ -1164,6 +1158,7 @@ void iwl_fw_error_dump_wk(struct work_struct *work) { struct iwl_fw_runtime *fwrt = container_of(work, struct iwl_fw_runtime, dump.wk.work); + struct iwl_fw_dbg_params params = {0}; if (fwrt->ops && fwrt->ops->dump_start && fwrt->ops->dump_start(fwrt->ops_ctx)) @@ -1177,41 +1172,42 @@ void iwl_fw_error_dump_wk(struct work_struct *work) goto out; } - if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) { - /* stop recording */ - iwl_fw_dbg_stop_recording(fwrt); - - iwl_fw_error_dump(fwrt); - - /* start recording again if the firmware is not crashed */ - if (!test_bit(STATUS_FW_ERROR, &fwrt->trans->status) && - fwrt->fw->dbg_dest_tlv) { - iwl_clear_bits_prph(fwrt->trans, - MON_BUFF_SAMPLE_CTL, 0x100); - iwl_clear_bits_prph(fwrt->trans, - MON_BUFF_SAMPLE_CTL, 0x1); - iwl_set_bits_prph(fwrt->trans, - MON_BUFF_SAMPLE_CTL, 0x1); - } - } else { - u32 in_sample = iwl_read_prph(fwrt->trans, DBGC_IN_SAMPLE); - u32 out_ctrl = iwl_read_prph(fwrt->trans, DBGC_OUT_CTRL); + iwl_fw_dbg_stop_recording(fwrt, ¶ms); + + iwl_fw_error_dump(fwrt); - iwl_fw_dbg_stop_recording(fwrt); + /* start recording again if the firmware is not crashed */ + if (!test_bit(STATUS_FW_ERROR, &fwrt->trans->status) && + fwrt->fw->dbg.dest_tlv) { /* wait before we collect the data till the DBGC stop */ udelay(500); - - iwl_fw_error_dump(fwrt); - - /* start recording again if the firmware is not crashed */ - if (!test_bit(STATUS_FW_ERROR, &fwrt->trans->status) && - fwrt->fw->dbg_dest_tlv) { - iwl_write_prph(fwrt->trans, DBGC_IN_SAMPLE, in_sample); - iwl_write_prph(fwrt->trans, DBGC_OUT_CTRL, out_ctrl); - } + iwl_fw_dbg_restart_recording(fwrt, ¶ms); } out: if (fwrt->ops && fwrt->ops->dump_end) fwrt->ops->dump_end(fwrt->ops_ctx); } +void iwl_fw_dbg_read_d3_debug_data(struct iwl_fw_runtime *fwrt) +{ + const struct iwl_cfg *cfg = fwrt->trans->cfg; + + if 
(!iwl_fw_dbg_is_d3_debug_enabled(fwrt)) + return; + + if (!fwrt->dump.d3_debug_data) { + fwrt->dump.d3_debug_data = kmalloc(cfg->d3_debug_data_length, + GFP_KERNEL); + if (!fwrt->dump.d3_debug_data) { + IWL_ERR(fwrt, + "failed to allocate memory for D3 debug data\n"); + return; + } + } + + /* if the buffer holds previous debug data it is overwritten */ + iwl_trans_read_mem_bytes(fwrt->trans, cfg->d3_debug_data_base_addr, + fwrt->dump.d3_debug_data, + cfg->d3_debug_data_length); +} +IWL_EXPORT_SYMBOL(iwl_fw_dbg_read_d3_debug_data); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h index 507d9a49fa97..d9578dcec24c 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h @@ -19,9 +19,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * @@ -74,6 +71,7 @@ #include "iwl-io.h" #include "file.h" #include "error-dump.h" +#include "api/commands.h" /** * struct iwl_fw_dump_desc - describes the dump @@ -86,6 +84,16 @@ struct iwl_fw_dump_desc { struct iwl_fw_error_dump_trigger_desc trig_desc; }; +/** + * struct iwl_fw_dbg_params - register values to restore + * @in_sample: DBGC_IN_SAMPLE value + * @out_ctrl: DBGC_OUT_CTRL value + */ +struct iwl_fw_dbg_params { + u32 in_sample; + u32 out_ctrl; +}; + extern const struct iwl_fw_dump_desc iwl_dump_desc_assert; static inline void iwl_fw_free_dump_desc(struct iwl_fw_runtime *fwrt) @@ -99,25 +107,25 @@ static inline void iwl_fw_free_dump_desc(struct iwl_fw_runtime *fwrt) void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt); int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt, const struct iwl_fw_dump_desc *desc, - const struct iwl_fw_dbg_trigger_tlv *trigger); + void *trigger, unsigned int delay); int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt, enum iwl_fw_dbg_trigger trig, const char *str, size_t len, - const struct iwl_fw_dbg_trigger_tlv *trigger); + struct iwl_fw_dbg_trigger_tlv *trigger); int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt, struct iwl_fw_dbg_trigger_tlv *trigger, const char *fmt, ...) 
__printf(3, 4); int iwl_fw_start_dbg_conf(struct iwl_fw_runtime *fwrt, u8 id); #define iwl_fw_dbg_trigger_enabled(fw, id) ({ \ - void *__dbg_trigger = (fw)->dbg_trigger_tlv[(id)]; \ + void *__dbg_trigger = (fw)->dbg.trigger_tlv[(id)]; \ unlikely(__dbg_trigger); \ }) static inline struct iwl_fw_dbg_trigger_tlv* _iwl_fw_dbg_get_trigger(const struct iwl_fw *fw, enum iwl_fw_dbg_trigger id) { - return fw->dbg_trigger_tlv[id]; + return fw->dbg.trigger_tlv[id]; } #define iwl_fw_dbg_get_trigger(fw, id) ({ \ @@ -146,12 +154,9 @@ iwl_fw_dbg_trigger_stop_conf_match(struct iwl_fw_runtime *fwrt, } static inline bool -iwl_fw_dbg_no_trig_window(struct iwl_fw_runtime *fwrt, - struct iwl_fw_dbg_trigger_tlv *trig) +iwl_fw_dbg_no_trig_window(struct iwl_fw_runtime *fwrt, u32 id, u32 dis_ms) { - unsigned long wind_jiff = - msecs_to_jiffies(le16_to_cpu(trig->trig_dis_ms)); - u32 id = le32_to_cpu(trig->id); + unsigned long wind_jiff = msecs_to_jiffies(dis_ms); /* If this is the first event checked, jump to update start ts */ if (fwrt->dump.non_collect_ts_start[id] && @@ -171,7 +176,8 @@ iwl_fw_dbg_trigger_check_stop(struct iwl_fw_runtime *fwrt, if (wdev && !iwl_fw_dbg_trigger_vif_match(trig, wdev)) return false; - if (iwl_fw_dbg_no_trig_window(fwrt, trig)) { + if (iwl_fw_dbg_no_trig_window(fwrt, le32_to_cpu(trig->id), + le16_to_cpu(trig->trig_dis_ms))) { IWL_WARN(fwrt, "Trigger %d occurred while no-collect window.\n", trig->id); return false; @@ -180,6 +186,30 @@ iwl_fw_dbg_trigger_check_stop(struct iwl_fw_runtime *fwrt, return iwl_fw_dbg_trigger_stop_conf_match(fwrt, trig); } +static inline struct iwl_fw_dbg_trigger_tlv* +_iwl_fw_dbg_trigger_on(struct iwl_fw_runtime *fwrt, + struct wireless_dev *wdev, + const enum iwl_fw_dbg_trigger id) +{ + struct iwl_fw_dbg_trigger_tlv *trig; + + if (!iwl_fw_dbg_trigger_enabled(fwrt->fw, id)) + return NULL; + + trig = _iwl_fw_dbg_get_trigger(fwrt->fw, id); + + if (!iwl_fw_dbg_trigger_check_stop(fwrt, wdev, trig)) + return NULL; + + return trig; +} + +#define iwl_fw_dbg_trigger_on(fwrt, wdev, id) ({ \ + BUILD_BUG_ON(!__builtin_constant_p(id)); \ + BUILD_BUG_ON((id) >= FW_DBG_TRIGGER_MAX); \ + _iwl_fw_dbg_trigger_on((fwrt), (wdev), (id)); \ +}) + static inline void _iwl_fw_dbg_trigger_simple_stop(struct iwl_fw_runtime *fwrt, struct wireless_dev *wdev, @@ -199,17 +229,80 @@ _iwl_fw_dbg_trigger_simple_stop(struct iwl_fw_runtime *fwrt, iwl_fw_dbg_get_trigger((fwrt)->fw,\ (trig))) -static inline void iwl_fw_dbg_stop_recording(struct iwl_fw_runtime *fwrt) +static int iwl_fw_dbg_start_stop_hcmd(struct iwl_fw_runtime *fwrt, bool start) +{ + struct iwl_continuous_record_cmd cont_rec = {}; + struct iwl_host_cmd hcmd = { + .id = LDBG_CONFIG_CMD, + .flags = CMD_ASYNC, + .data[0] = &cont_rec, + .len[0] = sizeof(cont_rec), + }; + + cont_rec.record_mode.enable_recording = start ? 
+ cpu_to_le16(START_DEBUG_RECORDING) : + cpu_to_le16(STOP_DEBUG_RECORDING); + + return iwl_trans_send_cmd(fwrt->trans, &hcmd); +} + +static inline void +_iwl_fw_dbg_stop_recording(struct iwl_trans *trans, + struct iwl_fw_dbg_params *params) +{ + if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) { + iwl_set_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x100); + return; + } + + if (params) { + params->in_sample = iwl_read_prph(trans, DBGC_IN_SAMPLE); + params->out_ctrl = iwl_read_prph(trans, DBGC_OUT_CTRL); + } + + iwl_write_prph(trans, DBGC_IN_SAMPLE, 0); + udelay(100); + iwl_write_prph(trans, DBGC_OUT_CTRL, 0); +} + +static inline void +iwl_fw_dbg_stop_recording(struct iwl_fw_runtime *fwrt, + struct iwl_fw_dbg_params *params) +{ + if (fwrt->trans->cfg->device_family < IWL_DEVICE_FAMILY_22560) + _iwl_fw_dbg_stop_recording(fwrt->trans, params); + else + iwl_fw_dbg_start_stop_hcmd(fwrt, false); +} + +static inline void +_iwl_fw_dbg_restart_recording(struct iwl_trans *trans, + struct iwl_fw_dbg_params *params) { - if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) { - iwl_set_bits_prph(fwrt->trans, MON_BUFF_SAMPLE_CTL, 0x100); + if (WARN_ON(!params)) + return; + + if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) { + iwl_clear_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x100); + iwl_clear_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x1); + iwl_set_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x1); } else { - iwl_write_prph(fwrt->trans, DBGC_IN_SAMPLE, 0); + iwl_write_prph(trans, DBGC_IN_SAMPLE, params->in_sample); udelay(100); - iwl_write_prph(fwrt->trans, DBGC_OUT_CTRL, 0); + iwl_write_prph(trans, DBGC_OUT_CTRL, params->out_ctrl); } } +static inline void +iwl_fw_dbg_restart_recording(struct iwl_fw_runtime *fwrt, + struct iwl_fw_dbg_params *params) +{ + if (fwrt->trans->cfg->device_family < IWL_DEVICE_FAMILY_22560) + _iwl_fw_dbg_restart_recording(fwrt->trans, params); + else + iwl_fw_dbg_start_stop_hcmd(fwrt, true); +} + static inline void iwl_fw_dump_conf_clear(struct iwl_fw_runtime *fwrt) { fwrt->dump.conf = FW_DBG_INVALID; @@ -217,6 +310,16 @@ static inline void iwl_fw_dump_conf_clear(struct iwl_fw_runtime *fwrt) void iwl_fw_error_dump_wk(struct work_struct *work); +static inline bool iwl_fw_dbg_is_d3_debug_enabled(struct iwl_fw_runtime *fwrt) +{ + return fw_has_capa(&fwrt->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_D3_DEBUG) && + fwrt->trans->cfg->d3_debug_data_length && + fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_D3_DEBUG_DATA); +} + +void iwl_fw_dbg_read_d3_debug_data(struct iwl_fw_runtime *fwrt); + static inline void iwl_fw_flush_dump(struct iwl_fw_runtime *fwrt) { flush_delayed_work(&fwrt->dump.wk); @@ -263,4 +366,5 @@ static inline void iwl_fw_resume_timestamp(struct iwl_fw_runtime *fwrt) {} #endif /* CONFIG_IWLWIFI_DEBUGFS */ +void iwl_fw_alive_error_dump(struct iwl_fw_runtime *fwrt); #endif /* __iwl_fw_dbg_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c index 8ba5a60ec9ed..3e120dd47305 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c @@ -8,6 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
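[Editorial sketch, not part of the patch: the stop/restart helpers reworked in dbg.h above now thread a struct iwl_fw_dbg_params through the caller, so the DBGC registers saved on stop can be restored on restart; pre-22560 devices poke the registers directly, newer ones use the LDBG_CONFIG_CMD host command. A condensed usage example, mirroring iwl_fw_error_dump_wk().]

        struct iwl_fw_dbg_params params = {0};

        iwl_fw_dbg_stop_recording(fwrt, &params); /* saves DBGC_IN_SAMPLE/OUT_CTRL */
        iwl_fw_error_dump(fwrt);
        /* restart only if the firmware did not crash */
        if (!test_bit(STATUS_FW_ERROR, &fwrt->trans->status))
                iwl_fw_dbg_restart_recording(fwrt, &params);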
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright (C) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -18,9 +19,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program. - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * @@ -33,6 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright (C) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -66,55 +65,117 @@ #include "debugfs.h" #include "dbg.h" -#define FWRT_DEBUGFS_READ_FILE_OPS(name) \ -static ssize_t iwl_dbgfs_##name##_read(struct iwl_fw_runtime *fwrt, \ - char *buf, size_t count, \ - loff_t *ppos); \ +#define FWRT_DEBUGFS_OPEN_WRAPPER(name, buflen, argtype) \ +struct dbgfs_##name##_data { \ + argtype *arg; \ + bool read_done; \ + ssize_t rlen; \ + char rbuf[buflen]; \ +}; \ +static int _iwl_dbgfs_##name##_open(struct inode *inode, \ + struct file *file) \ +{ \ + struct dbgfs_##name##_data *data; \ + \ + data = kzalloc(sizeof(*data), GFP_KERNEL); \ + if (!data) \ + return -ENOMEM; \ + \ + data->read_done = false; \ + data->arg = inode->i_private; \ + file->private_data = data; \ + \ + return 0; \ +} + +#define FWRT_DEBUGFS_READ_WRAPPER(name) \ +static ssize_t _iwl_dbgfs_##name##_read(struct file *file, \ + char __user *user_buf, \ + size_t count, loff_t *ppos) \ +{ \ + struct dbgfs_##name##_data *data = file->private_data; \ + \ + if (!data->read_done) { \ + data->read_done = true; \ + data->rlen = iwl_dbgfs_##name##_read(data->arg, \ + sizeof(data->rbuf),\ + data->rbuf); \ + } \ + \ + if (data->rlen < 0) \ + return data->rlen; \ + return simple_read_from_buffer(user_buf, count, ppos, \ + data->rbuf, data->rlen); \ +} + +static int _iwl_dbgfs_release(struct inode *inode, struct file *file) +{ + kfree(file->private_data); + + return 0; +} + +#define _FWRT_DEBUGFS_READ_FILE_OPS(name, buflen, argtype) \ +FWRT_DEBUGFS_OPEN_WRAPPER(name, buflen, argtype) \ +FWRT_DEBUGFS_READ_WRAPPER(name) \ static const struct file_operations iwl_dbgfs_##name##_ops = { \ - .read = iwl_dbgfs_##name##_read, \ - .open = simple_open, \ + .read = _iwl_dbgfs_##name##_read, \ + .open = _iwl_dbgfs_##name##_open, \ .llseek = generic_file_llseek, \ + .release = _iwl_dbgfs_release, \ } -#define FWRT_DEBUGFS_WRITE_WRAPPER(name, buflen) \ -static ssize_t iwl_dbgfs_##name##_write(struct iwl_fw_runtime *fwrt, \ - char *buf, size_t count, \ - loff_t *ppos); \ +#define FWRT_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype) \ static ssize_t _iwl_dbgfs_##name##_write(struct file *file, \ const char __user *user_buf, \ size_t count, loff_t *ppos) \ { \ - struct iwl_fw_runtime *fwrt = file->private_data; \ + argtype *arg = \ + ((struct dbgfs_##name##_data *)file->private_data)->arg;\ char buf[buflen] = {}; \ size_t buf_size = min(count, sizeof(buf) - 1); \ \ if (copy_from_user(buf, user_buf, buf_size)) \ return -EFAULT; \ \ - return iwl_dbgfs_##name##_write(fwrt, buf, buf_size, ppos); \ + return iwl_dbgfs_##name##_write(arg, buf, buf_size); \ } -#define 
FWRT_DEBUGFS_READ_WRITE_FILE_OPS(name, buflen) \ -FWRT_DEBUGFS_WRITE_WRAPPER(name, buflen) \ +#define _FWRT_DEBUGFS_READ_WRITE_FILE_OPS(name, buflen, argtype) \ +FWRT_DEBUGFS_OPEN_WRAPPER(name, buflen, argtype) \ +FWRT_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype) \ +FWRT_DEBUGFS_READ_WRAPPER(name) \ static const struct file_operations iwl_dbgfs_##name##_ops = { \ .write = _iwl_dbgfs_##name##_write, \ - .read = iwl_dbgfs_##name##_read, \ - .open = simple_open, \ + .read = _iwl_dbgfs_##name##_read, \ + .open = _iwl_dbgfs_##name##_open, \ .llseek = generic_file_llseek, \ + .release = _iwl_dbgfs_release, \ } -#define FWRT_DEBUGFS_WRITE_FILE_OPS(name, buflen) \ -FWRT_DEBUGFS_WRITE_WRAPPER(name, buflen) \ +#define _FWRT_DEBUGFS_WRITE_FILE_OPS(name, buflen, argtype) \ +FWRT_DEBUGFS_OPEN_WRAPPER(name, buflen, argtype) \ +FWRT_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype) \ static const struct file_operations iwl_dbgfs_##name##_ops = { \ .write = _iwl_dbgfs_##name##_write, \ - .open = simple_open, \ + .open = _iwl_dbgfs_##name##_open, \ .llseek = generic_file_llseek, \ + .release = _iwl_dbgfs_release, \ } +#define FWRT_DEBUGFS_READ_FILE_OPS(name, bufsz) \ + _FWRT_DEBUGFS_READ_FILE_OPS(name, bufsz, struct iwl_fw_runtime) + +#define FWRT_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \ + _FWRT_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct iwl_fw_runtime) + +#define FWRT_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \ + _FWRT_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct iwl_fw_runtime) + #define FWRT_DEBUGFS_ADD_FILE_ALIAS(alias, name, parent, mode) do { \ - if (!debugfs_create_file(alias, mode, parent, fwrt, \ - &iwl_dbgfs_##name##_ops)) \ - goto err; \ + if (!debugfs_create_file(alias, mode, parent, fwrt, \ + &iwl_dbgfs_##name##_ops)) \ + goto err; \ } while (0) #define FWRT_DEBUGFS_ADD_FILE(name, parent, mode) \ FWRT_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode) @@ -173,8 +234,7 @@ void iwl_fw_trigger_timestamp(struct iwl_fw_runtime *fwrt, u32 delay) } static ssize_t iwl_dbgfs_timestamp_marker_write(struct iwl_fw_runtime *fwrt, - char *buf, size_t count, - loff_t *ppos) + char *buf, size_t count) { int ret; u32 delay; @@ -188,13 +248,85 @@ static ssize_t iwl_dbgfs_timestamp_marker_write(struct iwl_fw_runtime *fwrt, return count; } -FWRT_DEBUGFS_WRITE_FILE_OPS(timestamp_marker, 10); +static ssize_t iwl_dbgfs_timestamp_marker_read(struct iwl_fw_runtime *fwrt, + size_t size, char *buf) +{ + u32 delay_secs = jiffies_to_msecs(fwrt->timestamp.delay) / 1000; + + return scnprintf(buf, size, "%d\n", delay_secs); +} + +FWRT_DEBUGFS_READ_WRITE_FILE_OPS(timestamp_marker, 16); + +struct hcmd_write_data { + __be32 cmd_id; + __be32 flags; + __be16 length; + u8 data[0]; +} __packed; + +static ssize_t iwl_dbgfs_send_hcmd_write(struct iwl_fw_runtime *fwrt, char *buf, + size_t count) +{ + size_t header_size = (sizeof(u32) * 2 + sizeof(u16)) * 2; + size_t data_size = (count - 1) / 2; + int ret; + struct hcmd_write_data *data; + struct iwl_host_cmd hcmd = { + .len = { 0, }, + .data = { NULL, }, + }; + + if (fwrt->ops && fwrt->ops->fw_running && + !fwrt->ops->fw_running(fwrt->ops_ctx)) + return -EIO; + + if (count < header_size + 1 || count > 1024 * 4) + return -EINVAL; + + data = kmalloc(data_size, GFP_KERNEL); + if (!data) + return -ENOMEM; + + ret = hex2bin((u8 *)data, buf, data_size); + if (ret) + goto out; + + hcmd.id = be32_to_cpu(data->cmd_id); + hcmd.flags = be32_to_cpu(data->flags); + hcmd.len[0] = be16_to_cpu(data->length); + hcmd.data[0] = data->data; + + if (count != header_size + hcmd.len[0] * 2 + 1) { + IWL_ERR(fwrt, + 
"host command data size does not match header length\n"); + ret = -EINVAL; + goto out; + } + + if (fwrt->ops && fwrt->ops->send_hcmd) + ret = fwrt->ops->send_hcmd(fwrt->ops_ctx, &hcmd); + else + ret = -EPERM; + + if (ret < 0) + goto out; + + if (hcmd.flags & CMD_WANT_SKB) + iwl_free_resp(&hcmd); +out: + kfree(data); + return ret ?: count; +} + +FWRT_DEBUGFS_WRITE_FILE_OPS(send_hcmd, 512); int iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt, struct dentry *dbgfs_dir) { INIT_DELAYED_WORK(&fwrt->timestamp.wk, iwl_fw_timestamp_marker_wk); FWRT_DEBUGFS_ADD_FILE(timestamp_marker, dbgfs_dir, 0200); + FWRT_DEBUGFS_ADD_FILE(send_hcmd, dbgfs_dir, 0200); return 0; err: IWL_ERR(fwrt, "Can't create the fwrt debugfs directory\n"); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h index cbbfa8e9e66d..88255035e8ef 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h @@ -18,9 +18,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program. - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * diff --git a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h index ed7beca8817e..6fede174c664 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h @@ -8,6 +8,7 @@ * Copyright(c) 2014 Intel Corporation. All rights reserved. * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright (C) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -18,11 +19,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * @@ -35,6 +31,7 @@ * Copyright(c) 2014 Intel Corporation. All rights reserved. * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright (C) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -116,6 +113,7 @@ enum iwl_fw_error_dump_type { IWL_FW_ERROR_DUMP_INTERNAL_TXF = 14, IWL_FW_ERROR_DUMP_EXTERNAL = 15, /* Do not move */ IWL_FW_ERROR_DUMP_MEM_CFG = 16, + IWL_FW_ERROR_DUMP_D3_DEBUG_DATA = 17, IWL_FW_ERROR_DUMP_MAX, }; @@ -330,6 +328,7 @@ iwl_fw_error_next_data(struct iwl_fw_error_dump_data *data) * @FW_DBG_TDLS: trigger log collection upon TDLS related events. * @FW_DBG_TRIGGER_TX_STATUS: trigger log collection upon tx status when * the firmware sends a tx reply. 
+ * @FW_DBG_TRIGGER_NO_ALIVE: trigger log collection if alive flow fails */ enum iwl_fw_dbg_trigger { FW_DBG_TRIGGER_INVALID = 0, @@ -347,6 +346,7 @@ enum iwl_fw_dbg_trigger { FW_DBG_TRIGGER_TX_LATENCY, FW_DBG_TRIGGER_TDLS, FW_DBG_TRIGGER_TX_STATUS, + FW_DBG_TRIGGER_NO_ALIVE, /* must be last */ FW_DBG_TRIGGER_MAX, diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h index bbf2b265a06a..6005a41c53d1 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/file.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h @@ -19,11 +19,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * @@ -258,6 +253,9 @@ typedef unsigned int __bitwise iwl_ucode_tlv_api_t; * deprecated. * @IWL_UCODE_TLV_API_ADAPTIVE_DWELL_V2: This ucode supports version 8 * of scan request: SCAN_REQUEST_CMD_UMAC_API_S_VER_8 + * @IWL_UCODE_TLV_API_FRAG_EBS: This ucode supports fragmented EBS + * @IWL_UCODE_TLV_API_REDUCE_TX_POWER: This ucode supports v5 of + * the REDUCE_TX_POWER_CMD. * * @NUM_IWL_UCODE_TLV_API: number of bits used */ @@ -276,9 +274,12 @@ enum iwl_ucode_tlv_api { IWL_UCODE_TLV_API_OCE = (__force iwl_ucode_tlv_api_t)33, IWL_UCODE_TLV_API_NEW_BEACON_TEMPLATE = (__force iwl_ucode_tlv_api_t)34, IWL_UCODE_TLV_API_NEW_RX_STATS = (__force iwl_ucode_tlv_api_t)35, + IWL_UCODE_TLV_API_WOWLAN_KEY_MATERIAL = (__force iwl_ucode_tlv_api_t)36, IWL_UCODE_TLV_API_QUOTA_LOW_LATENCY = (__force iwl_ucode_tlv_api_t)38, IWL_UCODE_TLV_API_DEPRECATE_TTAK = (__force iwl_ucode_tlv_api_t)41, IWL_UCODE_TLV_API_ADAPTIVE_DWELL_V2 = (__force iwl_ucode_tlv_api_t)42, + IWL_UCODE_TLV_API_FRAG_EBS = (__force iwl_ucode_tlv_api_t)44, + IWL_UCODE_TLV_API_REDUCE_TX_POWER = (__force iwl_ucode_tlv_api_t)45, NUM_IWL_UCODE_TLV_API #ifdef __CHECKER__ @@ -325,6 +326,7 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t; * @IWL_UCODE_TLV_CAPA_STA_PM_NOTIF: firmware will send STA PM notification * @IWL_UCODE_TLV_CAPA_TLC_OFFLOAD: firmware implements rate scaling algorithm * @IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA: firmware implements quota related + * @IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2: firmware implements Coex Schema 2 * @IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE: extended DTS measurement * @IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS: supports short PM timeouts * @IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT: supports bt-coex Multi-priority LUT @@ -335,7 +337,7 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t; * antenna the beacon should be transmitted * @IWL_UCODE_TLV_CAPA_BEACON_STORING: firmware will store the latest beacon * from AP and will send it upon d0i3 exit. - * @IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V2: support LAR API V2 + * @IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V3: support LAR API V3 * @IWL_UCODE_TLV_CAPA_CT_KILL_BY_FW: firmware responsible for CT-kill * @IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT: supports temperature * thresholds reporting @@ -349,6 +351,9 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t; * command size (command version 4) that supports toggling ACK TX * power reduction. 
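[Editorial sketch, not part of the patch: new API bits such as IWL_UCODE_TLV_API_FRAG_EBS above are consumed through the existing fw_has_api()/fw_has_capa() helpers. A plausible pairing with the IWL_SCAN_CHANNEL_FLAG_EBS_FRAG channel flag added in scan.h earlier; the helper below is illustrative, not the driver's actual code.]

        static void iwl_scan_set_ebs_frag(struct iwl_fw_runtime *fwrt, u8 *flags)
        {
                if (fw_has_api(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_API_FRAG_EBS))
                        *flags |= IWL_SCAN_CHANNEL_FLAG_EBS_FRAG;
        }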
* @IWL_UCODE_TLV_CAPA_MLME_OFFLOAD: supports MLME offload + * @IWL_UCODE_TLV_CAPA_D3_DEBUG: supports debug recording during D3 + * @IWL_UCODE_TLV_CAPA_MCC_UPDATE_11AX_SUPPORT: MCC response support 11ax + * capability. * * @NUM_IWL_UCODE_TLV_CAPA: number of bits used */ @@ -381,6 +386,7 @@ enum iwl_ucode_tlv_capa { IWL_UCODE_TLV_CAPA_D0I3_END_FIRST = (__force iwl_ucode_tlv_capa_t)41, IWL_UCODE_TLV_CAPA_TLC_OFFLOAD = (__force iwl_ucode_tlv_capa_t)43, IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA = (__force iwl_ucode_tlv_capa_t)44, + IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2 = (__force iwl_ucode_tlv_capa_t)45, IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE = (__force iwl_ucode_tlv_capa_t)64, IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS = (__force iwl_ucode_tlv_capa_t)65, IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT = (__force iwl_ucode_tlv_capa_t)67, @@ -388,7 +394,7 @@ enum iwl_ucode_tlv_capa { IWL_UCODE_TLV_CAPA_CSA_AND_TBTT_OFFLOAD = (__force iwl_ucode_tlv_capa_t)70, IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION = (__force iwl_ucode_tlv_capa_t)71, IWL_UCODE_TLV_CAPA_BEACON_STORING = (__force iwl_ucode_tlv_capa_t)72, - IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V2 = (__force iwl_ucode_tlv_capa_t)73, + IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V3 = (__force iwl_ucode_tlv_capa_t)73, IWL_UCODE_TLV_CAPA_CT_KILL_BY_FW = (__force iwl_ucode_tlv_capa_t)74, IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT = (__force iwl_ucode_tlv_capa_t)75, IWL_UCODE_TLV_CAPA_CTDP_SUPPORT = (__force iwl_ucode_tlv_capa_t)76, @@ -396,7 +402,9 @@ enum iwl_ucode_tlv_capa { IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG = (__force iwl_ucode_tlv_capa_t)80, IWL_UCODE_TLV_CAPA_LQM_SUPPORT = (__force iwl_ucode_tlv_capa_t)81, IWL_UCODE_TLV_CAPA_TX_POWER_ACK = (__force iwl_ucode_tlv_capa_t)84, - IWL_UCODE_TLV_CAPA_LED_CMD_SUPPORT = (__force iwl_ucode_tlv_capa_t)86, + IWL_UCODE_TLV_CAPA_D3_DEBUG = (__force iwl_ucode_tlv_capa_t)87, + IWL_UCODE_TLV_CAPA_LED_CMD_SUPPORT = (__force iwl_ucode_tlv_capa_t)88, + IWL_UCODE_TLV_CAPA_MCC_UPDATE_11AX_SUPPORT = (__force iwl_ucode_tlv_capa_t)89, IWL_UCODE_TLV_CAPA_MLME_OFFLOAD = (__force iwl_ucode_tlv_capa_t)96, NUM_IWL_UCODE_TLV_CAPA @@ -528,22 +536,9 @@ enum iwl_fw_dbg_monitor_mode { }; /** - * enum iwl_fw_mem_seg_type - memory segment type - * @FW_DBG_MEM_TYPE_MASK: mask for the type indication - * @FW_DBG_MEM_TYPE_REGULAR: regular memory - * @FW_DBG_MEM_TYPE_PRPH: periphery memory (requires special reading) - */ -enum iwl_fw_mem_seg_type { - FW_DBG_MEM_TYPE_MASK = 0xff000000, - FW_DBG_MEM_TYPE_REGULAR = 0x00000000, - FW_DBG_MEM_TYPE_PRPH = 0x01000000, -}; - -/** * struct iwl_fw_dbg_mem_seg_tlv - configures the debug data memory segments * - * @data_type: the memory segment type to record, see &enum iwl_fw_mem_seg_type - * for what we care about + * @data_type: the memory segment type to record * @ofs: the memory segment offset * @len: the memory segment length, in bytes * diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.h b/drivers/net/wireless/intel/iwlwifi/fw/img.h index 0861b97c4233..54dbbd998abf 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/img.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/img.h @@ -19,11 +19,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. 
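[Editorial note: most of the dbg.c/dbg.h churn above is a mechanical rename: the scattered dbg_* fields of struct iwl_fw move into the struct iwl_fw_dbg container introduced in the img.h hunk that follows. A before/after sketch of the access pattern, with dump_rxf() as a hypothetical stand-in for the real callers.]

        /* before this series: flat fields on struct iwl_fw */
        if (fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RXF))
                dump_rxf();

        /* after: everything debug-related grouped under fw->dbg */
        if (fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_RXF))
                dump_rxf();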
* @@ -203,6 +198,29 @@ enum iwl_fw_type { }; /** + * struct iwl_fw_dbg - debug data + * + * @dest_tlv: points to debug destination TLV (typically SRAM or DRAM) + * @n_dest_reg: num of reg_ops in dest_tlv + * @conf_tlv: array of pointers to configuration HCMDs + * @trigger_tlv: array of pointers to triggers TLVs + * @trigger_tlv_len: lengths of the @trigger_tlv entries + * @mem_tlv: Runtime addresses to dump + * @n_mem_tlv: number of runtime addresses + * @dump_mask: bitmask of dump regions +*/ +struct iwl_fw_dbg { + struct iwl_fw_dbg_dest_tlv_v1 *dest_tlv; + u8 n_dest_reg; + struct iwl_fw_dbg_conf_tlv *conf_tlv[FW_DBG_CONF_MAX]; + struct iwl_fw_dbg_trigger_tlv *trigger_tlv[FW_DBG_TRIGGER_MAX]; + size_t trigger_tlv_len[FW_DBG_TRIGGER_MAX]; + struct iwl_fw_dbg_mem_seg_tlv *mem_tlv; + size_t n_mem_tlv; + u32 dump_mask; +}; + +/** * struct iwl_fw - variables associated with the firmware * * @ucode_ver: ucode version from the ucode file @@ -222,12 +240,6 @@ enum iwl_fw_type { * @cipher_scheme: optional external cipher scheme. * @human_readable: human readable version * we get the ALIVE from the uCode - * @dbg_dest_tlv: points to the destination TLV for debug - * @dbg_conf_tlv: array of pointers to configuration TLVs for debug - * @dbg_conf_tlv_len: lengths of the @dbg_conf_tlv entries - * @dbg_trigger_tlv: array of pointers to triggers TLVs - * @dbg_trigger_tlv_len: lengths of the @dbg_trigger_tlv entries - * @dbg_dest_reg_num: num of reg_ops in %dbg_dest_tlv */ struct iwl_fw { u32 ucode_ver; @@ -255,15 +267,7 @@ struct iwl_fw { struct iwl_fw_cipher_scheme cs[IWL_UCODE_MAX_CS]; u8 human_readable[FW_VER_HUMAN_READABLE_SZ]; - struct iwl_fw_dbg_dest_tlv_v1 *dbg_dest_tlv; - struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX]; - size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX]; - struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX]; - struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv; - size_t n_dbg_mem_tlv; - size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX]; - u8 dbg_dest_reg_num; - u32 dbg_dump_mask; + struct iwl_fw_dbg dbg; }; static inline const char *get_fw_dbg_mode_string(int mode) @@ -285,7 +289,7 @@ static inline const char *get_fw_dbg_mode_string(int mode) static inline bool iwl_fw_dbg_conf_usniffer(const struct iwl_fw *fw, u8 id) { - const struct iwl_fw_dbg_conf_tlv *conf_tlv = fw->dbg_conf_tlv[id]; + const struct iwl_fw_dbg_conf_tlv *conf_tlv = fw->dbg.conf_tlv[id]; if (!conf_tlv) return false; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/notif-wait.c b/drivers/net/wireless/intel/iwlwifi/fw/notif-wait.c index 1096c945a68b..379735e086dc 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/notif-wait.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/notif-wait.c @@ -17,11 +17,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * diff --git a/drivers/net/wireless/intel/iwlwifi/fw/notif-wait.h b/drivers/net/wireless/intel/iwlwifi/fw/notif-wait.h index 368884be4e7c..61b067eeeac9 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/notif-wait.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/notif-wait.h @@ -17,11 +17,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
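Folding the nine separate fw->dbg_* members into the single struct iwl_fw_dbg above means call sites change mechanically from fw->dbg_conf_tlv[id] to fw->dbg.conf_tlv[id], as in iwl_fw_dbg_conf_usniffer. The new dump_mask member is consumed as a per-region bitmap; a hedged sketch of that gating (the region numbering here is illustrative, not the driver's dump-region enum):

	/* region IDs are hypothetical placeholders */
	enum dump_region { DUMP_REGION_CSR = 0, DUMP_REGION_MEM = 1, DUMP_REGION_PRPH = 2 };

	static bool want_dump(const struct iwl_fw_dbg *dbg, enum dump_region r)
	{
		return dbg->dump_mask & BIT(r);
	}

This is also what the 0xfffdffff default set later in iwl_req_fw_callback expresses: every region enabled except the one bit reserved for D3 debug data.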
See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h index ed23367f7088..6b95d0e75889 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h @@ -71,6 +71,7 @@ struct iwl_fw_runtime_ops { int (*dump_start)(void *ctx); void (*dump_end)(void *ctx); bool (*fw_running)(void *ctx); + int (*send_hcmd)(void *ctx, struct iwl_host_cmd *host_cmd); }; #define MAX_NUM_LMAC 2 @@ -88,6 +89,7 @@ struct iwl_fwrt_shared_mem_cfg { enum iwl_fw_runtime_status { IWL_FWRT_STATUS_DUMPING = 0, + IWL_FWRT_STATUS_WAIT_ALIVE, }; /** @@ -136,6 +138,7 @@ struct iwl_fw_runtime { /* ts of the beginning of a non-collect fw dbg data period */ unsigned long non_collect_ts_start[FW_DBG_TRIGGER_MAX - 1]; + u32 *d3_debug_data; } dump; #ifdef CONFIG_IWLWIFI_DEBUGFS struct { diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-agn-hw.h b/drivers/net/wireless/intel/iwlwifi/iwl-agn-hw.h index ee9347a54cdc..359537620c93 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-agn-hw.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-agn-hw.h @@ -16,11 +16,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index 12fddcf15bab..5eb906a0d0d2 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -18,11 +18,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * @@ -388,6 +383,8 @@ struct iwl_csr_params { * @gen2: 22000 and on transport operation * @cdb: CDB support * @nvm_type: see &enum iwl_nvm_type + * @d3_debug_data_base_addr: base address where D3 debug data is stored + * @d3_debug_data_length: length of the D3 debug data * * We enable the driver to be backward compatible wrt. hardware features. 
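The new send_hcmd op gives code under fw/ (such as the debug-dump machinery) a way to send host commands without linking against a specific op mode. A sketch of how an op mode might implement and register it; the function names and locking are assumptions, only the ops-struct layout comes from the header above:

	/* hypothetical op-mode implementation: forward to the MVM command path */
	static int iwl_mvm_fwrt_send_hcmd(void *ctx, struct iwl_host_cmd *host_cmd)
	{
		struct iwl_mvm *mvm = ctx;
		int ret;

		mutex_lock(&mvm->mutex);
		ret = iwl_mvm_send_cmd(mvm, host_cmd);
		mutex_unlock(&mvm->mutex);

		return ret;
	}

	static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = {
		.dump_start = iwl_mvm_fwrt_dump_start,	/* assumed names */
		.dump_end = iwl_mvm_fwrt_dump_end,
		.fw_running = iwl_mvm_fwrt_fw_running,
		.send_hcmd = iwl_mvm_fwrt_send_hcmd,
	};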
* API differences in uCode shouldn't be handled here but through TLVs @@ -452,6 +449,8 @@ struct iwl_cfg { u8 ucode_api_min; u32 min_umac_error_event_table; u32 extra_phy_cfg_flags; + u32 d3_debug_data_base_addr; + u32 d3_debug_data_length; }; static const struct iwl_csr_params iwl_csr_v1 = { @@ -574,11 +573,18 @@ extern const struct iwl_cfg iwl22000_2ac_cfg_hr; extern const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb; extern const struct iwl_cfg iwl22000_2ac_cfg_jf; extern const struct iwl_cfg iwl22000_2ax_cfg_hr; +extern const struct iwl_cfg iwl9461_2ac_cfg_qu_b0_jf_b0; +extern const struct iwl_cfg iwl9462_2ac_cfg_qu_b0_jf_b0; +extern const struct iwl_cfg iwl9560_2ac_cfg_qu_b0_jf_b0; +extern const struct iwl_cfg killer1550i_2ac_cfg_qu_b0_jf_b0; +extern const struct iwl_cfg killer1550s_2ac_cfg_qu_b0_jf_b0; +extern const struct iwl_cfg iwl22000_2ax_cfg_jf; extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0_f0; +extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0_f0; extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0; extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_jf_b0; extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0; extern const struct iwl_cfg iwl22560_2ax_cfg_su_cdb; -#endif /* CONFIG_IWLMVM */ +#endif /* CPTCFG_IWLMVM || CPTCFG_IWLFMAC */ #endif /* __IWL_CONFIG_H__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h b/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h index 4b6fdf3b15fb..5ed07e37e3ee 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h @@ -64,20 +64,41 @@ * the init done for driver command that configures several system modes * @IWL_CTXT_INFO_EARLY_DEBUG: enable early debug * @IWL_CTXT_INFO_ENABLE_CDMP: enable core dump - * @IWL_CTXT_INFO_RB_SIZE_4K: Use 4K RB size (the default is 2K) * @IWL_CTXT_INFO_RB_CB_SIZE_POS: position of the RBD Cyclic Buffer Size * exponent, the actual size is 2**value, valid sizes are 8-2048. * The value is four bits long. 
Maximum valid exponent is 12 * @IWL_CTXT_INFO_TFD_FORMAT_LONG: use long TFD Format (the * default is short format - not supported by the driver) + * @IWL_CTXT_INFO_RB_SIZE_POS: RB size position + * (values are IWL_CTXT_INFO_RB_SIZE_*K) + * @IWL_CTXT_INFO_RB_SIZE_1K: Value for 1K RB size + * @IWL_CTXT_INFO_RB_SIZE_2K: Value for 2K RB size + * @IWL_CTXT_INFO_RB_SIZE_4K: Value for 4K RB size + * @IWL_CTXT_INFO_RB_SIZE_8K: Value for 8K RB size + * @IWL_CTXT_INFO_RB_SIZE_12K: Value for 12K RB size + * @IWL_CTXT_INFO_RB_SIZE_16K: Value for 16K RB size + * @IWL_CTXT_INFO_RB_SIZE_20K: Value for 20K RB size + * @IWL_CTXT_INFO_RB_SIZE_24K: Value for 24K RB size + * @IWL_CTXT_INFO_RB_SIZE_28K: Value for 28K RB size + * @IWL_CTXT_INFO_RB_SIZE_32K: Value for 32K RB size */ enum iwl_context_info_flags { IWL_CTXT_INFO_AUTO_FUNC_INIT = BIT(0), IWL_CTXT_INFO_EARLY_DEBUG = BIT(1), IWL_CTXT_INFO_ENABLE_CDMP = BIT(2), - IWL_CTXT_INFO_RB_SIZE_4K = BIT(3), IWL_CTXT_INFO_RB_CB_SIZE_POS = 4, IWL_CTXT_INFO_TFD_FORMAT_LONG = BIT(8), + IWL_CTXT_INFO_RB_SIZE_POS = 9, + IWL_CTXT_INFO_RB_SIZE_1K = 0x1, + IWL_CTXT_INFO_RB_SIZE_2K = 0x2, + IWL_CTXT_INFO_RB_SIZE_4K = 0x4, + IWL_CTXT_INFO_RB_SIZE_8K = 0x8, + IWL_CTXT_INFO_RB_SIZE_12K = 0x9, + IWL_CTXT_INFO_RB_SIZE_16K = 0xa, + IWL_CTXT_INFO_RB_SIZE_20K = 0xb, + IWL_CTXT_INFO_RB_SIZE_24K = 0xc, + IWL_CTXT_INFO_RB_SIZE_28K = 0xd, + IWL_CTXT_INFO_RB_SIZE_32K = 0xe, }; /* diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h index 9019de99f077..caa5806acd81 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h @@ -19,11 +19,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * @@ -188,6 +183,7 @@ #define CSR_HW_IF_CONFIG_REG_MSK_BOARD_VER (0x000000C0) #define CSR_HW_IF_CONFIG_REG_BIT_MAC_SI (0x00000100) #define CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI (0x00000200) +#define CSR_HW_IF_CONFIG_REG_D3_DEBUG (0x00000200) #define CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE (0x00000C00) #define CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH (0x00003000) #define CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP (0x0000C000) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-debug.c b/drivers/net/wireless/intel/iwlwifi/iwl-debug.c index b1c3b0d0fcc6..e1a41fd503a8 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-debug.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-debug.c @@ -16,11 +16,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-debug.h b/drivers/net/wireless/intel/iwlwifi/iwl-debug.h index c023fcf5d452..a2af68a0d34b 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-debug.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-debug.h @@ -13,10 +13,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. 
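Replacing the single IWL_CTXT_INFO_RB_SIZE_4K bit with a four-bit field means the receive-buffer size is now encoded by shifting one of the IWL_CTXT_INFO_RB_SIZE_* values to IWL_CTXT_INFO_RB_SIZE_POS, next to the RBD cyclic-buffer exponent. A sketch of composing the flags word (the 4K and 512-entry choices are example values):

	u32 flags = 0;

	flags |= IWL_CTXT_INFO_RB_SIZE_4K << IWL_CTXT_INFO_RB_SIZE_POS;	/* 4K RBs */
	flags |= 9 << IWL_CTXT_INFO_RB_CB_SIZE_POS;	/* 2**9 = 512 RBD entries */
	flags |= IWL_CTXT_INFO_TFD_FORMAT_LONG;

Note the values are no longer one-hot bits: 12K is 0x9 and 16K is 0xa, so they are only meaningful inside the shifted field, never OR'd in directly.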
See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h index a80e4202cd03..2cc6c019d0e1 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h @@ -2,6 +2,7 @@ * * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2015 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as @@ -12,10 +13,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * @@ -76,12 +73,11 @@ TRACE_EVENT(iwlwifi_dev_rx_data, TP_ARGS(dev, trans, rxbuf, len), TP_STRUCT__entry( DEV_ENTRY - __dynamic_array(u8, data, - len - iwl_rx_trace_len(trans, rxbuf, len)) + len - iwl_rx_trace_len(trans, rxbuf, len, NULL)) ), TP_fast_assign( - size_t offs = iwl_rx_trace_len(trans, rxbuf, len); + size_t offs = iwl_rx_trace_len(trans, rxbuf, len, NULL); DEV_ASSIGN; if (offs < len) memcpy(__get_dynamic_array(data), diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-io.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-io.h index 4164dc1745ed..7bb4e0e9bb69 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-io.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-io.h @@ -12,10 +12,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h index 27e3e4e96aa2..8e87186682e7 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h @@ -3,6 +3,7 @@ * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as @@ -13,10 +14,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. 
* - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * @@ -75,13 +72,18 @@ TRACE_EVENT(iwlwifi_dev_rx, TP_STRUCT__entry( DEV_ENTRY __field(u16, cmd) - __dynamic_array(u8, rxbuf, iwl_rx_trace_len(trans, pkt, len)) + __field(u8, hdr_offset) + __dynamic_array(u8, rxbuf, + iwl_rx_trace_len(trans, pkt, len, NULL)) ), TP_fast_assign( + size_t hdr_offset = 0; + DEV_ASSIGN; __entry->cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd); memcpy(__get_dynamic_array(rxbuf), pkt, - iwl_rx_trace_len(trans, pkt, len)); + iwl_rx_trace_len(trans, pkt, len, &hdr_offset)); + __entry->hdr_offset = hdr_offset; ), TP_printk("[%s] RX cmd %#.2x", __get_str(dev), __entry->cmd) @@ -126,61 +128,6 @@ TRACE_EVENT(iwlwifi_dev_tx, __entry->framelen, __entry->skbaddr) ); -struct iwl_error_event_table; -TRACE_EVENT(iwlwifi_dev_ucode_error, - TP_PROTO(const struct device *dev, const struct iwl_error_event_table *table, - u32 hw_ver, u32 brd_ver), - TP_ARGS(dev, table, hw_ver, brd_ver), - TP_STRUCT__entry( - DEV_ENTRY - __field(u32, desc) - __field(u32, tsf_low) - __field(u32, data1) - __field(u32, data2) - __field(u32, line) - __field(u32, blink2) - __field(u32, ilink1) - __field(u32, ilink2) - __field(u32, bcon_time) - __field(u32, gp1) - __field(u32, gp2) - __field(u32, rev_type) - __field(u32, major) - __field(u32, minor) - __field(u32, hw_ver) - __field(u32, brd_ver) - ), - TP_fast_assign( - DEV_ASSIGN; - __entry->desc = table->error_id; - __entry->tsf_low = table->tsf_low; - __entry->data1 = table->data1; - __entry->data2 = table->data2; - __entry->line = table->line; - __entry->blink2 = table->blink2; - __entry->ilink1 = table->ilink1; - __entry->ilink2 = table->ilink2; - __entry->bcon_time = table->bcon_time; - __entry->gp1 = table->gp1; - __entry->gp2 = table->gp2; - __entry->rev_type = table->gp3; - __entry->major = table->ucode_ver; - __entry->minor = table->hw_ver; - __entry->hw_ver = hw_ver; - __entry->brd_ver = brd_ver; - ), - TP_printk("[%s] #%02d %010u data 0x%08X 0x%08X line %u, " - "blink2 0x%05X ilink 0x%05X 0x%05X " - "bcon_tm %010u gp 0x%08X 0x%08X rev_type 0x%08X major 0x%08X " - "minor 0x%08X hw 0x%08X brd 0x%08X", - __get_str(dev), __entry->desc, __entry->tsf_low, - __entry->data1, __entry->data2, __entry->line, - __entry->blink2, __entry->ilink1, __entry->ilink2, - __entry->bcon_time, __entry->gp1, __entry->gp2, - __entry->rev_type, __entry->major, __entry->minor, - __entry->hw_ver, __entry->brd_ver) -); - TRACE_EVENT(iwlwifi_dev_ucode_event, TP_PROTO(const struct device *dev, u32 time, u32 data, u32 ev), TP_ARGS(dev, time, data, ev), diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h index 5dfc9295a7e0..32984c1f39a1 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. 
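The hdr_offset plumbing above lets the iwlwifi_dev_rx tracepoint record where the 802.11 header begins inside the traced buffer, while callers that don't need it (iwlwifi_dev_rx_data) simply pass NULL. Usage in isolation, assuming trans/pkt/len describe a received packet as in the events above:

	size_t hdr_offset = 0;
	size_t trace_len;

	/* full length for commands; data frames are truncated after the header */
	trace_len = iwl_rx_trace_len(trans, pkt, len, &hdr_offset);

	/* for an RX MPDU, the ieee80211_hdr sits hdr_offset bytes past the
	 * packet's command header (zero for anything that is not an MPDU) */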
* diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-ucode.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-ucode.h index e9b8673dd245..53842226ef1b 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-ucode.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-ucode.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.c b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.c index 6aa719865a58..9805432f124f 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.c @@ -1,6 +1,7 @@ /****************************************************************************** * * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved. + * Copyright (C) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as @@ -11,10 +12,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * @@ -30,12 +27,10 @@ #ifndef __CHECKER__ #include "iwl-trans.h" -#include "dvm/commands.h" #define CREATE_TRACE_POINTS #include "iwl-devtrace.h" EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_event); -EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_error); EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_cont_event); EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_wrap_event); #endif diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h index f5c1127253cb..fc649b2bc017 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h @@ -1,7 +1,8 @@ /****************************************************************************** * * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved. - * Copyright(C) 2016 Intel Deutschland GmbH + * Copyright(C) 2016 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as @@ -12,10 +13,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. 
* @@ -60,16 +57,23 @@ static inline bool iwl_trace_data(struct sk_buff *skb) } static inline size_t iwl_rx_trace_len(const struct iwl_trans *trans, - void *rxbuf, size_t len) + void *rxbuf, size_t len, + size_t *out_hdr_offset) { struct iwl_cmd_header *cmd = (void *)((u8 *)rxbuf + sizeof(__le32)); - struct ieee80211_hdr *hdr; + struct ieee80211_hdr *hdr = NULL; + size_t hdr_offset; if (cmd->cmd != trans->rx_mpdu_cmd) return len; - hdr = (void *)((u8 *)cmd + sizeof(struct iwl_cmd_header) + - trans->rx_mpdu_cmd_hdr_size); + hdr_offset = sizeof(struct iwl_cmd_header) + + trans->rx_mpdu_cmd_hdr_size; + + if (out_hdr_offset) + *out_hdr_offset = hdr_offset; + + hdr = (void *)((u8 *)cmd + hdr_offset); if (!ieee80211_is_data(hdr->frame_control)) return len; /* maybe try to identify EAPOL frames? */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index c0631255aee7..ba41d23b4211 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -18,11 +18,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * @@ -173,12 +168,12 @@ static void iwl_dealloc_ucode(struct iwl_drv *drv) { int i; - kfree(drv->fw.dbg_dest_tlv); - for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_conf_tlv); i++) - kfree(drv->fw.dbg_conf_tlv[i]); - for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_trigger_tlv); i++) - kfree(drv->fw.dbg_trigger_tlv[i]); - kfree(drv->fw.dbg_mem_tlv); + kfree(drv->fw.dbg.dest_tlv); + for (i = 0; i < ARRAY_SIZE(drv->fw.dbg.conf_tlv); i++) + kfree(drv->fw.dbg.conf_tlv[i]); + for (i = 0; i < ARRAY_SIZE(drv->fw.dbg.trigger_tlv); i++) + kfree(drv->fw.dbg.trigger_tlv[i]); + kfree(drv->fw.dbg.mem_tlv); kfree(drv->fw.iml); for (i = 0; i < IWL_UCODE_TYPE_MAX; i++) @@ -308,7 +303,7 @@ struct iwl_firmware_pieces { struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX]; size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX]; struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv; - size_t n_dbg_mem_tlv; + size_t n_mem_tlv; }; /* @@ -941,7 +936,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, IWL_INFO(drv, "Found debug destination: %s\n", get_fw_dbg_mode_string(mon_mode)); - drv->fw.dbg_dest_reg_num = (dest_v1) ? + drv->fw.dbg.n_dest_reg = (dest_v1) ? 
tlv_len - offsetof(struct iwl_fw_dbg_dest_tlv_v1, reg_ops) : @@ -949,8 +944,8 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, offsetof(struct iwl_fw_dbg_dest_tlv, reg_ops); - drv->fw.dbg_dest_reg_num /= - sizeof(drv->fw.dbg_dest_tlv->reg_ops[0]); + drv->fw.dbg.n_dest_reg /= + sizeof(drv->fw.dbg.dest_tlv->reg_ops[0]); break; } @@ -964,7 +959,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, break; } - if (conf->id >= ARRAY_SIZE(drv->fw.dbg_conf_tlv)) { + if (conf->id >= ARRAY_SIZE(drv->fw.dbg.conf_tlv)) { IWL_ERR(drv, "Skip unknown configuration: %d\n", conf->id); @@ -993,7 +988,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, (void *)tlv_data; u32 trigger_id = le32_to_cpu(trigger->id); - if (trigger_id >= ARRAY_SIZE(drv->fw.dbg_trigger_tlv)) { + if (trigger_id >= ARRAY_SIZE(drv->fw.dbg.trigger_tlv)) { IWL_ERR(drv, "Skip unknown trigger: %u\n", trigger->id); @@ -1020,7 +1015,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, break; } - drv->fw.dbg_dump_mask = + drv->fw.dbg.dump_mask = le32_to_cpup((__le32 *)tlv_data); break; } @@ -1065,38 +1060,23 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, case IWL_UCODE_TLV_FW_MEM_SEG: { struct iwl_fw_dbg_mem_seg_tlv *dbg_mem = (void *)tlv_data; - u32 type; size_t size; struct iwl_fw_dbg_mem_seg_tlv *n; if (tlv_len != (sizeof(*dbg_mem))) goto invalid_tlv_len; - type = le32_to_cpu(dbg_mem->data_type); - IWL_DEBUG_INFO(drv, "Found debug memory segment: %u\n", dbg_mem->data_type); - switch (type & FW_DBG_MEM_TYPE_MASK) { - case FW_DBG_MEM_TYPE_REGULAR: - case FW_DBG_MEM_TYPE_PRPH: - /* we know how to handle these */ - break; - default: - IWL_ERR(drv, - "Found debug memory segment with invalid type: 0x%x\n", - type); - return -EINVAL; - } - size = sizeof(*pieces->dbg_mem_tlv) * - (pieces->n_dbg_mem_tlv + 1); + (pieces->n_mem_tlv + 1); n = krealloc(pieces->dbg_mem_tlv, size, GFP_KERNEL); if (!n) return -ENOMEM; pieces->dbg_mem_tlv = n; - pieces->dbg_mem_tlv[pieces->n_dbg_mem_tlv] = *dbg_mem; - pieces->n_dbg_mem_tlv++; + pieces->dbg_mem_tlv[pieces->n_mem_tlv] = *dbg_mem; + pieces->n_mem_tlv++; break; } case IWL_UCODE_TLV_IML: { @@ -1275,8 +1255,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) fw->ucode_capa.standard_phy_calibration_size = IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE; fw->ucode_capa.n_scan_channels = IWL_DEFAULT_SCAN_CHANNELS; - /* dump all fw memory areas by default */ - fw->dbg_dump_mask = 0xffffffff; + /* dump all fw memory areas by default except d3 debug data */ + fw->dbg.dump_mask = 0xfffdffff; pieces = kzalloc(sizeof(*pieces), GFP_KERNEL); if (!pieces) @@ -1343,21 +1323,21 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) goto out_free_fw; if (pieces->dbg_dest_tlv_init) { - size_t dbg_dest_size = sizeof(*drv->fw.dbg_dest_tlv) + - sizeof(drv->fw.dbg_dest_tlv->reg_ops[0]) * - drv->fw.dbg_dest_reg_num; + size_t dbg_dest_size = sizeof(*drv->fw.dbg.dest_tlv) + + sizeof(drv->fw.dbg.dest_tlv->reg_ops[0]) * + drv->fw.dbg.n_dest_reg; - drv->fw.dbg_dest_tlv = kmalloc(dbg_dest_size, GFP_KERNEL); + drv->fw.dbg.dest_tlv = kmalloc(dbg_dest_size, GFP_KERNEL); - if (!drv->fw.dbg_dest_tlv) + if (!drv->fw.dbg.dest_tlv) goto out_free_fw; if (*pieces->dbg_dest_ver == 0) { - memcpy(drv->fw.dbg_dest_tlv, pieces->dbg_dest_tlv_v1, + memcpy(drv->fw.dbg.dest_tlv, pieces->dbg_dest_tlv_v1, dbg_dest_size); } else { struct iwl_fw_dbg_dest_tlv_v1 *dest_tlv = - drv->fw.dbg_dest_tlv; + drv->fw.dbg.dest_tlv; dest_tlv->version = 
pieces->dbg_dest_tlv->version; dest_tlv->monitor_mode = @@ -1372,8 +1352,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) pieces->dbg_dest_tlv->base_shift; memcpy(dest_tlv->reg_ops, pieces->dbg_dest_tlv->reg_ops, - sizeof(drv->fw.dbg_dest_tlv->reg_ops[0]) * - drv->fw.dbg_dest_reg_num); + sizeof(drv->fw.dbg.dest_tlv->reg_ops[0]) * + drv->fw.dbg.n_dest_reg); /* In version 1 of the destination tlv, which is * relevant for internal buffer exclusively, @@ -1389,15 +1369,13 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) } } - for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_conf_tlv); i++) { + for (i = 0; i < ARRAY_SIZE(drv->fw.dbg.conf_tlv); i++) { if (pieces->dbg_conf_tlv[i]) { - drv->fw.dbg_conf_tlv_len[i] = - pieces->dbg_conf_tlv_len[i]; - drv->fw.dbg_conf_tlv[i] = + drv->fw.dbg.conf_tlv[i] = kmemdup(pieces->dbg_conf_tlv[i], - drv->fw.dbg_conf_tlv_len[i], + pieces->dbg_conf_tlv_len[i], GFP_KERNEL); - if (!drv->fw.dbg_conf_tlv[i]) + if (!drv->fw.dbg.conf_tlv[i]) goto out_free_fw; } } @@ -1424,7 +1402,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) trigger_tlv_sz[FW_DBG_TRIGGER_TDLS] = sizeof(struct iwl_fw_dbg_trigger_tdls); - for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_trigger_tlv); i++) { + for (i = 0; i < ARRAY_SIZE(drv->fw.dbg.trigger_tlv); i++) { if (pieces->dbg_trigger_tlv[i]) { /* * If the trigger isn't long enough, WARN and exit. @@ -1437,22 +1415,22 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) (trigger_tlv_sz[i] + sizeof(struct iwl_fw_dbg_trigger_tlv)))) goto out_free_fw; - drv->fw.dbg_trigger_tlv_len[i] = + drv->fw.dbg.trigger_tlv_len[i] = pieces->dbg_trigger_tlv_len[i]; - drv->fw.dbg_trigger_tlv[i] = + drv->fw.dbg.trigger_tlv[i] = kmemdup(pieces->dbg_trigger_tlv[i], - drv->fw.dbg_trigger_tlv_len[i], + drv->fw.dbg.trigger_tlv_len[i], GFP_KERNEL); - if (!drv->fw.dbg_trigger_tlv[i]) + if (!drv->fw.dbg.trigger_tlv[i]) goto out_free_fw; } } /* Now that we can no longer fail, copy information */ - drv->fw.dbg_mem_tlv = pieces->dbg_mem_tlv; + drv->fw.dbg.mem_tlv = pieces->dbg_mem_tlv; pieces->dbg_mem_tlv = NULL; - drv->fw.n_dbg_mem_tlv = pieces->n_dbg_mem_tlv; + drv->fw.dbg.n_mem_tlv = pieces->n_mem_tlv; /* * The (size - 16) / 12 formula is based on the information recorded @@ -1493,6 +1471,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) break; default: WARN(1, "Invalid fw type %d\n", fw->type); + /* fall through */ case IWL_FW_MVM: op = &iwlwifi_opmode_table[MVM_OP_MODE]; break; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.h b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h index 1f8a2eeb7dff..2be30af7bdc3 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h @@ -17,11 +17,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. 
* diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c index a4c96215933b..4e3422a1c7bb 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c @@ -18,9 +18,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * @@ -745,7 +742,9 @@ void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg, else rx_chains = hweight8(rx_chains); - if (!(data->sku_cap_11n_enable) || !cfg->ht_params) { + if (!(data->sku_cap_11n_enable) || + (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL) || + !cfg->ht_params) { ht_info->ht_supported = false; return; } diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h index 8be50ed12300..d910bda087f7 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h @@ -17,11 +17,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c index ac965c34a2f8..a6db6a814257 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c @@ -17,11 +17,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.h b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.h index 1ed78be06c23..47fced159800 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.h @@ -16,11 +16,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h index df0e9ffff706..c6a534303936 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h @@ -18,9 +18,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
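The IWL_UCODE_TLV_FW_MEM_SEG handling earlier in iwl_parse_tlv_firmware grows the dbg_mem_tlv array one entry at a time with krealloc, relying on krealloc copying the old contents and leaving the original buffer intact on failure. The pattern in isolation (hedged sketch; struct elem stands in for iwl_fw_dbg_mem_seg_tlv):

	struct elem { __le32 data_type, ofs, len; };

	static int append_elem(struct elem **arr, size_t *n, const struct elem *e)
	{
		struct elem *tmp;

		tmp = krealloc(*arr, sizeof(*tmp) * (*n + 1), GFP_KERNEL);
		if (!tmp)
			return -ENOMEM;	/* *arr is still valid and unchanged */

		tmp[*n] = *e;
		*arr = tmp;
		(*n)++;
		return 0;
	}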
* - * You should have received a copy of the GNU General Public License - * along with this program. - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * @@ -68,6 +65,8 @@ #include <linux/types.h> #include <linux/bitfield.h> +#include "iwl-trans.h" + /****************************/ /* Flow Handler Definitions */ /****************************/ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.c b/drivers/net/wireless/intel/iwlwifi/iwl-io.c index efb1998dcabd..4f10914f6048 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-io.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.c @@ -14,10 +14,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.h b/drivers/net/wireless/intel/iwlwifi/iwl-io.h index 5c8c0e130194..38085850a2d3 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-io.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.h @@ -13,10 +13,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h index 97072cf75bca..6fc8dac4aab7 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h @@ -17,9 +17,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index 73969dbeb5c5..96e101d79662 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -19,11 +19,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. 
* @@ -476,30 +471,40 @@ static struct ieee80211_sband_iftype_data iwl_he_capa = { .has_he = true, .he_cap_elem = { .mac_cap_info[0] = - IEEE80211_HE_MAC_CAP0_HTC_HE, + IEEE80211_HE_MAC_CAP0_HTC_HE | + IEEE80211_HE_MAC_CAP0_TWT_REQ, .mac_cap_info[1] = IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US | - IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_8, + IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8, .mac_cap_info[2] = IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP | + IEEE80211_HE_MAC_CAP2_MU_CASCADING | IEEE80211_HE_MAC_CAP2_ACK_EN, .mac_cap_info[3] = - IEEE80211_HE_MAC_CAP3_GRP_ADDR_MULTI_STA_BA_DL_MU | - IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_VHT_2, - .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU, + IEEE80211_HE_MAC_CAP3_OMI_CONTROL | + IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2, + .mac_cap_info[4] = + IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU | + IEEE80211_HE_MAC_CAP4_MULTI_TID_AGG_TX_QOS_B39, + .mac_cap_info[5] = + IEEE80211_HE_MAC_CAP5_MULTI_TID_AGG_TX_QOS_B40 | + IEEE80211_HE_MAC_CAP5_MULTI_TID_AGG_TX_QOS_B41 | + IEEE80211_HE_MAC_CAP5_UL_2x996_TONE_RU, .phy_cap_info[0] = - IEEE80211_HE_PHY_CAP0_DUAL_BAND | IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G | IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G | IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G, .phy_cap_info[1] = + IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK | IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A | IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD | - IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_MAX_NSTS, + IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS, .phy_cap_info[2] = IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US | IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ | - IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ, + IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ | + IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO | + IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO, .phy_cap_info[3] = IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_BPSK | IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 | @@ -511,18 +516,31 @@ static struct ieee80211_sband_iftype_data iwl_he_capa = { IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_8, .phy_cap_info[5] = IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 | - IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2, + IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2 | + IEEE80211_HE_PHY_CAP5_NG16_SU_FEEDBACK | + IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK, .phy_cap_info[6] = + IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU | + IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU | + IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMER_FB | + IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMER_FB | + IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB | + IEEE80211_HE_PHY_CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO | IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT, .phy_cap_info[7] = IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_AR | IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI | - IEEE80211_HE_PHY_CAP7_MAX_NC_7, + IEEE80211_HE_PHY_CAP7_MAX_NC_1, .phy_cap_info[8] = IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI | IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G | IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU | - IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU, + IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU | + IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_160_OR_80P80_MHZ, + .phy_cap_info[9] = + IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK | + IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB | + IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB, }, /* * Set default Tx/Rx HE MCS NSS Support field. 
Indicate support @@ -559,9 +577,11 @@ static void iwl_init_he_hw_capab(struct ieee80211_supported_band *sband, /* If not 2x2, we need to indicate 1x1 in the Midamble RX Max NSTS */ if ((tx_chains & rx_chains) != ANT_AB) { iwl_he_capa.he_cap.he_cap_elem.phy_cap_info[1] &= - ~IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_MAX_NSTS; + ~IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS; iwl_he_capa.he_cap.he_cap_elem.phy_cap_info[2] &= - ~IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_MAX_NSTS; + ~IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_TX_MAX_NSTS; + iwl_he_capa.he_cap.he_cap_elem.phy_cap_info[7] &= + ~IEEE80211_HE_PHY_CAP7_MAX_NC_MASK; } } @@ -1315,6 +1335,7 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans, bool lar_fw_supported = !iwlwifi_mod_params.lar_disable && fw_has_capa(&fw->ucode_capa, IWL_UCODE_TLV_CAPA_LAR_SUPPORT); + bool empty_otp; u32 mac_flags; u32 sbands_flags = 0; @@ -1330,7 +1351,9 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans, } rsp = (void *)hcmd.resp_pkt->data; - if (le32_to_cpu(rsp->general.flags) & NVM_GENERAL_FLAGS_EMPTY_OTP) + empty_otp = !!(le32_to_cpu(rsp->general.flags) & + NVM_GENERAL_FLAGS_EMPTY_OTP); + if (empty_otp) IWL_INFO(trans, "OTP is empty\n"); nvm = kzalloc(sizeof(*nvm) + @@ -1354,6 +1377,11 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans, /* Initialize general data */ nvm->nvm_version = le16_to_cpu(rsp->general.nvm_version); + nvm->n_hw_addrs = rsp->general.n_hw_addrs; + if (nvm->n_hw_addrs == 0) + IWL_WARN(trans, + "Firmware declares no reserved mac addresses. OTP is empty: %d\n", + empty_otp); /* Initialize MAC sku data */ mac_flags = le32_to_cpu(rsp->mac_sku.mac_sku_flags); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h index 234d1009a9de..b7e1ddf8f177 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h @@ -18,11 +18,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h index b49eda8150bb..cbd1a8eed620 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h @@ -8,6 +8,7 @@ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2015 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -18,11 +19,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * @@ -35,6 +31,7 @@ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. 
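The midamble and Max-NC pruning just above keys off the intersection of the TX and RX chain masks: only when both contain both antennas is the device actually operating 2x2. With the antenna encoding assumed from iwl-config.h (ANT_A and ANT_B as single bits, ANT_AB their union), the test reduces to:

	/* assumed encoding: ANT_A = BIT(0), ANT_B = BIT(1), ANT_AB = ANT_A | ANT_B */
	u8 both_dirs = tx_chains & rx_chains;

	if (both_dirs != ANT_AB) {
		/* 1x1 operation: clear the 2-NSTS midamble bits and the
		 * IEEE80211_HE_PHY_CAP7_MAX_NC field, as in the hunk above */
	}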
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2015 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c b/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c index b7cd813ba70f..ae83cfdb750e 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c @@ -17,11 +17,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.h b/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.h index d34de3f71db6..7020dca05221 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.h @@ -16,11 +16,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h index 421a869633a3..0f51c7bea8d0 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h @@ -18,11 +18,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-scd.h b/drivers/net/wireless/intel/iwlwifi/iwl-scd.h index 99b43da32adf..9f11f3912816 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-scd.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-scd.h @@ -16,11 +16,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c index 7e9c924e1220..727f73e0b3f1 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c @@ -17,11 +17,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
* - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h index 279dd7b7a3fb..26b3c73051ca 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h @@ -18,11 +18,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * @@ -269,6 +264,7 @@ struct iwl_rx_cmd_buffer { bool _page_stolen; u32 _rx_page_order; unsigned int truesize; + u8 status; }; static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r) @@ -538,9 +534,6 @@ struct iwl_trans_rxq_dma_data { * @dump_data: return a vmalloc'ed buffer with debug data, maybe containing last * TX'ed commands and similar. The buffer will be vfree'd by the caller. * Note that the transport must fill in the proper file headers. - * @dump_regs: dump using IWL_ERR configuration space and memory mapped - * registers of the device to diagnose failure, e.g., when HW becomes - * inaccessible. */ struct iwl_trans_ops { @@ -569,7 +562,7 @@ struct iwl_trans_ops { bool configure_scd); /* 22000 functions */ int (*txq_alloc)(struct iwl_trans *trans, - struct iwl_tx_queue_cfg_cmd *cmd, + __le16 flags, u8 sta_id, u8 tid, int cmd_id, int size, unsigned int queue_wdg_timeout); void (*txq_free)(struct iwl_trans *trans, int queue); @@ -611,8 +604,6 @@ struct iwl_trans_ops { struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans, const struct iwl_fw_dbg_trigger_tlv *trigger); - - void (*dump_regs)(struct iwl_trans *trans); }; /** @@ -688,6 +679,19 @@ enum iwl_plat_pm_mode { * enter/exit (in msecs). */ #define IWL_TRANS_IDLE_TIMEOUT 2000 +#define IWL_MAX_DEBUG_ALLOCATIONS 1 + +/** + * struct iwl_dram_data + * @physical: page phy pointer + * @block: pointer to the allocated block/page + * @size: size of the block/page + */ +struct iwl_dram_data { + dma_addr_t physical; + void *block; + int size; +}; /** * struct iwl_trans - transport common data @@ -721,7 +725,9 @@ enum iwl_plat_pm_mode { * @dbg_dest_tlv: points to the destination TLV for debug * @dbg_conf_tlv: array of pointers to configuration TLVs for debug * @dbg_trigger_tlv: array of pointers to triggers TLVs for debug - * @dbg_dest_reg_num: num of reg_ops in %dbg_dest_tlv + * @dbg_n_dest_reg: num of reg_ops in %dbg_dest_tlv + * @num_blocks: number of blocks in fw_mon + * @fw_mon: address of the buffers for firmware monitor * @system_pm_mode: the system-wide power management mode in use. * This mode is set dynamically, depending on the WoWLAN values * configured from the userspace at runtime. 
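struct iwl_dram_data pairs the kernel virtual address of a buffer with the DMA address the device must be programmed with, and the trans->fw_mon[] array added just below keeps up to IWL_MAX_DEBUG_ALLOCATIONS such blocks for the firmware monitor. A hedged sketch of populating one entry (the retry-with-smaller-size logic a real allocator would need is omitted):

	struct iwl_dram_data *blk = &trans->fw_mon[trans->num_blocks];

	blk->block = dma_alloc_coherent(trans->dev, size,
					&blk->physical, GFP_KERNEL);
	if (blk->block) {
		blk->size = size;
		trans->num_blocks++;
	}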
@@ -772,7 +778,9 @@ struct iwl_trans { const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX]; struct iwl_fw_dbg_trigger_tlv * const *dbg_trigger_tlv; u32 dbg_dump_mask; - u8 dbg_dest_reg_num; + u8 dbg_n_dest_reg; + int num_blocks; + struct iwl_dram_data fw_mon[IWL_MAX_DEBUG_ALLOCATIONS]; enum iwl_plat_pm_mode system_pm_mode; enum iwl_plat_pm_mode runtime_pm_mode; @@ -897,12 +905,6 @@ iwl_trans_dump_data(struct iwl_trans *trans, return trans->ops->dump_data(trans, trigger); } -static inline void iwl_trans_dump_regs(struct iwl_trans *trans) -{ - if (trans->ops->dump_regs) - trans->ops->dump_regs(trans); -} - static inline struct iwl_device_cmd * iwl_trans_alloc_tx_cmd(struct iwl_trans *trans) { @@ -985,7 +987,7 @@ iwl_trans_txq_free(struct iwl_trans *trans, int queue) static inline int iwl_trans_txq_alloc(struct iwl_trans *trans, - struct iwl_tx_queue_cfg_cmd *cmd, + __le16 flags, u8 sta_id, u8 tid, int cmd_id, int size, unsigned int wdg_timeout) { @@ -999,7 +1001,8 @@ iwl_trans_txq_alloc(struct iwl_trans *trans, return -EIO; } - return trans->ops->txq_alloc(trans, cmd, cmd_id, size, wdg_timeout); + return trans->ops->txq_alloc(trans, flags, sta_id, tid, + cmd_id, size, wdg_timeout); } static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/binding.c b/drivers/net/wireless/intel/iwlwifi/mvm/binding.c index 75d35f6b041e..4094a4158032 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/binding.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/binding.c @@ -17,11 +17,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c index 016e03a5034f..730e37744dc0 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c @@ -17,11 +17,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. 
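With the reworked txq_alloc, callers hand the transport the raw queue parameters instead of a pre-built struct iwl_tx_queue_cfg_cmd, and the transport formats the command itself. An example call through the inline wrapper above (the flag and command-ID names are assumptions taken from the MVM TX-queue code, and the sizes are illustrative):

	int queue;

	queue = iwl_trans_txq_alloc(mvm->trans,
				    cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE),
				    sta_id, tid, SCD_QUEUE_CFG, queue_size,
				    wdg_timeout);
	if (queue < 0)	/* negative errno on failure, queue number otherwise */
		IWL_ERR(mvm, "Failed to allocate TX queue: %d\n", queue);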
* @@ -331,7 +326,7 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, struct ieee80211_chanctx_conf *chanctx_conf; /* default smps_mode is AUTOMATIC - only used for client modes */ enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_AUTOMATIC; - u32 bt_activity_grading; + u32 bt_activity_grading, min_ag_for_static_smps; int ave_rssi; lockdep_assert_held(&mvm->mutex); @@ -363,8 +358,13 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, return; } + if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2)) + min_ag_for_static_smps = BT_VERY_HIGH_TRAFFIC; + else + min_ag_for_static_smps = BT_HIGH_TRAFFIC; + bt_activity_grading = le32_to_cpu(data->notif->bt_activity_grading); - if (bt_activity_grading >= BT_HIGH_TRAFFIC) + if (bt_activity_grading >= min_ag_for_static_smps) smps_mode = IEEE80211_SMPS_STATIC; else if (bt_activity_grading >= BT_LOW_TRAFFIC) smps_mode = IEEE80211_SMPS_DYNAMIC; @@ -691,6 +691,15 @@ bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm, return bt_activity >= BT_LOW_TRAFFIC; } +u8 iwl_mvm_bt_coex_get_single_ant_msk(struct iwl_mvm *mvm, u8 enabled_ants) +{ + if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2) && + (mvm->cfg->non_shared_ant & enabled_ants)) + return mvm->cfg->non_shared_ant; + + return first_antenna(enabled_ants); +} + u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, struct ieee80211_tx_info *info, u8 ac) { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h index d61ff66ce07b..d96ada3c06fc 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h @@ -18,11 +18,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. 
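Under Coex Schema 2, iwl_mvm_bt_coex_get_single_ant_msk() prefers the board's non-shared antenna whenever it is among the enabled ones, instead of always falling back to the first antenna. Typical use when a single TX antenna must be picked while BT is active (iwl_mvm_get_valid_tx_ant() is an assumed helper name for the driver's valid-TX-antenna mask):

	u8 ant;

	ant = iwl_mvm_bt_coex_get_single_ant_msk(mvm,
						 iwl_mvm_get_valid_tx_ant(mvm));
	/* 'ant' is a one-bit antenna mask that steers clear of the
	 * BT-shared antenna when the firmware advertises COEX_SCHEMA_2 */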
* diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index 79bdae994822..210be26aadaa 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -434,23 +434,13 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u8 chains_static, chains_dynamic; struct cfg80211_chan_def chandef; int ret, i; - struct iwl_binding_cmd binding_cmd = {}; + struct iwl_binding_cmd_v1 binding_cmd = {}; struct iwl_time_quota_cmd quota_cmd = {}; struct iwl_time_quota_data *quota; u32 status; - int size; - - if (fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT)) { - size = sizeof(binding_cmd); - if (mvmvif->phy_ctxt->channel->band == NL80211_BAND_2GHZ || - !iwl_mvm_is_cdb_supported(mvm)) - binding_cmd.lmac_id = cpu_to_le32(IWL_LMAC_24G_INDEX); - else - binding_cmd.lmac_id = cpu_to_le32(IWL_LMAC_5G_INDEX); - } else { - size = IWL_BINDING_CMD_SIZE_V1; - } + + if (WARN_ON_ONCE(iwl_mvm_is_cdb_supported(mvm))) + return -EINVAL; /* add back the PHY */ if (WARN_ON(!mvmvif->phy_ctxt)) @@ -497,7 +487,8 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif, status = 0; ret = iwl_mvm_send_cmd_pdu_status(mvm, BINDING_CONTEXT_CMD, - size, &binding_cmd, &status); + IWL_BINDING_CMD_SIZE_V1, &binding_cmd, + &status); if (ret) { IWL_ERR(mvm, "Failed to add binding: %d\n", ret); return ret; @@ -1042,7 +1033,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw, * the recording automatically before entering D3. This can * be removed once the FW starts doing that. */ - iwl_fw_dbg_stop_recording(&mvm->fwrt); + _iwl_fw_dbg_stop_recording(mvm->fwrt.trans, NULL); /* must be last -- this switches firmware state */ ret = iwl_mvm_send_cmd(mvm, &d3_cfg_cmd); @@ -1362,7 +1353,7 @@ static void iwl_mvm_set_key_rx_seq(struct iwl_mvm *mvm, struct ieee80211_key_conf *key, struct iwl_wowlan_status *status) { - union iwl_all_tsc_rsc *rsc = &status->gtk.rsc.all_tsc_rsc; + union iwl_all_tsc_rsc *rsc = &status->gtk[0].rsc.all_tsc_rsc; switch (key->cipher) { case WLAN_CIPHER_SUITE_CCMP: @@ -1419,7 +1410,8 @@ static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw, */ if (sta) { struct ieee80211_key_seq seq = {}; - union iwl_all_tsc_rsc *sc = &data->status->gtk.rsc.all_tsc_rsc; + union iwl_all_tsc_rsc *sc = + &data->status->gtk[0].rsc.all_tsc_rsc; if (data->find_phase) return; @@ -1501,22 +1493,24 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm, u8 key[32]; } conf = { .conf.cipher = gtkdata.cipher, - .conf.keyidx = status->gtk.key_index, + .conf.keyidx = + iwlmvm_wowlan_gtk_idx(&status->gtk[0]), }; + __be64 replay_ctr; switch (gtkdata.cipher) { case WLAN_CIPHER_SUITE_CCMP: conf.conf.keylen = WLAN_KEY_LEN_CCMP; - memcpy(conf.conf.key, status->gtk.decrypt_key, + memcpy(conf.conf.key, status->gtk[0].key, WLAN_KEY_LEN_CCMP); break; case WLAN_CIPHER_SUITE_TKIP: conf.conf.keylen = WLAN_KEY_LEN_TKIP; - memcpy(conf.conf.key, status->gtk.decrypt_key, 16); + memcpy(conf.conf.key, status->gtk[0].key, 16); /* leave TX MIC key zeroed, we don't use it anyway */ memcpy(conf.conf.key + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY, - status->gtk.tkip_mic_key, 8); + status->gtk[0].tkip_mic_key, 8); break; } @@ -1524,11 +1518,10 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm, if (IS_ERR(key)) return false; iwl_mvm_set_key_rx_seq(mvm, key, status); - } - if (status->num_of_gtk_rekeys) { - __be64 replay_ctr = + replay_ctr = 
cpu_to_be64(le64_to_cpu(status->replay_ctr)); + ieee80211_gtk_rekey_notify(vif, vif->bss_conf.bssid, (void *)&replay_ctr, GFP_KERNEL); } @@ -1541,6 +1534,107 @@ out: return true; } +struct iwl_wowlan_status *iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm) +{ + struct iwl_wowlan_status *v7, *status; + struct iwl_host_cmd cmd = { + .id = WOWLAN_GET_STATUSES, + .flags = CMD_WANT_SKB, + }; + int ret, len, status_size; + + lockdep_assert_held(&mvm->mutex); + + ret = iwl_mvm_send_cmd(mvm, &cmd); + if (ret) { + IWL_ERR(mvm, "failed to query wakeup status (%d)\n", ret); + return ERR_PTR(ret); + } + + if (!fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_WOWLAN_KEY_MATERIAL)) { + struct iwl_wowlan_status_v6 *v6 = (void *)cmd.resp_pkt->data; + int data_size; + + status_size = sizeof(*v6); + len = iwl_rx_packet_payload_len(cmd.resp_pkt); + + if (len < status_size) { + IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); + status = ERR_PTR(-EIO); + goto out_free_resp; + } + + data_size = ALIGN(le32_to_cpu(v6->wake_packet_bufsize), 4); + + if (len != (status_size + data_size)) { + IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); + status = ERR_PTR(-EIO); + goto out_free_resp; + } + + status = kzalloc(sizeof(*status) + data_size, GFP_KERNEL); + if (!status) + goto out_free_resp; + + BUILD_BUG_ON(sizeof(v6->gtk.decrypt_key) > + sizeof(status->gtk[0].key)); + BUILD_BUG_ON(sizeof(v6->gtk.tkip_mic_key) > + sizeof(status->gtk[0].tkip_mic_key)); + + /* copy GTK info to the right place */ + memcpy(status->gtk[0].key, v6->gtk.decrypt_key, + sizeof(v6->gtk.decrypt_key)); + memcpy(status->gtk[0].tkip_mic_key, v6->gtk.tkip_mic_key, + sizeof(v6->gtk.tkip_mic_key)); + memcpy(&status->gtk[0].rsc, &v6->gtk.rsc, + sizeof(status->gtk[0].rsc)); + + /* hardcode the key length to 16 since v6 only supports 16 */ + status->gtk[0].key_len = 16; + + /* + * The key index only uses 2 bits (values 0 to 3) and + * we always set bit 7 which means this is the + * currently used key. 
+ */ + status->gtk[0].key_flags = v6->gtk.key_index | BIT(7); + + status->replay_ctr = v6->replay_ctr; + + /* everything starting from pattern_number is identical */ + memcpy(&status->pattern_number, &v6->pattern_number, + offsetof(struct iwl_wowlan_status, wake_packet) - + offsetof(struct iwl_wowlan_status, pattern_number) + + data_size); + + goto out_free_resp; + } + + v7 = (void *)cmd.resp_pkt->data; + status_size = sizeof(*v7); + len = iwl_rx_packet_payload_len(cmd.resp_pkt); + + if (len < status_size) { + IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); + status = ERR_PTR(-EIO); + goto out_free_resp; + } + + if (len != (status_size + + ALIGN(le32_to_cpu(v7->wake_packet_bufsize), 4))) { + IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); + status = ERR_PTR(-EIO); + goto out_free_resp; + } + + status = kmemdup(v7, len, GFP_KERNEL); + +out_free_resp: + iwl_free_resp(&cmd); + return status; +} + static struct iwl_wowlan_status * iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { @@ -1550,12 +1644,7 @@ iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, struct ieee80211_vif *vif) u32 valid; u32 error_id; } err_info; - struct iwl_host_cmd cmd = { - .id = WOWLAN_GET_STATUSES, - .flags = CMD_WANT_SKB, - }; - struct iwl_wowlan_status *status, *fw_status; - int ret, len, status_size; + int ret; iwl_trans_read_mem_bytes(mvm->trans, base, &err_info, sizeof(err_info)); @@ -1578,34 +1667,7 @@ iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, struct ieee80211_vif *vif) if (ret) IWL_ERR(mvm, "failed to query offload statistics (%d)\n", ret); - ret = iwl_mvm_send_cmd(mvm, &cmd); - if (ret) { - IWL_ERR(mvm, "failed to query status (%d)\n", ret); - return ERR_PTR(ret); - } - - status_size = sizeof(*fw_status); - - len = iwl_rx_packet_payload_len(cmd.resp_pkt); - if (len < status_size) { - IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); - fw_status = ERR_PTR(-EIO); - goto out_free_resp; - } - - status = (void *)cmd.resp_pkt->data; - if (len != (status_size + - ALIGN(le32_to_cpu(status->wake_packet_bufsize), 4))) { - IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); - fw_status = ERR_PTR(-EIO); - goto out_free_resp; - } - - fw_status = kmemdup(status, len, GFP_KERNEL); - -out_free_resp: - iwl_free_resp(&cmd); - return fw_status; + return iwl_mvm_send_wowlan_get_status(mvm); } /* releases the MVM mutex */ @@ -1883,6 +1945,7 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test) goto err; } + iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt); /* query SRAM first in case we want event logging */ iwl_mvm_read_d3_sram(mvm); @@ -2117,6 +2180,8 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file) mvm->d3_test_active = false; + iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt); + rtnl_lock(); __iwl_mvm_resume(mvm, true); rtnl_unlock(); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c index 798605c4f122..1aa6c7e93088 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c @@ -18,11 +18,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. 
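iwl_mvm_send_wowlan_get_status() above up-converts a v6 status into the current layout: the GTK material is copied field by field, and everything from pattern_number up to (but excluding) wake_packet, plus the trailing wake-packet bytes, moves in a single memcpy sized by an offsetof() difference. A minimal userspace sketch of that idiom with hypothetical struct layouts; the trick only works because the copied field span is byte-identical in both versions:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical old/new layouts: only the leading field differs, the
 * tail (b, c, payload) is laid out identically in both versions. */
struct old_fmt { int a_old; int b; int c; char payload[]; };
struct new_fmt { long a_new[2]; int b; int c; char payload[]; };

static void migrate(struct new_fmt *dst, const struct old_fmt *src,
		    size_t payload_len)
{
	/* everything from b up to payload, plus the payload itself */
	memcpy(&dst->b, &src->b,
	       offsetof(struct new_fmt, payload) -
	       offsetof(struct new_fmt, b) + payload_len);
}

int main(void)
{
	struct old_fmt *o = malloc(sizeof(*o) + 4);
	struct new_fmt *n = calloc(1, sizeof(*n) + 4);

	if (!o || !n)
		return 1;
	o->b = 1;
	o->c = 2;
	memcpy(o->payload, "abcd", 4);
	migrate(n, o, 4);
	printf("%d %d %.4s\n", n->b, n->c, n->payload);	/* 1 2 abcd */
	free(o);
	free(n);
	return 0;
}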
* diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index 05b77419953c..3b6b3d8fb961 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -19,11 +19,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * @@ -671,16 +666,11 @@ iwl_dbgfs_bt_force_ant_write(struct iwl_mvm *mvm, char *buf, }; int ret, bt_force_ant_mode; - for (bt_force_ant_mode = 0; - bt_force_ant_mode < ARRAY_SIZE(modes_str); - bt_force_ant_mode++) { - if (!strcmp(buf, modes_str[bt_force_ant_mode])) - break; - } - - if (bt_force_ant_mode >= ARRAY_SIZE(modes_str)) - return -EINVAL; + ret = match_string(modes_str, ARRAY_SIZE(modes_str), buf); + if (ret < 0) + return ret; + bt_force_ant_mode = ret; ret = 0; mutex_lock(&mvm->mutex); if (mvm->bt_force_ant_mode == bt_force_ant_mode) @@ -1733,6 +1723,35 @@ iwl_dbgfs_send_echo_cmd_write(struct iwl_mvm *mvm, char *buf, } static ssize_t +iwl_dbgfs_he_sniffer_params_write(struct iwl_mvm *mvm, char *buf, + size_t count, loff_t *ppos) +{ + struct iwl_he_monitor_cmd he_mon_cmd = {}; + u32 aid; + int ret; + + if (!iwl_mvm_firmware_running(mvm)) + return -EIO; + + ret = sscanf(buf, "%x %2hhx:%2hhx:%2hhx:%2hhx:%2hhx:%2hhx", &aid, + &he_mon_cmd.bssid[0], &he_mon_cmd.bssid[1], + &he_mon_cmd.bssid[2], &he_mon_cmd.bssid[3], + &he_mon_cmd.bssid[4], &he_mon_cmd.bssid[5]); + if (ret != 7) + return -EINVAL; + + he_mon_cmd.aid = cpu_to_le16(aid); + + mutex_lock(&mvm->mutex); + ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(HE_AIR_SNIFFER_CONFIG_CMD, + DATA_PATH_GROUP, 0), 0, + sizeof(he_mon_cmd), &he_mon_cmd); + mutex_unlock(&mvm->mutex); + + return ret ?: count; +} + +static ssize_t iwl_dbgfs_uapsd_noagg_bssids_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { @@ -1801,6 +1820,8 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram, 8); MVM_DEBUGFS_READ_FILE_OPS(sar_geo_profile); #endif +MVM_DEBUGFS_WRITE_FILE_OPS(he_sniffer_params, 32); + static ssize_t iwl_dbgfs_mem_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { @@ -1989,6 +2010,7 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir) #ifdef CONFIG_ACPI MVM_DEBUGFS_ADD_FILE(sar_geo_profile, dbgfs_dir, 0400); #endif + MVM_DEBUGFS_ADD_FILE(he_sniffer_params, mvm->debugfs_dir, 0200); if (!debugfs_create_bool("enable_scan_iteration_notif", 0600, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.h b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.h index ede6ef8d390e..a83d252c0602 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.h @@ -17,11 +17,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. 
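The iwl_dbgfs_bt_force_ant_write() hunk above swaps an open-coded lookup loop for match_string(), which returns the index of the matching array entry or -EINVAL. A userspace re-implementation of roughly the same contract, just to show what the debugfs handler now relies on (the real helper lives in lib/string.c):

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* index of the matching entry on success, -EINVAL otherwise,
 * with a NULL entry terminating the search early */
static int match_string(const char * const *array, size_t n,
			const char *string)
{
	size_t i;

	for (i = 0; i < n; i++) {
		if (!array[i])
			break;
		if (!strcmp(array[i], string))
			return (int)i;
	}
	return -EINVAL;
}

int main(void)
{
	static const char * const modes_str[] = { "dis", "bt", "wifi" };

	printf("%d %d\n",
	       match_string(modes_str, 3, "bt"),	/* 1 */
	       match_string(modes_str, 3, "nope"));	/* -EINVAL (-22 on Linux) */
	return 0;
}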
* diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h index e8e74dd558f7..143c7fcaea41 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h @@ -18,11 +18,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 6bb1a99a197a..c5df73231ba3 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -19,11 +19,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * @@ -304,6 +299,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, enum iwl_ucode_type old_type = mvm->fwrt.cur_fw_img; static const u16 alive_cmd[] = { MVM_ALIVE }; + set_bit(IWL_FWRT_STATUS_WAIT_ALIVE, &mvm->fwrt.status); if (ucode_type == IWL_UCODE_REGULAR && iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE) && !(fw_has_capa(&mvm->fw->ucode_capa, @@ -374,6 +370,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, atomic_set(&mvm->mac80211_queue_stop_count[i], 0); set_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status); + clear_bit(IWL_FWRT_STATUS_WAIT_ALIVE, &mvm->fwrt.status); return 0; } @@ -704,8 +701,12 @@ static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm) enabled = !!(wifi_pkg->package.elements[1].integer.value); n_profiles = wifi_pkg->package.elements[2].integer.value; - /* in case of BIOS bug */ - if (n_profiles <= 0) { + /* + * Check the validity of n_profiles. The EWRD profiles start + * from index 1, so the maximum value allowed here is + * ACPI_SAR_PROFILE_NUM - 1.
+ */ + if (n_profiles <= 0 || n_profiles >= ACPI_SAR_PROFILE_NUM) { ret = -EINVAL; goto out_free; } @@ -773,19 +774,28 @@ out_free: int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b) { - struct iwl_dev_tx_power_cmd cmd = { - .v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS), - }; + union { + struct iwl_dev_tx_power_cmd v5; + struct iwl_dev_tx_power_cmd_v4 v4; + } cmd; int i, j, idx; int profs[ACPI_SAR_NUM_CHAIN_LIMITS] = { prof_a, prof_b }; - int len = sizeof(cmd); + int len; BUILD_BUG_ON(ACPI_SAR_NUM_CHAIN_LIMITS < 2); BUILD_BUG_ON(ACPI_SAR_NUM_CHAIN_LIMITS * ACPI_SAR_NUM_SUB_BANDS != ACPI_SAR_TABLE_SIZE); - if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TX_POWER_ACK)) - len = sizeof(cmd.v3); + cmd.v5.v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS); + + if (fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_REDUCE_TX_POWER)) + len = sizeof(cmd.v5); + else if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_TX_POWER_ACK)) + len = sizeof(cmd.v4); + else + len = sizeof(cmd.v4.v3); for (i = 0; i < ACPI_SAR_NUM_CHAIN_LIMITS; i++) { struct iwl_mvm_sar_profile *prof; @@ -812,7 +822,7 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b) IWL_DEBUG_RADIO(mvm, " Chain[%d]:\n", i); for (j = 0; j < ACPI_SAR_NUM_SUB_BANDS; j++) { idx = (i * ACPI_SAR_NUM_SUB_BANDS) + j; - cmd.v3.per_chain_restriction[i][j] = + cmd.v5.v3.per_chain_restriction[i][j] = cpu_to_le16(prof->table[idx]); IWL_DEBUG_RADIO(mvm, " Band[%d] = %d * .125dBm\n", j, prof->table[idx]); @@ -1018,7 +1028,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm) mvm->fwrt.dump.conf = FW_DBG_INVALID; /* if we have a destination, assume EARLY START */ - if (mvm->fw->dbg_dest_tlv) + if (mvm->fw->dbg.dest_tlv) mvm->fwrt.dump.conf = FW_DBG_START_FROM_ALIVE; iwl_fw_start_dbg_conf(&mvm->fwrt, FW_DBG_START_FROM_ALIVE); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/led.c b/drivers/net/wireless/intel/iwlwifi/mvm/led.c index b27269504a62..9bb1de1cad64 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/led.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/led.c @@ -17,11 +17,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index b3fd20502abb..6486cfb33f40 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -8,6 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -18,11 +19,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
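iwl_mvm_sar_select_profile() above (and iwl_mvm_set_tx_power() later in this series) now builds the TX-power command in a union of the v5 and v4 layouts and picks how many bytes to send from the firmware's advertised API/capability bits. A hedged standalone sketch of the pattern with invented structs and version flags; it relies on each older layout being a strict byte prefix of the newest one:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Invented command layouts: each newer version appends fields. */
struct cmd_v3 { uint32_t set_mode; uint32_t pwr_restriction; };
struct cmd_v4 { struct cmd_v3 v3; uint32_t extra_v4; };
struct cmd_v5 { struct cmd_v3 v3; uint32_t extra_v4; uint32_t extra_v5; };

/* pick how many bytes of the union the firmware actually understands */
static size_t cmd_len(bool has_v5_api, bool has_v4_capa)
{
	if (has_v5_api)
		return sizeof(struct cmd_v5);
	if (has_v4_capa)
		return sizeof(struct cmd_v4);
	return sizeof(struct cmd_v3);
}

int main(void)
{
	union {
		struct cmd_v5 v5;
		struct cmd_v4 v4;
	} cmd = {
		/* shared v3 fields are always filled via the newest view */
		.v5.v3.set_mode = 1,
	};

	/* e.g. firmware with only the v4 capability bit set */
	printf("send %zu of %zu bytes\n", cmd_len(false, true), sizeof(cmd));
	return 0;
}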
* - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * @@ -35,6 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -85,6 +82,10 @@ const u8 iwl_mvm_ac_to_gen2_tx_fifo[] = { IWL_GEN2_EDCA_TX_FIFO_VI, IWL_GEN2_EDCA_TX_FIFO_BE, IWL_GEN2_EDCA_TX_FIFO_BK, + IWL_GEN2_TRIG_TX_FIFO_VO, + IWL_GEN2_TRIG_TX_FIFO_VI, + IWL_GEN2_TRIG_TX_FIFO_BE, + IWL_GEN2_TRIG_TX_FIFO_BK, }; struct iwl_mvm_mac_iface_iterator_data { @@ -1486,12 +1487,11 @@ static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac, IWL_MVM_MISSED_BEACONS_THRESHOLD) ieee80211_beacon_loss(vif); - if (!iwl_fw_dbg_trigger_enabled(mvm->fw, - FW_DBG_TRIGGER_MISSED_BEACONS)) + trigger = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), + FW_DBG_TRIGGER_MISSED_BEACONS); + if (!trigger) return; - trigger = iwl_fw_dbg_get_trigger(mvm->fw, - FW_DBG_TRIGGER_MISSED_BEACONS); bcon_trig = (void *)trigger->data; stop_trig_missed_bcon = le32_to_cpu(bcon_trig->stop_consec_missed_bcon); stop_trig_missed_bcon_since_rx = @@ -1499,11 +1499,6 @@ static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac, /* TODO: implement start trigger */ - if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, - ieee80211_vif_to_wdev(vif), - trigger)) - return; - if (rx_missed_bcon_since_rx >= stop_trig_missed_bcon_since_rx || rx_missed_bcon >= stop_trig_missed_bcon) iwl_fw_dbg_collect_trig(&mvm->fwrt, trigger, NULL); @@ -1568,6 +1563,65 @@ void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm, ieee80211_rx_napi(mvm->hw, NULL, skb, NULL); } +static void iwl_mvm_probe_resp_data_iter(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_probe_resp_data_notif *notif = _data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_probe_resp_data *old_data, *new_data; + + if (mvmvif->id != (u16)le32_to_cpu(notif->mac_id)) + return; + + new_data = kzalloc(sizeof(*new_data), GFP_KERNEL); + if (!new_data) + return; + + memcpy(&new_data->notif, notif, sizeof(new_data->notif)); + + /* noa_attr contains 1 reserved byte, need to subtract it */ + new_data->noa_len = sizeof(struct ieee80211_vendor_ie) + + sizeof(new_data->notif.noa_attr) - 1; + + /* + * If it's a one time NoA, only one descriptor is needed, + * adjust the length according to len_low.
+ */ + if (new_data->notif.noa_attr.len_low == + sizeof(struct ieee80211_p2p_noa_desc) + 2) + new_data->noa_len -= sizeof(struct ieee80211_p2p_noa_desc); + + old_data = rcu_dereference_protected(mvmvif->probe_resp_data, + lockdep_is_held(&mvmvif->mvm->mutex)); + rcu_assign_pointer(mvmvif->probe_resp_data, new_data); + + if (old_data) + kfree_rcu(old_data, rcu_head); + + if (notif->csa_counter != IWL_PROBE_RESP_DATA_NO_CSA && + notif->csa_counter >= 1) + ieee80211_csa_set_counter(vif, notif->csa_counter); +} + +void iwl_mvm_probe_resp_data_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_probe_resp_data_notif *notif = (void *)pkt->data; + int len = iwl_rx_packet_payload_len(pkt); + + if (WARN_ON_ONCE(len < sizeof(*notif))) + return; + + IWL_DEBUG_INFO(mvm, "Probe response data notif: noa %d, csa %d\n", + notif->noa_active, notif->csa_counter); + + ieee80211_iterate_active_interfaces(mvm->hw, + IEEE80211_IFACE_ITER_ACTIVE, + iwl_mvm_probe_resp_data_iter, + notif); +} + void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index b15b0d84bb7e..505b0385d800 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -19,11 +19,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * @@ -559,8 +554,6 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) hw->wiphy->max_remain_on_channel_duration = 10000; hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL; - /* we can compensate an offset of up to 3 channels = 15 MHz */ - hw->wiphy->max_adj_channel_rssi_comp = 3 * 5; /* Extract MAC address */ memcpy(mvm->addresses[0].addr, mvm->nvm_data->hw_addr, ETH_ALEN); @@ -864,16 +857,13 @@ iwl_mvm_ampdu_check_trigger(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_fw_dbg_trigger_tlv *trig; struct iwl_fw_dbg_trigger_ba *ba_trig; - if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA)) + trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), + FW_DBG_TRIGGER_BA); + if (!trig) return; - trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA); ba_trig = (void *)trig->data; - if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, - ieee80211_vif_to_wdev(vif), trig)) - return; - switch (action) { case IEEE80211_AMPDU_TX_OPERATIONAL: { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); @@ -1035,6 +1025,7 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac, mvmvif->phy_ctxt = NULL; memset(&mvmvif->bf_data, 0, sizeof(mvmvif->bf_data)); + memset(&mvmvif->probe_resp_data, 0, sizeof(mvmvif->probe_resp_data)); } static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) @@ -1124,7 +1115,9 @@ int __iwl_mvm_mac_start(struct iwl_mvm *mvm) * would do. 
*/ clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); +#ifdef CONFIG_PM iwl_mvm_d0i3_enable_tx(mvm, NULL); +#endif } return ret; @@ -1162,7 +1155,9 @@ static void iwl_mvm_restart_complete(struct iwl_mvm *mvm) mutex_lock(&mvm->mutex); clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); +#ifdef CONFIG_PM iwl_mvm_d0i3_enable_tx(mvm, NULL); +#endif ret = iwl_mvm_update_quotas(mvm, true, NULL); if (ret) IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n", @@ -1233,12 +1228,15 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm) iwl_mvm_del_aux_sta(mvm); /* - * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete() - * won't be called in this case). + * Clear IN_HW_RESTART and HW_RESTART_REQUESTED flag when stopping the + * hw (as restart_complete() won't be called in this case) and mac80211 + * won't execute the restart. * But make sure to cleanup interfaces that have gone down before/during * HW restart was requested. */ - if (test_and_clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) + if (test_and_clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) || + test_and_clear_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, + &mvm->status)) ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm); @@ -1308,19 +1306,28 @@ static struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm) static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif, s16 tx_power) { - struct iwl_dev_tx_power_cmd cmd = { - .v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC), - .v3.mac_context_id = + int len; + union { + struct iwl_dev_tx_power_cmd v5; + struct iwl_dev_tx_power_cmd_v4 v4; + } cmd = { + .v5.v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC), + .v5.v3.mac_context_id = cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id), - .v3.pwr_restriction = cpu_to_le16(8 * tx_power), + .v5.v3.pwr_restriction = cpu_to_le16(8 * tx_power), }; - int len = sizeof(cmd); if (tx_power == IWL_DEFAULT_MAX_TX_POWER) - cmd.v3.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER); + cmd.v5.v3.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER); - if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TX_POWER_ACK)) - len = sizeof(cmd.v3); + if (fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_REDUCE_TX_POWER)) + len = sizeof(cmd.v5); + else if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_TX_POWER_ACK)) + len = sizeof(cmd.v4); + else + len = sizeof(cmd.v4.v3); return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd); } @@ -1333,6 +1340,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, int ret; mvmvif->mvm = mvm; + RCU_INIT_POINTER(mvmvif->probe_resp_data, NULL); /* * make sure D0i3 exit is completed, otherwise a target access @@ -1497,6 +1505,7 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw, { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_probe_resp_data *probe_data; iwl_mvm_prepare_mac_removal(mvm, vif); @@ -1506,6 +1515,12 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw, mutex_lock(&mvm->mutex); + probe_data = rcu_dereference_protected(mvmvif->probe_resp_data, + lockdep_is_held(&mvm->mutex)); + RCU_INIT_POINTER(mvmvif->probe_resp_data, NULL); + if (probe_data) + kfree_rcu(probe_data, rcu_head); + if (mvm->bf_allowed_vif == mvmvif) { mvm->bf_allowed_vif = NULL; vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER | @@ -1978,10 +1993,6 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm, 
sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_LINK_ADAP_BOTH); } - if (sta->he_cap.he_cap_elem.mac_cap_info[2] & - IEEE80211_HE_MAC_CAP2_UL_MU_RESP_SCHED) - sta_ctxt_cmd.htc_flags |= - cpu_to_le32(IWL_HE_HTC_UL_MU_RESP_SCHED); if (sta->he_cap.he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_BSR) sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_BSR_SUPP); if (sta->he_cap.he_cap_elem.mac_cap_info[3] & @@ -2459,6 +2470,9 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw, iwl_mvm_mac_ctxt_remove(mvm, vif); + kfree(mvmvif->ap_wep_key); + mvmvif->ap_wep_key = NULL; + mutex_unlock(&mvm->mutex); } @@ -2788,14 +2802,12 @@ iwl_mvm_tdls_check_trigger(struct iwl_mvm *mvm, struct iwl_fw_dbg_trigger_tlv *trig; struct iwl_fw_dbg_trigger_tdls *tdls_trig; - if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TDLS)) + trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), + FW_DBG_TRIGGER_TDLS); + if (!trig) return; - trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TDLS); tdls_trig = (void *)trig->data; - if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, - ieee80211_vif_to_wdev(vif), trig)) - return; if (!(tdls_trig->action_bitmap & BIT(action))) return; @@ -2931,7 +2943,8 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); } - iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band); + iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, + false); ret = iwl_mvm_update_sta(mvm, vif, sta); } else if (old_state == IEEE80211_STA_ASSOC && new_state == IEEE80211_STA_AUTHORIZED) { @@ -2947,9 +2960,16 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, /* enable beacon filtering */ WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); - iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band); + iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, + true); - ret = 0; + /* if wep is used, need to set the key for the station now */ + if (vif->type == NL80211_IFTYPE_AP && mvmvif->ap_wep_key) + ret = iwl_mvm_set_sta_key(mvm, vif, sta, + mvmvif->ap_wep_key, + STA_KEY_IDX_INVALID); + else + ret = 0; } else if (old_state == IEEE80211_STA_AUTHORIZED && new_state == IEEE80211_STA_ASSOC) { /* disable beacon filtering */ @@ -3132,8 +3152,15 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, switch (key->cipher) { case WLAN_CIPHER_SUITE_TKIP: - key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; - key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE; + if (!mvm->trans->cfg->gen2) { + key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; + key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE; + } else if (vif->type == NL80211_IFTYPE_STATION) { + key->flags |= IEEE80211_KEY_FLAG_PUT_MIC_SPACE; + } else { + IWL_DEBUG_MAC80211(mvm, "Use SW encryption for TKIP\n"); + return -EOPNOTSUPP; + } break; case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_GCMP: @@ -3148,13 +3175,17 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, break; case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: - /* For non-client mode, only use WEP keys for TX as we probably - * don't have a station yet anyway and would then have to keep - * track of the keys, linking them to each of the clients/peers - * as they appear. For now, don't do that, for performance WEP - * offload doesn't really matter much, but we need it for some - * other offload features in client mode. 
- */ + if (vif->type == NL80211_IFTYPE_AP) { + struct iwl_mvm_vif *mvmvif = + iwl_mvm_vif_from_mac80211(vif); + + mvmvif->ap_wep_key = kmemdup(key, + sizeof(*key) + key->keylen, + GFP_KERNEL); + if (!mvmvif->ap_wep_key) + return -ENOMEM; + } + if (vif->type != NL80211_IFTYPE_STATION) return 0; break; @@ -4458,14 +4489,12 @@ static void iwl_mvm_event_mlme_callback(struct iwl_mvm *mvm, struct iwl_fw_dbg_trigger_tlv *trig; struct iwl_fw_dbg_trigger_mlme *trig_mlme; - if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME)) + trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), + FW_DBG_TRIGGER_MLME); + if (!trig) return; - trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME); trig_mlme = (void *)trig->data; - if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, - ieee80211_vif_to_wdev(vif), trig)) - return; if (event->u.mlme.data == ASSOC_EVENT) { if (event->u.mlme.status == MLME_DENIED) @@ -4500,14 +4529,12 @@ static void iwl_mvm_event_bar_rx_callback(struct iwl_mvm *mvm, struct iwl_fw_dbg_trigger_tlv *trig; struct iwl_fw_dbg_trigger_ba *ba_trig; - if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA)) + trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), + FW_DBG_TRIGGER_BA); + if (!trig) return; - trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA); ba_trig = (void *)trig->data; - if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, - ieee80211_vif_to_wdev(vif), trig)) - return; if (!(le16_to_cpu(ba_trig->rx_bar) & BIT(event->u.ba.tid))) return; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index b3987a0a7018..8f71eeed50d9 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -19,11 +19,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * @@ -336,6 +331,18 @@ struct iwl_mvm_vif_bf_data { }; /** + * struct iwl_probe_resp_data - data for NoA/CSA updates + * @rcu_head: used for freeing the data on update + * @notif: notification data + * @noa_len: length of NoA attribute, calculated from the notification + */ +struct iwl_probe_resp_data { + struct rcu_head rcu_head; + struct iwl_probe_resp_data_notif notif; + int noa_len; +}; + +/** * struct iwl_mvm_vif - data per Virtual Interface, it is a MAC context * @id: between 0 and 3 * @color: to solve races upon MAC addition and removal @@ -365,6 +372,8 @@ struct iwl_mvm_vif_bf_data { * average signal of beacons retrieved from the firmware * @csa_failed: CSA failed to schedule time event, report an error later * @features: hw features active for this vif + * @probe_resp_data: data from FW notification to store NOA and CSA related + * data to be inserted into probe response. 
*/ struct iwl_mvm_vif { struct iwl_mvm *mvm; @@ -460,6 +469,9 @@ struct iwl_mvm_vif { /* TCP Checksum Offload */ netdev_features_t features; + + struct iwl_probe_resp_data __rcu *probe_resp_data; + struct ieee80211_key_conf *ap_wep_key; }; static inline struct iwl_mvm_vif * @@ -1229,6 +1241,11 @@ static inline bool iwl_mvm_is_oce_supported(struct iwl_mvm *mvm) return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_OCE); } +static inline bool iwl_mvm_is_frag_ebs_supported(struct iwl_mvm *mvm) +{ + return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_FRAG_EBS); +} + static inline bool iwl_mvm_enter_d0i3_on_suspend(struct iwl_mvm *mvm) { /* For now we only use this mode to differentiate between @@ -1602,6 +1619,8 @@ void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm, struct ieee80211_vif *vif); unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm, struct ieee80211_vif *exclude_vif); +void iwl_mvm_probe_resp_data_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); /* Bindings */ @@ -1685,7 +1704,7 @@ iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif) #endif /* CONFIG_IWLWIFI_DEBUGFS */ /* rate scaling */ -int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init); +int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool sync); void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg); int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate); void rs_update_last_rssi(struct iwl_mvm *mvm, @@ -1733,6 +1752,7 @@ void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw, void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int idx); extern const struct file_operations iwl_dbgfs_d3_test_ops; +struct iwl_wowlan_status *iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm); #ifdef CONFIG_PM int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm, struct ieee80211_vif *vif, @@ -1776,10 +1796,13 @@ void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type); void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type); int iwl_mvm_ref_sync(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type); bool iwl_mvm_ref_taken(struct iwl_mvm *mvm); + +#ifdef CONFIG_PM void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq); int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode); int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode); int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm); +#endif /* BT Coex */ int iwl_mvm_send_bt_init_conf(struct iwl_mvm *mvm); @@ -1796,6 +1819,7 @@ bool iwl_mvm_bt_coex_is_ant_avail(struct iwl_mvm *mvm, u8 ant); bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm); bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm, enum nl80211_band band); +u8 iwl_mvm_bt_coex_get_single_ant_msk(struct iwl_mvm *mvm, u8 enabled_ants); u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, struct ieee80211_tx_info *info, u8 ac); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c index cf48517944ec..3633f27d048a 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c @@ -19,11 +19,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
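mvm.h above adds an __rcu pointer for the NoA/CSA probe-response data. The notification path (see iwl_mvm_probe_resp_data_iter earlier) publishes a replacement buffer with rcu_assign_pointer() and retires the old one with kfree_rcu(), with writers serialized by mvm->mutex. A condensed kernel-style sketch of just that update step; 'obj' is a stand-in type, not the driver's struct, and this compiles only inside a kernel tree:

#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

/* 'obj' stands in for iwl_probe_resp_data; kernel context assumed */
struct obj {
	struct rcu_head rcu_head;
	/* ...payload updated on every notification... */
};

static void publish_obj(struct obj __rcu **slot, struct obj *new,
			struct mutex *mtx)
{
	struct obj *old;

	lockdep_assert_held(mtx);

	/* writers are serialized by mtx, so no rcu_read_lock() needed here */
	old = rcu_dereference_protected(*slot, lockdep_is_held(mtx));
	rcu_assign_pointer(*slot, new);	/* readers now see the new buffer */
	if (old)
		kfree_rcu(old, rcu_head);	/* freed after a grace period */
}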
* - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * @@ -482,15 +477,11 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2, u32 status; int resp_len, n_channels; u16 mcc; - bool resp_v2 = fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V2); if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm))) return ERR_PTR(-EOPNOTSUPP); cmd.len[0] = sizeof(struct iwl_mcc_update_cmd); - if (!resp_v2) - cmd.len[0] = sizeof(struct iwl_mcc_update_cmd_v1); IWL_DEBUG_LAR(mvm, "send MCC update to FW with '%c%c' src = %d\n", alpha2[0], alpha2[1], src_id); @@ -502,7 +493,8 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2, pkt = cmd.resp_pkt; /* Extract MCC response */ - if (resp_v2) { + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_MCC_UPDATE_11AX_SUPPORT)) { struct iwl_mcc_update_resp *mcc_resp = (void *)pkt->data; n_channels = __le32_to_cpu(mcc_resp->n_channels); @@ -514,9 +506,9 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2, goto exit; } } else { - struct iwl_mcc_update_resp_v1 *mcc_resp_v1 = (void *)pkt->data; + struct iwl_mcc_update_resp_v3 *mcc_resp_v3 = (void *)pkt->data; - n_channels = __le32_to_cpu(mcc_resp_v1->n_channels); + n_channels = __le32_to_cpu(mcc_resp_v3->n_channels); resp_len = sizeof(struct iwl_mcc_update_resp) + n_channels * sizeof(__le32); resp_cp = kzalloc(resp_len, GFP_KERNEL); @@ -525,12 +517,14 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2, goto exit; } - resp_cp->status = mcc_resp_v1->status; - resp_cp->mcc = mcc_resp_v1->mcc; - resp_cp->cap = mcc_resp_v1->cap; - resp_cp->source_id = mcc_resp_v1->source_id; - resp_cp->n_channels = mcc_resp_v1->n_channels; - memcpy(resp_cp->channels, mcc_resp_v1->channels, + resp_cp->status = mcc_resp_v3->status; + resp_cp->mcc = mcc_resp_v3->mcc; + resp_cp->cap = cpu_to_le16(mcc_resp_v3->cap); + resp_cp->source_id = mcc_resp_v3->source_id; + resp_cp->time = mcc_resp_v3->time; + resp_cp->geo_info = mcc_resp_v3->geo_info; + resp_cp->n_channels = mcc_resp_v3->n_channels; + memcpy(resp_cp->channels, mcc_resp_v3->channels, n_channels * sizeof(__le32)); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/offloading.c b/drivers/net/wireless/intel/iwlwifi/mvm/offloading.c index 6338d9cf7070..6d71e05626ad 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/offloading.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/offloading.c @@ -18,11 +18,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 0e26619fb330..0e2092526fae 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -19,11 +19,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
* - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * @@ -182,6 +177,9 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode) if (mvm->trans->cfg->device_family < IWL_DEVICE_FAMILY_8000) reg_val |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI; + if (iwl_fw_dbg_is_d3_debug_enabled(&mvm->fwrt)) + reg_val |= CSR_HW_IF_CONFIG_REG_D3_DEBUG; + iwl_trans_set_bits_mask(mvm->trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH | CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP | @@ -189,7 +187,8 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode) CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP | CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH | CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI | - CSR_HW_IF_CONFIG_REG_BIT_MAC_SI, + CSR_HW_IF_CONFIG_REG_BIT_MAC_SI | + CSR_HW_IF_CONFIG_REG_D3_DEBUG, reg_val); IWL_DEBUG_INFO(mvm, "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type, @@ -491,7 +490,9 @@ static const struct iwl_hcmd_arr iwl_mvm_groups[] = { /* this forward declaration can avoid to export the function */ static void iwl_mvm_async_handlers_wk(struct work_struct *wk); +#ifdef CONFIG_PM static void iwl_mvm_d0i3_exit_work(struct work_struct *wk); +#endif static u32 iwl_mvm_min_backoff(struct iwl_mvm *mvm) { @@ -564,10 +565,23 @@ static bool iwl_mvm_fwrt_fw_running(void *ctx) return iwl_mvm_firmware_running(ctx); } +static int iwl_mvm_fwrt_send_hcmd(void *ctx, struct iwl_host_cmd *host_cmd) +{ + struct iwl_mvm *mvm = (struct iwl_mvm *)ctx; + int ret; + + mutex_lock(&mvm->mutex); + ret = iwl_mvm_send_cmd(mvm, host_cmd); + mutex_unlock(&mvm->mutex); + + return ret; +} + static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = { .dump_start = iwl_mvm_fwrt_dump_start, .dump_end = iwl_mvm_fwrt_dump_end, .fw_running = iwl_mvm_fwrt_fw_running, + .send_hcmd = iwl_mvm_fwrt_send_hcmd, }; static struct iwl_op_mode * @@ -583,6 +597,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, }; int err, scan_size; u32 min_backoff; + enum iwl_amsdu_size rb_size_default; /* * We use IWL_MVM_STATION_COUNT to check the validity of the station @@ -602,9 +617,13 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, if (cfg->max_rx_agg_size) hw->max_rx_aggregation_subframes = cfg->max_rx_agg_size; + else + hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF; if (cfg->max_tx_agg_size) hw->max_tx_aggregation_subframes = cfg->max_tx_agg_size; + else + hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF; op_mode = hw->priv; @@ -661,7 +680,9 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk); INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk); +#ifdef CONFIG_PM INIT_WORK(&mvm->d0i3_exit_work, iwl_mvm_d0i3_exit_work); +#endif INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work); INIT_DELAYED_WORK(&mvm->scan_timeout_dwork, iwl_mvm_scan_timeout_wk); INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk); @@ -691,8 +712,16 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, trans_cfg.op_mode = op_mode; trans_cfg.no_reclaim_cmds = no_reclaim_cmds; trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds); + + if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) + rb_size_default = IWL_AMSDU_2K; + else + 
rb_size_default = IWL_AMSDU_4K; + switch (iwlwifi_mod_params.amsdu_size) { case IWL_AMSDU_DEF: + trans_cfg.rx_buf_size = rb_size_default; + break; case IWL_AMSDU_4K: trans_cfg.rx_buf_size = IWL_AMSDU_4K; break; @@ -705,16 +734,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, default: pr_err("%s: Unsupported amsdu_size: %d\n", KBUILD_MODNAME, iwlwifi_mod_params.amsdu_size); - trans_cfg.rx_buf_size = IWL_AMSDU_4K; - } - - /* the hardware splits the A-MSDU */ - if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) { - trans_cfg.rx_buf_size = IWL_AMSDU_2K; - /* TODO: remove when balanced power mode is fw supported */ - iwlmvm_mod_params.power_scheme = IWL_POWER_SCHEME_CAM; - } else if (mvm->cfg->mq_rx_supported) { - trans_cfg.rx_buf_size = IWL_AMSDU_4K; + trans_cfg.rx_buf_size = rb_size_default; } trans->wide_cmd_header = true; @@ -745,12 +765,12 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, iwl_trans_configure(mvm->trans, &trans_cfg); trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD; - trans->dbg_dest_tlv = mvm->fw->dbg_dest_tlv; - trans->dbg_dest_reg_num = mvm->fw->dbg_dest_reg_num; - memcpy(trans->dbg_conf_tlv, mvm->fw->dbg_conf_tlv, + trans->dbg_dest_tlv = mvm->fw->dbg.dest_tlv; + trans->dbg_n_dest_reg = mvm->fw->dbg.n_dest_reg; + memcpy(trans->dbg_conf_tlv, mvm->fw->dbg.conf_tlv, sizeof(trans->dbg_conf_tlv)); - trans->dbg_trigger_tlv = mvm->fw->dbg_trigger_tlv; - trans->dbg_dump_mask = mvm->fw->dbg_dump_mask; + trans->dbg_trigger_tlv = mvm->fw->dbg.trigger_tlv; + trans->dbg_dump_mask = mvm->fw->dbg.dump_mask; trans->iml = mvm->fw->iml; trans->iml_len = mvm->fw->iml_len; @@ -781,6 +801,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, mutex_lock(&mvm->mutex); iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE); err = iwl_run_init_mvm_ucode(mvm, true); + if (test_bit(IWL_FWRT_STATUS_WAIT_ALIVE, &mvm->fwrt.status)) + iwl_fw_alive_error_dump(&mvm->fwrt); if (!iwlmvm_mod_params.init_dbg || !err) iwl_mvm_stop_device(mvm); iwl_mvm_unref(mvm, IWL_MVM_REF_INIT_UCODE); @@ -950,15 +972,13 @@ static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm, struct iwl_fw_dbg_trigger_cmd *cmds_trig; int i; - if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_FW_NOTIF)) + trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL, + FW_DBG_TRIGGER_FW_NOTIF); + if (!trig) return; - trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_FW_NOTIF); cmds_trig = (void *)trig->data; - if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, NULL, trig)) - return; - for (i = 0; i < ARRAY_SIZE(cmds_trig->cmds); i++) { /* don't collect on CMD 0 */ if (!cmds_trig->cmds[i].cmd_id) @@ -1220,7 +1240,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error) */ if (!mvm->fw_restart && fw_error) { iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert, - NULL); + NULL, 0); } else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { struct iwl_mvm_reprobe *reprobe; @@ -1246,7 +1266,8 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error) INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk); schedule_work(&reprobe->work); } else if (mvm->fwrt.cur_fw_img == IWL_UCODE_REGULAR && - mvm->hw_registered) { + mvm->hw_registered && + !test_bit(STATUS_TRANS_DEAD, &mvm->trans->status)) { /* don't let the transport/FW power down */ iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN); @@ -1261,7 +1282,8 @@ static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode) { struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); - iwl_mvm_dump_nic_error_log(mvm); + if 
(!test_bit(STATUS_TRANS_DEAD, &mvm->trans->status)) + iwl_mvm_dump_nic_error_log(mvm); iwl_mvm_nic_restart(mvm, true); } @@ -1274,6 +1296,7 @@ static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode) iwl_mvm_nic_restart(mvm, true); } +#ifdef CONFIG_PM struct iwl_d0i3_iter_data { struct iwl_mvm *mvm; struct ieee80211_vif *connected_vif; @@ -1596,25 +1619,23 @@ out: static void iwl_mvm_d0i3_exit_work(struct work_struct *wk) { struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, d0i3_exit_work); - struct iwl_host_cmd get_status_cmd = { - .id = WOWLAN_GET_STATUSES, - .flags = CMD_HIGH_PRIO | CMD_WANT_SKB, - }; struct iwl_mvm_d0i3_exit_work_iter_data iter_data = { .mvm = mvm, }; struct iwl_wowlan_status *status; - int ret; u32 wakeup_reasons = 0; __le16 *qos_seq = NULL; mutex_lock(&mvm->mutex); - ret = iwl_mvm_send_cmd(mvm, &get_status_cmd); - if (ret) + + status = iwl_mvm_send_wowlan_get_status(mvm); + if (IS_ERR_OR_NULL(status)) { + /* set to NULL so we don't need to check before kfree'ing */ + status = NULL; goto out; + } - status = (void *)get_status_cmd.resp_pkt->data; wakeup_reasons = le32_to_cpu(status->wakeup_reasons); qos_seq = status->qos_seq_ctr; @@ -1633,8 +1654,7 @@ out: wakeup_reasons); /* qos_seq might point inside resp_pkt, so free it only now */ - if (get_status_cmd.resp_pkt) - iwl_free_resp(&get_status_cmd); + kfree(status); /* the FW might have updated the regdomain */ iwl_mvm_update_changed_regdom(mvm); @@ -1685,6 +1705,13 @@ int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode) return _iwl_mvm_exit_d0i3(mvm); } +#define IWL_MVM_D0I3_OPS \ + .enter_d0i3 = iwl_mvm_enter_d0i3, \ + .exit_d0i3 = iwl_mvm_exit_d0i3, +#else /* CONFIG_PM */ +#define IWL_MVM_D0I3_OPS +#endif /* CONFIG_PM */ + #define IWL_MVM_COMMON_OPS \ /* these could be differentiated */ \ .async_cb = iwl_mvm_async_cb, \ @@ -1695,8 +1722,7 @@ int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode) .nic_error = iwl_mvm_nic_error, \ .cmd_queue_full = iwl_mvm_cmd_queue_full, \ .nic_config = iwl_mvm_nic_config, \ - .enter_d0i3 = iwl_mvm_enter_d0i3, \ - .exit_d0i3 = iwl_mvm_exit_d0i3, \ + IWL_MVM_D0I3_OPS \ /* as we only register one, these MUST be common! */ \ .start = iwl_op_mode_mvm_start, \ .stop = iwl_op_mode_mvm_stop diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c index c11fe2621d51..5a0a28fd762d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/power.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c @@ -18,11 +18,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/quota.c b/drivers/net/wireless/intel/iwlwifi/mvm/quota.c index 690559bdf421..5e62b97af48b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/quota.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/quota.c @@ -18,11 +18,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
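In ops.c above, the D0i3 enter/exit callbacks are now compiled only under CONFIG_PM, and the shared op-mode ops initializer stays readable via a macro that expands to the two designated initializers when PM is built in, and to nothing otherwise. A minimal userspace model of the trick, with invented names (build with or without -DCONFIG_PM):

#include <stdio.h>

struct op_mode_ops {
	int (*start)(void);
	int (*enter_d0i3)(void);
	int (*exit_d0i3)(void);
};

static int start(void) { return 0; }

#ifdef CONFIG_PM
static int enter_d0i3(void) { return 0; }
static int exit_d0i3(void) { return 0; }
/* expands to two designated initializers (note the trailing comma)... */
#define D0I3_OPS .enter_d0i3 = enter_d0i3, .exit_d0i3 = exit_d0i3,
#else
/* ...or to nothing, leaving both callbacks NULL */
#define D0I3_OPS
#endif

static const struct op_mode_ops my_ops = {
	D0I3_OPS
	.start = start,
};

int main(void)
{
	printf("d0i3 callbacks %s\n",
	       my_ops.enter_d0i3 ? "present" : "absent");
	return my_ops.start();
}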
* - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c index 8169d1450b3b..7a98e1a1dc40 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c @@ -117,20 +117,42 @@ static u16 rs_fw_set_config_flags(struct iwl_mvm *mvm, { struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; + struct ieee80211_sta_he_cap *he_cap = &sta->he_cap; bool vht_ena = vht_cap && vht_cap->vht_supported; u16 flags = 0; if (mvm->cfg->ht_params->stbc && - (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) && - ((ht_cap && (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC)) || - (vht_ena && (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK)))) - flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK; + (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1)) { + if (he_cap && he_cap->has_he) { + if (he_cap->he_cap_elem.phy_cap_info[2] & + IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ) + flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK; + + if (he_cap->he_cap_elem.phy_cap_info[7] & + IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ) + flags |= IWL_TLC_MNG_CFG_FLAGS_HE_STBC_160MHZ_MSK; + } else if ((ht_cap && + (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC)) || + (vht_ena && + (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK))) + flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK; + } if (mvm->cfg->ht_params->ldpc && ((ht_cap && (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING)) || (vht_ena && (vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC)))) flags |= IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK; + if (he_cap && he_cap->has_he && + (he_cap->he_cap_elem.phy_cap_info[3] & + IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK)) { + flags |= IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_1_MSK; + + if (he_cap->he_cap_elem.phy_cap_info[3] & + IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_2) + flags |= IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_2_MSK; + } + return flags; } @@ -311,7 +333,7 @@ out: } void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - enum nl80211_band band) + enum nl80211_band band, bool update) { struct ieee80211_hw *hw = mvm->hw; struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); @@ -320,7 +342,8 @@ void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct ieee80211_supported_band *sband; struct iwl_tlc_config_cmd cfg_cmd = { .sta_id = mvmsta->sta_id, - .max_ch_width = rs_fw_bw_from_sta_bw(sta), + .max_ch_width = update ? + rs_fw_bw_from_sta_bw(sta) : RATE_MCS_CHAN_WIDTH_20, .flags = cpu_to_le16(rs_fw_set_config_flags(mvm, sta)), .chains = rs_fw_set_active_chains(iwl_mvm_get_valid_tx_ant(mvm)), .max_mpdu_len = cpu_to_le16(sta->max_amsdu_len), diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index 30cfd7d50bc9..2c75f51a04e4 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c @@ -1276,7 +1276,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, (unsigned long)(lq_sta->last_tx + (IWL_MVM_RS_IDLE_TIMEOUT * HZ)))) { IWL_DEBUG_RATE(mvm, "Tx idle for too long. 
reinit rs\n"); - iwl_mvm_rs_rate_init(mvm, sta, info->band); + iwl_mvm_rs_rate_init(mvm, sta, info->band, true); return; } lq_sta->last_tx = jiffies; @@ -2859,9 +2859,8 @@ void rs_update_last_rssi(struct iwl_mvm *mvm, static void rs_initialize_lq(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct iwl_lq_sta *lq_sta, - enum nl80211_band band) + enum nl80211_band band, bool update) { - struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_scale_tbl_info *tbl; struct rs_rate *rate; u8 active_tbl = 0; @@ -2890,8 +2889,7 @@ static void rs_initialize_lq(struct iwl_mvm *mvm, rs_set_expected_tpt_table(lq_sta, tbl); rs_fill_lq_cmd(mvm, sta, lq_sta, rate); /* TODO restore station should remember the lq cmd */ - iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, - mvmsta->sta_state < IEEE80211_STA_AUTHORIZED); + iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, !update); } static void rs_drv_get_rate(void *mvm_r, struct ieee80211_sta *sta, @@ -3144,7 +3142,7 @@ void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg) * Called after adding a new station to initialize rate scaling */ static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - enum nl80211_band band) + enum nl80211_band band, bool update) { int i, j; struct ieee80211_hw *hw = mvm->hw; @@ -3215,7 +3213,7 @@ static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, /* These values will be overridden later */ lq_sta->lq.single_stream_ant_msk = - first_antenna(iwl_mvm_get_valid_tx_ant(mvm)); + iwl_mvm_bt_coex_get_single_ant_msk(mvm, iwl_mvm_get_valid_tx_ant(mvm)); lq_sta->lq.dual_stream_ant_msk = ANT_AB; /* as default allow aggregation for all tids */ @@ -3224,7 +3222,7 @@ static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, #ifdef CONFIG_IWLWIFI_DEBUGFS iwl_mvm_reset_frame_stats(mvm); #endif - rs_initialize_lq(mvm, sta, lq_sta, band); + rs_initialize_lq(mvm, sta, lq_sta, band, update); } static void rs_drv_rate_update(void *mvm_r, @@ -3244,7 +3242,7 @@ static void rs_drv_rate_update(void *mvm_r, for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) ieee80211_stop_tx_ba_session(sta, tid); - iwl_mvm_rs_rate_init(mvm, sta, sband->band); + iwl_mvm_rs_rate_init(mvm, sta, sband->band, true); } #ifdef CONFIG_MAC80211_DEBUGFS @@ -3578,7 +3576,8 @@ static void rs_fill_lq_cmd(struct iwl_mvm *mvm, mvmsta = iwl_mvm_sta_from_mac80211(sta); mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif); - if (num_of_ant(initial_rate->ant) == 1) + if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2) && + num_of_ant(initial_rate->ant) == 1) lq_cmd->single_stream_ant_msk = initial_rate->ant; lq_cmd->agg_frame_cnt_limit = mvmsta->max_agg_bufsize; @@ -4098,12 +4097,12 @@ static const struct rate_control_ops rs_mvm_ops_drv = { }; void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - enum nl80211_band band) + enum nl80211_band band, bool update) { if (iwl_mvm_has_tlc_offload(mvm)) - rs_fw_rate_init(mvm, sta, band); + rs_fw_rate_init(mvm, sta, band, update); else - rs_drv_rate_init(mvm, sta, band); + rs_drv_rate_init(mvm, sta, band, update); } int iwl_mvm_rate_control_register(void) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h index d2cf484e2b73..d0f47899f284 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h @@ -420,7 +420,7 @@ struct iwl_lq_sta { /* Initialize station's rate scaling information after adding station */ void iwl_mvm_rs_rate_init(struct 
iwl_mvm *mvm, struct ieee80211_sta *sta, - enum nl80211_band band); + enum nl80211_band band, bool init); /* Notify RS about Tx status */ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, @@ -461,7 +461,7 @@ void rs_remove_sta_debugfs(void *mvm, void *mvm_sta); void iwl_mvm_rs_add_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta); void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - enum nl80211_band band); + enum nl80211_band band, bool update); int rs_fw_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, bool enable); void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c index bfb163419c67..ef624833cf1b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c @@ -18,11 +18,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * @@ -438,13 +433,14 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct ieee80211_vif *tx_blocked_vif = rcu_dereference(mvm->csa_tx_blocked_vif); + struct iwl_fw_dbg_trigger_tlv *trig; + struct ieee80211_vif *vif = mvmsta->vif; /* We have tx blocked stations (with CS bit). If we heard * frames from a blocked station on a new channel we can * TX to it again. 
*/ - if (unlikely(tx_blocked_vif) && - mvmsta->vif == tx_blocked_vif) { + if (unlikely(tx_blocked_vif) && vif == tx_blocked_vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(tx_blocked_vif); @@ -455,23 +451,18 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, rs_update_last_rssi(mvm, mvmsta, rx_status); - if (iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_RSSI) && - ieee80211_is_beacon(hdr->frame_control)) { - struct iwl_fw_dbg_trigger_tlv *trig; + trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, + ieee80211_vif_to_wdev(vif), + FW_DBG_TRIGGER_RSSI); + + if (trig && ieee80211_is_beacon(hdr->frame_control)) { struct iwl_fw_dbg_trigger_low_rssi *rssi_trig; - bool trig_check; s32 rssi; - trig = iwl_fw_dbg_get_trigger(mvm->fw, - FW_DBG_TRIGGER_RSSI); rssi_trig = (void *)trig->data; rssi = le32_to_cpu(rssi_trig->rssi); - trig_check = - iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, - ieee80211_vif_to_wdev(mvmsta->vif), - trig); - if (trig_check && rx_status->signal < rssi) + if (rx_status->signal < rssi) iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, NULL); } @@ -698,15 +689,12 @@ iwl_mvm_rx_stats_check_trigger(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) struct iwl_fw_dbg_trigger_stats *trig_stats; u32 trig_offset, trig_thold; - if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_STATS)) + trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL, FW_DBG_TRIGGER_STATS); + if (!trig) return; - trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_STATS); trig_stats = (void *)trig->data; - if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, NULL, trig)) - return; - trig_offset = le32_to_cpu(trig_stats->stop_offset); trig_thold = le32_to_cpu(trig_stats->stop_threshold); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index b53148f972a4..26ac9402568d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -283,6 +283,10 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, !(status & IWL_RX_MPDU_RES_STATUS_TTAK_OK)) return 0; + if (mvm->trans->cfg->gen2 && + !(status & RX_MPDU_RES_STATUS_MIC_OK)) + stats->flag |= RX_FLAG_MMIC_ERROR; + *crypt_len = IEEE80211_TKIP_IV_LEN; /* fall through if TTAK OK */ case IWL_RX_MPDU_STATUS_SEC_WEP: @@ -294,8 +298,11 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, IWL_RX_MPDU_STATUS_SEC_WEP) *crypt_len = IEEE80211_WEP_IV_LEN; - if (pkt_flags & FH_RSCSR_RADA_EN) + if (pkt_flags & FH_RSCSR_RADA_EN) { stats->flag |= RX_FLAG_ICV_STRIPPED; + if (mvm->trans->cfg->gen2) + stats->flag |= RX_FLAG_MMIC_STRIPPED; + } return 0; case IWL_RX_MPDU_STATUS_SEC_EXT_ENC: @@ -856,6 +863,444 @@ static void iwl_mvm_flip_address(u8 *addr) ether_addr_copy(addr, mac_addr); } +static void iwl_mvm_decode_he_sigb(struct iwl_mvm *mvm, + struct iwl_rx_mpdu_desc *desc, + u32 rate_n_flags, + struct ieee80211_radiotap_he_mu *he_mu) +{ + u32 sigb0, sigb1; + u16 sigb2; + + if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) { + sigb0 = le32_to_cpu(desc->v3.sigb_common0); + sigb1 = le32_to_cpu(desc->v3.sigb_common1); + } else { + sigb0 = le32_to_cpu(desc->v1.sigb_common0); + sigb1 = le32_to_cpu(desc->v1.sigb_common1); + } + + sigb2 = le16_to_cpu(desc->sigb_common2); + + if (FIELD_GET(IWL_RX_HE_SIGB_COMMON2_CH1_CRC_OK, sigb2)) { + he_mu->flags1 |= + cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_RU_KNOWN | + IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_CTR_26T_RU_KNOWN); + + he_mu->flags1 |= + 
le16_encode_bits(FIELD_GET(IWL_RX_HE_SIGB_COMMON2_CH1_CTR_RU, + sigb2), + IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_CTR_26T_RU); + + he_mu->ru_ch1[0] = FIELD_GET(IWL_RX_HE_SIGB_COMMON0_CH1_RU0, + sigb0); + he_mu->ru_ch1[1] = FIELD_GET(IWL_RX_HE_SIGB_COMMON1_CH1_RU1, + sigb1); + he_mu->ru_ch1[2] = FIELD_GET(IWL_RX_HE_SIGB_COMMON0_CH1_RU2, + sigb0); + he_mu->ru_ch1[3] = FIELD_GET(IWL_RX_HE_SIGB_COMMON1_CH1_RU3, + sigb1); + } + + if (FIELD_GET(IWL_RX_HE_SIGB_COMMON2_CH2_CRC_OK, sigb2) && + (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) != RATE_MCS_CHAN_WIDTH_20) { + he_mu->flags1 |= + cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH2_RU_KNOWN | + IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH2_CTR_26T_RU_KNOWN); + + he_mu->flags2 |= + le16_encode_bits(FIELD_GET(IWL_RX_HE_SIGB_COMMON2_CH2_CTR_RU, + sigb2), + IEEE80211_RADIOTAP_HE_MU_FLAGS2_CH2_CTR_26T_RU); + + he_mu->ru_ch2[0] = FIELD_GET(IWL_RX_HE_SIGB_COMMON0_CH2_RU0, + sigb0); + he_mu->ru_ch2[1] = FIELD_GET(IWL_RX_HE_SIGB_COMMON1_CH2_RU1, + sigb1); + he_mu->ru_ch2[2] = FIELD_GET(IWL_RX_HE_SIGB_COMMON0_CH2_RU2, + sigb0); + he_mu->ru_ch2[3] = FIELD_GET(IWL_RX_HE_SIGB_COMMON1_CH2_RU3, + sigb1); + } +} + +static void +iwl_mvm_decode_he_phy_ru_alloc(u64 he_phy_data, u32 rate_n_flags, + struct ieee80211_radiotap_he *he, + struct ieee80211_radiotap_he_mu *he_mu, + struct ieee80211_rx_status *rx_status) +{ + /* + * Unfortunately, we have to leave the mac80211 data + * incorrect for the case that we receive an HE-MU + * transmission and *don't* have the HE phy data (due + * to the bits being used for TSF). This shouldn't + * happen though as management frames where we need + * the TSF/timers are not transmitted in HE-MU. + */ + u8 ru = FIELD_GET(IWL_RX_HE_PHY_RU_ALLOC_MASK, he_phy_data); + u8 offs = 0; + + rx_status->bw = RATE_INFO_BW_HE_RU; + + he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN); + + switch (ru) { + case 0 ... 36: + rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_26; + offs = ru; + break; + case 37 ... 52: + rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52; + offs = ru - 37; + break; + case 53 ... 60: + rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106; + offs = ru - 53; + break; + case 61 ... 64: + rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242; + offs = ru - 61; + break; + case 65 ...
66: + rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_484; + offs = ru - 65; + break; + case 67: + rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_996; + break; + case 68: + rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996; + break; + } + he->data2 |= le16_encode_bits(offs, + IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET); + he->data2 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_KNOWN | + IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET_KNOWN); + if (he_phy_data & IWL_RX_HE_PHY_RU_ALLOC_SEC80) + he->data2 |= + cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_SEC); + + if (he_mu) { +#define CHECK_BW(bw) \ + BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_ ## bw ## MHZ != \ + RATE_MCS_CHAN_WIDTH_##bw >> RATE_MCS_CHAN_WIDTH_POS) + CHECK_BW(20); + CHECK_BW(40); + CHECK_BW(80); + CHECK_BW(160); + he_mu->flags2 |= + le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK, + rate_n_flags), + IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW); + } +} + +static void iwl_mvm_decode_he_phy_data(struct iwl_mvm *mvm, + struct iwl_rx_mpdu_desc *desc, + struct ieee80211_radiotap_he *he, + struct ieee80211_radiotap_he_mu *he_mu, + struct ieee80211_rx_status *rx_status, + u64 he_phy_data, u32 rate_n_flags, + int queue) +{ + u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK; + bool sigb_data; + u16 d1known = IEEE80211_RADIOTAP_HE_DATA1_LDPC_XSYMSEG_KNOWN | + IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN | + IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE_KNOWN | + IEEE80211_RADIOTAP_HE_DATA1_DOPPLER_KNOWN | + IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN; + u16 d2known = IEEE80211_RADIOTAP_HE_DATA2_PRE_FEC_PAD_KNOWN | + IEEE80211_RADIOTAP_HE_DATA2_PE_DISAMBIG_KNOWN | + IEEE80211_RADIOTAP_HE_DATA2_TXOP_KNOWN; + + he->data1 |= cpu_to_le16(d1known); + he->data2 |= cpu_to_le16(d2known); + he->data3 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_BSS_COLOR_MASK, + he_phy_data), + IEEE80211_RADIOTAP_HE_DATA3_BSS_COLOR); + he->data3 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_UPLINK, + he_phy_data), + IEEE80211_RADIOTAP_HE_DATA3_UL_DL); + he->data3 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_LDPC_EXT_SYM, + he_phy_data), + IEEE80211_RADIOTAP_HE_DATA3_LDPC_XSYMSEG); + he->data4 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SPATIAL_REUSE_MASK, + he_phy_data), + IEEE80211_RADIOTAP_HE_DATA4_SU_MU_SPTL_REUSE); + he->data5 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_PRE_FEC_PAD_MASK, + he_phy_data), + IEEE80211_RADIOTAP_HE_DATA5_PRE_FEC_PAD); + he->data5 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_PE_DISAMBIG, + he_phy_data), + IEEE80211_RADIOTAP_HE_DATA5_PE_DISAMBIG); + he->data6 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_TXOP_DUR_MASK, + he_phy_data), + IEEE80211_RADIOTAP_HE_DATA6_TXOP); + he->data6 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_DOPPLER, + he_phy_data), + IEEE80211_RADIOTAP_HE_DATA6_DOPPLER); + + switch (he_type) { + case RATE_MCS_HE_TYPE_MU: + he_mu->flags1 |= + le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_MU_SIGB_DCM, + he_phy_data), + IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM); + he_mu->flags1 |= + le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_MU_SIGB_MCS_MASK, + he_phy_data), + IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS); + he_mu->flags2 |= + le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_MU_SIBG_SYM_OR_USER_NUM_MASK, + he_phy_data), + IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS); + he_mu->flags2 |= + le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_MU_SIGB_COMPRESSION, + he_phy_data), + IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP); + he_mu->flags2 |= + le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_MU_PREAMBLE_PUNC_TYPE_MASK, + 
he_phy_data), + IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW); + + sigb_data = FIELD_GET(IWL_RX_HE_PHY_INFO_TYPE_MASK, + he_phy_data) == + IWL_RX_HE_PHY_INFO_TYPE_MU_EXT_INFO; + if (sigb_data) + iwl_mvm_decode_he_sigb(mvm, desc, rate_n_flags, he_mu); + /* fall through */ + case RATE_MCS_HE_TYPE_TRIG: + he->data2 |= + cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN); + he->data5 |= + le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_HE_LTF_NUM_MASK, + he_phy_data), + IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS); + break; + case RATE_MCS_HE_TYPE_SU: + case RATE_MCS_HE_TYPE_EXT_SU: + he->data1 |= + cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BEAM_CHANGE_KNOWN); + he->data3 |= + le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_BEAM_CHNG, + he_phy_data), + IEEE80211_RADIOTAP_HE_DATA3_BEAM_CHANGE); + break; + } + + switch (FIELD_GET(IWL_RX_HE_PHY_INFO_TYPE_MASK, he_phy_data)) { + case IWL_RX_HE_PHY_INFO_TYPE_MU: + case IWL_RX_HE_PHY_INFO_TYPE_MU_EXT_INFO: + case IWL_RX_HE_PHY_INFO_TYPE_TB: + iwl_mvm_decode_he_phy_ru_alloc(he_phy_data, rate_n_flags, + he, he_mu, rx_status); + break; + default: + /* nothing */ + break; + } +} + +static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb, + struct iwl_rx_mpdu_desc *desc, + u32 rate_n_flags, u16 phy_info, int queue) +{ + struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); + /* this is invalid e.g. because puncture type doesn't allow 0b11 */ +#define HE_PHY_DATA_INVAL ((u64)-1) + u64 he_phy_data = HE_PHY_DATA_INVAL; + struct ieee80211_radiotap_he *he = NULL; + struct ieee80211_radiotap_he_mu *he_mu = NULL; + u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK; + u8 stbc, ltf; + static const struct ieee80211_radiotap_he known = { + .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN | + IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN | + IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN | + IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN), + .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN | + IEEE80211_RADIOTAP_HE_DATA2_TXBF_KNOWN), + }; + static const struct ieee80211_radiotap_he_mu mu_known = { + .flags1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS_KNOWN | + IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM_KNOWN | + IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN | + IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_COMP_KNOWN), + .flags2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW_KNOWN | + IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN), + }; + unsigned int radiotap_len = 0; + + he = skb_put_data(skb, &known, sizeof(known)); + radiotap_len += sizeof(known); + rx_status->flag |= RX_FLAG_RADIOTAP_HE; + + if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) { + if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) + he_phy_data = le64_to_cpu(desc->v3.he_phy_data); + else + he_phy_data = le64_to_cpu(desc->v1.he_phy_data); + + if (he_type == RATE_MCS_HE_TYPE_MU) { + he_mu = skb_put_data(skb, &mu_known, sizeof(mu_known)); + radiotap_len += sizeof(mu_known); + rx_status->flag |= RX_FLAG_RADIOTAP_HE_MU; + } + } + + /* temporarily hide the radiotap data */ + __skb_pull(skb, radiotap_len); + + if (he_phy_data != HE_PHY_DATA_INVAL && + he_type == RATE_MCS_HE_TYPE_SU) { + /* report the AMPDU-EOF bit on single frames */ + if (!queue && !(phy_info & IWL_RX_MPDU_PHY_AMPDU)) { + rx_status->flag |= RX_FLAG_AMPDU_DETAILS; + rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN; + if (FIELD_GET(IWL_RX_HE_PHY_DELIM_EOF, he_phy_data)) + rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT; + } + } + + if (he_phy_data != HE_PHY_DATA_INVAL) + 
iwl_mvm_decode_he_phy_data(mvm, desc, he, he_mu, rx_status, + he_phy_data, rate_n_flags, queue); + + /* update aggregation data for monitor sake on default queue */ + if (!queue && (phy_info & IWL_RX_MPDU_PHY_AMPDU)) { + bool toggle_bit = phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE; + + /* toggle is switched whenever new aggregation starts */ + if (toggle_bit != mvm->ampdu_toggle && + he_phy_data != HE_PHY_DATA_INVAL && + (he_type == RATE_MCS_HE_TYPE_MU || + he_type == RATE_MCS_HE_TYPE_SU)) { + rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN; + if (FIELD_GET(IWL_RX_HE_PHY_DELIM_EOF, + he_phy_data)) + rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT; + } + } + + if (he_type == RATE_MCS_HE_TYPE_EXT_SU && + rate_n_flags & RATE_MCS_HE_106T_MSK) { + rx_status->bw = RATE_INFO_BW_HE_RU; + rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106; + } + + /* actually data is filled in mac80211 */ + if (he_type == RATE_MCS_HE_TYPE_SU || + he_type == RATE_MCS_HE_TYPE_EXT_SU) + he->data1 |= + cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN); + + stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >> RATE_MCS_STBC_POS; + rx_status->nss = + ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >> + RATE_VHT_MCS_NSS_POS) + 1; + rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK; + rx_status->encoding = RX_ENC_HE; + rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT; + if (rate_n_flags & RATE_MCS_BF_MSK) + rx_status->enc_flags |= RX_ENC_FLAG_BF; + + rx_status->he_dcm = + !!(rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK); + +#define CHECK_TYPE(F) \ + BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA1_FORMAT_ ## F != \ + (RATE_MCS_HE_TYPE_ ## F >> RATE_MCS_HE_TYPE_POS)) + + CHECK_TYPE(SU); + CHECK_TYPE(EXT_SU); + CHECK_TYPE(MU); + CHECK_TYPE(TRIG); + + he->data1 |= cpu_to_le16(he_type >> RATE_MCS_HE_TYPE_POS); + + if (rate_n_flags & RATE_MCS_BF_MSK) + he->data5 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA5_TXBF); + + switch ((rate_n_flags & RATE_MCS_HE_GI_LTF_MSK) >> + RATE_MCS_HE_GI_LTF_POS) { + case 0: + if (he_type == RATE_MCS_HE_TYPE_TRIG) + rx_status->he_gi = NL80211_RATE_INFO_HE_GI_1_6; + else + rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8; + if (he_type == RATE_MCS_HE_TYPE_MU) + ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X; + else + ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_1X; + break; + case 1: + if (he_type == RATE_MCS_HE_TYPE_TRIG) + rx_status->he_gi = NL80211_RATE_INFO_HE_GI_1_6; + else + rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8; + ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_2X; + break; + case 2: + if (he_type == RATE_MCS_HE_TYPE_TRIG) { + rx_status->he_gi = NL80211_RATE_INFO_HE_GI_3_2; + ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X; + } else { + rx_status->he_gi = NL80211_RATE_INFO_HE_GI_1_6; + ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_2X; + } + break; + case 3: + if ((he_type == RATE_MCS_HE_TYPE_SU || + he_type == RATE_MCS_HE_TYPE_EXT_SU) && + rate_n_flags & RATE_MCS_SGI_MSK) + rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8; + else + rx_status->he_gi = NL80211_RATE_INFO_HE_GI_3_2; + ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X; + break; + } + + he->data5 |= le16_encode_bits(ltf, IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE); + + if (he_type == RATE_MCS_HE_TYPE_SU || + he_type == RATE_MCS_HE_TYPE_EXT_SU) { + u16 val; + + /* LTF syms correspond to streams */ + he->data2 |= + cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN); + switch (rx_status->nss) { + case 1: + val = 0; + break; + case 2: + val = 1; + break; + case 3: + case 4: + val = 2; + break; + case 5: + case 6: + val = 3; + break; + case 7: + case 8: 
+ val = 4; + break; + default: + WARN_ONCE(1, "invalid nss: %d\n", + rx_status->nss); + val = 0; + } + + he->data5 |= + le16_encode_bits(val, + IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS); + } +} + void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, int queue) { @@ -869,12 +1314,6 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, struct ieee80211_sta *sta = NULL; struct sk_buff *skb; u8 crypt_len = 0, channel, energy_a, energy_b; - struct ieee80211_radiotap_he *he = NULL; - struct ieee80211_radiotap_he_mu *he_mu = NULL; - u32 he_type = 0xffffffff; - /* this is invalid e.g. because puncture type doesn't allow 0b11 */ -#define HE_PHY_DATA_INVAL ((u64)-1) - u64 he_phy_data = HE_PHY_DATA_INVAL; size_t desc_size; if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))) @@ -918,49 +1357,24 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, rx_status = IEEE80211_SKB_RXCB(skb); - if (rate_n_flags & RATE_MCS_HE_MSK) { - static const struct ieee80211_radiotap_he known = { - .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN | - IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN | - IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN | - IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN), - .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN | - IEEE80211_RADIOTAP_HE_DATA2_TXBF_KNOWN), - }; - static const struct ieee80211_radiotap_he_mu mu_known = { - .flags1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS_KNOWN | - IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM_KNOWN | - IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN | - IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_COMP_KNOWN), - .flags2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW_KNOWN), - }; - unsigned int radiotap_len = 0; - - he = skb_put_data(skb, &known, sizeof(known)); - radiotap_len += sizeof(known); - rx_status->flag |= RX_FLAG_RADIOTAP_HE; - - he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK; - - if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) { - if (mvm->trans->cfg->device_family >= - IWL_DEVICE_FAMILY_22560) - he_phy_data = le64_to_cpu(desc->v3.he_phy_data); - else - he_phy_data = le64_to_cpu(desc->v1.he_phy_data); - - if (he_type == RATE_MCS_HE_TYPE_MU) { - he_mu = skb_put_data(skb, &mu_known, - sizeof(mu_known)); - radiotap_len += sizeof(mu_known); - rx_status->flag |= RX_FLAG_RADIOTAP_HE_MU; - } - } - - /* temporarily hide the radiotap data */ - __skb_pull(skb, radiotap_len); + /* This may be overridden by iwl_mvm_rx_he() to HE_RU */ + switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) { + case RATE_MCS_CHAN_WIDTH_20: + break; + case RATE_MCS_CHAN_WIDTH_40: + rx_status->bw = RATE_INFO_BW_40; + break; + case RATE_MCS_CHAN_WIDTH_80: + rx_status->bw = RATE_INFO_BW_80; + break; + case RATE_MCS_CHAN_WIDTH_160: + rx_status->bw = RATE_INFO_BW_160; + break; } + if (rate_n_flags & RATE_MCS_HE_MSK) + iwl_mvm_rx_he(mvm, skb, desc, rate_n_flags, phy_info, queue); + rx_status = IEEE80211_SKB_RXCB(skb); if (iwl_mvm_rx_crypto(mvm, hdr, rx_status, phy_info, desc, @@ -995,53 +1409,8 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, rx_status->mactime = tsf_on_air_rise; /* TSF as indicated by the firmware is at INA time */ rx_status->flag |= RX_FLAG_MACTIME_PLCP_START; - } else if (he_type == RATE_MCS_HE_TYPE_SU) { - u64 he_phy_data; - - if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) - he_phy_data = le64_to_cpu(desc->v3.he_phy_data); - else - he_phy_data = le64_to_cpu(desc->v1.he_phy_data); - - 
he->data1 |= - cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN); - if (FIELD_GET(IWL_RX_HE_PHY_UPLINK, - he_phy_data)) - he->data3 |= - cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA3_UL_DL); - - if (!queue && !(phy_info & IWL_RX_MPDU_PHY_AMPDU)) { - rx_status->ampdu_reference = mvm->ampdu_ref; - mvm->ampdu_ref++; - - rx_status->flag |= RX_FLAG_AMPDU_DETAILS; - rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN; - if (FIELD_GET(IWL_RX_HE_PHY_DELIM_EOF, - he_phy_data)) - rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT; - } - } else if (he_mu && he_phy_data != HE_PHY_DATA_INVAL) { - he_mu->flags1 |= - le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIBG_SYM_OR_USER_NUM_MASK, - he_phy_data), - IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS); - he_mu->flags1 |= - le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIGB_DCM, - he_phy_data), - IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM); - he_mu->flags1 |= - le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIGB_MCS_MASK, - he_phy_data), - IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS); - he_mu->flags2 |= - le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIGB_COMPRESSION, - he_phy_data), - IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP); - he_mu->flags2 |= - le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_PREAMBLE_PUNC_TYPE_MASK, - he_phy_data), - IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW); } + rx_status->device_timestamp = gp2_on_air_rise; rx_status->band = channel > 14 ? NL80211_BAND_5GHZ : NL80211_BAND_2GHZ; @@ -1066,15 +1435,6 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, if (toggle_bit != mvm->ampdu_toggle) { mvm->ampdu_ref++; mvm->ampdu_toggle = toggle_bit; - - if (he_phy_data != HE_PHY_DATA_INVAL && - he_type == RATE_MCS_HE_TYPE_MU) { - rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN; - if (FIELD_GET(IWL_RX_HE_PHY_DELIM_EOF, - he_phy_data)) - rx_status->flag |= - RX_FLAG_AMPDU_EOF_BIT; - } } } @@ -1103,6 +1463,8 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, u8 baid = (u8)((le32_to_cpu(desc->reorder_data) & IWL_RX_MPDU_REORDER_BAID_MASK) >> IWL_RX_MPDU_REORDER_BAID_SHIFT); + struct iwl_fw_dbg_trigger_tlv *trig; + struct ieee80211_vif *vif = mvmsta->vif; if (!mvm->tcm.paused && len >= sizeof(*hdr) && !is_multicast_ether_addr(hdr->addr1) && @@ -1115,8 +1477,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, * frames from a blocked station on a new channel we can * TX to it again. 
*/ - if (unlikely(tx_blocked_vif) && - tx_blocked_vif == mvmsta->vif) { + if (unlikely(tx_blocked_vif) && tx_blocked_vif == vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(tx_blocked_vif); @@ -1127,23 +1488,18 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, rs_update_last_rssi(mvm, mvmsta, rx_status); - if (iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_RSSI) && - ieee80211_is_beacon(hdr->frame_control)) { - struct iwl_fw_dbg_trigger_tlv *trig; + trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, + ieee80211_vif_to_wdev(vif), + FW_DBG_TRIGGER_RSSI); + + if (trig && ieee80211_is_beacon(hdr->frame_control)) { struct iwl_fw_dbg_trigger_low_rssi *rssi_trig; - bool trig_check; s32 rssi; - trig = iwl_fw_dbg_get_trigger(mvm->fw, - FW_DBG_TRIGGER_RSSI); rssi_trig = (void *)trig->data; rssi = le32_to_cpu(rssi_trig->rssi); - trig_check = - iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, - ieee80211_vif_to_wdev(mvmsta->vif), - trig); - if (trig_check && rx_status->signal < rssi) + if (rx_status->signal < rssi) iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, NULL); } @@ -1183,84 +1539,6 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, } } - switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) { - case RATE_MCS_CHAN_WIDTH_20: - break; - case RATE_MCS_CHAN_WIDTH_40: - rx_status->bw = RATE_INFO_BW_40; - break; - case RATE_MCS_CHAN_WIDTH_80: - rx_status->bw = RATE_INFO_BW_80; - break; - case RATE_MCS_CHAN_WIDTH_160: - rx_status->bw = RATE_INFO_BW_160; - break; - } - - if (he_type == RATE_MCS_HE_TYPE_EXT_SU && - rate_n_flags & RATE_MCS_HE_106T_MSK) { - rx_status->bw = RATE_INFO_BW_HE_RU; - rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106; - } - - if (rate_n_flags & RATE_MCS_HE_MSK && - phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD && - he_type == RATE_MCS_HE_TYPE_MU) { - /* - * Unfortunately, we have to leave the mac80211 data - * incorrect for the case that we receive an HE-MU - * transmission and *don't* have the he_mu pointer, - * i.e. we don't have the phy data (due to the bits - * being used for TSF). This shouldn't happen though - * as management frames where we need the TSF/timers - * are not be transmitted in HE-MU, I think. - */ - u8 ru = FIELD_GET(IWL_RX_HE_PHY_RU_ALLOC_MASK, he_phy_data); - u8 offs = 0; - - rx_status->bw = RATE_INFO_BW_HE_RU; - - switch (ru) { - case 0 ... 36: - rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_26; - offs = ru; - break; - case 37 ... 52: - rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52; - offs = ru - 37; - break; - case 53 ... 60: - rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106; - offs = ru - 53; - break; - case 61 ... 64: - rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242; - offs = ru - 61; - break; - case 65 ... 
66: - rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_484; - offs = ru - 65; - break; - case 67: - rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_996; - break; - case 68: - rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996; - break; - } - he->data2 |= - le16_encode_bits(offs, - IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET); - he->data2 |= - cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_KNOWN); - if (he_phy_data & IWL_RX_HE_PHY_RU_ALLOC_SEC80) - he->data2 |= - cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_SEC); - } else if (he) { - he->data1 |= - cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN); - } - if (!(rate_n_flags & RATE_MCS_CCK_MSK) && rate_n_flags & RATE_MCS_SGI_MSK) rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; @@ -1285,120 +1563,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT; if (rate_n_flags & RATE_MCS_BF_MSK) rx_status->enc_flags |= RX_ENC_FLAG_BF; - } else if (he) { - u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >> - RATE_MCS_STBC_POS; - rx_status->nss = - ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >> - RATE_VHT_MCS_NSS_POS) + 1; - rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK; - rx_status->encoding = RX_ENC_HE; - rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT; - if (rate_n_flags & RATE_MCS_BF_MSK) - rx_status->enc_flags |= RX_ENC_FLAG_BF; - - rx_status->he_dcm = - !!(rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK); - -#define CHECK_TYPE(F) \ - BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA1_FORMAT_ ## F != \ - (RATE_MCS_HE_TYPE_ ## F >> RATE_MCS_HE_TYPE_POS)) - - CHECK_TYPE(SU); - CHECK_TYPE(EXT_SU); - CHECK_TYPE(MU); - CHECK_TYPE(TRIG); - - he->data1 |= cpu_to_le16(he_type >> RATE_MCS_HE_TYPE_POS); - - if (rate_n_flags & RATE_MCS_BF_POS) - he->data5 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA5_TXBF); - - switch ((rate_n_flags & RATE_MCS_HE_GI_LTF_MSK) >> - RATE_MCS_HE_GI_LTF_POS) { - case 0: - rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8; - break; - case 1: - rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8; - break; - case 2: - rx_status->he_gi = NL80211_RATE_INFO_HE_GI_1_6; - break; - case 3: - if (rate_n_flags & RATE_MCS_SGI_MSK) - rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8; - else - rx_status->he_gi = NL80211_RATE_INFO_HE_GI_3_2; - break; - } - - switch (he_type) { - case RATE_MCS_HE_TYPE_SU: { - u16 val; - - /* LTF syms correspond to streams */ - he->data2 |= - cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN); - switch (rx_status->nss) { - case 1: - val = 0; - break; - case 2: - val = 1; - break; - case 3: - case 4: - val = 2; - break; - case 5: - case 6: - val = 3; - break; - case 7: - case 8: - val = 4; - break; - default: - WARN_ONCE(1, "invalid nss: %d\n", - rx_status->nss); - val = 0; - } - he->data5 |= - le16_encode_bits(val, - IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS); - } - break; - case RATE_MCS_HE_TYPE_MU: { - u16 val; - u64 he_phy_data; - - if (mvm->trans->cfg->device_family >= - IWL_DEVICE_FAMILY_22560) - he_phy_data = le64_to_cpu(desc->v3.he_phy_data); - else - he_phy_data = le64_to_cpu(desc->v1.he_phy_data); - - if (he_phy_data == HE_PHY_DATA_INVAL) - break; - - val = FIELD_GET(IWL_RX_HE_PHY_HE_LTF_NUM_MASK, - he_phy_data); - - he->data2 |= - cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN); - he->data5 |= - cpu_to_le16(FIELD_PREP( - IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS, - val)); - } - break; - case RATE_MCS_HE_TYPE_EXT_SU: - case RATE_MCS_HE_TYPE_TRIG: - /* not supported yet */ - break; - } - } else { + } else if 
(!(rate_n_flags & RATE_MCS_HE_MSK)) { int rate = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags, rx_status->band); @@ -1409,7 +1574,6 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, goto out; } rx_status->rate_idx = rate; - } /* management stuff on default queue */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index 11ecdf63b732..ffcd0ca86041 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -19,9 +19,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * @@ -836,16 +833,25 @@ static inline bool iwl_mvm_scan_use_ebs(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { const struct iwl_ucode_capabilities *capa = &mvm->fw->ucode_capa; + bool low_latency; + + if (iwl_mvm_is_cdb_supported(mvm)) + low_latency = iwl_mvm_low_latency_band(mvm, NL80211_BAND_5GHZ); + else + low_latency = iwl_mvm_low_latency(mvm); /* We can only use EBS if: * 1. the feature is supported; * 2. the last EBS was successful; * 3. if only single scan, the single scan EBS API is supported; * 4. it's not a p2p find operation. + * 5. we are not in low latency mode, + * or fragmented EBS is supported by the FW. */ return ((capa->flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT) && mvm->last_ebs_successful && IWL_MVM_ENABLE_EBS && - vif->type != NL80211_IFTYPE_P2P_DEVICE); + vif->type != NL80211_IFTYPE_P2P_DEVICE && + (!low_latency || iwl_mvm_is_frag_ebs_supported(mvm))); } static inline bool iwl_mvm_is_regular_scan(struct iwl_mvm_scan_params *params) @@ -1442,6 +1448,9 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED) cmd->v8.num_of_fragments[SCAN_HB_LMAC_IDX] = IWL_SCAN_NUM_OF_FRAGS; + + cmd->v8.general_flags2 = + IWL_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER; } cmd->scan_start_mac_id = scan_vif->id; @@ -1449,11 +1458,21 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, if (type == IWL_MVM_SCAN_SCHED || type == IWL_MVM_SCAN_NETDETECT) cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE); - if (iwl_mvm_scan_use_ebs(mvm, vif)) + if (iwl_mvm_scan_use_ebs(mvm, vif)) { channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS | IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | IWL_SCAN_CHANNEL_FLAG_CACHE_ADD; + /* set fragmented ebs for fragmented scan on HB channels */ + if (iwl_mvm_is_frag_ebs_supported(mvm)) { + if (gen_flags & + IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED || + (!iwl_mvm_is_cdb_supported(mvm) && + gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED)) + channel_flags |= IWL_SCAN_CHANNEL_FLAG_EBS_FRAG; + } + } + chan_param->flags = channel_flags; chan_param->count = params->n_channels; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sf.c b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c index 539b06bf0803..d1d76bb9a750 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sf.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c @@ -17,11 +17,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details.
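The iwl_mvm_scan_use_ebs() hunk above adds a fifth condition to the EBS decision. A minimal, user-space restatement of the resulting predicate, with the driver's capability checks reduced to plain booleans (struct and field names below are stand-ins, and the compile-time IWL_MVM_ENABLE_EBS switch is omitted; only the rule itself follows the patch):

#include <stdbool.h>
#include <stdio.h>

struct ebs_inputs {
	bool fw_supports_ebs;      /* IWL_UCODE_TLV_FLAGS_EBS_SUPPORT set */
	bool last_ebs_successful;  /* previous EBS scan did not fail */
	bool is_p2p_device;        /* p2p find must not use EBS */
	bool low_latency;          /* HB band when CDB, global otherwise */
	bool frag_ebs_supported;   /* fragmented-EBS FW capability */
};

/* EBS is used only when the FW supports it, the last EBS succeeded,
 * the vif is not a P2P device, and we are either not in low-latency
 * mode or fragmented EBS is available. */
static bool scan_use_ebs(const struct ebs_inputs *in)
{
	return in->fw_supports_ebs &&
	       in->last_ebs_successful &&
	       !in->is_p2p_device &&
	       (!in->low_latency || in->frag_ebs_supported);
}

int main(void)
{
	struct ebs_inputs in = {
		.fw_supports_ebs = true,
		.last_ebs_successful = true,
		.is_p2p_device = false,
		.low_latency = true,
		.frag_ebs_supported = false,
	};

	/* low latency without fragmented EBS: EBS is skipped */
	printf("use EBS: %d\n", scan_use_ebs(&in));  /* prints 0 */
	return 0;
}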
* - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 18db1ed92d9b..8f929c774e70 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -19,11 +19,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * @@ -72,6 +67,14 @@ #include "sta.h" #include "rs.h" +static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm); + +static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm, + u32 sta_id, + struct ieee80211_key_conf *key, bool mcast, + u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags, + u8 key_offset, bool mfp); + /* * New version of ADD_STA_sta command added new fields at the end of the * structure, so sending the size of the relevant API's structure is enough to @@ -2101,6 +2104,19 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0, &cfg, timeout); + if (mvmvif->ap_wep_key) { + u8 key_offset = iwl_mvm_set_fw_key_idx(mvm); + + if (key_offset == STA_KEY_IDX_INVALID) + return -ENOSPC; + + ret = iwl_mvm_send_sta_key(mvm, mvmvif->mcast_sta.sta_id, + mvmvif->ap_wep_key, 1, 0, NULL, 0, + key_offset, 0); + if (ret) + return ret; + } + return 0; } @@ -3133,10 +3149,6 @@ static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm, switch (keyconf->cipher) { case WLAN_CIPHER_SUITE_TKIP: - if (vif->type == NL80211_IFTYPE_AP) { - ret = -EINVAL; - break; - } addr = iwl_mvm_get_mac_addr(mvm, vif, sta); /* get phase 1 key from mac80211 */ ieee80211_get_key_rx_seq(keyconf, 0, &seq); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c index 67f360c0d17e..e02f4eb20359 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c @@ -18,9 +18,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program. - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/testmode.h b/drivers/net/wireless/intel/iwlwifi/mvm/testmode.h index cbbc16fd006a..ff82af11de8d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/testmode.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/testmode.h @@ -17,11 +17,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
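The iwl_mvm_add_mcast_sta() hunk above programs the saved AP WEP group key once the multicast station exists, failing with -ENOSPC when no firmware key slot is free. A compilable toy model of that ordering and its error paths (all types and both helpers are stand-ins; only the flow mirrors the patch):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define STA_KEY_IDX_INVALID 0xff

struct wep_key { int keyidx; };

/* pretend FW key table: report slot 1 as the first free slot */
static int reserve_fw_key_slot(void)
{
	return 1;
}

static int send_sta_key(int sta_id, const struct wep_key *key,
			bool mcast, int slot)
{
	printf("sta %d: WEP key %d -> HW slot %d (mcast=%d)\n",
	       sta_id, key->keyidx, slot, mcast);
	return 0;
}

/* The group key can only be programmed once the mcast station (and
 * its cab queue) exist, hence it is done last in station creation. */
static int add_mcast_sta(int mcast_sta_id, const struct wep_key *ap_wep_key)
{
	int slot;

	/* ... mcast station allocation and queue setup go here ... */

	if (!ap_wep_key)
		return 0;

	slot = reserve_fw_key_slot();
	if (slot == STA_KEY_IDX_INVALID)
		return -ENOSPC;	/* FW key table exhausted */

	return send_sta_key(mcast_sta_id, ap_wep_key, true, slot);
}

int main(void)
{
	struct wep_key key = { .keyidx = 0 };

	return add_mcast_sta(7, &key);
}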
* - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c index cd91bc44259c..e1a6f4e22253 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c @@ -254,17 +254,14 @@ static void iwl_mvm_te_check_trigger(struct iwl_mvm *mvm, struct iwl_fw_dbg_trigger_time_event *te_trig; int i; - if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TIME_EVENT)) + trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, + ieee80211_vif_to_wdev(te_data->vif), + FW_DBG_TRIGGER_TIME_EVENT); + if (!trig) return; - trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TIME_EVENT); te_trig = (void *)trig->data; - if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, - ieee80211_vif_to_wdev(te_data->vif), - trig)) - return; - for (i = 0; i < ARRAY_SIZE(te_trig->time_events); i++) { u32 trig_te_id = le32_to_cpu(te_trig->time_events[i].id); u32 trig_action_bitmap = diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h index 3d2e8b6159bb..1dd3d01245ea 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h @@ -17,11 +17,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tof.c b/drivers/net/wireless/intel/iwlwifi/mvm/tof.c index 2d0b8a391308..01e0a999063b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tof.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tof.c @@ -16,11 +16,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tof.h b/drivers/net/wireless/intel/iwlwifi/mvm/tof.h index 2ff560aa1a82..8138d0606c52 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tof.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tof.h @@ -16,11 +16,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. 
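The time-event.c hunk above is one instance of a cleanup repeated throughout this patch (rx.c, tx.c, utils.c): the three-step enabled/get/check-stop sequence collapses into a single iwl_fw_dbg_trigger_on() lookup that returns NULL unless the trigger both exists and is still active. A schematic, compilable model of the new shape (stand-in types; iwl_fw_dbg_trigger_on is the only real name assumed from the patch):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct trigger { int id; bool stopped; };

static struct trigger triggers[] = { { .id = 1, .stopped = false } };

static struct trigger *trigger_get(int id)
{
	for (size_t i = 0; i < sizeof(triggers) / sizeof(triggers[0]); i++)
		if (triggers[i].id == id)
			return &triggers[i];
	return NULL;	/* trigger not configured in the FW TLVs */
}

static bool trigger_stopped(const struct trigger *trig)
{
	return trig->stopped;	/* per-wdev stop counters in the driver */
}

/* One call answers "does this trigger exist and is it still active?",
 * replacing the old enabled()/get()/check_stop() sequence. */
static struct trigger *trigger_on(int id)
{
	struct trigger *trig = trigger_get(id);

	if (!trig || trigger_stopped(trig))
		return NULL;
	return trig;
}

int main(void)
{
	struct trigger *trig = trigger_on(1);

	if (!trig)
		return 0;
	printf("collect debug data for trigger %d\n", trig->id);
	return 0;
}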
* diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c index 1232f63278eb..0b3e5c99d316 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c @@ -18,11 +18,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index ff193dca2020..99c64ea2619b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -8,6 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -18,11 +19,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * @@ -35,6 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -82,15 +79,12 @@ iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr, struct iwl_fw_dbg_trigger_tlv *trig; struct iwl_fw_dbg_trigger_ba *ba_trig; - if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA)) + trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL, FW_DBG_TRIGGER_BA); + if (!trig) return; - trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA); ba_trig = (void *)trig->data; - if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, NULL, trig)) - return; - if (!(le16_to_cpu(ba_trig->tx_bar) & BIT(tid))) return; @@ -245,14 +239,18 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, iwl_mvm_bar_check_trigger(mvm, bar->ra, tx_cmd->tid_tspec, ssn); } else { - tx_cmd->tid_tspec = IWL_TID_NON_QOS; + if (ieee80211_is_data(fc)) + tx_cmd->tid_tspec = IWL_TID_NON_QOS; + else + tx_cmd->tid_tspec = IWL_MAX_TID_COUNT; + if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) tx_flags |= TX_CMD_FLG_SEQ_CTL; else tx_flags &= ~TX_CMD_FLG_SEQ_CTL; } - /* Default to 0 (BE) when tid_spec is set to IWL_TID_NON_QOS */ + /* Default to 0 (BE) when tid_spec is set to IWL_MAX_TID_COUNT */ if (tx_cmd->tid_tspec < IWL_MAX_TID_COUNT) ac = tid_to_mac80211_ac[tx_cmd->tid_tspec]; else @@ -620,6 +618,66 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm, } } +static void iwl_mvm_probe_resp_set_noa(struct iwl_mvm *mvm, + struct sk_buff *skb) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct iwl_mvm_vif *mvmvif = + iwl_mvm_vif_from_mac80211(info->control.vif); + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data; + int base_len = (u8 *)mgmt->u.probe_resp.variable - (u8 *)mgmt; + struct iwl_probe_resp_data *resp_data; + u8 *ie, *pos; + u8 match[] = { + (WLAN_OUI_WFA >> 16) & 0xff, + (WLAN_OUI_WFA >> 8) & 0xff, + WLAN_OUI_WFA & 0xff, + WLAN_OUI_TYPE_WFA_P2P, + }; + + rcu_read_lock(); + + resp_data = rcu_dereference(mvmvif->probe_resp_data); + if (!resp_data) + goto out; + + if (!resp_data->notif.noa_active) + goto out; + + ie = (u8 *)cfg80211_find_ie_match(WLAN_EID_VENDOR_SPECIFIC, + mgmt->u.probe_resp.variable, + skb->len - base_len, + match, 4, 2); + if (!ie) { + IWL_DEBUG_TX(mvm, "probe resp doesn't have P2P IE\n"); + goto out; + } + + if (skb_tailroom(skb) < resp_data->noa_len) { + if (pskb_expand_head(skb, 0, resp_data->noa_len, GFP_ATOMIC)) { + IWL_ERR(mvm, + "Failed to reallocate probe resp\n"); + goto out; + } + } + + pos = skb_put(skb, resp_data->noa_len); + + *pos++ = WLAN_EID_VENDOR_SPECIFIC; + /* Set length of IE body (not including ID and length itself) */ + *pos++ = resp_data->noa_len - 2; + *pos++ = (WLAN_OUI_WFA >> 16) & 0xff; + *pos++ = (WLAN_OUI_WFA >> 8) & 0xff; + *pos++ = WLAN_OUI_WFA & 0xff; + *pos++ = WLAN_OUI_TYPE_WFA_P2P; + + memcpy(pos, &resp_data->notif.noa_attr, + resp_data->noa_len - sizeof(struct ieee80211_vendor_ie)); + +out: + rcu_read_unlock(); +} + int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; @@ -628,6 +686,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) struct iwl_device_cmd *dev_cmd; u8 sta_id; int hdrlen = ieee80211_hdrlen(hdr->frame_control); + __le16 fc = hdr->frame_control; int queue; /* IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets that can be used @@ -668,7 +727,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE || info.control.vif->type == 
NL80211_IFTYPE_AP || info.control.vif->type == NL80211_IFTYPE_ADHOC) { - if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE) + if (!ieee80211_is_data(hdr->frame_control)) sta_id = mvmvif->bcast_sta.sta_id; else sta_id = mvmvif->mcast_sta.sta_id; @@ -689,6 +748,9 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) } } + if (unlikely(ieee80211_is_probe_resp(fc))) + iwl_mvm_probe_resp_set_noa(mvm, skb); + IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, queue); dev_cmd = iwl_mvm_set_tx_params(mvm, skb, &info, hdrlen, NULL, sta_id); @@ -775,6 +837,36 @@ iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes, return 0; } +static unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm, + struct ieee80211_sta *sta, + unsigned int tid) +{ + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + enum nl80211_band band = mvmsta->vif->bss_conf.chandef.chan->band; + u8 ac = tid_to_mac80211_ac[tid]; + unsigned int txf; + int lmac = IWL_LMAC_24G_INDEX; + + if (iwl_mvm_is_cdb_supported(mvm) && + band == NL80211_BAND_5GHZ) + lmac = IWL_LMAC_5G_INDEX; + + /* For HE redirect to trigger based fifos */ + if (sta->he_cap.has_he && !WARN_ON(!iwl_mvm_has_new_tx_api(mvm))) + ac += 4; + + txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac); + + /* + * Don't send an AMSDU that will be longer than the TXF. + * Add a security margin of 256 for the TX command + headers. + * We also want to have the start of the next packet inside the + * fifo to be able to send bursts. + */ + return min_t(unsigned int, mvmsta->max_amsdu_len, + mvm->fwrt.smem_cfg.lmac[lmac].txfifo_size[txf] - 256); +} + static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, struct ieee80211_tx_info *info, struct ieee80211_sta *sta, @@ -787,7 +879,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, u16 snap_ip_tcp, pad; unsigned int dbg_max_amsdu_len; netdev_features_t netdev_flags = NETIF_F_CSUM_MASK | NETIF_F_SG; - u8 tid, txf; + u8 tid; snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) + tcp_hdrlen(skb); @@ -826,20 +918,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, !(mvmsta->amsdu_enabled & BIT(tid))) return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb); - max_amsdu_len = mvmsta->max_amsdu_len; - - /* the Tx FIFO to which this A-MSDU will be routed */ - txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, tid_to_mac80211_ac[tid]); - - /* - * Don't send an AMSDU that will be longer than the TXF. - * Add a security margin of 256 for the TX command + headers. - * We also want to have the start of the next packet inside the - * fifo to be able to send bursts. 
- */ - max_amsdu_len = min_t(unsigned int, max_amsdu_len, - mvm->fwrt.smem_cfg.lmac[0].txfifo_size[txf] - - 256); + max_amsdu_len = iwl_mvm_max_amsdu_size(mvm, sta, tid); if (unlikely(dbg_max_amsdu_len)) max_amsdu_len = min_t(unsigned int, max_amsdu_len, @@ -1010,6 +1089,9 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_INVALID_STA)) return -1; + if (unlikely(ieee80211_is_probe_resp(fc))) + iwl_mvm_probe_resp_set_noa(mvm, skb); + dev_cmd = iwl_mvm_set_tx_params(mvm, skb, info, hdrlen, sta, mvmsta->sta_id); if (!dev_cmd) @@ -1049,6 +1131,8 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, /* update the tx_cmd hdr as it was already copied */ tx_cmd->hdr->seq_ctrl = hdr->seq_ctrl; } + } else if (ieee80211_is_data(fc) && !ieee80211_is_data_qos(fc)) { + tid = IWL_TID_NON_QOS; } txq_id = mvmsta->tid_data[tid].txq_id; @@ -1327,15 +1411,13 @@ static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm, struct iwl_fw_dbg_trigger_tx_status *status_trig; int i; - if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TX_STATUS)) + trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL, + FW_DBG_TRIGGER_TX_STATUS); + if (!trig) return; - trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TX_STATUS); status_trig = (void *)trig->data; - if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, NULL, trig)) - return; - for (i = 0; i < ARRAY_SIZE(status_trig->statuses); i++) { /* don't collect on status 0 */ if (!status_trig->statuses[i].status) @@ -1405,6 +1487,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, while (!skb_queue_empty(&skbs)) { struct sk_buff *skb = __skb_dequeue(&skbs); struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_hdr *hdr = (void *)skb->data; bool flushed = false; skb_freed++; @@ -1434,6 +1517,14 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, break; } + /* + * If we are freeing multiple frames, mark all the frames + * but the first one as acked, since they were acknowledged + * before. + */ + if (skb_freed > 1) + info->flags |= IEEE80211_TX_STAT_ACK; + iwl_mvm_tx_status_check_trigger(mvm, status); info->status.rates[0].count = tx_resp->failure_frame + 1; @@ -1449,11 +1540,11 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; info->flags &= ~IEEE80211_TX_CTL_AMPDU; - /* W/A FW bug: seq_ctl is wrong when the status isn't success */ - if (status != TX_STATUS_SUCCESS) { - struct ieee80211_hdr *hdr = (void *)skb->data; + /* W/A FW bug: seq_ctl is wrong upon failure / BAR frame */ + if (ieee80211_is_back_req(hdr->frame_control)) + seq_ctl = 0; + else if (status != TX_STATUS_SUCCESS) seq_ctl = le16_to_cpu(hdr->seq_ctrl); - } if (unlikely(!seq_ctl)) { struct ieee80211_hdr *hdr = (void *)skb->data; @@ -1525,7 +1616,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, iwl_mvm_tx_airtime(mvm, mvmsta, le16_to_cpu(tx_resp->wireless_media_time)); - if (tid != IWL_TID_NON_QOS && tid != IWL_MGMT_TID) { + if (sta->wme && tid != IWL_MGMT_TID) { struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; bool send_eosp_ndp = false; @@ -1645,20 +1736,24 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm, u16 sequence = le16_to_cpu(pkt->hdr.sequence); struct iwl_mvm_sta *mvmsta; int queue = SEQ_TO_QUEUE(sequence); + struct ieee80211_sta *sta; if (WARN_ON_ONCE(queue < IWL_MVM_DQA_MIN_DATA_QUEUE && (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE))) return; - if (WARN_ON_ONCE(tid == IWL_TID_NON_QOS)) - return; -
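iwl_mvm_max_amsdu_size(), factored out above, caps the A-MSDU at the size of the TX FIFO it will be routed to, minus a 256-byte margin for the TX command and headers, so the start of the next packet still fits in the FIFO. A tiny arithmetic model of that cap (the FIFO and station numbers here are invented for the example):

#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

static unsigned int max_amsdu_size(unsigned int sta_max_amsdu_len,
				   unsigned int txf_size)
{
	/* 256-byte security margin for the TX command + headers */
	return min_u(sta_max_amsdu_len, txf_size - 256);
}

int main(void)
{
	/* station advertises 12k A-MSDUs, but the FIFO only holds 8k */
	printf("%u\n", max_amsdu_size(12288, 8192));  /* prints 7936 */
	return 0;
}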
iwl_mvm_rx_tx_cmd_agg_dbg(mvm, pkt); rcu_read_lock(); mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id); + sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); + if (WARN_ON_ONCE(!sta || !sta->wme)) { + rcu_read_unlock(); + return; + } + if (!WARN_ON_ONCE(!mvmsta)) { mvmsta->tid_data[tid].rate_n_flags = le32_to_cpu(tx_resp->initial_rate); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index b002a7afb5f5..6c14d3413bdc 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -19,11 +19,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * @@ -551,7 +546,6 @@ static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u32 base) IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version); - trace_iwlwifi_dev_ucode_error(trans->dev, &table, table.hw_ver, table.brd_ver); IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id, desc_lookup(table.error_id)); IWL_ERR(mvm, "0x%08X | trm_hw_status0\n", table.trm_hw_status0); @@ -725,19 +719,15 @@ static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue, int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue, u8 sta_id, u8 tid, unsigned int timeout) { - struct iwl_tx_queue_cfg_cmd cmd = { - .flags = cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE), - .sta_id = sta_id, - .tid = tid, - }; int queue, size = IWL_DEFAULT_QUEUE_SIZE; - if (cmd.tid == IWL_MAX_TID_COUNT) { - cmd.tid = IWL_MGMT_TID; + if (tid == IWL_MAX_TID_COUNT) { + tid = IWL_MGMT_TID; size = IWL_MGMT_QUEUE_SIZE; } - queue = iwl_trans_txq_alloc(mvm->trans, (void *)&cmd, - SCD_QUEUE_CFG, size, timeout); + queue = iwl_trans_txq_alloc(mvm->trans, + cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE), + sta_id, tid, SCD_QUEUE_CFG, size, timeout); if (queue < 0) { IWL_DEBUG_TX_QUEUES(mvm, @@ -900,20 +890,19 @@ int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, /** * iwl_mvm_send_lq_cmd() - Send link quality command - * @init: This command is sent as part of station initialization right - * after station has been added. + * @sync: This command can be sent synchronously. * * The link quality command is sent as the last step of station creation. * This is the special case in which init is set and we call a callback in * this case to clear the state indicating that station creation is in * progress. */ -int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init) +int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool sync) { struct iwl_host_cmd cmd = { .id = LQ_CMD, .len = { sizeof(struct iwl_lq_cmd), }, - .flags = init ? 0 : CMD_ASYNC, + .flags = sync ? 
0 : CMD_ASYNC, .data = { lq, }, }; @@ -1249,14 +1238,12 @@ void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_fw_dbg_trigger_tlv *trig; struct iwl_fw_dbg_trigger_mlme *trig_mlme; - if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME)) + trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), + FW_DBG_TRIGGER_MLME); + if (!trig) goto out; - trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME); trig_mlme = (void *)trig->data; - if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, - ieee80211_vif_to_wdev(vif), trig)) - goto out; if (trig_mlme->stop_connection_loss && --trig_mlme->stop_connection_loss) @@ -1441,14 +1428,12 @@ void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm, struct iwl_fw_dbg_trigger_tlv *trig; struct iwl_fw_dbg_trigger_ba *ba_trig; - if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA)) + trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), + FW_DBG_TRIGGER_BA); + if (!trig) return; - trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA); ba_trig = (void *)trig->data; - if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, - ieee80211_vif_to_wdev(vif), trig)) - return; if (!(le16_to_cpu(ba_trig->frame_timeout) & BIT(tid))) return; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c index 2146fda8da2f..05ed4fb88e0c 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c @@ -96,9 +96,9 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, /* Configure debug, for integration */ iwl_pcie_alloc_fw_monitor(trans, 0); prph_sc_ctrl->hwm_cfg.hwm_base_addr = - cpu_to_le64(trans_pcie->fw_mon_phys); + cpu_to_le64(trans->fw_mon[0].physical); prph_sc_ctrl->hwm_cfg.hwm_size = - cpu_to_le32(trans_pcie->fw_mon_size); + cpu_to_le32(trans->fw_mon[0].size); /* allocate ucode sections in dram and set addresses */ ret = iwl_pcie_init_fw_sec(trans, fw, &prph_scratch->dram); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c index b2cd7ef5fc3a..6f45a0303ddd 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c @@ -162,7 +162,7 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans, struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_context_info *ctxt_info; struct iwl_context_info_rbd_cfg *rx_cfg; - u32 control_flags = 0; + u32 control_flags = 0, rb_size; int ret; ctxt_info = dma_alloc_coherent(trans->dev, sizeof(*ctxt_info), @@ -177,11 +177,29 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans, /* size is in DWs */ ctxt_info->version.size = cpu_to_le16(sizeof(*ctxt_info) / 4); + switch (trans_pcie->rx_buf_size) { + case IWL_AMSDU_2K: + rb_size = IWL_CTXT_INFO_RB_SIZE_2K; + break; + case IWL_AMSDU_4K: + rb_size = IWL_CTXT_INFO_RB_SIZE_4K; + break; + case IWL_AMSDU_8K: + rb_size = IWL_CTXT_INFO_RB_SIZE_8K; + break; + case IWL_AMSDU_12K: + rb_size = IWL_CTXT_INFO_RB_SIZE_12K; + break; + default: + WARN_ON(1); + rb_size = IWL_CTXT_INFO_RB_SIZE_4K; + } + BUILD_BUG_ON(RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE) > 0xF); - control_flags = IWL_CTXT_INFO_RB_SIZE_4K | - IWL_CTXT_INFO_TFD_FORMAT_LONG | - RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE) << - IWL_CTXT_INFO_RB_CB_SIZE_POS; + control_flags = IWL_CTXT_INFO_TFD_FORMAT_LONG | + (RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE) << + IWL_CTXT_INFO_RB_CB_SIZE_POS) | + 
(rb_size << IWL_CTXT_INFO_RB_SIZE_POS); ctxt_info->control.control_flags = cpu_to_le32(control_flags); /* initialize RX default queue */ diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index b150da4c6721..9e015212c2c0 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -19,11 +19,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * @@ -646,34 +641,33 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x31DC, 0x40A4, iwl9462_2ac_cfg_shared_clk)}, {IWL_PCI_DEVICE(0x31DC, 0x4234, iwl9560_2ac_cfg_shared_clk)}, {IWL_PCI_DEVICE(0x31DC, 0x42A4, iwl9462_2ac_cfg_shared_clk)}, - {IWL_PCI_DEVICE(0x34F0, 0x0030, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x34F0, 0x0034, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x34F0, 0x0038, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x34F0, 0x003C, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x34F0, 0x0060, iwl9461_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x34F0, 0x0064, iwl9461_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x34F0, 0x00A0, iwl9462_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x34F0, 0x00A4, iwl9462_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x34F0, 0x0230, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x34F0, 0x0234, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x34F0, 0x0238, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x34F0, 0x023C, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x34F0, 0x0260, iwl9461_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x34F0, 0x0264, iwl9461_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x34F0, 0x02A0, iwl9462_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x34F0, 0x02A4, iwl9462_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x34F0, 0x1010, iwl9260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x34F0, 0x1030, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x34F0, 0x1210, iwl9260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x34F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x34F0, 0x1552, iwl9560_killer_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x34F0, 0x2030, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x34F0, 0x2034, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x34F0, 0x4030, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x34F0, 0x4034, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x34F0, 0x40A4, iwl9462_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x34F0, 0x4234, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x34F0, 0x42A4, iwl9462_2ac_cfg_soc)}, + + {IWL_PCI_DEVICE(0x34F0, 0x0030, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x0038, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x003C, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x0260, 
iwl9461_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x2030, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x2034, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x4030, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x4034, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x3DF0, 0x0030, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x3DF0, 0x0034, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x3DF0, 0x0038, iwl9560_2ac_cfg_soc)}, diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index b63d44b7cd7c..f9c4c64dee66 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -17,9 +17,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program. - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * @@ -102,66 +99,6 @@ struct isr_statistics { u32 unhandled; }; -#define IWL_CD_STTS_OPTIMIZED_POS 0 -#define IWL_CD_STTS_OPTIMIZED_MSK 0x01 -#define IWL_CD_STTS_TRANSFER_STATUS_POS 1 -#define IWL_CD_STTS_TRANSFER_STATUS_MSK 0x0E -#define IWL_CD_STTS_WIFI_STATUS_POS 4 -#define IWL_CD_STTS_WIFI_STATUS_MSK 0xF0 - -/** - * enum iwl_completion_desc_transfer_status - transfer status (bits 1-3) - * @IWL_CD_STTS_END_TRANSFER: successful transfer complete. - * In sniffer mode, when split is used, set in last CD completion. (RX) - * @IWL_CD_STTS_OVERFLOW: In sniffer mode, when using split - used for - * all CD completion. (RX) - * @IWL_CD_STTS_ABORTED: CR abort / close flow. 
(RX) - */ -enum iwl_completion_desc_transfer_status { - IWL_CD_STTS_UNUSED, - IWL_CD_STTS_UNUSED_2, - IWL_CD_STTS_END_TRANSFER, - IWL_CD_STTS_OVERFLOW, - IWL_CD_STTS_ABORTED, - IWL_CD_STTS_ERROR, -}; - -/** - * enum iwl_completion_desc_wifi_status - wifi status (bits 4-7) - * @IWL_CD_STTS_VALID: the packet is valid (RX) - * @IWL_CD_STTS_FCS_ERR: frame check sequence error (RX) - * @IWL_CD_STTS_SEC_KEY_ERR: error handling the security key of rx (RX) - * @IWL_CD_STTS_DECRYPTION_ERR: error decrypting the frame (RX) - * @IWL_CD_STTS_DUP: duplicate packet (RX) - * @IWL_CD_STTS_ICV_MIC_ERR: MIC error (RX) - * @IWL_CD_STTS_INTERNAL_SNAP_ERR: problems removing the snap (RX) - * @IWL_CD_STTS_SEC_PORT_FAIL: security port fail (RX) - * @IWL_CD_STTS_BA_OLD_SN: block ack received old SN (RX) - * @IWL_CD_STTS_QOS_NULL: QoS null packet (RX) - * @IWL_CD_STTS_MAC_HDR_ERR: MAC header conversion error (RX) - * @IWL_CD_STTS_MAX_RETRANS: reached max number of retransmissions (TX) - * @IWL_CD_STTS_EX_LIFETIME: exceeded lifetime (TX) - * @IWL_CD_STTS_NOT_USED: completed but not used (RX) - * @IWL_CD_STTS_REPLAY_ERR: pn check failed, replay error (RX) - */ -enum iwl_completion_desc_wifi_status { - IWL_CD_STTS_VALID, - IWL_CD_STTS_FCS_ERR, - IWL_CD_STTS_SEC_KEY_ERR, - IWL_CD_STTS_DECRYPTION_ERR, - IWL_CD_STTS_DUP, - IWL_CD_STTS_ICV_MIC_ERR, - IWL_CD_STTS_INTERNAL_SNAP_ERR, - IWL_CD_STTS_SEC_PORT_FAIL, - IWL_CD_STTS_BA_OLD_SN, - IWL_CD_STTS_QOS_NULL, - IWL_CD_STTS_MAC_HDR_ERR, - IWL_CD_STTS_MAX_RETRANS, - IWL_CD_STTS_EX_LIFETIME, - IWL_CD_STTS_NOT_USED, - IWL_CD_STTS_REPLAY_ERR, -}; - #define IWL_RX_TD_TYPE_MSK 0xff000000 #define IWL_RX_TD_SIZE_MSK 0x00ffffff #define IWL_RX_TD_SIZE_2K BIT(11) @@ -464,18 +401,6 @@ enum iwl_image_response_code { }; /** - * struct iwl_dram_data - * @physical: page phy pointer - * @block: pointer to the allocated block/page - * @size: size of the block/page - */ -struct iwl_dram_data { - dma_addr_t physical; - void *block; - int size; -}; - -/** * struct iwl_self_init_dram - dram data used by self init process * @fw: lmac and umac dram data * @fw_cnt: total number of items in array @@ -516,6 +441,7 @@ struct iwl_self_init_dram { * @ucode_write_complete: indicates that the ucode has been copied. 
* @ucode_write_waitq: wait queue for uCode load * @cmd_queue - command queue number + * @def_rx_queue - default rx queue number * @rx_buf_size: Rx buffer size * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes) * @scd_set_active: should the transport configure the SCD for HCMD queue @@ -525,9 +451,6 @@ struct iwl_self_init_dram { * @reg_lock: protect hw register access * @mutex: to protect stop_device / start_fw / start_hw * @cmd_in_flight: true when we have a host command in flight - * @fw_mon_phys: physical address of the buffer for the firmware monitor - * @fw_mon_page: points to the first page of the buffer for the firmware monitor - * @fw_mon_size: size of the buffer for the firmware monitor * @msix_entries: array of MSI-X entries * @msix_enabled: true if managed to enable MSI-X * @shared_vec_mask: the type of causes the shared vector handles @@ -539,7 +462,6 @@ struct iwl_self_init_dram { * @fh_mask: current unmasked fh causes * @hw_mask: current unmasked hw causes * @in_rescan: true if we have triggered a device rescan - * @scheduled_for_removal: true if we have scheduled a device removal */ struct iwl_trans_pcie { struct iwl_rxq *rxq; @@ -596,6 +518,7 @@ struct iwl_trans_pcie { u8 page_offs, dev_cmd_offs; u8 cmd_queue; + u8 def_rx_queue; u8 cmd_fifo; unsigned int cmd_q_wdg_timeout; u8 n_no_reclaim_cmds; @@ -615,10 +538,6 @@ struct iwl_trans_pcie { bool cmd_hold_nic_awake; bool ref_cmd_in_flight; - dma_addr_t fw_mon_phys; - struct page *fw_mon_page; - u32 fw_mon_size; - struct msix_entry msix_entries[IWL_MAX_RX_HW_QUEUES]; bool msix_enabled; u8 shared_vec_mask; @@ -631,7 +550,6 @@ struct iwl_trans_pcie { cpumask_t affinity_mask[IWL_MAX_RX_HW_QUEUES]; u16 tx_cmd_queue_size; bool in_rescan; - bool scheduled_for_removal; }; static inline struct iwl_trans_pcie * @@ -673,6 +591,7 @@ void iwl_trans_pcie_free(struct iwl_trans *trans); /***************************************************** * RX ******************************************************/ +int _iwl_pcie_rx_init(struct iwl_trans *trans); int iwl_pcie_rx_init(struct iwl_trans *trans); int iwl_pcie_gen2_rx_init(struct iwl_trans *trans); irqreturn_t iwl_pcie_msix_isr(int irq, void *data); @@ -686,6 +605,7 @@ void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq); int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget); void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority, struct iwl_rxq *rxq); +int iwl_pcie_rx_alloc(struct iwl_trans *trans); /***************************************************** * ICT - interrupt handling @@ -700,7 +620,8 @@ void iwl_pcie_disable_ict(struct iwl_trans *trans); * TX / HCMD ******************************************************/ int iwl_pcie_tx_init(struct iwl_trans *trans); -int iwl_pcie_gen2_tx_init(struct iwl_trans *trans); +int iwl_pcie_gen2_tx_init(struct iwl_trans *trans, int txq_id, + int queue_size); void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr); int iwl_pcie_tx_stop(struct iwl_trans *trans); void iwl_pcie_tx_free(struct iwl_trans *trans); @@ -717,11 +638,17 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_device_cmd *dev_cmd, int txq_id); void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans); int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd); +void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx); +void iwl_pcie_gen2_txq_inc_wr_ptr(struct iwl_trans *trans, + struct iwl_txq *txq); void iwl_pcie_hcmd_complete(struct iwl_trans *trans, struct 
iwl_rx_cmd_buffer *rxb); void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, struct sk_buff_head *skbs); void iwl_trans_pcie_tx_reset(struct iwl_trans *trans); +void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie, + struct iwl_txq *txq, u16 byte_cnt, + int num_tbs); static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_trans *trans, void *_tfd, u8 idx) @@ -1039,6 +966,7 @@ static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans, } void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state); +void iwl_trans_pcie_dump_regs(struct iwl_trans *trans); #ifdef CONFIG_IWLWIFI_DEBUGFS int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans); @@ -1057,6 +985,7 @@ void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable); void iwl_pcie_rx_allocator_work(struct work_struct *data); /* common functions that are used by gen2 transport */ +int iwl_pcie_gen2_apm_init(struct iwl_trans *trans); void iwl_pcie_apm_config(struct iwl_trans *trans); int iwl_pcie_prepare_card_hw(struct iwl_trans *trans); void iwl_pcie_synchronize_irqs(struct iwl_trans *trans); @@ -1088,8 +1017,16 @@ void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power); int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans, const struct fw_img *fw, bool run_in_rfkill); void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr); +void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans, + struct iwl_txq *txq); +int iwl_trans_pcie_dyn_txq_alloc_dma(struct iwl_trans *trans, + struct iwl_txq **intxq, int size, + unsigned int timeout); +int iwl_trans_pcie_txq_alloc_response(struct iwl_trans *trans, + struct iwl_txq *txq, + struct iwl_host_cmd *hcmd); int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans, - struct iwl_tx_queue_cfg_cmd *cmd, + __le16 flags, u8 sta_id, u8 tid, int cmd_id, int size, unsigned int timeout); void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index d017aa2a0a8b..e965cc588850 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -17,9 +17,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program. - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * @@ -776,7 +773,7 @@ err: return -ENOMEM; } -static int iwl_pcie_rx_alloc(struct iwl_trans *trans) +int iwl_pcie_rx_alloc(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rb_allocator *rba = &trans_pcie->rba; @@ -1002,7 +999,7 @@ int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget) return 0; } -static int _iwl_pcie_rx_init(struct iwl_trans *trans) +int _iwl_pcie_rx_init(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rxq *def_rxq; @@ -1107,6 +1104,9 @@ int iwl_pcie_rx_init(struct iwl_trans *trans) int iwl_pcie_gen2_rx_init(struct iwl_trans *trans) { + /* Set interrupt coalescing timer to default (2048 usecs) */ + iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF); + /* * We don't configure the RFH. * Restock will be done at alive, after firmware configured the RFH. 
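The rx.c hunks that follow factor the recurring "hand the used RBDs back to the background allocator" step into a helper, iwl_pcie_rx_move_to_allocator(). The underlying pattern is an O(1) list splice under the allocator's lock; here is a minimal standalone sketch of that pattern, where the toy_* names are invented stand-ins for the driver's real structures:

    /* Illustrative sketch only: toy_* types stand in for the driver's. */
    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct toy_allocator {
            spinlock_t lock;
            struct list_head rbd_empty;     /* descriptors awaiting fresh pages */
    };

    struct toy_rxq {
            struct list_head rx_used;       /* descriptors the RX path is done with */
    };

    static void toy_rx_move_to_allocator(struct toy_rxq *rxq,
                                         struct toy_allocator *rba)
    {
            spin_lock(&rba->lock);
            /* Moves the whole list in one step and re-inits the source head. */
            list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
            spin_unlock(&rba->lock);
    }

Because list_splice_tail_init() leaves rx_used empty and immediately reusable, the same helper can serve both the normal reuse path and the new emergency path in the hunks below without extra bookkeeping.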
@@ -1144,6 +1144,14 @@ void iwl_pcie_rx_free(struct iwl_trans *trans) kfree(trans_pcie->rxq); } +static void iwl_pcie_rx_move_to_allocator(struct iwl_rxq *rxq, + struct iwl_rb_allocator *rba) +{ + spin_lock(&rba->lock); + list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty); + spin_unlock(&rba->lock); +} + /* * iwl_pcie_rx_reuse_rbd - Recycle used RBDs * @@ -1175,9 +1183,7 @@ static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans, if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) { /* Move the 2 RBDs to the allocator ownership. Allocator has another 6 from pool for the request completion*/ - spin_lock(&rba->lock); - list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty); - spin_unlock(&rba->lock); + iwl_pcie_rx_move_to_allocator(rxq, rba); atomic_inc(&rba->req_pending); queue_work(rba->alloc_wq, &rba->rx_alloc); @@ -1187,7 +1193,8 @@ static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans, static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, struct iwl_rxq *rxq, struct iwl_rx_mem_buffer *rxb, - bool emergency) + bool emergency, + int i) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue]; @@ -1213,6 +1220,9 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, .truesize = max_len, }; + if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) + rxcb.status = rxq->cd[i].status; + pkt = rxb_addr(&rxcb); if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID)) { @@ -1267,7 +1277,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, index = SEQ_TO_INDEX(sequence); cmd_index = iwl_pcie_get_cmd_index(txq, index); - if (rxq->id == 0) + if (rxq->id == trans_pcie->def_rx_queue) iwl_op_mode_rx(trans->op_mode, &rxq->napi, &rxcb); else @@ -1396,17 +1406,25 @@ restart: IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r); while (i != r) { + struct iwl_rb_allocator *rba = &trans_pcie->rba; struct iwl_rx_mem_buffer *rxb; - - if (unlikely(rxq->used_count == rxq->queue_size / 2)) + /* number of RBDs still waiting for page allocation */ + u32 rb_pending_alloc = + atomic_read(&trans_pcie->rba.req_pending) * + RX_CLAIM_REQ_ALLOC; + + if (unlikely(rb_pending_alloc >= rxq->queue_size / 2 && + !emergency)) { + iwl_pcie_rx_move_to_allocator(rxq, rba); emergency = true; + } rxb = iwl_pcie_get_rxb(trans, rxq, i); if (!rxb) goto out; IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i); - iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency); + iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency, i); i = (i + 1) & (rxq->queue_size - 1); @@ -1421,17 +1439,13 @@ restart: iwl_pcie_rx_allocator_get(trans, rxq); if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && !emergency) { - struct iwl_rb_allocator *rba = &trans_pcie->rba; - /* Add the remaining empty RBDs for allocator use */ - spin_lock(&rba->lock); - list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty); - spin_unlock(&rba->lock); + iwl_pcie_rx_move_to_allocator(rxq, rba); } else if (emergency) { count++; if (count == 8) { count = 0; - if (rxq->used_count < rxq->queue_size / 3) + if (rb_pending_alloc < rxq->queue_size / 3) emergency = false; rxq->read = i; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c index 2bc67219ed3e..77f3610e5ca9 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c @@ -55,13 +55,14 @@ #include "iwl-context-info.h" #include "iwl-context-info-gen3.h" #include "internal.h" 
+#include "fw/dbg.h" /* * Start up NIC's basic functionality after it has been reset * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop()) * NOTE: This does not load uCode nor start the embedded processor */ -static int iwl_pcie_gen2_apm_init(struct iwl_trans *trans) +int iwl_pcie_gen2_apm_init(struct iwl_trans *trans) { int ret = 0; @@ -164,9 +165,7 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power) trans_pcie->is_down = true; /* Stop dbgc before stopping device */ - iwl_write_prph(trans, DBGC_IN_SAMPLE, 0); - udelay(100); - iwl_write_prph(trans, DBGC_OUT_CTRL, 0); + _iwl_fw_dbg_stop_recording(trans, NULL); /* tell the device to stop sending interrupts */ iwl_disable_interrupts(trans); @@ -265,7 +264,7 @@ static int iwl_pcie_gen2_nic_init(struct iwl_trans *trans) return -ENOMEM; /* Allocate or reset and init all Tx and Command queues */ - if (iwl_pcie_gen2_tx_init(trans)) + if (iwl_pcie_gen2_tx_init(trans, trans_pcie->cmd_queue, TFD_CMD_SLOTS)) return -ENOMEM; /* enable shadow regs in HW */ diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 7d319b6863fe..5bafb3f46eb8 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -19,11 +19,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * @@ -92,7 +87,7 @@ #define IWL_FW_MEM_EXTENDED_START 0x40000 #define IWL_FW_MEM_EXTENDED_END 0x57FFF -static void iwl_trans_pcie_dump_regs(struct iwl_trans *trans) +void iwl_trans_pcie_dump_regs(struct iwl_trans *trans) { #define PCI_DUMP_SIZE 64 #define PREFIX_LEN 32 @@ -190,72 +185,42 @@ static void iwl_trans_pcie_sw_reset(struct iwl_trans *trans) static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - - if (!trans_pcie->fw_mon_page) - return; + int i; - dma_unmap_page(trans->dev, trans_pcie->fw_mon_phys, - trans_pcie->fw_mon_size, DMA_FROM_DEVICE); - __free_pages(trans_pcie->fw_mon_page, - get_order(trans_pcie->fw_mon_size)); - trans_pcie->fw_mon_page = NULL; - trans_pcie->fw_mon_phys = 0; - trans_pcie->fw_mon_size = 0; + for (i = 0; i < trans->num_blocks; i++) { + dma_free_coherent(trans->dev, trans->fw_mon[i].size, + trans->fw_mon[i].block, + trans->fw_mon[i].physical); + trans->fw_mon[i].block = NULL; + trans->fw_mon[i].physical = 0; + trans->fw_mon[i].size = 0; + trans->num_blocks--; + } } -void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power) +static void iwl_pcie_alloc_fw_monitor_block(struct iwl_trans *trans, + u8 max_power, u8 min_power) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct page *page = NULL; - dma_addr_t phys; + void *cpu_addr = NULL; + dma_addr_t phys = 0; u32 size = 0; u8 power; - if (!max_power) { - /* default max_power is maximum */ - max_power = 26; - } else { - max_power += 11; - } - - if (WARN(max_power > 26, - "External buffer size for monitor is too big %d, check the FW TLV\n", - max_power)) - return; - - if (trans_pcie->fw_mon_page) { - dma_sync_single_for_device(trans->dev, trans_pcie->fw_mon_phys, - 
trans_pcie->fw_mon_size, - DMA_FROM_DEVICE); - return; - } - - phys = 0; - for (power = max_power; power >= 11; power--) { - int order; - + for (power = max_power; power >= min_power; power--) { size = BIT(power); - order = get_order(size); - page = alloc_pages(__GFP_COMP | __GFP_NOWARN | __GFP_ZERO, - order); - if (!page) + cpu_addr = dma_alloc_coherent(trans->dev, size, &phys, + GFP_KERNEL | __GFP_NOWARN | + __GFP_ZERO | __GFP_COMP); + if (!cpu_addr) continue; - phys = dma_map_page(trans->dev, page, 0, PAGE_SIZE << order, - DMA_FROM_DEVICE); - if (dma_mapping_error(trans->dev, phys)) { - __free_pages(page, order); - page = NULL; - continue; - } IWL_INFO(trans, - "Allocated 0x%08x bytes (order %d) for firmware monitor.\n", - size, order); + "Allocated 0x%08x bytes for firmware monitor.\n", + size); break; } - if (WARN_ON_ONCE(!page)) + if (WARN_ON_ONCE(!cpu_addr)) return; if (power != max_power) @@ -264,9 +229,34 @@ void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power) (unsigned long)BIT(power - 10), (unsigned long)BIT(max_power - 10)); - trans_pcie->fw_mon_page = page; - trans_pcie->fw_mon_phys = phys; - trans_pcie->fw_mon_size = size; + trans->fw_mon[trans->num_blocks].block = cpu_addr; + trans->fw_mon[trans->num_blocks].physical = phys; + trans->fw_mon[trans->num_blocks].size = size; + trans->num_blocks++; +} + +void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power) +{ + if (!max_power) { + /* default max_power is maximum */ + max_power = 26; + } else { + max_power += 11; + } + + if (WARN(max_power > 26, + "External buffer size for monitor is too big %d, check the FW TLV\n", + max_power)) + return; + + /* + * This function allocates the default fw monitor. + * The optional additional ones will be allocated at runtime. + */ + if (trans->num_blocks) + return; + + iwl_pcie_alloc_fw_monitor_block(trans, max_power, 11); } static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg) @@ -930,7 +920,6 @@ static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans, void iwl_pcie_apply_destination(struct iwl_trans *trans) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); const struct iwl_fw_dbg_dest_tlv_v1 *dest = trans->dbg_dest_tlv; int i; @@ -942,7 +931,7 @@ void iwl_pcie_apply_destination(struct iwl_trans *trans) else IWL_WARN(trans, "PCI should have external buffer debug\n"); - for (i = 0; i < trans->dbg_dest_reg_num; i++) { + for (i = 0; i < trans->dbg_n_dest_reg; i++) { u32 addr = le32_to_cpu(dest->reg_ops[i].addr); u32 val = le32_to_cpu(dest->reg_ops[i].val); @@ -981,18 +970,18 @@ } monitor: - if (dest->monitor_mode == EXTERNAL_MODE && trans_pcie->fw_mon_size) { + if (dest->monitor_mode == EXTERNAL_MODE && trans->fw_mon[0].size) { iwl_write_prph(trans, le32_to_cpu(dest->base_reg), - trans_pcie->fw_mon_phys >> dest->base_shift); + trans->fw_mon[0].physical >> dest->base_shift); if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000) iwl_write_prph(trans, le32_to_cpu(dest->end_reg), - (trans_pcie->fw_mon_phys + - trans_pcie->fw_mon_size - 256) >> + (trans->fw_mon[0].physical + + trans->fw_mon[0].size - 256) >> dest->end_shift); else iwl_write_prph(trans, le32_to_cpu(dest->end_reg), - (trans_pcie->fw_mon_phys + - trans_pcie->fw_mon_size) >> + (trans->fw_mon[0].physical + + trans->fw_mon[0].size) >> dest->end_shift); } } @@ -1000,7 +989,6 @@ monitor: static int iwl_pcie_load_given_ucode(struct iwl_trans *trans, const struct fw_img *image) { - struct iwl_trans_pcie
*trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int ret = 0; int first_ucode_section; @@ -1030,12 +1018,12 @@ static int iwl_pcie_load_given_ucode(struct iwl_trans *trans, trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) { iwl_pcie_alloc_fw_monitor(trans, 0); - if (trans_pcie->fw_mon_size) { + if (trans->fw_mon[0].size) { iwl_write_prph(trans, MON_BUFF_BASE_ADDR, - trans_pcie->fw_mon_phys >> 4); + trans->fw_mon[0].physical >> 4); iwl_write_prph(trans, MON_BUFF_END_ADDR, - (trans_pcie->fw_mon_phys + - trans_pcie->fw_mon_size) >> 4); + (trans->fw_mon[0].physical + + trans->fw_mon[0].size) >> 4); } } else if (trans->dbg_dest_tlv) { iwl_pcie_apply_destination(trans); @@ -1262,13 +1250,7 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power) trans_pcie->is_down = true; /* Stop dbgc before stopping device */ - if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) { - iwl_set_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x100); - } else { - iwl_write_prph(trans, DBGC_IN_SAMPLE, 0); - udelay(100); - iwl_write_prph(trans, DBGC_OUT_CTRL, 0); - } + _iwl_fw_dbg_stop_recording(trans, NULL); /* tell the device to stop sending interrupts */ iwl_disable_interrupts(trans); @@ -1830,18 +1812,30 @@ static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs) return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs); } +static u32 iwl_trans_pcie_prph_msk(struct iwl_trans *trans) +{ + if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) + return 0x00FFFFFF; + else + return 0x000FFFFF; +} + static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg) { + u32 mask = iwl_trans_pcie_prph_msk(trans); + iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR, - ((reg & 0x000FFFFF) | (3 << 24))); + ((reg & mask) | (3 << 24))); return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT); } static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr, u32 val) { + u32 mask = iwl_trans_pcie_prph_msk(trans); + iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR, - ((addr & 0x000FFFFF) | (3 << 24))); + ((addr & mask) | (3 << 24))); iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val); } @@ -2013,7 +2007,7 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, if (iwlwifi_mod_params.remove_when_gone && cntrl == ~0U) { struct iwl_trans_pcie_removal *removal; - if (trans_pcie->scheduled_for_removal) + if (test_bit(STATUS_TRANS_DEAD, &trans->status)) goto err; IWL_ERR(trans, "Device gone - scheduling removal!\n"); @@ -2039,7 +2033,7 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, * we don't need to clear this flag, because * the trans will be freed and reallocated. 
*/ - trans_pcie->scheduled_for_removal = true; + set_bit(STATUS_TRANS_DEAD, &trans->status); removal->pdev = to_pci_dev(trans->dev); INIT_WORK(&removal->work, iwl_trans_pcie_removal_wk); @@ -2266,6 +2260,10 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx) unsigned long now = jiffies; u8 wr_ptr; + /* Make sure the NIC is still alive in the bus */ + if (test_bit(STATUS_TRANS_DEAD, &trans->status)) + return -ENODEV; + if (!test_bit(txq_idx, trans_pcie->queue_used)) return -EINVAL; @@ -2861,10 +2859,9 @@ iwl_trans_pcie_dump_monitor(struct iwl_trans *trans, struct iwl_fw_error_dump_data **data, u32 monitor_len) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); u32 len = 0; - if ((trans_pcie->fw_mon_page && + if ((trans->num_blocks && trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) || trans->dbg_dest_tlv) { struct iwl_fw_error_dump_fw_mon *fw_mon_data; @@ -2892,22 +2889,12 @@ iwl_trans_pcie_dump_monitor(struct iwl_trans *trans, cpu_to_le32(iwl_read_prph(trans, base)); len += sizeof(**data) + sizeof(*fw_mon_data); - if (trans_pcie->fw_mon_page) { - /* - * The firmware is now asserted, it won't write anything - * to the buffer. CPU can take ownership to fetch the - * data. The buffer will be handed back to the device - * before the firmware will be restarted. - */ - dma_sync_single_for_cpu(trans->dev, - trans_pcie->fw_mon_phys, - trans_pcie->fw_mon_size, - DMA_FROM_DEVICE); + if (trans->num_blocks) { memcpy(fw_mon_data->data, - page_address(trans_pcie->fw_mon_page), - trans_pcie->fw_mon_size); + trans->fw_mon[0].block, + trans->fw_mon[0].size); - monitor_len = trans_pcie->fw_mon_size; + monitor_len = trans->fw_mon[0].size; } else if (trans->dbg_dest_tlv->monitor_mode == SMEM_MODE) { /* * Update pointers to reflect actual values after @@ -2943,36 +2930,15 @@ iwl_trans_pcie_dump_monitor(struct iwl_trans *trans, return len; } -static struct iwl_trans_dump_data -*iwl_trans_pcie_dump_data(struct iwl_trans *trans, - const struct iwl_fw_dbg_trigger_tlv *trigger) +static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, int *len) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_fw_error_dump_data *data; - struct iwl_txq *cmdq = trans_pcie->txq[trans_pcie->cmd_queue]; - struct iwl_fw_error_dump_txcmd *txcmd; - struct iwl_trans_dump_data *dump_data; - u32 len, num_rbs = 0; - u32 monitor_len; - int i, ptr; - bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) && - !trans->cfg->mq_rx_supported && - trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RB); - - /* transport dump header */ - len = sizeof(*dump_data); - - /* host commands */ - len += sizeof(*data) + - cmdq->n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE); - - /* FW monitor */ - if (trans_pcie->fw_mon_page) { - len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) + - trans_pcie->fw_mon_size; - monitor_len = trans_pcie->fw_mon_size; + if (trans->num_blocks) { + *len += sizeof(struct iwl_fw_error_dump_data) + + sizeof(struct iwl_fw_error_dump_fw_mon) + + trans->fw_mon[0].size; + return trans->fw_mon[0].size; } else if (trans->dbg_dest_tlv) { - u32 base, end, cfg_reg; + u32 base, end, cfg_reg, monitor_len; if (trans->dbg_dest_tlv->version == 1) { cfg_reg = le32_to_cpu(trans->dbg_dest_tlv->base_reg); @@ -3002,11 +2968,39 @@ static struct iwl_trans_dump_data end += (1 << trans->dbg_dest_tlv->end_shift); monitor_len = end - base; } - len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) + - monitor_len; - } else { - monitor_len = 
0; + *len += sizeof(struct iwl_fw_error_dump_data) + + sizeof(struct iwl_fw_error_dump_fw_mon) + + monitor_len; + return monitor_len; } + return 0; +} + +static struct iwl_trans_dump_data +*iwl_trans_pcie_dump_data(struct iwl_trans *trans, + const struct iwl_fw_dbg_trigger_tlv *trigger) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_fw_error_dump_data *data; + struct iwl_txq *cmdq = trans_pcie->txq[trans_pcie->cmd_queue]; + struct iwl_fw_error_dump_txcmd *txcmd; + struct iwl_trans_dump_data *dump_data; + u32 len, num_rbs = 0; + u32 monitor_len; + int i, ptr; + bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) && + !trans->cfg->mq_rx_supported && + trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RB); + + /* transport dump header */ + len = sizeof(*dump_data); + + /* host commands */ + len += sizeof(*data) + + cmdq->n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE); + + /* FW monitor */ + monitor_len = iwl_trans_get_fw_monitor_len(trans, &len); if (trigger && (trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)) { if (!(trans->dbg_dump_mask & @@ -3175,7 +3169,6 @@ static void iwl_trans_pcie_resume(struct iwl_trans *trans) .ref = iwl_trans_pcie_ref, \ .unref = iwl_trans_pcie_unref, \ .dump_data = iwl_trans_pcie_dump_data, \ - .dump_regs = iwl_trans_pcie_dump_regs, \ .d3_suspend = iwl_trans_pcie_d3_suspend, \ .d3_resume = iwl_trans_pcie_d3_resume @@ -3277,6 +3270,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, PCIE_LINK_STATE_CLKPM); } + trans_pcie->def_rx_queue = 0; + if (cfg->use_tfh) { addr_size = 64; trans_pcie->max_tbs = IWL_TFH_NUM_TBS; @@ -3327,6 +3322,12 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, iwl_disable_interrupts(trans); trans->hw_rev = iwl_read32(trans, CSR_HW_REV); + if (trans->hw_rev == 0xffffffff) { + dev_err(&pdev->dev, "HW_REV=0xFFFFFFFF, PCI issues?\n"); + ret = -EIO; + goto out_no_pci; + } + /* * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have * changed, and now the revision step also includes bit 0-1 (no more diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c index b99f33ff9123..b71cf55480fc 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c @@ -87,9 +87,9 @@ void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans) /* * iwl_pcie_txq_update_byte_tbl - Set up entry in Tx byte-count array */ -static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie, - struct iwl_txq *txq, u16 byte_cnt, - int num_tbs) +void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie, + struct iwl_txq *txq, u16 byte_cnt, + int num_tbs) { struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr; struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie); @@ -127,8 +127,8 @@ static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie, /* * iwl_pcie_gen2_txq_inc_wr_ptr - Send new write index to hardware */ -static void iwl_pcie_gen2_txq_inc_wr_ptr(struct iwl_trans *trans, - struct iwl_txq *txq) +void iwl_pcie_gen2_txq_inc_wr_ptr(struct iwl_trans *trans, + struct iwl_txq *txq) { lockdep_assert_held(&txq->lock); @@ -416,6 +416,37 @@ out_err: return NULL; } +static int iwl_pcie_gen2_tx_add_frags(struct iwl_trans *trans, + struct sk_buff *skb, + struct iwl_tfh_tfd *tfd, + struct iwl_cmd_meta *out_meta) +{ + int i; + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + dma_addr_t 
tb_phys; + int tb_idx; + + if (!skb_frag_size(frag)) + continue; + + tb_phys = skb_frag_dma_map(trans->dev, frag, 0, + skb_frag_size(frag), DMA_TO_DEVICE); + + if (unlikely(dma_mapping_error(trans->dev, tb_phys))) + return -ENOMEM; + tb_idx = iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, + skb_frag_size(frag)); + if (tb_idx < 0) + return tb_idx; + + out_meta->tbs |= BIT(tb_idx); + } + + return 0; +} + static struct iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans, struct iwl_txq *txq, @@ -428,7 +459,7 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans, int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr); struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx); dma_addr_t tb_phys; - int i, len, tb1_len, tb2_len; + int len, tb1_len, tb2_len; void *tb1_addr; tb_phys = iwl_pcie_get_first_tb_dma(txq, idx); @@ -467,24 +498,8 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans, iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb2_len); } - /* set up the remaining entries to point to the data */ - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - int tb_idx; - - if (!skb_frag_size(frag)) - continue; - - tb_phys = skb_frag_dma_map(trans->dev, frag, 0, - skb_frag_size(frag), DMA_TO_DEVICE); - - if (unlikely(dma_mapping_error(trans->dev, tb_phys))) - goto out_err; - tb_idx = iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, - skb_frag_size(frag)); - - out_meta->tbs |= BIT(tb_idx); - } + if (iwl_pcie_gen2_tx_add_frags(trans, skb, tfd, out_meta)) + goto out_err; trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, hdr_len); @@ -526,7 +541,12 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans, hdr_len = ieee80211_hdrlen(hdr->frame_control); - if (amsdu) + /* + * Only build A-MSDUs here if doing so by GSO, otherwise it may be + * an A-MSDU for other reasons, e.g. NAN or an A-MSDU having been + * built in the higher layers already. 
+ */ + if (amsdu && skb_shinfo(skb)->gso_size) return iwl_pcie_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb, out_meta, hdr_len, len); @@ -1065,8 +1085,8 @@ void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id) iwl_wake_queue(trans, txq); } -static void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans, - struct iwl_txq *txq) +void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans, + struct iwl_txq *txq) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct device *dev = trans->dev; @@ -1120,23 +1140,13 @@ static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id) clear_bit(txq_id, trans_pcie->queue_used); } -int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans, - struct iwl_tx_queue_cfg_cmd *cmd, - int cmd_id, int size, - unsigned int timeout) +int iwl_trans_pcie_dyn_txq_alloc_dma(struct iwl_trans *trans, + struct iwl_txq **intxq, int size, + unsigned int timeout) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_tx_queue_cfg_rsp *rsp; - struct iwl_txq *txq; - struct iwl_host_cmd hcmd = { - .id = cmd_id, - .len = { sizeof(*cmd) }, - .data = { cmd, }, - .flags = CMD_WANT_SKB, - }; - int ret, qid; - u32 wr_ptr; + int ret; + struct iwl_txq *txq; txq = kzalloc(sizeof(*txq), GFP_KERNEL); if (!txq) return -ENOMEM; @@ -1164,20 +1174,30 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans, txq->wd_timeout = msecs_to_jiffies(timeout); - cmd->tfdq_addr = cpu_to_le64(txq->dma_addr); - cmd->byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma); - cmd->cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size)); + *intxq = txq; + return 0; - ret = iwl_trans_send_cmd(trans, &hcmd); - if (ret) - goto error; +error: + iwl_pcie_gen2_txq_free_memory(trans, txq); + return ret; +} + +int iwl_trans_pcie_txq_alloc_response(struct iwl_trans *trans, + struct iwl_txq *txq, + struct iwl_host_cmd *hcmd) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_tx_queue_cfg_rsp *rsp; + int ret, qid; + u32 wr_ptr; - if (WARN_ON(iwl_rx_packet_payload_len(hcmd.resp_pkt) != sizeof(*rsp))) { + if (WARN_ON(iwl_rx_packet_payload_len(hcmd->resp_pkt) != + sizeof(*rsp))) { ret = -EINVAL; goto error_free_resp; } - rsp = (void *)hcmd.resp_pkt->data; + rsp = (void *)hcmd->resp_pkt->data; qid = le16_to_cpu(rsp->queue_number); wr_ptr = le16_to_cpu(rsp->write_pointer); @@ -1204,11 +1224,48 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans, (txq->write_ptr) | (qid << 16)); IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid); - iwl_free_resp(&hcmd); + iwl_free_resp(hcmd); return qid; error_free_resp: - iwl_free_resp(&hcmd); + iwl_free_resp(hcmd); + iwl_pcie_gen2_txq_free_memory(trans, txq); + return ret; +} + +int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans, + __le16 flags, u8 sta_id, u8 tid, + int cmd_id, int size, + unsigned int timeout) +{ + struct iwl_txq *txq = NULL; + struct iwl_tx_queue_cfg_cmd cmd = { + .flags = flags, + .sta_id = sta_id, + .tid = tid, + }; + struct iwl_host_cmd hcmd = { + .id = cmd_id, + .len = { sizeof(cmd) }, + .data = { &cmd, }, + .flags = CMD_WANT_SKB, + }; + int ret; + + ret = iwl_trans_pcie_dyn_txq_alloc_dma(trans, &txq, size, timeout); + if (ret) + return ret; + + cmd.tfdq_addr = cpu_to_le64(txq->dma_addr); + cmd.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma); + cmd.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size)); + + ret = iwl_trans_send_cmd(trans, &hcmd); + if (ret) + goto error; + + return iwl_trans_pcie_txq_alloc_response(trans, txq, &hcmd); + error: 
iwl_pcie_gen2_txq_free_memory(trans, txq); return ret; @@ -1251,30 +1308,31 @@ void iwl_pcie_gen2_tx_free(struct iwl_trans *trans) } } -int iwl_pcie_gen2_tx_init(struct iwl_trans *trans) +int iwl_pcie_gen2_tx_init(struct iwl_trans *trans, int txq_id, int queue_size) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_txq *cmd_queue; - int txq_id = trans_pcie->cmd_queue, ret; + struct iwl_txq *queue; + int ret; - /* alloc and init the command queue */ + /* alloc and init the tx queue */ if (!trans_pcie->txq[txq_id]) { - cmd_queue = kzalloc(sizeof(*cmd_queue), GFP_KERNEL); - if (!cmd_queue) { - IWL_ERR(trans, "Not enough memory for command queue\n"); + queue = kzalloc(sizeof(*queue), GFP_KERNEL); + if (!queue) { + IWL_ERR(trans, "Not enough memory for tx queue\n"); return -ENOMEM; } - trans_pcie->txq[txq_id] = cmd_queue; - ret = iwl_pcie_txq_alloc(trans, cmd_queue, TFD_CMD_SLOTS, true); + trans_pcie->txq[txq_id] = queue; + ret = iwl_pcie_txq_alloc(trans, queue, queue_size, true); if (ret) { IWL_ERR(trans, "Tx %d queue init failed\n", txq_id); goto error; } } else { - cmd_queue = trans_pcie->txq[txq_id]; + queue = trans_pcie->txq[txq_id]; } - ret = iwl_pcie_txq_init(trans, cmd_queue, TFD_CMD_SLOTS, true); + ret = iwl_pcie_txq_init(trans, queue, queue_size, + (txq_id == trans_pcie->cmd_queue)); if (ret) { IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id); goto error; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index 93f0d387688a..f227b91098c9 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -17,10 +17,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * @@ -1101,7 +1097,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, if (!iwl_queue_used(txq, last_to_free)) { IWL_ERR(trans, - "%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n", + "%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n", __func__, txq_id, last_to_free, trans->cfg->base_params->max_tfd_queue_size, txq->write_ptr, txq->read_ptr); @@ -1188,6 +1184,10 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans, lockdep_assert_held(&trans_pcie->reg_lock); + /* Make sure the NIC is still alive in the bus */ + if (test_bit(STATUS_TRANS_DEAD, &trans->status)) + return -ENODEV; + if (!(cmd->flags & CMD_SEND_IN_IDLE) && !trans_pcie->ref_cmd_in_flight) { trans_pcie->ref_cmd_in_flight = true; @@ -1230,7 +1230,7 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans, * need to be reclaimed. As result, some free space forms. If there is * enough free space (> low mark), wake the stack that feeds us. 
*/ -static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx) +void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_txq *txq = trans_pcie->txq[txq_id]; @@ -1912,7 +1912,7 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans, } if (test_bit(STATUS_FW_ERROR, &trans->status)) { - iwl_trans_dump_regs(trans); + iwl_trans_pcie_dump_regs(trans); IWL_ERR(trans, "FW error in SYNC CMD %s\n", iwl_get_cmd_string(trans, cmd->id)); dump_stack(); @@ -1957,6 +1957,10 @@ cancel: int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) { + /* Make sure the NIC is still alive in the bus */ + if (test_bit(STATUS_TRANS_DEAD, &trans->status)) + return -ENODEV; + if (!(cmd->flags & CMD_SEND_IN_RFKILL) && test_bit(STATUS_RFKILL_OPMODE, &trans->status)) { IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n", @@ -1973,29 +1977,24 @@ int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_txq *txq, u8 hdr_len, - struct iwl_cmd_meta *out_meta, - struct iwl_device_cmd *dev_cmd, u16 tb1_len) + struct iwl_cmd_meta *out_meta) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - u16 tb2_len; + u16 head_tb_len; int i; /* * Set up TFD's third entry to point directly to remainder * of skb's head, if any */ - tb2_len = skb_headlen(skb) - hdr_len; + head_tb_len = skb_headlen(skb) - hdr_len; - if (tb2_len > 0) { - dma_addr_t tb2_phys = dma_map_single(trans->dev, - skb->data + hdr_len, - tb2_len, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(trans->dev, tb2_phys))) { - iwl_pcie_tfd_unmap(trans, out_meta, txq, - txq->write_ptr); + if (head_tb_len > 0) { + dma_addr_t tb_phys = dma_map_single(trans->dev, + skb->data + hdr_len, + head_tb_len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(trans->dev, tb_phys))) return -EINVAL; - } - iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, false); + iwl_pcie_txq_build_tfd(trans, txq, tb_phys, head_tb_len, false); } /* set up the remaining entries to point to the data */ @@ -2010,23 +2009,16 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb, tb_phys = skb_frag_dma_map(trans->dev, frag, 0, skb_frag_size(frag), DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(trans->dev, tb_phys))) { - iwl_pcie_tfd_unmap(trans, out_meta, txq, - txq->write_ptr); + if (unlikely(dma_mapping_error(trans->dev, tb_phys))) return -EINVAL; - } tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys, skb_frag_size(frag), false); + if (tb_idx < 0) + return tb_idx; out_meta->tbs |= BIT(tb_idx); } - trace_iwlwifi_dev_tx(trans->dev, skb, - iwl_pcie_get_tfd(trans, txq, txq->write_ptr), - trans_pcie->tfd_size, - &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, - hdr_len); - trace_iwlwifi_dev_tx_data(trans->dev, skb, hdr_len); return 0; } @@ -2087,7 +2079,6 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, u8 *start_hdr; struct iwl_tso_hdr_page *hdr_page; struct page **page_ptr; - int ret; struct tso_t tso; /* if the packet is protected, then it must be CCMP or GCMP */ @@ -2173,10 +2164,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, if (trans_pcie->sw_csum_tx) { csum_skb = alloc_skb(data_left + tcp_hdrlen(skb), GFP_ATOMIC); - if (!csum_skb) { - ret = -ENOMEM; - goto out_unmap; - } + if (!csum_skb) + return -ENOMEM; 
iwl_compute_pseudo_hdr_csum(iph, tcph, skb->protocol == @@ -2197,8 +2186,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, hdr_tb_len, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(trans->dev, hdr_tb_phys))) { dev_kfree_skb(csum_skb); - ret = -EINVAL; - goto out_unmap; + return -EINVAL; } iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys, hdr_tb_len, false); @@ -2223,8 +2211,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, size, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(trans->dev, tb_phys))) { dev_kfree_skb(csum_skb); - ret = -EINVAL; - goto out_unmap; + return -EINVAL; } iwl_pcie_txq_build_tfd(trans, txq, tb_phys, @@ -2258,10 +2245,6 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, skb_push(skb, hdr_len + iv_len); return 0; - -out_unmap: - iwl_pcie_tfd_unmap(trans, out_meta, txq, txq->write_ptr); - return ret; } #else /* CONFIG_INET */ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, @@ -2426,9 +2409,26 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, out_meta, dev_cmd, tb1_len))) goto out_err; - } else if (unlikely(iwl_fill_data_tbs(trans, skb, txq, hdr_len, - out_meta, dev_cmd, tb1_len))) { - goto out_err; + } else { + struct sk_buff *frag; + + if (unlikely(iwl_fill_data_tbs(trans, skb, txq, hdr_len, + out_meta))) + goto out_err; + + skb_walk_frags(skb, frag) { + if (unlikely(iwl_fill_data_tbs(trans, frag, txq, 0, + out_meta))) + goto out_err; + } + + trace_iwlwifi_dev_tx(trans->dev, skb, + iwl_pcie_get_tfd(trans, txq, + txq->write_ptr), + trans_pcie->tfd_size, + &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, + hdr_len); + trace_iwlwifi_dev_tx_data(trans->dev, skb, hdr_len); } /* building the A-MSDU might have changed this data, so memcpy it now */ @@ -2473,6 +2473,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, spin_unlock(&txq->lock); return 0; out_err: + iwl_pcie_tfd_unmap(trans, out_meta, txq, txq->write_ptr); spin_unlock(&txq->lock); return -1; } diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c index 94ad6fe29e69..21bb68457cfe 100644 --- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c +++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c @@ -73,10 +73,6 @@ #define URB_ASYNC_UNLINK 0 #endif -/* 802.2 LLC/SNAP header used for Ethernet encapsulation over 802.11 */ -static const u8 encaps_hdr[] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00}; -#define ENCAPS_OVERHEAD (sizeof(encaps_hdr) + 2) - struct header_struct { /* 802.3 */ u8 dest[ETH_ALEN]; @@ -915,7 +911,7 @@ static int ezusb_access_ltv(struct ezusb_priv *upriv, default: err("%s: Unexpected context state %d", __func__, state); - /* fall though */ + /* fall through */ case EZUSB_CTX_REQ_TIMEOUT: case EZUSB_CTX_REQ_FAILED: case EZUSB_CTX_RESP_TIMEOUT: diff --git a/drivers/net/wireless/intersil/p54/txrx.c b/drivers/net/wireless/intersil/p54/txrx.c index 3a4214d362ff..790784568ad2 100644 --- a/drivers/net/wireless/intersil/p54/txrx.c +++ b/drivers/net/wireless/intersil/p54/txrx.c @@ -121,8 +121,8 @@ static int p54_assign_address(struct p54_common *priv, struct sk_buff *skb) } if (unlikely(!target_skb)) { if (priv->rx_end - last_addr >= len) { - target_skb = priv->tx_queue.prev; - if (!skb_queue_empty(&priv->tx_queue)) { + target_skb = skb_peek_tail(&priv->tx_queue); + if (target_skb) { info = IEEE80211_SKB_CB(target_skb); range = (void *)info->rate_driver_data; target_addr = 
range->end_addr; diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 07442ada6dd0..aa8058264d5b 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -3,6 +3,7 @@ * Copyright (c) 2008, Jouni Malinen <j@w1.fi> * Copyright (c) 2011, Javier Lopez <jlopex@gmail.com> * Copyright (c) 2016 - 2017 Intel Deutschland GmbH + * Copyright (C) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -494,7 +495,6 @@ static const struct ieee80211_iface_combination hwsim_if_comb_p2p_dev[] = { static spinlock_t hwsim_radio_lock; static LIST_HEAD(hwsim_radios); -static struct workqueue_struct *hwsim_wq; static struct rhashtable hwsim_radios_rht; static int hwsim_radio_idx; static int hwsim_radios_generation = 1; @@ -2528,23 +2528,20 @@ static const struct ieee80211_sband_iftype_data he_capa_2ghz = { IEEE80211_HE_MAC_CAP0_HTC_HE, .mac_cap_info[1] = IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US | - IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_8, + IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8, .mac_cap_info[2] = IEEE80211_HE_MAC_CAP2_BSR | IEEE80211_HE_MAC_CAP2_MU_CASCADING | IEEE80211_HE_MAC_CAP2_ACK_EN, .mac_cap_info[3] = - IEEE80211_HE_MAC_CAP3_GRP_ADDR_MULTI_STA_BA_DL_MU | IEEE80211_HE_MAC_CAP3_OMI_CONTROL | - IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_VHT_2, + IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2, .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU, - .phy_cap_info[0] = - IEEE80211_HE_PHY_CAP0_DUAL_BAND, .phy_cap_info[1] = IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK | IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A | IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD | - IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_MAX_NSTS, + IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS, .phy_cap_info[2] = IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US | IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ | @@ -2578,18 +2575,16 @@ static const struct ieee80211_sband_iftype_data he_capa_5ghz = { IEEE80211_HE_MAC_CAP0_HTC_HE, .mac_cap_info[1] = IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US | - IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_8, + IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8, .mac_cap_info[2] = IEEE80211_HE_MAC_CAP2_BSR | IEEE80211_HE_MAC_CAP2_MU_CASCADING | IEEE80211_HE_MAC_CAP2_ACK_EN, .mac_cap_info[3] = - IEEE80211_HE_MAC_CAP3_GRP_ADDR_MULTI_STA_BA_DL_MU | IEEE80211_HE_MAC_CAP3_OMI_CONTROL | - IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_VHT_2, + IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2, .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU, .phy_cap_info[0] = - IEEE80211_HE_PHY_CAP0_DUAL_BAND | IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G | IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G | IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G, @@ -2597,7 +2592,7 @@ static const struct ieee80211_sband_iftype_data he_capa_5ghz = { IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK | IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A | IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD | - IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_MAX_NSTS, + IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS, .phy_cap_info[2] = IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US | IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ | @@ -3696,13 +3691,9 @@ static int __init init_mac80211_hwsim(void) spin_lock_init(&hwsim_radio_lock); - hwsim_wq = alloc_workqueue("hwsim_wq", 0, 0); - if (!hwsim_wq) - return -ENOMEM; - err = rhashtable_init(&hwsim_radios_rht, &hwsim_rht_params); if (err) - goto out_free_wq; + 
return err; err = register_pernet_device(&hwsim_net_ops); if (err) @@ -3833,8 +3824,6 @@ out_unregister_pernet: unregister_pernet_device(&hwsim_net_ops); out_free_rht: rhashtable_destroy(&hwsim_radios_rht); -out_free_wq: - destroy_workqueue(hwsim_wq); return err; } module_init(init_mac80211_hwsim); @@ -3846,12 +3835,10 @@ static void __exit exit_mac80211_hwsim(void) hwsim_exit_netlink(); mac80211_hwsim_free(); - flush_workqueue(hwsim_wq); rhashtable_destroy(&hwsim_radios_rht); unregister_netdev(hwsim_mon); platform_driver_unregister(&mac80211_hwsim_driver); unregister_pernet_device(&hwsim_net_ops); - destroy_workqueue(hwsim_wq); } module_exit(exit_mac80211_hwsim); diff --git a/drivers/net/wireless/marvell/libertas_tf/if_usb.c b/drivers/net/wireless/marvell/libertas_tf/if_usb.c index e92fc5001171..789337ea676a 100644 --- a/drivers/net/wireless/marvell/libertas_tf/if_usb.c +++ b/drivers/net/wireless/marvell/libertas_tf/if_usb.c @@ -605,9 +605,10 @@ static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff, { unsigned long flags; - if (recvlength > LBS_CMD_BUFFER_SIZE) { + if (recvlength < MESSAGE_HEADER_LEN || + recvlength > LBS_CMD_BUFFER_SIZE) { lbtf_deb_usbd(&cardp->udev->dev, - "The receive buffer is too large\n"); + "The receive buffer is invalid: %d\n", recvlength); kfree_skb(skb); return; } diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c index 433c6a16870b..d445acc4786b 100644 --- a/drivers/net/wireless/marvell/mwifiex/usb.c +++ b/drivers/net/wireless/marvell/mwifiex/usb.c @@ -298,6 +298,19 @@ static int mwifiex_usb_submit_rx_urb(struct urb_context *ctx, int size) struct mwifiex_adapter *adapter = ctx->adapter; struct usb_card_rec *card = (struct usb_card_rec *)adapter->card; + if (test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) { + if (card->rx_cmd_ep == ctx->ep) { + mwifiex_dbg(adapter, INFO, "%s: free rx_cmd skb\n", + __func__); + dev_kfree_skb_any(ctx->skb); + ctx->skb = NULL; + } + mwifiex_dbg(adapter, ERROR, + "%s: card removed/suspended, EP %d rx_cmd URB submit skipped\n", + __func__, ctx->ep); + return -1; + } + if (card->rx_cmd_ep != ctx->ep) { ctx->skb = dev_alloc_skb(size); if (!ctx->skb) { diff --git a/drivers/net/wireless/mediatek/mt76/Kconfig b/drivers/net/wireless/mediatek/mt76/Kconfig index b6c5f17dca30..0ccbcd7e887d 100644 --- a/drivers/net/wireless/mediatek/mt76/Kconfig +++ b/drivers/net/wireless/mediatek/mt76/Kconfig @@ -5,33 +5,13 @@ config MT76_USB tristate depends on MT76_CORE -config MT76x2_COMMON +config MT76x02_LIB tristate - depends on MT76_CORE - -config MT76x0U - tristate "MediaTek MT76x0U (USB) support" - select MT76_CORE - depends on MAC80211 - depends on USB - help - This adds support for MT7610U-based wireless USB dongles. - -config MT76x2E - tristate "MediaTek MT76x2E (PCIe) support" select MT76_CORE - select MT76x2_COMMON - depends on MAC80211 - depends on PCI - ---help--- - This adds support for MT7612/MT7602/MT7662-based wireless PCIe devices. -config MT76x2U - tristate "MediaTek MT76x2U (USB) support" - select MT76_CORE +config MT76x02_USB + tristate select MT76_USB - select MT76x2_COMMON - depends on MAC80211 - depends on USB - help - This adds support for MT7612U-based wireless USB dongles. 
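One pattern worth calling out before the mt76 build-system hunks: in the dma.c changes near the end of this section, mt76 stops choosing between napi_alloc_frag() and netdev_alloc_frag() and instead carves RX buffers out of a per-queue struct page_frag_cache, which must then be drained explicitly when the queue is torn down. A minimal sketch of that allocate/drain lifecycle, with toy_queue as an invented stand-in for the real mt76 queue type:

    /* Illustrative sketch only: toy_queue stands in for mt76's queue. */
    #include <linux/gfp.h>
    #include <linux/mm.h>
    #include <linux/string.h>

    struct toy_queue {
            struct page_frag_cache rx_page;
    };

    static void *toy_rx_buf_alloc(struct toy_queue *q, unsigned int size)
    {
            /* Carves a fragment out of the cache's current page. */
            return page_frag_alloc(&q->rx_page, size, GFP_ATOMIC);
    }

    static void toy_rx_cleanup(struct toy_queue *q)
    {
            struct page *page;

            if (!q->rx_page.va)
                    return;

            /* Drop the references the cache still holds on its page. */
            page = virt_to_page(q->rx_page.va);
            __page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
            memset(&q->rx_page, 0, sizeof(q->rx_page));
    }

Unlike the per-CPU caches behind napi_alloc_frag(), nothing else ever releases a private cache's page references, so the explicit drain is what keeps queue teardown leak-free.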
+ +source "drivers/net/wireless/mediatek/mt76/mt76x0/Kconfig" +source "drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig" diff --git a/drivers/net/wireless/mediatek/mt76/Makefile b/drivers/net/wireless/mediatek/mt76/Makefile index 158d10d2716c..9b8d7488c545 100644 --- a/drivers/net/wireless/mediatek/mt76/Makefile +++ b/drivers/net/wireless/mediatek/mt76/Makefile @@ -1,9 +1,7 @@ obj-$(CONFIG_MT76_CORE) += mt76.o obj-$(CONFIG_MT76_USB) += mt76-usb.o -obj-$(CONFIG_MT76x0U) += mt76x0/ -obj-$(CONFIG_MT76x2_COMMON) += mt76x2-common.o -obj-$(CONFIG_MT76x2E) += mt76x2e.o -obj-$(CONFIG_MT76x2U) += mt76x2u.o +obj-$(CONFIG_MT76x02_LIB) += mt76x02-lib.o +obj-$(CONFIG_MT76x02_USB) += mt76x02-usb.o mt76-y := \ mmio.o util.o trace.o dma.o mac80211.o debugfs.o eeprom.o tx.o agg-rx.o @@ -12,20 +10,13 @@ mt76-usb-y := usb.o usb_trace.o usb_mcu.o CFLAGS_trace.o := -I$(src) CFLAGS_usb_trace.o := -I$(src) +CFLAGS_mt76x02_trace.o := -I$(src) -mt76x2-common-y := \ - mt76x2_eeprom.o mt76x2_tx_common.o mt76x2_mac_common.o \ - mt76x2_init_common.o mt76x2_common.o mt76x2_phy_common.o \ - mt76x2_debugfs.o +mt76x02-lib-y := mt76x02_util.o mt76x02_mac.o mt76x02_mcu.o \ + mt76x02_eeprom.o mt76x02_phy.o mt76x02_mmio.o \ + mt76x02_txrx.o mt76x02_trace.o -mt76x2e-y := \ - mt76x2_pci.o mt76x2_dma.o \ - mt76x2_main.o mt76x2_init.o mt76x2_tx.o \ - mt76x2_core.o mt76x2_mac.o mt76x2_mcu.o mt76x2_phy.o \ - mt76x2_dfs.o mt76x2_trace.o +mt76x02-usb-y := mt76x02_usb_mcu.o mt76x02_usb_core.o -mt76x2u-y := \ - mt76x2_usb.o mt76x2u_init.o mt76x2u_main.o mt76x2u_mac.o \ - mt76x2u_mcu.o mt76x2u_phy.o mt76x2u_core.o - -CFLAGS_mt76x2_trace.o := -I$(src) +obj-$(CONFIG_MT76x0_COMMON) += mt76x0/ +obj-$(CONFIG_MT76x2_COMMON) += mt76x2/ diff --git a/drivers/net/wireless/mediatek/mt76/debugfs.c b/drivers/net/wireless/mediatek/mt76/debugfs.c index a38d05dea599..a5adf22c3ffa 100644 --- a/drivers/net/wireless/mediatek/mt76/debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/debugfs.c @@ -56,6 +56,35 @@ mt76_queues_read(struct seq_file *s, void *data) return 0; } +void mt76_seq_puts_array(struct seq_file *file, const char *str, + s8 *val, int len) +{ + int i; + + seq_printf(file, "%10s:", str); + for (i = 0; i < len; i++) + seq_printf(file, " %2d", val[i]); + seq_puts(file, "\n"); +} +EXPORT_SYMBOL_GPL(mt76_seq_puts_array); + +static int mt76_read_rate_txpower(struct seq_file *s, void *data) +{ + struct mt76_dev *dev = dev_get_drvdata(s->private); + + mt76_seq_puts_array(s, "CCK", dev->rate_power.cck, + ARRAY_SIZE(dev->rate_power.cck)); + mt76_seq_puts_array(s, "OFDM", dev->rate_power.ofdm, + ARRAY_SIZE(dev->rate_power.ofdm)); + mt76_seq_puts_array(s, "STBC", dev->rate_power.stbc, + ARRAY_SIZE(dev->rate_power.stbc)); + mt76_seq_puts_array(s, "HT", dev->rate_power.ht, + ARRAY_SIZE(dev->rate_power.ht)); + mt76_seq_puts_array(s, "VHT", dev->rate_power.vht, + ARRAY_SIZE(dev->rate_power.vht)); + return 0; +} + struct dentry *mt76_register_debugfs(struct mt76_dev *dev) { struct dentry *dir; @@ -72,6 +101,8 @@ struct dentry *mt76_register_debugfs(struct mt76_dev *dev) if (dev->otp.data) debugfs_create_blob("otp", 0400, dir, &dev->otp); debugfs_create_devm_seqfile(dev->dev, "queues", dir, mt76_queues_read); + debugfs_create_devm_seqfile(dev->dev, "rate_txpower", dir, + mt76_read_rate_txpower); return dir; } diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c index c51da2205b93..f7fbd7016403 100644 --- a/drivers/net/wireless/mediatek/mt76/dma.c +++ b/drivers/net/wireless/mediatek/mt76/dma.c @@ -322,19 +322,13 @@ 
mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q, bool napi) int len = SKB_WITH_OVERHEAD(q->buf_size); int offset = q->buf_offset; int idx; - void *(*alloc)(unsigned int fragsz); - - if (napi) - alloc = napi_alloc_frag; - else - alloc = netdev_alloc_frag; spin_lock_bh(&q->lock); while (q->queued < q->ndesc - 1) { struct mt76_queue_buf qbuf; - buf = alloc(q->buf_size); + buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC); if (!buf) break; @@ -361,6 +355,7 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q, bool napi) static void mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q) { + struct page *page; void *buf; bool more; @@ -373,6 +368,13 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q) skb_free_frag(buf); } while (1); spin_unlock_bh(&q->lock); + + if (!q->rx_page.va) + return; + + page = virt_to_page(q->rx_page.va); + __page_frag_cache_drain(page, q->rx_page.pagecnt_bias); + memset(&q->rx_page, 0, sizeof(q->rx_page)); } static void diff --git a/drivers/net/wireless/mediatek/mt76/dma.h b/drivers/net/wireless/mediatek/mt76/dma.h index 27248e24a19b..357cc356342d 100644 --- a/drivers/net/wireless/mediatek/mt76/dma.h +++ b/drivers/net/wireless/mediatek/mt76/dma.h @@ -25,34 +25,6 @@ #define MT_DMA_CTL_LAST_SEC0 BIT(30) #define MT_DMA_CTL_DMA_DONE BIT(31) -#define MT_TXD_INFO_LEN GENMASK(15, 0) -#define MT_TXD_INFO_NEXT_VLD BIT(16) -#define MT_TXD_INFO_TX_BURST BIT(17) -#define MT_TXD_INFO_80211 BIT(19) -#define MT_TXD_INFO_TSO BIT(20) -#define MT_TXD_INFO_CSO BIT(21) -#define MT_TXD_INFO_WIV BIT(24) -#define MT_TXD_INFO_QSEL GENMASK(26, 25) -#define MT_TXD_INFO_DPORT GENMASK(29, 27) -#define MT_TXD_INFO_TYPE GENMASK(31, 30) - -#define MT_RX_FCE_INFO_LEN GENMASK(13, 0) -#define MT_RX_FCE_INFO_SELF_GEN BIT(15) -#define MT_RX_FCE_INFO_CMD_SEQ GENMASK(19, 16) -#define MT_RX_FCE_INFO_EVT_TYPE GENMASK(23, 20) -#define MT_RX_FCE_INFO_PCIE_INTR BIT(24) -#define MT_RX_FCE_INFO_QSEL GENMASK(26, 25) -#define MT_RX_FCE_INFO_D_PORT GENMASK(29, 27) -#define MT_RX_FCE_INFO_TYPE GENMASK(31, 30) - -/* MCU request message header */ -#define MT_MCU_MSG_LEN GENMASK(15, 0) -#define MT_MCU_MSG_CMD_SEQ GENMASK(19, 16) -#define MT_MCU_MSG_CMD_TYPE GENMASK(26, 20) -#define MT_MCU_MSG_PORT GENMASK(29, 27) -#define MT_MCU_MSG_TYPE GENMASK(31, 30) -#define MT_MCU_MSG_TYPE_CMD BIT(30) - #define MT_DMA_HDR_LEN 4 #define MT_RX_INFO_LEN 4 #define MT_FCE_INFO_LEN 4 @@ -65,14 +37,21 @@ struct mt76_desc { __le32 info; } __packed __aligned(4); -enum dma_msg_port { - WLAN_PORT, - CPU_RX_PORT, - CPU_TX_PORT, - HOST_PORT, - VIRTUAL_CPU_RX_PORT, - VIRTUAL_CPU_TX_PORT, - DISCARD, +enum mt76_qsel { + MT_QSEL_MGMT, + MT_QSEL_HCCA, + MT_QSEL_EDCA, + MT_QSEL_EDCA_2, +}; + +enum mt76_mcu_evt_type { + EVT_CMD_DONE, + EVT_CMD_ERROR, + EVT_CMD_RETRY, + EVT_EVENT_PWR_RSP, + EVT_EVENT_WOW_RSP, + EVT_EVENT_CARRIER_DETECT_RSP, + EVT_EVENT_DFS_DETECT_RSP, }; int mt76_dma_attach(struct mt76_dev *dev); diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c index 029d54bce9e8..2a699e8b79bf 100644 --- a/drivers/net/wireless/mediatek/mt76/mac80211.c +++ b/drivers/net/wireless/mediatek/mt76/mac80211.c @@ -283,6 +283,7 @@ mt76_alloc_device(unsigned int size, const struct ieee80211_ops *ops) spin_lock_init(&dev->rx_lock); spin_lock_init(&dev->lock); spin_lock_init(&dev->cc_lock); + mutex_init(&dev->mutex); init_waitqueue_head(&dev->tx_wait); return dev; @@ -305,6 +306,8 @@ int mt76_register_device(struct mt76_dev *dev, bool vht, 
wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR; + wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); + wiphy->available_antennas_tx = dev->antenna_mask; wiphy->available_antennas_rx = dev->antenna_mask; @@ -472,7 +475,7 @@ void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid, } EXPORT_SYMBOL(mt76_wcid_key_setup); -static struct ieee80211_sta *mt76_rx_convert(struct sk_buff *skb) +struct ieee80211_sta *mt76_rx_convert(struct sk_buff *skb) { struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); struct mt76_rx_status mstat; @@ -497,6 +500,7 @@ static struct ieee80211_sta *mt76_rx_convert(struct sk_buff *skb) return wcid_to_sta(mstat.wcid); } +EXPORT_SYMBOL(mt76_rx_convert); static int mt76_check_ccmp_pn(struct sk_buff *skb) @@ -546,6 +550,12 @@ mt76_check_ps(struct mt76_dev *dev, struct sk_buff *skb) struct mt76_wcid *wcid = status->wcid; bool ps; + if (ieee80211_is_pspoll(hdr->frame_control) && !wcid) { + sta = ieee80211_find_sta_by_ifaddr(dev->hw, hdr->addr2, NULL); + if (sta) + wcid = status->wcid = (struct mt76_wcid *) sta->drv_priv; + } + if (!wcid || !wcid->sta) return; diff --git a/drivers/net/wireless/mediatek/mt76/mmio.c b/drivers/net/wireless/mediatek/mt76/mmio.c index 09a14dead6e3..30a5d928e655 100644 --- a/drivers/net/wireless/mediatek/mt76/mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mmio.c @@ -21,7 +21,7 @@ static u32 mt76_mmio_rr(struct mt76_dev *dev, u32 offset) { u32 val; - val = ioread32(dev->regs + offset); + val = ioread32(dev->mmio.regs + offset); trace_reg_rr(dev, offset, val); return val; @@ -30,7 +30,7 @@ static u32 mt76_mmio_rr(struct mt76_dev *dev, u32 offset) static void mt76_mmio_wr(struct mt76_dev *dev, u32 offset, u32 val) { trace_reg_wr(dev, offset, val); - iowrite32(val, dev->regs + offset); + iowrite32(val, dev->mmio.regs + offset); } static u32 mt76_mmio_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val) @@ -43,7 +43,31 @@ static u32 mt76_mmio_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val) static void mt76_mmio_copy(struct mt76_dev *dev, u32 offset, const void *data, int len) { - __iowrite32_copy(dev->regs + offset, data, len >> 2); + __iowrite32_copy(dev->mmio.regs + offset, data, len >> 2); +} + +static int mt76_mmio_wr_rp(struct mt76_dev *dev, u32 base, + const struct mt76_reg_pair *data, int len) +{ + while (len > 0) { + mt76_mmio_wr(dev, data->reg, data->value); + data++; + len--; + } + + return 0; +} + +static int mt76_mmio_rd_rp(struct mt76_dev *dev, u32 base, + struct mt76_reg_pair *data, int len) +{ + while (len > 0) { + data->value = mt76_mmio_rr(dev, data->reg); + data++; + len--; + } + + return 0; } void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs) @@ -53,9 +77,16 @@ void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs) .rmw = mt76_mmio_rmw, .wr = mt76_mmio_wr, .copy = mt76_mmio_copy, + .wr_rp = mt76_mmio_wr_rp, + .rd_rp = mt76_mmio_rd_rp, }; dev->bus = &mt76_mmio_ops; - dev->regs = regs; + dev->mmio.regs = regs; + + skb_queue_head_init(&dev->mmio.mcu.res_q); + init_waitqueue_head(&dev->mmio.mcu.wait); + spin_lock_init(&dev->mmio.irq_lock); + mutex_init(&dev->mmio.mcu.mutex); } EXPORT_SYMBOL_GPL(mt76_mmio_init); diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h index 2eab35879163..f723a07cab29 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ -33,12 +33,21 @@ struct mt76_dev; struct mt76_wcid; +struct mt76_reg_pair { + u32 reg; + u32 value; +}; + struct 
mt76_bus_ops { u32 (*rr)(struct mt76_dev *dev, u32 offset); void (*wr)(struct mt76_dev *dev, u32 offset, u32 val); u32 (*rmw)(struct mt76_dev *dev, u32 offset, u32 mask, u32 val); void (*copy)(struct mt76_dev *dev, u32 offset, const void *data, int len); + int (*wr_rp)(struct mt76_dev *dev, u32 base, + const struct mt76_reg_pair *rp, int len); + int (*rd_rp)(struct mt76_dev *dev, u32 base, + struct mt76_reg_pair *rp, int len); }; enum mt76_txq_id { @@ -112,6 +121,18 @@ struct mt76_queue { dma_addr_t desc_dma; struct sk_buff *rx_head; + struct page_frag_cache rx_page; + spinlock_t rx_page_lock; +}; + +struct mt76_mcu_ops { + struct sk_buff *(*mcu_msg_alloc)(const void *data, int len); + int (*mcu_send_msg)(struct mt76_dev *dev, struct sk_buff *skb, + int cmd, bool wait_resp); + int (*mcu_wr_rp)(struct mt76_dev *dev, u32 base, + const struct mt76_reg_pair *rp, int len); + int (*mcu_rd_rp)(struct mt76_dev *dev, u32 base, + struct mt76_reg_pair *rp, int len); }; struct mt76_queue_ops { @@ -143,6 +164,8 @@ enum mt76_wcid_flags { MT_WCID_FLAG_PS, }; +#define MT76_N_WCIDS 128 + struct mt76_wcid { struct mt76_rx_tid __rcu *aggr[IEEE80211_NUM_TIDS]; @@ -210,7 +233,6 @@ enum { MT76_OFFCHANNEL, MT76_REMOVED, MT76_READING_STATS, - MT76_MORE_STATS, }; struct mt76_hw_cap { @@ -252,6 +274,19 @@ struct mt76_sband { struct mt76_channel_state *chan; }; +struct mt76_rate_power { + union { + struct { + s8 cck[4]; + s8 ofdm[8]; + s8 stbc[10]; + s8 ht[16]; + s8 vht[10]; + }; + s8 all[48]; + }; +}; + /* addr req mask */ #define MT_VEND_TYPE_EEPROM BIT(31) #define MT_VEND_TYPE_CFG BIT(30) @@ -307,9 +342,29 @@ struct mt76_usb { struct completion cmpl; struct mt76u_buf res; u32 msg_seq; + + /* multiple reads */ + struct mt76_reg_pair *rp; + int rp_len; + u32 base; + bool burst; } mcu; }; +struct mt76_mmio { + struct mt76e_mcu { + struct mutex mutex; + + wait_queue_head_t wait; + struct sk_buff_head res_q; + + u32 msg_seq; + } mcu; + void __iomem *regs; + spinlock_t irq_lock; + u32 irqmask; +}; + struct mt76_dev { struct ieee80211_hw *hw; struct cfg80211_chan_def chandef; @@ -317,9 +372,12 @@ struct mt76_dev { spinlock_t lock; spinlock_t cc_lock; + + struct mutex mutex; + const struct mt76_bus_ops *bus; const struct mt76_driver_ops *drv; - void __iomem *regs; + const struct mt76_mcu_ops *mcu_ops; struct device *dev; struct net_device napi_dev; @@ -334,11 +392,17 @@ struct mt76_dev { wait_queue_head_t tx_wait; + unsigned long wcid_mask[MT76_N_WCIDS / BITS_PER_LONG]; + + struct mt76_wcid global_wcid; + struct mt76_wcid __rcu *wcid[MT76_N_WCIDS]; + u8 macaddr[ETH_ALEN]; u32 rev; unsigned long state; u8 antenna_mask; + u16 chainmask; struct mt76_sband sband_2g; struct mt76_sband sband_5g; @@ -346,6 +410,10 @@ struct mt76_dev { struct debugfs_blob_wrapper otp; struct mt76_hw_cap cap; + struct mt76_rate_power rate_power; + int txpower_conf; + int txpower_cur; + u32 debugfs_reg; struct led_classdev led_cdev; @@ -353,7 +421,12 @@ struct mt76_dev { bool led_al; u8 led_pin; - struct mt76_usb usb; + u32 rxfilter; + + union { + struct mt76_mmio mmio; + struct mt76_usb usb; + }; }; enum mt76_phy_type { @@ -364,18 +437,6 @@ enum mt76_phy_type { MT_PHY_TYPE_VHT, }; -struct mt76_rate_power { - union { - struct { - s8 cck[4]; - s8 ofdm[8]; - s8 ht[16]; - s8 vht[10]; - }; - s8 all[38]; - }; -}; - struct mt76_rx_status { struct mt76_wcid *wcid; @@ -399,10 +460,23 @@ struct mt76_rx_status { s8 chain_signal[IEEE80211_MAX_CHAINS]; }; +#define __mt76_rr(dev, ...) (dev)->bus->rr((dev), __VA_ARGS__) +#define __mt76_wr(dev, ...) 
(dev)->bus->wr((dev), __VA_ARGS__) +#define __mt76_rmw(dev, ...) (dev)->bus->rmw((dev), __VA_ARGS__) +#define __mt76_wr_copy(dev, ...) (dev)->bus->copy((dev), __VA_ARGS__) + +#define __mt76_set(dev, offset, val) __mt76_rmw(dev, offset, 0, val) +#define __mt76_clear(dev, offset, val) __mt76_rmw(dev, offset, val, 0) + #define mt76_rr(dev, ...) (dev)->mt76.bus->rr(&((dev)->mt76), __VA_ARGS__) #define mt76_wr(dev, ...) (dev)->mt76.bus->wr(&((dev)->mt76), __VA_ARGS__) #define mt76_rmw(dev, ...) (dev)->mt76.bus->rmw(&((dev)->mt76), __VA_ARGS__) #define mt76_wr_copy(dev, ...) (dev)->mt76.bus->copy(&((dev)->mt76), __VA_ARGS__) +#define mt76_wr_rp(dev, ...) (dev)->mt76.bus->wr_rp(&((dev)->mt76), __VA_ARGS__) +#define mt76_rd_rp(dev, ...) (dev)->mt76.bus->rd_rp(&((dev)->mt76), __VA_ARGS__) + +#define mt76_mcu_msg_alloc(dev, ...) (dev)->mt76.mcu_ops->mcu_msg_alloc(__VA_ARGS__) +#define mt76_mcu_send_msg(dev, ...) (dev)->mt76.mcu_ops->mcu_send_msg(&((dev)->mt76), __VA_ARGS__) #define mt76_set(dev, offset, val) mt76_rmw(dev, offset, 0, val) #define mt76_clear(dev, offset, val) mt76_rmw(dev, offset, val, 0) @@ -413,6 +487,9 @@ struct mt76_rx_status { #define mt76_rmw_field(_dev, _reg, _field, _val) \ mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val)) +#define __mt76_rmw_field(_dev, _reg, _field, _val) \ + __mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val)) + #define mt76_hw(dev) (dev)->mt76.hw bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val, @@ -469,6 +546,8 @@ int mt76_register_device(struct mt76_dev *dev, bool vht, void mt76_unregister_device(struct mt76_dev *dev); struct dentry *mt76_register_debugfs(struct mt76_dev *dev); +void mt76_seq_puts_array(struct seq_file *file, const char *str, + s8 *val, int len); int mt76_eeprom_init(struct mt76_dev *dev, int len); void mt76_eeprom_override(struct mt76_dev *dev); @@ -485,13 +564,7 @@ static inline int mt76_decr(int val, int size) return (val - 1) & (size - 1); } -/* Hardware uses mirrored order of queues with Q3 - * having the highest priority - */ -static inline u8 q2hwq(u8 q) -{ - return q ^ 0x3; -} +u8 mt76_ac_to_hwq(u8 ac); static inline struct ieee80211_txq * mtxq_to_txq(struct mt76_txq *mtxq) @@ -543,6 +616,8 @@ void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid); void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid, struct ieee80211_key_conf *key); +struct ieee80211_sta *mt76_rx_convert(struct sk_buff *skb); + /* internal */ void mt76_tx_free(struct mt76_dev *dev); struct mt76_txwi_cache *mt76_get_txwi(struct mt76_dev *dev); @@ -599,15 +674,9 @@ int mt76u_alloc_queues(struct mt76_dev *dev); void mt76u_stop_queues(struct mt76_dev *dev); void mt76u_stop_stat_wk(struct mt76_dev *dev); void mt76u_queues_deinit(struct mt76_dev *dev); -int mt76u_skb_dma_info(struct sk_buff *skb, int port, u32 flags); -int mt76u_mcu_fw_send_data(struct mt76_dev *dev, const void *data, - int data_len, u32 max_payload, u32 offset); void mt76u_mcu_complete_urb(struct urb *urb); -struct sk_buff *mt76u_mcu_msg_alloc(const void *data, int len); -int mt76u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb, - int cmd, bool wait_resp); -void mt76u_mcu_fw_reset(struct mt76_dev *dev); int mt76u_mcu_init_rx(struct mt76_dev *dev); +void mt76u_mcu_deinit(struct mt76_dev *dev); #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/Kconfig b/drivers/net/wireless/mediatek/mt76/mt76x0/Kconfig new file mode 100644 index 000000000000..9a6157db3893 --- /dev/null +++ 
b/drivers/net/wireless/mediatek/mt76/mt76x0/Kconfig @@ -0,0 +1,20 @@ +config MT76x0_COMMON + tristate + select MT76x02_LIB + +config MT76x0U + tristate "MediaTek MT76x0U (USB) support" + select MT76x0_COMMON + select MT76x02_USB + depends on MAC80211 + depends on USB + help + This adds support for MT7610U-based wireless USB dongles. + +config MT76x0E + tristate "MediaTek MT76x0E (PCIe) support" + select MT76x0_COMMON + depends on MAC80211 + depends on PCI + help + This adds support for MT7610/MT7630-based wireless PCIe devices. diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/Makefile b/drivers/net/wireless/mediatek/mt76/mt76x0/Makefile index 7843908261ba..20672978dceb 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/Makefile +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/Makefile @@ -1,7 +1,12 @@ -obj-$(CONFIG_MT76x0U) += mt76x0.o +obj-$(CONFIG_MT76x0U) += mt76x0u.o +obj-$(CONFIG_MT76x0E) += mt76x0e.o +obj-$(CONFIG_MT76x0_COMMON) += mt76x0-common.o + +mt76x0-common-y := \ + init.o main.o trace.o eeprom.o phy.o \ + mac.o debugfs.o +mt76x0u-y := usb.o usb_mcu.o +mt76x0e-y := pci.o pci_mcu.o -mt76x0-objs = \ - usb.o init.o main.o mcu.o trace.o dma.o eeprom.o phy.o \ - mac.o util.o debugfs.o tx.o core.o # ccflags-y := -DDEBUG CFLAGS_trace.o := -I$(src) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/core.c b/drivers/net/wireless/mediatek/mt76/mt76x0/core.c deleted file mode 100644 index 892803fce842..000000000000 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/core.c +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org> - * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 - * as published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ - -#include "mt76x0.h" - -int mt76x0_wait_asic_ready(struct mt76x0_dev *dev) -{ - int i = 100; - u32 val; - - do { - if (test_bit(MT76_REMOVED, &dev->mt76.state)) - return -EIO; - - val = mt76_rr(dev, MT_MAC_CSR0); - if (val && ~val) - return 0; - - udelay(10); - } while (i--); - - return -EIO; -} diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt76x0/debugfs.c index e7a77a886068..3224e5b1a1e5 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/debugfs.c @@ -19,29 +19,9 @@ #include "eeprom.h" static int -mt76_reg_set(void *data, u64 val) -{ - struct mt76x0_dev *dev = data; - - mt76_wr(dev, dev->debugfs_reg, val); - return 0; -} - -static int -mt76_reg_get(void *data, u64 *val) -{ - struct mt76x0_dev *dev = data; - - *val = mt76_rr(dev, dev->debugfs_reg); - return 0; -} - -DEFINE_SIMPLE_ATTRIBUTE(fops_regval, mt76_reg_get, mt76_reg_set, "0x%08llx\n"); - -static int mt76x0_ampdu_stat_read(struct seq_file *file, void *data) { - struct mt76x0_dev *dev = file->private; + struct mt76x02_dev *dev = file->private; int i, j; #define stat_printf(grp, off, name) \ @@ -95,72 +75,13 @@ static const struct file_operations fops_ampdu_stat = { .release = single_release, }; -static int -mt76x0_eeprom_param_read(struct seq_file *file, void *data) -{ - struct mt76x0_dev *dev = file->private; - int i; - - seq_printf(file, "RF freq offset: %hhx\n", dev->ee->rf_freq_off); - seq_printf(file, "RSSI offset 2GHz: %hhx %hhx\n", - dev->ee->rssi_offset_2ghz[0], dev->ee->rssi_offset_2ghz[1]); - seq_printf(file, "RSSI offset 5GHz: %hhx %hhx %hhx\n", - dev->ee->rssi_offset_5ghz[0], dev->ee->rssi_offset_5ghz[1], - dev->ee->rssi_offset_5ghz[2]); - seq_printf(file, "Temperature offset: %hhx\n", dev->ee->temp_off); - seq_printf(file, "LNA gain 2Ghz: %hhx\n", dev->ee->lna_gain_2ghz); - seq_printf(file, "LNA gain 5Ghz: %hhx %hhx %hhx\n", - dev->ee->lna_gain_5ghz[0], dev->ee->lna_gain_5ghz[1], - dev->ee->lna_gain_5ghz[2]); - seq_printf(file, "Power Amplifier type %hhx\n", dev->ee->pa_type); - seq_printf(file, "Reg channels: %hhu-%hhu\n", dev->ee->reg.start, - dev->ee->reg.start + dev->ee->reg.num - 1); - - seq_puts(file, "Per channel power:\n"); - for (i = 0; i < 58; i++) - seq_printf(file, "\t%d chan:%d pwr:%d\n", i, i, - dev->ee->tx_pwr_per_chan[i]); - - seq_puts(file, "Per rate power 2GHz:\n"); - for (i = 0; i < 5; i++) - seq_printf(file, "\t %d bw20:%d bw40:%d\n", - i, dev->ee->tx_pwr_cfg_2g[i][0], - dev->ee->tx_pwr_cfg_5g[i][1]); - - seq_puts(file, "Per rate power 5GHz:\n"); - for (i = 0; i < 5; i++) - seq_printf(file, "\t %d bw20:%d bw40:%d\n", - i, dev->ee->tx_pwr_cfg_5g[i][0], - dev->ee->tx_pwr_cfg_5g[i][1]); - - return 0; -} - -static int -mt76x0_eeprom_param_open(struct inode *inode, struct file *f) -{ - return single_open(f, mt76x0_eeprom_param_read, inode->i_private); -} - -static const struct file_operations fops_eeprom_param = { - .open = mt76x0_eeprom_param_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - -void mt76x0_init_debugfs(struct mt76x0_dev *dev) +void mt76x0_init_debugfs(struct mt76x02_dev *dev) { struct dentry *dir; - dir = debugfs_create_dir("mt76x0", dev->mt76.hw->wiphy->debugfsdir); + dir = mt76_register_debugfs(&dev->mt76); if (!dir) return; - debugfs_create_u32("regidx", S_IRUSR | S_IWUSR, dir, &dev->debugfs_reg); - debugfs_create_file("regval", S_IRUSR | S_IWUSR, dir, dev, - &fops_regval); debugfs_create_file("ampdu_stat", S_IRUSR, dir, dev, 
&fops_ampdu_stat); - debugfs_create_file("eeprom_param", S_IRUSR, dir, dev, - &fops_eeprom_param); } diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/dma.c b/drivers/net/wireless/mediatek/mt76/mt76x0/dma.c deleted file mode 100644 index e2efb430419b..000000000000 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/dma.c +++ /dev/null @@ -1,522 +0,0 @@ -/* - * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl> - * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 - * as published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#include "mt76x0.h" -#include "dma.h" -#include "usb.h" -#include "trace.h" - -static int mt76x0_submit_rx_buf(struct mt76x0_dev *dev, - struct mt76x0_dma_buf_rx *e, gfp_t gfp); - -static unsigned int ieee80211_get_hdrlen_from_buf(const u8 *data, unsigned len) -{ - const struct ieee80211_hdr *hdr = (const struct ieee80211_hdr *)data; - unsigned int hdrlen; - - if (unlikely(len < 10)) - return 0; - hdrlen = ieee80211_hdrlen(hdr->frame_control); - if (unlikely(hdrlen > len)) - return 0; - return hdrlen; -} - -static struct sk_buff * -mt76x0_rx_skb_from_seg(struct mt76x0_dev *dev, struct mt76x0_rxwi *rxwi, - void *data, u32 seg_len, u32 truesize, struct page *p) -{ - struct sk_buff *skb; - u32 true_len, hdr_len = 0, copy, frag; - - skb = alloc_skb(p ? 128 : seg_len, GFP_ATOMIC); - if (!skb) - return NULL; - - true_len = mt76x0_mac_process_rx(dev, skb, data, rxwi); - if (!true_len || true_len > seg_len) - goto bad_frame; - - hdr_len = ieee80211_get_hdrlen_from_buf(data, true_len); - if (!hdr_len) - goto bad_frame; - - if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_L2PAD)) { - memcpy(skb_put(skb, hdr_len), data, hdr_len); - - data += hdr_len + 2; - true_len -= hdr_len; - hdr_len = 0; - } - - /* If not doing paged RX allocated skb will always have enough space */ - copy = (true_len <= skb_tailroom(skb)) ? true_len : hdr_len + 8; - frag = true_len - copy; - - memcpy(skb_put(skb, copy), data, copy); - data += copy; - - if (frag) { - skb_add_rx_frag(skb, 0, p, data - page_address(p), - frag, truesize); - get_page(p); - } - - return skb; - -bad_frame: - dev_err_ratelimited(dev->mt76.dev, "Error: incorrect frame len:%u hdr:%u\n", - true_len, hdr_len); - dev_kfree_skb(skb); - return NULL; -} - -static void mt76x0_rx_process_seg(struct mt76x0_dev *dev, u8 *data, - u32 seg_len, struct page *p) -{ - struct sk_buff *skb; - struct mt76x0_rxwi *rxwi; - u32 fce_info, truesize = seg_len; - - /* DMA_INFO field at the beginning of the segment contains only some of - * the information, we need to read the FCE descriptor from the end. 
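For readers following the mt76x0 USB RX-path removal below: each aggregated bulk-in buffer held a chain of segments, and the deleted mt76x0_rx_next_seg_len() validated the 16-bit little-endian length prefix of each one (non-zero, 4-byte aligned, within the remaining buffer) before handing the payload on. A hedged userspace sketch of that walk, with the RX-info and rxwi sizes folded into one header constant for brevity:

#include <stdint.h>
#include <stdio.h>

/* Illustrative header/trailer size; the driver computes the real
 * minimum from its DMA, RX-info, rxwi and FCE descriptor sizes. */
#define SEG_HDRS	8	/* length prefix + FCE trailer */

static uint16_t next_seg_len(const uint8_t *data, uint32_t data_len)
{
	/* get_unaligned_le16() equivalent */
	uint16_t dma_len = data[0] | ((uint16_t)data[1] << 8);

	if (data_len < SEG_HDRS || !dma_len ||
	    dma_len + SEG_HDRS > data_len || (dma_len & 0x3))
		return 0;

	return SEG_HDRS + dma_len;
}

int main(void)
{
	uint8_t buf[64] = { 0 };
	uint32_t off = 0;
	uint16_t seg;

	buf[0] = 8;	/* first segment payload: 8 bytes */
	buf[16] = 40;	/* second segment payload: 40 bytes */

	while (off < sizeof(buf) &&
	       (seg = next_seg_len(buf + off, sizeof(buf) - off))) {
		printf("segment of %u bytes at offset %u\n", seg, off);
		off += seg;
	}
	return 0;
}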
- */ - fce_info = get_unaligned_le32(data + seg_len - MT_FCE_INFO_LEN); - seg_len -= MT_FCE_INFO_LEN; - - data += MT_DMA_HDR_LEN; - seg_len -= MT_DMA_HDR_LEN; - - rxwi = (struct mt76x0_rxwi *) data; - data += sizeof(struct mt76x0_rxwi); - seg_len -= sizeof(struct mt76x0_rxwi); - - if (unlikely(FIELD_GET(MT_RXD_INFO_TYPE, fce_info))) - dev_err_once(dev->mt76.dev, "Error: RX path seen a non-pkt urb\n"); - - trace_mt76x0_rx(&dev->mt76, rxwi, fce_info); - - skb = mt76x0_rx_skb_from_seg(dev, rxwi, data, seg_len, truesize, p); - if (!skb) - return; - - spin_lock(&dev->mac_lock); - ieee80211_rx(dev->mt76.hw, skb); - spin_unlock(&dev->mac_lock); -} - -static u16 mt76x0_rx_next_seg_len(u8 *data, u32 data_len) -{ - u32 min_seg_len = MT_DMA_HDR_LEN + MT_RX_INFO_LEN + - sizeof(struct mt76x0_rxwi) + MT_FCE_INFO_LEN; - u16 dma_len = get_unaligned_le16(data); - - if (data_len < min_seg_len || - WARN_ON(!dma_len) || - WARN_ON(dma_len + MT_DMA_HDRS > data_len) || - WARN_ON(dma_len & 0x3)) - return 0; - - return MT_DMA_HDRS + dma_len; -} - -static void -mt76x0_rx_process_entry(struct mt76x0_dev *dev, struct mt76x0_dma_buf_rx *e) -{ - u32 seg_len, data_len = e->urb->actual_length; - u8 *data = page_address(e->p); - struct page *new_p = NULL; - int cnt = 0; - - if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state)) - return; - - /* Copy if there is very little data in the buffer. */ - if (data_len > 512) - new_p = dev_alloc_pages(MT_RX_ORDER); - - while ((seg_len = mt76x0_rx_next_seg_len(data, data_len))) { - mt76x0_rx_process_seg(dev, data, seg_len, new_p ? e->p : NULL); - - data_len -= seg_len; - data += seg_len; - cnt++; - } - - if (cnt > 1) - trace_mt76x0_rx_dma_aggr(&dev->mt76, cnt, !!new_p); - - if (new_p) { - /* we have one extra ref from the allocator */ - __free_pages(e->p, MT_RX_ORDER); - - e->p = new_p; - } -} - -static struct mt76x0_dma_buf_rx * -mt76x0_rx_get_pending_entry(struct mt76x0_dev *dev) -{ - struct mt76x0_rx_queue *q = &dev->rx_q; - struct mt76x0_dma_buf_rx *buf = NULL; - unsigned long flags; - - spin_lock_irqsave(&dev->rx_lock, flags); - - if (!q->pending) - goto out; - - buf = &q->e[q->start]; - q->pending--; - q->start = (q->start + 1) % q->entries; -out: - spin_unlock_irqrestore(&dev->rx_lock, flags); - - return buf; -} - -static void mt76x0_complete_rx(struct urb *urb) -{ - struct mt76x0_dev *dev = urb->context; - struct mt76x0_rx_queue *q = &dev->rx_q; - unsigned long flags; - - spin_lock_irqsave(&dev->rx_lock, flags); - - if (mt76x0_urb_has_error(urb)) - dev_err(dev->mt76.dev, "Error: RX urb failed:%d\n", urb->status); - if (WARN_ONCE(q->e[q->end].urb != urb, "RX urb mismatch")) - goto out; - - q->end = (q->end + 1) % q->entries; - q->pending++; - tasklet_schedule(&dev->rx_tasklet); -out: - spin_unlock_irqrestore(&dev->rx_lock, flags); -} - -static void mt76x0_rx_tasklet(unsigned long data) -{ - struct mt76x0_dev *dev = (struct mt76x0_dev *) data; - struct mt76x0_dma_buf_rx *e; - - while ((e = mt76x0_rx_get_pending_entry(dev))) { - if (e->urb->status) - continue; - - mt76x0_rx_process_entry(dev, e); - mt76x0_submit_rx_buf(dev, e, GFP_ATOMIC); - } -} - -static void mt76x0_complete_tx(struct urb *urb) -{ - struct mt76x0_tx_queue *q = urb->context; - struct mt76x0_dev *dev = q->dev; - struct sk_buff *skb; - unsigned long flags; - - spin_lock_irqsave(&dev->tx_lock, flags); - - if (mt76x0_urb_has_error(urb)) - dev_err(dev->mt76.dev, "Error: TX urb failed:%d\n", urb->status); - if (WARN_ONCE(q->e[q->start].urb != urb, "TX urb mismatch")) - goto out; - - skb = q->e[q->start].skb; - 
trace_mt76x0_tx_dma_done(&dev->mt76, skb); - - __skb_queue_tail(&dev->tx_skb_done, skb); - tasklet_schedule(&dev->tx_tasklet); - - if (q->used == q->entries - q->entries / 8) - ieee80211_wake_queue(dev->mt76.hw, skb_get_queue_mapping(skb)); - - q->start = (q->start + 1) % q->entries; - q->used--; -out: - spin_unlock_irqrestore(&dev->tx_lock, flags); -} - -static void mt76x0_tx_tasklet(unsigned long data) -{ - struct mt76x0_dev *dev = (struct mt76x0_dev *) data; - struct sk_buff_head skbs; - unsigned long flags; - - __skb_queue_head_init(&skbs); - - spin_lock_irqsave(&dev->tx_lock, flags); - - set_bit(MT76_MORE_STATS, &dev->mt76.state); - if (!test_and_set_bit(MT76_READING_STATS, &dev->mt76.state)) - queue_delayed_work(dev->stat_wq, &dev->stat_work, - msecs_to_jiffies(10)); - - skb_queue_splice_init(&dev->tx_skb_done, &skbs); - - spin_unlock_irqrestore(&dev->tx_lock, flags); - - while (!skb_queue_empty(&skbs)) { - struct sk_buff *skb = __skb_dequeue(&skbs); - - mt76x0_tx_status(dev, skb); - } -} - -static int mt76x0_dma_submit_tx(struct mt76x0_dev *dev, - struct sk_buff *skb, u8 ep) -{ - struct usb_device *usb_dev = mt76x0_to_usb_dev(dev); - unsigned snd_pipe = usb_sndbulkpipe(usb_dev, dev->out_ep[ep]); - struct mt76x0_dma_buf_tx *e; - struct mt76x0_tx_queue *q = &dev->tx_q[ep]; - unsigned long flags; - int ret; - - spin_lock_irqsave(&dev->tx_lock, flags); - - if (WARN_ON_ONCE(q->entries <= q->used)) { - ret = -ENOSPC; - goto out; - } - - e = &q->e[q->end]; - e->skb = skb; - usb_fill_bulk_urb(e->urb, usb_dev, snd_pipe, skb->data, skb->len, - mt76x0_complete_tx, q); - ret = usb_submit_urb(e->urb, GFP_ATOMIC); - if (ret) { - /* Special-handle ENODEV from TX urb submission because it will - * often be the first ENODEV we see after device is removed. - */ - if (ret == -ENODEV) - set_bit(MT76_REMOVED, &dev->mt76.state); - else - dev_err(dev->mt76.dev, "Error: TX urb submit failed:%d\n", - ret); - goto out; - } - - q->end = (q->end + 1) % q->entries; - q->used++; - - if (q->used >= q->entries) - ieee80211_stop_queue(dev->mt76.hw, skb_get_queue_mapping(skb)); -out: - spin_unlock_irqrestore(&dev->tx_lock, flags); - - return ret; -} - -/* Map USB endpoint number to Q id in the DMA engine */ -static enum mt76_qsel ep2dmaq(u8 ep) -{ - if (ep == 5) - return MT_QSEL_MGMT; - return MT_QSEL_EDCA; -} - -int mt76x0_dma_enqueue_tx(struct mt76x0_dev *dev, struct sk_buff *skb, - struct mt76_wcid *wcid, int hw_q) -{ - u8 ep = q2ep(hw_q); - u32 dma_flags; - int ret; - - dma_flags = MT_TXD_PKT_INFO_80211; - if (wcid->hw_key_idx == 0xff) - dma_flags |= MT_TXD_PKT_INFO_WIV; - - ret = mt76x0_dma_skb_wrap_pkt(skb, ep2dmaq(ep), dma_flags); - if (ret) - return ret; - - ret = mt76x0_dma_submit_tx(dev, skb, ep); - - if (ret) { - ieee80211_free_txskb(dev->mt76.hw, skb); - return ret; - } - - return 0; -} - -static void mt76x0_kill_rx(struct mt76x0_dev *dev) -{ - int i; - unsigned long flags; - - spin_lock_irqsave(&dev->rx_lock, flags); - - for (i = 0; i < dev->rx_q.entries; i++) { - int next = dev->rx_q.end; - - spin_unlock_irqrestore(&dev->rx_lock, flags); - usb_poison_urb(dev->rx_q.e[next].urb); - spin_lock_irqsave(&dev->rx_lock, flags); - } - - spin_unlock_irqrestore(&dev->rx_lock, flags); -} - -static int mt76x0_submit_rx_buf(struct mt76x0_dev *dev, - struct mt76x0_dma_buf_rx *e, gfp_t gfp) -{ - struct usb_device *usb_dev = mt76x0_to_usb_dev(dev); - u8 *buf = page_address(e->p); - unsigned pipe; - int ret; - - pipe = usb_rcvbulkpipe(usb_dev, dev->in_ep[MT_EP_IN_PKT_RX]); - - usb_fill_bulk_urb(e->urb, usb_dev, pipe, 
buf, MT_RX_URB_SIZE, - mt76x0_complete_rx, dev); - - trace_mt76x0_submit_urb(&dev->mt76, e->urb); - ret = usb_submit_urb(e->urb, gfp); - if (ret) - dev_err(dev->mt76.dev, "Error: submit RX URB failed:%d\n", ret); - - return ret; -} - -static int mt76x0_submit_rx(struct mt76x0_dev *dev) -{ - int i, ret; - - for (i = 0; i < dev->rx_q.entries; i++) { - ret = mt76x0_submit_rx_buf(dev, &dev->rx_q.e[i], GFP_KERNEL); - if (ret) - return ret; - } - - return 0; -} - -static void mt76x0_free_rx(struct mt76x0_dev *dev) -{ - int i; - - for (i = 0; i < dev->rx_q.entries; i++) { - __free_pages(dev->rx_q.e[i].p, MT_RX_ORDER); - usb_free_urb(dev->rx_q.e[i].urb); - } -} - -static int mt76x0_alloc_rx(struct mt76x0_dev *dev) -{ - int i; - - memset(&dev->rx_q, 0, sizeof(dev->rx_q)); - dev->rx_q.dev = dev; - dev->rx_q.entries = N_RX_ENTRIES; - - for (i = 0; i < N_RX_ENTRIES; i++) { - dev->rx_q.e[i].urb = usb_alloc_urb(0, GFP_KERNEL); - dev->rx_q.e[i].p = dev_alloc_pages(MT_RX_ORDER); - - if (!dev->rx_q.e[i].urb || !dev->rx_q.e[i].p) - return -ENOMEM; - } - - return 0; -} - -static void mt76x0_free_tx_queue(struct mt76x0_tx_queue *q) -{ - int i; - - WARN_ON(q->used); - - for (i = 0; i < q->entries; i++) { - usb_poison_urb(q->e[i].urb); - usb_free_urb(q->e[i].urb); - } -} - -static void mt76x0_free_tx(struct mt76x0_dev *dev) -{ - int i; - - for (i = 0; i < __MT_EP_OUT_MAX; i++) - mt76x0_free_tx_queue(&dev->tx_q[i]); -} - -static int mt76x0_alloc_tx_queue(struct mt76x0_dev *dev, - struct mt76x0_tx_queue *q) -{ - int i; - - q->dev = dev; - q->entries = N_TX_ENTRIES; - - for (i = 0; i < N_TX_ENTRIES; i++) { - q->e[i].urb = usb_alloc_urb(0, GFP_KERNEL); - if (!q->e[i].urb) - return -ENOMEM; - } - - return 0; -} - -static int mt76x0_alloc_tx(struct mt76x0_dev *dev) -{ - int i; - - dev->tx_q = devm_kcalloc(dev->mt76.dev, __MT_EP_OUT_MAX, - sizeof(*dev->tx_q), GFP_KERNEL); - - for (i = 0; i < __MT_EP_OUT_MAX; i++) - if (mt76x0_alloc_tx_queue(dev, &dev->tx_q[i])) - return -ENOMEM; - - return 0; -} - -int mt76x0_dma_init(struct mt76x0_dev *dev) -{ - int ret = -ENOMEM; - - tasklet_init(&dev->tx_tasklet, mt76x0_tx_tasklet, (unsigned long) dev); - tasklet_init(&dev->rx_tasklet, mt76x0_rx_tasklet, (unsigned long) dev); - - ret = mt76x0_alloc_tx(dev); - if (ret) - goto err; - ret = mt76x0_alloc_rx(dev); - if (ret) - goto err; - - ret = mt76x0_submit_rx(dev); - if (ret) - goto err; - - return 0; -err: - mt76x0_dma_cleanup(dev); - return ret; -} - -void mt76x0_dma_cleanup(struct mt76x0_dev *dev) -{ - mt76x0_kill_rx(dev); - - tasklet_kill(&dev->rx_tasklet); - - mt76x0_free_rx(dev); - mt76x0_free_tx(dev); - - tasklet_kill(&dev->tx_tasklet); -} diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c index 36da1e6bc21a..5735038c0e2d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c @@ -13,6 +13,7 @@ * GNU General Public License for more details. */ +#include <linux/module.h> #include <linux/of.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> @@ -20,81 +21,20 @@ #include <asm/unaligned.h> #include "mt76x0.h" #include "eeprom.h" - -static bool -field_valid(u8 val) -{ - return val != 0xff; -} - -static s8 -field_validate(u8 val) -{ - if (!field_valid(val)) - return 0; - - return val; -} - -static inline int -sign_extend(u32 val, unsigned int size) -{ - bool sign = val & BIT(size - 1); - - val &= BIT(size - 1) - 1; - - return sign ? 
val : -val; -} - -static int -mt76x0_efuse_read(struct mt76x0_dev *dev, u16 addr, u8 *data, - enum mt76x0_eeprom_access_modes mode) -{ - u32 val; - int i; - - val = mt76_rr(dev, MT_EFUSE_CTRL); - val &= ~(MT_EFUSE_CTRL_AIN | - MT_EFUSE_CTRL_MODE); - val |= FIELD_PREP(MT_EFUSE_CTRL_AIN, addr & ~0xf) | - FIELD_PREP(MT_EFUSE_CTRL_MODE, mode) | - MT_EFUSE_CTRL_KICK; - mt76_wr(dev, MT_EFUSE_CTRL, val); - - if (!mt76_poll(dev, MT_EFUSE_CTRL, MT_EFUSE_CTRL_KICK, 0, 1000)) - return -ETIMEDOUT; - - val = mt76_rr(dev, MT_EFUSE_CTRL); - if ((val & MT_EFUSE_CTRL_AOUT) == MT_EFUSE_CTRL_AOUT) { - /* Parts of eeprom not in the usage map (0x80-0xc0,0xf0) - * will not return valid data but it's ok. - */ - memset(data, 0xff, 16); - return 0; - } - - for (i = 0; i < 4; i++) { - val = mt76_rr(dev, MT_EFUSE_DATA(i)); - put_unaligned_le32(val, data + 4 * i); - } - - return 0; -} +#include "../mt76x02_phy.h" #define MT_MAP_READS DIV_ROUND_UP(MT_EFUSE_USAGE_MAP_SIZE, 16) static int -mt76x0_efuse_physical_size_check(struct mt76x0_dev *dev) +mt76x0_efuse_physical_size_check(struct mt76x02_dev *dev) { u8 data[MT_MAP_READS * 16]; int ret, i; u32 start = 0, end = 0, cnt_free; - for (i = 0; i < MT_MAP_READS; i++) { - ret = mt76x0_efuse_read(dev, MT_EE_USAGE_MAP_START + i * 16, - data + i * 16, MT_EE_PHYSICAL_READ); - if (ret) - return ret; - } + ret = mt76x02_get_efuse_data(&dev->mt76, MT_EE_USAGE_MAP_START, + data, sizeof(data), MT_EE_PHYSICAL_READ); + if (ret) + return ret; for (i = 0; i < MT_EFUSE_USAGE_MAP_SIZE; i++) if (!data[i]) { @@ -105,341 +45,307 @@ mt76x0_efuse_physical_size_check(struct mt76x0_dev *dev) cnt_free = end - start + 1; if (MT_EFUSE_USAGE_MAP_SIZE - cnt_free < 5) { - dev_err(dev->mt76.dev, "Error: your device needs default EEPROM file and this driver doesn't support it!\n"); + dev_err(dev->mt76.dev, + "driver does not support default EEPROM\n"); return -EINVAL; } return 0; } -static void -mt76x0_set_chip_cap(struct mt76x0_dev *dev, u8 *eeprom) +static void mt76x0_set_chip_cap(struct mt76x02_dev *dev) { - enum mt76x2_board_type { BOARD_TYPE_2GHZ = 1, BOARD_TYPE_5GHZ = 2 }; - u16 nic_conf0 = get_unaligned_le16(eeprom + MT_EE_NIC_CONF_0); - u16 nic_conf1 = get_unaligned_le16(eeprom + MT_EE_NIC_CONF_1); - - dev_dbg(dev->mt76.dev, "NIC_CONF0: %04x NIC_CONF1: %04x\n", nic_conf0, nic_conf1); - - switch (FIELD_GET(MT_EE_NIC_CONF_0_BOARD_TYPE, nic_conf0)) { - case BOARD_TYPE_5GHZ: - dev->ee->has_5ghz = true; - break; - case BOARD_TYPE_2GHZ: - dev->ee->has_2ghz = true; - break; - default: - dev->ee->has_2ghz = true; - dev->ee->has_5ghz = true; - break; - } + u16 nic_conf0 = mt76x02_eeprom_get(&dev->mt76, MT_EE_NIC_CONF_0); + u16 nic_conf1 = mt76x02_eeprom_get(&dev->mt76, MT_EE_NIC_CONF_1); + + mt76x02_eeprom_parse_hw_cap(&dev->mt76); + dev_dbg(dev->mt76.dev, "2GHz %d 5GHz %d\n", + dev->mt76.cap.has_2ghz, dev->mt76.cap.has_5ghz); - dev_dbg(dev->mt76.dev, "Has 2GHZ %d 5GHZ %d\n", dev->ee->has_2ghz, dev->ee->has_5ghz); + if (dev->no_2ghz) { + dev->mt76.cap.has_2ghz = false; + dev_dbg(dev->mt76.dev, "mask out 2GHz support\n"); + } - if (!field_valid(nic_conf1 & 0xff)) + if (!mt76x02_field_valid(nic_conf1 & 0xff)) nic_conf1 &= 0xff00; if (nic_conf1 & MT_EE_NIC_CONF_1_HW_RF_CTRL) dev_err(dev->mt76.dev, - "Error: this driver does not support HW RF ctrl\n"); + "driver does not support HW RF ctrl\n"); - if (!field_valid(nic_conf0 >> 8)) + if (!mt76x02_field_valid(nic_conf0 >> 8)) return; if (FIELD_GET(MT_EE_NIC_CONF_0_RX_PATH, nic_conf0) > 1 || FIELD_GET(MT_EE_NIC_CONF_0_TX_PATH, nic_conf0) > 1) - 
dev_err(dev->mt76.dev, - "Error: device has more than 1 RX/TX stream!\n"); - - dev->ee->pa_type = FIELD_GET(MT_EE_NIC_CONF_0_PA_TYPE, nic_conf0); - dev_dbg(dev->mt76.dev, "PA Type %d\n", dev->ee->pa_type); + dev_err(dev->mt76.dev, "invalid tx-rx stream\n"); } -static int -mt76x0_set_macaddr(struct mt76x0_dev *dev, const u8 *eeprom) +static void mt76x0_set_temp_offset(struct mt76x02_dev *dev) { - const void *src = eeprom + MT_EE_MAC_ADDR; - - ether_addr_copy(dev->macaddr, src); - - if (!is_valid_ether_addr(dev->macaddr)) { - eth_random_addr(dev->macaddr); - dev_info(dev->mt76.dev, - "Invalid MAC address, using random address %pM\n", - dev->macaddr); - } + u8 val; - mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dev->macaddr)); - mt76_wr(dev, MT_MAC_ADDR_DW1, get_unaligned_le16(dev->macaddr + 4) | - FIELD_PREP(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff)); - - return 0; -} - -static void -mt76x0_set_temp_offset(struct mt76x0_dev *dev, u8 *eeprom) -{ - u8 temp = eeprom[MT_EE_TEMP_OFFSET]; - - if (field_valid(temp)) - dev->ee->temp_off = sign_extend(temp, 8); + val = mt76x02_eeprom_get(&dev->mt76, MT_EE_2G_TARGET_POWER) >> 8; + if (mt76x02_field_valid(val)) + dev->cal.rx.temp_offset = mt76x02_sign_extend(val, 8); else - dev->ee->temp_off = -10; + dev->cal.rx.temp_offset = -10; } -static void -mt76x0_set_country_reg(struct mt76x0_dev *dev, u8 *eeprom) +static void mt76x0_set_freq_offset(struct mt76x02_dev *dev) { - /* Note: - region 31 is not valid for mt76x0 (see rtmp_init.c) - * - comments in rtmp_def.h are incorrect (see rt_channel.c) - */ - static const struct reg_channel_bounds chan_bounds[] = { - /* EEPROM country regions 0 - 7 */ - { 1, 11 }, { 1, 13 }, { 10, 2 }, { 10, 4 }, - { 14, 1 }, { 1, 14 }, { 3, 7 }, { 5, 9 }, - /* EEPROM country regions 32 - 33 */ - { 1, 11 }, { 1, 14 } - }; - u8 val = eeprom[MT_EE_COUNTRY_REGION_2GHZ]; - int idx = -1; - - dev_dbg(dev->mt76.dev, "REG 2GHZ %u REG 5GHZ %u\n", val, eeprom[MT_EE_COUNTRY_REGION_5GHZ]); - if (val < 8) - idx = val; - if (val > 31 && val < 33) - idx = val - 32 + 8; - - if (idx != -1) - dev_info(dev->mt76.dev, - "EEPROM country region %02hhx (channels %hhd-%hhd)\n", - val, chan_bounds[idx].start, - chan_bounds[idx].start + chan_bounds[idx].num - 1); - else - idx = 5; /* channels 1 - 14 */ - - dev->ee->reg = chan_bounds[idx]; + struct mt76x02_rx_freq_cal *caldata = &dev->cal.rx; + u8 val; - /* TODO: country region 33 is special - phy should be set to B-mode - * before entering channel 14 (see sta/connect.c) - */ -} - -static void -mt76x0_set_rf_freq_off(struct mt76x0_dev *dev, u8 *eeprom) -{ - u8 comp; + val = mt76x02_eeprom_get(&dev->mt76, MT_EE_FREQ_OFFSET); + if (!mt76x02_field_valid(val)) + val = 0; + caldata->freq_offset = val; - dev->ee->rf_freq_off = field_validate(eeprom[MT_EE_FREQ_OFFSET]); - comp = field_validate(eeprom[MT_EE_FREQ_OFFSET_COMPENSATION]); + val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TSSI_BOUND4) >> 8; + if (!mt76x02_field_valid(val)) + val = 0; - if (comp & BIT(7)) - dev->ee->rf_freq_off -= comp & 0x7f; - else - dev->ee->rf_freq_off += comp; + caldata->freq_offset -= mt76x02_sign_extend(val, 8); } -static void -mt76x0_set_lna_gain(struct mt76x0_dev *dev, u8 *eeprom) +void mt76x0_read_rx_gain(struct mt76x02_dev *dev) { - u8 gain; + struct ieee80211_channel *chan = dev->mt76.chandef.chan; + struct mt76x02_rx_freq_cal *caldata = &dev->cal.rx; + s8 val, lna_5g[3], lna_2g; + u16 rssi_offset; + int i; - dev->ee->lna_gain_2ghz = eeprom[MT_EE_LNA_GAIN_2GHZ]; - dev->ee->lna_gain_5ghz[0] = eeprom[MT_EE_LNA_GAIN_5GHZ_0]; + 
mt76x02_get_rx_gain(&dev->mt76, chan->band, &rssi_offset, + &lna_2g, lna_5g); + caldata->lna_gain = mt76x02_get_lna_gain(&dev->mt76, &lna_2g, + lna_5g, chan); - gain = eeprom[MT_EE_LNA_GAIN_5GHZ_1]; - if (gain == 0xff || gain == 0) - dev->ee->lna_gain_5ghz[1] = dev->ee->lna_gain_5ghz[0]; - else - dev->ee->lna_gain_5ghz[1] = gain; + for (i = 0; i < ARRAY_SIZE(caldata->rssi_offset); i++) { + val = rssi_offset >> (8 * i); + if (val < -10 || val > 10) + val = 0; - gain = eeprom[MT_EE_LNA_GAIN_5GHZ_2]; - if (gain == 0xff || gain == 0) - dev->ee->lna_gain_5ghz[2] = dev->ee->lna_gain_5ghz[0]; - else - dev->ee->lna_gain_5ghz[2] = gain; + caldata->rssi_offset[i] = val; + } } -static void -mt76x0_set_rssi_offset(struct mt76x0_dev *dev, u8 *eeprom) +static s8 mt76x0_get_delta(struct mt76_dev *dev) { - int i; - s8 *rssi_offset = dev->ee->rssi_offset_2ghz; - - for (i = 0; i < 2; i++) { - rssi_offset[i] = eeprom[MT_EE_RSSI_OFFSET + i]; + struct cfg80211_chan_def *chandef = &dev->chandef; + u8 val; - if (rssi_offset[i] < -10 || rssi_offset[i] > 10) { - dev_warn(dev->mt76.dev, - "Warning: EEPROM RSSI is invalid %02hhx\n", - rssi_offset[i]); - rssi_offset[i] = 0; - } - } - - rssi_offset = dev->ee->rssi_offset_5ghz; + if (mt76x02_tssi_enabled(dev)) + return 0; - for (i = 0; i < 3; i++) { - rssi_offset[i] = eeprom[MT_EE_RSSI_OFFSET_5GHZ + i]; + if (chandef->width == NL80211_CHAN_WIDTH_80) { + val = mt76x02_eeprom_get(dev, MT_EE_5G_TARGET_POWER) >> 8; + } else if (chandef->width == NL80211_CHAN_WIDTH_40) { + u16 data; - if (rssi_offset[i] < -10 || rssi_offset[i] > 10) { - dev_warn(dev->mt76.dev, - "Warning: EEPROM RSSI is invalid %02hhx\n", - rssi_offset[i]); - rssi_offset[i] = 0; - } + data = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_DELTA_BW40); + if (chandef->chan->band == NL80211_BAND_5GHZ) + val = data >> 8; + else + val = data; + } else { + return 0; } + + return mt76x02_rate_power_val(val); } -static u32 -calc_bw40_power_rate(u32 value, int delta) +void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev) { - u32 ret = 0; - int i, tmp; - - for (i = 0; i < 4; i++) { - tmp = s6_to_int((value >> i*8) & 0xff) + delta; - ret |= (u32)(int_to_s6(tmp)) << i*8; - } - - return ret; + struct ieee80211_channel *chan = dev->mt76.chandef.chan; + bool is_2ghz = chan->band == NL80211_BAND_2GHZ; + struct mt76_rate_power *t = &dev->mt76.rate_power; + s8 delta = mt76x0_get_delta(&dev->mt76); + u16 val, addr; + + memset(t, 0, sizeof(*t)); + + /* cck 1M, 2M, 5.5M, 11M */ + val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_BYRATE_BASE); + t->cck[0] = t->cck[1] = s6_to_s8(val); + t->cck[2] = t->cck[3] = s6_to_s8(val >> 8); + + /* ofdm 6M, 9M, 12M, 18M */ + addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 2 : 0x120; + val = mt76x02_eeprom_get(&dev->mt76, addr); + t->ofdm[0] = t->ofdm[1] = s6_to_s8(val); + t->ofdm[2] = t->ofdm[3] = s6_to_s8(val >> 8); + + /* ofdm 24M, 36M, 48M, 54M */ + addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 4 : 0x122; + val = mt76x02_eeprom_get(&dev->mt76, addr); + t->ofdm[4] = t->ofdm[5] = s6_to_s8(val); + t->ofdm[6] = t->ofdm[7] = s6_to_s8(val >> 8); + + /* ht-vht mcs 1ss 0, 1, 2, 3 */ + addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 6 : 0x124; + val = mt76x02_eeprom_get(&dev->mt76, addr); + t->ht[0] = t->ht[1] = t->vht[0] = t->vht[1] = s6_to_s8(val); + t->ht[2] = t->ht[3] = t->vht[2] = t->vht[3] = s6_to_s8(val >> 8); + + /* ht-vht mcs 1ss 4, 5, 6 */ + addr = is_2ghz ? 
MT_EE_TX_POWER_BYRATE_BASE + 8 : 0x126; + val = mt76x02_eeprom_get(&dev->mt76, addr); + t->ht[4] = t->ht[5] = t->vht[4] = t->vht[5] = s6_to_s8(val); + t->ht[6] = t->vht[6] = s6_to_s8(val >> 8); + + /* ht-vht mcs 1ss 0, 1, 2, 3 stbc */ + addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 14 : 0xec; + val = mt76x02_eeprom_get(&dev->mt76, addr); + t->stbc[0] = t->stbc[1] = s6_to_s8(val); + t->stbc[2] = t->stbc[3] = s6_to_s8(val >> 8); + + /* ht-vht mcs 1ss 4, 5, 6 stbc */ + addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 16 : 0xee; + val = mt76x02_eeprom_get(&dev->mt76, addr); + t->stbc[4] = t->stbc[5] = s6_to_s8(val); + t->stbc[6] = t->stbc[7] = s6_to_s8(val >> 8); + + /* vht mcs 8, 9 5GHz */ + val = mt76x02_eeprom_get(&dev->mt76, 0x132); + t->vht[7] = s6_to_s8(val); + t->vht[8] = s6_to_s8(val >> 8); + + mt76x02_add_rate_power_offset(t, delta); } -static s8 -get_delta(u8 val) +void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info) { - s8 ret; + struct mt76x0_chan_map { + u8 chan; + u8 offset; + } chan_map[] = { + { 2, 0 }, { 4, 1 }, { 6, 2 }, { 8, 3 }, + { 10, 4 }, { 12, 5 }, { 14, 6 }, { 38, 0 }, + { 44, 1 }, { 48, 2 }, { 54, 3 }, { 60, 4 }, + { 64, 5 }, { 102, 6 }, { 108, 7 }, { 112, 8 }, + { 118, 9 }, { 124, 10 }, { 128, 11 }, { 134, 12 }, + { 140, 13 }, { 151, 14 }, { 157, 15 }, { 161, 16 }, + { 167, 17 }, { 171, 18 }, { 173, 19 }, + }; + struct ieee80211_channel *chan = dev->mt76.chandef.chan; + u8 offset, addr; + u16 data; + int i; - if (!field_valid(val) || !(val & BIT(7))) - return 0; + for (i = 0; i < ARRAY_SIZE(chan_map); i++) { + if (chan_map[i].chan <= chan->hw_value) { + offset = chan_map[i].offset; + break; + } + } + if (i == ARRAY_SIZE(chan_map)) + offset = chan_map[0].offset; + + if (chan->band == NL80211_BAND_2GHZ) { + addr = MT_EE_TX_POWER_DELTA_BW80 + offset; + } else { + switch (chan->hw_value) { + case 58: + offset = 8; + break; + case 106: + offset = 14; + break; + case 112: + offset = 20; + break; + case 155: + offset = 30; + break; + default: + break; + } + addr = MT_EE_TX_POWER_0_GRP4_TSSI_SLOPE + 2 + offset; + } - ret = val & 0x1f; - if (ret > 8) - ret = 8; - if (val & BIT(6)) - ret = -ret; + data = mt76x02_eeprom_get(&dev->mt76, addr); - return ret; + info[0] = data; + if (!info[0] || info[0] > 0x3f) + info[0] = 5; + + info[1] = data >> 8; + if (!info[1] || info[1] > 0x3f) + info[1] = 5; } -static void -mt76x0_set_tx_power_per_rate(struct mt76x0_dev *dev, u8 *eeprom) +static int mt76x0_check_eeprom(struct mt76x02_dev *dev) { - s8 bw40_delta_2g, bw40_delta_5g; - u32 val; - int i; - - bw40_delta_2g = get_delta(eeprom[MT_EE_TX_POWER_DELTA_BW40]); - bw40_delta_5g = get_delta(eeprom[MT_EE_TX_POWER_DELTA_BW40 + 1]); - - for (i = 0; i < 5; i++) { - val = get_unaligned_le32(eeprom + MT_EE_TX_POWER_BYRATE(i)); + u16 val; - /* Skip last 16 bits. */ - if (i == 4) - val &= 0x0000ffff; + val = get_unaligned_le16(dev->mt76.eeprom.data); + if (!val) + val = get_unaligned_le16(dev->mt76.eeprom.data + + MT_EE_PCI_ID); - dev->ee->tx_pwr_cfg_2g[i][0] = val; - dev->ee->tx_pwr_cfg_2g[i][1] = calc_bw40_power_rate(val, bw40_delta_2g); + switch (val) { + case 0x7650: + case 0x7610: + return 0; + default: + dev_err(dev->mt76.dev, "EEPROM data check failed: %04x\n", + val); + return -EINVAL; } - - /* Reading per rate tx power for 5 GHz band is a bit more complex. Note - * we mix 16 bit and 32 bit reads and sometimes do shifts. 
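The new per-rate table above leans on a single conversion: every power field in the EEPROM is a 6-bit two's-complement value, packed two per 16-bit word (low byte, high byte). The s6_to_s8() helper that decodes it is added in the eeprom.h hunk further down; here is a standalone copy of it with a hypothetical EEPROM word for demonstration:

#include <stdint.h>
#include <stdio.h>

/* Userspace copy of the driver's s6_to_s8(): take the low six bits
 * and sign-extend from bit 5. */
static int8_t s6_to_s8(uint32_t val)
{
	int8_t ret = val & 0x3f;	/* GENMASK(5, 0) */

	if (ret & 0x20)			/* BIT(5) is the sign bit */
		ret -= 0x40;		/* BIT(6) */
	return ret;
}

int main(void)
{
	uint16_t word = 0x3a02;	/* hypothetical EEPROM word */

	/* low byte and high byte each carry one signed 6-bit value */
	printf("%d %d\n", s6_to_s8(word), s6_to_s8(word >> 8));
	return 0;
}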
- */ - val = get_unaligned_le16(eeprom + 0x120); - val <<= 16; - dev->ee->tx_pwr_cfg_5g[0][0] = val; - dev->ee->tx_pwr_cfg_5g[0][1] = calc_bw40_power_rate(val, bw40_delta_5g); - - val = get_unaligned_le32(eeprom + 0x122); - dev->ee->tx_pwr_cfg_5g[1][0] = val; - dev->ee->tx_pwr_cfg_5g[1][1] = calc_bw40_power_rate(val, bw40_delta_5g); - - val = get_unaligned_le16(eeprom + 0x126); - dev->ee->tx_pwr_cfg_5g[2][0] = val; - dev->ee->tx_pwr_cfg_5g[2][1] = calc_bw40_power_rate(val, bw40_delta_5g); - - val = get_unaligned_le16(eeprom + 0xec); - val <<= 16; - dev->ee->tx_pwr_cfg_5g[3][0] = val; - dev->ee->tx_pwr_cfg_5g[3][1] = calc_bw40_power_rate(val, bw40_delta_5g); - - val = get_unaligned_le16(eeprom + 0xee); - dev->ee->tx_pwr_cfg_5g[4][0] = val; - dev->ee->tx_pwr_cfg_5g[4][1] = calc_bw40_power_rate(val, bw40_delta_5g); } -static void -mt76x0_set_tx_power_per_chan(struct mt76x0_dev *dev, u8 *eeprom) +static int mt76x0_load_eeprom(struct mt76x02_dev *dev) { - int i; - u8 tx_pwr; + int found; - for (i = 0; i < 14; i++) { - tx_pwr = eeprom[MT_EE_TX_POWER_OFFSET_2GHZ + i]; - if (tx_pwr <= 0x3f && tx_pwr > 0) - dev->ee->tx_pwr_per_chan[i] = tx_pwr; - else - dev->ee->tx_pwr_per_chan[i] = 5; - } + found = mt76_eeprom_init(&dev->mt76, MT76X0_EEPROM_SIZE); + if (found < 0) + return found; - for (i = 0; i < 40; i++) { - tx_pwr = eeprom[MT_EE_TX_POWER_OFFSET_5GHZ + i]; - if (tx_pwr <= 0x3f && tx_pwr > 0) - dev->ee->tx_pwr_per_chan[14 + i] = tx_pwr; - else - dev->ee->tx_pwr_per_chan[14 + i] = 5; - } + if (found && !mt76x0_check_eeprom(dev)) + return 0; + + found = mt76x0_efuse_physical_size_check(dev); + if (found < 0) + return found; - dev->ee->tx_pwr_per_chan[54] = dev->ee->tx_pwr_per_chan[22]; - dev->ee->tx_pwr_per_chan[55] = dev->ee->tx_pwr_per_chan[28]; - dev->ee->tx_pwr_per_chan[56] = dev->ee->tx_pwr_per_chan[34]; - dev->ee->tx_pwr_per_chan[57] = dev->ee->tx_pwr_per_chan[44]; + return mt76x02_get_efuse_data(&dev->mt76, 0, dev->mt76.eeprom.data, + MT76X0_EEPROM_SIZE, MT_EE_READ); } -int -mt76x0_eeprom_init(struct mt76x0_dev *dev) +int mt76x0_eeprom_init(struct mt76x02_dev *dev) { - u8 *eeprom; - int i, ret; - - ret = mt76x0_efuse_physical_size_check(dev); - if (ret) - return ret; - - dev->ee = devm_kzalloc(dev->mt76.dev, sizeof(*dev->ee), GFP_KERNEL); - if (!dev->ee) - return -ENOMEM; + u8 version, fae; + u16 data; + int err; - eeprom = kmalloc(MT76X0_EEPROM_SIZE, GFP_KERNEL); - if (!eeprom) - return -ENOMEM; + err = mt76x0_load_eeprom(dev); + if (err < 0) + return err; - for (i = 0; i + 16 <= MT76X0_EEPROM_SIZE; i += 16) { - ret = mt76x0_efuse_read(dev, i, eeprom + i, MT_EE_READ); - if (ret) - goto out; - } + data = mt76x02_eeprom_get(&dev->mt76, MT_EE_VERSION); + version = data >> 8; + fae = data; - if (eeprom[MT_EE_VERSION_EE] > MT76X0U_EE_MAX_VER) + if (version > MT76X0U_EE_MAX_VER) dev_warn(dev->mt76.dev, "Warning: unsupported EEPROM version %02hhx\n", - eeprom[MT_EE_VERSION_EE]); + version); dev_info(dev->mt76.dev, "EEPROM ver:%02hhx fae:%02hhx\n", - eeprom[MT_EE_VERSION_EE], eeprom[MT_EE_VERSION_FAE]); - - mt76x0_set_macaddr(dev, eeprom); - mt76x0_set_chip_cap(dev, eeprom); - mt76x0_set_country_reg(dev, eeprom); - mt76x0_set_rf_freq_off(dev, eeprom); - mt76x0_set_temp_offset(dev, eeprom); - mt76x0_set_lna_gain(dev, eeprom); - mt76x0_set_rssi_offset(dev, eeprom); - dev->chainmask = 0x0101; - - mt76x0_set_tx_power_per_rate(dev, eeprom); - mt76x0_set_tx_power_per_chan(dev, eeprom); - -out: - kfree(eeprom); - return ret; + version, fae); + + mt76x02_mac_setaddr(&dev->mt76, + dev->mt76.eeprom.data + 
MT_EE_MAC_ADDR); + mt76x0_set_chip_cap(dev); + mt76x0_set_freq_offset(dev); + mt76x0_set_temp_offset(dev); + + dev->mt76.chainmask = 0x0101; + + return 0; } + +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h index e37b573aed7b..40fd4e61769b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h @@ -16,134 +16,25 @@ #ifndef __MT76X0U_EEPROM_H #define __MT76X0U_EEPROM_H -struct mt76x0_dev; +#include "../mt76x02_eeprom.h" -#define MT76X0U_EE_MAX_VER 0x0c -#define MT76X0_EEPROM_SIZE 512 +struct mt76x02_dev; -#define MT76X0U_DEFAULT_TX_POWER 6 +#define MT76X0U_EE_MAX_VER 0x0c +#define MT76X0_EEPROM_SIZE 512 -enum mt76_eeprom_field { - MT_EE_CHIP_ID = 0x00, - MT_EE_VERSION_FAE = 0x02, - MT_EE_VERSION_EE = 0x03, - MT_EE_MAC_ADDR = 0x04, - MT_EE_NIC_CONF_0 = 0x34, - MT_EE_NIC_CONF_1 = 0x36, - MT_EE_COUNTRY_REGION_5GHZ = 0x38, - MT_EE_COUNTRY_REGION_2GHZ = 0x39, - MT_EE_FREQ_OFFSET = 0x3a, - MT_EE_NIC_CONF_2 = 0x42, +int mt76x0_eeprom_init(struct mt76x02_dev *dev); +void mt76x0_read_rx_gain(struct mt76x02_dev *dev); +void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev); +void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info); - MT_EE_LNA_GAIN_2GHZ = 0x44, - MT_EE_LNA_GAIN_5GHZ_0 = 0x45, - MT_EE_RSSI_OFFSET = 0x46, - MT_EE_RSSI_OFFSET_5GHZ = 0x4a, - MT_EE_LNA_GAIN_5GHZ_1 = 0x49, - MT_EE_LNA_GAIN_5GHZ_2 = 0x4d, - - MT_EE_TX_POWER_DELTA_BW40 = 0x50, - - MT_EE_TX_POWER_OFFSET_2GHZ = 0x52, - - MT_EE_TX_TSSI_SLOPE = 0x6e, - MT_EE_TX_TSSI_OFFSET_GROUP = 0x6f, - MT_EE_TX_TSSI_OFFSET = 0x76, - - MT_EE_TX_POWER_OFFSET_5GHZ = 0x78, - - MT_EE_TEMP_OFFSET = 0xd1, - MT_EE_FREQ_OFFSET_COMPENSATION = 0xdb, - MT_EE_TX_POWER_BYRATE_BASE = 0xde, - - MT_EE_TX_POWER_BYRATE_BASE_5GHZ = 0x120, - - MT_EE_USAGE_MAP_START = 0x1e0, - MT_EE_USAGE_MAP_END = 0x1fc, -}; - -#define MT_EE_NIC_CONF_0_RX_PATH GENMASK(3, 0) -#define MT_EE_NIC_CONF_0_TX_PATH GENMASK(7, 4) -#define MT_EE_NIC_CONF_0_PA_TYPE GENMASK(9, 8) -#define MT_EE_NIC_CONF_0_BOARD_TYPE GENMASK(13, 12) - -#define MT_EE_NIC_CONF_1_HW_RF_CTRL BIT(0) -#define MT_EE_NIC_CONF_1_TEMP_TX_ALC BIT(1) -#define MT_EE_NIC_CONF_1_LNA_EXT_2G BIT(2) -#define MT_EE_NIC_CONF_1_LNA_EXT_5G BIT(3) -#define MT_EE_NIC_CONF_1_TX_ALC_EN BIT(13) - -#define MT_EE_NIC_CONF_2_RX_STREAM GENMASK(3, 0) -#define MT_EE_NIC_CONF_2_TX_STREAM GENMASK(7, 4) -#define MT_EE_NIC_CONF_2_HW_ANTDIV BIT(8) -#define MT_EE_NIC_CONF_2_XTAL_OPTION GENMASK(10, 9) -#define MT_EE_NIC_CONF_2_TEMP_DISABLE BIT(11) -#define MT_EE_NIC_CONF_2_COEX_METHOD GENMASK(15, 13) - -#define MT_EE_TX_POWER_BYRATE(i) (MT_EE_TX_POWER_BYRATE_BASE + \ - (i) * 4) - -#define MT_EFUSE_USAGE_MAP_SIZE (MT_EE_USAGE_MAP_END - \ - MT_EE_USAGE_MAP_START + 1) - -enum mt76x0_eeprom_access_modes { - MT_EE_READ = 0, - MT_EE_PHYSICAL_READ = 1, -}; - -struct reg_channel_bounds { - u8 start; - u8 num; -}; - -struct mt76x0_eeprom_params { - u8 rf_freq_off; - s16 temp_off; - s8 rssi_offset_2ghz[2]; - s8 rssi_offset_5ghz[3]; - s8 lna_gain_2ghz; - s8 lna_gain_5ghz[3]; - u8 pa_type; - - /* TX_PWR_CFG_* values from EEPROM for 20 and 40 Mhz bandwidths. 
*/ - u32 tx_pwr_cfg_2g[5][2]; - u32 tx_pwr_cfg_5g[5][2]; - - u8 tx_pwr_per_chan[58]; - - struct reg_channel_bounds reg; - - bool has_2ghz; - bool has_5ghz; -}; - -int mt76x0_eeprom_init(struct mt76x0_dev *dev); - -static inline u32 s6_validate(u32 reg) -{ - WARN_ON(reg & ~GENMASK(5, 0)); - return reg & GENMASK(5, 0); -} - -static inline int s6_to_int(u32 reg) -{ - int s6; - - s6 = s6_validate(reg); - if (s6 & BIT(5)) - s6 -= BIT(6); - - return s6; -} - -static inline u32 int_to_s6(int val) +static inline s8 s6_to_s8(u32 val) { - if (val < -0x20) - return 0x20; - if (val > 0x1f) - return 0x1f; + s8 ret = val & GENMASK(5, 0); - return val & 0x3f; + if (ret & BIT(5)) + ret -= BIT(6); + return ret; } #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c index 7cdb3e740522..ee2b8e885608 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c @@ -18,15 +18,31 @@ #include "eeprom.h" #include "trace.h" #include "mcu.h" -#include "usb.h" - #include "initvals.h" -static void -mt76x0_set_wlan_state(struct mt76x0_dev *dev, u32 val, bool enable) +static void mt76x0_vht_cap_mask(struct ieee80211_supported_band *sband) { + struct ieee80211_sta_vht_cap *vht_cap = &sband->vht_cap; + u16 mcs_map = 0; int i; + vht_cap->cap &= ~IEEE80211_VHT_CAP_RXLDPC; + for (i = 0; i < 8; i++) { + if (!i) + mcs_map |= (IEEE80211_VHT_MCS_SUPPORT_0_7 << (i * 2)); + else + mcs_map |= + (IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2)); + } + vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map); + vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map); +} + +static void +mt76x0_set_wlan_state(struct mt76x02_dev *dev, u32 val, bool enable) +{ + u32 mask = MT_CMB_CTRL_XTAL_RDY | MT_CMB_CTRL_PLL_LD; + /* Note: we don't turn off WLAN_CLK because that makes the device * not respond properly on the probe path. * In case anyone (PSM?) wants to use this function we can @@ -42,32 +58,18 @@ mt76x0_set_wlan_state(struct mt76x0_dev *dev, u32 val, bool enable) mt76_wr(dev, MT_WLAN_FUN_CTRL, val); udelay(20); - if (!enable) - return; - - for (i = 200; i; i--) { - val = mt76_rr(dev, MT_CMB_CTRL); - - if (val & MT_CMB_CTRL_XTAL_RDY && val & MT_CMB_CTRL_PLL_LD) - break; - - udelay(20); - } - /* Note: vendor driver tries to disable/enable wlan here and retry * but the code which does it is so buggy it must have never * triggered, so don't bother. 
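
The new s6_to_s8() helper above replaces the old s6_validate()/s6_to_int()/int_to_s6() trio with a single sign-extension of a 6-bit two's-complement field. A self-contained check of the same arithmetic (userspace types, boundary values chosen for illustration):

#include <stdint.h>
#include <assert.h>

/* Same arithmetic as the new kernel helper: keep bits [5:0], then
 * sign-extend by subtracting BIT(6) when the sign bit BIT(5) is set. */
static int8_t s6_to_s8(uint32_t val)
{
	int8_t ret = val & 0x3f;	/* GENMASK(5, 0) */

	if (ret & 0x20)			/* BIT(5): sign bit of the 6-bit field */
		ret -= 0x40;		/* BIT(6) */
	return ret;
}

int main(void)
{
	assert(s6_to_s8(0x00) == 0);
	assert(s6_to_s8(0x1f) == 31);	/* largest positive 6-bit value */
	assert(s6_to_s8(0x20) == -32);	/* smallest negative 6-bit value */
	assert(s6_to_s8(0x3f) == -1);
	return 0;
}
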
*/ - if (!i) - dev_err(dev->mt76.dev, "Error: PLL and XTAL check failed!\n"); + if (enable && !mt76_poll(dev, MT_CMB_CTRL, mask, mask, 2000)) + dev_err(dev->mt76.dev, "PLL and XTAL check failed\n"); } -void mt76x0_chip_onoff(struct mt76x0_dev *dev, bool enable, bool reset) +void mt76x0_chip_onoff(struct mt76x02_dev *dev, bool enable, bool reset) { u32 val; - mutex_lock(&dev->hw_atomic_mutex); - val = mt76_rr(dev, MT_WLAN_FUN_CTRL); if (reset) { @@ -89,54 +91,25 @@ void mt76x0_chip_onoff(struct mt76x0_dev *dev, bool enable, bool reset) udelay(20); mt76x0_set_wlan_state(dev, val, enable); - - mutex_unlock(&dev->hw_atomic_mutex); } +EXPORT_SYMBOL_GPL(mt76x0_chip_onoff); -static void mt76x0_reset_csr_bbp(struct mt76x0_dev *dev) +static void mt76x0_reset_csr_bbp(struct mt76x02_dev *dev) { - u32 val; - - val = mt76_rr(dev, MT_PBF_SYS_CTRL); - val &= ~0x2000; - mt76_wr(dev, MT_PBF_SYS_CTRL, val); - - mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR | - MT_MAC_SYS_CTRL_RESET_BBP); - + mt76_wr(dev, MT_MAC_SYS_CTRL, + MT_MAC_SYS_CTRL_RESET_CSR | + MT_MAC_SYS_CTRL_RESET_BBP); msleep(200); + mt76_clear(dev, MT_MAC_SYS_CTRL, + MT_MAC_SYS_CTRL_RESET_CSR | + MT_MAC_SYS_CTRL_RESET_BBP); } -static void mt76x0_init_usb_dma(struct mt76x0_dev *dev) -{ - u32 val; - - val = mt76_rr(dev, MT_USB_DMA_CFG); - - val |= FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, MT_USB_AGGR_TIMEOUT) | - FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_LMT, MT_USB_AGGR_SIZE_LIMIT) | - MT_USB_DMA_CFG_RX_BULK_EN | - MT_USB_DMA_CFG_TX_BULK_EN; - if (dev->in_max_packet == 512) - val |= MT_USB_DMA_CFG_RX_BULK_AGG_EN; - mt76_wr(dev, MT_USB_DMA_CFG, val); - - val = mt76_rr(dev, MT_COM_REG0); - if (val & 1) - dev_dbg(dev->mt76.dev, "MCU not ready\n"); - - val = mt76_rr(dev, MT_USB_DMA_CFG); - - val |= MT_USB_DMA_CFG_RX_DROP_OR_PADDING; - mt76_wr(dev, MT_USB_DMA_CFG, val); - val &= ~MT_USB_DMA_CFG_RX_DROP_OR_PADDING; - mt76_wr(dev, MT_USB_DMA_CFG, val); -} - -#define RANDOM_WRITE(dev, tab) \ - mt76x0_write_reg_pairs(dev, MT_MCU_MEMMAP_WLAN, tab, ARRAY_SIZE(tab)); +#define RANDOM_WRITE(dev, tab) \ + mt76_wr_rp(dev, MT_MCU_MEMMAP_WLAN, \ + tab, ARRAY_SIZE(tab)) -static int mt76x0_init_bbp(struct mt76x0_dev *dev) +static int mt76x0_init_bbp(struct mt76x02_dev *dev) { int ret, i; @@ -159,30 +132,13 @@ static int mt76x0_init_bbp(struct mt76x0_dev *dev) return 0; } -static void -mt76_init_beacon_offsets(struct mt76x0_dev *dev) -{ - u16 base = MT_BEACON_BASE; - u32 regs[4] = {}; - int i; - - for (i = 0; i < 16; i++) { - u16 addr = dev->beacon_offsets[i]; - - regs[i / 4] |= ((addr - base) / 64) << (8 * (i % 4)); - } - - for (i = 0; i < 4; i++) - mt76_wr(dev, MT_BCN_OFFSET(i), regs[i]); -} - -static void mt76x0_init_mac_registers(struct mt76x0_dev *dev) +static void mt76x0_init_mac_registers(struct mt76x02_dev *dev) { u32 reg; RANDOM_WRITE(dev, common_mac_reg_table); - mt76_init_beacon_offsets(dev); + mt76x02_set_beacon_offsets(&dev->mt76); /* Enable PBF and MAC clock SYS_CTRL[11:10] = 0x3 */ RANDOM_WRITE(dev, mt76x0_mac_reg_table); @@ -192,13 +148,6 @@ static void mt76x0_init_mac_registers(struct mt76x0_dev *dev) reg &= ~0x3; mt76_wr(dev, MT_MAC_SYS_CTRL, reg); - if (is_mt7610e(dev)) { - /* Disable COEX_EN */ - reg = mt76_rr(dev, MT_COEXCFG0); - reg &= 0xFFFFFFFE; - mt76_wr(dev, MT_COEXCFG0, reg); - } - /* Set 0x141C[15:12]=0xF */ reg = mt76_rr(dev, MT_EXT_CCA_CFG); reg |= 0x0000F000; @@ -216,115 +165,81 @@ static void mt76x0_init_mac_registers(struct mt76x0_dev *dev) reg &= ~0x000003FF; reg |= 0x00000201; mt76_wr(dev, MT_WMM_CTRL, reg); - - /* TODO: 
Probably not needed */ - mt76_wr(dev, 0x7028, 0); - mt76_wr(dev, 0x7010, 0); - mt76_wr(dev, 0x7024, 0); - msleep(10); } -static int mt76x0_init_wcid_mem(struct mt76x0_dev *dev) +static int mt76x0_init_wcid_mem(struct mt76x02_dev *dev) { u32 *vals; - int i, ret; + int i; - vals = kmalloc(sizeof(*vals) * N_WCIDS * 2, GFP_KERNEL); + vals = kmalloc(sizeof(*vals) * MT76_N_WCIDS * 2, GFP_KERNEL); if (!vals) return -ENOMEM; - for (i = 0; i < N_WCIDS; i++) { + for (i = 0; i < MT76_N_WCIDS; i++) { vals[i * 2] = 0xffffffff; vals[i * 2 + 1] = 0x00ffffff; } - ret = mt76x0_burst_write_regs(dev, MT_WCID_ADDR_BASE, - vals, N_WCIDS * 2); + mt76_wr_copy(dev, MT_WCID_ADDR_BASE, vals, MT76_N_WCIDS * 2); kfree(vals); - - return ret; + return 0; } -static int mt76x0_init_key_mem(struct mt76x0_dev *dev) +static void mt76x0_init_key_mem(struct mt76x02_dev *dev) { u32 vals[4] = {}; - return mt76x0_burst_write_regs(dev, MT_SKEY_MODE_BASE_0, - vals, ARRAY_SIZE(vals)); + mt76_wr_copy(dev, MT_SKEY_MODE_BASE_0, vals, ARRAY_SIZE(vals)); } -static int mt76x0_init_wcid_attr_mem(struct mt76x0_dev *dev) +static int mt76x0_init_wcid_attr_mem(struct mt76x02_dev *dev) { u32 *vals; - int i, ret; + int i; - vals = kmalloc(sizeof(*vals) * N_WCIDS * 2, GFP_KERNEL); + vals = kmalloc(sizeof(*vals) * MT76_N_WCIDS * 2, GFP_KERNEL); if (!vals) return -ENOMEM; - for (i = 0; i < N_WCIDS * 2; i++) + for (i = 0; i < MT76_N_WCIDS * 2; i++) vals[i] = 1; - ret = mt76x0_burst_write_regs(dev, MT_WCID_ATTR_BASE, - vals, N_WCIDS * 2); + mt76_wr_copy(dev, MT_WCID_ATTR_BASE, vals, MT76_N_WCIDS * 2); kfree(vals); - - return ret; + return 0; } -static void mt76x0_reset_counters(struct mt76x0_dev *dev) +static void mt76x0_reset_counters(struct mt76x02_dev *dev) { - mt76_rr(dev, MT_RX_STA_CNT0); - mt76_rr(dev, MT_RX_STA_CNT1); - mt76_rr(dev, MT_RX_STA_CNT2); - mt76_rr(dev, MT_TX_STA_CNT0); - mt76_rr(dev, MT_TX_STA_CNT1); - mt76_rr(dev, MT_TX_STA_CNT2); + mt76_rr(dev, MT_RX_STAT_0); + mt76_rr(dev, MT_RX_STAT_1); + mt76_rr(dev, MT_RX_STAT_2); + mt76_rr(dev, MT_TX_STA_0); + mt76_rr(dev, MT_TX_STA_1); + mt76_rr(dev, MT_TX_STA_2); } -int mt76x0_mac_start(struct mt76x0_dev *dev) +int mt76x0_mac_start(struct mt76x02_dev *dev) { mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX); - if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_BUSY | - MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 200000)) + if (!mt76x02_wait_for_wpdma(&dev->mt76, 200000)) return -ETIMEDOUT; - dev->rxfilter = MT_RX_FILTR_CFG_CRC_ERR | - MT_RX_FILTR_CFG_PHY_ERR | MT_RX_FILTR_CFG_PROMISC | - MT_RX_FILTR_CFG_VER_ERR | MT_RX_FILTR_CFG_DUP | - MT_RX_FILTR_CFG_CFACK | MT_RX_FILTR_CFG_CFEND | - MT_RX_FILTR_CFG_ACK | MT_RX_FILTR_CFG_CTS | - MT_RX_FILTR_CFG_RTS | MT_RX_FILTR_CFG_PSPOLL | - MT_RX_FILTR_CFG_BA | MT_RX_FILTR_CFG_CTRL_RSV; - mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter); - + mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter); mt76_wr(dev, MT_MAC_SYS_CTRL, - MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX); - - if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_BUSY | - MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 50)) - return -ETIMEDOUT; + MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX); - return 0; + return !mt76x02_wait_for_wpdma(&dev->mt76, 50) ? 
-ETIMEDOUT : 0; } +EXPORT_SYMBOL_GPL(mt76x0_mac_start); -static void mt76x0_mac_stop_hw(struct mt76x0_dev *dev) +void mt76x0_mac_stop(struct mt76x02_dev *dev) { - int i, ok; - - if (test_bit(MT76_REMOVED, &dev->mt76.state)) - return; - - mt76_clear(dev, MT_BEACON_TIME_CFG, MT_BEACON_TIME_CFG_TIMER_EN | - MT_BEACON_TIME_CFG_SYNC_MODE | MT_BEACON_TIME_CFG_TBTT_EN | - MT_BEACON_TIME_CFG_BEACON_TX); - - if (!mt76_poll(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_TX_BUSY, 0, 1000)) - dev_warn(dev->mt76.dev, "Warning: TX DMA did not stop!\n"); + int i = 200, ok = 0; /* Page count on TxQ */ - i = 200; while (i-- && ((mt76_rr(dev, 0x0438) & 0xffffffff) || (mt76_rr(dev, 0x0a30) & 0x000000ff) || (mt76_rr(dev, 0x0a34) & 0x00ff00ff))) @@ -337,9 +252,7 @@ static void mt76x0_mac_stop_hw(struct mt76x0_dev *dev) MT_MAC_SYS_CTRL_ENABLE_TX); /* Page count on RxQ */ - ok = 0; - i = 200; - while (i--) { + for (i = 0; i < 200; i++) { if (!(mt76_rr(dev, MT_RXQ_STA) & 0x00ff0000) && !mt76_rr(dev, 0x0a30) && !mt76_rr(dev, 0x0a34)) { @@ -352,91 +265,45 @@ static void mt76x0_mac_stop_hw(struct mt76x0_dev *dev) if (!mt76_poll(dev, MT_MAC_STATUS, MT_MAC_STATUS_RX, 0, 1000)) dev_warn(dev->mt76.dev, "Warning: MAC RX did not stop!\n"); - - if (!mt76_poll(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_RX_BUSY, 0, 1000)) - dev_warn(dev->mt76.dev, "Warning: RX DMA did not stop!\n"); -} - -void mt76x0_mac_stop(struct mt76x0_dev *dev) -{ - mt76x0_mac_stop_hw(dev); - flush_delayed_work(&dev->stat_work); - cancel_delayed_work_sync(&dev->stat_work); -} - -static void mt76x0_stop_hardware(struct mt76x0_dev *dev) -{ - mt76x0_chip_onoff(dev, false, false); } +EXPORT_SYMBOL_GPL(mt76x0_mac_stop); -int mt76x0_init_hardware(struct mt76x0_dev *dev) +int mt76x0_init_hardware(struct mt76x02_dev *dev) { - static const u16 beacon_offsets[16] = { - /* 512 byte per beacon */ - 0xc000, 0xc200, 0xc400, 0xc600, - 0xc800, 0xca00, 0xcc00, 0xce00, - 0xd000, 0xd200, 0xd400, 0xd600, - 0xd800, 0xda00, 0xdc00, 0xde00 - }; int ret; - dev->beacon_offsets = beacon_offsets; - - mt76x0_chip_onoff(dev, true, true); - - ret = mt76x0_wait_asic_ready(dev); - if (ret) - goto err; - ret = mt76x0_mcu_init(dev); - if (ret) - goto err; - - if (!mt76_poll_msec(dev, MT_WPDMA_GLO_CFG, - MT_WPDMA_GLO_CFG_TX_DMA_BUSY | - MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 100)) { - ret = -EIO; - goto err; - } + if (!mt76x02_wait_for_wpdma(&dev->mt76, 1000)) + return -EIO; /* Wait for ASIC ready after FW load. 
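
The conversions in this hunk (mt76_poll() in mt76x0_set_wlan_state(), mt76x02_wait_for_wpdma() in mt76x0_mac_start() and mt76x0_init_hardware()) all rely on the same poll-until-mask idiom that the removed open-coded loops spelled out by hand. A standalone sketch of that idiom follows; reg_read() and the 10 us step are stand-ins, not the mt76 API:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* Fake register backing store so the sketch runs standalone;
 * in the driver the read would be mt76_rr(dev, addr). */
static uint32_t fake_reg = 0x3;

static uint32_t reg_read(uint32_t addr)
{
	(void)addr;
	return fake_reg;
}

/* Poll until (value & mask) == expected or roughly timeout_us elapses.
 * This is the idiom behind mt76_poll()/mt76x02_wait_for_wpdma(), which
 * replaced the hand-rolled "for (i = 200; i; i--)" loops above. */
static bool poll_reg(uint32_t addr, uint32_t mask, uint32_t expected,
		     int timeout_us)
{
	do {
		if ((reg_read(addr) & mask) == expected)
			return true;
		usleep(10);
		timeout_us -= 10;
	} while (timeout_us > 0);
	return false;
}

int main(void)
{
	uint32_t mask = 0x3;	/* e.g. XTAL_RDY | PLL_LD in the real check */

	printf("ready: %d\n", poll_reg(0, mask, mask, 2000));
	return 0;
}
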
*/ - ret = mt76x0_wait_asic_ready(dev); - if (ret) - goto err; + if (!mt76x02_wait_for_mac(&dev->mt76)) + return -ETIMEDOUT; mt76x0_reset_csr_bbp(dev); - mt76x0_init_usb_dma(dev); - - mt76_wr(dev, MT_HEADER_TRANS_CTRL_REG, 0x0); - mt76_wr(dev, MT_TSO_CTRL, 0x0); - - ret = mt76x0_mcu_cmd_init(dev); + ret = mt76x02_mcu_function_select(&dev->mt76, Q_SELECT, 1, false); if (ret) - goto err; - ret = mt76x0_dma_init(dev); - if (ret) - goto err_mcu; + return ret; mt76x0_init_mac_registers(dev); - if (!mt76_poll_msec(dev, MT_MAC_STATUS, - MT_MAC_STATUS_TX | MT_MAC_STATUS_RX, 0, 1000)) { - ret = -EIO; - goto err_rx; - } + if (!mt76x02_wait_for_txrx_idle(&dev->mt76)) + return -EIO; ret = mt76x0_init_bbp(dev); if (ret) - goto err_rx; + return ret; + + dev->mt76.rxfilter = mt76_rr(dev, MT_RX_FILTR_CFG); ret = mt76x0_init_wcid_mem(dev); if (ret) - goto err_rx; - ret = mt76x0_init_key_mem(dev); - if (ret) - goto err_rx; + return ret; + + mt76x0_init_key_mem(dev); + ret = mt76x0_init_wcid_attr_mem(dev); if (ret) - goto err_rx; + return ret; mt76_clear(dev, MT_BEACON_TIME_CFG, (MT_BEACON_TIME_CFG_TIMER_EN | MT_BEACON_TIME_CFG_SYNC_MODE | @@ -445,276 +312,82 @@ int mt76x0_init_hardware(struct mt76x0_dev *dev) mt76x0_reset_counters(dev); - mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e); - - mt76_wr(dev, MT_TXOP_CTRL_CFG, - FIELD_PREP(MT_TXOP_TRUN_EN, 0x3f) | - FIELD_PREP(MT_TXOP_EXT_CCA_DLY, 0x58)); - ret = mt76x0_eeprom_init(dev); if (ret) - goto err_rx; + return ret; mt76x0_phy_init(dev); - return 0; - -err_rx: - mt76x0_dma_cleanup(dev); -err_mcu: - mt76x0_mcu_cmd_deinit(dev); -err: - mt76x0_chip_onoff(dev, false, false); - return ret; -} - -void mt76x0_cleanup(struct mt76x0_dev *dev) -{ - if (!test_and_clear_bit(MT76_STATE_INITIALIZED, &dev->mt76.state)) - return; - mt76x0_stop_hardware(dev); - mt76x0_dma_cleanup(dev); - mt76x0_mcu_cmd_deinit(dev); + return 0; } +EXPORT_SYMBOL_GPL(mt76x0_init_hardware); -struct mt76x0_dev *mt76x0_alloc_device(struct device *pdev) +struct mt76x02_dev * +mt76x0_alloc_device(struct device *pdev, + const struct mt76_driver_ops *drv_ops, + const struct ieee80211_ops *ops) { - struct ieee80211_hw *hw; - struct mt76x0_dev *dev; + struct mt76x02_dev *dev; + struct mt76_dev *mdev; - hw = ieee80211_alloc_hw(sizeof(*dev), &mt76x0_ops); - if (!hw) + mdev = mt76_alloc_device(sizeof(*dev), ops); + if (!mdev) return NULL; - dev = hw->priv; - dev->mt76.dev = pdev; - dev->mt76.hw = hw; - mutex_init(&dev->usb_ctrl_mtx); - mutex_init(&dev->reg_atomic_mutex); - mutex_init(&dev->hw_atomic_mutex); - mutex_init(&dev->mutex); - spin_lock_init(&dev->tx_lock); - spin_lock_init(&dev->rx_lock); - spin_lock_init(&dev->mt76.lock); - spin_lock_init(&dev->mac_lock); - spin_lock_init(&dev->con_mon_lock); - atomic_set(&dev->avg_ampdu_len, 1); - skb_queue_head_init(&dev->tx_skb_done); + mdev->dev = pdev; + mdev->drv = drv_ops; - dev->stat_wq = alloc_workqueue("mt76x0", WQ_UNBOUND, 0); - if (!dev->stat_wq) { - ieee80211_free_hw(hw); - return NULL; - } + dev = container_of(mdev, struct mt76x02_dev, mt76); + mutex_init(&dev->phy_mutex); + atomic_set(&dev->avg_ampdu_len, 1); return dev; } +EXPORT_SYMBOL_GPL(mt76x0_alloc_device); -#define CHAN2G(_idx, _freq) { \ - .band = NL80211_BAND_2GHZ, \ - .center_freq = (_freq), \ - .hw_value = (_idx), \ - .max_power = 30, \ -} - -static const struct ieee80211_channel mt76_channels_2ghz[] = { - CHAN2G(1, 2412), - CHAN2G(2, 2417), - CHAN2G(3, 2422), - CHAN2G(4, 2427), - CHAN2G(5, 2432), - CHAN2G(6, 2437), - CHAN2G(7, 2442), - CHAN2G(8, 2447), - CHAN2G(9, 2452), - 
CHAN2G(10, 2457), - CHAN2G(11, 2462), - CHAN2G(12, 2467), - CHAN2G(13, 2472), - CHAN2G(14, 2484), -}; - -#define CHAN5G(_idx, _freq) { \ - .band = NL80211_BAND_5GHZ, \ - .center_freq = (_freq), \ - .hw_value = (_idx), \ - .max_power = 30, \ -} - -static const struct ieee80211_channel mt76_channels_5ghz[] = { - CHAN5G(36, 5180), - CHAN5G(40, 5200), - CHAN5G(44, 5220), - CHAN5G(46, 5230), - CHAN5G(48, 5240), - CHAN5G(52, 5260), - CHAN5G(56, 5280), - CHAN5G(60, 5300), - CHAN5G(64, 5320), - - CHAN5G(100, 5500), - CHAN5G(104, 5520), - CHAN5G(108, 5540), - CHAN5G(112, 5560), - CHAN5G(116, 5580), - CHAN5G(120, 5600), - CHAN5G(124, 5620), - CHAN5G(128, 5640), - CHAN5G(132, 5660), - CHAN5G(136, 5680), - CHAN5G(140, 5700), -}; - -#define CCK_RATE(_idx, _rate) { \ - .bitrate = _rate, \ - .flags = IEEE80211_RATE_SHORT_PREAMBLE, \ - .hw_value = (MT_PHY_TYPE_CCK << 8) | _idx, \ - .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (8 + _idx), \ -} - -#define OFDM_RATE(_idx, _rate) { \ - .bitrate = _rate, \ - .hw_value = (MT_PHY_TYPE_OFDM << 8) | _idx, \ - .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | _idx, \ -} - -static struct ieee80211_rate mt76_rates[] = { - CCK_RATE(0, 10), - CCK_RATE(1, 20), - CCK_RATE(2, 55), - CCK_RATE(3, 110), - OFDM_RATE(0, 60), - OFDM_RATE(1, 90), - OFDM_RATE(2, 120), - OFDM_RATE(3, 180), - OFDM_RATE(4, 240), - OFDM_RATE(5, 360), - OFDM_RATE(6, 480), - OFDM_RATE(7, 540), -}; - -static int -mt76_init_sband(struct mt76x0_dev *dev, struct ieee80211_supported_band *sband, - const struct ieee80211_channel *chan, int n_chan, - struct ieee80211_rate *rates, int n_rates) -{ - struct ieee80211_sta_ht_cap *ht_cap; - void *chanlist; - int size; - - size = n_chan * sizeof(*chan); - chanlist = devm_kmemdup(dev->mt76.dev, chan, size, GFP_KERNEL); - if (!chanlist) - return -ENOMEM; - - sband->channels = chanlist; - sband->n_channels = n_chan; - sband->bitrates = rates; - sband->n_bitrates = n_rates; - - ht_cap = &sband->ht_cap; - ht_cap->ht_supported = true; - ht_cap->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 | - IEEE80211_HT_CAP_GRN_FLD | - IEEE80211_HT_CAP_SGI_20 | - IEEE80211_HT_CAP_SGI_40 | - (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT); - - ht_cap->mcs.rx_mask[0] = 0xff; - ht_cap->mcs.rx_mask[4] = 0x1; - ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; - ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; - ht_cap->ampdu_density = IEEE80211_HT_MPDU_DENSITY_2; - - return 0; -} - -static int -mt76_init_sband_2g(struct mt76x0_dev *dev) -{ - dev->mt76.hw->wiphy->bands[NL80211_BAND_2GHZ] = &dev->mt76.sband_2g.sband; - - WARN_ON(dev->ee->reg.start - 1 + dev->ee->reg.num > - ARRAY_SIZE(mt76_channels_2ghz)); - - - return mt76_init_sband(dev, &dev->mt76.sband_2g.sband, - mt76_channels_2ghz, ARRAY_SIZE(mt76_channels_2ghz), - mt76_rates, ARRAY_SIZE(mt76_rates)); -} - -static int -mt76_init_sband_5g(struct mt76x0_dev *dev) -{ - dev->mt76.hw->wiphy->bands[NL80211_BAND_5GHZ] = &dev->mt76.sband_5g.sband; - - return mt76_init_sband(dev, &dev->mt76.sband_5g.sband, - mt76_channels_5ghz, ARRAY_SIZE(mt76_channels_5ghz), - mt76_rates + 4, ARRAY_SIZE(mt76_rates) - 4); -} - - -int mt76x0_register_device(struct mt76x0_dev *dev) +int mt76x0_register_device(struct mt76x02_dev *dev) { - struct ieee80211_hw *hw = dev->mt76.hw; + struct mt76_dev *mdev = &dev->mt76; + struct ieee80211_hw *hw = mdev->hw; struct wiphy *wiphy = hw->wiphy; int ret; /* Reserve WCID 0 for mcast - thanks to this APs WCID will go to * entry no. 1 like it does in the vendor driver. 
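
The comment above refers to the wcid_mask bit set just below: marking bit 0 used means a find-first-zero allocator hands out entry 1 first, matching the vendor driver's layout. A toy version of that word-array bitmap allocation (names illustrative, not the mt76 helpers):

#include <stdint.h>
#include <stdio.h>

#define N_WCIDS 128
static uint32_t wcid_mask[N_WCIDS / 32];

/* Return the first free WCID and mark it used, or -1 if full. */
static int wcid_alloc(void)
{
	for (int i = 0; i < N_WCIDS; i++) {
		if (!(wcid_mask[i / 32] & (1u << (i % 32)))) {
			wcid_mask[i / 32] |= 1u << (i % 32);
			return i;
		}
	}
	return -1;
}

int main(void)
{
	wcid_mask[0] |= 1;	/* reserve WCID 0 for mcast, as in the patch */

	printf("first station WCID: %d\n", wcid_alloc());	/* -> 1 */
	return 0;
}
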
*/ - dev->wcid_mask[0] |= 1; + mdev->wcid_mask[0] |= 1; /* init fake wcid for monitor interfaces */ - dev->mon_wcid = devm_kmalloc(dev->mt76.dev, sizeof(*dev->mon_wcid), - GFP_KERNEL); - if (!dev->mon_wcid) - return -ENOMEM; - dev->mon_wcid->idx = 0xff; - dev->mon_wcid->hw_key_idx = -1; + mdev->global_wcid.idx = 0xff; + mdev->global_wcid.hw_key_idx = -1; - SET_IEEE80211_DEV(hw, dev->mt76.dev); + /* init antenna configuration */ + mdev->antenna_mask = 1; hw->queues = 4; - ieee80211_hw_set(hw, SIGNAL_DBM); - ieee80211_hw_set(hw, PS_NULLFUNC_STACK); - ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES); - ieee80211_hw_set(hw, AMPDU_AGGREGATION); - ieee80211_hw_set(hw, SUPPORTS_RC_TABLE); hw->max_rates = 1; hw->max_report_rates = 7; hw->max_rate_tries = 1; + hw->extra_tx_headroom = sizeof(struct mt76x02_txwi) + 4 + 2; - hw->sta_data_size = sizeof(struct mt76_sta); - hw->vif_data_size = sizeof(struct mt76_vif); - - SET_IEEE80211_PERM_ADDR(hw, dev->macaddr); + hw->sta_data_size = sizeof(struct mt76x02_sta); + hw->vif_data_size = sizeof(struct mt76x02_vif); - wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR; wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); - if (dev->ee->has_2ghz) { - ret = mt76_init_sband_2g(dev); - if (ret) - return ret; - } - - if (dev->ee->has_5ghz) { - ret = mt76_init_sband_5g(dev); - if (ret) - return ret; - } - - dev->mt76.chandef.chan = &dev->mt76.sband_2g.sband.channels[0]; - INIT_DELAYED_WORK(&dev->mac_work, mt76x0_mac_work); - INIT_DELAYED_WORK(&dev->stat_work, mt76x0_tx_stat); - ret = ieee80211_register_hw(hw); + ret = mt76_register_device(mdev, true, mt76x02_rates, + ARRAY_SIZE(mt76x02_rates)); if (ret) return ret; + /* overwrite unsupported features */ + if (mdev->cap.has_5ghz) + mt76x0_vht_cap_mask(&dev->mt76.sband_5g.sband); + mt76x0_init_debugfs(dev); return 0; } +EXPORT_SYMBOL_GPL(mt76x0_register_device); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h index 24afcfd94b4e..236dce6860b4 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h @@ -2,6 +2,7 @@ * (c) Copyright 2002-2010, Ralink Technology, Inc. * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl> * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl> + * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 @@ -19,264 +20,215 @@ #include "phy.h" static const struct mt76_reg_pair common_mac_reg_table[] = { -#if 1 - {MT_BCN_OFFSET(0), 0xf8f0e8e0}, /* 0x3800(e0), 0x3A00(e8), 0x3C00(f0), 0x3E00(f8), 512B for each beacon */ - {MT_BCN_OFFSET(1), 0x6f77d0c8}, /* 0x3200(c8), 0x3400(d0), 0x1DC0(77), 0x1BC0(6f), 512B for each beacon */ -#endif - - {MT_LEGACY_BASIC_RATE, 0x0000013f}, /* Basic rate set bitmap*/ - {MT_HT_BASIC_RATE, 0x00008003}, /* Basic HT rate set , 20M, MCS=3, MM. Format is the same as in TXWI.*/ - {MT_MAC_SYS_CTRL, 0x00}, /* 0x1004, , default Disable RX*/ - {MT_RX_FILTR_CFG, 0x17f97}, /*0x1400 , RX filter control, */ - {MT_BKOFF_SLOT_CFG, 0x209}, /* default set short slot time, CC_DELAY_TIME should be 2 */ - /*{TX_SW_CFG0, 0x40a06}, Gary,2006-08-23 */ - {MT_TX_SW_CFG0, 0x0}, /* Gary,2008-05-21 for CWC test */ - {MT_TX_SW_CFG1, 0x80606}, /* Gary,2006-08-23 */ - {MT_TX_LINK_CFG, 0x1020}, /* Gary,2006-08-23 */ - /*{TX_TIMEOUT_CFG, 0x00182090}, CCK has some problem. So increase timieout value. 
2006-10-09 MArvek RT*/ - {MT_TX_TIMEOUT_CFG, 0x000a2090}, /* CCK has some problem. So increase timieout value. 2006-10-09 MArvek RT , Modify for 2860E ,2007-08-01*/ - {MT_MAX_LEN_CFG, 0xa0fff | 0x00001000}, /* 0x3018, MAX frame length. Max PSDU = 16kbytes.*/ - {MT_LED_CFG, 0x7f031e46}, /* Gary, 2006-08-23*/ - - {MT_PBF_TX_MAX_PCNT, 0x1fbf1f1f /*0xbfbf3f1f*/}, - {MT_PBF_RX_MAX_PCNT, 0x9f}, - - /*{TX_RTY_CFG, 0x6bb80408}, Jan, 2006/11/16*/ -/* WMM_ACM_SUPPORT */ -/* {TX_RTY_CFG, 0x6bb80101}, sample*/ - {MT_TX_RETRY_CFG, 0x47d01f0f}, /* Jan, 2006/11/16, Set TxWI->ACK =0 in Probe Rsp Modify for 2860E ,2007-08-03*/ - - {MT_AUTO_RSP_CFG, 0x00000013}, /* Initial Auto_Responder, because QA will turn off Auto-Responder*/ - {MT_CCK_PROT_CFG, 0x05740003 /*0x01740003*/}, /* Initial Auto_Responder, because QA will turn off Auto-Responder. And RTS threshold is enabled. */ - {MT_OFDM_PROT_CFG, 0x05740003 /*0x01740003*/}, /* Initial Auto_Responder, because QA will turn off Auto-Responder. And RTS threshold is enabled. */ - {MT_PBF_CFG, 0xf40006}, /* Only enable Queue 2*/ - {MT_MM40_PROT_CFG, 0x3F44084}, /* Initial Auto_Responder, because QA will turn off Auto-Responder*/ - {MT_WPDMA_GLO_CFG, 0x00000030}, - {MT_GF20_PROT_CFG, 0x01744004}, /* set 19:18 --> Short NAV for MIMO PS*/ - {MT_GF40_PROT_CFG, 0x03F44084}, - {MT_MM20_PROT_CFG, 0x01744004}, - {MT_TXOP_CTRL_CFG, 0x0000583f, /*0x0000243f*/ /*0x000024bf*/}, /*Extension channel backoff.*/ - {MT_TX_RTS_CFG, 0x00092b20}, - - {MT_EXP_ACK_TIME, 0x002400ca}, /* default value */ - {MT_TXOP_HLDR_ET, 0x00000002}, - - /* Jerry comments 2008/01/16: we use SIFS = 10us in CCK defaultly, but it seems that 10us - is too small for INTEL 2200bg card, so in MBSS mode, the delta time between beacon0 - and beacon1 is SIFS (10us), so if INTEL 2200bg card connects to BSS0, the ping - will always lost. So we change the SIFS of CCK from 10us to 16us. 
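
Both the original table above and the reformatted one below keep the same data model: an array of { register, value } pairs replayed in order through mt76_wr_rp() (wrapped by the RANDOM_WRITE macro). A stripped-down sketch of that replay loop, with a stub write so it compiles standalone; the two sample addresses come from the old table's comments:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct reg_pair {
	uint32_t reg;
	uint32_t value;
};

/* Stub for the MMIO/USB register write (mt76_wr() in the driver). */
static void reg_write(uint32_t reg, uint32_t value)
{
	printf("wr 0x%04x <- 0x%08x\n", (unsigned)reg, (unsigned)value);
}

/* Replay an init table in order, as mt76_wr_rp()/RANDOM_WRITE do. */
static void write_reg_pairs(const struct reg_pair *tab, size_t n)
{
	for (size_t i = 0; i < n; i++)
		reg_write(tab[i].reg, tab[i].value);
}

int main(void)
{
	/* Two entries borrowed from common_mac_reg_table for flavor. */
	static const struct reg_pair tab[] = {
		{ 0x1004 /* MAC_SYS_CTRL */, 0x00000000 },
		{ 0x1400 /* RX_FILTR_CFG */, 0x00017f97 },
	};

	write_reg_pairs(tab, sizeof(tab) / sizeof(tab[0]));
	return 0;
}
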
*/ - {MT_XIFS_TIME_CFG, 0x33a41010}, - {MT_PWR_PIN_CFG, 0x00000000}, + { MT_BCN_OFFSET(0), 0xf8f0e8e0 }, + { MT_BCN_OFFSET(1), 0x6f77d0c8 }, + { MT_LEGACY_BASIC_RATE, 0x0000013f }, + { MT_HT_BASIC_RATE, 0x00008003 }, + { MT_MAC_SYS_CTRL, 0x00000000 }, + { MT_RX_FILTR_CFG, 0x00017f97 }, + { MT_BKOFF_SLOT_CFG, 0x00000209 }, + { MT_TX_SW_CFG0, 0x00000000 }, + { MT_TX_SW_CFG1, 0x00080606 }, + { MT_TX_LINK_CFG, 0x00001020 }, + { MT_TX_TIMEOUT_CFG, 0x000a2090 }, + { MT_MAX_LEN_CFG, 0xa0fff | 0x00001000 }, + { MT_LED_CFG, 0x7f031e46 }, + { MT_PBF_TX_MAX_PCNT, 0x1fbf1f1f }, + { MT_PBF_RX_MAX_PCNT, 0x0000fe9f }, + { MT_TX_RETRY_CFG, 0x47d01f0f }, + { MT_AUTO_RSP_CFG, 0x00000013 }, + { MT_CCK_PROT_CFG, 0x05740003 }, + { MT_OFDM_PROT_CFG, 0x05740003 }, + { MT_PBF_CFG, 0x00f40006 }, + { MT_WPDMA_GLO_CFG, 0x00000030 }, + { MT_GF20_PROT_CFG, 0x01744004 }, + { MT_GF40_PROT_CFG, 0x03f44084 }, + { MT_MM20_PROT_CFG, 0x01744004 }, + { MT_MM40_PROT_CFG, 0x03f54084 }, + { MT_TXOP_CTRL_CFG, 0x0000583f }, + { MT_TX_RTS_CFG, 0x00092b20 }, + { MT_EXP_ACK_TIME, 0x002400ca }, + { MT_TXOP_HLDR_ET, 0x00000002 }, + { MT_XIFS_TIME_CFG, 0x33a41010 }, + { MT_PWR_PIN_CFG, 0x00000000 }, }; static const struct mt76_reg_pair mt76x0_mac_reg_table[] = { - /* {MT_IOCFG_6, 0xA0040080 }, */ - {MT_PBF_SYS_CTRL, 0x00080c00 }, - {MT_PBF_CFG, 0x77723c1f }, - {MT_FCE_PSE_CTRL, 0x00000001 }, - - {MT_AMPDU_MAX_LEN_20M1S, 0xBAA99887 }, - - /* Delay bb_tx_pe for proper tx_mcs_pwr update */ - {MT_TX_SW_CFG0, 0x00000601 }, - - /* Set rf_tx_pe deassert time to 1us by Chee's comment @MT7650_CR_setting_1018.xlsx */ - {MT_TX_SW_CFG1, 0x00040000 }, - {MT_TX_SW_CFG2, 0x00000000 }, - - /* disable Tx info report */ - {0xa44, 0x0000000 }, - - {MT_HEADER_TRANS_CTRL_REG, 0x0}, - {MT_TSO_CTRL, 0x0}, - - /* BB_PA_MODE_CFG0(0x1214) Keep default value @20120903 */ - {MT_BB_PA_MODE_CFG1, 0x00500055}, - - /* RF_PA_MODE_CFG0(0x121C) Keep default value @20120903 */ - {MT_RF_PA_MODE_CFG1, 0x00500055}, - - {MT_TX_ALC_CFG_0, 0x2F2F000C}, - {MT_TX0_BB_GAIN_ATTEN, 0x00000000}, /* set BBP atten gain = 0 */ - - {MT_TX_PWR_CFG_0, 0x3A3A3A3A}, - {MT_TX_PWR_CFG_1, 0x3A3A3A3A}, - {MT_TX_PWR_CFG_2, 0x3A3A3A3A}, - {MT_TX_PWR_CFG_3, 0x3A3A3A3A}, - {MT_TX_PWR_CFG_4, 0x3A3A3A3A}, - {MT_TX_PWR_CFG_7, 0x3A3A3A3A}, - {MT_TX_PWR_CFG_8, 0x3A}, - {MT_TX_PWR_CFG_9, 0x3A}, - /* Enable Tx length > 4095 byte */ - {0x150C, 0x00000002}, - - /* Disable bt_abort_tx_en(0x1238[21] = 0) which is not used at MT7650 */ - {0x1238, 0x001700C8}, - /* PMU_OCLEVEL<5:1> from default <5'b10010> to <5'b11011> for normal driver */ - /* {MT_LDO_CTRL_0, 0x00A647B6}, */ - - /* Default LDO_DIG supply 1.26V, change to 1.2V */ - {MT_LDO_CTRL_1, 0x6B006464 }, -/* - {MT_HT_BASIC_RATE, 0x00004003 }, - {MT_HT_CTRL_CFG, 0x000001FF }, -*/ + { MT_IOCFG_6, 0xa0040080 }, + { MT_PBF_SYS_CTRL, 0x00080c00 }, + { MT_PBF_CFG, 0x77723c1f }, + { MT_FCE_PSE_CTRL, 0x00000001 }, + { MT_AMPDU_MAX_LEN_20M1S, 0xAAA99887 }, + { MT_TX_SW_CFG0, 0x00000601 }, + { MT_TX_SW_CFG1, 0x00040000 }, + { MT_TX_SW_CFG2, 0x00000000 }, + { 0xa44, 0x00000000 }, + { MT_HEADER_TRANS_CTRL_REG, 0x00000000 }, + { MT_TSO_CTRL, 0x00000000 }, + { MT_BB_PA_MODE_CFG1, 0x00500055 }, + { MT_RF_PA_MODE_CFG1, 0x00500055 }, + { MT_TX_ALC_CFG_0, 0x2F2F000C }, + { MT_TX0_BB_GAIN_ATTEN, 0x00000000 }, + { MT_TX_PWR_CFG_0, 0x3A3A3A3A }, + { MT_TX_PWR_CFG_1, 0x3A3A3A3A }, + { MT_TX_PWR_CFG_2, 0x3A3A3A3A }, + { MT_TX_PWR_CFG_3, 0x3A3A3A3A }, + { MT_TX_PWR_CFG_4, 0x3A3A3A3A }, + { MT_TX_PWR_CFG_7, 0x3A3A3A3A }, + { MT_TX_PWR_CFG_8, 0x0000003A }, + { MT_TX_PWR_CFG_9, 
0x0000003A }, + { 0x150C, 0x00000002 }, + { 0x1238, 0x001700C8 }, + { MT_LDO_CTRL_0, 0x00A647B6 }, + { MT_LDO_CTRL_1, 0x6B006464 }, + { MT_HT_BASIC_RATE, 0x00004003 }, + { MT_HT_CTRL_CFG, 0x000001FF }, + { MT_TXOP_HLDR_ET, 0x00000000 }, + { MT_PN_PAD_MODE, 0x00000003 }, }; - static const struct mt76_reg_pair mt76x0_bbp_init_tab[] = { - {MT_BBP(CORE, 1), 0x00000002}, - {MT_BBP(CORE, 4), 0x00000000}, - {MT_BBP(CORE, 24), 0x00000000}, - {MT_BBP(CORE, 32), 0x4003000a}, - {MT_BBP(CORE, 42), 0x00000000}, - {MT_BBP(CORE, 44), 0x00000000}, - - {MT_BBP(IBI, 11), 0x00000080}, - - /* - 0x2300[5] Default Antenna: - 0 for WIFI main antenna - 1 for WIFI aux antenna - - */ - {MT_BBP(AGC, 0), 0x00021400}, - {MT_BBP(AGC, 1), 0x00000003}, - {MT_BBP(AGC, 2), 0x003A6464}, - {MT_BBP(AGC, 15), 0x88A28CB8}, - {MT_BBP(AGC, 22), 0x00001E21}, - {MT_BBP(AGC, 23), 0x0000272C}, - {MT_BBP(AGC, 24), 0x00002F3A}, - {MT_BBP(AGC, 25), 0x8000005A}, - {MT_BBP(AGC, 26), 0x007C2005}, - {MT_BBP(AGC, 34), 0x000A0C0C}, - {MT_BBP(AGC, 37), 0x2121262C}, - {MT_BBP(AGC, 41), 0x38383E45}, - {MT_BBP(AGC, 57), 0x00001010}, - {MT_BBP(AGC, 59), 0xBAA20E96}, - {MT_BBP(AGC, 63), 0x00000001}, - - {MT_BBP(TXC, 0), 0x00280403}, - {MT_BBP(TXC, 1), 0x00000000}, - - {MT_BBP(RXC, 1), 0x00000012}, - {MT_BBP(RXC, 2), 0x00000011}, - {MT_BBP(RXC, 3), 0x00000005}, - {MT_BBP(RXC, 4), 0x00000000}, - {MT_BBP(RXC, 5), 0xF977C4EC}, - {MT_BBP(RXC, 7), 0x00000090}, - - {MT_BBP(TXO, 8), 0x00000000}, - - {MT_BBP(TXBE, 0), 0x00000000}, - {MT_BBP(TXBE, 4), 0x00000004}, - {MT_BBP(TXBE, 6), 0x00000000}, - {MT_BBP(TXBE, 8), 0x00000014}, - {MT_BBP(TXBE, 9), 0x20000000}, - {MT_BBP(TXBE, 10), 0x00000000}, - {MT_BBP(TXBE, 12), 0x00000000}, - {MT_BBP(TXBE, 13), 0x00000000}, - {MT_BBP(TXBE, 14), 0x00000000}, - {MT_BBP(TXBE, 15), 0x00000000}, - {MT_BBP(TXBE, 16), 0x00000000}, - {MT_BBP(TXBE, 17), 0x00000000}, - - {MT_BBP(RXFE, 1), 0x00008800}, /* Add for E3 */ - {MT_BBP(RXFE, 3), 0x00000000}, - {MT_BBP(RXFE, 4), 0x00000000}, - - {MT_BBP(RXO, 13), 0x00000092}, - {MT_BBP(RXO, 14), 0x00060612}, - {MT_BBP(RXO, 15), 0xC8321B18}, - {MT_BBP(RXO, 16), 0x0000001E}, - {MT_BBP(RXO, 17), 0x00000000}, - {MT_BBP(RXO, 18), 0xCC00A993}, - {MT_BBP(RXO, 19), 0xB9CB9CB9}, - {MT_BBP(RXO, 20), 0x26c00057}, - {MT_BBP(RXO, 21), 0x00000001}, - {MT_BBP(RXO, 24), 0x00000006}, + { MT_BBP(CORE, 1), 0x00000002 }, + { MT_BBP(CORE, 4), 0x00000000 }, + { MT_BBP(CORE, 24), 0x00000000 }, + { MT_BBP(CORE, 32), 0x4003000a }, + { MT_BBP(CORE, 42), 0x00000000 }, + { MT_BBP(CORE, 44), 0x00000000 }, + { MT_BBP(IBI, 11), 0x0FDE8081 }, + { MT_BBP(AGC, 0), 0x00021400 }, + { MT_BBP(AGC, 1), 0x00000003 }, + { MT_BBP(AGC, 2), 0x003A6464 }, + { MT_BBP(AGC, 15), 0x88A28CB8 }, + { MT_BBP(AGC, 22), 0x00001E21 }, + { MT_BBP(AGC, 23), 0x0000272C }, + { MT_BBP(AGC, 24), 0x00002F3A }, + { MT_BBP(AGC, 25), 0x8000005A }, + { MT_BBP(AGC, 26), 0x007C2005 }, + { MT_BBP(AGC, 33), 0x00003238 }, + { MT_BBP(AGC, 34), 0x000A0C0C }, + { MT_BBP(AGC, 37), 0x2121262C }, + { MT_BBP(AGC, 41), 0x38383E45 }, + { MT_BBP(AGC, 57), 0x00001010 }, + { MT_BBP(AGC, 59), 0xBAA20E96 }, + { MT_BBP(AGC, 63), 0x00000001 }, + { MT_BBP(TXC, 0), 0x00280403 }, + { MT_BBP(TXC, 1), 0x00000000 }, + { MT_BBP(RXC, 1), 0x00000012 }, + { MT_BBP(RXC, 2), 0x00000011 }, + { MT_BBP(RXC, 3), 0x00000005 }, + { MT_BBP(RXC, 4), 0x00000000 }, + { MT_BBP(RXC, 5), 0xF977C4EC }, + { MT_BBP(RXC, 7), 0x00000090 }, + { MT_BBP(TXO, 8), 0x00000000 }, + { MT_BBP(TXBE, 0), 0x00000000 }, + { MT_BBP(TXBE, 4), 0x00000004 }, + { MT_BBP(TXBE, 6), 0x00000000 }, + { MT_BBP(TXBE, 8), 
0x00000014 }, + { MT_BBP(TXBE, 9), 0x20000000 }, + { MT_BBP(TXBE, 10), 0x00000000 }, + { MT_BBP(TXBE, 12), 0x00000000 }, + { MT_BBP(TXBE, 13), 0x00000000 }, + { MT_BBP(TXBE, 14), 0x00000000 }, + { MT_BBP(TXBE, 15), 0x00000000 }, + { MT_BBP(TXBE, 16), 0x00000000 }, + { MT_BBP(TXBE, 17), 0x00000000 }, + { MT_BBP(RXFE, 1), 0x00008800 }, + { MT_BBP(RXFE, 3), 0x00000000 }, + { MT_BBP(RXFE, 4), 0x00000000 }, + { MT_BBP(RXO, 13), 0x00000192 }, + { MT_BBP(RXO, 14), 0x00060612 }, + { MT_BBP(RXO, 15), 0xC8321B18 }, + { MT_BBP(RXO, 16), 0x0000001E }, + { MT_BBP(RXO, 17), 0x00000000 }, + { MT_BBP(RXO, 18), 0xCC00A993 }, + { MT_BBP(RXO, 19), 0xB9CB9CB9 }, + { MT_BBP(RXO, 20), 0x26c00057 }, + { MT_BBP(RXO, 21), 0x00000001 }, + { MT_BBP(RXO, 24), 0x00000006 }, + { MT_BBP(RXO, 28), 0x0000003F }, }; static const struct mt76x0_bbp_switch_item mt76x0_bbp_switch_tab[] = { - {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 8), 0x0E344EF0}}, - {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 8), 0x122C54F2}}, + { RF_G_BAND | RF_BW_20 | RF_BW_40, { MT_BBP(AGC, 4), 0x1FEDA049 } }, + { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 4), 0x1FECA054 } }, + + { RF_G_BAND | RF_BW_20 | RF_BW_40, { MT_BBP(AGC, 6), 0x00000045 } }, + { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 6), 0x0000000A } }, - {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 14), 0x310F2E39}}, - {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 14), 0x310F2A3F}}, + { RF_G_BAND | RF_BW_20 | RF_BW_40, { MT_BBP(AGC, 8), 0x16344EF0 } }, + { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 8), 0x122C54F2 } }, - {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 32), 0x00003230}}, - {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 32), 0x0000181C}}, + { RF_G_BAND | RF_BW_20, { MT_BBP(AGC, 12), 0x05052879 } }, + { RF_G_BAND | RF_BW_40, { MT_BBP(AGC, 12), 0x050528F9 } }, + { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 12), 0x050528F9 } }, - {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 33), 0x00003240}}, - {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 33), 0x00003218}}, + { RF_G_BAND | RF_BW_20 | RF_BW_40, { MT_BBP(AGC, 13), 0x35050004 } }, + { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 13), 0x2C3A0406 } }, - {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 35), 0x11112016}}, - {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 35), 0x11112016}}, + { RF_G_BAND | RF_BW_20 | RF_BW_40, { MT_BBP(AGC, 14), 0x310F2E3C } }, + { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 14), 0x310F2A3F } }, - {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(RXO, 28), 0x0000008A}}, - {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(RXO, 28), 0x0000008A}}, + { RF_G_BAND | RF_BW_20 | RF_BW_40, { MT_BBP(AGC, 26), 0x007C2005 } }, + { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 26), 0x007C2005 } }, - {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 4), 0x1FEDA049}}, - {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 4), 0x1FECA054}}, + { RF_G_BAND | RF_BW_20 | RF_BW_40, { MT_BBP(AGC, 27), 0x000000E1 } }, + { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 27), 0x000000EC } }, - {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 6), 0x00000045}}, - {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 6), 0x0000000A}}, + { RF_G_BAND | RF_BW_20, { MT_BBP(AGC, 28), 0x00060806 } }, + { RF_G_BAND | RF_BW_40, { MT_BBP(AGC, 28), 0x00050806 } }, + { RF_A_BAND | RF_BW_40, { MT_BBP(AGC, 28), 0x00060801 } }, + { RF_A_BAND | RF_BW_20 | RF_BW_80, { MT_BBP(AGC, 
28), 0x00060806 } }, - {RF_G_BAND | RF_BW_20, {MT_BBP(AGC, 12), 0x05052879}}, - {RF_G_BAND | RF_BW_40, {MT_BBP(AGC, 12), 0x050528F9}}, - {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 12), 0x050528F9}}, + { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(RXO, 28), 0x0000008A } }, - {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 13), 0x35050004}}, - {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 13), 0x2C3A0406}}, + { RF_G_BAND | RF_BW_20 | RF_BW_40, { MT_BBP(AGC, 31), 0x00000E23 } }, + { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 31), 0x00000E13 } }, - {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 27), 0x000000E1}}, - {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 27), 0x000000EC}}, + { RF_G_BAND | RF_BW_20 | RF_BW_40, { MT_BBP(AGC, 32), 0x00003218 } }, + { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 32), 0x0000181C } }, - {RF_G_BAND | RF_BW_20, {MT_BBP(AGC, 28), 0x00060806}}, - {RF_G_BAND | RF_BW_40, {MT_BBP(AGC, 28), 0x00050806}}, - {RF_A_BAND | RF_BW_40, {MT_BBP(AGC, 28), 0x00060801}}, - {RF_A_BAND | RF_BW_20 | RF_BW_80, {MT_BBP(AGC, 28), 0x00060806}}, + { RF_G_BAND | RF_BW_20 | RF_BW_40, { MT_BBP(AGC, 33), 0x00003240 } }, + { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 33), 0x00003218 } }, - {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 31), 0x00000F23}}, - {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 31), 0x00000F13}}, + { RF_G_BAND | RF_BW_20, { MT_BBP(AGC, 35), 0x11111616 } }, + { RF_G_BAND | RF_BW_40, { MT_BBP(AGC, 35), 0x11111516 } }, + { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 35), 0x11111111 } }, - {RF_G_BAND | RF_BW_20, {MT_BBP(AGC, 39), 0x2A2A3036}}, - {RF_G_BAND | RF_BW_40, {MT_BBP(AGC, 39), 0x2A2A2C36}}, - {RF_A_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 39), 0x2A2A3036}}, - {RF_A_BAND | RF_BW_80, {MT_BBP(AGC, 39), 0x2A2A2A36}}, + { RF_G_BAND | RF_BW_20, { MT_BBP(AGC, 39), 0x2A2A3036 } }, + { RF_G_BAND | RF_BW_40, { MT_BBP(AGC, 39), 0x2A2A2C36 } }, + { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 39), 0x2A2A2A2A } }, - {RF_G_BAND | RF_BW_20, {MT_BBP(AGC, 43), 0x27273438}}, - {RF_G_BAND | RF_BW_40, {MT_BBP(AGC, 43), 0x27272D38}}, - {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 43), 0x27272B30}}, + { RF_G_BAND | RF_BW_20, { MT_BBP(AGC, 43), 0x27273438 } }, + { RF_G_BAND | RF_BW_40, { MT_BBP(AGC, 43), 0x27272D38 } }, + { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 43), 0x27271A1A } }, - {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 51), 0x17171C1C}}, - {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 51), 0xFFFFFFFF}}, + { RF_G_BAND | RF_BW_20 | RF_BW_40, { MT_BBP(AGC, 51), 0x17171C1C } }, + { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 51), 0xFFFFFFFF } }, - {RF_G_BAND | RF_BW_20, {MT_BBP(AGC, 53), 0x26262A2F}}, - {RF_G_BAND | RF_BW_40, {MT_BBP(AGC, 53), 0x2626322F}}, - {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 53), 0xFFFFFFFF}}, + { RF_G_BAND | RF_BW_20, { MT_BBP(AGC, 53), 0x26262A2F } }, + { RF_G_BAND | RF_BW_40, { MT_BBP(AGC, 53), 0x2626322F } }, + { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 53), 0xFFFFFFFF } }, - {RF_G_BAND | RF_BW_20, {MT_BBP(AGC, 55), 0x40404E58}}, - {RF_G_BAND | RF_BW_40, {MT_BBP(AGC, 55), 0x40405858}}, - {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 55), 0xFFFFFFFF}}, + { RF_G_BAND | RF_BW_20 | RF_BW_40, { MT_BBP(AGC, 55), 0x40404040 } }, + { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 55), 0xFFFFFFFF } }, - {RF_G_BAND | 
RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 58), 0x00001010}}, - {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 58), 0x00000000}}, + { RF_G_BAND | RF_BW_20 | RF_BW_40, { MT_BBP(AGC, 58), 0x00001010 } }, + { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 58), 0x00000000 } }, - {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(RXFE, 0), 0x3D5000E0}}, - {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(RXFE, 0), 0x895000E0}}, + { RF_G_BAND | RF_BW_20 | RF_BW_40, { MT_BBP(RXFE, 0), 0x3D5000E0 } }, + { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(RXFE, 0), 0x895000E0 } }, }; static const struct mt76_reg_pair mt76x0_dcoc_tab[] = { - {MT_BBP(CAL, 47), 0x000010F0 }, - {MT_BBP(CAL, 48), 0x00008080 }, - {MT_BBP(CAL, 49), 0x00000F07 }, - {MT_BBP(CAL, 50), 0x00000040 }, - {MT_BBP(CAL, 51), 0x00000404 }, - {MT_BBP(CAL, 52), 0x00080803 }, - {MT_BBP(CAL, 53), 0x00000704 }, - {MT_BBP(CAL, 54), 0x00002828 }, - {MT_BBP(CAL, 55), 0x00005050 }, + { MT_BBP(CAL, 47), 0x000010F0 }, + { MT_BBP(CAL, 48), 0x00008080 }, + { MT_BBP(CAL, 49), 0x00000F07 }, + { MT_BBP(CAL, 50), 0x00000040 }, + { MT_BBP(CAL, 51), 0x00000404 }, + { MT_BBP(CAL, 52), 0x00080803 }, + { MT_BBP(CAL, 53), 0x00000704 }, + { MT_BBP(CAL, 54), 0x00002828 }, + { MT_BBP(CAL, 55), 0x00005050 }, }; #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mac.c b/drivers/net/wireless/mediatek/mt76/mt76x0/mac.c index 91a84be36d3b..7a422c590211 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mac.c @@ -13,241 +13,13 @@ * GNU General Public License for more details. */ -#include "mt76x0.h" -#include "trace.h" #include <linux/etherdevice.h> -static void -mt76_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate, - enum nl80211_band band) -{ - u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate); - - txrate->idx = 0; - txrate->flags = 0; - txrate->count = 1; - - switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) { - case MT_PHY_TYPE_OFDM: - if (band == NL80211_BAND_2GHZ) - idx += 4; - - txrate->idx = idx; - return; - case MT_PHY_TYPE_CCK: - if (idx >= 8) - idx -= 8; - - txrate->idx = idx; - return; - case MT_PHY_TYPE_HT_GF: - txrate->flags |= IEEE80211_TX_RC_GREEN_FIELD; - /* fall through */ - case MT_PHY_TYPE_HT: - txrate->flags |= IEEE80211_TX_RC_MCS; - txrate->idx = idx; - break; - case MT_PHY_TYPE_VHT: - txrate->flags |= IEEE80211_TX_RC_VHT_MCS; - txrate->idx = idx; - break; - default: - WARN_ON(1); - return; - } - - switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) { - case MT_PHY_BW_20: - break; - case MT_PHY_BW_40: - txrate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; - break; - case MT_PHY_BW_80: - txrate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH; - break; - default: - WARN_ON(1); - return; - } - - if (rate & MT_RXWI_RATE_SGI) - txrate->flags |= IEEE80211_TX_RC_SHORT_GI; -} - -static void -mt76_mac_fill_tx_status(struct mt76x0_dev *dev, struct ieee80211_tx_info *info, - struct mt76_tx_status *st, int n_frames) -{ - struct ieee80211_tx_rate *rate = info->status.rates; - int cur_idx, last_rate; - int i; - - if (!n_frames) - return; - - last_rate = min_t(int, st->retry, IEEE80211_TX_MAX_RATES - 1); - mt76_mac_process_tx_rate(&rate[last_rate], st->rate, - dev->mt76.chandef.chan->band); - if (last_rate < IEEE80211_TX_MAX_RATES - 1) - rate[last_rate + 1].idx = -1; - - cur_idx = rate[last_rate].idx + last_rate; - for (i = 0; i <= last_rate; i++) { - rate[i].flags = rate[last_rate].flags; - rate[i].idx = max_t(int, 0, cur_idx - i); - rate[i].count = 1; - } - - rate[last_rate - 1].count = st->retry + 
1 - last_rate; - - info->status.ampdu_len = n_frames; - info->status.ampdu_ack_len = st->success ? n_frames : 0; - - if (st->pktid & MT_TXWI_PKTID_PROBE) - info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE; - - if (st->aggr) - info->flags |= IEEE80211_TX_CTL_AMPDU | - IEEE80211_TX_STAT_AMPDU; - - if (!st->ack_req) - info->flags |= IEEE80211_TX_CTL_NO_ACK; - else if (st->success) - info->flags |= IEEE80211_TX_STAT_ACK; -} - -u16 mt76x0_mac_tx_rate_val(struct mt76x0_dev *dev, - const struct ieee80211_tx_rate *rate, u8 *nss_val) -{ - u16 rateval; - u8 phy, rate_idx; - u8 nss = 1; - u8 bw = 0; - - if (rate->flags & IEEE80211_TX_RC_VHT_MCS) { - rate_idx = rate->idx; - nss = 1 + (rate->idx >> 4); - phy = MT_PHY_TYPE_VHT; - if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH) - bw = 2; - else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) - bw = 1; - } else if (rate->flags & IEEE80211_TX_RC_MCS) { - rate_idx = rate->idx; - nss = 1 + (rate->idx >> 3); - phy = MT_PHY_TYPE_HT; - if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD) - phy = MT_PHY_TYPE_HT_GF; - if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) - bw = 1; - } else { - const struct ieee80211_rate *r; - int band = dev->mt76.chandef.chan->band; - u16 val; - - r = &dev->mt76.hw->wiphy->bands[band]->bitrates[rate->idx]; - if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) - val = r->hw_value_short; - else - val = r->hw_value; - - phy = val >> 8; - rate_idx = val & 0xff; - bw = 0; - } - - rateval = FIELD_PREP(MT_RXWI_RATE_INDEX, rate_idx); - rateval |= FIELD_PREP(MT_RXWI_RATE_PHY, phy); - rateval |= FIELD_PREP(MT_RXWI_RATE_BW, bw); - if (rate->flags & IEEE80211_TX_RC_SHORT_GI) - rateval |= MT_RXWI_RATE_SGI; - - *nss_val = nss; - return cpu_to_le16(rateval); -} - -void mt76x0_mac_wcid_set_rate(struct mt76x0_dev *dev, struct mt76_wcid *wcid, - const struct ieee80211_tx_rate *rate) -{ - unsigned long flags; - - spin_lock_irqsave(&dev->mt76.lock, flags); - wcid->tx_rate = mt76x0_mac_tx_rate_val(dev, rate, &wcid->tx_rate_nss); - wcid->tx_rate_set = true; - spin_unlock_irqrestore(&dev->mt76.lock, flags); -} - -struct mt76_tx_status mt76x0_mac_fetch_tx_status(struct mt76x0_dev *dev) -{ - struct mt76_tx_status stat = {}; - u32 stat2, stat1; - - stat2 = mt76_rr(dev, MT_TX_STAT_FIFO_EXT); - stat1 = mt76_rr(dev, MT_TX_STAT_FIFO); - - stat.valid = !!(stat1 & MT_TX_STAT_FIFO_VALID); - stat.success = !!(stat1 & MT_TX_STAT_FIFO_SUCCESS); - stat.aggr = !!(stat1 & MT_TX_STAT_FIFO_AGGR); - stat.ack_req = !!(stat1 & MT_TX_STAT_FIFO_ACKREQ); - stat.wcid = FIELD_GET(MT_TX_STAT_FIFO_WCID, stat1); - stat.rate = FIELD_GET(MT_TX_STAT_FIFO_RATE, stat1); - - stat.retry = FIELD_GET(MT_TX_STAT_FIFO_EXT_RETRY, stat2); - stat.pktid = FIELD_GET(MT_TX_STAT_FIFO_EXT_PKTID, stat2); - - return stat; -} - -void mt76x0_send_tx_status(struct mt76x0_dev *dev, struct mt76_tx_status *stat, u8 *update) -{ - struct ieee80211_tx_info info = {}; - struct ieee80211_sta *sta = NULL; - struct mt76_wcid *wcid = NULL; - struct mt76_sta *msta = NULL; - - rcu_read_lock(); - if (stat->wcid < ARRAY_SIZE(dev->wcid)) - wcid = rcu_dereference(dev->wcid[stat->wcid]); - - if (wcid) { - void *priv; - priv = msta = container_of(wcid, struct mt76_sta, wcid); - sta = container_of(priv, struct ieee80211_sta, drv_priv); - } - - if (msta && stat->aggr) { - u32 stat_val, stat_cache; - - stat_val = stat->rate; - stat_val |= ((u32) stat->retry) << 16; - stat_cache = msta->status.rate; - stat_cache |= ((u32) msta->status.retry) << 16; - - if (*update == 0 && stat_val == stat_cache && - stat->wcid == msta->status.wcid && 
msta->n_frames < 32) { - msta->n_frames++; - goto out; - } - - mt76_mac_fill_tx_status(dev, &info, &msta->status, - msta->n_frames); - msta->status = *stat; - msta->n_frames = 1; - *update = 0; - } else { - mt76_mac_fill_tx_status(dev, &info, stat, 1); - *update = 1; - } - - spin_lock_bh(&dev->mac_lock); - ieee80211_tx_status_noskb(dev->mt76.hw, sta, &info); - spin_unlock_bh(&dev->mac_lock); -out: - rcu_read_unlock(); -} +#include "mt76x0.h" +#include "trace.h" -void mt76x0_mac_set_protection(struct mt76x0_dev *dev, bool legacy_prot, - int ht_mode) +void mt76x0_mac_set_protection(struct mt76x02_dev *dev, bool legacy_prot, + int ht_mode) { int mode = ht_mode & IEEE80211_HT_OP_MODE_PROTECTION; bool non_gf = !!(ht_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT); @@ -305,7 +77,7 @@ void mt76x0_mac_set_protection(struct mt76x0_dev *dev, bool legacy_prot, mt76_wr(dev, MT_CCK_PROT_CFG + i * 4, prot[i]); } -void mt76x0_mac_set_short_preamble(struct mt76x0_dev *dev, bool short_preamb) +void mt76x0_mac_set_short_preamble(struct mt76x02_dev *dev, bool short_preamb) { if (short_preamb) mt76_set(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT); @@ -313,7 +85,7 @@ void mt76x0_mac_set_short_preamble(struct mt76x0_dev *dev, bool short_preamb) mt76_clear(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT); } -void mt76x0_mac_config_tsf(struct mt76x0_dev *dev, bool enable, int interval) +void mt76x0_mac_config_tsf(struct mt76x02_dev *dev, bool enable, int interval) { u32 val = mt76_rr(dev, MT_BEACON_TIME_CFG); @@ -333,7 +105,7 @@ void mt76x0_mac_config_tsf(struct mt76x0_dev *dev, bool enable, int interval) MT_BEACON_TIME_CFG_TBTT_EN; } -static void mt76x0_check_mac_err(struct mt76x0_dev *dev) +static void mt76x0_check_mac_err(struct mt76x02_dev *dev) { u32 val = mt76_rr(dev, 0x10f4); @@ -348,15 +120,15 @@ static void mt76x0_check_mac_err(struct mt76x0_dev *dev) } void mt76x0_mac_work(struct work_struct *work) { - struct mt76x0_dev *dev = container_of(work, struct mt76x0_dev, + struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev, mac_work.work); struct { u32 addr_base; u32 span; u64 *stat_base; } spans[] = { - { MT_RX_STA_CNT0, 3, dev->stats.rx_stat }, - { MT_TX_STA_CNT0, 3, dev->stats.tx_stat }, + { MT_RX_STAT_0, 3, dev->stats.rx_stat }, + { MT_TX_STA_0, 3, dev->stats.tx_stat }, { MT_TX_AGG_STAT, 1, dev->stats.aggr_stat }, { MT_MPDU_DENSITY_CNT, 1, dev->stats.zero_len_del }, { MT_TX_AGG_CNT_BASE0, 8, &dev->stats.aggr_n[0] }, @@ -399,24 +171,7 @@ void mt76x0_mac_work(struct work_struct *work) ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mac_work, 10 * HZ); } -void -mt76x0_mac_wcid_setup(struct mt76x0_dev *dev, u8 idx, u8 vif_idx, u8 *mac) -{ - u8 zmac[ETH_ALEN] = {}; - u32 attr; - - attr = FIELD_PREP(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) | - FIELD_PREP(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8)); - - mt76_wr(dev, MT_WCID_ATTR(idx), attr); - - if (mac) - memcpy(zmac, mac, sizeof(zmac)); - - mt76x0_addr_wr(dev, MT_WCID_ADDR(idx), zmac); -} - -void mt76x0_mac_set_ampdu_factor(struct mt76x0_dev *dev) +void mt76x0_mac_set_ampdu_factor(struct mt76x02_dev *dev) { struct ieee80211_sta *sta; struct mt76_wcid *wcid; @@ -425,12 +180,12 @@ void mt76x0_mac_set_ampdu_factor(struct mt76x0_dev *dev) int i; rcu_read_lock(); - for (i = 0; i < ARRAY_SIZE(dev->wcid); i++) { - wcid = rcu_dereference(dev->wcid[i]); + for (i = 0; i < ARRAY_SIZE(dev->mt76.wcid); i++) { + wcid = rcu_dereference(dev->mt76.wcid[i]); if (!wcid) continue; - msta = container_of(wcid, struct mt76_sta, wcid); + msta = container_of(wcid, struct 
mt76x02_sta, wcid); sta = container_of(msta, struct ieee80211_sta, drv_priv); min_factor = min(min_factor, sta->ht_cap.ampdu_factor); @@ -440,219 +195,3 @@ void mt76x0_mac_set_ampdu_factor(struct mt76x0_dev *dev) mt76_wr(dev, MT_MAX_LEN_CFG, 0xa0fff | FIELD_PREP(MT_MAX_LEN_CFG_AMPDU, min_factor)); } - -static void -mt76_mac_process_rate(struct ieee80211_rx_status *status, u16 rate) -{ - u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate); - - switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) { - case MT_PHY_TYPE_OFDM: - if (idx >= 8) - idx = 0; - - if (status->band == NL80211_BAND_2GHZ) - idx += 4; - - status->rate_idx = idx; - return; - case MT_PHY_TYPE_CCK: - if (idx >= 8) { - idx -= 8; - status->enc_flags |= RX_ENC_FLAG_SHORTPRE; - } - - if (idx >= 4) - idx = 0; - - status->rate_idx = idx; - return; - case MT_PHY_TYPE_HT_GF: - status->enc_flags |= RX_ENC_FLAG_HT_GF; - /* fall through */ - case MT_PHY_TYPE_HT: - status->encoding = RX_ENC_HT; - status->rate_idx = idx; - break; - case MT_PHY_TYPE_VHT: - status->encoding = RX_ENC_VHT; - status->rate_idx = FIELD_GET(MT_RATE_INDEX_VHT_IDX, idx); - status->nss = FIELD_GET(MT_RATE_INDEX_VHT_NSS, idx) + 1; - break; - default: - WARN_ON(1); - return; - } - - if (rate & MT_RXWI_RATE_LDPC) - status->enc_flags |= RX_ENC_FLAG_LDPC; - - if (rate & MT_RXWI_RATE_SGI) - status->enc_flags |= RX_ENC_FLAG_SHORT_GI; - - if (rate & MT_RXWI_RATE_STBC) - status->enc_flags |= 1 << RX_ENC_FLAG_STBC_SHIFT; - - switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) { - case MT_PHY_BW_20: - break; - case MT_PHY_BW_40: - status->bw = RATE_INFO_BW_40; - break; - case MT_PHY_BW_80: - status->bw = RATE_INFO_BW_80; - break; - default: - WARN_ON(1); - break; - } -} - -static void -mt76x0_rx_monitor_beacon(struct mt76x0_dev *dev, struct mt76x0_rxwi *rxwi, - u16 rate, int rssi) -{ - dev->bcn_phy_mode = FIELD_GET(MT_RXWI_RATE_PHY, rate); - dev->avg_rssi = ((dev->avg_rssi * 15) / 16 + (rssi << 8)) / 256; -} - -static int -mt76x0_rx_is_our_beacon(struct mt76x0_dev *dev, u8 *data) -{ - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)data; - - return ieee80211_is_beacon(hdr->frame_control) && - ether_addr_equal(hdr->addr2, dev->ap_bssid); -} - -u32 mt76x0_mac_process_rx(struct mt76x0_dev *dev, struct sk_buff *skb, - u8 *data, void *rxi) -{ - struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); - struct mt76x0_rxwi *rxwi = rxi; - u32 len, ctl = le32_to_cpu(rxwi->ctl); - u16 rate = le16_to_cpu(rxwi->rate); - int rssi; - - len = FIELD_GET(MT_RXWI_CTL_MPDU_LEN, ctl); - if (WARN_ON(len < 10)) - return 0; - - if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_DECRYPT)) { - status->flag |= RX_FLAG_DECRYPTED; - status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED; - } - - status->chains = BIT(0); - rssi = mt76x0_phy_get_rssi(dev, rxwi); - status->chain_signal[0] = status->signal = rssi; - status->freq = dev->mt76.chandef.chan->center_freq; - status->band = dev->mt76.chandef.chan->band; - - mt76_mac_process_rate(status, rate); - - spin_lock_bh(&dev->con_mon_lock); - if (mt76x0_rx_is_our_beacon(dev, data)) { - mt76x0_rx_monitor_beacon(dev, rxwi, rate, rssi); - } else if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_U2M)) { - if (dev->avg_rssi == 0) - dev->avg_rssi = rssi; - else - dev->avg_rssi = (dev->avg_rssi * 15) / 16 + rssi / 16; - - } - spin_unlock_bh(&dev->con_mon_lock); - - return len; -} - -static enum mt76_cipher_type -mt76_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data) -{ - memset(key_data, 0, 32); - if (!key) - return MT_CIPHER_NONE; - - if (key->keylen > 32) - return MT_CIPHER_NONE; - 
- memcpy(key_data, key->key, key->keylen); - - switch (key->cipher) { - case WLAN_CIPHER_SUITE_WEP40: - return MT_CIPHER_WEP40; - case WLAN_CIPHER_SUITE_WEP104: - return MT_CIPHER_WEP104; - case WLAN_CIPHER_SUITE_TKIP: - return MT_CIPHER_TKIP; - case WLAN_CIPHER_SUITE_CCMP: - return MT_CIPHER_AES_CCMP; - default: - return MT_CIPHER_NONE; - } -} - -int mt76x0_mac_wcid_set_key(struct mt76x0_dev *dev, u8 idx, - struct ieee80211_key_conf *key) -{ - enum mt76_cipher_type cipher; - u8 key_data[32]; - u8 iv_data[8]; - u32 val; - - cipher = mt76_mac_get_key_info(key, key_data); - if (cipher == MT_CIPHER_NONE && key) - return -EINVAL; - - trace_mt76x0_set_key(&dev->mt76, idx); - - mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data)); - - memset(iv_data, 0, sizeof(iv_data)); - if (key) { - iv_data[3] = key->keyidx << 6; - if (cipher >= MT_CIPHER_TKIP) { - /* Note: start with 1 to comply with spec, - * (see comment on common/cmm_wpa.c:4291). - */ - iv_data[0] |= 1; - iv_data[3] |= 0x20; - } - } - mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data)); - - val = mt76_rr(dev, MT_WCID_ATTR(idx)); - val &= ~MT_WCID_ATTR_PKEY_MODE & ~MT_WCID_ATTR_PKEY_MODE_EXT; - val |= FIELD_PREP(MT_WCID_ATTR_PKEY_MODE, cipher & 7) | - FIELD_PREP(MT_WCID_ATTR_PKEY_MODE_EXT, cipher >> 3); - val &= ~MT_WCID_ATTR_PAIRWISE; - val |= MT_WCID_ATTR_PAIRWISE * - !!(key && key->flags & IEEE80211_KEY_FLAG_PAIRWISE); - mt76_wr(dev, MT_WCID_ATTR(idx), val); - - return 0; -} - -int mt76x0_mac_shared_key_setup(struct mt76x0_dev *dev, u8 vif_idx, u8 key_idx, - struct ieee80211_key_conf *key) -{ - enum mt76_cipher_type cipher; - u8 key_data[32]; - u32 val; - - cipher = mt76_mac_get_key_info(key, key_data); - if (cipher == MT_CIPHER_NONE && key) - return -EINVAL; - - trace_mt76x0_set_shared_key(&dev->mt76, vif_idx, key_idx); - - mt76_wr_copy(dev, MT_SKEY(vif_idx, key_idx), - key_data, sizeof(key_data)); - - val = mt76_rr(dev, MT_SKEY_MODE(vif_idx)); - val &= ~(MT_SKEY_MODE_MASK << MT_SKEY_MODE_SHIFT(vif_idx, key_idx)); - val |= cipher << MT_SKEY_MODE_SHIFT(vif_idx, key_idx); - mt76_wr(dev, MT_SKEY_MODE(vif_idx), val); - - return 0; -} diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mac.h b/drivers/net/wireless/mediatek/mt76/mt76x0/mac.h deleted file mode 100644 index bea067b71c13..000000000000 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/mac.h +++ /dev/null @@ -1,154 +0,0 @@ -/* - * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org> - * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 - * as published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#ifndef __MT76_MAC_H -#define __MT76_MAC_H - -/* Note: values in original "RSSI" and "SNR" fields are not actually what they - * are called for MT76X0U, names used by this driver are educated guesses - * (see vendor mac/ral_omac.c). 
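
In the mt76x0_mac_wcid_set_key() being removed above, the hardware cipher id is packed into the WCID attribute word as a 3-bit PKEY_MODE field plus a PKEY_MODE_EXT overflow bit (cipher & 7 and cipher >> 3). A small sketch of that packing; the field positions below are illustrative, not the real register layout:

#include <stdint.h>
#include <stdio.h>

/* Illustrative field positions; the real MT_WCID_ATTR layout differs. */
#define PKEY_MODE_SHIFT		1
#define PKEY_MODE_EXT_BIT	(1u << 4)

/* Clear-then-set of the cipher fields, mirroring the removed code:
 *   PKEY_MODE     <- cipher & 7
 *   PKEY_MODE_EXT <- cipher >> 3 (overflow for ids above 7) */
static uint32_t pack_cipher(uint32_t attr, unsigned int cipher)
{
	attr &= ~((7u << PKEY_MODE_SHIFT) | PKEY_MODE_EXT_BIT);
	attr |= (cipher & 7) << PKEY_MODE_SHIFT;
	if (cipher >> 3)
		attr |= PKEY_MODE_EXT_BIT;
	return attr;
}

int main(void)
{
	/* A cipher id of 8 overflows the 3-bit field and needs EXT. */
	printf("attr=0x%08x\n", (unsigned)pack_cipher(0, 8));
	return 0;
}
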
- */ -struct mt76x0_rxwi { - __le32 rxinfo; - - __le32 ctl; - - __le16 tid_sn; - __le16 rate; - - s8 rssi[4]; - - __le32 bbp_rxinfo[4]; -} __packed __aligned(4); - -#define MT_RXINFO_BA BIT(0) -#define MT_RXINFO_DATA BIT(1) -#define MT_RXINFO_NULL BIT(2) -#define MT_RXINFO_FRAG BIT(3) -#define MT_RXINFO_U2M BIT(4) -#define MT_RXINFO_MULTICAST BIT(5) -#define MT_RXINFO_BROADCAST BIT(6) -#define MT_RXINFO_MYBSS BIT(7) -#define MT_RXINFO_CRCERR BIT(8) -#define MT_RXINFO_ICVERR BIT(9) -#define MT_RXINFO_MICERR BIT(10) -#define MT_RXINFO_AMSDU BIT(11) -#define MT_RXINFO_HTC BIT(12) -#define MT_RXINFO_RSSI BIT(13) -#define MT_RXINFO_L2PAD BIT(14) -#define MT_RXINFO_AMPDU BIT(15) -#define MT_RXINFO_DECRYPT BIT(16) -#define MT_RXINFO_BSSIDX3 BIT(17) -#define MT_RXINFO_WAPI_KEY BIT(18) -#define MT_RXINFO_PN_LEN GENMASK(21, 19) -#define MT_RXINFO_SW_PKT_80211 BIT(22) -#define MT_RXINFO_TCP_SUM_BYPASS BIT(28) -#define MT_RXINFO_IP_SUM_BYPASS BIT(29) -#define MT_RXINFO_TCP_SUM_ERR BIT(30) -#define MT_RXINFO_IP_SUM_ERR BIT(31) - -#define MT_RXWI_CTL_WCID GENMASK(7, 0) -#define MT_RXWI_CTL_KEY_IDX GENMASK(9, 8) -#define MT_RXWI_CTL_BSS_IDX GENMASK(12, 10) -#define MT_RXWI_CTL_UDF GENMASK(15, 13) -#define MT_RXWI_CTL_MPDU_LEN GENMASK(27, 16) -#define MT_RXWI_CTL_TID GENMASK(31, 28) - -#define MT_RXWI_FRAG GENMASK(3, 0) -#define MT_RXWI_SN GENMASK(15, 4) - -#define MT_RXWI_RATE_INDEX GENMASK(5, 0) -#define MT_RXWI_RATE_LDPC BIT(6) -#define MT_RXWI_RATE_BW GENMASK(8, 7) -#define MT_RXWI_RATE_SGI BIT(9) -#define MT_RXWI_RATE_STBC BIT(10) -#define MT_RXWI_RATE_LDPC_ETXBF BIT(11) -#define MT_RXWI_RATE_SND BIT(12) -#define MT_RXWI_RATE_PHY GENMASK(15, 13) - -#define MT_RATE_INDEX_VHT_IDX GENMASK(3, 0) -#define MT_RATE_INDEX_VHT_NSS GENMASK(5, 4) - -#define MT_RXWI_GAIN_RSSI_VAL GENMASK(5, 0) -#define MT_RXWI_GAIN_RSSI_LNA_ID GENMASK(7, 6) -#define MT_RXWI_ANT_AUX_LNA BIT(7) - -#define MT_RXWI_EANT_ENC_ANT_ID GENMASK(7, 0) - -enum mt76_phy_bandwidth { - MT_PHY_BW_20, - MT_PHY_BW_40, - MT_PHY_BW_80, -}; - -struct mt76_txwi { - __le16 flags; - __le16 rate_ctl; - u8 ack_ctl; - u8 wcid; - __le16 len_ctl; - __le32 iv; - __le32 eiv; - u8 aid; - u8 txstream; - u8 ctl2; - u8 pktid; -} __packed __aligned(4); - -#define MT_TXWI_FLAGS_FRAG BIT(0) -#define MT_TXWI_FLAGS_MMPS BIT(1) -#define MT_TXWI_FLAGS_CFACK BIT(2) -#define MT_TXWI_FLAGS_TS BIT(3) -#define MT_TXWI_FLAGS_AMPDU BIT(4) -#define MT_TXWI_FLAGS_MPDU_DENSITY GENMASK(7, 5) -#define MT_TXWI_FLAGS_TXOP GENMASK(9, 8) -#define MT_TXWI_FLAGS_CWMIN GENMASK(12, 10) -#define MT_TXWI_FLAGS_NO_RATE_FALLBACK BIT(13) -#define MT_TXWI_FLAGS_TX_RPT BIT(14) -#define MT_TXWI_FLAGS_TX_RATE_LUT BIT(15) - -#define MT_TXWI_RATE_MCS GENMASK(6, 0) -#define MT_TXWI_RATE_BW BIT(7) -#define MT_TXWI_RATE_SGI BIT(8) -#define MT_TXWI_RATE_STBC GENMASK(10, 9) -#define MT_TXWI_RATE_PHY_MODE GENMASK(15, 14) - -#define MT_TXWI_ACK_CTL_REQ BIT(0) -#define MT_TXWI_ACK_CTL_NSEQ BIT(1) -#define MT_TXWI_ACK_CTL_BA_WINDOW GENMASK(7, 2) - -#define MT_TXWI_LEN_BYTE_CNT GENMASK(11, 0) - -#define MT_TXWI_CTL_TX_POWER_ADJ GENMASK(3, 0) -#define MT_TXWI_CTL_CHAN_CHECK_PKT BIT(4) -#define MT_TXWI_CTL_PIFS_REV BIT(6) - -#define MT_TXWI_PKTID_PROBE BIT(7) - -u32 mt76x0_mac_process_rx(struct mt76x0_dev *dev, struct sk_buff *skb, - u8 *data, void *rxi); -int mt76x0_mac_wcid_set_key(struct mt76x0_dev *dev, u8 idx, - struct ieee80211_key_conf *key); -void mt76x0_mac_wcid_set_rate(struct mt76x0_dev *dev, struct mt76_wcid *wcid, - const struct ieee80211_tx_rate *rate); - -int mt76x0_mac_shared_key_setup(struct 
mt76x0_dev *dev, u8 vif_idx, u8 key_idx, - struct ieee80211_key_conf *key); -u16 mt76x0_mac_tx_rate_val(struct mt76x0_dev *dev, - const struct ieee80211_tx_rate *rate, u8 *nss_val); -struct mt76_tx_status -mt76x0_mac_fetch_tx_status(struct mt76x0_dev *dev); -void mt76x0_send_tx_status(struct mt76x0_dev *dev, struct mt76_tx_status *stat, u8 *update); - -#endif diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c index 22bc9d368728..c9cd0254a979 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c @@ -13,144 +13,58 @@ * GNU General Public License for more details. */ -#include "mt76x0.h" -#include "mac.h" #include <linux/etherdevice.h> +#include "mt76x0.h" -static int mt76x0_start(struct ieee80211_hw *hw) -{ - struct mt76x0_dev *dev = hw->priv; - int ret; - - mutex_lock(&dev->mutex); - - ret = mt76x0_mac_start(dev); - if (ret) - goto out; - - ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mac_work, - MT_CALIBRATE_INTERVAL); - ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work, - MT_CALIBRATE_INTERVAL); -out: - mutex_unlock(&dev->mutex); - return ret; -} - -static void mt76x0_stop(struct ieee80211_hw *hw) -{ - struct mt76x0_dev *dev = hw->priv; - - mutex_lock(&dev->mutex); - - cancel_delayed_work_sync(&dev->cal_work); - cancel_delayed_work_sync(&dev->mac_work); - mt76x0_mac_stop(dev); - - mutex_unlock(&dev->mutex); -} - - -static int mt76x0_add_interface(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) +int mt76x0_config(struct ieee80211_hw *hw, u32 changed) { - struct mt76x0_dev *dev = hw->priv; - struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv; - unsigned int idx; - - idx = ffs(~dev->vif_mask); - if (!idx || idx > 8) - return -ENOSPC; - - idx--; - dev->vif_mask |= BIT(idx); - - mvif->idx = idx; - mvif->group_wcid.idx = GROUP_WCID(idx); - mvif->group_wcid.hw_key_idx = -1; - - return 0; -} + struct mt76x02_dev *dev = hw->priv; + int ret = 0; -static void mt76x0_remove_interface(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) -{ - struct mt76x0_dev *dev = hw->priv; - struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv; + mutex_lock(&dev->mt76.mutex); - dev->vif_mask &= ~BIT(mvif->idx); -} + if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { + ieee80211_stop_queues(hw); + ret = mt76x0_phy_set_channel(dev, &hw->conf.chandef); + ieee80211_wake_queues(hw); + } -static int mt76x0_config(struct ieee80211_hw *hw, u32 changed) -{ - struct mt76x0_dev *dev = hw->priv; - int ret = 0; + if (changed & IEEE80211_CONF_CHANGE_POWER) { + dev->mt76.txpower_conf = hw->conf.power_level * 2; - mutex_lock(&dev->mutex); + if (test_bit(MT76_STATE_RUNNING, &dev->mt76.state)) + mt76x0_phy_set_txpower(dev); + } if (changed & IEEE80211_CONF_CHANGE_MONITOR) { if (!(hw->conf.flags & IEEE80211_CONF_MONITOR)) - dev->rxfilter |= MT_RX_FILTR_CFG_PROMISC; + dev->mt76.rxfilter |= MT_RX_FILTR_CFG_PROMISC; else - dev->rxfilter &= ~MT_RX_FILTR_CFG_PROMISC; + dev->mt76.rxfilter &= ~MT_RX_FILTR_CFG_PROMISC; - mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter); + mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter); } - if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { - ieee80211_stop_queues(hw); - ret = mt76x0_phy_set_channel(dev, &hw->conf.chandef); - ieee80211_wake_queues(hw); - } - - mutex_unlock(&dev->mutex); + mutex_unlock(&dev->mt76.mutex); return ret; } +EXPORT_SYMBOL_GPL(mt76x0_config); static void -mt76_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags, - unsigned 
int *total_flags, u64 multicast) +mt76x0_addr_wr(struct mt76x02_dev *dev, const u32 offset, const u8 *addr) { - struct mt76x0_dev *dev = hw->priv; - u32 flags = 0; - -#define MT76_FILTER(_flag, _hw) do { \ - flags |= *total_flags & FIF_##_flag; \ - dev->rxfilter &= ~(_hw); \ - dev->rxfilter |= !(flags & FIF_##_flag) * (_hw); \ - } while (0) - - mutex_lock(&dev->mutex); - - dev->rxfilter &= ~MT_RX_FILTR_CFG_OTHER_BSS; - - MT76_FILTER(FCSFAIL, MT_RX_FILTR_CFG_CRC_ERR); - MT76_FILTER(PLCPFAIL, MT_RX_FILTR_CFG_PHY_ERR); - MT76_FILTER(CONTROL, MT_RX_FILTR_CFG_ACK | - MT_RX_FILTR_CFG_CTS | - MT_RX_FILTR_CFG_CFEND | - MT_RX_FILTR_CFG_CFACK | - MT_RX_FILTR_CFG_BA | - MT_RX_FILTR_CFG_CTRL_RSV); - MT76_FILTER(PSPOLL, MT_RX_FILTR_CFG_PSPOLL); - - *total_flags = flags; - mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter); - - mutex_unlock(&dev->mutex); + mt76_wr(dev, offset, get_unaligned_le32(addr)); + mt76_wr(dev, offset + 4, addr[4] | addr[5] << 8); } -static void -mt76x0_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_bss_conf *info, u32 changed) +void mt76x0_bss_info_changed(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *info, u32 changed) { - struct mt76x0_dev *dev = hw->priv; - - mutex_lock(&dev->mutex); + struct mt76x02_dev *dev = hw->priv; - if (changed & BSS_CHANGED_ASSOC) - mt76x0_phy_con_cal_onoff(dev, info); + mutex_lock(&dev->mt76.mutex); if (changed & BSS_CHANGED_BSSID) { mt76x0_addr_wr(dev, MT_MAC_BSSID_DW0, info->bssid); @@ -165,8 +79,8 @@ mt76x0_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, if (changed & BSS_CHANGED_BASIC_RATES) { mt76_wr(dev, MT_LEGACY_BASIC_RATE, info->basic_rates); - mt76_wr(dev, MT_HT_FBK_CFG0, 0x65432100); - mt76_wr(dev, MT_HT_FBK_CFG1, 0xedcba980); + mt76_wr(dev, MT_VHT_HT_FBK_CFG0, 0x65432100); + mt76_wr(dev, MT_VHT_HT_FBK_CFG1, 0xedcba980); mt76_wr(dev, MT_LG_FBK_CFG0, 0xedcba988); mt76_wr(dev, MT_LG_FBK_CFG1, 0x00002100); } @@ -191,82 +105,25 @@ mt76x0_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, if (changed & BSS_CHANGED_ASSOC) mt76x0_phy_recalibrate_after_assoc(dev); - mutex_unlock(&dev->mutex); -} - -static int -mt76x0_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_sta *sta) -{ - struct mt76x0_dev *dev = hw->priv; - struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv; - struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv; - int ret = 0; - int idx = 0; - - mutex_lock(&dev->mutex); - - idx = mt76_wcid_alloc(dev->wcid_mask, ARRAY_SIZE(dev->wcid)); - if (idx < 0) { - ret = -ENOSPC; - goto out; - } - - msta->wcid.idx = idx; - msta->wcid.hw_key_idx = -1; - mt76x0_mac_wcid_setup(dev, idx, mvif->idx, sta->addr); - mt76_clear(dev, MT_WCID_DROP(idx), MT_WCID_DROP_MASK(idx)); - rcu_assign_pointer(dev->wcid[idx], &msta->wcid); - mt76x0_mac_set_ampdu_factor(dev); - -out: - mutex_unlock(&dev->mutex); - - return ret; -} - -static int -mt76x0_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_sta *sta) -{ - struct mt76x0_dev *dev = hw->priv; - struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv; - int idx = msta->wcid.idx; - - mutex_lock(&dev->mutex); - rcu_assign_pointer(dev->wcid[idx], NULL); - mt76_set(dev, MT_WCID_DROP(idx), MT_WCID_DROP_MASK(idx)); - dev->wcid_mask[idx / BITS_PER_LONG] &= ~BIT(idx % BITS_PER_LONG); - mt76x0_mac_wcid_setup(dev, idx, 0, NULL); - mt76x0_mac_set_ampdu_factor(dev); - mutex_unlock(&dev->mutex); - - return 0; + mutex_unlock(&dev->mt76.mutex); } 
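/*
 * The MT76_FILTER() macro removed above builds the RX filter word without
 * branching: dev->rxfilter holds "drop this frame class" bits, so any
 * class that mac80211 requested (FIF_* flag set) must have its hardware
 * drop bit cleared. A minimal standalone sketch of the same idiom, with
 * hypothetical names that are not part of the driver:
 */
#include <stdbool.h>
#include <stdint.h>

static uint32_t rx_filter_bit(uint32_t rxfilter, bool wanted, uint32_t drop_bit)
{
	rxfilter &= ~drop_bit;          /* default: let the frame class through */
	rxfilter |= !wanted * drop_bit; /* re-arm the drop bit only if unwanted */
	return rxfilter;
}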
+EXPORT_SYMBOL_GPL(mt76x0_bss_info_changed); -static void -mt76x0_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - enum sta_notify_cmd cmd, struct ieee80211_sta *sta) +void mt76x0_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + const u8 *mac_addr) { -} - -static void -mt76x0_sw_scan(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - const u8 *mac_addr) -{ - struct mt76x0_dev *dev = hw->priv; + struct mt76x02_dev *dev = hw->priv; cancel_delayed_work_sync(&dev->cal_work); mt76x0_agc_save(dev); set_bit(MT76_SCANNING, &dev->mt76.state); } +EXPORT_SYMBOL_GPL(mt76x0_sw_scan); -static void -mt76x0_sw_scan_complete(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) +void mt76x0_sw_scan_complete(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) { - struct mt76x0_dev *dev = hw->priv; + struct mt76x02_dev *dev = hw->priv; mt76x0_agc_restore(dev); clear_bit(MT76_SCANNING, &dev->mt76.state); @@ -274,129 +131,14 @@ mt76x0_sw_scan_complete(struct ieee80211_hw *hw, ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work, MT_CALIBRATE_INTERVAL); } +EXPORT_SYMBOL_GPL(mt76x0_sw_scan_complete); -static int -mt76x0_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, - struct ieee80211_vif *vif, struct ieee80211_sta *sta, - struct ieee80211_key_conf *key) +int mt76x0_set_rts_threshold(struct ieee80211_hw *hw, u32 value) { - struct mt76x0_dev *dev = hw->priv; - struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv; - struct mt76_sta *msta = sta ? (struct mt76_sta *) sta->drv_priv : NULL; - struct mt76_wcid *wcid = msta ? &msta->wcid : &mvif->group_wcid; - int idx = key->keyidx; - int ret; - - if (cmd == SET_KEY) { - key->hw_key_idx = wcid->idx; - wcid->hw_key_idx = idx; - } else { - if (idx == wcid->hw_key_idx) - wcid->hw_key_idx = -1; - - key = NULL; - } - - if (!msta) { - if (key || wcid->hw_key_idx == idx) { - ret = mt76x0_mac_wcid_set_key(dev, wcid->idx, key); - if (ret) - return ret; - } - - return mt76x0_mac_shared_key_setup(dev, mvif->idx, idx, key); - } - - return mt76x0_mac_wcid_set_key(dev, msta->wcid.idx, key); -} - -static int mt76x0_set_rts_threshold(struct ieee80211_hw *hw, u32 value) -{ - struct mt76x0_dev *dev = hw->priv; + struct mt76x02_dev *dev = hw->priv; mt76_rmw_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH, value); return 0; } - -static int -mt76_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_ampdu_params *params) -{ - struct mt76x0_dev *dev = hw->priv; - struct ieee80211_sta *sta = params->sta; - enum ieee80211_ampdu_mlme_action action = params->action; - u16 tid = params->tid; - u16 *ssn = ¶ms->ssn; - struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv; - - WARN_ON(msta->wcid.idx > N_WCIDS); - - switch (action) { - case IEEE80211_AMPDU_RX_START: - mt76_set(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid)); - break; - case IEEE80211_AMPDU_RX_STOP: - mt76_clear(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid)); - break; - case IEEE80211_AMPDU_TX_OPERATIONAL: - ieee80211_send_bar(vif, sta->addr, tid, msta->agg_ssn[tid]); - break; - case IEEE80211_AMPDU_TX_STOP_FLUSH: - case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: - break; - case IEEE80211_AMPDU_TX_START: - msta->agg_ssn[tid] = *ssn << 4; - ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); - break; - case IEEE80211_AMPDU_TX_STOP_CONT: - ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); - break; - } - - return 0; -} - -static void -mt76_sta_rate_tbl_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_sta *sta) 
-{ - struct mt76x0_dev *dev = hw->priv; - struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv; - struct ieee80211_sta_rates *rates; - struct ieee80211_tx_rate rate = {}; - - rcu_read_lock(); - rates = rcu_dereference(sta->rates); - - if (!rates) - goto out; - - rate.idx = rates->rate[0].idx; - rate.flags = rates->rate[0].flags; - mt76x0_mac_wcid_set_rate(dev, &msta->wcid, &rate); - -out: - rcu_read_unlock(); -} - -const struct ieee80211_ops mt76x0_ops = { - .tx = mt76x0_tx, - .start = mt76x0_start, - .stop = mt76x0_stop, - .add_interface = mt76x0_add_interface, - .remove_interface = mt76x0_remove_interface, - .config = mt76x0_config, - .configure_filter = mt76_configure_filter, - .bss_info_changed = mt76x0_bss_info_changed, - .sta_add = mt76x0_sta_add, - .sta_remove = mt76x0_sta_remove, - .sta_notify = mt76x0_sta_notify, - .set_key = mt76x0_set_key, - .conf_tx = mt76x0_conf_tx, - .sw_scan_start = mt76x0_sw_scan, - .sw_scan_complete = mt76x0_sw_scan_complete, - .ampdu_action = mt76_ampdu_action, - .sta_rate_tbl_update = mt76_sta_rate_tbl_update, - .set_rts_threshold = mt76x0_set_rts_threshold, -}; +EXPORT_SYMBOL_GPL(mt76x0_set_rts_threshold); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.c deleted file mode 100644 index 8affacbab90a..000000000000 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.c +++ /dev/null @@ -1,656 +0,0 @@ -/* - * (c) Copyright 2002-2010, Ralink Technology, Inc. - * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org> - * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl> - * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 - * as published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ - -#include <linux/kernel.h> -#include <linux/firmware.h> -#include <linux/delay.h> -#include <linux/usb.h> -#include <linux/skbuff.h> - -#include "mt76x0.h" -#include "dma.h" -#include "mcu.h" -#include "usb.h" -#include "trace.h" - -#define MCU_FW_URB_MAX_PAYLOAD 0x38f8 -#define MCU_FW_URB_SIZE (MCU_FW_URB_MAX_PAYLOAD + 12) -#define MCU_RESP_URB_SIZE 1024 - -static inline int firmware_running(struct mt76x0_dev *dev) -{ - return mt76_rr(dev, MT_MCU_COM_REG0) == 1; -} - -static inline void skb_put_le32(struct sk_buff *skb, u32 val) -{ - put_unaligned_le32(val, skb_put(skb, 4)); -} - -static inline void mt76x0_dma_skb_wrap_cmd(struct sk_buff *skb, - u8 seq, enum mcu_cmd cmd) -{ - WARN_ON(mt76x0_dma_skb_wrap(skb, CPU_TX_PORT, DMA_COMMAND, - FIELD_PREP(MT_TXD_CMD_SEQ, seq) | - FIELD_PREP(MT_TXD_CMD_TYPE, cmd))); -} - -static inline void trace_mt76x0_mcu_msg_send_cs(struct mt76_dev *dev, - struct sk_buff *skb, bool need_resp) -{ - u32 i, csum = 0; - - for (i = 0; i < skb->len / 4; i++) - csum ^= get_unaligned_le32(skb->data + i * 4); - - trace_mt76x0_mcu_msg_send(dev, skb, csum, need_resp); -} - -static struct sk_buff * -mt76x0_mcu_msg_alloc(struct mt76x0_dev *dev, const void *data, int len) -{ - struct sk_buff *skb; - - WARN_ON(len % 4); /* if length is not divisible by 4 we need to pad */ - - skb = alloc_skb(len + MT_DMA_HDR_LEN + 4, GFP_KERNEL); - if (skb) { - skb_reserve(skb, MT_DMA_HDR_LEN); - memcpy(skb_put(skb, len), data, len); - } - return skb; -} - -static void mt76x0_read_resp_regs(struct mt76x0_dev *dev, int len) -{ - int i; - int n = dev->mcu.reg_pairs_len; - u8 *buf = dev->mcu.resp.buf; - - buf += 4; - len -= 8; - - if (dev->mcu.burst_read) { - u32 reg = dev->mcu.reg_pairs[0].reg - dev->mcu.reg_base; - - WARN_ON_ONCE(len/4 != n); - for (i = 0; i < n; i++) { - u32 val = get_unaligned_le32(buf + 4*i); - - dev->mcu.reg_pairs[i].reg = reg++; - dev->mcu.reg_pairs[i].value = val; - } - } else { - WARN_ON_ONCE(len/8 != n); - for (i = 0; i < n; i++) { - u32 reg = get_unaligned_le32(buf + 8*i) - dev->mcu.reg_base; - u32 val = get_unaligned_le32(buf + 8*i + 4); - - WARN_ON_ONCE(dev->mcu.reg_pairs[i].reg != reg); - dev->mcu.reg_pairs[i].value = val; - } - } -} - -static int mt76x0_mcu_wait_resp(struct mt76x0_dev *dev, u8 seq) -{ - struct urb *urb = dev->mcu.resp.urb; - u32 rxfce; - int urb_status, ret, try = 5; - - while (try--) { - if (!wait_for_completion_timeout(&dev->mcu.resp_cmpl, - msecs_to_jiffies(300))) { - dev_warn(dev->mt76.dev, "Warning: %s retrying\n", __func__); - continue; - } - - /* Make copies of important data before reusing the urb */ - rxfce = get_unaligned_le32(dev->mcu.resp.buf); - urb_status = urb->status * mt76x0_urb_has_error(urb); - - if (urb_status == 0 && dev->mcu.reg_pairs) - mt76x0_read_resp_regs(dev, urb->actual_length); - - ret = mt76x0_usb_submit_buf(dev, USB_DIR_IN, MT_EP_IN_CMD_RESP, - &dev->mcu.resp, GFP_KERNEL, - mt76x0_complete_urb, - &dev->mcu.resp_cmpl); - if (ret) - return ret; - - if (urb_status) - dev_err(dev->mt76.dev, "Error: MCU resp urb failed:%d\n", - urb_status); - - if (FIELD_GET(MT_RXD_CMD_INFO_CMD_SEQ, rxfce) == seq && - FIELD_GET(MT_RXD_CMD_INFO_EVT_TYPE, rxfce) == CMD_DONE) - return 0; - - dev_err(dev->mt76.dev, "Error: MCU resp evt:%lx seq:%hhx-%lx!\n", - FIELD_GET(MT_RXD_CMD_INFO_EVT_TYPE, rxfce), - seq, FIELD_GET(MT_RXD_CMD_INFO_CMD_SEQ, rxfce)); - } - - dev_err(dev->mt76.dev, "Error: %s timed out\n", __func__); - return -ETIMEDOUT; -} - -static int -__mt76x0_mcu_msg_send(struct mt76x0_dev *dev, struct sk_buff *skb, - enum mcu_cmd 
cmd, bool wait_resp) -{ - struct usb_device *usb_dev = mt76x0_to_usb_dev(dev); - unsigned cmd_pipe = usb_sndbulkpipe(usb_dev, - dev->out_ep[MT_EP_OUT_INBAND_CMD]); - int sent, ret; - u8 seq = 0; - - if (wait_resp) - while (!seq) - seq = ++dev->mcu.msg_seq & 0xf; - - mt76x0_dma_skb_wrap_cmd(skb, seq, cmd); - - if (dev->mcu.resp_cmpl.done) - dev_err(dev->mt76.dev, "Error: MCU response pre-completed!\n"); - - trace_mt76x0_mcu_msg_send_cs(&dev->mt76, skb, wait_resp); - trace_mt76x0_submit_urb_sync(&dev->mt76, cmd_pipe, skb->len); - - ret = usb_bulk_msg(usb_dev, cmd_pipe, skb->data, skb->len, &sent, 500); - if (ret) { - dev_err(dev->mt76.dev, "Error: send MCU cmd failed:%d\n", ret); - goto out; - } - if (sent != skb->len) - dev_err(dev->mt76.dev, "Error: %s sent != skb->len\n", __func__); - - if (wait_resp) - ret = mt76x0_mcu_wait_resp(dev, seq); - -out: - return ret; -} - -static int -mt76x0_mcu_msg_send(struct mt76x0_dev *dev, struct sk_buff *skb, - enum mcu_cmd cmd, bool wait_resp) -{ - int ret; - - if (test_bit(MT76_REMOVED, &dev->mt76.state)) - return 0; - - mutex_lock(&dev->mcu.mutex); - ret = __mt76x0_mcu_msg_send(dev, skb, cmd, wait_resp); - mutex_unlock(&dev->mcu.mutex); - - consume_skb(skb); - - return ret; -} - -int mt76x0_mcu_function_select(struct mt76x0_dev *dev, - enum mcu_function func, u32 val) -{ - struct sk_buff *skb; - struct { - __le32 id; - __le32 value; - } __packed __aligned(4) msg = { - .id = cpu_to_le32(func), - .value = cpu_to_le32(val), - }; - - skb = mt76x0_mcu_msg_alloc(dev, &msg, sizeof(msg)); - if (!skb) - return -ENOMEM; - return mt76x0_mcu_msg_send(dev, skb, CMD_FUN_SET_OP, func == 5); -} - -int -mt76x0_mcu_calibrate(struct mt76x0_dev *dev, enum mcu_calibrate cal, u32 val) -{ - struct sk_buff *skb; - struct { - __le32 id; - __le32 value; - } __packed __aligned(4) msg = { - .id = cpu_to_le32(cal), - .value = cpu_to_le32(val), - }; - - skb = mt76x0_mcu_msg_alloc(dev, &msg, sizeof(msg)); - if (!skb) - return -ENOMEM; - return mt76x0_mcu_msg_send(dev, skb, CMD_CALIBRATION_OP, true); -} - -int mt76x0_write_reg_pairs(struct mt76x0_dev *dev, u32 base, - const struct mt76_reg_pair *data, int n) -{ - const int max_vals_per_cmd = INBAND_PACKET_MAX_LEN / 8; - struct sk_buff *skb; - int cnt, i, ret; - - if (!n) - return 0; - - cnt = min(max_vals_per_cmd, n); - - skb = alloc_skb(cnt * 8 + MT_DMA_HDR_LEN + 4, GFP_KERNEL); - if (!skb) - return -ENOMEM; - skb_reserve(skb, MT_DMA_HDR_LEN); - - for (i = 0; i < cnt; i++) { - skb_put_le32(skb, base + data[i].reg); - skb_put_le32(skb, data[i].value); - } - - ret = mt76x0_mcu_msg_send(dev, skb, CMD_RANDOM_WRITE, cnt == n); - if (ret) - return ret; - - return mt76x0_write_reg_pairs(dev, base, data + cnt, n - cnt); -} - -int mt76x0_read_reg_pairs(struct mt76x0_dev *dev, u32 base, - struct mt76_reg_pair *data, int n) -{ - const int max_vals_per_cmd = INBAND_PACKET_MAX_LEN / 8; - struct sk_buff *skb; - int cnt, i, ret; - - if (!n) - return 0; - - cnt = min(max_vals_per_cmd, n); - if (cnt != n) - return -EINVAL; - - skb = alloc_skb(cnt * 8 + MT_DMA_HDR_LEN + 4, GFP_KERNEL); - if (!skb) - return -ENOMEM; - skb_reserve(skb, MT_DMA_HDR_LEN); - - for (i = 0; i < cnt; i++) { - skb_put_le32(skb, base + data[i].reg); - skb_put_le32(skb, data[i].value); - } - - mutex_lock(&dev->mcu.mutex); - - dev->mcu.reg_pairs = data; - dev->mcu.reg_pairs_len = n; - dev->mcu.reg_base = base; - dev->mcu.burst_read = false; - - ret = __mt76x0_mcu_msg_send(dev, skb, CMD_RANDOM_READ, true); - - dev->mcu.reg_pairs = NULL; - - mutex_unlock(&dev->mcu.mutex); - - 
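/*
 * mt76x0_write_reg_pairs() above fits at most INBAND_PACKET_MAX_LEN / 8
 * register/value pairs into one in-band MCU command (each pair is two
 * little-endian 32-bit words) and tail-recurses on the remainder; only the
 * final chunk asks for a response (cnt == n). The same split, written
 * iteratively as a sketch around a hypothetical send_chunk() transport:
 *
 *	while (n > 0) {
 *		int cnt = min(max_vals_per_cmd, n);
 *		int ret = send_chunk(data, cnt, cnt == n);
 *
 *		if (ret)
 *			return ret;
 *		data += cnt;
 *		n -= cnt;
 *	}
 *	return 0;
 */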
consume_skb(skb); - - return ret; - -} - -int mt76x0_burst_write_regs(struct mt76x0_dev *dev, u32 offset, - const u32 *data, int n) -{ - const int max_regs_per_cmd = INBAND_PACKET_MAX_LEN / 4 - 1; - struct sk_buff *skb; - int cnt, i, ret; - - if (!n) - return 0; - - cnt = min(max_regs_per_cmd, n); - - skb = alloc_skb(cnt * 4 + MT_DMA_HDR_LEN + 4, GFP_KERNEL); - if (!skb) - return -ENOMEM; - skb_reserve(skb, MT_DMA_HDR_LEN); - - skb_put_le32(skb, MT_MCU_MEMMAP_WLAN + offset); - for (i = 0; i < cnt; i++) - skb_put_le32(skb, data[i]); - - ret = mt76x0_mcu_msg_send(dev, skb, CMD_BURST_WRITE, cnt == n); - if (ret) - return ret; - - return mt76x0_burst_write_regs(dev, offset + cnt * 4, - data + cnt, n - cnt); -} - -#if 0 -static int mt76x0_burst_read_regs(struct mt76x0_dev *dev, u32 base, - struct mt76_reg_pair *data, int n) -{ - const int max_vals_per_cmd = INBAND_PACKET_MAX_LEN / 4 - 1; - struct sk_buff *skb; - int cnt, ret; - - if (!n) - return 0; - - cnt = min(max_vals_per_cmd, n); - if (cnt != n) - return -EINVAL; - - skb = alloc_skb(cnt * 4 + MT_DMA_HDR_LEN + 4, GFP_KERNEL); - if (!skb) - return -ENOMEM; - skb_reserve(skb, MT_DMA_HDR_LEN); - - skb_put_le32(skb, base + data[0].reg); - skb_put_le32(skb, n); - - mutex_lock(&dev->mcu.mutex); - - dev->mcu.reg_pairs = data; - dev->mcu.reg_pairs_len = n; - dev->mcu.reg_base = base; - dev->mcu.burst_read = true; - - ret = __mt76x0_mcu_msg_send(dev, skb, CMD_BURST_READ, true); - - dev->mcu.reg_pairs = NULL; - - mutex_unlock(&dev->mcu.mutex); - - consume_skb(skb); - - return ret; -} -#endif - -struct mt76_fw_header { - __le32 ilm_len; - __le32 dlm_len; - __le16 build_ver; - __le16 fw_ver; - u8 pad[4]; - char build_time[16]; -}; - -struct mt76_fw { - struct mt76_fw_header hdr; - u8 ivb[MT_MCU_IVB_SIZE]; - u8 ilm[]; -}; - -static int __mt76x0_dma_fw(struct mt76x0_dev *dev, - const struct mt76x0_dma_buf *dma_buf, - const void *data, u32 len, u32 dst_addr) -{ - DECLARE_COMPLETION_ONSTACK(cmpl); - struct mt76x0_dma_buf buf = *dma_buf; /* we need to fake length */ - __le32 reg; - u32 val; - int ret; - - reg = cpu_to_le32(FIELD_PREP(MT_TXD_INFO_TYPE, DMA_COMMAND) | - FIELD_PREP(MT_TXD_INFO_D_PORT, CPU_TX_PORT) | - FIELD_PREP(MT_TXD_INFO_LEN, len)); - memcpy(buf.buf, ®, sizeof(reg)); - memcpy(buf.buf + sizeof(reg), data, len); - memset(buf.buf + sizeof(reg) + len, 0, 8); - - ret = mt76x0_vendor_single_wr(dev, MT_VEND_WRITE_FCE, - MT_FCE_DMA_ADDR, dst_addr); - if (ret) - return ret; - len = roundup(len, 4); - ret = mt76x0_vendor_single_wr(dev, MT_VEND_WRITE_FCE, - MT_FCE_DMA_LEN, len << 16); - if (ret) - return ret; - - buf.len = MT_DMA_HDR_LEN + len + 4; - ret = mt76x0_usb_submit_buf(dev, USB_DIR_OUT, MT_EP_OUT_INBAND_CMD, - &buf, GFP_KERNEL, - mt76x0_complete_urb, &cmpl); - if (ret) - return ret; - - if (!wait_for_completion_timeout(&cmpl, msecs_to_jiffies(1000))) { - dev_err(dev->mt76.dev, "Error: firmware upload timed out\n"); - usb_kill_urb(buf.urb); - return -ETIMEDOUT; - } - if (mt76x0_urb_has_error(buf.urb)) { - dev_err(dev->mt76.dev, "Error: firmware upload urb failed:%d\n", - buf.urb->status); - return buf.urb->status; - } - - val = mt76_rr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX); - val++; - mt76_wr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX, val); - - msleep(5); - - return 0; -} - -static int -mt76x0_dma_fw(struct mt76x0_dev *dev, struct mt76x0_dma_buf *dma_buf, - const void *data, int len, u32 dst_addr) -{ - int n, ret; - - if (len == 0) - return 0; - - n = min(MCU_FW_URB_MAX_PAYLOAD, len); - ret = __mt76x0_dma_fw(dev, dma_buf, data, n, dst_addr); - if 
(ret) - return ret; - -#if 0 - if (!mt76_poll_msec(dev, MT_MCU_COM_REG1, BIT(31), BIT(31), 500)) - return -ETIMEDOUT; -#endif - - return mt76x0_dma_fw(dev, dma_buf, data + n, len - n, dst_addr + n); -} - -static int -mt76x0_upload_firmware(struct mt76x0_dev *dev, const struct mt76_fw *fw) -{ - struct mt76x0_dma_buf dma_buf; - void *ivb; - u32 ilm_len, dlm_len; - int i, ret; - - ivb = kmemdup(fw->ivb, sizeof(fw->ivb), GFP_KERNEL); - if (!ivb) - return -ENOMEM; - if (mt76x0_usb_alloc_buf(dev, MCU_FW_URB_SIZE, &dma_buf)) { - ret = -ENOMEM; - goto error; - } - - ilm_len = le32_to_cpu(fw->hdr.ilm_len) - sizeof(fw->ivb); - dev_dbg(dev->mt76.dev, "loading FW - ILM %u + IVB %zu\n", - ilm_len, sizeof(fw->ivb)); - ret = mt76x0_dma_fw(dev, &dma_buf, fw->ilm, ilm_len, sizeof(fw->ivb)); - if (ret) - goto error; - - dlm_len = le32_to_cpu(fw->hdr.dlm_len); - dev_dbg(dev->mt76.dev, "loading FW - DLM %u\n", dlm_len); - ret = mt76x0_dma_fw(dev, &dma_buf, fw->ilm + ilm_len, - dlm_len, MT_MCU_DLM_OFFSET); - if (ret) - goto error; - - ret = mt76x0_vendor_request(dev, MT_VEND_DEV_MODE, USB_DIR_OUT, - 0x12, 0, ivb, sizeof(fw->ivb)); - if (ret < 0) - goto error; - ret = 0; - - for (i = 100; i && !firmware_running(dev); i--) - msleep(10); - if (!i) { - ret = -ETIMEDOUT; - goto error; - } - - dev_dbg(dev->mt76.dev, "Firmware running!\n"); -error: - kfree(ivb); - mt76x0_usb_free_buf(dev, &dma_buf); - - return ret; -} - -static int mt76x0_load_firmware(struct mt76x0_dev *dev) -{ - const struct firmware *fw; - const struct mt76_fw_header *hdr; - int len, ret; - u32 val; - - mt76_wr(dev, MT_USB_DMA_CFG, (MT_USB_DMA_CFG_RX_BULK_EN | - MT_USB_DMA_CFG_TX_BULK_EN)); - - if (firmware_running(dev)) - return 0; - - ret = request_firmware(&fw, MT7610_FIRMWARE, dev->mt76.dev); - if (ret) - return ret; - - if (!fw || !fw->data || fw->size < sizeof(*hdr)) - goto err_inv_fw; - - hdr = (const struct mt76_fw_header *) fw->data; - - if (le32_to_cpu(hdr->ilm_len) <= MT_MCU_IVB_SIZE) - goto err_inv_fw; - - len = sizeof(*hdr); - len += le32_to_cpu(hdr->ilm_len); - len += le32_to_cpu(hdr->dlm_len); - - if (fw->size != len) - goto err_inv_fw; - - val = le16_to_cpu(hdr->fw_ver); - dev_dbg(dev->mt76.dev, - "Firmware Version: %d.%d.%02d Build: %x Build time: %.16s\n", - (val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf, - le16_to_cpu(hdr->build_ver), hdr->build_time); - - len = le32_to_cpu(hdr->ilm_len); - - mt76_wr(dev, 0x1004, 0x2c); - - mt76_set(dev, MT_USB_DMA_CFG, (MT_USB_DMA_CFG_RX_BULK_EN | - MT_USB_DMA_CFG_TX_BULK_EN) | - FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, 0x20)); - mt76x0_vendor_reset(dev); - msleep(5); -/* - mt76x0_rmw(dev, MT_PBF_CFG, 0, (MT_PBF_CFG_TX0Q_EN | - MT_PBF_CFG_TX1Q_EN | - MT_PBF_CFG_TX2Q_EN | - MT_PBF_CFG_TX3Q_EN)); -*/ - - mt76_wr(dev, MT_FCE_PSE_CTRL, 1); - - /* FCE tx_fs_base_ptr */ - mt76_wr(dev, MT_TX_CPU_FROM_FCE_BASE_PTR, 0x400230); - /* FCE tx_fs_max_cnt */ - mt76_wr(dev, MT_TX_CPU_FROM_FCE_MAX_COUNT, 1); - /* FCE pdma enable */ - mt76_wr(dev, MT_FCE_PDMA_GLOBAL_CONF, 0x44); - /* FCE skip_fs_en */ - mt76_wr(dev, MT_FCE_SKIP_FS, 3); - - val = mt76_rr(dev, MT_USB_DMA_CFG); - val |= MT_USB_DMA_CFG_TX_WL_DROP; - mt76_wr(dev, MT_USB_DMA_CFG, val); - val &= ~MT_USB_DMA_CFG_TX_WL_DROP; - mt76_wr(dev, MT_USB_DMA_CFG, val); - - ret = mt76x0_upload_firmware(dev, (const struct mt76_fw *)fw->data); - release_firmware(fw); - - mt76_wr(dev, MT_FCE_PSE_CTRL, 1); - - return ret; - -err_inv_fw: - dev_err(dev->mt76.dev, "Invalid firmware image\n"); - release_firmware(fw); - return -ENOENT; -} - -int mt76x0_mcu_init(struct 
mt76x0_dev *dev) -{ - int ret; - - mutex_init(&dev->mcu.mutex); - - ret = mt76x0_load_firmware(dev); - if (ret) - return ret; - - set_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state); - - return 0; -} - -int mt76x0_mcu_cmd_init(struct mt76x0_dev *dev) -{ - int ret; - - ret = mt76x0_mcu_function_select(dev, Q_SELECT, 1); - if (ret) - return ret; - - init_completion(&dev->mcu.resp_cmpl); - if (mt76x0_usb_alloc_buf(dev, MCU_RESP_URB_SIZE, &dev->mcu.resp)) { - mt76x0_usb_free_buf(dev, &dev->mcu.resp); - return -ENOMEM; - } - - ret = mt76x0_usb_submit_buf(dev, USB_DIR_IN, MT_EP_IN_CMD_RESP, - &dev->mcu.resp, GFP_KERNEL, - mt76x0_complete_urb, &dev->mcu.resp_cmpl); - if (ret) { - mt76x0_usb_free_buf(dev, &dev->mcu.resp); - return ret; - } - - return 0; -} - -void mt76x0_mcu_cmd_deinit(struct mt76x0_dev *dev) -{ - usb_kill_urb(dev->mcu.resp.urb); - mt76x0_usb_free_buf(dev, &dev->mcu.resp); -} diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.h b/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.h index 8c2f77f4c3f5..b66e70f6cd89 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.h @@ -15,65 +15,18 @@ #ifndef __MT76X0U_MCU_H #define __MT76X0U_MCU_H -struct mt76x0_dev; +#include "../mt76x02_mcu.h" -/* Register definitions */ -#define MT_MCU_RESET_CTL 0x070C -#define MT_MCU_INT_LEVEL 0x0718 -#define MT_MCU_COM_REG0 0x0730 -#define MT_MCU_COM_REG1 0x0734 -#define MT_MCU_COM_REG2 0x0738 -#define MT_MCU_COM_REG3 0x073C +struct mt76x02_dev; #define MT_MCU_IVB_SIZE 0x40 #define MT_MCU_DLM_OFFSET 0x80000 -#define MT_MCU_MEMMAP_WLAN 0x00410000 /* We use same space for BBP as for MAC regs * #define MT_MCU_MEMMAP_BBP 0x40000000 */ #define MT_MCU_MEMMAP_RF 0x80000000 -#define INBAND_PACKET_MAX_LEN 192 - -enum mcu_cmd { - CMD_FUN_SET_OP = 1, - CMD_LOAD_CR = 2, - CMD_INIT_GAIN_OP = 3, - CMD_DYNC_VGA_OP = 6, - CMD_TDLS_CH_SW = 7, - CMD_BURST_WRITE = 8, - CMD_READ_MODIFY_WRITE = 9, - CMD_RANDOM_READ = 10, - CMD_BURST_READ = 11, - CMD_RANDOM_WRITE = 12, - CMD_LED_MODE_OP = 16, - CMD_POWER_SAVING_OP = 20, - CMD_WOW_CONFIG = 21, - CMD_WOW_QUERY = 22, - CMD_WOW_FEATURE = 24, - CMD_CARRIER_DETECT_OP = 28, - CMD_RADOR_DETECT_OP = 29, - CMD_SWITCH_CHANNEL_OP = 30, - CMD_CALIBRATION_OP = 31, - CMD_BEACON_OP = 32, - CMD_ANTENNA_OP = 33, -}; - -enum mcu_function { - Q_SELECT = 1, - BW_SETTING = 2, - ATOMIC_TSSI_SETTING = 5, -}; - -enum mcu_power_mode { - RADIO_OFF = 0x30, - RADIO_ON = 0x31, - RADIO_OFF_AUTO_WAKEUP = 0x32, - RADIO_OFF_ADVANCE = 0x33, - RADIO_ON_ADVANCE = 0x34, -}; - enum mcu_calibrate { MCU_CAL_R = 1, MCU_CAL_RXDCOC, @@ -88,14 +41,11 @@ enum mcu_calibrate { MCU_CAL_TX_GROUP_DELAY, }; -int mt76x0_mcu_init(struct mt76x0_dev *dev); -int mt76x0_mcu_cmd_init(struct mt76x0_dev *dev); -void mt76x0_mcu_cmd_deinit(struct mt76x0_dev *dev); - -int -mt76x0_mcu_calibrate(struct mt76x0_dev *dev, enum mcu_calibrate cal, u32 val); - -int -mt76x0_mcu_function_select(struct mt76x0_dev *dev, enum mcu_function func, u32 val); +int mt76x0e_mcu_init(struct mt76x02_dev *dev); +int mt76x0u_mcu_init(struct mt76x02_dev *dev); +static inline int mt76x0_firmware_running(struct mt76x02_dev *dev) +{ + return mt76_rr(dev, MT_MCU_COM_REG0) == 1; +} #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h index fc9857f61771..1bff2be45a13 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h @@ -25,306 +25,60 @@ #include <net/mac80211.h> #include 
<linux/debugfs.h> -#include "../mt76.h" -#include "regs.h" +#include "../mt76x02.h" +#include "eeprom.h" #define MT_CALIBRATE_INTERVAL (4 * HZ) -#define MT_FREQ_CAL_INIT_DELAY (30 * HZ) -#define MT_FREQ_CAL_CHECK_INTERVAL (10 * HZ) -#define MT_FREQ_CAL_ADJ_INTERVAL (HZ / 2) - -#define MT_BBP_REG_VERSION 0x00 - #define MT_USB_AGGR_SIZE_LIMIT 21 /* * 1024B */ #define MT_USB_AGGR_TIMEOUT 0x80 /* * 33ns */ -#define MT_RX_ORDER 3 -#define MT_RX_URB_SIZE (PAGE_SIZE << MT_RX_ORDER) - -struct mt76x0_dma_buf { - struct urb *urb; - void *buf; - dma_addr_t dma; - size_t len; -}; - -struct mt76x0_mcu { - struct mutex mutex; - - u8 msg_seq; - - struct mt76x0_dma_buf resp; - struct completion resp_cmpl; - - struct mt76_reg_pair *reg_pairs; - unsigned int reg_pairs_len; - u32 reg_base; - bool burst_read; -}; - -struct mac_stats { - u64 rx_stat[6]; - u64 tx_stat[6]; - u64 aggr_stat[2]; - u64 aggr_n[32]; - u64 zero_len_del[2]; -}; - -#define N_RX_ENTRIES 16 -struct mt76x0_rx_queue { - struct mt76x0_dev *dev; - - struct mt76x0_dma_buf_rx { - struct urb *urb; - struct page *p; - } e[N_RX_ENTRIES]; - - unsigned int start; - unsigned int end; - unsigned int entries; - unsigned int pending; -}; - -#define N_TX_ENTRIES 64 - -struct mt76x0_tx_queue { - struct mt76x0_dev *dev; - - struct mt76x0_dma_buf_tx { - struct urb *urb; - struct sk_buff *skb; - } e[N_TX_ENTRIES]; - - unsigned int start; - unsigned int end; - unsigned int entries; - unsigned int used; - unsigned int fifo_seq; -}; - -/* WCID allocation: - * 0: mcast wcid - * 1: bssid wcid - * 1...: STAs - * ...7e: group wcids - * 7f: reserved - */ -#define N_WCIDS 128 -#define GROUP_WCID(idx) (254 - idx) - -struct mt76x0_eeprom_params; - -#define MT_EE_TEMPERATURE_SLOPE 39 -#define MT_FREQ_OFFSET_INVALID -128 - -/* addr req mask */ -#define MT_VEND_TYPE_EEPROM BIT(31) -#define MT_VEND_TYPE_CFG BIT(30) -#define MT_VEND_TYPE_MASK (MT_VEND_TYPE_EEPROM | MT_VEND_TYPE_CFG) - -#define MT_VEND_ADDR(type, n) (MT_VEND_TYPE_##type | (n)) - -enum mt_bw { - MT_BW_20, - MT_BW_40, -}; - -/** - * struct mt76x0_dev - adapter structure - * @lock: protects @wcid->tx_rate. - * @mac_lock: locks out mac80211's tx status and rx paths. - * @tx_lock: protects @tx_q and changes of MT76_STATE_*_STATS - * flags in @state. - * @rx_lock: protects @rx_q. - * @con_mon_lock: protects @ap_bssid, @bcn_*, @avg_rssi. - * @mutex: ensures exclusive access from mac80211 callbacks. - * @reg_atomic_mutex: ensures atomicity of indirect register accesses - * (accesses to RF and BBP). - * @hw_atomic_mutex: ensures exclusive access to HW during critical - * operations (power management, channel switch). 
- */ -struct mt76x0_dev { - struct mt76_dev mt76; /* must be first */ - - struct mutex mutex; - - struct mutex usb_ctrl_mtx; - u8 data[32]; - - struct tasklet_struct rx_tasklet; - struct tasklet_struct tx_tasklet; - - u8 out_ep[__MT_EP_OUT_MAX]; - u16 out_max_packet; - u8 in_ep[__MT_EP_IN_MAX]; - u16 in_max_packet; - - unsigned long wcid_mask[DIV_ROUND_UP(N_WCIDS, BITS_PER_LONG)]; - unsigned long vif_mask; - - struct mt76x0_mcu mcu; - - struct delayed_work cal_work; - struct delayed_work mac_work; - - struct workqueue_struct *stat_wq; - struct delayed_work stat_work; - struct mt76_wcid *mon_wcid; - struct mt76_wcid __rcu *wcid[N_WCIDS]; - - spinlock_t mac_lock; - - const u16 *beacon_offsets; - - u8 macaddr[ETH_ALEN]; - struct mt76x0_eeprom_params *ee; - - struct mutex reg_atomic_mutex; - struct mutex hw_atomic_mutex; - - u32 rxfilter; - u32 debugfs_reg; - - /* TX */ - spinlock_t tx_lock; - struct mt76x0_tx_queue *tx_q; - struct sk_buff_head tx_skb_done; - - atomic_t avg_ampdu_len; - - /* RX */ - spinlock_t rx_lock; - struct mt76x0_rx_queue rx_q; - - /* Connection monitoring things */ - spinlock_t con_mon_lock; - u8 ap_bssid[ETH_ALEN]; - - s8 bcn_freq_off; - u8 bcn_phy_mode; - - int avg_rssi; /* starts at 0 and converges */ - - u8 agc_save; - u16 chainmask; - - struct mac_stats stats; -}; - -struct mt76x0_wcid { - u8 idx; - u8 hw_key_idx; - - u16 tx_rate; - bool tx_rate_set; - u8 tx_rate_nss; -}; - -struct mt76_vif { - u8 idx; - - struct mt76_wcid group_wcid; -}; - -struct mt76_tx_status { - u8 valid:1; - u8 success:1; - u8 aggr:1; - u8 ack_req:1; - u8 is_probe:1; - u8 wcid; - u8 pktid; - u8 retry; - u16 rate; -} __packed __aligned(2); - -struct mt76_sta { - struct mt76_wcid wcid; - struct mt76_tx_status status; - int n_frames; - u16 agg_ssn[IEEE80211_NUM_TIDS]; -}; - -struct mt76_reg_pair { - u32 reg; - u32 value; -}; - -struct mt76x0_rxwi; - -extern const struct ieee80211_ops mt76x0_ops; - -static inline bool is_mt7610e(struct mt76x0_dev *dev) +static inline bool is_mt7610e(struct mt76x02_dev *dev) { /* TODO */ return false; } -void mt76x0_init_debugfs(struct mt76x0_dev *dev); - -int mt76x0_wait_asic_ready(struct mt76x0_dev *dev); - -/* Compatibility with mt76 */ -#define mt76_rmw_field(_dev, _reg, _field, _val) \ - mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val)) - -int mt76x0_write_reg_pairs(struct mt76x0_dev *dev, u32 base, - const struct mt76_reg_pair *data, int len); -int mt76x0_read_reg_pairs(struct mt76x0_dev *dev, u32 base, - struct mt76_reg_pair *data, int len); -int mt76x0_burst_write_regs(struct mt76x0_dev *dev, u32 offset, - const u32 *data, int n); -void mt76x0_addr_wr(struct mt76x0_dev *dev, const u32 offset, const u8 *addr); +void mt76x0_init_debugfs(struct mt76x02_dev *dev); /* Init */ -struct mt76x0_dev *mt76x0_alloc_device(struct device *dev); -int mt76x0_init_hardware(struct mt76x0_dev *dev); -int mt76x0_register_device(struct mt76x0_dev *dev); -void mt76x0_cleanup(struct mt76x0_dev *dev); -void mt76x0_chip_onoff(struct mt76x0_dev *dev, bool enable, bool reset); - -int mt76x0_mac_start(struct mt76x0_dev *dev); -void mt76x0_mac_stop(struct mt76x0_dev *dev); +struct mt76x02_dev * +mt76x0_alloc_device(struct device *pdev, + const struct mt76_driver_ops *drv_ops, + const struct ieee80211_ops *ops); +int mt76x0_init_hardware(struct mt76x02_dev *dev); +int mt76x0_register_device(struct mt76x02_dev *dev); +void mt76x0_chip_onoff(struct mt76x02_dev *dev, bool enable, bool reset); + +int mt76x0_mac_start(struct mt76x02_dev *dev); +void mt76x0_mac_stop(struct mt76x02_dev 
*dev); + +int mt76x0_config(struct ieee80211_hw *hw, u32 changed); +void mt76x0_bss_info_changed(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *info, u32 changed); +void mt76x0_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + const u8 *mac_addr); +void mt76x0_sw_scan_complete(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); +int mt76x0_set_rts_threshold(struct ieee80211_hw *hw, u32 value); /* PHY */ -void mt76x0_phy_init(struct mt76x0_dev *dev); -int mt76x0_wait_bbp_ready(struct mt76x0_dev *dev); -void mt76x0_agc_save(struct mt76x0_dev *dev); -void mt76x0_agc_restore(struct mt76x0_dev *dev); -int mt76x0_phy_set_channel(struct mt76x0_dev *dev, +void mt76x0_phy_init(struct mt76x02_dev *dev); +int mt76x0_wait_bbp_ready(struct mt76x02_dev *dev); +void mt76x0_agc_save(struct mt76x02_dev *dev); +void mt76x0_agc_restore(struct mt76x02_dev *dev); +int mt76x0_phy_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef); -void mt76x0_phy_recalibrate_after_assoc(struct mt76x0_dev *dev); -int mt76x0_phy_get_rssi(struct mt76x0_dev *dev, struct mt76x0_rxwi *rxwi); -void mt76x0_phy_con_cal_onoff(struct mt76x0_dev *dev, - struct ieee80211_bss_conf *info); +void mt76x0_phy_recalibrate_after_assoc(struct mt76x02_dev *dev); +void mt76x0_phy_set_txpower(struct mt76x02_dev *dev); /* MAC */ void mt76x0_mac_work(struct work_struct *work); -void mt76x0_mac_set_protection(struct mt76x0_dev *dev, bool legacy_prot, +void mt76x0_mac_set_protection(struct mt76x02_dev *dev, bool legacy_prot, int ht_mode); -void mt76x0_mac_set_short_preamble(struct mt76x0_dev *dev, bool short_preamb); -void mt76x0_mac_config_tsf(struct mt76x0_dev *dev, bool enable, int interval); -void -mt76x0_mac_wcid_setup(struct mt76x0_dev *dev, u8 idx, u8 vif_idx, u8 *mac); -void mt76x0_mac_set_ampdu_factor(struct mt76x0_dev *dev); - -/* TX */ -void mt76x0_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, - struct sk_buff *skb); -int mt76x0_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - u16 queue, const struct ieee80211_tx_queue_params *params); -void mt76x0_tx_status(struct mt76x0_dev *dev, struct sk_buff *skb); -void mt76x0_tx_stat(struct work_struct *work); - -/* util */ -void mt76x0_remove_hdr_pad(struct sk_buff *skb); -int mt76x0_insert_hdr_pad(struct sk_buff *skb); - -int mt76x0_dma_init(struct mt76x0_dev *dev); -void mt76x0_dma_cleanup(struct mt76x0_dev *dev); - -int mt76x0_dma_enqueue_tx(struct mt76x0_dev *dev, struct sk_buff *skb, - struct mt76_wcid *wcid, int hw_q); +void mt76x0_mac_set_short_preamble(struct mt76x02_dev *dev, bool short_preamb); +void mt76x0_mac_config_tsf(struct mt76x02_dev *dev, bool enable, int interval); +void mt76x0_mac_set_ampdu_factor(struct mt76x02_dev *dev); #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c new file mode 100644 index 000000000000..87997cddf0d6 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c @@ -0,0 +1,200 @@ +/* + * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> + +#include "mt76x0.h" +#include "mcu.h" + +static int mt76x0e_start(struct ieee80211_hw *hw) +{ + struct mt76x02_dev *dev = hw->priv; + + mutex_lock(&dev->mt76.mutex); + + mt76x02_mac_start(dev); + ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mac_work, + MT_CALIBRATE_INTERVAL); + ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work, + MT_CALIBRATE_INTERVAL); + set_bit(MT76_STATE_RUNNING, &dev->mt76.state); + + mutex_unlock(&dev->mt76.mutex); + + return 0; +} + +static void mt76x0e_stop_hw(struct mt76x02_dev *dev) +{ + cancel_delayed_work_sync(&dev->cal_work); + cancel_delayed_work_sync(&dev->mac_work); + + if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_BUSY, + 0, 1000)) + dev_warn(dev->mt76.dev, "TX DMA did not stop\n"); + mt76_clear(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_EN); + + mt76x0_mac_stop(dev); + + if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_RX_DMA_BUSY, + 0, 1000)) + dev_warn(dev->mt76.dev, "TX DMA did not stop\n"); + mt76_clear(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_RX_DMA_EN); +} + +static void mt76x0e_stop(struct ieee80211_hw *hw) +{ + struct mt76x02_dev *dev = hw->priv; + + mutex_lock(&dev->mt76.mutex); + clear_bit(MT76_STATE_RUNNING, &dev->mt76.state); + mt76x0e_stop_hw(dev); + mutex_unlock(&dev->mt76.mutex); +} + +static const struct ieee80211_ops mt76x0e_ops = { + .tx = mt76x02_tx, + .start = mt76x0e_start, + .stop = mt76x0e_stop, + .config = mt76x0_config, + .add_interface = mt76x02_add_interface, + .remove_interface = mt76x02_remove_interface, + .configure_filter = mt76x02_configure_filter, +}; + +static int mt76x0e_register_device(struct mt76x02_dev *dev) +{ + int err; + + mt76x0_chip_onoff(dev, true, false); + if (!mt76x02_wait_for_mac(&dev->mt76)) + return -ETIMEDOUT; + + mt76x02_dma_disable(dev); + err = mt76x0e_mcu_init(dev); + if (err < 0) + return err; + + err = mt76x02_dma_init(dev); + if (err < 0) + return err; + + err = mt76x0_init_hardware(dev); + if (err < 0) + return err; + + if (mt76_chip(&dev->mt76) == 0x7610) { + u16 val; + + mt76_clear(dev, MT_COEXCFG0, BIT(0)); + val = mt76x02_eeprom_get(&dev->mt76, MT_EE_NIC_CONF_0); + if (val & MT_EE_NIC_CONF_0_PA_IO_CURRENT) { + u32 data; + + /* set external external PA I/O + * current to 16mA + */ + data = mt76_rr(dev, 0x11c); + val |= 0xc03; + mt76_wr(dev, 0x11c, val); + } + } + + mt76_clear(dev, 0x110, BIT(9)); + mt76_set(dev, MT_MAX_LEN_CFG, BIT(13)); + + return 0; +} + +static int +mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct mt76x02_dev *dev; + int ret; + + ret = pcim_enable_device(pdev); + if (ret) + return ret; + + ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev)); + if (ret) + return ret; + + pci_set_master(pdev); + + ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (ret) + return ret; + + dev = mt76x0_alloc_device(&pdev->dev, NULL, &mt76x0e_ops); + if (!dev) + return -ENOMEM; + + mt76_mmio_init(&dev->mt76, pcim_iomap_table(pdev)[0]); + + dev->mt76.rev = mt76_rr(dev, MT_ASIC_VERSION); + dev_info(dev->mt76.dev, "ASIC revision: %08x\n", dev->mt76.rev); + + ret = mt76x0e_register_device(dev); + if (ret < 0) + 
goto error; + + return 0; + +error: + ieee80211_free_hw(mt76_hw(dev)); + return ret; +} + +static void mt76x0e_cleanup(struct mt76x02_dev *dev) +{ + clear_bit(MT76_STATE_INITIALIZED, &dev->mt76.state); + mt76x0_chip_onoff(dev, false, false); + mt76x0e_stop_hw(dev); + mt76x02_dma_cleanup(dev); + mt76x02_mcu_cleanup(&dev->mt76); +} + +static void +mt76x0e_remove(struct pci_dev *pdev) +{ + struct mt76_dev *mdev = pci_get_drvdata(pdev); + struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76); + + mt76_unregister_device(mdev); + mt76x0e_cleanup(dev); + ieee80211_free_hw(mdev->hw); +} + +static const struct pci_device_id mt76x0e_device_table[] = { + { PCI_DEVICE(0x14c3, 0x7630) }, + { PCI_DEVICE(0x14c3, 0x7650) }, + { }, +}; + +MODULE_DEVICE_TABLE(pci, mt76x0e_device_table); +MODULE_LICENSE("Dual BSD/GPL"); + +static struct pci_driver mt76x0e_driver = { + .name = KBUILD_MODNAME, + .id_table = mt76x0e_device_table, + .probe = mt76x0e_probe, + .remove = mt76x0e_remove, +}; + +module_pci_driver(mt76x0e_driver); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c new file mode 100644 index 000000000000..6c66656c21f4 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c @@ -0,0 +1,146 @@ +/* + * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ +#include <linux/kernel.h> +#include <linux/firmware.h> + +#include "mt76x0.h" +#include "mcu.h" + +#define MT7610E_FIRMWARE "mediatek/mt7610e.bin" +#define MT7650E_FIRMWARE "mediatek/mt7650e.bin" + +#define MT_MCU_IVB_ADDR (MT_MCU_ILM_ADDR + 0x54000 - MT_MCU_IVB_SIZE) + +static int mt76x0e_load_firmware(struct mt76x02_dev *dev) +{ + bool is_combo_chip = mt76_chip(&dev->mt76) != 0x7610; + u32 val, ilm_len, dlm_len, offset = 0; + const struct mt76x02_fw_header *hdr; + const struct firmware *fw; + const char *firmware; + const u8 *fw_payload; + int len, err; + + if (is_combo_chip) + firmware = MT7650E_FIRMWARE; + else + firmware = MT7610E_FIRMWARE; + + err = request_firmware(&fw, firmware, dev->mt76.dev); + if (err) + return err; + + if (!fw || !fw->data || fw->size < sizeof(*hdr)) { + err = -EIO; + goto out; + } + + hdr = (const struct mt76x02_fw_header *)fw->data; + + len = sizeof(*hdr); + len += le32_to_cpu(hdr->ilm_len); + len += le32_to_cpu(hdr->dlm_len); + + if (fw->size != len) { + err = -EIO; + goto out; + } + + fw_payload = fw->data + sizeof(*hdr); + + val = le16_to_cpu(hdr->fw_ver); + dev_info(dev->mt76.dev, "Firmware Version: %d.%d.%02d\n", + (val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf); + + val = le16_to_cpu(hdr->fw_ver); + dev_dbg(dev->mt76.dev, + "Firmware Version: %d.%d.%02d Build: %x Build time: %.16s\n", + (val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf, + le16_to_cpu(hdr->build_ver), hdr->build_time); + + if (is_combo_chip && !mt76_poll(dev, MT_MCU_SEMAPHORE_00, 1, 1, 600)) { + dev_err(dev->mt76.dev, + "Could not get hardware semaphore for loading fw\n"); + err = -ETIMEDOUT; + goto out; + } + + /* upload ILM. */ + mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, 0); + ilm_len = le32_to_cpu(hdr->ilm_len); + if (is_combo_chip) { + ilm_len -= MT_MCU_IVB_SIZE; + offset = MT_MCU_IVB_SIZE; + } + dev_dbg(dev->mt76.dev, "loading FW - ILM %u\n", ilm_len); + mt76_wr_copy(dev, MT_MCU_ILM_ADDR + offset, fw_payload + offset, + ilm_len); + + /* upload IVB. */ + if (is_combo_chip) { + dev_dbg(dev->mt76.dev, "loading FW - IVB %u\n", + MT_MCU_IVB_SIZE); + mt76_wr_copy(dev, MT_MCU_IVB_ADDR, fw_payload, MT_MCU_IVB_SIZE); + } + + /* upload DLM. 
*/ + mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_DLM_OFFSET); + dlm_len = le32_to_cpu(hdr->dlm_len); + dev_dbg(dev->mt76.dev, "loading FW - DLM %u\n", dlm_len); + mt76_wr_copy(dev, MT_MCU_ILM_ADDR, + fw_payload + le32_to_cpu(hdr->ilm_len), dlm_len); + + /* trigger firmware */ + mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, 0); + if (is_combo_chip) + mt76_wr(dev, MT_MCU_INT_LEVEL, 0x3); + else + mt76_wr(dev, MT_MCU_RESET_CTL, 0x300); + + if (!mt76_poll_msec(dev, MT_MCU_COM_REG0, 1, 1, 1000)) { + dev_err(dev->mt76.dev, "Firmware failed to start\n"); + err = -ETIMEDOUT; + goto out; + } + + dev_dbg(dev->mt76.dev, "Firmware running!\n"); + +out: + if (is_combo_chip) + mt76_wr(dev, MT_MCU_SEMAPHORE_00, 0x1); + release_firmware(fw); + + return err; +} + +int mt76x0e_mcu_init(struct mt76x02_dev *dev) +{ + static const struct mt76_mcu_ops mt76x0e_mcu_ops = { + .mcu_msg_alloc = mt76x02_mcu_msg_alloc, + .mcu_send_msg = mt76x02_mcu_msg_send, + }; + int err; + + dev->mt76.mcu_ops = &mt76x0e_mcu_ops; + + err = mt76x0e_load_firmware(dev); + if (err < 0) + return err; + + set_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state); + + return 0; +} diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c index 5da7bfbe907f..4850a2db18d7 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c @@ -21,11 +21,12 @@ #include "phy.h" #include "initvals.h" #include "initvals_phy.h" +#include "../mt76x02_phy.h" #include <linux/etherdevice.h> static int -mt76x0_rf_csr_wr(struct mt76x0_dev *dev, u32 offset, u8 value) +mt76x0_rf_csr_wr(struct mt76x02_dev *dev, u32 offset, u8 value) { int ret = 0; u8 bank, reg; @@ -39,7 +40,7 @@ mt76x0_rf_csr_wr(struct mt76x0_dev *dev, u32 offset, u8 value) if (WARN_ON_ONCE(reg > 64) || WARN_ON_ONCE(bank) > 8) return -EINVAL; - mutex_lock(&dev->reg_atomic_mutex); + mutex_lock(&dev->phy_mutex); if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100)) { ret = -ETIMEDOUT; @@ -54,7 +55,7 @@ mt76x0_rf_csr_wr(struct mt76x0_dev *dev, u32 offset, u8 value) MT_RF_CSR_CFG_KICK); trace_mt76x0_rf_write(&dev->mt76, bank, offset, value); out: - mutex_unlock(&dev->reg_atomic_mutex); + mutex_unlock(&dev->phy_mutex); if (ret < 0) dev_err(dev->mt76.dev, "Error: RF write %d:%d failed:%d!!\n", @@ -63,8 +64,7 @@ out: return ret; } -static int -mt76x0_rf_csr_rr(struct mt76x0_dev *dev, u32 offset) +static int mt76x0_rf_csr_rr(struct mt76x02_dev *dev, u32 offset) { int ret = -ETIMEDOUT; u32 val; @@ -79,7 +79,7 @@ mt76x0_rf_csr_rr(struct mt76x0_dev *dev, u32 offset) if (WARN_ON_ONCE(reg > 64) || WARN_ON_ONCE(bank) > 8) return -EINVAL; - mutex_lock(&dev->reg_atomic_mutex); + mutex_lock(&dev->phy_mutex); if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100)) goto out; @@ -99,7 +99,7 @@ mt76x0_rf_csr_rr(struct mt76x0_dev *dev, u32 offset) trace_mt76x0_rf_read(&dev->mt76, bank, offset, ret); } out: - mutex_unlock(&dev->reg_atomic_mutex); + mutex_unlock(&dev->phy_mutex); if (ret < 0) dev_err(dev->mt76.dev, "Error: RF read %d:%d failed:%d!!\n", @@ -109,7 +109,7 @@ out: } static int -rf_wr(struct mt76x0_dev *dev, u32 offset, u8 val) +rf_wr(struct mt76x02_dev *dev, u32 offset, u8 val) { if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state)) { struct mt76_reg_pair pair = { @@ -117,7 +117,7 @@ rf_wr(struct mt76x0_dev *dev, u32 offset, u8 val) .value = val, }; - return mt76x0_write_reg_pairs(dev, MT_MCU_MEMMAP_RF, &pair, 1); + return mt76_wr_rp(dev, MT_MCU_MEMMAP_RF, &pair, 1); } else { WARN_ON_ONCE(1); 
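/*
 * rf_wr() and rf_rr() above select the RF access path at run time: once
 * MT76_STATE_MCU_RUNNING is set, bank/register pairs travel as batched MCU
 * reg-pair commands (mt76_wr_rp()/mt76_rd_rp()); before the firmware is up
 * only the direct MT_RF_CSR_CFG window works, and reaching this fallback is
 * unexpected, hence the WARN_ON_ONCE(1) just above. A condensed sketch of
 * the dispatch, using hypothetical helper names:
 *
 *	static int rf_write(struct mt76x02_dev *dev, u32 offset, u8 val)
 *	{
 *		if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state))
 *			return mcu_pair_write(dev, offset, val);
 *		return csr_write(dev, offset, val);
 *	}
 */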
return mt76x0_rf_csr_wr(dev, offset, val); @@ -125,7 +125,7 @@ rf_wr(struct mt76x0_dev *dev, u32 offset, u8 val) } static int -rf_rr(struct mt76x0_dev *dev, u32 offset) +rf_rr(struct mt76x02_dev *dev, u32 offset) { int ret; u32 val; @@ -135,7 +135,7 @@ rf_rr(struct mt76x0_dev *dev, u32 offset) .reg = offset, }; - ret = mt76x0_read_reg_pairs(dev, MT_MCU_MEMMAP_RF, &pair, 1); + ret = mt76_rd_rp(dev, MT_MCU_MEMMAP_RF, &pair, 1); val = pair.value; } else { WARN_ON_ONCE(1); @@ -146,7 +146,7 @@ rf_rr(struct mt76x0_dev *dev, u32 offset) } static int -rf_rmw(struct mt76x0_dev *dev, u32 offset, u8 mask, u8 val) +rf_rmw(struct mt76x02_dev *dev, u32 offset, u8 mask, u8 val) { int ret; @@ -162,23 +162,24 @@ rf_rmw(struct mt76x0_dev *dev, u32 offset, u8 mask, u8 val) } static int -rf_set(struct mt76x0_dev *dev, u32 offset, u8 val) +rf_set(struct mt76x02_dev *dev, u32 offset, u8 val) { return rf_rmw(dev, offset, 0, val); } #if 0 static int -rf_clear(struct mt76x0_dev *dev, u32 offset, u8 mask) +rf_clear(struct mt76x02_dev *dev, u32 offset, u8 mask) { return rf_rmw(dev, offset, mask, 0); } #endif -#define RF_RANDOM_WRITE(dev, tab) \ - mt76x0_write_reg_pairs(dev, MT_MCU_MEMMAP_RF, tab, ARRAY_SIZE(tab)); +#define RF_RANDOM_WRITE(dev, tab) \ + mt76_wr_rp(dev, MT_MCU_MEMMAP_RF, \ + tab, ARRAY_SIZE(tab)) -int mt76x0_wait_bbp_ready(struct mt76x0_dev *dev) +int mt76x0_wait_bbp_ready(struct mt76x02_dev *dev) { int i = 20; u32 val; @@ -199,7 +200,7 @@ int mt76x0_wait_bbp_ready(struct mt76x0_dev *dev) } static void -mt76x0_bbp_set_ctrlch(struct mt76x0_dev *dev, enum nl80211_chan_width width, +mt76x0_bbp_set_ctrlch(struct mt76x02_dev *dev, enum nl80211_chan_width width, u8 ctrl) { int core_val, agc_val; @@ -225,25 +226,7 @@ mt76x0_bbp_set_ctrlch(struct mt76x0_dev *dev, enum nl80211_chan_width width, mt76_rmw_field(dev, MT_BBP(TXBE, 0), MT_BBP_TXBE_R0_CTRL_CHAN, ctrl); } -int mt76x0_phy_get_rssi(struct mt76x0_dev *dev, struct mt76x0_rxwi *rxwi) -{ - s8 lna_gain, rssi_offset; - int val; - - if (dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ) { - lna_gain = dev->ee->lna_gain_2ghz; - rssi_offset = dev->ee->rssi_offset_2ghz[0]; - } else { - lna_gain = dev->ee->lna_gain_5ghz[0]; - rssi_offset = dev->ee->rssi_offset_5ghz[0]; - } - - val = rxwi->rssi[0] + rssi_offset - lna_gain; - - return val; -} - -static void mt76x0_vco_cal(struct mt76x0_dev *dev, u8 channel) +static void mt76x0_vco_cal(struct mt76x02_dev *dev, u8 channel) { u8 val; @@ -300,14 +283,14 @@ static void mt76x0_vco_cal(struct mt76x0_dev *dev, u8 channel) } static void -mt76x0_mac_set_ctrlch(struct mt76x0_dev *dev, bool primary_upper) +mt76x0_mac_set_ctrlch(struct mt76x02_dev *dev, bool primary_upper) { mt76_rmw_field(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_UPPER_40M, primary_upper); } static void -mt76x0_phy_set_band(struct mt76x0_dev *dev, enum nl80211_band band) +mt76x0_phy_set_band(struct mt76x02_dev *dev, enum nl80211_band band) { switch (band) { case NL80211_BAND_2GHZ: @@ -339,16 +322,12 @@ mt76x0_phy_set_band(struct mt76x0_dev *dev, enum nl80211_band band) } } -#define EXT_PA_2G_5G 0x0 -#define EXT_PA_5G_ONLY 0x1 -#define EXT_PA_2G_ONLY 0x2 -#define INT_PA_2G_5G 0x3 - static void -mt76x0_phy_set_chan_rf_params(struct mt76x0_dev *dev, u8 channel, u16 rf_bw_band) +mt76x0_phy_set_chan_rf_params(struct mt76x02_dev *dev, u8 channel, u16 rf_bw_band) { u16 rf_band = rf_bw_band & 0xff00; u16 rf_bw = rf_bw_band & 0x00ff; + enum nl80211_band band; u32 mac_reg; u8 rf_val; int i; @@ -495,11 +474,8 @@ mt76x0_phy_set_chan_rf_params(struct mt76x0_dev *dev, u8 channel, 
u16 rf_bw_band mac_reg &= ~0xC; /* Clear 0x518[3:2] */ mt76_wr(dev, MT_RF_MISC, mac_reg); - if (dev->ee->pa_type == INT_PA_2G_5G || - (dev->ee->pa_type == EXT_PA_5G_ONLY && (rf_band & RF_G_BAND)) || - (dev->ee->pa_type == EXT_PA_2G_ONLY && (rf_band & RF_A_BAND))) { - ; /* Internal PA - nothing to do. */ - } else { + band = (rf_band & RF_G_BAND) ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ; + if (mt76x02_ext_pa_enabled(&dev->mt76, band)) { /* MT_RF_MISC (offset: 0x0518) [2]1'b1: enable external A band PA, 1'b0: disable external A band PA @@ -538,7 +514,7 @@ mt76x0_phy_set_chan_rf_params(struct mt76x0_dev *dev, u8 channel, u16 rf_bw_band } static void -mt76x0_phy_set_chan_bbp_params(struct mt76x0_dev *dev, u8 channel, u16 rf_bw_band) +mt76x0_phy_set_chan_bbp_params(struct mt76x02_dev *dev, u8 channel, u16 rf_bw_band) { int i; @@ -551,20 +527,10 @@ mt76x0_phy_set_chan_bbp_params(struct mt76x0_dev *dev, u8 channel, u16 rf_bw_ban if (pair->reg == MT_BBP(AGC, 8)) { u32 val = pair->value; - u8 gain = FIELD_GET(MT_BBP_AGC_GAIN, val); - - if (channel > 14) { - if (channel < 100) - gain -= dev->ee->lna_gain_5ghz[0]*2; - else if (channel < 137) - gain -= dev->ee->lna_gain_5ghz[1]*2; - else - gain -= dev->ee->lna_gain_5ghz[2]*2; - - } else { - gain -= dev->ee->lna_gain_2ghz*2; - } + u8 gain; + gain = FIELD_GET(MT_BBP_AGC_GAIN, val); + gain -= dev->cal.rx.lna_gain * 2; val &= ~MT_BBP_AGC_GAIN; val |= FIELD_PREP(MT_BBP_AGC_GAIN, gain); mt76_wr(dev, pair->reg, val); @@ -574,46 +540,27 @@ mt76x0_phy_set_chan_bbp_params(struct mt76x0_dev *dev, u8 channel, u16 rf_bw_ban } } -#if 0 -static void -mt76x0_extra_power_over_mac(struct mt76x0_dev *dev) +static void mt76x0_ant_select(struct mt76x02_dev *dev) { - u32 val; - - val = ((mt76_rr(dev, MT_TX_PWR_CFG_1) & 0x00003f00) >> 8); - val |= ((mt76_rr(dev, MT_TX_PWR_CFG_2) & 0x00003f00) << 8); - mt76_wr(dev, MT_TX_PWR_CFG_7, val); - - /* TODO: fix VHT */ - val = ((mt76_rr(dev, MT_TX_PWR_CFG_3) & 0x0000ff00) >> 8); - mt76_wr(dev, MT_TX_PWR_CFG_8, val); + struct ieee80211_channel *chan = dev->mt76.chandef.chan; - val = ((mt76_rr(dev, MT_TX_PWR_CFG_4) & 0x0000ff00) >> 8); - mt76_wr(dev, MT_TX_PWR_CFG_9, val); -} - -static void -mt76x0_phy_set_tx_power(struct mt76x0_dev *dev, u8 channel, u8 rf_bw_band) -{ - u32 val; - int i; - int bw = (rf_bw_band & RF_BW_20) ? 
0 : 1; - - for (i = 0; i < 4; i++) { - if (channel <= 14) - val = dev->ee->tx_pwr_cfg_2g[i][bw]; - else - val = dev->ee->tx_pwr_cfg_5g[i][bw]; - - mt76_wr(dev, MT_TX_PWR_CFG_0 + 4*i, val); + /* single antenna mode */ + if (chan->band == NL80211_BAND_2GHZ) { + mt76_rmw(dev, MT_COEXCFG3, + BIT(5) | BIT(4) | BIT(3) | BIT(2), BIT(1)); + mt76_rmw(dev, MT_WLAN_FUN_CTRL, BIT(5), BIT(6)); + } else { + mt76_rmw(dev, MT_COEXCFG3, BIT(5) | BIT(2), + BIT(4) | BIT(3)); + mt76_clear(dev, MT_WLAN_FUN_CTRL, + BIT(6) | BIT(5)); } - - mt76x0_extra_power_over_mac(dev); + mt76_clear(dev, MT_CMB_CTRL, BIT(14) | BIT(12)); + mt76_clear(dev, MT_COEXCFG0, BIT(2)); } -#endif static void -mt76x0_bbp_set_bw(struct mt76x0_dev *dev, enum nl80211_chan_width width) +mt76x0_bbp_set_bw(struct mt76x02_dev *dev, enum nl80211_chan_width width) { enum { BW_20 = 0, BW_40 = 1, BW_80 = 2, BW_10 = 4}; int bw; @@ -640,39 +587,27 @@ mt76x0_bbp_set_bw(struct mt76x0_dev *dev, enum nl80211_chan_width width) return ; } - mt76x0_mcu_function_select(dev, BW_SETTING, bw); + mt76x02_mcu_function_select(&dev->mt76, BW_SETTING, bw, false); } -static void -mt76x0_phy_set_chan_pwr(struct mt76x0_dev *dev, u8 channel) +void mt76x0_phy_set_txpower(struct mt76x02_dev *dev) { - static const int mt76x0_tx_pwr_ch_list[] = { - 1,2,3,4,5,6,7,8,9,10,11,12,13,14, - 36,38,40,44,46,48,52,54,56,60,62,64, - 100,102,104,108,110,112,116,118,120,124,126,128,132,134,136,140, - 149,151,153,157,159,161,165,167,169,171,173, - 42,58,106,122,155 - }; - int i; - u32 val; + struct mt76_rate_power *t = &dev->mt76.rate_power; + u8 info[2]; - for (i = 0; i < ARRAY_SIZE(mt76x0_tx_pwr_ch_list); i++) - if (mt76x0_tx_pwr_ch_list[i] == channel) - break; + mt76x0_get_power_info(dev, info); + mt76x0_get_tx_power_per_rate(dev); - if (WARN_ON(i == ARRAY_SIZE(mt76x0_tx_pwr_ch_list))) - return; + mt76x02_add_rate_power_offset(t, info[0]); + mt76x02_limit_rate_power(t, dev->mt76.txpower_conf); + dev->mt76.txpower_cur = mt76x02_get_max_rate_power(t); + mt76x02_add_rate_power_offset(t, -info[0]); - val = mt76_rr(dev, MT_TX_ALC_CFG_0); - val &= ~0x3f3f; - val |= dev->ee->tx_pwr_per_chan[i]; - val |= 0x2f2f << 16; - mt76_wr(dev, MT_TX_ALC_CFG_0, val); + mt76x02_phy_set_txpower(&dev->mt76, info[0], info[1]); } -static int -__mt76x0_phy_set_channel(struct mt76x0_dev *dev, - struct cfg80211_chan_def *chandef) +int mt76x0_phy_set_channel(struct mt76x02_dev *dev, + struct cfg80211_chan_def *chandef) { u32 ext_cca_chan[4] = { [0] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 0) | @@ -706,6 +641,7 @@ __mt76x0_phy_set_channel(struct mt76x0_dev *dev, freq1 = chandef->center_freq1; channel = chandef->chan->hw_value; rf_bw_band = (channel <= 14) ? 
RF_G_BAND : RF_A_BAND; + dev->mt76.chandef = *chandef; switch (chandef->width) { case NL80211_CHAN_WIDTH_40: @@ -732,6 +668,7 @@ __mt76x0_phy_set_channel(struct mt76x0_dev *dev, mt76x0_bbp_set_bw(dev, chandef->width); mt76x0_bbp_set_ctrlch(dev, chandef->width, ch_group_index); mt76x0_mac_set_ctrlch(dev, ch_group_index & 1); + mt76x0_ant_select(dev); mt76_rmw(dev, MT_EXT_CCA_CFG, (MT_EXT_CCA_CFG_CCA0 | @@ -743,6 +680,7 @@ __mt76x0_phy_set_channel(struct mt76x0_dev *dev, mt76x0_phy_set_band(dev, chandef->chan->band); mt76x0_phy_set_chan_rf_params(dev, channel, rf_bw_band); + mt76x0_read_rx_gain(dev); /* set Japan Tx filter at channel 14 */ val = mt76_rr(dev, MT_BBP(CORE, 1)); @@ -757,35 +695,22 @@ __mt76x0_phy_set_channel(struct mt76x0_dev *dev, /* Vendor driver don't do it */ /* mt76x0_phy_set_tx_power(dev, channel, rf_bw_band); */ + mt76x0_vco_cal(dev, channel); if (scan) - mt76x0_vco_cal(dev, channel); + mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RXDCOC, 1, false); - mt76x0_mcu_calibrate(dev, MCU_CAL_RXDCOC, 1); - mt76x0_phy_set_chan_pwr(dev, channel); + mt76x0_phy_set_txpower(dev); - dev->mt76.chandef = *chandef; return 0; } -int mt76x0_phy_set_channel(struct mt76x0_dev *dev, - struct cfg80211_chan_def *chandef) -{ - int ret; - - mutex_lock(&dev->hw_atomic_mutex); - ret = __mt76x0_phy_set_channel(dev, chandef); - mutex_unlock(&dev->hw_atomic_mutex); - - return ret; -} - -void mt76x0_phy_recalibrate_after_assoc(struct mt76x0_dev *dev) +void mt76x0_phy_recalibrate_after_assoc(struct mt76x02_dev *dev) { u32 tx_alc, reg_val; u8 channel = dev->mt76.chandef.chan->hw_value; int is_5ghz = (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ) ? 1 : 0; - mt76x0_mcu_calibrate(dev, MCU_CAL_R, 0); + mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_R, 0, false); mt76x0_vco_cal(dev, channel); @@ -797,34 +722,36 @@ void mt76x0_phy_recalibrate_after_assoc(struct mt76x0_dev *dev) reg_val &= 0xffffff7e; mt76_wr(dev, 0x2124, reg_val); - mt76x0_mcu_calibrate(dev, MCU_CAL_RXDCOC, 0); + mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RXDCOC, 0, false); - mt76x0_mcu_calibrate(dev, MCU_CAL_LC, is_5ghz); - mt76x0_mcu_calibrate(dev, MCU_CAL_LOFT, is_5ghz); - mt76x0_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz); - mt76x0_mcu_calibrate(dev, MCU_CAL_TX_GROUP_DELAY, is_5ghz); - mt76x0_mcu_calibrate(dev, MCU_CAL_RXIQ, is_5ghz); - mt76x0_mcu_calibrate(dev, MCU_CAL_RX_GROUP_DELAY, is_5ghz); + mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_LC, is_5ghz, false); + mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_LOFT, is_5ghz, false); + mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TXIQ, is_5ghz, false); + mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TX_GROUP_DELAY, + is_5ghz, false); + mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RXIQ, is_5ghz, false); + mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RX_GROUP_DELAY, + is_5ghz, false); mt76_wr(dev, 0x2124, reg_val); mt76_wr(dev, MT_TX_ALC_CFG_0, tx_alc); msleep(100); - mt76x0_mcu_calibrate(dev, MCU_CAL_RXDCOC, 1); + mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RXDCOC, 1, false); } -void mt76x0_agc_save(struct mt76x0_dev *dev) +void mt76x0_agc_save(struct mt76x02_dev *dev) { /* Only one RX path */ dev->agc_save = FIELD_GET(MT_BBP_AGC_GAIN, mt76_rr(dev, MT_BBP(AGC, 8))); } -void mt76x0_agc_restore(struct mt76x0_dev *dev) +void mt76x0_agc_restore(struct mt76x02_dev *dev) { mt76_rmw_field(dev, MT_BBP(AGC, 8), MT_BBP_AGC_GAIN, dev->agc_save); } -static void mt76x0_temp_sensor(struct mt76x0_dev *dev) +static void mt76x0_temp_sensor(struct mt76x02_dev *dev) { u8 rf_b7_73, rf_b0_66, rf_b0_67; int cycle, temp; @@ -860,7 +787,7 @@ 
static void mt76x0_temp_sensor(struct mt76x0_dev *dev) else sval |= 0xffffff00; /* Negative */ - temp = (35 * (sval - dev->ee->temp_off))/ 10 + 25; + temp = (35 * (sval - dev->cal.rx.temp_offset)) / 10 + 25; done: rf_wr(dev, MT_RF(7, 73), rf_b7_73); @@ -868,14 +795,17 @@ done: rf_wr(dev, MT_RF(0, 73), rf_b0_67); } -static void mt76x0_dynamic_vga_tuning(struct mt76x0_dev *dev) +static void mt76x0_dynamic_vga_tuning(struct mt76x02_dev *dev) { + struct cfg80211_chan_def *chandef = &dev->mt76.chandef; u32 val, init_vga; + int avg_rssi; - init_vga = (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ) ? 0x54 : 0x4E; - if (dev->avg_rssi > -60) + init_vga = chandef->chan->band == NL80211_BAND_5GHZ ? 0x54 : 0x4E; + avg_rssi = mt76x02_phy_get_min_avg_rssi(&dev->mt76); + if (avg_rssi > -60) init_vga -= 0x20; - else if (dev->avg_rssi > -70) + else if (avg_rssi > -70) init_vga -= 0x10; val = mt76_rr(dev, MT_BBP(AGC, 8)); @@ -886,8 +816,8 @@ static void mt76x0_dynamic_vga_tuning(struct mt76x0_dev *dev) static void mt76x0_phy_calibrate(struct work_struct *work) { - struct mt76x0_dev *dev = container_of(work, struct mt76x0_dev, - cal_work.work); + struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev, + cal_work.work); mt76x0_dynamic_vga_tuning(dev); mt76x0_temp_sensor(dev); @@ -896,45 +826,7 @@ static void mt76x0_phy_calibrate(struct work_struct *work) MT_CALIBRATE_INTERVAL); } -void mt76x0_phy_con_cal_onoff(struct mt76x0_dev *dev, - struct ieee80211_bss_conf *info) -{ - /* Start/stop collecting beacon data */ - spin_lock_bh(&dev->con_mon_lock); - ether_addr_copy(dev->ap_bssid, info->bssid); - dev->avg_rssi = 0; - dev->bcn_freq_off = MT_FREQ_OFFSET_INVALID; - spin_unlock_bh(&dev->con_mon_lock); -} - -static void -mt76x0_set_rx_chains(struct mt76x0_dev *dev) -{ - u32 val; - - val = mt76_rr(dev, MT_BBP(AGC, 0)); - val &= ~(BIT(3) | BIT(4)); - - if (dev->chainmask & BIT(1)) - val |= BIT(3); - - mt76_wr(dev, MT_BBP(AGC, 0), val); - - mb(); - val = mt76_rr(dev, MT_BBP(AGC, 0)); -} - -static void -mt76x0_set_tx_dac(struct mt76x0_dev *dev) -{ - if (dev->chainmask & BIT(1)) - mt76_set(dev, MT_BBP(TXBE, 5), 3); - else - mt76_clear(dev, MT_BBP(TXBE, 5), 3); -} - -static void -mt76x0_rf_init(struct mt76x0_dev *dev) +static void mt76x0_rf_init(struct mt76x02_dev *dev) { int i; u8 val; @@ -966,7 +858,8 @@ mt76x0_rf_init(struct mt76x0_dev *dev) E1: B0.R22<6:0>: xo_cxo<6:0> E2: B0.R21<0>: xo_cxo<0>, B0.R22<7:0>: xo_cxo<8:1> */ - rf_wr(dev, MT_RF(0, 22), min_t(u8, dev->ee->rf_freq_off, 0xBF)); + rf_wr(dev, MT_RF(0, 22), + min_t(u8, dev->cal.rx.freq_offset, 0xbf)); val = rf_rr(dev, MT_RF(0, 22)); /* @@ -986,23 +879,11 @@ mt76x0_rf_init(struct mt76x0_dev *dev) rf_set(dev, MT_RF(0, 4), 0x80); } -static void mt76x0_ant_select(struct mt76x0_dev *dev) -{ - /* Single antenna mode. 
*/ - mt76_rmw(dev, MT_WLAN_FUN_CTRL, BIT(5), BIT(6)); - mt76_clear(dev, MT_CMB_CTRL, BIT(14) | BIT(12)); - mt76_clear(dev, MT_COEXCFG0, BIT(2)); - mt76_rmw(dev, MT_COEXCFG3, BIT(5) | BIT(4) | BIT(3) | BIT(2), BIT(1)); -} - -void mt76x0_phy_init(struct mt76x0_dev *dev) +void mt76x0_phy_init(struct mt76x02_dev *dev) { INIT_DELAYED_WORK(&dev->cal_work, mt76x0_phy_calibrate); - mt76x0_ant_select(dev); - mt76x0_rf_init(dev); - - mt76x0_set_rx_chains(dev); - mt76x0_set_tx_dac(dev); + mt76x02_phy_set_rxpath(&dev->mt76); + mt76x02_phy_set_txdac(&dev->mt76); } diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/regs.h b/drivers/net/wireless/mediatek/mt76/mt76x0/regs.h deleted file mode 100644 index 16bed4aaa242..000000000000 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/regs.h +++ /dev/null @@ -1,651 +0,0 @@ -/* - * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org> - * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl> - * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 - * as published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#ifndef __MT76_REGS_H -#define __MT76_REGS_H - -#include <linux/bitops.h> - -#define MT_ASIC_VERSION 0x0000 - -#define MT76XX_REV_E3 0x22 -#define MT76XX_REV_E4 0x33 - -#define MT_CMB_CTRL 0x0020 -#define MT_CMB_CTRL_XTAL_RDY BIT(22) -#define MT_CMB_CTRL_PLL_LD BIT(23) - -#define MT_EFUSE_CTRL 0x0024 -#define MT_EFUSE_CTRL_AOUT GENMASK(5, 0) -#define MT_EFUSE_CTRL_MODE GENMASK(7, 6) -#define MT_EFUSE_CTRL_LDO_OFF_TIME GENMASK(13, 8) -#define MT_EFUSE_CTRL_LDO_ON_TIME GENMASK(15, 14) -#define MT_EFUSE_CTRL_AIN GENMASK(25, 16) -#define MT_EFUSE_CTRL_KICK BIT(30) -#define MT_EFUSE_CTRL_SEL BIT(31) - -#define MT_EFUSE_DATA_BASE 0x0028 -#define MT_EFUSE_DATA(_n) (MT_EFUSE_DATA_BASE + ((_n) << 2)) - -#define MT_COEXCFG0 0x0040 -#define MT_COEXCFG0_COEX_EN BIT(0) - -#define MT_COEXCFG3 0x004c - -#define MT_LDO_CTRL_0 0x006c -#define MT_LDO_CTRL_1 0x0070 - -#define MT_WLAN_FUN_CTRL 0x0080 -#define MT_WLAN_FUN_CTRL_WLAN_EN BIT(0) -#define MT_WLAN_FUN_CTRL_WLAN_CLK_EN BIT(1) -#define MT_WLAN_FUN_CTRL_WLAN_RESET_RF BIT(2) - -#define MT_WLAN_FUN_CTRL_WLAN_RESET BIT(3) /* MT76x0 */ -#define MT_WLAN_FUN_CTRL_CSR_F20M_CKEN BIT(3) /* MT76x2 */ - -#define MT_WLAN_FUN_CTRL_PCIE_CLK_REQ BIT(4) -#define MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL BIT(5) -#define MT_WLAN_FUN_CTRL_INV_ANT_SEL BIT(6) -#define MT_WLAN_FUN_CTRL_WAKE_HOST BIT(7) - -#define MT_WLAN_FUN_CTRL_THERM_RST BIT(8) /* MT76x2 */ -#define MT_WLAN_FUN_CTRL_THERM_CKEN BIT(9) /* MT76x2 */ - -#define MT_WLAN_FUN_CTRL_GPIO_IN GENMASK(15, 8) /* MT76x0 */ -#define MT_WLAN_FUN_CTRL_GPIO_OUT GENMASK(23, 16) /* MT76x0 */ -#define MT_WLAN_FUN_CTRL_GPIO_OUT_EN GENMASK(31, 24) /* MT76x0 */ - -#define MT_XO_CTRL0 0x0100 -#define MT_XO_CTRL1 0x0104 -#define MT_XO_CTRL2 0x0108 -#define MT_XO_CTRL3 0x010c -#define MT_XO_CTRL4 0x0110 - -#define MT_XO_CTRL5 0x0114 -#define MT_XO_CTRL5_C2_VAL GENMASK(14, 8) - -#define MT_XO_CTRL6 0x0118 -#define MT_XO_CTRL6_C2_CTRL GENMASK(14, 8) - -#define MT_XO_CTRL7 0x011c - -#define MT_IOCFG_6 0x0124 -#define MT_WLAN_MTC_CTRL 0x10148 -#define MT_WLAN_MTC_CTRL_MTCMOS_PWR_UP BIT(0) -#define MT_WLAN_MTC_CTRL_PWR_ACK BIT(12) 
-#define MT_WLAN_MTC_CTRL_PWR_ACK_S BIT(13) -#define MT_WLAN_MTC_CTRL_BBP_MEM_PD GENMASK(19, 16) -#define MT_WLAN_MTC_CTRL_PBF_MEM_PD BIT(20) -#define MT_WLAN_MTC_CTRL_FCE_MEM_PD BIT(21) -#define MT_WLAN_MTC_CTRL_TSO_MEM_PD BIT(22) -#define MT_WLAN_MTC_CTRL_BBP_MEM_RB BIT(24) -#define MT_WLAN_MTC_CTRL_PBF_MEM_RB BIT(25) -#define MT_WLAN_MTC_CTRL_FCE_MEM_RB BIT(26) -#define MT_WLAN_MTC_CTRL_TSO_MEM_RB BIT(27) -#define MT_WLAN_MTC_CTRL_STATE_UP BIT(28) - -#define MT_INT_SOURCE_CSR 0x0200 -#define MT_INT_MASK_CSR 0x0204 - -#define MT_INT_RX_DONE(_n) BIT(_n) -#define MT_INT_RX_DONE_ALL GENMASK(1, 0) -#define MT_INT_TX_DONE_ALL GENMASK(13, 4) -#define MT_INT_TX_DONE(_n) BIT(_n + 4) -#define MT_INT_RX_COHERENT BIT(16) -#define MT_INT_TX_COHERENT BIT(17) -#define MT_INT_ANY_COHERENT BIT(18) -#define MT_INT_MCU_CMD BIT(19) -#define MT_INT_TBTT BIT(20) -#define MT_INT_PRE_TBTT BIT(21) -#define MT_INT_TX_STAT BIT(22) -#define MT_INT_AUTO_WAKEUP BIT(23) -#define MT_INT_GPTIMER BIT(24) -#define MT_INT_RXDELAYINT BIT(26) -#define MT_INT_TXDELAYINT BIT(27) - -#define MT_WPDMA_GLO_CFG 0x0208 -#define MT_WPDMA_GLO_CFG_TX_DMA_EN BIT(0) -#define MT_WPDMA_GLO_CFG_TX_DMA_BUSY BIT(1) -#define MT_WPDMA_GLO_CFG_RX_DMA_EN BIT(2) -#define MT_WPDMA_GLO_CFG_RX_DMA_BUSY BIT(3) -#define MT_WPDMA_GLO_CFG_DMA_BURST_SIZE GENMASK(5, 4) -#define MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE BIT(6) -#define MT_WPDMA_GLO_CFG_BIG_ENDIAN BIT(7) -#define MT_WPDMA_GLO_CFG_HDR_SEG_LEN GENMASK(15, 8) -#define MT_WPDMA_GLO_CFG_CLK_GATE_DIS BIT(30) -#define MT_WPDMA_GLO_CFG_RX_2B_OFFSET BIT(31) - -#define MT_WPDMA_RST_IDX 0x020c - -#define MT_WPDMA_DELAY_INT_CFG 0x0210 - -#define MT_WMM_AIFSN 0x0214 -#define MT_WMM_AIFSN_MASK GENMASK(3, 0) -#define MT_WMM_AIFSN_SHIFT(_n) ((_n) * 4) - -#define MT_WMM_CWMIN 0x0218 -#define MT_WMM_CWMIN_MASK GENMASK(3, 0) -#define MT_WMM_CWMIN_SHIFT(_n) ((_n) * 4) - -#define MT_WMM_CWMAX 0x021c -#define MT_WMM_CWMAX_MASK GENMASK(3, 0) -#define MT_WMM_CWMAX_SHIFT(_n) ((_n) * 4) - -#define MT_WMM_TXOP_BASE 0x0220 -#define MT_WMM_TXOP(_n) (MT_WMM_TXOP_BASE + (((_n) / 2) << 2)) -#define MT_WMM_TXOP_SHIFT(_n) ((_n & 1) * 16) -#define MT_WMM_TXOP_MASK GENMASK(15, 0) - -#define MT_WMM_CTRL 0x0230 /* MT76x0 */ - -#define MT_FCE_DMA_ADDR 0x0230 -#define MT_FCE_DMA_LEN 0x0234 - -#define MT_USB_DMA_CFG 0x238 -#define MT_USB_DMA_CFG_RX_BULK_AGG_TOUT GENMASK(7, 0) -#define MT_USB_DMA_CFG_RX_BULK_AGG_LMT GENMASK(15, 8) -#define MT_USB_DMA_CFG_TX_WL_DROP BIT(16) -#define MT_USB_DMA_CFG_WAKEUP_EN BIT(17) -#define MT_USB_DMA_CFG_RX_DROP_OR_PADDING BIT(18) -#define MT_USB_DMA_CFG_TX_CLR BIT(19) -#define MT_USB_DMA_CFG_WL_LPK_EN BIT(20) -#define MT_USB_DMA_CFG_RX_BULK_AGG_EN BIT(21) -#define MT_USB_DMA_CFG_RX_BULK_EN BIT(22) -#define MT_USB_DMA_CFG_TX_BULK_EN BIT(23) -#define MT_USB_DMA_CFG_EP_OUT_VALID GENMASK(29, 24) -#define MT_USB_DMA_CFG_RX_BUSY BIT(30) -#define MT_USB_DMA_CFG_TX_BUSY BIT(31) -#if 0 -#define MT_USB_DMA_CFG_TX_CLR BIT(19) -#define MT_USB_DMA_CFG_TXOP_HALT BIT(20) -#define MT_USB_DMA_CFG_RX_BULK_AGG_EN BIT(21) -#define MT_USB_DMA_CFG_RX_BULK_EN BIT(22) -#define MT_USB_DMA_CFG_TX_BULK_EN BIT(23) -#define MT_USB_DMA_CFG_UDMA_RX_WL_DROP BIT(25) -#endif - -#define MT_TSO_CTRL 0x0250 -#define MT_HEADER_TRANS_CTRL_REG 0x0260 - -#define MT_US_CYC_CFG 0x02a4 -#define MT_US_CYC_CNT GENMASK(7, 0) - -#define MT_TX_RING_BASE 0x0300 -#define MT_RX_RING_BASE 0x03c0 -#define MT_RING_SIZE 0x10 - -#define MT_TX_HW_QUEUE_MCU 8 -#define MT_TX_HW_QUEUE_MGMT 9 - -#define MT_PBF_SYS_CTRL 0x0400 -#define MT_PBF_SYS_CTRL_MCU_RESET 
BIT(0) -#define MT_PBF_SYS_CTRL_DMA_RESET BIT(1) -#define MT_PBF_SYS_CTRL_MAC_RESET BIT(2) -#define MT_PBF_SYS_CTRL_PBF_RESET BIT(3) -#define MT_PBF_SYS_CTRL_ASY_RESET BIT(4) - -#define MT_PBF_CFG 0x0404 -#define MT_PBF_CFG_TX0Q_EN BIT(0) -#define MT_PBF_CFG_TX1Q_EN BIT(1) -#define MT_PBF_CFG_TX2Q_EN BIT(2) -#define MT_PBF_CFG_TX3Q_EN BIT(3) -#define MT_PBF_CFG_RX0Q_EN BIT(4) -#define MT_PBF_CFG_RX_DROP_EN BIT(8) - -#define MT_PBF_TX_MAX_PCNT 0x0408 -#define MT_PBF_RX_MAX_PCNT 0x040c - -#define MT_BCN_OFFSET_BASE 0x041c -#define MT_BCN_OFFSET(_n) (MT_BCN_OFFSET_BASE + ((_n) << 2)) - -#define MT_RXQ_STA 0x0430 -#define MT_TXQ_STA 0x0434 -#define MT_RF_CSR_CFG 0x0500 -#define MT_RF_CSR_CFG_DATA GENMASK(7, 0) -#define MT_RF_CSR_CFG_REG_ID GENMASK(13, 8) -#define MT_RF_CSR_CFG_REG_BANK GENMASK(17, 14) -#define MT_RF_CSR_CFG_WR BIT(30) -#define MT_RF_CSR_CFG_KICK BIT(31) - -#define MT_RF_BYPASS_0 0x0504 -#define MT_RF_BYPASS_1 0x0508 -#define MT_RF_SETTING_0 0x050c - -#define MT_RF_MISC 0x0518 -#define MT_RF_DATA_WRITE 0x0524 - -#define MT_RF_CTRL 0x0528 -#define MT_RF_CTRL_ADDR GENMASK(11, 0) -#define MT_RF_CTRL_WRITE BIT(12) -#define MT_RF_CTRL_BUSY BIT(13) -#define MT_RF_CTRL_IDX BIT(16) - -#define MT_RF_DATA_READ 0x052c - -#define MT_COM_REG0 0x0730 -#define MT_COM_REG1 0x0734 -#define MT_COM_REG2 0x0738 -#define MT_COM_REG3 0x073C - -#define MT_FCE_PSE_CTRL 0x0800 -#define MT_FCE_PARAMETERS 0x0804 -#define MT_FCE_CSO 0x0808 - -#define MT_FCE_L2_STUFF 0x080c -#define MT_FCE_L2_STUFF_HT_L2_EN BIT(0) -#define MT_FCE_L2_STUFF_QOS_L2_EN BIT(1) -#define MT_FCE_L2_STUFF_RX_STUFF_EN BIT(2) -#define MT_FCE_L2_STUFF_TX_STUFF_EN BIT(3) -#define MT_FCE_L2_STUFF_WR_MPDU_LEN_EN BIT(4) -#define MT_FCE_L2_STUFF_MVINV_BSWAP BIT(5) -#define MT_FCE_L2_STUFF_TS_CMD_QSEL_EN GENMASK(15, 8) -#define MT_FCE_L2_STUFF_TS_LEN_EN GENMASK(23, 16) -#define MT_FCE_L2_STUFF_OTHER_PORT GENMASK(25, 24) - -#define MT_FCE_WLAN_FLOW_CONTROL1 0x0824 - -#define MT_TX_CPU_FROM_FCE_BASE_PTR 0x09a0 -#define MT_TX_CPU_FROM_FCE_MAX_COUNT 0x09a4 -#define MT_TX_CPU_FROM_FCE_CPU_DESC_IDX 0x09a8 - -#define MT_FCE_PDMA_GLOBAL_CONF 0x09c4 - -#define MT_PAUSE_ENABLE_CONTROL1 0x0a38 - -#define MT_FCE_SKIP_FS 0x0a6c - -#define MT_MAC_CSR0 0x1000 -#define MT_MAC_SYS_CTRL 0x1004 -#define MT_MAC_SYS_CTRL_RESET_CSR BIT(0) -#define MT_MAC_SYS_CTRL_RESET_BBP BIT(1) -#define MT_MAC_SYS_CTRL_ENABLE_TX BIT(2) -#define MT_MAC_SYS_CTRL_ENABLE_RX BIT(3) - -#define MT_MAC_ADDR_DW0 0x1008 -#define MT_MAC_ADDR_DW1 0x100c -#define MT_MAC_ADDR_DW1_U2ME_MASK GENMASK(23, 16) - -#define MT_MAC_BSSID_DW0 0x1010 -#define MT_MAC_BSSID_DW1 0x1014 -#define MT_MAC_BSSID_DW1_ADDR GENMASK(15, 0) -#define MT_MAC_BSSID_DW1_MBSS_MODE GENMASK(17, 16) -#define MT_MAC_BSSID_DW1_MBEACON_N GENMASK(20, 18) -#define MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT BIT(21) -#define MT_MAC_BSSID_DW1_MBSS_MODE_B2 BIT(22) -#define MT_MAC_BSSID_DW1_MBEACON_N_B3 BIT(23) -#define MT_MAC_BSSID_DW1_MBSS_IDX_BYTE GENMASK(26, 24) - -#define MT_MAX_LEN_CFG 0x1018 -#define MT_MAX_LEN_CFG_AMPDU GENMASK(13, 12) - -#define MT_LED_CFG 0x102c - -#define MT_AMPDU_MAX_LEN_20M1S 0x1030 -#define MT_AMPDU_MAX_LEN_20M2S 0x1034 -#define MT_AMPDU_MAX_LEN_40M1S 0x1038 -#define MT_AMPDU_MAX_LEN_40M2S 0x103c -#define MT_AMPDU_MAX_LEN 0x1040 - -#define MT_WCID_DROP_BASE 0x106c -#define MT_WCID_DROP(_n) (MT_WCID_DROP_BASE + ((_n) >> 5) * 4) -#define MT_WCID_DROP_MASK(_n) BIT((_n) % 32) - -#define MT_BCN_BYPASS_MASK 0x108c - -#define MT_MAC_APC_BSSID_BASE 0x1090 -#define MT_MAC_APC_BSSID_L(_n) (MT_MAC_APC_BSSID_BASE + ((_n) * 
8)) -#define MT_MAC_APC_BSSID_H(_n) (MT_MAC_APC_BSSID_BASE + ((_n) * 8 + 4)) -#define MT_MAC_APC_BSSID_H_ADDR GENMASK(15, 0) -#define MT_MAC_APC_BSSID0_H_EN BIT(16) - -#define MT_XIFS_TIME_CFG 0x1100 -#define MT_XIFS_TIME_CFG_CCK_SIFS GENMASK(7, 0) -#define MT_XIFS_TIME_CFG_OFDM_SIFS GENMASK(15, 8) -#define MT_XIFS_TIME_CFG_OFDM_XIFS GENMASK(19, 16) -#define MT_XIFS_TIME_CFG_EIFS GENMASK(28, 20) -#define MT_XIFS_TIME_CFG_BB_RXEND_EN BIT(29) - -#define MT_BKOFF_SLOT_CFG 0x1104 -#define MT_BKOFF_SLOT_CFG_SLOTTIME GENMASK(7, 0) -#define MT_BKOFF_SLOT_CFG_CC_DELAY GENMASK(11, 8) - -#define MT_BEACON_TIME_CFG 0x1114 -#define MT_BEACON_TIME_CFG_INTVAL GENMASK(15, 0) -#define MT_BEACON_TIME_CFG_TIMER_EN BIT(16) -#define MT_BEACON_TIME_CFG_SYNC_MODE GENMASK(18, 17) -#define MT_BEACON_TIME_CFG_TBTT_EN BIT(19) -#define MT_BEACON_TIME_CFG_BEACON_TX BIT(20) -#define MT_BEACON_TIME_CFG_TSF_COMP GENMASK(31, 24) - -#define MT_TBTT_SYNC_CFG 0x1118 -#define MT_TBTT_TIMER_CFG 0x1124 - -#define MT_INT_TIMER_CFG 0x1128 -#define MT_INT_TIMER_CFG_PRE_TBTT GENMASK(15, 0) -#define MT_INT_TIMER_CFG_GP_TIMER GENMASK(31, 16) - -#define MT_INT_TIMER_EN 0x112c -#define MT_INT_TIMER_EN_PRE_TBTT_EN BIT(0) -#define MT_INT_TIMER_EN_GP_TIMER_EN BIT(1) - -#define MT_MAC_STATUS 0x1200 -#define MT_MAC_STATUS_TX BIT(0) -#define MT_MAC_STATUS_RX BIT(1) - -#define MT_PWR_PIN_CFG 0x1204 -#define MT_AUX_CLK_CFG 0x120c - -#define MT_BB_PA_MODE_CFG0 0x1214 -#define MT_BB_PA_MODE_CFG1 0x1218 -#define MT_RF_PA_MODE_CFG0 0x121c -#define MT_RF_PA_MODE_CFG1 0x1220 - -#define MT_RF_PA_MODE_ADJ0 0x1228 -#define MT_RF_PA_MODE_ADJ1 0x122c - -#define MT_DACCLK_EN_DLY_CFG 0x1264 - -#define MT_EDCA_CFG_BASE 0x1300 -#define MT_EDCA_CFG_AC(_n) (MT_EDCA_CFG_BASE + ((_n) << 2)) -#define MT_EDCA_CFG_TXOP GENMASK(7, 0) -#define MT_EDCA_CFG_AIFSN GENMASK(11, 8) -#define MT_EDCA_CFG_CWMIN GENMASK(15, 12) -#define MT_EDCA_CFG_CWMAX GENMASK(19, 16) - -#define MT_TX_PWR_CFG_0 0x1314 -#define MT_TX_PWR_CFG_1 0x1318 -#define MT_TX_PWR_CFG_2 0x131c -#define MT_TX_PWR_CFG_3 0x1320 -#define MT_TX_PWR_CFG_4 0x1324 - -#define MT_TX_BAND_CFG 0x132c -#define MT_TX_BAND_CFG_UPPER_40M BIT(0) -#define MT_TX_BAND_CFG_5G BIT(1) -#define MT_TX_BAND_CFG_2G BIT(2) - -#define MT_HT_FBK_TO_LEGACY 0x1384 -#define MT_TX_MPDU_ADJ_INT 0x1388 - -#define MT_TX_PWR_CFG_7 0x13d4 -#define MT_TX_PWR_CFG_8 0x13d8 -#define MT_TX_PWR_CFG_9 0x13dc - -#define MT_TX_SW_CFG0 0x1330 -#define MT_TX_SW_CFG1 0x1334 -#define MT_TX_SW_CFG2 0x1338 - -#define MT_TXOP_CTRL_CFG 0x1340 -#define MT_TXOP_TRUN_EN GENMASK(5, 0) -#define MT_TXOP_EXT_CCA_DLY GENMASK(15, 8) -#define MT_TXOP_CTRL - -#define MT_TX_RTS_CFG 0x1344 -#define MT_TX_RTS_CFG_RETRY_LIMIT GENMASK(7, 0) -#define MT_TX_RTS_CFG_THRESH GENMASK(23, 8) -#define MT_TX_RTS_FALLBACK BIT(24) - -#define MT_TX_TIMEOUT_CFG 0x1348 -#define MT_TX_RETRY_CFG 0x134c -#define MT_TX_LINK_CFG 0x1350 -#define MT_HT_FBK_CFG0 0x1354 -#define MT_HT_FBK_CFG1 0x1358 -#define MT_LG_FBK_CFG0 0x135c -#define MT_LG_FBK_CFG1 0x1360 - -#define MT_CCK_PROT_CFG 0x1364 -#define MT_OFDM_PROT_CFG 0x1368 -#define MT_MM20_PROT_CFG 0x136c -#define MT_MM40_PROT_CFG 0x1370 -#define MT_GF20_PROT_CFG 0x1374 -#define MT_GF40_PROT_CFG 0x1378 - -#define MT_PROT_RATE GENMASK(15, 0) -#define MT_PROT_CTRL_RTS_CTS BIT(16) -#define MT_PROT_CTRL_CTS2SELF BIT(17) -#define MT_PROT_NAV_SHORT BIT(18) -#define MT_PROT_NAV_LONG BIT(19) -#define MT_PROT_TXOP_ALLOW_CCK BIT(20) -#define MT_PROT_TXOP_ALLOW_OFDM BIT(21) -#define MT_PROT_TXOP_ALLOW_MM20 BIT(22) -#define MT_PROT_TXOP_ALLOW_MM40 
BIT(23) -#define MT_PROT_TXOP_ALLOW_GF20 BIT(24) -#define MT_PROT_TXOP_ALLOW_GF40 BIT(25) -#define MT_PROT_RTS_THR_EN BIT(26) -#define MT_PROT_RATE_CCK_11 0x0003 -#define MT_PROT_RATE_OFDM_6 0x4000 -#define MT_PROT_RATE_OFDM_24 0x4004 -#define MT_PROT_RATE_DUP_OFDM_24 0x4084 -#define MT_PROT_TXOP_ALLOW_ALL GENMASK(25, 20) -#define MT_PROT_TXOP_ALLOW_BW20 (MT_PROT_TXOP_ALLOW_ALL & \ - ~MT_PROT_TXOP_ALLOW_MM40 & \ - ~MT_PROT_TXOP_ALLOW_GF40) - -#define MT_EXP_ACK_TIME 0x1380 - -#define MT_TX_PWR_CFG_0_EXT 0x1390 -#define MT_TX_PWR_CFG_1_EXT 0x1394 - -#define MT_TX_FBK_LIMIT 0x1398 -#define MT_TX_FBK_LIMIT_MPDU_FBK GENMASK(7, 0) -#define MT_TX_FBK_LIMIT_AMPDU_FBK GENMASK(15, 8) -#define MT_TX_FBK_LIMIT_MPDU_UP_CLEAR BIT(16) -#define MT_TX_FBK_LIMIT_AMPDU_UP_CLEAR BIT(17) -#define MT_TX_FBK_LIMIT_RATE_LUT BIT(18) - -#define MT_TX0_RF_GAIN_CORR 0x13a0 -#define MT_TX1_RF_GAIN_CORR 0x13a4 -#define MT_TX0_RF_GAIN_ATTEN 0x13a8 - -#define MT_TX_ALC_CFG_0 0x13b0 -#define MT_TX_ALC_CFG_0_CH_INIT_0 GENMASK(5, 0) -#define MT_TX_ALC_CFG_0_CH_INIT_1 GENMASK(13, 8) -#define MT_TX_ALC_CFG_0_LIMIT_0 GENMASK(21, 16) -#define MT_TX_ALC_CFG_0_LIMIT_1 GENMASK(29, 24) - -#define MT_TX_ALC_CFG_1 0x13b4 -#define MT_TX_ALC_CFG_1_TEMP_COMP GENMASK(5, 0) - -#define MT_TX_ALC_CFG_2 0x13a8 -#define MT_TX_ALC_CFG_2_TEMP_COMP GENMASK(5, 0) - -#define MT_TX0_BB_GAIN_ATTEN 0x13c0 - -#define MT_TX_ALC_VGA3 0x13c8 - -#define MT_TX_PROT_CFG6 0x13e0 -#define MT_TX_PROT_CFG7 0x13e4 -#define MT_TX_PROT_CFG8 0x13e8 - -#define MT_PIFS_TX_CFG 0x13ec - -#define MT_RX_FILTR_CFG 0x1400 - -#define MT_RX_FILTR_CFG_CRC_ERR BIT(0) -#define MT_RX_FILTR_CFG_PHY_ERR BIT(1) -#define MT_RX_FILTR_CFG_PROMISC BIT(2) -#define MT_RX_FILTR_CFG_OTHER_BSS BIT(3) -#define MT_RX_FILTR_CFG_VER_ERR BIT(4) -#define MT_RX_FILTR_CFG_MCAST BIT(5) -#define MT_RX_FILTR_CFG_BCAST BIT(6) -#define MT_RX_FILTR_CFG_DUP BIT(7) -#define MT_RX_FILTR_CFG_CFACK BIT(8) -#define MT_RX_FILTR_CFG_CFEND BIT(9) -#define MT_RX_FILTR_CFG_ACK BIT(10) -#define MT_RX_FILTR_CFG_CTS BIT(11) -#define MT_RX_FILTR_CFG_RTS BIT(12) -#define MT_RX_FILTR_CFG_PSPOLL BIT(13) -#define MT_RX_FILTR_CFG_BA BIT(14) -#define MT_RX_FILTR_CFG_BAR BIT(15) -#define MT_RX_FILTR_CFG_CTRL_RSV BIT(16) - -#define MT_AUTO_RSP_CFG 0x1404 - -#define MT_AUTO_RSP_PREAMB_SHORT BIT(4) - -#define MT_LEGACY_BASIC_RATE 0x1408 -#define MT_HT_BASIC_RATE 0x140c -#define MT_HT_CTRL_CFG 0x1410 -#define MT_RX_PARSER_CFG 0x1418 -#define MT_RX_PARSER_RX_SET_NAV_ALL BIT(0) - -#define MT_EXT_CCA_CFG 0x141c -#define MT_EXT_CCA_CFG_CCA0 GENMASK(1, 0) -#define MT_EXT_CCA_CFG_CCA1 GENMASK(3, 2) -#define MT_EXT_CCA_CFG_CCA2 GENMASK(5, 4) -#define MT_EXT_CCA_CFG_CCA3 GENMASK(7, 6) -#define MT_EXT_CCA_CFG_CCA_MASK GENMASK(11, 8) -#define MT_EXT_CCA_CFG_ED_CCA_MASK GENMASK(15, 12) - -#define MT_TX_SW_CFG3 0x1478 - -#define MT_PN_PAD_MODE 0x150c - -#define MT_TXOP_HLDR_ET 0x1608 - -#define MT_PROT_AUTO_TX_CFG 0x1648 - -#define MT_RX_STA_CNT0 0x1700 -#define MT_RX_STA_CNT1 0x1704 -#define MT_RX_STA_CNT2 0x1708 -#define MT_TX_STA_CNT0 0x170c -#define MT_TX_STA_CNT1 0x1710 -#define MT_TX_STA_CNT2 0x1714 - -/* Vendor driver defines content of the second word of STAT_FIFO as follows: - * MT_TX_STAT_FIFO_RATE GENMASK(26, 16) - * MT_TX_STAT_FIFO_ETXBF BIT(27) - * MT_TX_STAT_FIFO_SND BIT(28) - * MT_TX_STAT_FIFO_ITXBF BIT(29) - * However, tests show that b16-31 have the same layout as TXWI rate_ctl - * with rate set to rate at which frame was acked. 
- */ -#define MT_TX_STAT_FIFO 0x1718 -#define MT_TX_STAT_FIFO_VALID BIT(0) -#define MT_TX_STAT_FIFO_SUCCESS BIT(5) -#define MT_TX_STAT_FIFO_AGGR BIT(6) -#define MT_TX_STAT_FIFO_ACKREQ BIT(7) -#define MT_TX_STAT_FIFO_WCID GENMASK(15, 8) -#define MT_TX_STAT_FIFO_RATE GENMASK(31, 16) - -#define MT_TX_AGG_STAT 0x171c - -#define MT_TX_AGG_CNT_BASE0 0x1720 - -#define MT_MPDU_DENSITY_CNT 0x1740 - -#define MT_TX_AGG_CNT_BASE1 0x174c - -#define MT_TX_AGG_CNT(_id) ((_id) < 8 ? \ - MT_TX_AGG_CNT_BASE0 + ((_id) << 2) : \ - MT_TX_AGG_CNT_BASE1 + ((_id - 8) << 2)) - -#define MT_TX_STAT_FIFO_EXT 0x1798 -#define MT_TX_STAT_FIFO_EXT_RETRY GENMASK(7, 0) -#define MT_TX_STAT_FIFO_EXT_PKTID GENMASK(15, 8) - -#define MT_BBP_CORE_BASE 0x2000 -#define MT_BBP_IBI_BASE 0x2100 -#define MT_BBP_AGC_BASE 0x2300 -#define MT_BBP_TXC_BASE 0x2400 -#define MT_BBP_RXC_BASE 0x2500 -#define MT_BBP_TXO_BASE 0x2600 -#define MT_BBP_TXBE_BASE 0x2700 -#define MT_BBP_RXFE_BASE 0x2800 -#define MT_BBP_RXO_BASE 0x2900 -#define MT_BBP_DFS_BASE 0x2a00 -#define MT_BBP_TR_BASE 0x2b00 -#define MT_BBP_CAL_BASE 0x2c00 -#define MT_BBP_DSC_BASE 0x2e00 -#define MT_BBP_PFMU_BASE 0x2f00 - -#define MT_BBP(_type, _n) (MT_BBP_##_type##_BASE + ((_n) << 2)) - -#define MT_BBP_CORE_R1_BW GENMASK(4, 3) - -#define MT_BBP_AGC_R0_CTRL_CHAN GENMASK(9, 8) -#define MT_BBP_AGC_R0_BW GENMASK(14, 12) - -/* AGC, R4/R5 */ -#define MT_BBP_AGC_LNA_GAIN GENMASK(21, 16) - -/* AGC, R8/R9 */ -#define MT_BBP_AGC_GAIN GENMASK(14, 8) - -#define MT_BBP_AGC20_RSSI0 GENMASK(7, 0) -#define MT_BBP_AGC20_RSSI1 GENMASK(15, 8) - -#define MT_BBP_TXBE_R0_CTRL_CHAN GENMASK(1, 0) - -#define MT_WCID_ADDR_BASE 0x1800 -#define MT_WCID_ADDR(_n) (MT_WCID_ADDR_BASE + (_n) * 8) - -#define MT_SRAM_BASE 0x4000 - -#define MT_WCID_KEY_BASE 0x8000 -#define MT_WCID_KEY(_n) (MT_WCID_KEY_BASE + (_n) * 32) - -#define MT_WCID_IV_BASE 0xa000 -#define MT_WCID_IV(_n) (MT_WCID_IV_BASE + (_n) * 8) - -#define MT_WCID_ATTR_BASE 0xa800 -#define MT_WCID_ATTR(_n) (MT_WCID_ATTR_BASE + (_n) * 4) - -#define MT_WCID_ATTR_PAIRWISE BIT(0) -#define MT_WCID_ATTR_PKEY_MODE GENMASK(3, 1) -#define MT_WCID_ATTR_BSS_IDX GENMASK(6, 4) -#define MT_WCID_ATTR_RXWI_UDF GENMASK(9, 7) -#define MT_WCID_ATTR_PKEY_MODE_EXT BIT(10) -#define MT_WCID_ATTR_BSS_IDX_EXT BIT(11) -#define MT_WCID_ATTR_WAPI_MCBC BIT(15) -#define MT_WCID_ATTR_WAPI_KEYID GENMASK(31, 24) - -#define MT_SKEY_BASE_0 0xac00 -#define MT_SKEY_BASE_1 0xb400 -#define MT_SKEY_0(_bss, _idx) \ - (MT_SKEY_BASE_0 + (4 * (_bss) + _idx) * 32) -#define MT_SKEY_1(_bss, _idx) \ - (MT_SKEY_BASE_1 + (4 * ((_bss) & 7) + _idx) * 32) -#define MT_SKEY(_bss, _idx) \ - ((_bss & 8) ? MT_SKEY_1(_bss, _idx) : MT_SKEY_0(_bss, _idx)) - -#define MT_SKEY_MODE_BASE_0 0xb000 -#define MT_SKEY_MODE_BASE_1 0xb3f0 -#define MT_SKEY_MODE_0(_bss) \ - (MT_SKEY_MODE_BASE_0 + ((_bss / 2) << 2)) -#define MT_SKEY_MODE_1(_bss) \ - (MT_SKEY_MODE_BASE_1 + ((((_bss) & 7) / 2) << 2)) -#define MT_SKEY_MODE(_bss) \ - ((_bss & 8) ? 
MT_SKEY_MODE_1(_bss) : MT_SKEY_MODE_0(_bss)) -#define MT_SKEY_MODE_MASK GENMASK(3, 0) -#define MT_SKEY_MODE_SHIFT(_bss, _idx) (4 * ((_idx) + 4 * (_bss & 1))) - -#define MT_BEACON_BASE 0xc000 - -#define MT_TEMP_SENSOR 0x1d000 -#define MT_TEMP_SENSOR_VAL GENMASK(6, 0) - -enum mt76_cipher_type { - MT_CIPHER_NONE, - MT_CIPHER_WEP40, - MT_CIPHER_WEP104, - MT_CIPHER_TKIP, - MT_CIPHER_AES_CCMP, - MT_CIPHER_CKIP40, - MT_CIPHER_CKIP104, - MT_CIPHER_CKIP128, - MT_CIPHER_WAPI, -}; - -#endif diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/trace.h b/drivers/net/wireless/mediatek/mt76/mt76x0/trace.h index 8a752a09f2dc..75d1d6738c34 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/trace.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/trace.h @@ -17,7 +17,6 @@ #include <linux/tracepoint.h> #include "mt76x0.h" -#include "mac.h" #undef TRACE_SYSTEM #define TRACE_SYSTEM mt76x0 @@ -178,11 +177,11 @@ DECLARE_EVENT_CLASS(dev_simple_evt, ); TRACE_EVENT(mt76x0_rx, - TP_PROTO(struct mt76_dev *dev, struct mt76x0_rxwi *rxwi, u32 f), + TP_PROTO(struct mt76_dev *dev, struct mt76x02_rxwi *rxwi, u32 f), TP_ARGS(dev, rxwi, f), TP_STRUCT__entry( DEV_ENTRY - __field_struct(struct mt76x0_rxwi, rxwi) + __field_struct(struct mt76x02_rxwi, rxwi) __field(u32, fce_info) ), TP_fast_assign( @@ -197,13 +196,13 @@ TRACE_EVENT(mt76x0_rx, TRACE_EVENT(mt76x0_tx, TP_PROTO(struct mt76_dev *dev, struct sk_buff *skb, - struct mt76_sta *sta, struct mt76_txwi *h), + struct mt76x02_sta *sta, struct mt76x02_txwi *h), TP_ARGS(dev, skb, sta, h), TP_STRUCT__entry( DEV_ENTRY - __field_struct(struct mt76_txwi, h) + __field_struct(struct mt76x02_txwi, h) __field(struct sk_buff *, skb) - __field(struct mt76_sta *, sta) + __field(struct mt76x02_sta *, sta) ), TP_fast_assign( DEV_ASSIGN; @@ -211,11 +210,11 @@ TRACE_EVENT(mt76x0_tx, __entry->skb = skb; __entry->sta = sta; ), - TP_printk(DEV_PR_FMT "skb:%p sta:%p flg:%04hx rate_ctl:%04hx " + TP_printk(DEV_PR_FMT "skb:%p sta:%p flg:%04hx rate:%04hx " "ack:%02hhx wcid:%02hhx len_ctl:%05hx", DEV_PR_ARG, __entry->skb, __entry->sta, le16_to_cpu(__entry->h.flags), - le16_to_cpu(__entry->h.rate_ctl), + le16_to_cpu(__entry->h.rate), __entry->h.ack_ctl, __entry->h.wcid, le16_to_cpu(__entry->h.len_ctl)) ); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/tx.c b/drivers/net/wireless/mediatek/mt76/mt76x0/tx.c deleted file mode 100644 index 751b49c28ae5..000000000000 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/tx.c +++ /dev/null @@ -1,270 +0,0 @@ -/* - * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org> - * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 - * as published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ - -#include "mt76x0.h" -#include "trace.h" - -/* Take mac80211 Q id from the skb and translate it to hardware Q id */ -static u8 skb2q(struct sk_buff *skb) -{ - int qid = skb_get_queue_mapping(skb); - - if (WARN_ON(qid >= MT_TXQ_PSD)) { - qid = MT_TXQ_BE; - skb_set_queue_mapping(skb, qid); - } - - return q2hwq(qid); -} - -static void mt76x0_tx_skb_remove_dma_overhead(struct sk_buff *skb, - struct ieee80211_tx_info *info) -{ - int pkt_len = (unsigned long)info->status.status_driver_data[0]; - - skb_pull(skb, sizeof(struct mt76_txwi) + 4); - if (ieee80211_get_hdrlen_from_skb(skb) % 4) - mt76x0_remove_hdr_pad(skb); - - skb_trim(skb, pkt_len); -} - -void mt76x0_tx_status(struct mt76x0_dev *dev, struct sk_buff *skb) -{ - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - - mt76x0_tx_skb_remove_dma_overhead(skb, info); - - ieee80211_tx_info_clear_status(info); - info->status.rates[0].idx = -1; - info->flags |= IEEE80211_TX_STAT_ACK; - - spin_lock(&dev->mac_lock); - ieee80211_tx_status(dev->mt76.hw, skb); - spin_unlock(&dev->mac_lock); -} - -static int mt76x0_skb_rooms(struct mt76x0_dev *dev, struct sk_buff *skb) -{ - int hdr_len = ieee80211_get_hdrlen_from_skb(skb); - u32 need_head; - - need_head = sizeof(struct mt76_txwi) + 4; - if (hdr_len % 4) - need_head += 2; - - return skb_cow(skb, need_head); -} - -static struct mt76_txwi * -mt76x0_push_txwi(struct mt76x0_dev *dev, struct sk_buff *skb, - struct ieee80211_sta *sta, struct mt76_wcid *wcid, - int pkt_len) -{ - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - struct ieee80211_tx_rate *rate = &info->control.rates[0]; - struct mt76_txwi *txwi; - unsigned long flags; - u16 txwi_flags = 0; - u32 pkt_id; - u16 rate_ctl; - u8 nss; - - txwi = (struct mt76_txwi *)skb_push(skb, sizeof(struct mt76_txwi)); - memset(txwi, 0, sizeof(*txwi)); - - if (!wcid->tx_rate_set) - ieee80211_get_tx_rates(info->control.vif, sta, skb, - info->control.rates, 1); - - spin_lock_irqsave(&dev->mt76.lock, flags); - if (rate->idx < 0 || !rate->count) { - rate_ctl = wcid->tx_rate; - nss = wcid->tx_rate_nss; - } else { - rate_ctl = mt76x0_mac_tx_rate_val(dev, rate, &nss); - } - spin_unlock_irqrestore(&dev->mt76.lock, flags); - - txwi->rate_ctl = cpu_to_le16(rate_ctl); - - if (info->flags & IEEE80211_TX_CTL_LDPC) - txwi->rate_ctl |= cpu_to_le16(MT_RXWI_RATE_LDPC); - if ((info->flags & IEEE80211_TX_CTL_STBC) && nss == 1) - txwi->rate_ctl |= cpu_to_le16(MT_RXWI_RATE_STBC); - if (nss > 1 && sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC) - txwi_flags |= MT_TXWI_FLAGS_MMPS; - - if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) { - txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ; - pkt_id = 1; - } else { - pkt_id = 0; - } - - if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) - pkt_id |= MT_TXWI_PKTID_PROBE; - - if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) - txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ; - - if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) { - u8 ba_size = IEEE80211_MIN_AMPDU_BUF; - - ba_size <<= sta->ht_cap.ampdu_factor; - ba_size = min_t(int, 7, ba_size - 1); - if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) { - ba_size = 0; - } else { - txwi_flags |= MT_TXWI_FLAGS_AMPDU; - txwi_flags |= FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY, - sta->ht_cap.ampdu_density); - } - txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size); - } - - txwi->wcid = wcid->idx; - txwi->flags |= cpu_to_le16(txwi_flags); - txwi->len_ctl = cpu_to_le16(pkt_len); - txwi->pktid = pkt_id; - - return txwi; -} - -void mt76x0_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control 
*control, - struct sk_buff *skb) -{ - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - struct mt76x0_dev *dev = hw->priv; - struct ieee80211_vif *vif = info->control.vif; - struct ieee80211_sta *sta = control->sta; - struct mt76_sta *msta = NULL; - struct mt76_wcid *wcid = dev->mon_wcid; - struct mt76_txwi *txwi; - int pkt_len = skb->len; - int hw_q = skb2q(skb); - - BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1); - info->status.status_driver_data[0] = (void *)(unsigned long)pkt_len; - - if (mt76x0_skb_rooms(dev, skb) || mt76x0_insert_hdr_pad(skb)) { - ieee80211_free_txskb(dev->mt76.hw, skb); - return; - } - - if (sta) { - msta = (struct mt76_sta *) sta->drv_priv; - wcid = &msta->wcid; - } else if (vif && (!info->control.hw_key && wcid->hw_key_idx != -1)) { - struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv; - - wcid = &mvif->group_wcid; - } - - txwi = mt76x0_push_txwi(dev, skb, sta, wcid, pkt_len); - - if (mt76x0_dma_enqueue_tx(dev, skb, wcid, hw_q)) - return; - - trace_mt76x0_tx(&dev->mt76, skb, msta, txwi); -} - -void mt76x0_tx_stat(struct work_struct *work) -{ - struct mt76x0_dev *dev = container_of(work, struct mt76x0_dev, - stat_work.work); - struct mt76_tx_status stat; - unsigned long flags; - int cleaned = 0; - u8 update = 1; - - while (!test_bit(MT76_REMOVED, &dev->mt76.state)) { - stat = mt76x0_mac_fetch_tx_status(dev); - if (!stat.valid) - break; - - mt76x0_send_tx_status(dev, &stat, &update); - - cleaned++; - } - trace_mt76x0_tx_status_cleaned(&dev->mt76, cleaned); - - spin_lock_irqsave(&dev->tx_lock, flags); - if (cleaned) - queue_delayed_work(dev->stat_wq, &dev->stat_work, - msecs_to_jiffies(10)); - else if (test_and_clear_bit(MT76_MORE_STATS, &dev->mt76.state)) - queue_delayed_work(dev->stat_wq, &dev->stat_work, - msecs_to_jiffies(20)); - else - clear_bit(MT76_READING_STATS, &dev->mt76.state); - spin_unlock_irqrestore(&dev->tx_lock, flags); -} - -int mt76x0_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - u16 queue, const struct ieee80211_tx_queue_params *params) -{ - struct mt76x0_dev *dev = hw->priv; - u8 cw_min = 5, cw_max = 10, hw_q = q2hwq(queue); - u32 val; - - /* TODO: should we do funny things with the parameters? - * See what mt76x0_set_default_edca() used to do in init.c. - */ - - if (params->cw_min) - cw_min = fls(params->cw_min); - if (params->cw_max) - cw_max = fls(params->cw_max); - - WARN_ON(params->txop > 0xff); - WARN_ON(params->aifs > 0xf); - WARN_ON(cw_min > 0xf); - WARN_ON(cw_max > 0xf); - - val = FIELD_PREP(MT_EDCA_CFG_AIFSN, params->aifs) | - FIELD_PREP(MT_EDCA_CFG_CWMIN, cw_min) | - FIELD_PREP(MT_EDCA_CFG_CWMAX, cw_max); - /* TODO: based on user-controlled EnableTxBurst var vendor drv sets - * a really long txop on AC0 (see connect.c:2009) but only on - * connect? When not connected should be 0. 
- */ - if (!hw_q) - val |= 0x60; - else - val |= FIELD_PREP(MT_EDCA_CFG_TXOP, params->txop); - mt76_wr(dev, MT_EDCA_CFG_AC(hw_q), val); - - val = mt76_rr(dev, MT_WMM_TXOP(hw_q)); - val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(hw_q)); - val |= params->txop << MT_WMM_TXOP_SHIFT(hw_q); - mt76_wr(dev, MT_WMM_TXOP(hw_q), val); - - val = mt76_rr(dev, MT_WMM_AIFSN); - val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(hw_q)); - val |= params->aifs << MT_WMM_AIFSN_SHIFT(hw_q); - mt76_wr(dev, MT_WMM_AIFSN, val); - - val = mt76_rr(dev, MT_WMM_CWMIN); - val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(hw_q)); - val |= cw_min << MT_WMM_CWMIN_SHIFT(hw_q); - mt76_wr(dev, MT_WMM_CWMIN, val); - - val = mt76_rr(dev, MT_WMM_CWMAX); - val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(hw_q)); - val |= cw_max << MT_WMM_CWMAX_SHIFT(hw_q); - mt76_wr(dev, MT_WMM_CWMAX, val); - - return 0; -} diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c index 54ae1f113be2..a7fd36c2f633 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c @@ -16,8 +16,9 @@ #include <linux/usb.h> #include "mt76x0.h" -#include "usb.h" +#include "mcu.h" #include "trace.h" +#include "../mt76x02_usb.h" static struct usb_device_id mt76x0_device_table[] = { { USB_DEVICE(0x148F, 0x7610) }, /* MT7610U */ @@ -40,256 +41,215 @@ static struct usb_device_id mt76x0_device_table[] = { { USB_DEVICE(0x20f4, 0x806b) }, /* TRENDnet TEW-806UBH */ { USB_DEVICE(0x7392, 0xc711) }, /* Devolo Wifi ac Stick */ { USB_DEVICE(0x0df6, 0x0079) }, /* Sitecom Europe B.V. ac Stick */ - { USB_DEVICE(0x2357, 0x0105) }, /* TP-LINK Archer T1U */ + { USB_DEVICE(0x2357, 0x0105), + .driver_info = 1, }, /* TP-LINK Archer T1U */ { USB_DEVICE_AND_INTERFACE_INFO(0x0E8D, 0x7630, 0xff, 0x2, 0xff)}, /* MT7630U */ { USB_DEVICE_AND_INTERFACE_INFO(0x0E8D, 0x7650, 0xff, 0x2, 0xff)}, /* MT7650U */ { 0, } }; -bool mt76x0_usb_alloc_buf(struct mt76x0_dev *dev, size_t len, - struct mt76x0_dma_buf *buf) +static void mt76x0_init_usb_dma(struct mt76x02_dev *dev) { - struct usb_device *usb_dev = mt76x0_to_usb_dev(dev); + u32 val; - buf->len = len; - buf->urb = usb_alloc_urb(0, GFP_KERNEL); - buf->buf = usb_alloc_coherent(usb_dev, buf->len, GFP_KERNEL, &buf->dma); + val = mt76_rr(dev, MT_USB_DMA_CFG); - return !buf->urb || !buf->buf; -} - -void mt76x0_usb_free_buf(struct mt76x0_dev *dev, struct mt76x0_dma_buf *buf) -{ - struct usb_device *usb_dev = mt76x0_to_usb_dev(dev); - - usb_free_coherent(usb_dev, buf->len, buf->buf, buf->dma); - usb_free_urb(buf->urb); -} - -int mt76x0_usb_submit_buf(struct mt76x0_dev *dev, int dir, int ep_idx, - struct mt76x0_dma_buf *buf, gfp_t gfp, - usb_complete_t complete_fn, void *context) -{ - struct usb_device *usb_dev = mt76x0_to_usb_dev(dev); - unsigned pipe; - int ret; - - if (dir == USB_DIR_IN) - pipe = usb_rcvbulkpipe(usb_dev, dev->in_ep[ep_idx]); - else - pipe = usb_sndbulkpipe(usb_dev, dev->out_ep[ep_idx]); - - usb_fill_bulk_urb(buf->urb, usb_dev, pipe, buf->buf, buf->len, - complete_fn, context); - buf->urb->transfer_dma = buf->dma; - buf->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; - - trace_mt76x0_submit_urb(&dev->mt76, buf->urb); - ret = usb_submit_urb(buf->urb, gfp); - if (ret) - dev_err(dev->mt76.dev, "Error: submit URB dir:%d ep:%d failed:%d\n", - dir, ep_idx, ret); - return ret; -} - -void mt76x0_complete_urb(struct urb *urb) -{ - struct completion *cmpl = urb->context; + val |= MT_USB_DMA_CFG_RX_BULK_EN | + MT_USB_DMA_CFG_TX_BULK_EN; 
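/* [Editorial aside -- not part of the patch] The MT_USB_DMA_CFG update being
 * built here in mt76x0_init_usb_dma() follows the driver's usual
 * read-modify-write idiom. A minimal sketch of that idiom, assuming a
 * hypothetical helper name (mt76_rr()/mt76_wr() are the real mt76 register
 * accessors used throughout this patch):
 */
static void mt76x0_usb_dma_rmw(struct mt76x02_dev *dev, u32 clear, u32 set)
{
	u32 val = mt76_rr(dev, MT_USB_DMA_CFG);	/* read current config */

	val &= ~clear;				/* drop unwanted bits */
	val |= set;				/* raise requested bits */
	mt76_wr(dev, MT_USB_DMA_CFG, val);	/* write back */
}
/* With such a helper, enabling bulk transfers while disabling RX
 * aggregation (the net effect of the surrounding hunk) would read:
 * mt76x0_usb_dma_rmw(dev, MT_USB_DMA_CFG_RX_BULK_AGG_EN,
 *		      MT_USB_DMA_CFG_RX_BULK_EN | MT_USB_DMA_CFG_TX_BULK_EN);
 */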
- complete(cmpl); -} + /* disable AGGR_BULK_RX in order to receive one + * frame in each rx urb and avoid copies + */ + val &= ~MT_USB_DMA_CFG_RX_BULK_AGG_EN; + mt76_wr(dev, MT_USB_DMA_CFG, val); -int mt76x0_vendor_request(struct mt76x0_dev *dev, const u8 req, - const u8 direction, const u16 val, const u16 offset, - void *buf, const size_t buflen) -{ - int i, ret; - struct usb_device *usb_dev = mt76x0_to_usb_dev(dev); - const u8 req_type = direction | USB_TYPE_VENDOR | USB_RECIP_DEVICE; - const unsigned int pipe = (direction == USB_DIR_IN) ? - usb_rcvctrlpipe(usb_dev, 0) : usb_sndctrlpipe(usb_dev, 0); - - for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) { - ret = usb_control_msg(usb_dev, pipe, req, req_type, - val, offset, buf, buflen, - MT_VEND_REQ_TOUT_MS); - trace_mt76x0_vend_req(&dev->mt76, pipe, req, req_type, val, offset, - buf, buflen, ret); - - if (ret == -ENODEV) - set_bit(MT76_REMOVED, &dev->mt76.state); - if (ret >= 0 || ret == -ENODEV) - return ret; - - msleep(5); - } + val = mt76_rr(dev, MT_COM_REG0); + if (val & 1) + dev_dbg(dev->mt76.dev, "MCU not ready\n"); - dev_err(dev->mt76.dev, "Vendor request req:%02x off:%04x failed:%d\n", - req, offset, ret); + val = mt76_rr(dev, MT_USB_DMA_CFG); - return ret; + val |= MT_USB_DMA_CFG_RX_DROP_OR_PAD; + mt76_wr(dev, MT_USB_DMA_CFG, val); + val &= ~MT_USB_DMA_CFG_RX_DROP_OR_PAD; + mt76_wr(dev, MT_USB_DMA_CFG, val); } -void mt76x0_vendor_reset(struct mt76x0_dev *dev) +static void mt76x0u_cleanup(struct mt76x02_dev *dev) { - mt76x0_vendor_request(dev, MT_VEND_DEV_MODE, USB_DIR_OUT, - MT_VEND_DEV_MODE_RESET, 0, NULL, 0); + clear_bit(MT76_STATE_INITIALIZED, &dev->mt76.state); + mt76x0_chip_onoff(dev, false, false); + mt76u_queues_deinit(&dev->mt76); + mt76u_mcu_deinit(&dev->mt76); } -static u32 mt76x0_rr(struct mt76_dev *dev, u32 offset) +static void mt76x0u_mac_stop(struct mt76x02_dev *dev) { - struct mt76x0_dev *mdev = (struct mt76x0_dev *) dev; - int ret; - u32 val = ~0; + clear_bit(MT76_STATE_RUNNING, &dev->mt76.state); + cancel_delayed_work_sync(&dev->cal_work); + cancel_delayed_work_sync(&dev->mac_work); + mt76u_stop_stat_wk(&dev->mt76); - WARN_ONCE(offset > USHRT_MAX, "read high off:%08x", offset); + if (test_bit(MT76_REMOVED, &dev->mt76.state)) + return; - mutex_lock(&mdev->usb_ctrl_mtx); + mt76_clear(dev, MT_BEACON_TIME_CFG, MT_BEACON_TIME_CFG_TIMER_EN | + MT_BEACON_TIME_CFG_SYNC_MODE | MT_BEACON_TIME_CFG_TBTT_EN | + MT_BEACON_TIME_CFG_BEACON_TX); - ret = mt76x0_vendor_request((struct mt76x0_dev *)dev, MT_VEND_MULTI_READ, USB_DIR_IN, - 0, offset, mdev->data, MT_VEND_BUF); - if (ret == MT_VEND_BUF) - val = get_unaligned_le32(mdev->data); - else if (ret > 0) - dev_err(dev->dev, "Error: wrong size read:%d off:%08x\n", - ret, offset); + if (!mt76_poll(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_TX_BUSY, 0, 1000)) + dev_warn(dev->mt76.dev, "TX DMA did not stop\n"); - mutex_unlock(&mdev->usb_ctrl_mtx); + mt76x0_mac_stop(dev); - trace_mt76x0_reg_read(dev, offset, val); - return val; + if (!mt76_poll(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_RX_BUSY, 0, 1000)) + dev_warn(dev->mt76.dev, "RX DMA did not stop\n"); } -int mt76x0_vendor_single_wr(struct mt76x0_dev *dev, const u8 req, - const u16 offset, const u32 val) +static int mt76x0u_start(struct ieee80211_hw *hw) { - struct mt76x0_dev *mdev = dev; + struct mt76x02_dev *dev = hw->priv; int ret; - mutex_lock(&mdev->usb_ctrl_mtx); + mutex_lock(&dev->mt76.mutex); - ret = mt76x0_vendor_request(dev, req, USB_DIR_OUT, - val & 0xffff, offset, NULL, 0); - if (!ret) - ret = mt76x0_vendor_request(dev, req, USB_DIR_OUT, 
- val >> 16, offset + 2, NULL, 0); + ret = mt76x0_mac_start(dev); + if (ret) + goto out; - mutex_unlock(&mdev->usb_ctrl_mtx); + ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mac_work, + MT_CALIBRATE_INTERVAL); + ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work, + MT_CALIBRATE_INTERVAL); + set_bit(MT76_STATE_RUNNING, &dev->mt76.state); +out: + mutex_unlock(&dev->mt76.mutex); return ret; } -static void mt76x0_wr(struct mt76_dev *dev, u32 offset, u32 val) +static void mt76x0u_stop(struct ieee80211_hw *hw) { - struct mt76x0_dev *mdev = (struct mt76x0_dev *) dev; - int ret; + struct mt76x02_dev *dev = hw->priv; - WARN_ONCE(offset > USHRT_MAX, "write high off:%08x", offset); - - mutex_lock(&mdev->usb_ctrl_mtx); - - put_unaligned_le32(val, mdev->data); - ret = mt76x0_vendor_request(mdev, MT_VEND_MULTI_WRITE, USB_DIR_OUT, - 0, offset, mdev->data, MT_VEND_BUF); - trace_mt76x0_reg_write(dev, offset, val); - - mutex_unlock(&mdev->usb_ctrl_mtx); + mutex_lock(&dev->mt76.mutex); + mt76x0u_mac_stop(dev); + mutex_unlock(&dev->mt76.mutex); } -static u32 mt76x0_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val) -{ - val |= mt76x0_rr(dev, offset) & ~mask; - mt76x0_wr(dev, offset, val); - return val; -} +static const struct ieee80211_ops mt76x0u_ops = { + .tx = mt76x02_tx, + .start = mt76x0u_start, + .stop = mt76x0u_stop, + .add_interface = mt76x02_add_interface, + .remove_interface = mt76x02_remove_interface, + .config = mt76x0_config, + .configure_filter = mt76x02_configure_filter, + .bss_info_changed = mt76x0_bss_info_changed, + .sta_add = mt76x02_sta_add, + .sta_remove = mt76x02_sta_remove, + .set_key = mt76x02_set_key, + .conf_tx = mt76x02_conf_tx, + .sw_scan_start = mt76x0_sw_scan, + .sw_scan_complete = mt76x0_sw_scan_complete, + .ampdu_action = mt76x02_ampdu_action, + .sta_rate_tbl_update = mt76x02_sta_rate_tbl_update, + .set_rts_threshold = mt76x0_set_rts_threshold, + .wake_tx_queue = mt76_wake_tx_queue, +}; -static void mt76x0_wr_copy(struct mt76_dev *dev, u32 offset, - const void *data, int len) +static int mt76x0u_register_device(struct mt76x02_dev *dev) { - WARN_ONCE(offset & 3, "unaligned write copy off:%08x", offset); - WARN_ONCE(len & 3, "short write copy off:%08x", offset); + struct ieee80211_hw *hw = dev->mt76.hw; + int err; - mt76x0_burst_write_regs((struct mt76x0_dev *) dev, offset, data, len / 4); -} + err = mt76u_alloc_queues(&dev->mt76); + if (err < 0) + goto out_err; -void mt76x0_addr_wr(struct mt76x0_dev *dev, const u32 offset, const u8 *addr) -{ - mt76_wr(dev, offset, get_unaligned_le32(addr)); - mt76_wr(dev, offset + 4, addr[4] | addr[5] << 8); -} + err = mt76u_mcu_init_rx(&dev->mt76); + if (err < 0) + goto out_err; -static int mt76x0_assign_pipes(struct usb_interface *usb_intf, - struct mt76x0_dev *dev) -{ - struct usb_endpoint_descriptor *ep_desc; - struct usb_host_interface *intf_desc = usb_intf->cur_altsetting; - unsigned i, ep_i = 0, ep_o = 0; - - BUILD_BUG_ON(sizeof(dev->in_ep) < __MT_EP_IN_MAX); - BUILD_BUG_ON(sizeof(dev->out_ep) < __MT_EP_OUT_MAX); - - for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) { - ep_desc = &intf_desc->endpoint[i].desc; - - if (usb_endpoint_is_bulk_in(ep_desc) && - ep_i++ < __MT_EP_IN_MAX) { - dev->in_ep[ep_i - 1] = usb_endpoint_num(ep_desc); - dev->in_max_packet = usb_endpoint_maxp(ep_desc); - /* Note: this is ignored by usb sub-system but vendor - * code does it. We can drop this at some point. 
- */ - dev->in_ep[ep_i - 1] |= USB_DIR_IN; - } else if (usb_endpoint_is_bulk_out(ep_desc) && - ep_o++ < __MT_EP_OUT_MAX) { - dev->out_ep[ep_o - 1] = usb_endpoint_num(ep_desc); - dev->out_max_packet = usb_endpoint_maxp(ep_desc); - } + mt76x0_chip_onoff(dev, true, true); + if (!mt76x02_wait_for_mac(&dev->mt76)) { + err = -ETIMEDOUT; + goto out_err; } - if (ep_i != __MT_EP_IN_MAX || ep_o != __MT_EP_OUT_MAX) { - dev_err(dev->mt76.dev, "Error: wrong pipe number in:%d out:%d\n", - ep_i, ep_o); - return -EINVAL; - } + err = mt76x0u_mcu_init(dev); + if (err < 0) + goto out_err; + + mt76x0_init_usb_dma(dev); + err = mt76x0_init_hardware(dev); + if (err < 0) + goto out_err; + + mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e); + mt76_wr(dev, MT_TXOP_CTRL_CFG, + FIELD_PREP(MT_TXOP_TRUN_EN, 0x3f) | + FIELD_PREP(MT_TXOP_EXT_CCA_DLY, 0x58)); + + err = mt76x0_register_device(dev); + if (err < 0) + goto out_err; + + /* check hw sg support in order to enable AMSDU */ + if (mt76u_check_sg(&dev->mt76)) + hw->max_tx_fragments = MT_SG_MAX_SIZE; + else + hw->max_tx_fragments = 1; + + set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state); return 0; + +out_err: + mt76x0u_cleanup(dev); + return err; } -static int mt76x0_probe(struct usb_interface *usb_intf, +static int mt76x0u_probe(struct usb_interface *usb_intf, const struct usb_device_id *id) { + static const struct mt76_driver_ops drv_ops = { + .tx_prepare_skb = mt76x02u_tx_prepare_skb, + .tx_complete_skb = mt76x02u_tx_complete_skb, + .tx_status_data = mt76x02_tx_status_data, + .rx_skb = mt76x02_queue_rx_skb, + }; struct usb_device *usb_dev = interface_to_usbdev(usb_intf); - struct mt76x0_dev *dev; + struct mt76x02_dev *dev; u32 asic_rev, mac_rev; int ret; - static const struct mt76_bus_ops usb_ops = { - .rr = mt76x0_rr, - .wr = mt76x0_wr, - .rmw = mt76x0_rmw, - .copy = mt76x0_wr_copy, - }; - dev = mt76x0_alloc_device(&usb_intf->dev); + dev = mt76x0_alloc_device(&usb_intf->dev, &drv_ops, + &mt76x0u_ops); if (!dev) return -ENOMEM; + /* Quirk for Archer T1U */ + if (id->driver_info) + dev->no_2ghz = true; + usb_dev = usb_get_dev(usb_dev); usb_reset_device(usb_dev); usb_set_intfdata(usb_intf, dev); - dev->mt76.bus = &usb_ops; - - ret = mt76x0_assign_pipes(usb_intf, dev); + mt76x02u_init_mcu(&dev->mt76); + ret = mt76u_init(&dev->mt76, usb_intf); if (ret) goto err; /* Disable the HW, otherwise the MCU fails to initialize on hot reboot */ mt76x0_chip_onoff(dev, false, false); - ret = mt76x0_wait_asic_ready(dev); - if (ret) + if (!mt76x02_wait_for_mac(&dev->mt76)) { + ret = -ETIMEDOUT; goto err; + } asic_rev = mt76_rr(dev, MT_ASIC_VERSION); mac_rev = mt76_rr(dev, MT_MAC_CSR0); @@ -300,77 +260,89 @@ static int mt76x0_probe(struct usb_interface *usb_intf, if (!(mt76_rr(dev, MT_EFUSE_CTRL) & MT_EFUSE_CTRL_SEL)) dev_warn(dev->mt76.dev, "Warning: eFUSE not present\n"); - ret = mt76x0_init_hardware(dev); - if (ret) + ret = mt76x0u_register_device(dev); + if (ret < 0) goto err; - ret = mt76x0_register_device(dev); - if (ret) - goto err_hw; - - set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state); - return 0; -err_hw: - mt76x0_cleanup(dev); + err: usb_set_intfdata(usb_intf, NULL); usb_put_dev(interface_to_usbdev(usb_intf)); - destroy_workqueue(dev->stat_wq); ieee80211_free_hw(dev->mt76.hw); return ret; } static void mt76x0_disconnect(struct usb_interface *usb_intf) { - struct mt76x0_dev *dev = usb_get_intfdata(usb_intf); + struct mt76x02_dev *dev = usb_get_intfdata(usb_intf); bool initialized = test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state); if (!initialized) return;
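/* [Editorial aside -- not part of the patch] The Archer T1U handling in
 * mt76x0u_probe() above uses the standard USB quirk pattern: a flag is
 * encoded in the device table's .driver_info field and read back from the
 * matched entry in probe. A self-contained sketch of that pattern, with
 * hypothetical table and probe names (the patch itself sets dev->no_2ghz
 * from id->driver_info):
 */
#include <linux/usb.h>

static const struct usb_device_id sketch_table[] = {
	{ USB_DEVICE(0x2357, 0x0105), .driver_info = 1 },	/* quirky device */
	{ USB_DEVICE(0x148f, 0x7610) },				/* no quirk */
	{ }
};

static int sketch_probe(struct usb_interface *intf,
			const struct usb_device_id *id)
{
	/* id points at the usb_device_id entry that matched, so a
	 * per-device quirk travels with the table entry itself
	 */
	if (id->driver_info)
		dev_info(&intf->dev, "applying device quirk\n");

	return 0;
}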
ieee80211_unregister_hw(dev->mt76.hw); - mt76x0_cleanup(dev); + mt76x0u_cleanup(dev); usb_set_intfdata(usb_intf, NULL); usb_put_dev(interface_to_usbdev(usb_intf)); - destroy_workqueue(dev->stat_wq); ieee80211_free_hw(dev->mt76.hw); } -static int mt76x0_suspend(struct usb_interface *usb_intf, pm_message_t state) +static int __maybe_unused mt76x0_suspend(struct usb_interface *usb_intf, + pm_message_t state) { - struct mt76x0_dev *dev = usb_get_intfdata(usb_intf); + struct mt76x02_dev *dev = usb_get_intfdata(usb_intf); + struct mt76_usb *usb = &dev->mt76.usb; - mt76x0_cleanup(dev); + mt76u_stop_queues(&dev->mt76); + mt76x0u_mac_stop(dev); + usb_kill_urb(usb->mcu.res.urb); return 0; } -static int mt76x0_resume(struct usb_interface *usb_intf) +static int __maybe_unused mt76x0_resume(struct usb_interface *usb_intf) { - struct mt76x0_dev *dev = usb_get_intfdata(usb_intf); + struct mt76x02_dev *dev = usb_get_intfdata(usb_intf); + struct mt76_usb *usb = &dev->mt76.usb; int ret; + reinit_completion(&usb->mcu.cmpl); + ret = mt76u_submit_buf(&dev->mt76, USB_DIR_IN, + MT_EP_IN_CMD_RESP, + &usb->mcu.res, GFP_KERNEL, + mt76u_mcu_complete_urb, + &usb->mcu.cmpl); + if (ret < 0) + goto err; + + ret = mt76u_submit_rx_buffers(&dev->mt76); + if (ret < 0) + goto err; + + tasklet_enable(&usb->rx_tasklet); + tasklet_enable(&usb->tx_tasklet); + ret = mt76x0_init_hardware(dev); if (ret) - return ret; - - set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state); + goto err; return 0; +err: + mt76x0u_cleanup(dev); + return ret; } MODULE_DEVICE_TABLE(usb, mt76x0_device_table); -MODULE_FIRMWARE(MT7610_FIRMWARE); MODULE_LICENSE("GPL"); static struct usb_driver mt76x0_driver = { .name = KBUILD_MODNAME, .id_table = mt76x0_device_table, - .probe = mt76x0_probe, + .probe = mt76x0u_probe, .disconnect = mt76x0_disconnect, .suspend = mt76x0_suspend, .resume = mt76x0_resume, diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.h b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.h deleted file mode 100644 index 492e431390a8..000000000000 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.h +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 - * as published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ - -#ifndef __MT76X0U_USB_H -#define __MT76X0U_USB_H - -#include "mt76x0.h" - -#define MT7610_FIRMWARE "mediatek/mt7610u.bin" - -#define MT_VEND_REQ_MAX_RETRY 10 -#define MT_VEND_REQ_TOUT_MS 300 - -#define MT_VEND_DEV_MODE_RESET 1 - -#define MT_VEND_BUF sizeof(__le32) - -static inline struct usb_device *mt76x0_to_usb_dev(struct mt76x0_dev *mt76x0) -{ - return interface_to_usbdev(to_usb_interface(mt76x0->mt76.dev)); -} - -static inline struct usb_device *mt76_to_usb_dev(struct mt76_dev *mt76) -{ - return interface_to_usbdev(to_usb_interface(mt76->dev)); -} - -static inline bool mt76x0_urb_has_error(struct urb *urb) -{ - return urb->status && - urb->status != -ENOENT && - urb->status != -ECONNRESET && - urb->status != -ESHUTDOWN; -} - -bool mt76x0_usb_alloc_buf(struct mt76x0_dev *dev, size_t len, - struct mt76x0_dma_buf *buf); -void mt76x0_usb_free_buf(struct mt76x0_dev *dev, struct mt76x0_dma_buf *buf); -int mt76x0_usb_submit_buf(struct mt76x0_dev *dev, int dir, int ep_idx, - struct mt76x0_dma_buf *buf, gfp_t gfp, - usb_complete_t complete_fn, void *context); -void mt76x0_complete_urb(struct urb *urb); - -int mt76x0_vendor_request(struct mt76x0_dev *dev, const u8 req, - const u8 direction, const u16 val, const u16 offset, - void *buf, const size_t buflen); -void mt76x0_vendor_reset(struct mt76x0_dev *dev); -int mt76x0_vendor_single_wr(struct mt76x0_dev *dev, const u8 req, - const u16 offset, const u32 val); - -#endif diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c new file mode 100644 index 000000000000..fb6fa1fa5548 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c @@ -0,0 +1,176 @@ +/* + * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ +#include <linux/kernel.h> +#include <linux/firmware.h> + +#include "mt76x0.h" +#include "mcu.h" +#include "../mt76x02_usb.h" + +#define MCU_FW_URB_MAX_PAYLOAD 0x38f8 +#define MCU_FW_URB_SIZE (MCU_FW_URB_MAX_PAYLOAD + 12) +#define MT7610U_FIRMWARE "mediatek/mt7610u.bin" + +static int +mt76x0u_upload_firmware(struct mt76x02_dev *dev, + const struct mt76x02_fw_header *hdr) +{ + u8 *fw_payload = (u8 *)(hdr + 1); + u32 ilm_len, dlm_len; + void *ivb; + int err; + + ivb = kmemdup(fw_payload, MT_MCU_IVB_SIZE, GFP_KERNEL); + if (!ivb) + return -ENOMEM; + + ilm_len = le32_to_cpu(hdr->ilm_len) - MT_MCU_IVB_SIZE; + dev_dbg(dev->mt76.dev, "loading FW - ILM %u + IVB %u\n", + ilm_len, MT_MCU_IVB_SIZE); + err = mt76x02u_mcu_fw_send_data(&dev->mt76, + fw_payload + MT_MCU_IVB_SIZE, + ilm_len, MCU_FW_URB_MAX_PAYLOAD, + MT_MCU_IVB_SIZE); + if (err) + goto out; + + dlm_len = le32_to_cpu(hdr->dlm_len); + dev_dbg(dev->mt76.dev, "loading FW - DLM %u\n", dlm_len); + err = mt76x02u_mcu_fw_send_data(&dev->mt76, + fw_payload + le32_to_cpu(hdr->ilm_len), + dlm_len, MCU_FW_URB_MAX_PAYLOAD, + MT_MCU_DLM_OFFSET); + if (err) + goto out; + + err = mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE, + USB_DIR_OUT | USB_TYPE_VENDOR, + 0x12, 0, ivb, MT_MCU_IVB_SIZE); + if (err < 0) + goto out; + + if (!mt76_poll_msec(dev, MT_MCU_COM_REG0, 1, 1, 1000)) { + dev_err(dev->mt76.dev, "Firmware failed to start\n"); + err = -ETIMEDOUT; + goto out; + } + + dev_dbg(dev->mt76.dev, "Firmware running!\n"); + +out: + kfree(ivb); + + return err; +} + +static int mt76x0u_load_firmware(struct mt76x02_dev *dev) +{ + const struct firmware *fw; + const struct mt76x02_fw_header *hdr; + int len, ret; + u32 val; + + mt76_wr(dev, MT_USB_DMA_CFG, (MT_USB_DMA_CFG_RX_BULK_EN | + MT_USB_DMA_CFG_TX_BULK_EN)); + + if (mt76x0_firmware_running(dev)) + return 0; + + ret = request_firmware(&fw, MT7610U_FIRMWARE, dev->mt76.dev); + if (ret) + return ret; + + if (!fw || !fw->data || fw->size < sizeof(*hdr)) + goto err_inv_fw; + + hdr = (const struct mt76x02_fw_header *)fw->data; + + if (le32_to_cpu(hdr->ilm_len) <= MT_MCU_IVB_SIZE) + goto err_inv_fw; + + len = sizeof(*hdr); + len += le32_to_cpu(hdr->ilm_len); + len += le32_to_cpu(hdr->dlm_len); + + if (fw->size != len) + goto err_inv_fw; + + val = le16_to_cpu(hdr->fw_ver); + dev_dbg(dev->mt76.dev, + "Firmware Version: %d.%d.%02d Build: %x Build time: %.16s\n", + (val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf, + le16_to_cpu(hdr->build_ver), hdr->build_time); + + len = le32_to_cpu(hdr->ilm_len); + + mt76_wr(dev, 0x1004, 0x2c); + + mt76_set(dev, MT_USB_DMA_CFG, + (MT_USB_DMA_CFG_RX_BULK_EN | MT_USB_DMA_CFG_TX_BULK_EN) | + FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, 0x20)); + mt76x02u_mcu_fw_reset(&dev->mt76); + usleep_range(5000, 6000); +/* + mt76x0_rmw(dev, MT_PBF_CFG, 0, (MT_PBF_CFG_TX0Q_EN | + MT_PBF_CFG_TX1Q_EN | + MT_PBF_CFG_TX2Q_EN | + MT_PBF_CFG_TX3Q_EN)); +*/ + + mt76_wr(dev, MT_FCE_PSE_CTRL, 1); + + /* FCE tx_fs_base_ptr */ + mt76_wr(dev, MT_TX_CPU_FROM_FCE_BASE_PTR, 0x400230); + /* FCE tx_fs_max_cnt */ + mt76_wr(dev, MT_TX_CPU_FROM_FCE_MAX_COUNT, 1); + /* FCE pdma enable */ + mt76_wr(dev, MT_FCE_PDMA_GLOBAL_CONF, 0x44); + /* FCE skip_fs_en */ + mt76_wr(dev, MT_FCE_SKIP_FS, 3); + + val = mt76_rr(dev, MT_USB_DMA_CFG); + val |= MT_USB_DMA_CFG_UDMA_TX_WL_DROP; + mt76_wr(dev, MT_USB_DMA_CFG, val); + val &= ~MT_USB_DMA_CFG_UDMA_TX_WL_DROP; + mt76_wr(dev, MT_USB_DMA_CFG, val); + + ret = mt76x0u_upload_firmware(dev, hdr); + release_firmware(fw); + + mt76_wr(dev, MT_FCE_PSE_CTRL, 1); + + return ret; + 
+err_inv_fw: + dev_err(dev->mt76.dev, "Invalid firmware image\n"); + release_firmware(fw); + return -ENOENT; +} + +int mt76x0u_mcu_init(struct mt76x02_dev *dev) +{ + int ret; + + ret = mt76x0u_load_firmware(dev); + if (ret < 0) + return ret; + + set_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state); + + return 0; +} + +MODULE_FIRMWARE(MT7610U_FIRMWARE); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/util.c b/drivers/net/wireless/mediatek/mt76/mt76x0/util.c deleted file mode 100644 index 7856dd760419..000000000000 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/util.c +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 - * as published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#include "mt76x0.h" - -void mt76x0_remove_hdr_pad(struct sk_buff *skb) -{ - int len = ieee80211_get_hdrlen_from_skb(skb); - - memmove(skb->data + 2, skb->data, len); - skb_pull(skb, 2); -} - -int mt76x0_insert_hdr_pad(struct sk_buff *skb) -{ - int len = ieee80211_get_hdrlen_from_skb(skb); - int ret; - - if (len % 4 == 0) - return 0; - - ret = skb_cow(skb, 2); - if (ret) - return ret; - - skb_push(skb, 2); - memmove(skb->data, skb->data + 2, len); - - skb->data[len] = 0; - skb->data[len + 1] = 0; - return 0; -} diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02.h b/drivers/net/wireless/mediatek/mt76/mt76x02.h new file mode 100644 index 000000000000..65174817ebc4 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x02.h @@ -0,0 +1,208 @@ +/* + * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> + * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef __MT76X02_UTIL_H +#define __MT76X02_UTIL_H + +#include <linux/kfifo.h> + +#include "mt76.h" +#include "mt76x02_regs.h" +#include "mt76x02_mac.h" +#include "mt76x02_dfs.h" +#include "mt76x02_dma.h" + +struct mt76x02_mac_stats { + u64 rx_stat[6]; + u64 tx_stat[6]; + u64 aggr_stat[2]; + u64 aggr_n[32]; + u64 zero_len_del[2]; +}; + +#define MT_MAX_CHAINS 2 +struct mt76x02_rx_freq_cal { + s8 high_gain[MT_MAX_CHAINS]; + s8 rssi_offset[MT_MAX_CHAINS]; + s8 lna_gain; + u32 mcu_gain; + s16 temp_offset; + u8 freq_offset; +}; + +struct mt76x02_calibration { + struct mt76x02_rx_freq_cal rx; + + u8 agc_gain_init[MT_MAX_CHAINS]; + u8 agc_gain_cur[MT_MAX_CHAINS]; + + u16 false_cca; + s8 avg_rssi_all; + s8 agc_gain_adjust; + s8 low_gain; + + u8 temp; + + bool init_cal_done; + bool tssi_cal_done; + bool tssi_comp_pending; + bool dpd_cal_done; + bool channel_cal_done; +}; + +struct mt76x02_dev { + struct mt76_dev mt76; /* must be first */ + + struct mac_address macaddr_list[8]; + + struct mutex phy_mutex; + struct mutex mutex; + + u8 txdone_seq; + DECLARE_KFIFO_PTR(txstatus_fifo, struct mt76x02_tx_status); + + struct sk_buff *rx_head; + + struct tasklet_struct tx_tasklet; + struct tasklet_struct pre_tbtt_tasklet; + struct delayed_work cal_work; + struct delayed_work mac_work; + + struct mt76x02_mac_stats stats; + atomic_t avg_ampdu_len; + u32 aggr_stats[32]; + + struct sk_buff *beacons[8]; + u8 beacon_mask; + u8 beacon_data_mask; + + u8 tbtt_count; + u16 beacon_int; + + struct mt76x02_calibration cal; + + s8 target_power; + s8 target_power_delta[2]; + bool enable_tpc; + + bool no_2ghz; + + u8 agc_save; + + u8 coverage_class; + u8 slottime; + + struct mt76x02_dfs_pattern_detector dfs_pd; +}; + +extern struct ieee80211_rate mt76x02_rates[12]; + +void mt76x02_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *total_flags, u64 multicast); +int mt76x02_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta); +int mt76x02_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta); + +void mt76x02_vif_init(struct mt76_dev *dev, struct ieee80211_vif *vif, + unsigned int idx); +int mt76x02_add_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); +void mt76x02_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); + +int mt76x02_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_ampdu_params *params); +int mt76x02_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key); +int mt76x02_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + u16 queue, const struct ieee80211_tx_queue_params *params); +void mt76x02_sta_rate_tbl_update(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta); +s8 mt76x02_tx_get_max_txpwr_adj(struct mt76_dev *dev, + const struct ieee80211_tx_rate *rate); +s8 mt76x02_tx_get_txpwr_adj(struct mt76_dev *mdev, s8 txpwr, s8 max_txpwr_adj); +void mt76x02_tx_set_txpwr_auto(struct mt76x02_dev *dev, s8 txpwr); +int mt76x02_insert_hdr_pad(struct sk_buff *skb); +void mt76x02_remove_hdr_pad(struct sk_buff *skb, int len); +void mt76x02_tx_complete(struct mt76_dev *dev, struct sk_buff *skb); +bool mt76x02_tx_status_data(struct mt76_dev *dev, u8 *update); +void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, + struct sk_buff *skb); +void mt76x02_rx_poll_complete(struct mt76_dev *mdev, enum 
mt76_rxq_id q); +irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance); +void mt76x02_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, + struct sk_buff *skb); +int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi, + struct sk_buff *skb, struct mt76_queue *q, + struct mt76_wcid *wcid, struct ieee80211_sta *sta, + u32 *tx_info); + +extern const u16 mt76x02_beacon_offsets[16]; +void mt76x02_set_beacon_offsets(struct mt76_dev *dev); +void mt76x02_set_irq_mask(struct mt76x02_dev *dev, u32 clear, u32 set); +void mt76x02_mac_start(struct mt76x02_dev *dev); + +static inline void mt76x02_irq_enable(struct mt76x02_dev *dev, u32 mask) +{ + mt76x02_set_irq_mask(dev, 0, mask); +} + +static inline void mt76x02_irq_disable(struct mt76x02_dev *dev, u32 mask) +{ + mt76x02_set_irq_mask(dev, mask, 0); +} + +static inline bool +mt76x02_wait_for_txrx_idle(struct mt76_dev *dev) +{ + return __mt76_poll_msec(dev, MT_MAC_STATUS, + MT_MAC_STATUS_TX | MT_MAC_STATUS_RX, + 0, 100); +} + +static inline struct mt76x02_sta * +mt76x02_rx_get_sta(struct mt76_dev *dev, u8 idx) +{ + struct mt76_wcid *wcid; + + if (idx >= ARRAY_SIZE(dev->wcid)) + return NULL; + + wcid = rcu_dereference(dev->wcid[idx]); + if (!wcid) + return NULL; + + return container_of(wcid, struct mt76x02_sta, wcid); +} + +static inline struct mt76_wcid * +mt76x02_rx_get_sta_wcid(struct mt76x02_sta *sta, bool unicast) +{ + if (!sta) + return NULL; + + if (unicast) + return &sta->wcid; + else + return &sta->vif->group_wcid; +} + +#endif diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.h b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.h index 693f421bf096..7e177c934592 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.h @@ -14,8 +14,8 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ -#ifndef __MT76x2_DFS_H -#define __MT76x2_DFS_H +#ifndef __MT76x02_DFS_H +#define __MT76x02_DFS_H #include <linux/types.h> #include <linux/nl80211.h> @@ -49,7 +49,7 @@ #define MT_DFS_ETSI_MAX_PRI (133333 + 125000 + 117647 + 1000) #define MT_DFS_ETSI_MIN_PRI (4500 - 20) -struct mt76x2_radar_specs { +struct mt76x02_radar_specs { u8 mode; u16 avg_len; u16 e_low; @@ -70,7 +70,7 @@ struct mt76x2_radar_specs { #define MT_DFS_EVENT_ENGINE(x) (((x) & BIT(31)) ? 
2 : 0) #define MT_DFS_EVENT_TIMESTAMP(x) ((x) & GENMASK(21, 0)) #define MT_DFS_EVENT_WIDTH(x) ((x) & GENMASK(11, 0)) -struct mt76x2_dfs_event { +struct mt76x02_dfs_event { unsigned long fetch_ts; u32 ts; u16 width; @@ -78,12 +78,12 @@ struct mt76x2_dfs_event { }; #define MT_DFS_EVENT_BUFLEN 256 -struct mt76x2_dfs_event_rb { - struct mt76x2_dfs_event data[MT_DFS_EVENT_BUFLEN]; +struct mt76x02_dfs_event_rb { + struct mt76x02_dfs_event data[MT_DFS_EVENT_BUFLEN]; int h_rb, t_rb; }; -struct mt76x2_dfs_sequence { +struct mt76x02_dfs_sequence { struct list_head head; u32 first_ts; u32 last_ts; @@ -92,7 +92,7 @@ struct mt76x2_dfs_sequence { u8 engine; }; -struct mt76x2_dfs_hw_pulse { +struct mt76x02_dfs_hw_pulse { u8 engine; u32 period; u32 w1; @@ -100,47 +100,41 @@ struct mt76x2_dfs_hw_pulse { u32 burst; }; -struct mt76x2_dfs_sw_detector_params { +struct mt76x02_dfs_sw_detector_params { u32 min_pri; u32 max_pri; u32 pri_margin; }; -struct mt76x2_dfs_engine_stats { +struct mt76x02_dfs_engine_stats { u32 hw_pattern; u32 hw_pulse_discarded; u32 sw_pattern; }; -struct mt76x2_dfs_seq_stats { +struct mt76x02_dfs_seq_stats { u32 seq_pool_len; u32 seq_len; }; -struct mt76x2_dfs_pattern_detector { +struct mt76x02_dfs_pattern_detector { enum nl80211_dfs_regions region; u8 chirp_pulse_cnt; u32 chirp_pulse_ts; - struct mt76x2_dfs_sw_detector_params sw_dpd_params; - struct mt76x2_dfs_event_rb event_rb[2]; + struct mt76x02_dfs_sw_detector_params sw_dpd_params; + struct mt76x02_dfs_event_rb event_rb[2]; struct list_head sequences; struct list_head seq_pool; - struct mt76x2_dfs_seq_stats seq_stats; + struct mt76x02_dfs_seq_stats seq_stats; unsigned long last_sw_check; u32 last_event_ts; - struct mt76x2_dfs_engine_stats stats[MT_DFS_NUM_ENGINES]; + struct mt76x02_dfs_engine_stats stats[MT_DFS_NUM_ENGINES]; struct tasklet_struct dfs_tasklet; }; -void mt76x2_dfs_init_params(struct mt76x2_dev *dev); -void mt76x2_dfs_init_detector(struct mt76x2_dev *dev); -void mt76x2_dfs_adjust_agc(struct mt76x2_dev *dev); -void mt76x2_dfs_set_domain(struct mt76x2_dev *dev, - enum nl80211_dfs_regions region); - -#endif /* __MT76x2_DFS_H */ +#endif /* __MT76x02_DFS_H */ diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_dma.h b/drivers/net/wireless/mediatek/mt76/mt76x02_dma.h new file mode 100644 index 000000000000..6394010a565f --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_dma.h @@ -0,0 +1,77 @@ +/* + * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef __MT76x02_DMA_H +#define __MT76x02_DMA_H + +#include "mt76x02.h" +#include "dma.h" + +#define MT_TXD_INFO_LEN GENMASK(15, 0) +#define MT_TXD_INFO_NEXT_VLD BIT(16) +#define MT_TXD_INFO_TX_BURST BIT(17) +#define MT_TXD_INFO_80211 BIT(19) +#define MT_TXD_INFO_TSO BIT(20) +#define MT_TXD_INFO_CSO BIT(21) +#define MT_TXD_INFO_WIV BIT(24) +#define MT_TXD_INFO_QSEL GENMASK(26, 25) +#define MT_TXD_INFO_DPORT GENMASK(29, 27) +#define MT_TXD_INFO_TYPE GENMASK(31, 30) + +#define MT_RX_FCE_INFO_LEN GENMASK(13, 0) +#define MT_RX_FCE_INFO_SELF_GEN BIT(15) +#define MT_RX_FCE_INFO_CMD_SEQ GENMASK(19, 16) +#define MT_RX_FCE_INFO_EVT_TYPE GENMASK(23, 20) +#define MT_RX_FCE_INFO_PCIE_INTR BIT(24) +#define MT_RX_FCE_INFO_QSEL GENMASK(26, 25) +#define MT_RX_FCE_INFO_D_PORT GENMASK(29, 27) +#define MT_RX_FCE_INFO_TYPE GENMASK(31, 30) + +/* MCU request message header */ +#define MT_MCU_MSG_LEN GENMASK(15, 0) +#define MT_MCU_MSG_CMD_SEQ GENMASK(19, 16) +#define MT_MCU_MSG_CMD_TYPE GENMASK(26, 20) +#define MT_MCU_MSG_PORT GENMASK(29, 27) +#define MT_MCU_MSG_TYPE GENMASK(31, 30) +#define MT_MCU_MSG_TYPE_CMD BIT(30) + +#define MT_RX_HEADROOM 32 +#define MT76X02_RX_RING_SIZE 256 + +enum dma_msg_port { + WLAN_PORT, + CPU_RX_PORT, + CPU_TX_PORT, + HOST_PORT, + VIRTUAL_CPU_RX_PORT, + VIRTUAL_CPU_TX_PORT, + DISCARD, +}; + +static inline bool +mt76x02_wait_for_wpdma(struct mt76_dev *dev, int timeout) +{ + return __mt76_poll(dev, MT_WPDMA_GLO_CFG, + MT_WPDMA_GLO_CFG_TX_DMA_BUSY | + MT_WPDMA_GLO_CFG_RX_DMA_BUSY, + 0, timeout); +} + +int mt76x02_dma_init(struct mt76x02_dev *dev); +void mt76x02_dma_disable(struct mt76x02_dev *dev); +void mt76x02_dma_cleanup(struct mt76x02_dev *dev); + +#endif /* __MT76x02_DMA_H */ diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c new file mode 100644 index 000000000000..d3efeb8a72b7 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c @@ -0,0 +1,156 @@ +/* + * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> + * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include <asm/unaligned.h> + +#include "mt76.h" +#include "mt76x02_eeprom.h" +#include "mt76x02_regs.h" + +static int +mt76x02_efuse_read(struct mt76_dev *dev, u16 addr, u8 *data, + enum mt76x02_eeprom_modes mode) +{ + u32 val; + int i; + + val = __mt76_rr(dev, MT_EFUSE_CTRL); + val &= ~(MT_EFUSE_CTRL_AIN | + MT_EFUSE_CTRL_MODE); + val |= FIELD_PREP(MT_EFUSE_CTRL_AIN, addr & ~0xf); + val |= FIELD_PREP(MT_EFUSE_CTRL_MODE, mode); + val |= MT_EFUSE_CTRL_KICK; + __mt76_wr(dev, MT_EFUSE_CTRL, val); + + if (!__mt76_poll_msec(dev, MT_EFUSE_CTRL, MT_EFUSE_CTRL_KICK, + 0, 1000)) + return -ETIMEDOUT; + + udelay(2); + + val = __mt76_rr(dev, MT_EFUSE_CTRL); + if ((val & MT_EFUSE_CTRL_AOUT) == MT_EFUSE_CTRL_AOUT) { + memset(data, 0xff, 16); + return 0; + } + + for (i = 0; i < 4; i++) { + val = __mt76_rr(dev, MT_EFUSE_DATA(i)); + put_unaligned_le32(val, data + 4 * i); + } + + return 0; +} + +int mt76x02_get_efuse_data(struct mt76_dev *dev, u16 base, void *buf, + int len, enum mt76x02_eeprom_modes mode) +{ + int ret, i; + + for (i = 0; i + 16 <= len; i += 16) { + ret = mt76x02_efuse_read(dev, base + i, buf + i, mode); + if (ret) + return ret; + } + + return 0; +} +EXPORT_SYMBOL_GPL(mt76x02_get_efuse_data); + +void mt76x02_eeprom_parse_hw_cap(struct mt76_dev *dev) +{ + u16 val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_0); + + switch (FIELD_GET(MT_EE_NIC_CONF_0_BOARD_TYPE, val)) { + case BOARD_TYPE_5GHZ: + dev->cap.has_5ghz = true; + break; + case BOARD_TYPE_2GHZ: + dev->cap.has_2ghz = true; + break; + default: + dev->cap.has_2ghz = true; + dev->cap.has_5ghz = true; + break; + } +} +EXPORT_SYMBOL_GPL(mt76x02_eeprom_parse_hw_cap); + +bool mt76x02_ext_pa_enabled(struct mt76_dev *dev, enum nl80211_band band) +{ + u16 conf0 = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_0); + + if (band == NL80211_BAND_5GHZ) + return !(conf0 & MT_EE_NIC_CONF_0_PA_INT_5G); + else + return !(conf0 & MT_EE_NIC_CONF_0_PA_INT_2G); +} +EXPORT_SYMBOL_GPL(mt76x02_ext_pa_enabled); + +void mt76x02_get_rx_gain(struct mt76_dev *dev, enum nl80211_band band, + u16 *rssi_offset, s8 *lna_2g, s8 *lna_5g) +{ + u16 val; + + val = mt76x02_eeprom_get(dev, MT_EE_LNA_GAIN); + *lna_2g = val & 0xff; + lna_5g[0] = val >> 8; + + val = mt76x02_eeprom_get(dev, MT_EE_RSSI_OFFSET_2G_1); + lna_5g[1] = val >> 8; + + val = mt76x02_eeprom_get(dev, MT_EE_RSSI_OFFSET_5G_1); + lna_5g[2] = val >> 8; + + if (!mt76x02_field_valid(lna_5g[1])) + lna_5g[1] = lna_5g[0]; + + if (!mt76x02_field_valid(lna_5g[2])) + lna_5g[2] = lna_5g[0]; + + if (band == NL80211_BAND_2GHZ) + *rssi_offset = mt76x02_eeprom_get(dev, MT_EE_RSSI_OFFSET_2G_0); + else + *rssi_offset = mt76x02_eeprom_get(dev, MT_EE_RSSI_OFFSET_5G_0); +} +EXPORT_SYMBOL_GPL(mt76x02_get_rx_gain); + +u8 mt76x02_get_lna_gain(struct mt76_dev *dev, + s8 *lna_2g, s8 *lna_5g, + struct ieee80211_channel *chan) +{ + u16 val; + u8 lna; + + val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1); + if (val & MT_EE_NIC_CONF_1_LNA_EXT_2G) + *lna_2g = 0; + if (val & MT_EE_NIC_CONF_1_LNA_EXT_5G) + memset(lna_5g, 0, sizeof(s8) * 3); + + if (chan->band == NL80211_BAND_2GHZ) + lna = *lna_2g; + else if (chan->hw_value <= 64) + lna = lna_5g[0]; + else if (chan->hw_value <= 128) + lna = lna_5g[1]; + else + lna = lna_5g[2]; + + return lna != 0xff ? 
lna : 0; +} +EXPORT_SYMBOL_GPL(mt76x02_get_lna_gain); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h index 0f3e4d2f4fee..bcd05f7c5f45 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h @@ -1,5 +1,6 @@ /* * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> + * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -14,18 +15,19 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ -#ifndef __MT76x2_EEPROM_H -#define __MT76x2_EEPROM_H +#ifndef __MT76x02_EEPROM_H +#define __MT76x02_EEPROM_H -#include "mt76x2.h" - -enum mt76x2_eeprom_field { +enum mt76x02_eeprom_field { MT_EE_CHIP_ID = 0x000, MT_EE_VERSION = 0x002, MT_EE_MAC_ADDR = 0x004, MT_EE_PCI_ID = 0x00A, MT_EE_NIC_CONF_0 = 0x034, MT_EE_NIC_CONF_1 = 0x036, + MT_EE_COUNTRY_REGION_5GHZ = 0x038, + MT_EE_COUNTRY_REGION_2GHZ = 0x039, + MT_EE_FREQ_OFFSET = 0x03a, MT_EE_NIC_CONF_2 = 0x042, MT_EE_XTAL_TRIM_1 = 0x03a, @@ -34,8 +36,10 @@ enum mt76x2_eeprom_field { MT_EE_LNA_GAIN = 0x044, MT_EE_RSSI_OFFSET_2G_0 = 0x046, MT_EE_RSSI_OFFSET_2G_1 = 0x048, + MT_EE_LNA_GAIN_5GHZ_1 = 0x049, MT_EE_RSSI_OFFSET_5G_0 = 0x04a, MT_EE_RSSI_OFFSET_5G_1 = 0x04c, + MT_EE_LNA_GAIN_5GHZ_2 = 0x04d, MT_EE_TX_POWER_DELTA_BW40 = 0x050, MT_EE_TX_POWER_DELTA_BW80 = 0x052, @@ -68,6 +72,17 @@ enum mt76x2_eeprom_field { MT_EE_TX_POWER_VHT_MCS4 = 0x0bc, MT_EE_TX_POWER_VHT_MCS8 = 0x0be, + MT_EE_2G_TARGET_POWER = 0x0d0, + MT_EE_TEMP_OFFSET = 0x0d1, + MT_EE_5G_TARGET_POWER = 0x0d2, + MT_EE_TSSI_BOUND1 = 0x0d4, + MT_EE_TSSI_BOUND2 = 0x0d6, + MT_EE_TSSI_BOUND3 = 0x0d8, + MT_EE_TSSI_BOUND4 = 0x0da, + MT_EE_FREQ_OFFSET_COMPENSATION = 0x0db, + MT_EE_TSSI_BOUND5 = 0x0dc, + MT_EE_TX_POWER_BYRATE_BASE = 0x0de, + MT_EE_RF_TEMP_COMP_SLOPE_5G = 0x0f2, MT_EE_RF_TEMP_COMP_SLOPE_2G = 0x0f4, @@ -81,13 +96,21 @@ enum mt76x2_eeprom_field { MT_EE_BT_VCDL_CALIBRATION = 0x13c, MT_EE_BT_PMUCFG = 0x13e, + MT_EE_USAGE_MAP_START = 0x1e0, + MT_EE_USAGE_MAP_END = 0x1fc, + __MT_EE_MAX }; +#define MT_EE_NIC_CONF_0_RX_PATH GENMASK(3, 0) +#define MT_EE_NIC_CONF_0_TX_PATH GENMASK(7, 4) +#define MT_EE_NIC_CONF_0_PA_TYPE GENMASK(9, 8) #define MT_EE_NIC_CONF_0_PA_INT_2G BIT(8) #define MT_EE_NIC_CONF_0_PA_INT_5G BIT(9) +#define MT_EE_NIC_CONF_0_PA_IO_CURRENT BIT(10) #define MT_EE_NIC_CONF_0_BOARD_TYPE GENMASK(13, 12) +#define MT_EE_NIC_CONF_1_HW_RF_CTRL BIT(0) #define MT_EE_NIC_CONF_1_TEMP_TX_ALC BIT(1) #define MT_EE_NIC_CONF_1_LNA_EXT_2G BIT(2) #define MT_EE_NIC_CONF_1_LNA_EXT_5G BIT(3) @@ -100,93 +123,89 @@ enum mt76x2_eeprom_field { #define MT_EE_NIC_CONF_2_TEMP_DISABLE BIT(11) #define MT_EE_NIC_CONF_2_COEX_METHOD GENMASK(15, 13) -enum mt76x2_board_type { +#define MT_EFUSE_USAGE_MAP_SIZE (MT_EE_USAGE_MAP_END - \ + MT_EE_USAGE_MAP_START + 1) + +enum mt76x02_eeprom_modes { + MT_EE_READ, + MT_EE_PHYSICAL_READ, +}; + +enum mt76x02_board_type { BOARD_TYPE_2GHZ = 1, BOARD_TYPE_5GHZ = 2, }; -enum mt76x2_cal_channel_group { - MT_CH_5G_JAPAN, - MT_CH_5G_UNII_1, - MT_CH_5G_UNII_2, - MT_CH_5G_UNII_2E_1, - MT_CH_5G_UNII_2E_2, - MT_CH_5G_UNII_3, - __MT_CH_MAX -}; +static inline bool mt76x02_field_valid(u8 val) +{ + return val != 0 && val != 0xff; +} -struct mt76x2_tx_power_info { - u8 target_power; +static inline int +mt76x02_sign_extend(u32 val, unsigned int size) +{ + bool sign = val & BIT(size - 1); - s8 
delta_bw40; - s8 delta_bw80; + val &= BIT(size - 1) - 1; - struct { - s8 tssi_slope; - s8 tssi_offset; - s8 target_power; - s8 delta; - } chain[MT_MAX_CHAINS]; -}; + return sign ? val : -val; +} -struct mt76x2_temp_comp { - u8 temp_25_ref; - int lower_bound; /* J */ - int upper_bound; /* J */ - unsigned int high_slope; /* J / dB */ - unsigned int low_slope; /* J / dB */ -}; +static inline int +mt76x02_sign_extend_optional(u32 val, unsigned int size) +{ + bool enable = val & BIT(size); + + return enable ? mt76x02_sign_extend(val, size) : 0; +} + +static inline s8 mt76x02_rate_power_val(u8 val) +{ + if (!mt76x02_field_valid(val)) + return 0; + + return mt76x02_sign_extend_optional(val, 7); +} static inline int -mt76x2_eeprom_get(struct mt76x2_dev *dev, enum mt76x2_eeprom_field field) +mt76x02_eeprom_get(struct mt76_dev *dev, + enum mt76x02_eeprom_field field) { if ((field & 1) || field >= __MT_EE_MAX) return -1; - return get_unaligned_le16(dev->mt76.eeprom.data + field); + return get_unaligned_le16(dev->eeprom.data + field); } -void mt76x2_get_rate_power(struct mt76x2_dev *dev, struct mt76_rate_power *t, - struct ieee80211_channel *chan); -int mt76x2_get_max_rate_power(struct mt76_rate_power *r); -void mt76x2_get_power_info(struct mt76x2_dev *dev, - struct mt76x2_tx_power_info *t, - struct ieee80211_channel *chan); -int mt76x2_get_temp_comp(struct mt76x2_dev *dev, struct mt76x2_temp_comp *t); -bool mt76x2_ext_pa_enabled(struct mt76x2_dev *dev, enum nl80211_band band); -void mt76x2_read_rx_gain(struct mt76x2_dev *dev); -void mt76x2_eeprom_parse_hw_cap(struct mt76x2_dev *dev); - static inline bool -mt76x2_temp_tx_alc_enabled(struct mt76x2_dev *dev) +mt76x02_temp_tx_alc_enabled(struct mt76_dev *dev) { u16 val; - val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_EXT_PA_5G); + val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_EXT_PA_5G); if (!(val & BIT(15))) return false; - return mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1) & + return mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1) & MT_EE_NIC_CONF_1_TEMP_TX_ALC; } static inline bool -mt76x2_tssi_enabled(struct mt76x2_dev *dev) +mt76x02_tssi_enabled(struct mt76_dev *dev) { - return !mt76x2_temp_tx_alc_enabled(dev) && - (mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1) & + return !mt76x02_temp_tx_alc_enabled(dev) && + (mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1) & MT_EE_NIC_CONF_1_TX_ALC_EN); } -static inline bool -mt76x2_has_ext_lna(struct mt76x2_dev *dev) -{ - u32 val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1); - - if (dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ) - return val & MT_EE_NIC_CONF_1_LNA_EXT_2G; - else - return val & MT_EE_NIC_CONF_1_LNA_EXT_5G; -} - -#endif +bool mt76x02_ext_pa_enabled(struct mt76_dev *dev, enum nl80211_band band); +int mt76x02_get_efuse_data(struct mt76_dev *dev, u16 base, void *buf, + int len, enum mt76x02_eeprom_modes mode); +void mt76x02_get_rx_gain(struct mt76_dev *dev, enum nl80211_band band, + u16 *rssi_offset, s8 *lna_2g, s8 *lna_5g); +u8 mt76x02_get_lna_gain(struct mt76_dev *dev, + s8 *lna_2g, s8 *lna_5g, + struct ieee80211_channel *chan); +void mt76x02_eeprom_parse_hw_cap(struct mt76_dev *dev); + +#endif /* __MT76x02_EEPROM_H */ diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac_common.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c index 6542644bc325..244245418ebb 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_mac_common.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c @@ -1,6 +1,6 @@ /* * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> - * Copyright (C) 2018 Lorenzo Bianconi 
<lorenzo.bianconi83@gmail.com> + * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl> * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -15,222 +15,11 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ -#include "mt76x2.h" +#include "mt76x02.h" +#include "mt76x02_trace.h" -void mt76x2_mac_stop(struct mt76x2_dev *dev, bool force) -{ - bool stopped = false; - u32 rts_cfg; - int i; - - mt76_wr(dev, MT_MAC_SYS_CTRL, 0); - - rts_cfg = mt76_rr(dev, MT_TX_RTS_CFG); - mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg & ~MT_TX_RTS_CFG_RETRY_LIMIT); - - /* Wait for MAC to become idle */ - for (i = 0; i < 300; i++) { - if ((mt76_rr(dev, MT_MAC_STATUS) & - (MT_MAC_STATUS_RX | MT_MAC_STATUS_TX)) || - mt76_rr(dev, MT_BBP(IBI, 12))) { - udelay(1); - continue; - } - - stopped = true; - break; - } - - if (force && !stopped) { - mt76_set(dev, MT_BBP(CORE, 4), BIT(1)); - mt76_clear(dev, MT_BBP(CORE, 4), BIT(1)); - - mt76_set(dev, MT_BBP(CORE, 4), BIT(0)); - mt76_clear(dev, MT_BBP(CORE, 4), BIT(0)); - } - - mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg); -} -EXPORT_SYMBOL_GPL(mt76x2_mac_stop); - -bool mt76x2_mac_load_tx_status(struct mt76x2_dev *dev, - struct mt76x2_tx_status *stat) -{ - u32 stat1, stat2; - - stat2 = mt76_rr(dev, MT_TX_STAT_FIFO_EXT); - stat1 = mt76_rr(dev, MT_TX_STAT_FIFO); - - stat->valid = !!(stat1 & MT_TX_STAT_FIFO_VALID); - if (!stat->valid) - return false; - - stat->success = !!(stat1 & MT_TX_STAT_FIFO_SUCCESS); - stat->aggr = !!(stat1 & MT_TX_STAT_FIFO_AGGR); - stat->ack_req = !!(stat1 & MT_TX_STAT_FIFO_ACKREQ); - stat->wcid = FIELD_GET(MT_TX_STAT_FIFO_WCID, stat1); - stat->rate = FIELD_GET(MT_TX_STAT_FIFO_RATE, stat1); - - stat->retry = FIELD_GET(MT_TX_STAT_FIFO_EXT_RETRY, stat2); - stat->pktid = FIELD_GET(MT_TX_STAT_FIFO_EXT_PKTID, stat2); - - return true; -} -EXPORT_SYMBOL_GPL(mt76x2_mac_load_tx_status); - -static int -mt76x2_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate, - enum nl80211_band band) -{ - u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate); - - txrate->idx = 0; - txrate->flags = 0; - txrate->count = 1; - - switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) { - case MT_PHY_TYPE_OFDM: - if (band == NL80211_BAND_2GHZ) - idx += 4; - - txrate->idx = idx; - return 0; - case MT_PHY_TYPE_CCK: - if (idx >= 8) - idx -= 8; - - txrate->idx = idx; - return 0; - case MT_PHY_TYPE_HT_GF: - txrate->flags |= IEEE80211_TX_RC_GREEN_FIELD; - /* fall through */ - case MT_PHY_TYPE_HT: - txrate->flags |= IEEE80211_TX_RC_MCS; - txrate->idx = idx; - break; - case MT_PHY_TYPE_VHT: - txrate->flags |= IEEE80211_TX_RC_VHT_MCS; - txrate->idx = idx; - break; - default: - return -EINVAL; - } - - switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) { - case MT_PHY_BW_20: - break; - case MT_PHY_BW_40: - txrate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; - break; - case MT_PHY_BW_80: - txrate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH; - break; - default: - return -EINVAL; - } - - if (rate & MT_RXWI_RATE_SGI) - txrate->flags |= IEEE80211_TX_RC_SHORT_GI; - - return 0; -} - -static void -mt76x2_mac_fill_tx_status(struct mt76x2_dev *dev, - struct ieee80211_tx_info *info, - struct mt76x2_tx_status *st, int n_frames) -{ - struct ieee80211_tx_rate *rate = info->status.rates; - int cur_idx, last_rate; - int i; - - if (!n_frames) - return; - - last_rate = min_t(int, st->retry, IEEE80211_TX_MAX_RATES - 1); - mt76x2_mac_process_tx_rate(&rate[last_rate], st->rate, - dev->mt76.chandef.chan->band); - if (last_rate < 
IEEE80211_TX_MAX_RATES - 1) - rate[last_rate + 1].idx = -1; - - cur_idx = rate[last_rate].idx + last_rate; - for (i = 0; i <= last_rate; i++) { - rate[i].flags = rate[last_rate].flags; - rate[i].idx = max_t(int, 0, cur_idx - i); - rate[i].count = 1; - } - rate[last_rate].count = st->retry + 1 - last_rate; - - info->status.ampdu_len = n_frames; - info->status.ampdu_ack_len = st->success ? n_frames : 0; - - if (st->pktid & MT_TXWI_PKTID_PROBE) - info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE; - - if (st->aggr) - info->flags |= IEEE80211_TX_CTL_AMPDU | - IEEE80211_TX_STAT_AMPDU; - - if (!st->ack_req) - info->flags |= IEEE80211_TX_CTL_NO_ACK; - else if (st->success) - info->flags |= IEEE80211_TX_STAT_ACK; -} - -void mt76x2_send_tx_status(struct mt76x2_dev *dev, - struct mt76x2_tx_status *stat, u8 *update) -{ - struct ieee80211_tx_info info = {}; - struct ieee80211_sta *sta = NULL; - struct mt76_wcid *wcid = NULL; - struct mt76x2_sta *msta = NULL; - - rcu_read_lock(); - if (stat->wcid < ARRAY_SIZE(dev->wcid)) - wcid = rcu_dereference(dev->wcid[stat->wcid]); - - if (wcid) { - void *priv; - - priv = msta = container_of(wcid, struct mt76x2_sta, wcid); - sta = container_of(priv, struct ieee80211_sta, - drv_priv); - } - - if (msta && stat->aggr) { - u32 stat_val, stat_cache; - - stat_val = stat->rate; - stat_val |= ((u32) stat->retry) << 16; - stat_cache = msta->status.rate; - stat_cache |= ((u32) msta->status.retry) << 16; - - if (*update == 0 && stat_val == stat_cache && - stat->wcid == msta->status.wcid && msta->n_frames < 32) { - msta->n_frames++; - goto out; - } - - mt76x2_mac_fill_tx_status(dev, &info, &msta->status, - msta->n_frames); - - msta->status = *stat; - msta->n_frames = 1; - *update = 0; - } else { - mt76x2_mac_fill_tx_status(dev, &info, stat, 1); - *update = 1; - } - - ieee80211_tx_status_noskb(mt76_hw(dev), sta, &info); - -out: - rcu_read_unlock(); -} -EXPORT_SYMBOL_GPL(mt76x2_send_tx_status); - -static enum mt76x2_cipher_type -mt76x2_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data) +enum mt76x02_cipher_type +mt76x02_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data) { memset(key_data, 0, 32); if (!key) @@ -254,61 +43,162 @@ mt76x2_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data) return MT_CIPHER_NONE; } } +EXPORT_SYMBOL_GPL(mt76x02_mac_get_key_info); -int mt76x2_mac_shared_key_setup(struct mt76x2_dev *dev, u8 vif_idx, u8 key_idx, +int mt76x02_mac_shared_key_setup(struct mt76_dev *dev, u8 vif_idx, u8 key_idx, struct ieee80211_key_conf *key) { - enum mt76x2_cipher_type cipher; + enum mt76x02_cipher_type cipher; u8 key_data[32]; u32 val; - cipher = mt76x2_mac_get_key_info(key, key_data); + cipher = mt76x02_mac_get_key_info(key, key_data); if (cipher == MT_CIPHER_NONE && key) return -EOPNOTSUPP; - val = mt76_rr(dev, MT_SKEY_MODE(vif_idx)); + val = __mt76_rr(dev, MT_SKEY_MODE(vif_idx)); val &= ~(MT_SKEY_MODE_MASK << MT_SKEY_MODE_SHIFT(vif_idx, key_idx)); val |= cipher << MT_SKEY_MODE_SHIFT(vif_idx, key_idx); - mt76_wr(dev, MT_SKEY_MODE(vif_idx), val); + __mt76_wr(dev, MT_SKEY_MODE(vif_idx), val); - mt76_wr_copy(dev, MT_SKEY(vif_idx, key_idx), key_data, - sizeof(key_data)); + __mt76_wr_copy(dev, MT_SKEY(vif_idx, key_idx), key_data, + sizeof(key_data)); return 0; } -EXPORT_SYMBOL_GPL(mt76x2_mac_shared_key_setup); +EXPORT_SYMBOL_GPL(mt76x02_mac_shared_key_setup); -int mt76x2_mac_wcid_set_key(struct mt76x2_dev *dev, u8 idx, +int mt76x02_mac_wcid_set_key(struct mt76_dev *dev, u8 idx, struct ieee80211_key_conf *key) { - enum mt76x2_cipher_type cipher; 
+ enum mt76x02_cipher_type cipher; u8 key_data[32]; u8 iv_data[8]; - cipher = mt76x2_mac_get_key_info(key, key_data); + cipher = mt76x02_mac_get_key_info(key, key_data); if (cipher == MT_CIPHER_NONE && key) return -EOPNOTSUPP; - mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PKEY_MODE, cipher); - mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data)); + __mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data)); + __mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PKEY_MODE, cipher); memset(iv_data, 0, sizeof(iv_data)); if (key) { - mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PAIRWISE, - !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)); + __mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PAIRWISE, + !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)); iv_data[3] = key->keyidx << 6; if (cipher >= MT_CIPHER_TKIP) iv_data[3] |= 0x20; } - mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data)); + __mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data)); return 0; } -EXPORT_SYMBOL_GPL(mt76x2_mac_wcid_set_key); +EXPORT_SYMBOL_GPL(mt76x02_mac_wcid_set_key); + +void mt76x02_mac_wcid_setup(struct mt76_dev *dev, u8 idx, u8 vif_idx, u8 *mac) +{ + struct mt76_wcid_addr addr = {}; + u32 attr; + + attr = FIELD_PREP(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) | + FIELD_PREP(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8)); + + __mt76_wr(dev, MT_WCID_ATTR(idx), attr); + + __mt76_wr(dev, MT_WCID_TX_RATE(idx), 0); + __mt76_wr(dev, MT_WCID_TX_RATE(idx) + 4, 0); + + if (idx >= 128) + return; + + if (mac) + memcpy(addr.macaddr, mac, ETH_ALEN); + + __mt76_wr_copy(dev, MT_WCID_ADDR(idx), &addr, sizeof(addr)); +} +EXPORT_SYMBOL_GPL(mt76x02_mac_wcid_setup); + +void mt76x02_mac_wcid_set_drop(struct mt76_dev *dev, u8 idx, bool drop) +{ + u32 val = __mt76_rr(dev, MT_WCID_DROP(idx)); + u32 bit = MT_WCID_DROP_MASK(idx); + + /* prevent unnecessary writes */ + if ((val & bit) != (bit * drop)) + __mt76_wr(dev, MT_WCID_DROP(idx), (val & ~bit) | (bit * drop)); +} +EXPORT_SYMBOL_GPL(mt76x02_mac_wcid_set_drop); + +void mt76x02_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq) +{ + struct mt76_txq *mtxq; + + if (!txq) + return; + + mtxq = (struct mt76_txq *) txq->drv_priv; + if (txq->sta) { + struct mt76x02_sta *sta; + + sta = (struct mt76x02_sta *) txq->sta->drv_priv; + mtxq->wcid = &sta->wcid; + } else { + struct mt76x02_vif *mvif; + + mvif = (struct mt76x02_vif *) txq->vif->drv_priv; + mtxq->wcid = &mvif->group_wcid; + } + + mt76_txq_init(dev, txq); +} +EXPORT_SYMBOL_GPL(mt76x02_txq_init); + +static void +mt76x02_mac_fill_txwi(struct mt76x02_txwi *txwi, struct sk_buff *skb, + struct ieee80211_sta *sta, int len, u8 nss) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + u16 txwi_flags = 0; + + if (info->flags & IEEE80211_TX_CTL_LDPC) + txwi->rate |= cpu_to_le16(MT_RXWI_RATE_LDPC); + if ((info->flags & IEEE80211_TX_CTL_STBC) && nss == 1) + txwi->rate |= cpu_to_le16(MT_RXWI_RATE_STBC); + if (nss > 1 && sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC) + txwi_flags |= MT_TXWI_FLAGS_MMPS; + if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) + txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ; + if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) + txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ; + if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) + txwi->pktid |= MT_TXWI_PKTID_PROBE; + if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) { + u8 ba_size = IEEE80211_MIN_AMPDU_BUF; + + ba_size <<= sta->ht_cap.ampdu_factor; + ba_size = min_t(int, 63, ba_size 
- 1); + if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) + ba_size = 0; + txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size); + + txwi_flags |= MT_TXWI_FLAGS_AMPDU | + FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY, + sta->ht_cap.ampdu_density); + } + + if (ieee80211_is_probe_resp(hdr->frame_control) || + ieee80211_is_beacon(hdr->frame_control)) + txwi_flags |= MT_TXWI_FLAGS_TS; + + txwi->flags |= cpu_to_le16(txwi_flags); + txwi->len_ctl = cpu_to_le16(len); +} static __le16 -mt76x2_mac_tx_rate_val(struct mt76x2_dev *dev, +mt76x02_mac_tx_rate_val(struct mt76_dev *dev, const struct ieee80211_tx_rate *rate, u8 *nss_val) { u16 rateval; @@ -334,10 +224,10 @@ mt76x2_mac_tx_rate_val(struct mt76x2_dev *dev, bw = 1; } else { const struct ieee80211_rate *r; - int band = dev->mt76.chandef.chan->band; + int band = dev->chandef.chan->band; u16 val; - r = &mt76_hw(dev)->wiphy->bands[band]->bitrates[rate->idx]; + r = &dev->hw->wiphy->bands[band]->bitrates[rate->idx]; if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) val = r->hw_value_short; else @@ -358,29 +248,108 @@ mt76x2_mac_tx_rate_val(struct mt76x2_dev *dev, return cpu_to_le16(rateval); } -void mt76x2_mac_wcid_set_rate(struct mt76x2_dev *dev, struct mt76_wcid *wcid, +void mt76x02_mac_wcid_set_rate(struct mt76_dev *dev, struct mt76_wcid *wcid, const struct ieee80211_tx_rate *rate) { - spin_lock_bh(&dev->mt76.lock); - wcid->tx_rate = mt76x2_mac_tx_rate_val(dev, rate, &wcid->tx_rate_nss); + spin_lock_bh(&dev->lock); + wcid->tx_rate = mt76x02_mac_tx_rate_val(dev, rate, &wcid->tx_rate_nss); wcid->tx_rate_set = true; - spin_unlock_bh(&dev->mt76.lock); + spin_unlock_bh(&dev->lock); +} + +bool mt76x02_mac_load_tx_status(struct mt76_dev *dev, + struct mt76x02_tx_status *stat) +{ + u32 stat1, stat2; + + stat2 = __mt76_rr(dev, MT_TX_STAT_FIFO_EXT); + stat1 = __mt76_rr(dev, MT_TX_STAT_FIFO); + + stat->valid = !!(stat1 & MT_TX_STAT_FIFO_VALID); + if (!stat->valid) + return false; + + stat->success = !!(stat1 & MT_TX_STAT_FIFO_SUCCESS); + stat->aggr = !!(stat1 & MT_TX_STAT_FIFO_AGGR); + stat->ack_req = !!(stat1 & MT_TX_STAT_FIFO_ACKREQ); + stat->wcid = FIELD_GET(MT_TX_STAT_FIFO_WCID, stat1); + stat->rate = FIELD_GET(MT_TX_STAT_FIFO_RATE, stat1); + + stat->retry = FIELD_GET(MT_TX_STAT_FIFO_EXT_RETRY, stat2); + stat->pktid = FIELD_GET(MT_TX_STAT_FIFO_EXT_PKTID, stat2); + + return true; } -EXPORT_SYMBOL_GPL(mt76x2_mac_wcid_set_rate); +EXPORT_SYMBOL_GPL(mt76x02_mac_load_tx_status); -void mt76x2_mac_write_txwi(struct mt76x2_dev *dev, struct mt76x2_txwi *txwi, - struct sk_buff *skb, struct mt76_wcid *wcid, - struct ieee80211_sta *sta, int len) +static int +mt76x02_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate, + enum nl80211_band band) +{ + u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate); + + txrate->idx = 0; + txrate->flags = 0; + txrate->count = 1; + + switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) { + case MT_PHY_TYPE_OFDM: + if (band == NL80211_BAND_2GHZ) + idx += 4; + + txrate->idx = idx; + return 0; + case MT_PHY_TYPE_CCK: + if (idx >= 8) + idx -= 8; + + txrate->idx = idx; + return 0; + case MT_PHY_TYPE_HT_GF: + txrate->flags |= IEEE80211_TX_RC_GREEN_FIELD; + /* fall through */ + case MT_PHY_TYPE_HT: + txrate->flags |= IEEE80211_TX_RC_MCS; + txrate->idx = idx; + break; + case MT_PHY_TYPE_VHT: + txrate->flags |= IEEE80211_TX_RC_VHT_MCS; + txrate->idx = idx; + break; + default: + return -EINVAL; + } + + switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) { + case MT_PHY_BW_20: + break; + case MT_PHY_BW_40: + txrate->flags |= 
IEEE80211_TX_RC_40_MHZ_WIDTH; + break; + case MT_PHY_BW_80: + txrate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH; + break; + default: + return -EINVAL; + } + + if (rate & MT_RXWI_RATE_SGI) + txrate->flags |= IEEE80211_TX_RC_SHORT_GI; + + return 0; +} + +void mt76x02_mac_write_txwi(struct mt76_dev *dev, struct mt76x02_txwi *txwi, + struct sk_buff *skb, struct mt76_wcid *wcid, + struct ieee80211_sta *sta, int len) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_tx_rate *rate = &info->control.rates[0]; struct ieee80211_key_conf *key = info->control.hw_key; - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; u16 rate_ht_mask = FIELD_PREP(MT_RXWI_RATE_PHY, BIT(1) | BIT(2)); - u16 txwi_flags = 0; u8 nss; s8 txpwr_adj, max_txpwr_adj; - u8 ccmp_pn[8]; + u8 ccmp_pn[8], nstreams = dev->chainmask & 0xf; memset(txwi, 0, sizeof(*txwi)); @@ -405,98 +374,127 @@ void mt76x2_mac_write_txwi(struct mt76x2_dev *dev, struct mt76x2_txwi *txwi, txwi->eiv = *((__le32 *)&ccmp_pn[1]); } - spin_lock_bh(&dev->mt76.lock); + spin_lock_bh(&dev->lock); if (wcid && (rate->idx < 0 || !rate->count)) { txwi->rate = wcid->tx_rate; max_txpwr_adj = wcid->max_txpwr_adj; nss = wcid->tx_rate_nss; } else { - txwi->rate = mt76x2_mac_tx_rate_val(dev, rate, &nss); - max_txpwr_adj = mt76x2_tx_get_max_txpwr_adj(dev, rate); + txwi->rate = mt76x02_mac_tx_rate_val(dev, rate, &nss); + max_txpwr_adj = mt76x02_tx_get_max_txpwr_adj(dev, rate); } - spin_unlock_bh(&dev->mt76.lock); + spin_unlock_bh(&dev->lock); - txpwr_adj = mt76x2_tx_get_txpwr_adj(dev, dev->txpower_conf, - max_txpwr_adj); + txpwr_adj = mt76x02_tx_get_txpwr_adj(dev, dev->txpower_conf, + max_txpwr_adj); txwi->ctl2 = FIELD_PREP(MT_TX_PWR_ADJ, txpwr_adj); - if (mt76xx_rev(dev) >= MT76XX_REV_E4) + if (nstreams > 1 && mt76_rev(dev) >= MT76XX_REV_E4) txwi->txstream = 0x13; - else if (mt76xx_rev(dev) >= MT76XX_REV_E3 && + else if (nstreams > 1 && mt76_rev(dev) >= MT76XX_REV_E3 && !(txwi->rate & cpu_to_le16(rate_ht_mask))) txwi->txstream = 0x93; - if (info->flags & IEEE80211_TX_CTL_LDPC) - txwi->rate |= cpu_to_le16(MT_RXWI_RATE_LDPC); - if ((info->flags & IEEE80211_TX_CTL_STBC) && nss == 1) - txwi->rate |= cpu_to_le16(MT_RXWI_RATE_STBC); - if (nss > 1 && sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC) - txwi_flags |= MT_TXWI_FLAGS_MMPS; - if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) - txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ; - if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) - txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ; - if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) - txwi->pktid |= MT_TXWI_PKTID_PROBE; - if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) { - u8 ba_size = IEEE80211_MIN_AMPDU_BUF; + mt76x02_mac_fill_txwi(txwi, skb, sta, len, nss); +} +EXPORT_SYMBOL_GPL(mt76x02_mac_write_txwi); - ba_size <<= sta->ht_cap.ampdu_factor; - ba_size = min_t(int, 63, ba_size - 1); - if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) - ba_size = 0; - txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size); +static void +mt76x02_mac_fill_tx_status(struct mt76_dev *dev, + struct ieee80211_tx_info *info, + struct mt76x02_tx_status *st, int n_frames) +{ + struct ieee80211_tx_rate *rate = info->status.rates; + int cur_idx, last_rate; + int i; - txwi_flags |= MT_TXWI_FLAGS_AMPDU | - FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY, - sta->ht_cap.ampdu_density); + if (!n_frames) + return; + + last_rate = min_t(int, st->retry, IEEE80211_TX_MAX_RATES - 1); + mt76x02_mac_process_tx_rate(&rate[last_rate], st->rate, + dev->chandef.chan->band); + if (last_rate < 
IEEE80211_TX_MAX_RATES - 1) + rate[last_rate + 1].idx = -1; + + cur_idx = rate[last_rate].idx + last_rate; + for (i = 0; i <= last_rate; i++) { + rate[i].flags = rate[last_rate].flags; + rate[i].idx = max_t(int, 0, cur_idx - i); + rate[i].count = 1; } + rate[last_rate].count = st->retry + 1 - last_rate; - if (ieee80211_is_probe_resp(hdr->frame_control) || - ieee80211_is_beacon(hdr->frame_control)) - txwi_flags |= MT_TXWI_FLAGS_TS; + info->status.ampdu_len = n_frames; + info->status.ampdu_ack_len = st->success ? n_frames : 0; - txwi->flags |= cpu_to_le16(txwi_flags); - txwi->len_ctl = cpu_to_le16(len); -} -EXPORT_SYMBOL_GPL(mt76x2_mac_write_txwi); + if (st->pktid & MT_TXWI_PKTID_PROBE) + info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE; -void mt76x2_mac_wcid_set_drop(struct mt76x2_dev *dev, u8 idx, bool drop) -{ - u32 val = mt76_rr(dev, MT_WCID_DROP(idx)); - u32 bit = MT_WCID_DROP_MASK(idx); + if (st->aggr) + info->flags |= IEEE80211_TX_CTL_AMPDU | + IEEE80211_TX_STAT_AMPDU; - /* prevent unnecessary writes */ - if ((val & bit) != (bit * drop)) - mt76_wr(dev, MT_WCID_DROP(idx), (val & ~bit) | (bit * drop)); + if (!st->ack_req) + info->flags |= IEEE80211_TX_CTL_NO_ACK; + else if (st->success) + info->flags |= IEEE80211_TX_STAT_ACK; } -EXPORT_SYMBOL_GPL(mt76x2_mac_wcid_set_drop); -void mt76x2_mac_wcid_setup(struct mt76x2_dev *dev, u8 idx, u8 vif_idx, u8 *mac) +void mt76x02_send_tx_status(struct mt76_dev *dev, + struct mt76x02_tx_status *stat, u8 *update) { - struct mt76_wcid_addr addr = {}; - u32 attr; + struct ieee80211_tx_info info = {}; + struct ieee80211_sta *sta = NULL; + struct mt76_wcid *wcid = NULL; + struct mt76x02_sta *msta = NULL; - attr = FIELD_PREP(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) | - FIELD_PREP(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8)); + rcu_read_lock(); + if (stat->wcid < ARRAY_SIZE(dev->wcid)) + wcid = rcu_dereference(dev->wcid[stat->wcid]); - mt76_wr(dev, MT_WCID_ATTR(idx), attr); + if (wcid) { + void *priv; - mt76_wr(dev, MT_WCID_TX_RATE(idx), 0); - mt76_wr(dev, MT_WCID_TX_RATE(idx) + 4, 0); + priv = msta = container_of(wcid, struct mt76x02_sta, wcid); + sta = container_of(priv, struct ieee80211_sta, + drv_priv); + } - if (idx >= 128) - return; + if (msta && stat->aggr) { + u32 stat_val, stat_cache; - if (mac) - memcpy(addr.macaddr, mac, ETH_ALEN); + stat_val = stat->rate; + stat_val |= ((u32) stat->retry) << 16; + stat_cache = msta->status.rate; + stat_cache |= ((u32) msta->status.retry) << 16; - mt76_wr_copy(dev, MT_WCID_ADDR(idx), &addr, sizeof(addr)); + if (*update == 0 && stat_val == stat_cache && + stat->wcid == msta->status.wcid && msta->n_frames < 32) { + msta->n_frames++; + goto out; + } + + mt76x02_mac_fill_tx_status(dev, &info, &msta->status, + msta->n_frames); + + msta->status = *stat; + msta->n_frames = 1; + *update = 0; + } else { + mt76x02_mac_fill_tx_status(dev, &info, stat, 1); + *update = 1; + } + + ieee80211_tx_status_noskb(dev->hw, sta, &info); + +out: + rcu_read_unlock(); } -EXPORT_SYMBOL_GPL(mt76x2_mac_wcid_setup); +EXPORT_SYMBOL_GPL(mt76x02_send_tx_status); -static int -mt76x2_mac_process_rate(struct mt76_rx_status *status, u16 rate) +int +mt76x02_mac_process_rate(struct mt76_rx_status *status, u16 rate) { u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate); @@ -561,22 +559,30 @@ mt76x2_mac_process_rate(struct mt76_rx_status *status, u16 rate) return 0; } +EXPORT_SYMBOL_GPL(mt76x02_mac_process_rate); -static void mt76x2_remove_hdr_pad(struct sk_buff *skb, int len) +void mt76x02_mac_setaddr(struct mt76_dev *dev, u8 *addr) { - int hdrlen; + 
ether_addr_copy(dev->macaddr, addr); - if (!len) - return; + if (!is_valid_ether_addr(dev->macaddr)) { + eth_random_addr(dev->macaddr); + dev_info(dev->dev, + "Invalid MAC address, using random address %pM\n", + dev->macaddr); + } - hdrlen = ieee80211_get_hdrlen_from_skb(skb); - memmove(skb->data + len, skb->data, hdrlen); - skb_pull(skb, len); + __mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dev->macaddr)); + __mt76_wr(dev, MT_MAC_ADDR_DW1, + get_unaligned_le16(dev->macaddr + 4) | + FIELD_PREP(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff)); } +EXPORT_SYMBOL_GPL(mt76x02_mac_setaddr); -int mt76x2_mac_get_rssi(struct mt76x2_dev *dev, s8 rssi, int chain) +static int +mt76x02_mac_get_rssi(struct mt76x02_dev *dev, s8 rssi, int chain) { - struct mt76x2_rx_freq_cal *cal = &dev->cal.rx; + struct mt76x02_rx_freq_cal *cal = &dev->cal.rx; rssi += cal->rssi_offset[chain]; rssi -= cal->lna_gain; @@ -584,46 +590,19 @@ int mt76x2_mac_get_rssi(struct mt76x2_dev *dev, s8 rssi, int chain) return rssi; } -static struct mt76x2_sta * -mt76x2_rx_get_sta(struct mt76x2_dev *dev, u8 idx) -{ - struct mt76_wcid *wcid; - - if (idx >= ARRAY_SIZE(dev->wcid)) - return NULL; - - wcid = rcu_dereference(dev->wcid[idx]); - if (!wcid) - return NULL; - - return container_of(wcid, struct mt76x2_sta, wcid); -} - -static struct mt76_wcid * -mt76x2_rx_get_sta_wcid(struct mt76x2_dev *dev, struct mt76x2_sta *sta, - bool unicast) -{ - if (!sta) - return NULL; - - if (unicast) - return &sta->wcid; - else - return &sta->vif->group_wcid; -} - -int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb, - void *rxi) +int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb, + void *rxi) { struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb; - struct mt76x2_rxwi *rxwi = rxi; - struct mt76x2_sta *sta; + struct mt76x02_rxwi *rxwi = rxi; + struct mt76x02_sta *sta; u32 rxinfo = le32_to_cpu(rxwi->rxinfo); u32 ctl = le32_to_cpu(rxwi->ctl); u16 rate = le16_to_cpu(rxwi->rate); u16 tid_sn = le16_to_cpu(rxwi->tid_sn); bool unicast = rxwi->rxinfo & cpu_to_le32(MT_RXINFO_UNICAST); - int pad_len = 0; + int i, pad_len = 0, nstreams = dev->mt76.chainmask & 0xf; + s8 signal; u8 pn_len; u8 wcid; int len; @@ -642,8 +621,8 @@ int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb, } wcid = FIELD_GET(MT_RXWI_CTL_WCID, ctl); - sta = mt76x2_rx_get_sta(dev, wcid); - status->wcid = mt76x2_rx_get_sta_wcid(dev, sta, unicast); + sta = mt76x02_rx_get_sta(&dev->mt76, wcid); + status->wcid = mt76x02_rx_get_sta_wcid(sta, unicast); len = FIELD_GET(MT_RXWI_CTL_MPDU_LEN, ctl); pn_len = FIELD_GET(MT_RXINFO_PN_LEN, rxinfo); @@ -670,7 +649,7 @@ int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb, } } - mt76x2_remove_hdr_pad(skb, pad_len); + mt76x02_remove_hdr_pad(skb, pad_len); if ((rxinfo & MT_RXINFO_BA) && !(rxinfo & MT_RXINFO_NULL)) status->aggr = true; @@ -679,10 +658,17 @@ int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb, return -EINVAL; pskb_trim(skb, len); - status->chains = BIT(0) | BIT(1); - status->chain_signal[0] = mt76x2_mac_get_rssi(dev, rxwi->rssi[0], 0); - status->chain_signal[1] = mt76x2_mac_get_rssi(dev, rxwi->rssi[1], 1); - status->signal = max(status->chain_signal[0], status->chain_signal[1]); + + status->chains = BIT(0); + signal = mt76x02_mac_get_rssi(dev, rxwi->rssi[0], 0); + for (i = 1; i < nstreams; i++) { + status->chains |= BIT(i); + status->chain_signal[i] = mt76x02_mac_get_rssi(dev, + rxwi->rssi[i], + i); + signal = max_t(s8, signal, status->chain_signal[i]); + } 
+ status->signal = signal; status->freq = dev->mt76.chandef.chan->center_freq; status->band = dev->mt76.chandef.chan->band; @@ -694,6 +680,66 @@ int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb, sta->inactive_count = 0; } - return mt76x2_mac_process_rate(status, rate); + return mt76x02_mac_process_rate(status, rate); +} + +void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq) +{ + struct mt76x02_tx_status stat = {}; + unsigned long flags; + u8 update = 1; + bool ret; + + if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state)) + return; + + trace_mac_txstat_poll(dev); + + while (!irq || !kfifo_is_full(&dev->txstatus_fifo)) { + spin_lock_irqsave(&dev->mt76.mmio.irq_lock, flags); + ret = mt76x02_mac_load_tx_status(&dev->mt76, &stat); + spin_unlock_irqrestore(&dev->mt76.mmio.irq_lock, flags); + + if (!ret) + break; + + trace_mac_txstat_fetch(dev, &stat); + + if (!irq) { + mt76x02_send_tx_status(&dev->mt76, &stat, &update); + continue; + } + + kfifo_put(&dev->txstatus_fifo, stat); + } +} +EXPORT_SYMBOL_GPL(mt76x02_mac_poll_tx_status); + +static void +mt76x02_mac_queue_txdone(struct mt76x02_dev *dev, struct sk_buff *skb, + void *txwi_ptr) +{ + struct mt76x02_tx_info *txi = mt76x02_skb_tx_info(skb); + struct mt76x02_txwi *txwi = txwi_ptr; + + mt76x02_mac_poll_tx_status(dev, false); + + txi->tries = 0; + txi->jiffies = jiffies; + txi->wcid = txwi->wcid; + txi->pktid = txwi->pktid; + trace_mac_txdone_add(dev, txwi->wcid, txwi->pktid); + mt76x02_tx_complete(&dev->mt76, skb); +} + +void mt76x02_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q, + struct mt76_queue_entry *e, bool flush) +{ + struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76); + + if (e->txwi) + mt76x02_mac_queue_txdone(dev, e->skb, &e->txwi->txwi); + else + dev_kfree_skb_any(e->skb); } -EXPORT_SYMBOL_GPL(mt76x2_mac_process_rx); +EXPORT_SYMBOL_GPL(mt76x02_tx_complete_skb); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h index 5af0107ba748..4f7ee4620ab5 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h @@ -1,5 +1,6 @@ /* * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> + * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl> * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -14,17 +15,14 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ -#ifndef __MT76x2_MAC_H -#define __MT76x2_MAC_H +#ifndef __MT76X02_MAC_H +#define __MT76X02_MAC_H -#include "mt76.h" +#include <linux/average.h> -struct mt76x2_dev; -struct mt76x2_sta; -struct mt76x2_vif; -struct mt76x2_txwi; +struct mt76x02_dev; -struct mt76x2_tx_status { +struct mt76x02_tx_status { u8 valid:1; u8 success:1; u8 aggr:1; @@ -35,7 +33,16 @@ struct mt76x2_tx_status { u16 rate; } __packed __aligned(2); -struct mt76x2_tx_info { +#define MT_VIF_WCID(_n) (254 - ((_n) & 7)) +#define MT_MAX_VIFS 8 + +struct mt76x02_vif { + u8 idx; + + struct mt76_wcid group_wcid; +}; + +struct mt76x02_tx_info { unsigned long jiffies; u8 tries; @@ -44,17 +51,17 @@ struct mt76x2_tx_info { u8 retry; }; -struct mt76x2_rxwi { - __le32 rxinfo; +DECLARE_EWMA(signal, 10, 8); - __le32 ctl; +struct mt76x02_sta { + struct mt76_wcid wcid; /* must be first */ - __le16 tid_sn; - __le16 rate; + struct mt76x02_vif *vif; + struct mt76x02_tx_status status; + int n_frames; - u8 rssi[4]; - - __le32 bbp_rxinfo[4]; + struct ewma_signal rssi; + int inactive_count; }; #define MT_RXINFO_BA BIT(0) @@ -108,6 +115,19 @@ struct mt76x2_rxwi { #define MT_RATE_INDEX_VHT_IDX GENMASK(3, 0) #define MT_RATE_INDEX_VHT_NSS GENMASK(5, 4) +struct mt76x02_rxwi { + __le32 rxinfo; + + __le32 ctl; + + __le16 tid_sn; + __le16 rate; + + u8 rssi[4]; + + __le32 bbp_rxinfo[4]; +}; + #define MT_TX_PWR_ADJ GENMASK(3, 0) enum mt76x2_phy_bandwidth { @@ -135,7 +155,7 @@ enum mt76x2_phy_bandwidth { #define MT_TXWI_PKTID_PROBE BIT(7) -struct mt76x2_txwi { +struct mt76x02_txwi { __le16 flags; __le16 rate; u8 ack_ctl; @@ -149,41 +169,60 @@ struct mt76x2_txwi { u8 pktid; } __packed __aligned(4); -static inline struct mt76x2_tx_info * -mt76x2_skb_tx_info(struct sk_buff *skb) +static inline bool mt76x02_wait_for_mac(struct mt76_dev *dev) +{ + const u32 MAC_CSR0 = 0x1000; + int i; + + for (i = 0; i < 500; i++) { + if (test_bit(MT76_REMOVED, &dev->state)) + return false; + + switch (dev->bus->rr(dev, MAC_CSR0)) { + case 0: + case ~0: + break; + default: + return true; + } + usleep_range(5000, 10000); + } + return false; +} + +static inline struct mt76x02_tx_info * +mt76x02_skb_tx_info(struct sk_buff *skb) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - return (void *) info->status.status_driver_data; + return (void *)info->status.status_driver_data; } -int mt76x2_mac_start(struct mt76x2_dev *dev); -void mt76x2_mac_stop(struct mt76x2_dev *dev, bool force); -void mt76x2_mac_resume(struct mt76x2_dev *dev); -void mt76x2_mac_set_bssid(struct mt76x2_dev *dev, u8 idx, const u8 *addr); - -int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb, - void *rxi); -void mt76x2_mac_write_txwi(struct mt76x2_dev *dev, struct mt76x2_txwi *txwi, - struct sk_buff *skb, struct mt76_wcid *wcid, - struct ieee80211_sta *sta, int len); -void mt76x2_mac_wcid_setup(struct mt76x2_dev *dev, u8 idx, u8 vif_idx, u8 *mac); -int mt76x2_mac_wcid_set_key(struct mt76x2_dev *dev, u8 idx, - struct ieee80211_key_conf *key); -void mt76x2_mac_wcid_set_rate(struct mt76x2_dev *dev, struct mt76_wcid *wcid, - const struct ieee80211_tx_rate *rate); -void mt76x2_mac_wcid_set_drop(struct mt76x2_dev *dev, u8 idx, bool drop); +void mt76x02_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq); +enum mt76x02_cipher_type +mt76x02_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data); -int mt76x2_mac_shared_key_setup(struct mt76x2_dev *dev, u8 vif_idx, u8 key_idx, +int mt76x02_mac_shared_key_setup(struct mt76_dev *dev, u8 vif_idx, u8 key_idx, struct ieee80211_key_conf 
*key); - -int mt76x2_mac_set_beacon(struct mt76x2_dev *dev, u8 vif_idx, - struct sk_buff *skb); -void mt76x2_mac_set_beacon_enable(struct mt76x2_dev *dev, u8 vif_idx, bool val); - -void mt76x2_mac_poll_tx_status(struct mt76x2_dev *dev, bool irq); -void mt76x2_mac_process_tx_status_fifo(struct mt76x2_dev *dev); - -void mt76x2_mac_work(struct work_struct *work); - +int mt76x02_mac_wcid_set_key(struct mt76_dev *dev, u8 idx, + struct ieee80211_key_conf *key); +void mt76x02_mac_wcid_setup(struct mt76_dev *dev, u8 idx, u8 vif_idx, u8 *mac); +void mt76x02_mac_wcid_set_drop(struct mt76_dev *dev, u8 idx, bool drop); +void mt76x02_mac_wcid_set_rate(struct mt76_dev *dev, struct mt76_wcid *wcid, + const struct ieee80211_tx_rate *rate); +bool mt76x02_mac_load_tx_status(struct mt76_dev *dev, + struct mt76x02_tx_status *stat); +void mt76x02_send_tx_status(struct mt76_dev *dev, + struct mt76x02_tx_status *stat, u8 *update); +int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb, + void *rxi); +int +mt76x02_mac_process_rate(struct mt76_rx_status *status, u16 rate); +void mt76x02_mac_setaddr(struct mt76_dev *dev, u8 *addr); +void mt76x02_mac_write_txwi(struct mt76_dev *dev, struct mt76x02_txwi *txwi, + struct sk_buff *skb, struct mt76_wcid *wcid, + struct ieee80211_sta *sta, int len); +void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq); +void mt76x02_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q, + struct mt76_queue_entry *e, bool flush); #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c new file mode 100644 index 000000000000..6d565133b7af --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c @@ -0,0 +1,226 @@ +/* + * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> + * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include <linux/kernel.h> +#include <linux/firmware.h> +#include <linux/delay.h> + +#include "mt76.h" +#include "mt76x02_mcu.h" +#include "mt76x02_dma.h" + +struct sk_buff *mt76x02_mcu_msg_alloc(const void *data, int len) +{ + struct sk_buff *skb; + + skb = alloc_skb(len, GFP_KERNEL); + if (!skb) + return NULL; + memcpy(skb_put(skb, len), data, len); + + return skb; +} +EXPORT_SYMBOL_GPL(mt76x02_mcu_msg_alloc); + +static struct sk_buff * +mt76x02_mcu_get_response(struct mt76_dev *dev, unsigned long expires) +{ + unsigned long timeout; + + if (!time_is_after_jiffies(expires)) + return NULL; + + timeout = expires - jiffies; + wait_event_timeout(dev->mmio.mcu.wait, + !skb_queue_empty(&dev->mmio.mcu.res_q), + timeout); + return skb_dequeue(&dev->mmio.mcu.res_q); +} + +static int +mt76x02_tx_queue_mcu(struct mt76_dev *dev, enum mt76_txq_id qid, + struct sk_buff *skb, int cmd, int seq) +{ + struct mt76_queue *q = &dev->q_tx[qid]; + struct mt76_queue_buf buf; + dma_addr_t addr; + u32 tx_info; + + tx_info = MT_MCU_MSG_TYPE_CMD | + FIELD_PREP(MT_MCU_MSG_CMD_TYPE, cmd) | + FIELD_PREP(MT_MCU_MSG_CMD_SEQ, seq) | + FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) | + FIELD_PREP(MT_MCU_MSG_LEN, skb->len); + + addr = dma_map_single(dev->dev, skb->data, skb->len, + DMA_TO_DEVICE); + if (dma_mapping_error(dev->dev, addr)) + return -ENOMEM; + + buf.addr = addr; + buf.len = skb->len; + spin_lock_bh(&q->lock); + dev->queue_ops->add_buf(dev, q, &buf, 1, tx_info, skb, NULL); + dev->queue_ops->kick(dev, q); + spin_unlock_bh(&q->lock); + + return 0; +} + +int mt76x02_mcu_msg_send(struct mt76_dev *dev, struct sk_buff *skb, + int cmd, bool wait_resp) +{ + unsigned long expires = jiffies + HZ; + int ret; + u8 seq; + + if (!skb) + return -EINVAL; + + mutex_lock(&dev->mmio.mcu.mutex); + + seq = ++dev->mmio.mcu.msg_seq & 0xf; + if (!seq) + seq = ++dev->mmio.mcu.msg_seq & 0xf; + + ret = mt76x02_tx_queue_mcu(dev, MT_TXQ_MCU, skb, cmd, seq); + if (ret) + goto out; + + while (wait_resp) { + u32 *rxfce; + bool check_seq = false; + + skb = mt76x02_mcu_get_response(dev, expires); + if (!skb) { + dev_err(dev->dev, + "MCU message %d (seq %d) timed out\n", cmd, + seq); + ret = -ETIMEDOUT; + break; + } + + rxfce = (u32 *) skb->cb; + + if (seq == FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, *rxfce)) + check_seq = true; + + dev_kfree_skb(skb); + if (check_seq) + break; + } + +out: + mutex_unlock(&dev->mmio.mcu.mutex); + + return ret; +} +EXPORT_SYMBOL_GPL(mt76x02_mcu_msg_send); + +int mt76x02_mcu_function_select(struct mt76_dev *dev, + enum mcu_function func, + u32 val, bool wait_resp) +{ + struct sk_buff *skb; + struct { + __le32 id; + __le32 value; + } __packed __aligned(4) msg = { + .id = cpu_to_le32(func), + .value = cpu_to_le32(val), + }; + + skb = dev->mcu_ops->mcu_msg_alloc(&msg, sizeof(msg)); + return dev->mcu_ops->mcu_send_msg(dev, skb, CMD_FUN_SET_OP, + wait_resp); +} +EXPORT_SYMBOL_GPL(mt76x02_mcu_function_select); + +int mt76x02_mcu_set_radio_state(struct mt76_dev *dev, bool on, + bool wait_resp) +{ + struct sk_buff *skb; + struct { + __le32 mode; + __le32 level; + } __packed __aligned(4) msg = { + .mode = cpu_to_le32(on ? 
RADIO_ON : RADIO_OFF), + .level = cpu_to_le32(0), + }; + + skb = dev->mcu_ops->mcu_msg_alloc(&msg, sizeof(msg)); + return dev->mcu_ops->mcu_send_msg(dev, skb, CMD_POWER_SAVING_OP, + wait_resp); +} +EXPORT_SYMBOL_GPL(mt76x02_mcu_set_radio_state); + +int mt76x02_mcu_calibrate(struct mt76_dev *dev, int type, + u32 param, bool wait) +{ + struct sk_buff *skb; + struct { + __le32 id; + __le32 value; + } __packed __aligned(4) msg = { + .id = cpu_to_le32(type), + .value = cpu_to_le32(param), + }; + int ret; + + if (wait) + dev->bus->rmw(dev, MT_MCU_COM_REG0, BIT(31), 0); + + skb = dev->mcu_ops->mcu_msg_alloc(&msg, sizeof(msg)); + ret = dev->mcu_ops->mcu_send_msg(dev, skb, CMD_CALIBRATION_OP, true); + if (ret) + return ret; + + if (wait && + WARN_ON(!__mt76_poll_msec(dev, MT_MCU_COM_REG0, + BIT(31), BIT(31), 100))) + return -ETIMEDOUT; + + return 0; +} +EXPORT_SYMBOL_GPL(mt76x02_mcu_calibrate); + +int mt76x02_mcu_cleanup(struct mt76_dev *dev) +{ + struct sk_buff *skb; + + dev->bus->wr(dev, MT_MCU_INT_LEVEL, 1); + usleep_range(20000, 30000); + + while ((skb = skb_dequeue(&dev->mmio.mcu.res_q)) != NULL) + dev_kfree_skb(skb); + + return 0; +} +EXPORT_SYMBOL_GPL(mt76x02_mcu_cleanup); + +void mt76x02_set_ethtool_fwver(struct mt76_dev *dev, + const struct mt76x02_fw_header *h) +{ + u16 bld = le16_to_cpu(h->build_ver); + u16 ver = le16_to_cpu(h->fw_ver); + + snprintf(dev->hw->wiphy->fw_version, + sizeof(dev->hw->wiphy->fw_version), + "%d.%d.%02d-b%x", + (ver >> 12) & 0xf, (ver >> 8) & 0xf, ver & 0xf, bld); +} +EXPORT_SYMBOL_GPL(mt76x02_set_ethtool_fwver); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h new file mode 100644 index 000000000000..ce664f8b1c94 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h @@ -0,0 +1,111 @@ +/* + * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef __MT76x02_MCU_H +#define __MT76x02_MCU_H + +#define MT_MCU_RESET_CTL 0x070C +#define MT_MCU_INT_LEVEL 0x0718 +#define MT_MCU_COM_REG0 0x0730 +#define MT_MCU_COM_REG1 0x0734 +#define MT_MCU_COM_REG2 0x0738 +#define MT_MCU_COM_REG3 0x073C + +#define MT_INBAND_PACKET_MAX_LEN 192 +#define MT_MCU_MEMMAP_WLAN 0x410000 + +#define MT_MCU_PCIE_REMAP_BASE4 0x074C + +#define MT_MCU_SEMAPHORE_00 0x07B0 +#define MT_MCU_SEMAPHORE_01 0x07B4 +#define MT_MCU_SEMAPHORE_02 0x07B8 +#define MT_MCU_SEMAPHORE_03 0x07BC + +#define MT_MCU_ILM_ADDR 0x80000 + +enum mcu_cmd { + CMD_FUN_SET_OP = 1, + CMD_LOAD_CR = 2, + CMD_INIT_GAIN_OP = 3, + CMD_DYNC_VGA_OP = 6, + CMD_TDLS_CH_SW = 7, + CMD_BURST_WRITE = 8, + CMD_READ_MODIFY_WRITE = 9, + CMD_RANDOM_READ = 10, + CMD_BURST_READ = 11, + CMD_RANDOM_WRITE = 12, + CMD_LED_MODE_OP = 16, + CMD_POWER_SAVING_OP = 20, + CMD_WOW_CONFIG = 21, + CMD_WOW_QUERY = 22, + CMD_WOW_FEATURE = 24, + CMD_CARRIER_DETECT_OP = 28, + CMD_RADOR_DETECT_OP = 29, + CMD_SWITCH_CHANNEL_OP = 30, + CMD_CALIBRATION_OP = 31, + CMD_BEACON_OP = 32, + CMD_ANTENNA_OP = 33, +}; + +enum mcu_power_mode { + RADIO_OFF = 0x30, + RADIO_ON = 0x31, + RADIO_OFF_AUTO_WAKEUP = 0x32, + RADIO_OFF_ADVANCE = 0x33, + RADIO_ON_ADVANCE = 0x34, +}; + +enum mcu_function { + Q_SELECT = 1, + BW_SETTING = 2, + USB2_SW_DISCONNECT = 2, + USB3_SW_DISCONNECT = 3, + LOG_FW_DEBUG_MSG = 4, + GET_FW_VERSION = 5, +}; + +struct mt76x02_fw_header { + __le32 ilm_len; + __le32 dlm_len; + __le16 build_ver; + __le16 fw_ver; + u8 pad[4]; + char build_time[16]; +}; + +struct mt76x02_patch_header { + char build_time[16]; + char platform[4]; + char hw_version[4]; + char patch_version[4]; + u8 pad[2]; +}; + +int mt76x02_mcu_cleanup(struct mt76_dev *dev); +int mt76x02_mcu_calibrate(struct mt76_dev *dev, int type, + u32 param, bool wait); +struct sk_buff *mt76x02_mcu_msg_alloc(const void *data, int len); +int mt76x02_mcu_msg_send(struct mt76_dev *dev, struct sk_buff *skb, + int cmd, bool wait_resp); +int mt76x02_mcu_function_select(struct mt76_dev *dev, + enum mcu_function func, + u32 val, bool wait_resp); +int mt76x02_mcu_set_radio_state(struct mt76_dev *dev, bool on, + bool wait_resp); +void mt76x02_set_ethtool_fwver(struct mt76_dev *dev, + const struct mt76x02_fw_header *h); + +#endif /* __MT76x02_MCU_H */ diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c new file mode 100644 index 000000000000..1b945079c802 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c @@ -0,0 +1,260 @@ +/* + * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> + * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include <linux/kernel.h> +#include <linux/irq.h> + +#include "mt76x02.h" +#include "mt76x02_trace.h" + +static int +mt76x02_init_tx_queue(struct mt76x02_dev *dev, struct mt76_queue *q, + int idx, int n_desc) +{ + int ret; + + q->regs = dev->mt76.mmio.regs + MT_TX_RING_BASE + idx * MT_RING_SIZE; + q->ndesc = n_desc; + q->hw_idx = idx; + + ret = mt76_queue_alloc(dev, q); + if (ret) + return ret; + + mt76x02_irq_enable(dev, MT_INT_TX_DONE(idx)); + + return 0; +} + +static int +mt76x02_init_rx_queue(struct mt76x02_dev *dev, struct mt76_queue *q, + int idx, int n_desc, int bufsize) +{ + int ret; + + q->regs = dev->mt76.mmio.regs + MT_RX_RING_BASE + idx * MT_RING_SIZE; + q->ndesc = n_desc; + q->buf_size = bufsize; + + ret = mt76_queue_alloc(dev, q); + if (ret) + return ret; + + mt76x02_irq_enable(dev, MT_INT_RX_DONE(idx)); + + return 0; +} + +static void mt76x02_process_tx_status_fifo(struct mt76x02_dev *dev) +{ + struct mt76x02_tx_status stat; + u8 update = 1; + + while (kfifo_get(&dev->txstatus_fifo, &stat)) + mt76x02_send_tx_status(&dev->mt76, &stat, &update); +} + +static void mt76x02_tx_tasklet(unsigned long data) +{ + struct mt76x02_dev *dev = (struct mt76x02_dev *)data; + int i; + + mt76x02_process_tx_status_fifo(dev); + + for (i = MT_TXQ_MCU; i >= 0; i--) + mt76_queue_tx_cleanup(dev, i, false); + + mt76x02_mac_poll_tx_status(dev, false); + mt76x02_irq_enable(dev, MT_INT_TX_DONE_ALL); +} + +int mt76x02_dma_init(struct mt76x02_dev *dev) +{ + struct mt76_txwi_cache __maybe_unused *t; + int i, ret, fifo_size; + struct mt76_queue *q; + void *status_fifo; + + BUILD_BUG_ON(sizeof(t->txwi) < sizeof(struct mt76x02_txwi)); + BUILD_BUG_ON(sizeof(struct mt76x02_rxwi) > MT_RX_HEADROOM); + + fifo_size = roundup_pow_of_two(32 * sizeof(struct mt76x02_tx_status)); + status_fifo = devm_kzalloc(dev->mt76.dev, fifo_size, GFP_KERNEL); + if (!status_fifo) + return -ENOMEM; + + tasklet_init(&dev->tx_tasklet, mt76x02_tx_tasklet, (unsigned long) dev); + kfifo_init(&dev->txstatus_fifo, status_fifo, fifo_size); + + mt76_dma_attach(&dev->mt76); + + mt76_wr(dev, MT_WPDMA_RST_IDX, ~0); + + for (i = 0; i < IEEE80211_NUM_ACS; i++) { + ret = mt76x02_init_tx_queue(dev, &dev->mt76.q_tx[i], + mt76_ac_to_hwq(i), + MT_TX_RING_SIZE); + if (ret) + return ret; + } + + ret = mt76x02_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_PSD], + MT_TX_HW_QUEUE_MGMT, MT_TX_RING_SIZE); + if (ret) + return ret; + + ret = mt76x02_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU], + MT_TX_HW_QUEUE_MCU, MT_MCU_RING_SIZE); + if (ret) + return ret; + + ret = mt76x02_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1, + MT_MCU_RING_SIZE, MT_RX_BUF_SIZE); + if (ret) + return ret; + + q = &dev->mt76.q_rx[MT_RXQ_MAIN]; + q->buf_offset = MT_RX_HEADROOM - sizeof(struct mt76x02_rxwi); + ret = mt76x02_init_rx_queue(dev, q, 0, MT76X02_RX_RING_SIZE, + MT_RX_BUF_SIZE); + if (ret) + return ret; + + return mt76_init_queues(dev); +} +EXPORT_SYMBOL_GPL(mt76x02_dma_init); + +void mt76x02_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q) +{ + struct mt76x02_dev *dev; + + dev = container_of(mdev, struct mt76x02_dev, mt76); + mt76x02_irq_enable(dev, MT_INT_RX_DONE(q)); +} +EXPORT_SYMBOL_GPL(mt76x02_rx_poll_complete); + +irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance) +{ + struct mt76x02_dev *dev = dev_instance; + u32 intr; + + intr = mt76_rr(dev, MT_INT_SOURCE_CSR); + mt76_wr(dev, MT_INT_SOURCE_CSR, intr); + + if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state)) + return IRQ_NONE; + + trace_dev_irq(dev, intr, dev->mt76.mmio.irqmask); + + 
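/* dispatch only the causes we have unmasked; everything pending
+	 * was already acked by the MT_INT_SOURCE_CSR write above */
+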
intr &= dev->mt76.mmio.irqmask; + + if (intr & MT_INT_TX_DONE_ALL) { + mt76x02_irq_disable(dev, MT_INT_TX_DONE_ALL); + tasklet_schedule(&dev->tx_tasklet); + } + + if (intr & MT_INT_RX_DONE(0)) { + mt76x02_irq_disable(dev, MT_INT_RX_DONE(0)); + napi_schedule(&dev->mt76.napi[0]); + } + + if (intr & MT_INT_RX_DONE(1)) { + mt76x02_irq_disable(dev, MT_INT_RX_DONE(1)); + napi_schedule(&dev->mt76.napi[1]); + } + + if (intr & MT_INT_PRE_TBTT) + tasklet_schedule(&dev->pre_tbtt_tasklet); + + /* send buffered multicast frames now */ + if (intr & MT_INT_TBTT) + mt76_queue_kick(dev, &dev->mt76.q_tx[MT_TXQ_PSD]); + + if (intr & MT_INT_TX_STAT) { + mt76x02_mac_poll_tx_status(dev, true); + tasklet_schedule(&dev->tx_tasklet); + } + + if (intr & MT_INT_GPTIMER) { + mt76x02_irq_disable(dev, MT_INT_GPTIMER); + tasklet_schedule(&dev->dfs_pd.dfs_tasklet); + } + + return IRQ_HANDLED; +} +EXPORT_SYMBOL_GPL(mt76x02_irq_handler); + +void mt76x02_set_irq_mask(struct mt76x02_dev *dev, u32 clear, u32 set) +{ + unsigned long flags; + + spin_lock_irqsave(&dev->mt76.mmio.irq_lock, flags); + dev->mt76.mmio.irqmask &= ~clear; + dev->mt76.mmio.irqmask |= set; + mt76_wr(dev, MT_INT_MASK_CSR, dev->mt76.mmio.irqmask); + spin_unlock_irqrestore(&dev->mt76.mmio.irq_lock, flags); +} +EXPORT_SYMBOL_GPL(mt76x02_set_irq_mask); + +static void mt76x02_dma_enable(struct mt76x02_dev *dev) +{ + u32 val; + + mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX); + mt76x02_wait_for_wpdma(&dev->mt76, 1000); + usleep_range(50, 100); + + val = FIELD_PREP(MT_WPDMA_GLO_CFG_DMA_BURST_SIZE, 3) | + MT_WPDMA_GLO_CFG_TX_DMA_EN | + MT_WPDMA_GLO_CFG_RX_DMA_EN; + mt76_set(dev, MT_WPDMA_GLO_CFG, val); + mt76_clear(dev, MT_WPDMA_GLO_CFG, + MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE); +} +EXPORT_SYMBOL_GPL(mt76x02_dma_enable); + +void mt76x02_dma_cleanup(struct mt76x02_dev *dev) +{ + tasklet_kill(&dev->tx_tasklet); + mt76_dma_cleanup(&dev->mt76); +} +EXPORT_SYMBOL_GPL(mt76x02_dma_cleanup); + +void mt76x02_dma_disable(struct mt76x02_dev *dev) +{ + u32 val = mt76_rr(dev, MT_WPDMA_GLO_CFG); + + val &= MT_WPDMA_GLO_CFG_DMA_BURST_SIZE | + MT_WPDMA_GLO_CFG_BIG_ENDIAN | + MT_WPDMA_GLO_CFG_HDR_SEG_LEN; + val |= MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE; + mt76_wr(dev, MT_WPDMA_GLO_CFG, val); +} +EXPORT_SYMBOL_GPL(mt76x02_dma_disable); + +void mt76x02_mac_start(struct mt76x02_dev *dev) +{ + mt76x02_dma_enable(dev); + mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter); + mt76_wr(dev, MT_MAC_SYS_CTRL, + MT_MAC_SYS_CTRL_ENABLE_TX | + MT_MAC_SYS_CTRL_ENABLE_RX); + mt76x02_irq_enable(dev, + MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL | + MT_INT_TX_STAT); +} +EXPORT_SYMBOL_GPL(mt76x02_mac_start); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c new file mode 100644 index 000000000000..d31ce1d7b689 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c @@ -0,0 +1,183 @@ +/* + * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> + * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include <linux/kernel.h> + +#include "mt76.h" +#include "mt76x02_phy.h" +#include "mt76x02_mac.h" + +void mt76x02_phy_set_rxpath(struct mt76_dev *dev) +{ + u32 val; + + val = __mt76_rr(dev, MT_BBP(AGC, 0)); + val &= ~BIT(4); + + switch (dev->chainmask & 0xf) { + case 2: + val |= BIT(3); + break; + default: + val &= ~BIT(3); + break; + } + + __mt76_wr(dev, MT_BBP(AGC, 0), val); + mb(); + val = __mt76_rr(dev, MT_BBP(AGC, 0)); +} +EXPORT_SYMBOL_GPL(mt76x02_phy_set_rxpath); + +void mt76x02_phy_set_txdac(struct mt76_dev *dev) +{ + int txpath; + + txpath = (dev->chainmask >> 8) & 0xf; + switch (txpath) { + case 2: + __mt76_set(dev, MT_BBP(TXBE, 5), 0x3); + break; + default: + __mt76_clear(dev, MT_BBP(TXBE, 5), 0x3); + break; + } +} +EXPORT_SYMBOL_GPL(mt76x02_phy_set_txdac); + +static u32 +mt76x02_tx_power_mask(u8 v1, u8 v2, u8 v3, u8 v4) +{ + u32 val = 0; + + val |= (v1 & (BIT(6) - 1)) << 0; + val |= (v2 & (BIT(6) - 1)) << 8; + val |= (v3 & (BIT(6) - 1)) << 16; + val |= (v4 & (BIT(6) - 1)) << 24; + return val; +} + +int mt76x02_get_max_rate_power(struct mt76_rate_power *r) +{ + s8 ret = 0; + int i; + + for (i = 0; i < sizeof(r->all); i++) + ret = max(ret, r->all[i]); + + return ret; +} +EXPORT_SYMBOL_GPL(mt76x02_get_max_rate_power); + +void mt76x02_limit_rate_power(struct mt76_rate_power *r, int limit) +{ + int i; + + for (i = 0; i < sizeof(r->all); i++) + if (r->all[i] > limit) + r->all[i] = limit; +} +EXPORT_SYMBOL_GPL(mt76x02_limit_rate_power); + +void mt76x02_add_rate_power_offset(struct mt76_rate_power *r, int offset) +{ + int i; + + for (i = 0; i < sizeof(r->all); i++) + r->all[i] += offset; +} +EXPORT_SYMBOL_GPL(mt76x02_add_rate_power_offset); + +void mt76x02_phy_set_txpower(struct mt76_dev *dev, int txp_0, int txp_1) +{ + struct mt76_rate_power *t = &dev->rate_power; + + __mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_0, + txp_0); + __mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_1, + txp_1); + + __mt76_wr(dev, MT_TX_PWR_CFG_0, + mt76x02_tx_power_mask(t->cck[0], t->cck[2], t->ofdm[0], + t->ofdm[2])); + __mt76_wr(dev, MT_TX_PWR_CFG_1, + mt76x02_tx_power_mask(t->ofdm[4], t->ofdm[6], t->ht[0], + t->ht[2])); + __mt76_wr(dev, MT_TX_PWR_CFG_2, + mt76x02_tx_power_mask(t->ht[4], t->ht[6], t->ht[8], + t->ht[10])); + __mt76_wr(dev, MT_TX_PWR_CFG_3, + mt76x02_tx_power_mask(t->ht[12], t->ht[14], t->stbc[0], + t->stbc[2])); + __mt76_wr(dev, MT_TX_PWR_CFG_4, + mt76x02_tx_power_mask(t->stbc[4], t->stbc[6], 0, 0)); + __mt76_wr(dev, MT_TX_PWR_CFG_7, + mt76x02_tx_power_mask(t->ofdm[7], t->vht[8], t->ht[7], + t->vht[9])); + __mt76_wr(dev, MT_TX_PWR_CFG_8, + mt76x02_tx_power_mask(t->ht[14], 0, t->vht[8], t->vht[9])); + __mt76_wr(dev, MT_TX_PWR_CFG_9, + mt76x02_tx_power_mask(t->ht[7], 0, t->stbc[8], t->stbc[9])); +} +EXPORT_SYMBOL_GPL(mt76x02_phy_set_txpower); + +int mt76x02_phy_get_min_avg_rssi(struct mt76_dev *dev) +{ + struct mt76x02_sta *sta; + struct mt76_wcid *wcid; + int i, j, min_rssi = 0; + s8 cur_rssi; + + local_bh_disable(); + rcu_read_lock(); + + for (i = 0; i < ARRAY_SIZE(dev->wcid_mask); i++) { + unsigned long mask = dev->wcid_mask[i]; + + if (!mask) + continue; + + for (j = i * BITS_PER_LONG; mask; j++, mask >>= 1) { + if 
(!(mask & 1)) + continue; + + wcid = rcu_dereference(dev->wcid[j]); + if (!wcid) + continue; + + sta = container_of(wcid, struct mt76x02_sta, wcid); + spin_lock(&dev->rx_lock); + if (sta->inactive_count++ < 5) + cur_rssi = ewma_signal_read(&sta->rssi); + else + cur_rssi = 0; + spin_unlock(&dev->rx_lock); + + if (cur_rssi < min_rssi) + min_rssi = cur_rssi; + } + } + + rcu_read_unlock(); + local_bh_enable(); + + if (!min_rssi) + return -75; + + return min_rssi; +} +EXPORT_SYMBOL_GPL(mt76x02_phy_get_min_avg_rssi); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.h b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.h new file mode 100644 index 000000000000..e70ea6eeb077 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.h @@ -0,0 +1,30 @@ +/* + * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __MT76x02_PHY_H +#define __MT76x02_PHY_H + +#include "mt76x02_regs.h" + +void mt76x02_add_rate_power_offset(struct mt76_rate_power *r, int offset); +void mt76x02_phy_set_txpower(struct mt76_dev *dev, int txp_0, int txp_2); +void mt76x02_limit_rate_power(struct mt76_rate_power *r, int limit); +int mt76x02_get_max_rate_power(struct mt76_rate_power *r); +void mt76x02_phy_set_rxpath(struct mt76_dev *dev); +void mt76x02_phy_set_txdac(struct mt76_dev *dev); +int mt76x02_phy_get_min_avg_rssi(struct mt76_dev *dev); + +#endif /* __MT76x02_PHY_H */ diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_regs.h b/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h index 1551ea453180..24d1e6d747dd 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_regs.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h @@ -14,8 +14,8 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ -#ifndef __MT76x2_REGS_H -#define __MT76x2_REGS_H +#ifndef __MT76X02_REGS_H +#define __MT76X02_REGS_H #define MT_ASIC_VERSION 0x0000 @@ -46,6 +46,11 @@ #define MT_WLAN_FUN_CTRL_WLAN_CLK_EN BIT(1) #define MT_WLAN_FUN_CTRL_WLAN_RESET_RF BIT(2) +#define MT_COEXCFG3 0x004c + +#define MT_LDO_CTRL_0 0x006c +#define MT_LDO_CTRL_1 0x0070 + #define MT_WLAN_FUN_CTRL_WLAN_RESET BIT(3) /* MT76x0 */ #define MT_WLAN_FUN_CTRL_CSR_F20M_CKEN BIT(3) /* MT76x2 */ @@ -75,6 +80,8 @@ #define MT_XO_CTRL7 0x011c +#define MT_IOCFG_6 0x0124 + #define MT_USB_U3DMA_CFG 0x9018 #define MT_USB_DMA_CFG_RX_BULK_AGG_TOUT GENMASK(7, 0) #define MT_USB_DMA_CFG_RX_BULK_AGG_LMT GENMASK(15, 8) @@ -156,18 +163,23 @@ #define MT_WMM_TXOP_SHIFT(_n) ((_n & 1) * 16) #define MT_WMM_TXOP_MASK GENMASK(15, 0) +#define MT_WMM_CTRL 0x0230 /* MT76x0 */ +#define MT_FCE_DMA_ADDR 0x0230 +#define MT_FCE_DMA_LEN 0x0234 +#define MT_USB_DMA_CFG 0x0238 + #define MT_TSO_CTRL 0x0250 #define MT_HEADER_TRANS_CTRL_REG 0x0260 +#define MT_US_CYC_CFG 0x02a4 +#define MT_US_CYC_CNT GENMASK(7, 0) + #define MT_TX_RING_BASE 0x0300 #define MT_RX_RING_BASE 0x03c0 #define MT_TX_HW_QUEUE_MCU 8 #define MT_TX_HW_QUEUE_MGMT 9 -#define MT_US_CYC_CFG 0x02a4 -#define MT_US_CYC_CNT GENMASK(7, 0) - #define MT_PBF_SYS_CTRL 0x0400 #define MT_PBF_SYS_CTRL_MCU_RESET BIT(0) #define MT_PBF_SYS_CTRL_DMA_RESET BIT(1) @@ -189,10 +201,20 @@ #define MT_BCN_OFFSET_BASE 0x041c #define MT_BCN_OFFSET(_n) (MT_BCN_OFFSET_BASE + ((_n) << 2)) +#define MT_RXQ_STA 0x0430 +#define MT_TXQ_STA 0x0434 +#define MT_RF_CSR_CFG 0x0500 +#define MT_RF_CSR_CFG_DATA GENMASK(7, 0) +#define MT_RF_CSR_CFG_REG_ID GENMASK(13, 8) +#define MT_RF_CSR_CFG_REG_BANK GENMASK(17, 14) +#define MT_RF_CSR_CFG_WR BIT(30) +#define MT_RF_CSR_CFG_KICK BIT(31) + #define MT_RF_BYPASS_0 0x0504 #define MT_RF_BYPASS_1 0x0508 #define MT_RF_SETTING_0 0x050c +#define MT_RF_MISC 0x0518 #define MT_RF_DATA_WRITE 0x0524 #define MT_RF_CTRL 0x0528 @@ -203,6 +225,11 @@ #define MT_RF_DATA_READ 0x052c +#define MT_COM_REG0 0x0730 +#define MT_COM_REG1 0x0734 +#define MT_COM_REG2 0x0738 +#define MT_COM_REG3 0x073C + #define MT_FCE_PSE_CTRL 0x0800 #define MT_FCE_PARAMETERS 0x0804 #define MT_FCE_CSO 0x0808 @@ -222,6 +249,7 @@ #define MT_TX_CPU_FROM_FCE_BASE_PTR 0x09a0 #define MT_TX_CPU_FROM_FCE_MAX_COUNT 0x09a4 +#define MT_TX_CPU_FROM_FCE_CPU_DESC_IDX 0x09a8 #define MT_FCE_PDMA_GLOBAL_CONF 0x09c4 #define MT_FCE_SKIP_FS 0x0a6c @@ -250,6 +278,9 @@ #define MT_MAC_BSSID_DW1_MBSS_IDX_BYTE GENMASK(26, 24) #define MT_MAX_LEN_CFG 0x1018 +#define MT_MAX_LEN_CFG_AMPDU GENMASK(13, 12) + +#define MT_LED_CFG 0x102c #define MT_AMPDU_MAX_LEN_20M1S 0x1030 #define MT_AMPDU_MAX_LEN_20M2S 0x1034 @@ -365,6 +396,8 @@ #define MT_TX_SW_CFG2 0x1338 #define MT_TXOP_CTRL_CFG 0x1340 +#define MT_TXOP_TRUN_EN GENMASK(5, 0) +#define MT_TXOP_EXT_CCA_DLY GENMASK(15, 8) #define MT_TX_RTS_CFG 0x1344 #define MT_TX_RTS_CFG_RETRY_LIMIT GENMASK(7, 0) @@ -376,7 +409,10 @@ #define MT_TX_RETRY_CFG 0x134c #define MT_TX_LINK_CFG 0x1350 +#define MT_VHT_HT_FBK_CFG0 0x1354 #define MT_VHT_HT_FBK_CFG1 0x1358 +#define MT_LG_FBK_CFG0 0x135c +#define MT_LG_FBK_CFG1 0x1360 #define MT_PROT_CFG_RATE GENMASK(15, 0) #define MT_PROT_CFG_CTRL GENMASK(17, 16) @@ -391,6 +427,27 @@ #define MT_GF20_PROT_CFG 0x1374 #define MT_GF40_PROT_CFG 0x1378 +#define MT_PROT_RATE GENMASK(15, 0) +#define MT_PROT_CTRL_RTS_CTS BIT(16) +#define MT_PROT_CTRL_CTS2SELF BIT(17) +#define MT_PROT_NAV_SHORT BIT(18) +#define MT_PROT_NAV_LONG BIT(19) +#define MT_PROT_TXOP_ALLOW_CCK BIT(20) +#define MT_PROT_TXOP_ALLOW_OFDM BIT(21) 
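+/*
+ * Protection register layout: bits 15:0 take a MT_PROT_RATE_* value,
+ * bits 19:16 the RTS/CTS and NAV control flags, and bits 25:20 the
+ * per-PHY-mode TXOP allow mask (cf. MT_PROT_TXOP_ALLOW_ALL).
+ */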
+#define MT_PROT_TXOP_ALLOW_MM20 BIT(22) +#define MT_PROT_TXOP_ALLOW_MM40 BIT(23) +#define MT_PROT_TXOP_ALLOW_GF20 BIT(24) +#define MT_PROT_TXOP_ALLOW_GF40 BIT(25) +#define MT_PROT_RTS_THR_EN BIT(26) +#define MT_PROT_RATE_CCK_11 0x0003 +#define MT_PROT_RATE_OFDM_6 0x4000 +#define MT_PROT_RATE_OFDM_24 0x4004 +#define MT_PROT_RATE_DUP_OFDM_24 0x4084 +#define MT_PROT_TXOP_ALLOW_ALL GENMASK(25, 20) +#define MT_PROT_TXOP_ALLOW_BW20 (MT_PROT_TXOP_ALLOW_ALL & \ + ~MT_PROT_TXOP_ALLOW_MM40 & \ + ~MT_PROT_TXOP_ALLOW_GF40) + #define MT_EXP_ACK_TIME 0x1380 #define MT_TX_PWR_CFG_0_EXT 0x1390 @@ -405,6 +462,8 @@ #define MT_TX0_RF_GAIN_CORR 0x13a0 #define MT_TX1_RF_GAIN_CORR 0x13a4 +#define MT_TX0_RF_GAIN_ATTEN 0x13a8 +#define MT_TX0_RF_GAIN_ATTEN 0x13a8 /* MT76x0 */ #define MT_TX_ALC_CFG_0 0x13b0 #define MT_TX_ALC_CFG_0_CH_INIT_0 GENMASK(5, 0) @@ -421,6 +480,7 @@ #define MT_TX_ALC_CFG_3 0x13ac #define MT_TX_ALC_CFG_4 0x13c0 #define MT_TX_ALC_CFG_4_LOWGAIN_CH_EN BIT(31) +#define MT_TX0_BB_GAIN_ATTEN 0x13c0 /* MT76x0 */ #define MT_TX_ALC_VGA3 0x13c8 @@ -451,10 +511,13 @@ #define MT_RX_FILTR_CFG_CTRL_RSV BIT(16) #define MT_AUTO_RSP_CFG 0x1404 +#define MT_AUTO_RSP_PREAMB_SHORT BIT(4) #define MT_LEGACY_BASIC_RATE 0x1408 #define MT_HT_BASIC_RATE 0x140c #define MT_HT_CTRL_CFG 0x1410 +#define MT_RX_PARSER_CFG 0x1418 +#define MT_RX_PARSER_RX_SET_NAV_ALL BIT(0) #define MT_EXT_CCA_CFG 0x141c #define MT_EXT_CCA_CFG_CCA0 GENMASK(1, 0) @@ -498,7 +561,10 @@ #define MT_TX_STAT_FIFO_WCID GENMASK(15, 8) #define MT_TX_STAT_FIFO_RATE GENMASK(31, 16) +#define MT_TX_AGG_STAT 0x171c + #define MT_TX_AGG_CNT_BASE0 0x1720 +#define MT_MPDU_DENSITY_CNT 0x1740 #define MT_TX_AGG_CNT_BASE1 0x174c #define MT_TX_AGG_CNT(_id) ((_id) < 8 ? \ @@ -604,7 +670,7 @@ struct mt76_wcid_key { u8 rx_mic[8]; } __packed __aligned(4); -enum mt76x2_cipher_type { +enum mt76x02_cipher_type { MT_CIPHER_NONE, MT_CIPHER_WEP40, MT_CIPHER_WEP104, diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_trace.c b/drivers/net/wireless/mediatek/mt76/mt76x02_trace.c index a09f117848d6..5b42d2c87937 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_trace.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_trace.c @@ -18,6 +18,6 @@ #ifndef __CHECKER__ #define CREATE_TRACE_POINTS -#include "mt76x2_trace.h" +#include "mt76x02_trace.h" #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_trace.h b/drivers/net/wireless/mediatek/mt76/mt76x02_trace.h index 4cd424148d4b..713f12d3c8de 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_trace.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_trace.h @@ -14,14 +14,14 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ -#if !defined(__MT76x2_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) -#define __MT76x2_TRACE_H +#if !defined(__MT76x02_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#define __MT76x02_TRACE_H #include <linux/tracepoint.h> -#include "mt76x2.h" +#include "mt76x02.h" #undef TRACE_SYSTEM -#define TRACE_SYSTEM mt76x2 +#define TRACE_SYSTEM mt76x02 #define MAXNAME 32 #define DEV_ENTRY __array(char, wiphy_name, 32) @@ -35,7 +35,7 @@ #define TXID_PR_ARG __entry->wcid, __entry->pktid DECLARE_EVENT_CLASS(dev_evt, - TP_PROTO(struct mt76x2_dev *dev), + TP_PROTO(struct mt76x02_dev *dev), TP_ARGS(dev), TP_STRUCT__entry( DEV_ENTRY @@ -47,7 +47,7 @@ DECLARE_EVENT_CLASS(dev_evt, ); DECLARE_EVENT_CLASS(dev_txid_evt, - TP_PROTO(struct mt76x2_dev *dev, u8 wcid, u8 pktid), + TP_PROTO(struct mt76x02_dev *dev, u8 wcid, u8 pktid), TP_ARGS(dev, wcid, pktid), TP_STRUCT__entry( DEV_ENTRY @@ -63,19 +63,19 @@ DECLARE_EVENT_CLASS(dev_txid_evt, ) ); -DEFINE_EVENT(dev_evt, mac_txstat_poll, - TP_PROTO(struct mt76x2_dev *dev), - TP_ARGS(dev) -); - DEFINE_EVENT(dev_txid_evt, mac_txdone_add, - TP_PROTO(struct mt76x2_dev *dev, u8 wcid, u8 pktid), + TP_PROTO(struct mt76x02_dev *dev, u8 wcid, u8 pktid), TP_ARGS(dev, wcid, pktid) ); +DEFINE_EVENT(dev_evt, mac_txstat_poll, + TP_PROTO(struct mt76x02_dev *dev), + TP_ARGS(dev) +); + TRACE_EVENT(mac_txstat_fetch, - TP_PROTO(struct mt76x2_dev *dev, - struct mt76x2_tx_status *stat), + TP_PROTO(struct mt76x02_dev *dev, + struct mt76x02_tx_status *stat), TP_ARGS(dev, stat), @@ -110,9 +110,8 @@ TRACE_EVENT(mac_txstat_fetch, ) ); - TRACE_EVENT(dev_irq, - TP_PROTO(struct mt76x2_dev *dev, u32 val, u32 mask), + TP_PROTO(struct mt76x02_dev *dev, u32 val, u32 mask), TP_ARGS(dev, val, mask), @@ -139,6 +138,6 @@ TRACE_EVENT(dev_irq, #undef TRACE_INCLUDE_PATH #define TRACE_INCLUDE_PATH . #undef TRACE_INCLUDE_FILE -#define TRACE_INCLUDE_FILE mt76x2_trace +#define TRACE_INCLUDE_FILE mt76x02_trace #include <trace/define_trace.h> diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c b/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c new file mode 100644 index 000000000000..830377221739 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c @@ -0,0 +1,203 @@ +/* + * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> + * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include <linux/kernel.h> + +#include "mt76x02.h" + +void mt76x02_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, + struct sk_buff *skb) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct mt76x02_dev *dev = hw->priv; + struct ieee80211_vif *vif = info->control.vif; + struct mt76_wcid *wcid = &dev->mt76.global_wcid; + + if (control->sta) { + struct mt76x02_sta *msta; + + msta = (struct mt76x02_sta *)control->sta->drv_priv; + wcid = &msta->wcid; + /* sw encrypted frames */ + if (!info->control.hw_key && wcid->hw_key_idx != 0xff) + control->sta = NULL; + } + + if (vif && !control->sta) { + struct mt76x02_vif *mvif; + + mvif = (struct mt76x02_vif *)vif->drv_priv; + wcid = &mvif->group_wcid; + } + + mt76_tx(&dev->mt76, control->sta, wcid, skb); +} +EXPORT_SYMBOL_GPL(mt76x02_tx); + +void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, + struct sk_buff *skb) +{ + struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76); + void *rxwi = skb->data; + + if (q == MT_RXQ_MCU) { + /* this is used just by mmio code */ + skb_queue_tail(&mdev->mmio.mcu.res_q, skb); + wake_up(&mdev->mmio.mcu.wait); + return; + } + + skb_pull(skb, sizeof(struct mt76x02_rxwi)); + if (mt76x02_mac_process_rx(dev, skb, rxwi)) { + dev_kfree_skb(skb); + return; + } + + mt76_rx(mdev, q, skb); +} +EXPORT_SYMBOL_GPL(mt76x02_queue_rx_skb); + +s8 mt76x02_tx_get_max_txpwr_adj(struct mt76_dev *dev, + const struct ieee80211_tx_rate *rate) +{ + s8 max_txpwr; + + if (rate->flags & IEEE80211_TX_RC_VHT_MCS) { + u8 mcs = ieee80211_rate_get_vht_mcs(rate); + + if (mcs == 8 || mcs == 9) { + max_txpwr = dev->rate_power.vht[8]; + } else { + u8 nss, idx; + + nss = ieee80211_rate_get_vht_nss(rate); + idx = ((nss - 1) << 3) + mcs; + max_txpwr = dev->rate_power.ht[idx & 0xf]; + } + } else if (rate->flags & IEEE80211_TX_RC_MCS) { + max_txpwr = dev->rate_power.ht[rate->idx & 0xf]; + } else { + enum nl80211_band band = dev->chandef.chan->band; + + if (band == NL80211_BAND_2GHZ) { + const struct ieee80211_rate *r; + struct wiphy *wiphy = dev->hw->wiphy; + struct mt76_rate_power *rp = &dev->rate_power; + + r = &wiphy->bands[band]->bitrates[rate->idx]; + if (r->flags & IEEE80211_RATE_SHORT_PREAMBLE) + max_txpwr = rp->cck[r->hw_value & 0x3]; + else + max_txpwr = rp->ofdm[r->hw_value & 0x7]; + } else { + max_txpwr = dev->rate_power.ofdm[rate->idx & 0x7]; + } + } + + return max_txpwr; +} +EXPORT_SYMBOL_GPL(mt76x02_tx_get_max_txpwr_adj); + +s8 mt76x02_tx_get_txpwr_adj(struct mt76_dev *mdev, s8 txpwr, s8 max_txpwr_adj) +{ + struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76); + + txpwr = min_t(s8, txpwr, dev->mt76.txpower_conf); + txpwr -= (dev->target_power + dev->target_power_delta[0]); + txpwr = min_t(s8, txpwr, max_txpwr_adj); + + if (!dev->enable_tpc) + return 0; + else if (txpwr >= 0) + return min_t(s8, txpwr, 7); + else + return (txpwr < -16) ? 
8 : (txpwr + 32) / 2; +} +EXPORT_SYMBOL_GPL(mt76x02_tx_get_txpwr_adj); + +void mt76x02_tx_set_txpwr_auto(struct mt76x02_dev *dev, s8 txpwr) +{ + s8 txpwr_adj; + + txpwr_adj = mt76x02_tx_get_txpwr_adj(&dev->mt76, txpwr, + dev->mt76.rate_power.ofdm[4]); + mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG, + MT_PROT_AUTO_TX_CFG_PROT_PADJ, txpwr_adj); + mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG, + MT_PROT_AUTO_TX_CFG_AUTO_PADJ, txpwr_adj); +} +EXPORT_SYMBOL_GPL(mt76x02_tx_set_txpwr_auto); + +void mt76x02_tx_complete(struct mt76_dev *dev, struct sk_buff *skb) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + + if (info->flags & IEEE80211_TX_CTL_AMPDU) { + ieee80211_free_txskb(dev->hw, skb); + } else { + ieee80211_tx_info_clear_status(info); + info->status.rates[0].idx = -1; + info->flags |= IEEE80211_TX_STAT_ACK; + ieee80211_tx_status(dev->hw, skb); + } +} +EXPORT_SYMBOL_GPL(mt76x02_tx_complete); + +bool mt76x02_tx_status_data(struct mt76_dev *dev, u8 *update) +{ + struct mt76x02_tx_status stat; + + if (!mt76x02_mac_load_tx_status(dev, &stat)) + return false; + + mt76x02_send_tx_status(dev, &stat, update); + + return true; +} +EXPORT_SYMBOL_GPL(mt76x02_tx_status_data); + +int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi, + struct sk_buff *skb, struct mt76_queue *q, + struct mt76_wcid *wcid, struct ieee80211_sta *sta, + u32 *tx_info) +{ + struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76); + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + int qsel = MT_QSEL_EDCA; + int ret; + + if (q == &dev->mt76.q_tx[MT_TXQ_PSD] && wcid && wcid->idx < 128) + mt76x02_mac_wcid_set_drop(&dev->mt76, wcid->idx, false); + + mt76x02_mac_write_txwi(mdev, txwi, skb, wcid, sta, skb->len); + + ret = mt76x02_insert_hdr_pad(skb); + if (ret < 0) + return ret; + + if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) + qsel = MT_QSEL_MGMT; + + *tx_info = FIELD_PREP(MT_TXD_INFO_QSEL, qsel) | + MT_TXD_INFO_80211; + + if (!wcid || wcid->hw_key_idx == 0xff || wcid->sw_iv) + *tx_info |= MT_TXD_INFO_WIV; + + return 0; +} +EXPORT_SYMBOL_GPL(mt76x02_tx_prepare_skb); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb.h b/drivers/net/wireless/mediatek/mt76/mt76x02_usb.h new file mode 100644 index 000000000000..6b2138328eb2 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb.h @@ -0,0 +1,34 @@ +/* + * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef __MT76x02_USB_H +#define __MT76x02_USB_H + +#include "mt76.h" + +void mt76x02u_init_mcu(struct mt76_dev *dev); +void mt76x02u_mcu_fw_reset(struct mt76_dev *dev); +int mt76x02u_mcu_fw_send_data(struct mt76_dev *dev, const void *data, + int data_len, u32 max_payload, u32 offset); + +int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags); +int mt76x02u_tx_prepare_skb(struct mt76_dev *dev, void *data, + struct sk_buff *skb, struct mt76_queue *q, + struct mt76_wcid *wcid, struct ieee80211_sta *sta, + u32 *tx_info); +void mt76x02u_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q, + struct mt76_queue_entry *e, bool flush); +#endif /* __MT76x02_USB_H */ diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c new file mode 100644 index 000000000000..7c6c973af386 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c @@ -0,0 +1,121 @@ +/* + * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "mt76x02.h" + +static void mt76x02u_remove_dma_hdr(struct sk_buff *skb) +{ + int hdr_len; + + skb_pull(skb, sizeof(struct mt76x02_txwi) + MT_DMA_HDR_LEN); + hdr_len = ieee80211_get_hdrlen_from_skb(skb); + if (hdr_len % 4) + mt76x02_remove_hdr_pad(skb, 2); +} + +void mt76x02u_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q, + struct mt76_queue_entry *e, bool flush) +{ + mt76x02u_remove_dma_hdr(e->skb); + mt76x02_tx_complete(mdev, e->skb); +} +EXPORT_SYMBOL_GPL(mt76x02u_tx_complete_skb); + +static int mt76x02u_check_skb_rooms(struct sk_buff *skb) +{ + int hdr_len = ieee80211_get_hdrlen_from_skb(skb); + u32 need_head; + + need_head = sizeof(struct mt76x02_txwi) + MT_DMA_HDR_LEN; + if (hdr_len % 4) + need_head += 2; + return skb_cow(skb, need_head); +} + +int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags) +{ + struct sk_buff *iter, *last = skb; + u32 info, pad; + + /* Buffer layout: + * | 4B | xfer len | pad | 4B | + * | TXINFO | pkt/cmd | zero pad to 4B | zero | + * + * length field of TXINFO should be set to 'xfer len'. 
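+ *
+ * 'pad' below covers both the zero pad up to the next 4-byte boundary
+ * and the trailing 4 zero bytes; for fragmented skbs it is appended to
+ * the last fragment.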
+ */ + info = FIELD_PREP(MT_TXD_INFO_LEN, round_up(skb->len, 4)) | + FIELD_PREP(MT_TXD_INFO_DPORT, port) | flags; + put_unaligned_le32(info, skb_push(skb, sizeof(info))); + + pad = round_up(skb->len, 4) + 4 - skb->len; + skb_walk_frags(skb, iter) { + last = iter; + if (!iter->next) { + skb->data_len += pad; + skb->len += pad; + break; + } + } + + if (unlikely(pad)) { + if (skb_pad(last, pad)) + return -ENOMEM; + __skb_put(last, pad); + } + return 0; +} + +static int +mt76x02u_set_txinfo(struct sk_buff *skb, struct mt76_wcid *wcid, u8 ep) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + enum mt76_qsel qsel; + u32 flags; + + if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) || + ep == MT_EP_OUT_HCCA) + qsel = MT_QSEL_MGMT; + else + qsel = MT_QSEL_EDCA; + + flags = FIELD_PREP(MT_TXD_INFO_QSEL, qsel) | + MT_TXD_INFO_80211; + if (!wcid || wcid->hw_key_idx == 0xff || wcid->sw_iv) + flags |= MT_TXD_INFO_WIV; + + return mt76x02u_skb_dma_info(skb, WLAN_PORT, flags); +} + +int mt76x02u_tx_prepare_skb(struct mt76_dev *dev, void *data, + struct sk_buff *skb, struct mt76_queue *q, + struct mt76_wcid *wcid, struct ieee80211_sta *sta, + u32 *tx_info) +{ + struct mt76x02_txwi *txwi; + int err, len = skb->len; + + err = mt76x02u_check_skb_rooms(skb); + if (err < 0) + return -ENOMEM; + + mt76x02_insert_hdr_pad(skb); + + txwi = skb_push(skb, sizeof(struct mt76x02_txwi)); + mt76x02_mac_write_txwi(dev, txwi, skb, wcid, sta, len); + + return mt76x02u_set_txinfo(skb, wcid, q2ep(q->hw_idx)); +} +EXPORT_SYMBOL_GPL(mt76x02u_tx_prepare_skb); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c new file mode 100644 index 000000000000..cb5f073f08af --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c @@ -0,0 +1,360 @@ +/* + * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include <linux/module.h> +#include <linux/firmware.h> + +#include "mt76.h" +#include "mt76x02_dma.h" +#include "mt76x02_mcu.h" +#include "mt76x02_usb.h" + +#define MT_CMD_HDR_LEN 4 + +#define MT_FCE_DMA_ADDR 0x0230 +#define MT_FCE_DMA_LEN 0x0234 + +#define MT_TX_CPU_FROM_FCE_CPU_DESC_IDX 0x09a8 + +static struct sk_buff * +mt76x02u_mcu_msg_alloc(const void *data, int len) +{ + struct sk_buff *skb; + + skb = alloc_skb(MT_CMD_HDR_LEN + len + 8, GFP_KERNEL); + if (!skb) + return NULL; + + skb_reserve(skb, MT_CMD_HDR_LEN); + skb_put_data(skb, data, len); + + return skb; +} + +static void +mt76x02u_multiple_mcu_reads(struct mt76_dev *dev, u8 *data, int len) +{ + struct mt76_usb *usb = &dev->usb; + u32 reg, val; + int i; + + if (usb->mcu.burst) { + WARN_ON_ONCE(len / 4 != usb->mcu.rp_len); + + reg = usb->mcu.rp[0].reg - usb->mcu.base; + for (i = 0; i < usb->mcu.rp_len; i++) { + val = get_unaligned_le32(data + 4 * i); + usb->mcu.rp[i].reg = reg++; + usb->mcu.rp[i].value = val; + } + } else { + WARN_ON_ONCE(len / 8 != usb->mcu.rp_len); + + for (i = 0; i < usb->mcu.rp_len; i++) { + reg = get_unaligned_le32(data + 8 * i) - + usb->mcu.base; + val = get_unaligned_le32(data + 8 * i + 4); + + WARN_ON_ONCE(usb->mcu.rp[i].reg != reg); + usb->mcu.rp[i].value = val; + } + } +} + +static int mt76x02u_mcu_wait_resp(struct mt76_dev *dev, u8 seq) +{ + struct mt76_usb *usb = &dev->usb; + struct mt76u_buf *buf = &usb->mcu.res; + struct urb *urb = buf->urb; + int i, ret; + u32 rxfce; + u8 *data; + + for (i = 0; i < 5; i++) { + if (!wait_for_completion_timeout(&usb->mcu.cmpl, + msecs_to_jiffies(300))) + continue; + + if (urb->status) + return -EIO; + + data = sg_virt(&urb->sg[0]); + if (usb->mcu.rp) + mt76x02u_multiple_mcu_reads(dev, data + 4, + urb->actual_length - 8); + + rxfce = get_unaligned_le32(data); + ret = mt76u_submit_buf(dev, USB_DIR_IN, + MT_EP_IN_CMD_RESP, + buf, GFP_KERNEL, + mt76u_mcu_complete_urb, + &usb->mcu.cmpl); + if (ret) + return ret; + + if (seq == FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, rxfce) && + FIELD_GET(MT_RX_FCE_INFO_EVT_TYPE, rxfce) == EVT_CMD_DONE) + return 0; + + dev_err(dev->dev, "error: MCU resp evt:%lx seq:%hhx-%lx\n", + FIELD_GET(MT_RX_FCE_INFO_EVT_TYPE, rxfce), + seq, FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, rxfce)); + } + + dev_err(dev->dev, "error: %s timed out\n", __func__); + return -ETIMEDOUT; +} + +static int +__mt76x02u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb, + int cmd, bool wait_resp) +{ + struct usb_interface *intf = to_usb_interface(dev->dev); + struct usb_device *udev = interface_to_usbdev(intf); + struct mt76_usb *usb = &dev->usb; + unsigned int pipe; + int ret, sent; + u8 seq = 0; + u32 info; + + if (!skb) + return -EINVAL; + + if (test_bit(MT76_REMOVED, &dev->state)) + return 0; + + pipe = usb_sndbulkpipe(udev, usb->out_ep[MT_EP_OUT_INBAND_CMD]); + if (wait_resp) { + seq = ++usb->mcu.msg_seq & 0xf; + if (!seq) + seq = ++usb->mcu.msg_seq & 0xf; + } + + info = FIELD_PREP(MT_MCU_MSG_CMD_SEQ, seq) | + FIELD_PREP(MT_MCU_MSG_CMD_TYPE, cmd) | + MT_MCU_MSG_TYPE_CMD; + ret = mt76x02u_skb_dma_info(skb, CPU_TX_PORT, info); + if (ret) + return ret; + + ret = usb_bulk_msg(udev, pipe, skb->data, skb->len, &sent, 500); + if (ret) + return ret; + + if (wait_resp) + ret = mt76x02u_mcu_wait_resp(dev, seq); + + consume_skb(skb); + + return ret; +} + +static int +mt76x02u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb, + int cmd, bool wait_resp) +{ + struct mt76_usb *usb = &dev->usb; + int err; + + mutex_lock(&usb->mcu.mutex); + err = __mt76x02u_mcu_send_msg(dev, 
skb, cmd, wait_resp); + mutex_unlock(&usb->mcu.mutex); + + return err; +} + +static inline void skb_put_le32(struct sk_buff *skb, u32 val) +{ + put_unaligned_le32(val, skb_put(skb, 4)); +} + +static int +mt76x02u_mcu_wr_rp(struct mt76_dev *dev, u32 base, + const struct mt76_reg_pair *data, int n) +{ + const int CMD_RANDOM_WRITE = 12; + const int max_vals_per_cmd = MT_INBAND_PACKET_MAX_LEN / 8; + struct sk_buff *skb; + int cnt, i, ret; + + if (!n) + return 0; + + cnt = min(max_vals_per_cmd, n); + + skb = alloc_skb(cnt * 8 + MT_DMA_HDR_LEN + 4, GFP_KERNEL); + if (!skb) + return -ENOMEM; + skb_reserve(skb, MT_DMA_HDR_LEN); + + for (i = 0; i < cnt; i++) { + skb_put_le32(skb, base + data[i].reg); + skb_put_le32(skb, data[i].value); + } + + ret = mt76x02u_mcu_send_msg(dev, skb, CMD_RANDOM_WRITE, cnt == n); + if (ret) + return ret; + + return mt76x02u_mcu_wr_rp(dev, base, data + cnt, n - cnt); +} + +static int +mt76x02u_mcu_rd_rp(struct mt76_dev *dev, u32 base, + struct mt76_reg_pair *data, int n) +{ + const int CMD_RANDOM_READ = 10; + const int max_vals_per_cmd = MT_INBAND_PACKET_MAX_LEN / 8; + struct mt76_usb *usb = &dev->usb; + struct sk_buff *skb; + int cnt, i, ret; + + if (!n) + return 0; + + cnt = min(max_vals_per_cmd, n); + if (cnt != n) + return -EINVAL; + + skb = alloc_skb(cnt * 8 + MT_DMA_HDR_LEN + 4, GFP_KERNEL); + if (!skb) + return -ENOMEM; + skb_reserve(skb, MT_DMA_HDR_LEN); + + for (i = 0; i < cnt; i++) { + skb_put_le32(skb, base + data[i].reg); + skb_put_le32(skb, data[i].value); + } + + mutex_lock(&usb->mcu.mutex); + + usb->mcu.rp = data; + usb->mcu.rp_len = n; + usb->mcu.base = base; + usb->mcu.burst = false; + + ret = __mt76x02u_mcu_send_msg(dev, skb, CMD_RANDOM_READ, true); + + usb->mcu.rp = NULL; + + mutex_unlock(&usb->mcu.mutex); + + return ret; +} + +void mt76x02u_mcu_fw_reset(struct mt76_dev *dev) +{ + mt76u_vendor_request(dev, MT_VEND_DEV_MODE, + USB_DIR_OUT | USB_TYPE_VENDOR, + 0x1, 0, NULL, 0); +} +EXPORT_SYMBOL_GPL(mt76x02u_mcu_fw_reset); + +static int +__mt76x02u_mcu_fw_send_data(struct mt76_dev *dev, struct mt76u_buf *buf, + const void *fw_data, int len, u32 dst_addr) +{ + u8 *data = sg_virt(&buf->urb->sg[0]); + DECLARE_COMPLETION_ONSTACK(cmpl); + __le32 info; + u32 val; + int err; + + info = cpu_to_le32(FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) | + FIELD_PREP(MT_MCU_MSG_LEN, len) | + MT_MCU_MSG_TYPE_CMD); + + memcpy(data, &info, sizeof(info)); + memcpy(data + sizeof(info), fw_data, len); + memset(data + sizeof(info) + len, 0, 4); + + mt76u_single_wr(dev, MT_VEND_WRITE_FCE, + MT_FCE_DMA_ADDR, dst_addr); + len = roundup(len, 4); + mt76u_single_wr(dev, MT_VEND_WRITE_FCE, + MT_FCE_DMA_LEN, len << 16); + + buf->len = MT_CMD_HDR_LEN + len + sizeof(info); + err = mt76u_submit_buf(dev, USB_DIR_OUT, + MT_EP_OUT_INBAND_CMD, + buf, GFP_KERNEL, + mt76u_mcu_complete_urb, &cmpl); + if (err < 0) + return err; + + if (!wait_for_completion_timeout(&cmpl, + msecs_to_jiffies(1000))) { + dev_err(dev->dev, "firmware upload timed out\n"); + usb_kill_urb(buf->urb); + return -ETIMEDOUT; + } + + if (mt76u_urb_error(buf->urb)) { + dev_err(dev->dev, "firmware upload failed: %d\n", + buf->urb->status); + return buf->urb->status; + } + + val = mt76u_rr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX); + val++; + mt76u_wr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX, val); + + return 0; +} + +int mt76x02u_mcu_fw_send_data(struct mt76_dev *dev, const void *data, + int data_len, u32 max_payload, u32 offset) +{ + int err, len, pos = 0, max_len = max_payload - 8; + struct mt76u_buf buf; + + err = mt76u_buf_alloc(dev, 
&buf, 1, max_payload, max_payload, + GFP_KERNEL); + if (err < 0) + return err; + + while (data_len > 0) { + len = min_t(int, data_len, max_len); + err = __mt76x02u_mcu_fw_send_data(dev, &buf, data + pos, + len, offset + pos); + if (err < 0) + break; + + data_len -= len; + pos += len; + usleep_range(5000, 10000); + } + mt76u_buf_free(&buf); + + return err; +} +EXPORT_SYMBOL_GPL(mt76x02u_mcu_fw_send_data); + +void mt76x02u_init_mcu(struct mt76_dev *dev) +{ + static const struct mt76_mcu_ops mt76x02u_mcu_ops = { + .mcu_msg_alloc = mt76x02u_mcu_msg_alloc, + .mcu_send_msg = mt76x02u_mcu_send_msg, + .mcu_wr_rp = mt76x02u_mcu_wr_rp, + .mcu_rd_rp = mt76x02u_mcu_rd_rp, + }; + + dev->mcu_ops = &mt76x02u_mcu_ops; +} +EXPORT_SYMBOL_GPL(mt76x02u_init_mcu); + +MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c new file mode 100644 index 000000000000..5851ab6b7e26 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c @@ -0,0 +1,444 @@ +/* + * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl> + * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include <linux/module.h> +#include "mt76x02.h" + +#define CCK_RATE(_idx, _rate) { \ + .bitrate = _rate, \ + .flags = IEEE80211_RATE_SHORT_PREAMBLE, \ + .hw_value = (MT_PHY_TYPE_CCK << 8) | _idx, \ + .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (8 + _idx), \ +} + +#define OFDM_RATE(_idx, _rate) { \ + .bitrate = _rate, \ + .hw_value = (MT_PHY_TYPE_OFDM << 8) | _idx, \ + .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | _idx, \ +} + +struct ieee80211_rate mt76x02_rates[] = { + CCK_RATE(0, 10), + CCK_RATE(1, 20), + CCK_RATE(2, 55), + CCK_RATE(3, 110), + OFDM_RATE(0, 60), + OFDM_RATE(1, 90), + OFDM_RATE(2, 120), + OFDM_RATE(3, 180), + OFDM_RATE(4, 240), + OFDM_RATE(5, 360), + OFDM_RATE(6, 480), + OFDM_RATE(7, 540), +}; +EXPORT_SYMBOL_GPL(mt76x02_rates); + +void mt76x02_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *total_flags, u64 multicast) +{ + struct mt76_dev *dev = hw->priv; + u32 flags = 0; + +#define MT76_FILTER(_flag, _hw) do { \ + flags |= *total_flags & FIF_##_flag; \ + dev->rxfilter &= ~(_hw); \ + dev->rxfilter |= !(flags & FIF_##_flag) * (_hw); \ + } while (0) + + mutex_lock(&dev->mutex); + + dev->rxfilter &= ~MT_RX_FILTR_CFG_OTHER_BSS; + + MT76_FILTER(FCSFAIL, MT_RX_FILTR_CFG_CRC_ERR); + MT76_FILTER(PLCPFAIL, MT_RX_FILTR_CFG_PHY_ERR); + MT76_FILTER(CONTROL, MT_RX_FILTR_CFG_ACK | + MT_RX_FILTR_CFG_CTS | + MT_RX_FILTR_CFG_CFEND | + MT_RX_FILTR_CFG_CFACK | + MT_RX_FILTR_CFG_BA | + MT_RX_FILTR_CFG_CTRL_RSV); + MT76_FILTER(PSPOLL, MT_RX_FILTR_CFG_PSPOLL); + + *total_flags = flags; + dev->bus->wr(dev, MT_RX_FILTR_CFG, dev->rxfilter); + + mutex_unlock(&dev->mutex); +} +EXPORT_SYMBOL_GPL(mt76x02_configure_filter); + +int mt76x02_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct mt76_dev *dev = hw->priv; + struct mt76x02_sta *msta = (struct mt76x02_sta *) sta->drv_priv; + struct mt76x02_vif *mvif = (struct mt76x02_vif *) vif->drv_priv; + int ret = 0; + int idx = 0; + int i; + + mutex_lock(&dev->mutex); + + idx = mt76_wcid_alloc(dev->wcid_mask, ARRAY_SIZE(dev->wcid)); + if (idx < 0) { + ret = -ENOSPC; + goto out; + } + + msta->vif = mvif; + msta->wcid.sta = 1; + msta->wcid.idx = idx; + msta->wcid.hw_key_idx = -1; + mt76x02_mac_wcid_setup(dev, idx, mvif->idx, sta->addr); + mt76x02_mac_wcid_set_drop(dev, idx, false); + for (i = 0; i < ARRAY_SIZE(sta->txq); i++) + mt76x02_txq_init(dev, sta->txq[i]); + + if (vif->type == NL80211_IFTYPE_AP) + set_bit(MT_WCID_FLAG_CHECK_PS, &msta->wcid.flags); + + ewma_signal_init(&msta->rssi); + + rcu_assign_pointer(dev->wcid[idx], &msta->wcid); + +out: + mutex_unlock(&dev->mutex); + + return ret; +} +EXPORT_SYMBOL_GPL(mt76x02_sta_add); + +int mt76x02_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct mt76_dev *dev = hw->priv; + struct mt76x02_sta *msta = (struct mt76x02_sta *) sta->drv_priv; + int idx = msta->wcid.idx; + int i; + + mutex_lock(&dev->mutex); + rcu_assign_pointer(dev->wcid[idx], NULL); + for (i = 0; i < ARRAY_SIZE(sta->txq); i++) + mt76_txq_remove(dev, sta->txq[i]); + mt76x02_mac_wcid_set_drop(dev, idx, true); + mt76_wcid_free(dev->wcid_mask, idx); + mt76x02_mac_wcid_setup(dev, idx, 0, NULL); + mutex_unlock(&dev->mutex); + + return 0; +} +EXPORT_SYMBOL_GPL(mt76x02_sta_remove); + +void mt76x02_vif_init(struct mt76_dev *dev, struct ieee80211_vif *vif, + unsigned int idx) +{ + struct mt76x02_vif *mvif = (struct mt76x02_vif *) vif->drv_priv; + + mvif->idx = idx; + mvif->group_wcid.idx = MT_VIF_WCID(idx); + 
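/* group/multicast frames for this vif are steered through one of
+	 * the reserved WCID slots, MT_VIF_WCID(idx) = 254 - (idx & 7) */
+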
mvif->group_wcid.hw_key_idx = -1; + mt76x02_txq_init(dev, vif->txq); +} +EXPORT_SYMBOL_GPL(mt76x02_vif_init); + +int +mt76x02_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) +{ + struct mt76_dev *dev = hw->priv; + unsigned int idx = 0; + + if (vif->addr[0] & BIT(1)) + idx = 1 + (((dev->macaddr[0] ^ vif->addr[0]) >> 2) & 7); + + /* + * Client mode typically only has one configurable BSSID register, + * which is used for bssidx=0. This is linked to the MAC address. + * Since mac80211 allows changing interface types, and we cannot + * force the use of the primary MAC address for a station mode + * interface, we need some other way of configuring a per-interface + * remote BSSID. + * The hardware provides an AP-Client feature, where bssidx 0-7 are + * used for AP mode and bssidx 8-15 for client mode. + * We shift the station interface bss index by 8 to force the + * hardware to recognize the BSSID. + * The resulting bssidx mismatch for unicast frames is ignored by hw. + */ + if (vif->type == NL80211_IFTYPE_STATION) + idx += 8; + + mt76x02_vif_init(dev, vif, idx); + return 0; +} +EXPORT_SYMBOL_GPL(mt76x02_add_interface); + +void mt76x02_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct mt76_dev *dev = hw->priv; + + mt76_txq_remove(dev, vif->txq); +} +EXPORT_SYMBOL_GPL(mt76x02_remove_interface); + +int mt76x02_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_ampdu_params *params) +{ + enum ieee80211_ampdu_mlme_action action = params->action; + struct ieee80211_sta *sta = params->sta; + struct mt76_dev *dev = hw->priv; + struct mt76x02_sta *msta = (struct mt76x02_sta *) sta->drv_priv; + struct ieee80211_txq *txq = sta->txq[params->tid]; + u16 tid = params->tid; + u16 *ssn = &params->ssn; + struct mt76_txq *mtxq; + + if (!txq) + return -EINVAL; + + mtxq = (struct mt76_txq *)txq->drv_priv; + + switch (action) { + case IEEE80211_AMPDU_RX_START: + mt76_rx_aggr_start(dev, &msta->wcid, tid, *ssn, params->buf_size); + __mt76_set(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid)); + break; + case IEEE80211_AMPDU_RX_STOP: + mt76_rx_aggr_stop(dev, &msta->wcid, tid); + __mt76_clear(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid)); + break; + case IEEE80211_AMPDU_TX_OPERATIONAL: + mtxq->aggr = true; + mtxq->send_bar = false; + ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn); + break; + case IEEE80211_AMPDU_TX_STOP_FLUSH: + case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: + mtxq->aggr = false; + ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn); + break; + case IEEE80211_AMPDU_TX_START: + mtxq->agg_ssn = *ssn << 4; + ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); + break; + case IEEE80211_AMPDU_TX_STOP_CONT: + mtxq->aggr = false; + ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); + break; + } + + return 0; +} +EXPORT_SYMBOL_GPL(mt76x02_ampdu_action); + +int mt76x02_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) +{ + struct mt76_dev *dev = hw->priv; + struct mt76x02_vif *mvif = (struct mt76x02_vif *) vif->drv_priv; + struct mt76x02_sta *msta; + struct mt76_wcid *wcid; + int idx = key->keyidx; + int ret; + + /* fall back to sw encryption for unsupported ciphers */ + switch (key->cipher) { + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + case WLAN_CIPHER_SUITE_TKIP: + case WLAN_CIPHER_SUITE_CCMP: + break; + default: + return -EOPNOTSUPP; + } + + /* + * The hardware does not support per-STA RX 
GTK, fall back + * to software mode for these. + */ + if ((vif->type == NL80211_IFTYPE_ADHOC || + vif->type == NL80211_IFTYPE_MESH_POINT) && + (key->cipher == WLAN_CIPHER_SUITE_TKIP || + key->cipher == WLAN_CIPHER_SUITE_CCMP) && + !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) + return -EOPNOTSUPP; + + msta = sta ? (struct mt76x02_sta *) sta->drv_priv : NULL; + wcid = msta ? &msta->wcid : &mvif->group_wcid; + + if (cmd == SET_KEY) { + key->hw_key_idx = wcid->idx; + wcid->hw_key_idx = idx; + if (key->flags & IEEE80211_KEY_FLAG_RX_MGMT) { + key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX; + wcid->sw_iv = true; + } + } else { + if (idx == wcid->hw_key_idx) { + wcid->hw_key_idx = -1; + wcid->sw_iv = true; + } + + key = NULL; + } + mt76_wcid_key_setup(dev, wcid, key); + + if (!msta) { + if (key || wcid->hw_key_idx == idx) { + ret = mt76x02_mac_wcid_set_key(dev, wcid->idx, key); + if (ret) + return ret; + } + + return mt76x02_mac_shared_key_setup(dev, mvif->idx, idx, key); + } + + return mt76x02_mac_wcid_set_key(dev, msta->wcid.idx, key); +} +EXPORT_SYMBOL_GPL(mt76x02_set_key); + +int mt76x02_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + u16 queue, const struct ieee80211_tx_queue_params *params) +{ + struct mt76_dev *dev = hw->priv; + u8 cw_min = 5, cw_max = 10, qid; + u32 val; + + qid = dev->q_tx[queue].hw_idx; + + if (params->cw_min) + cw_min = fls(params->cw_min); + if (params->cw_max) + cw_max = fls(params->cw_max); + + val = FIELD_PREP(MT_EDCA_CFG_TXOP, params->txop) | + FIELD_PREP(MT_EDCA_CFG_AIFSN, params->aifs) | + FIELD_PREP(MT_EDCA_CFG_CWMIN, cw_min) | + FIELD_PREP(MT_EDCA_CFG_CWMAX, cw_max); + __mt76_wr(dev, MT_EDCA_CFG_AC(qid), val); + + val = __mt76_rr(dev, MT_WMM_TXOP(qid)); + val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(qid)); + val |= params->txop << MT_WMM_TXOP_SHIFT(qid); + __mt76_wr(dev, MT_WMM_TXOP(qid), val); + + val = __mt76_rr(dev, MT_WMM_AIFSN); + val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(qid)); + val |= params->aifs << MT_WMM_AIFSN_SHIFT(qid); + __mt76_wr(dev, MT_WMM_AIFSN, val); + + val = __mt76_rr(dev, MT_WMM_CWMIN); + val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(qid)); + val |= cw_min << MT_WMM_CWMIN_SHIFT(qid); + __mt76_wr(dev, MT_WMM_CWMIN, val); + + val = __mt76_rr(dev, MT_WMM_CWMAX); + val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(qid)); + val |= cw_max << MT_WMM_CWMAX_SHIFT(qid); + __mt76_wr(dev, MT_WMM_CWMAX, val); + + return 0; +} +EXPORT_SYMBOL_GPL(mt76x02_conf_tx); + +void mt76x02_sta_rate_tbl_update(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct mt76_dev *dev = hw->priv; + struct mt76x02_sta *msta = (struct mt76x02_sta *) sta->drv_priv; + struct ieee80211_sta_rates *rates = rcu_dereference(sta->rates); + struct ieee80211_tx_rate rate = {}; + + if (!rates) + return; + + rate.idx = rates->rate[0].idx; + rate.flags = rates->rate[0].flags; + mt76x02_mac_wcid_set_rate(dev, &msta->wcid, &rate); + msta->wcid.max_txpwr_adj = mt76x02_tx_get_max_txpwr_adj(dev, &rate); +} +EXPORT_SYMBOL_GPL(mt76x02_sta_rate_tbl_update); + +int mt76x02_insert_hdr_pad(struct sk_buff *skb) +{ + int len = ieee80211_get_hdrlen_from_skb(skb); + + if (len % 4 == 0) + return 0; + + skb_push(skb, 2); + memmove(skb->data, skb->data + 2, len); + + skb->data[len] = 0; + skb->data[len + 1] = 0; + return 2; +} +EXPORT_SYMBOL_GPL(mt76x02_insert_hdr_pad); + +void mt76x02_remove_hdr_pad(struct sk_buff *skb, int len) +{ + int hdrlen; + + if (!len) + return; + + hdrlen = ieee80211_get_hdrlen_from_skb(skb); + memmove(skb->data + 
len, skb->data, hdrlen); + skb_pull(skb, len); +} +EXPORT_SYMBOL_GPL(mt76x02_remove_hdr_pad); + +const u16 mt76x02_beacon_offsets[16] = { + /* 1024 byte per beacon */ + 0xc000, + 0xc400, + 0xc800, + 0xcc00, + 0xd000, + 0xd400, + 0xd800, + 0xdc00, + /* BSS idx 8-15 not used for beacons */ + 0xc000, + 0xc000, + 0xc000, + 0xc000, + 0xc000, + 0xc000, + 0xc000, + 0xc000, +}; +EXPORT_SYMBOL_GPL(mt76x02_beacon_offsets); + +void mt76x02_set_beacon_offsets(struct mt76_dev *dev) +{ + u16 val, base = MT_BEACON_BASE; + u32 regs[4] = {}; + int i; + + for (i = 0; i < 16; i++) { + val = mt76x02_beacon_offsets[i] - base; + regs[i / 4] |= (val / 64) << (8 * (i % 4)); + } + + for (i = 0; i < 4; i++) + __mt76_wr(dev, MT_BCN_OFFSET(i), regs[i]); +} +EXPORT_SYMBOL_GPL(mt76x02_set_beacon_offsets); + +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2.h deleted file mode 100644 index dca3209bf5f1..000000000000 --- a/drivers/net/wireless/mediatek/mt76/mt76x2.h +++ /dev/null @@ -1,330 +0,0 @@ -/* - * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- */ - -#ifndef __MT76x2_H -#define __MT76x2_H - -#include <linux/device.h> -#include <linux/dma-mapping.h> -#include <linux/spinlock.h> -#include <linux/skbuff.h> -#include <linux/netdevice.h> -#include <linux/irq.h> -#include <linux/interrupt.h> -#include <linux/mutex.h> -#include <linux/bitops.h> -#include <linux/kfifo.h> -#include <linux/average.h> - -#define MT7662_FIRMWARE "mt7662.bin" -#define MT7662_ROM_PATCH "mt7662_rom_patch.bin" -#define MT7662_EEPROM_SIZE 512 - -#define MT7662U_FIRMWARE "mediatek/mt7662u.bin" -#define MT7662U_ROM_PATCH "mediatek/mt7662u_rom_patch.bin" - -#define MT76x2_RX_RING_SIZE 256 -#define MT_RX_HEADROOM 32 - -#define MT_MAX_CHAINS 2 - -#define MT_CALIBRATE_INTERVAL HZ - -#define MT_MAX_VIFS 8 -#define MT_VIF_WCID(_n) (254 - ((_n) & 7)) - -#include "mt76.h" -#include "mt76x2_regs.h" -#include "mt76x2_mac.h" -#include "mt76x2_dfs.h" - -DECLARE_EWMA(signal, 10, 8) - -struct mt76x2_mcu { - struct mutex mutex; - - wait_queue_head_t wait; - struct sk_buff_head res_q; - struct mt76u_buf res_u; - - u32 msg_seq; -}; - -struct mt76x2_rx_freq_cal { - s8 high_gain[MT_MAX_CHAINS]; - s8 rssi_offset[MT_MAX_CHAINS]; - s8 lna_gain; - u32 mcu_gain; -}; - -struct mt76x2_calibration { - struct mt76x2_rx_freq_cal rx; - - u8 agc_gain_init[MT_MAX_CHAINS]; - u8 agc_gain_cur[MT_MAX_CHAINS]; - - u16 false_cca; - s8 avg_rssi_all; - s8 agc_gain_adjust; - s8 low_gain; - - u8 temp; - - bool init_cal_done; - bool tssi_cal_done; - bool tssi_comp_pending; - bool dpd_cal_done; - bool channel_cal_done; -}; - -struct mt76x2_dev { - struct mt76_dev mt76; /* must be first */ - - struct mac_address macaddr_list[8]; - - struct mutex mutex; - - const u16 *beacon_offsets; - unsigned long wcid_mask[128 / BITS_PER_LONG]; - - int txpower_conf; - int txpower_cur; - - u8 txdone_seq; - DECLARE_KFIFO_PTR(txstatus_fifo, struct mt76x2_tx_status); - - struct mt76x2_mcu mcu; - struct sk_buff *rx_head; - - struct tasklet_struct tx_tasklet; - struct tasklet_struct pre_tbtt_tasklet; - struct delayed_work cal_work; - struct delayed_work mac_work; - - u32 aggr_stats[32]; - - struct mt76_wcid global_wcid; - struct mt76_wcid __rcu *wcid[128]; - - spinlock_t irq_lock; - u32 irqmask; - - struct sk_buff *beacons[8]; - u8 beacon_mask; - u8 beacon_data_mask; - - u8 tbtt_count; - u16 beacon_int; - - u16 chainmask; - - u32 rxfilter; - - struct mt76x2_calibration cal; - - s8 target_power; - s8 target_power_delta[2]; - struct mt76_rate_power rate_power; - bool enable_tpc; - - u8 coverage_class; - u8 slottime; - - struct mt76x2_dfs_pattern_detector dfs_pd; -}; - -struct mt76x2_vif { - u8 idx; - - struct mt76_wcid group_wcid; -}; - -struct mt76x2_sta { - struct mt76_wcid wcid; /* must be first */ - - struct mt76x2_vif *vif; - struct mt76x2_tx_status status; - int n_frames; - - struct ewma_signal rssi; - int inactive_count; -}; - -static inline bool mt76x2_wait_for_mac(struct mt76x2_dev *dev) -{ - int i; - - for (i = 0; i < 500; i++) { - switch (mt76_rr(dev, MT_MAC_CSR0)) { - case 0: - case ~0: - break; - default: - return true; - } - usleep_range(5000, 10000); - } - return false; -} - -static inline bool is_mt7612(struct mt76x2_dev *dev) -{ - return mt76_chip(&dev->mt76) == 0x7612; -} - -void mt76x2_set_irq_mask(struct mt76x2_dev *dev, u32 clear, u32 set); - -static inline bool mt76x2_channel_silent(struct mt76x2_dev *dev) -{ - struct ieee80211_channel *chan = dev->mt76.chandef.chan; - - return ((chan->flags & IEEE80211_CHAN_RADAR) && - chan->dfs_state != NL80211_DFS_AVAILABLE); -} - -static inline void 
mt76x2_irq_enable(struct mt76x2_dev *dev, u32 mask) -{ - mt76x2_set_irq_mask(dev, 0, mask); -} - -static inline void mt76x2_irq_disable(struct mt76x2_dev *dev, u32 mask) -{ - mt76x2_set_irq_mask(dev, mask, 0); -} - -static inline bool mt76x2_wait_for_bbp(struct mt76x2_dev *dev) -{ - return mt76_poll_msec(dev, MT_MAC_STATUS, - MT_MAC_STATUS_TX | MT_MAC_STATUS_RX, - 0, 100); -} - -static inline bool wait_for_wpdma(struct mt76x2_dev *dev) -{ - return mt76_poll(dev, MT_WPDMA_GLO_CFG, - MT_WPDMA_GLO_CFG_TX_DMA_BUSY | - MT_WPDMA_GLO_CFG_RX_DMA_BUSY, - 0, 1000); -} - -extern const struct ieee80211_ops mt76x2_ops; - -extern struct ieee80211_rate mt76x2_rates[12]; - -struct mt76x2_dev *mt76x2_alloc_device(struct device *pdev); -int mt76x2_register_device(struct mt76x2_dev *dev); -void mt76x2_init_debugfs(struct mt76x2_dev *dev); -void mt76x2_init_device(struct mt76x2_dev *dev); - -irqreturn_t mt76x2_irq_handler(int irq, void *dev_instance); -void mt76x2_phy_power_on(struct mt76x2_dev *dev); -int mt76x2_init_hardware(struct mt76x2_dev *dev); -void mt76x2_stop_hardware(struct mt76x2_dev *dev); -int mt76x2_eeprom_init(struct mt76x2_dev *dev); -int mt76x2_apply_calibration_data(struct mt76x2_dev *dev, int channel); -void mt76x2_set_tx_ackto(struct mt76x2_dev *dev); - -void mt76x2_phy_set_antenna(struct mt76x2_dev *dev); -int mt76x2_phy_start(struct mt76x2_dev *dev); -int mt76x2_phy_set_channel(struct mt76x2_dev *dev, - struct cfg80211_chan_def *chandef); -int mt76x2_mac_get_rssi(struct mt76x2_dev *dev, s8 rssi, int chain); -void mt76x2_phy_calibrate(struct work_struct *work); -void mt76x2_phy_set_txpower(struct mt76x2_dev *dev); - -int mt76x2_mcu_init(struct mt76x2_dev *dev); -int mt76x2_mcu_set_channel(struct mt76x2_dev *dev, u8 channel, u8 bw, - u8 bw_index, bool scan); -int mt76x2_mcu_set_radio_state(struct mt76x2_dev *dev, bool on); -int mt76x2_mcu_load_cr(struct mt76x2_dev *dev, u8 type, u8 temp_level, - u8 channel); -int mt76x2_mcu_cleanup(struct mt76x2_dev *dev); - -int mt76x2_dma_init(struct mt76x2_dev *dev); -void mt76x2_dma_cleanup(struct mt76x2_dev *dev); - -void mt76x2_cleanup(struct mt76x2_dev *dev); - -int mt76x2_tx_queue_mcu(struct mt76x2_dev *dev, enum mt76_txq_id qid, - struct sk_buff *skb, int cmd, int seq); -void mt76x2_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, - struct sk_buff *skb); -void mt76x2_tx_complete(struct mt76x2_dev *dev, struct sk_buff *skb); -int mt76x2_tx_prepare_skb(struct mt76_dev *mdev, void *txwi, - struct sk_buff *skb, struct mt76_queue *q, - struct mt76_wcid *wcid, struct ieee80211_sta *sta, - u32 *tx_info); -void mt76x2_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q, - struct mt76_queue_entry *e, bool flush); -void mt76x2_mac_set_tx_protection(struct mt76x2_dev *dev, u32 val); - -void mt76x2_pre_tbtt_tasklet(unsigned long arg); - -void mt76x2_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q); -void mt76x2_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, - struct sk_buff *skb); - -void mt76x2_sta_ps(struct mt76_dev *dev, struct ieee80211_sta *sta, bool ps); - -void mt76x2_update_channel(struct mt76_dev *mdev); - -s8 mt76x2_tx_get_max_txpwr_adj(struct mt76x2_dev *dev, - const struct ieee80211_tx_rate *rate); -s8 mt76x2_tx_get_txpwr_adj(struct mt76x2_dev *dev, s8 txpwr, s8 max_txpwr_adj); -void mt76x2_tx_set_txpwr_auto(struct mt76x2_dev *dev, s8 txpwr); - -int mt76x2_insert_hdr_pad(struct sk_buff *skb); - -bool mt76x2_mac_load_tx_status(struct mt76x2_dev *dev, - struct mt76x2_tx_status *stat); -void 
mt76x2_send_tx_status(struct mt76x2_dev *dev, - struct mt76x2_tx_status *stat, u8 *update); -void mt76x2_reset_wlan(struct mt76x2_dev *dev, bool enable); -void mt76x2_init_txpower(struct mt76x2_dev *dev, - struct ieee80211_supported_band *sband); -void mt76_write_mac_initvals(struct mt76x2_dev *dev); - -int mt76x2_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_ampdu_params *params); -int mt76x2_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_sta *sta); -int mt76x2_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_sta *sta); -void mt76x2_remove_interface(struct ieee80211_hw *hw, - struct ieee80211_vif *vif); -int mt76x2_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, - struct ieee80211_vif *vif, struct ieee80211_sta *sta, - struct ieee80211_key_conf *key); -int mt76x2_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - u16 queue, const struct ieee80211_tx_queue_params *params); -void mt76x2_configure_filter(struct ieee80211_hw *hw, - unsigned int changed_flags, - unsigned int *total_flags, u64 multicast); -void mt76x2_txq_init(struct mt76x2_dev *dev, struct ieee80211_txq *txq); -void mt76x2_sta_rate_tbl_update(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta); - -void mt76x2_phy_set_txpower_regs(struct mt76x2_dev *dev, - enum nl80211_band band); -void mt76x2_configure_tx_delay(struct mt76x2_dev *dev, - enum nl80211_band band, u8 bw); -void mt76x2_phy_set_bw(struct mt76x2_dev *dev, int width, u8 ctrl); -void mt76x2_phy_set_band(struct mt76x2_dev *dev, int band, bool primary_upper); -int mt76x2_phy_get_min_avg_rssi(struct mt76x2_dev *dev); -void mt76x2_apply_gain_adj(struct mt76x2_dev *dev); - -#endif diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig b/drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig new file mode 100644 index 000000000000..2b414a0e9088 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig @@ -0,0 +1,20 @@ +config MT76x2_COMMON + tristate + select MT76x02_LIB + +config MT76x2E + tristate "MediaTek MT76x2E (PCIe) support" + select MT76x2_COMMON + depends on MAC80211 + depends on PCI + ---help--- + This adds support for MT7612/MT7602/MT7662-based wireless PCIe devices. + +config MT76x2U + tristate "MediaTek MT76x2U (USB) support" + select MT76x2_COMMON + select MT76x02_USB + depends on MAC80211 + depends on USB + help + This adds support for MT7612U-based wireless USB dongles. 
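The Kconfig above completes the new mt76x2/ subdirectory's configuration: MT76x2_COMMON is a hidden symbol that both bus front ends pull in via "select", so only MT76x2E and MT76x2U are user-visible options. A minimal sketch of a .config fragment that builds both variants as modules, assuming the usual mt76 Kconfig plumbing around this hunk (the hidden MT76x2_COMMON, MT76x02_LIB and MT76x02_USB symbols are selected automatically and need no explicit lines):

# .config sketch: enable both MT76x2 bus variants as modules
CONFIG_MAC80211=m
CONFIG_PCI=y
CONFIG_USB=y
CONFIG_MT76x2E=m
CONFIG_MT76x2U=m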
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/Makefile b/drivers/net/wireless/mediatek/mt76/mt76x2/Makefile new file mode 100644 index 000000000000..b71bb1049170 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/Makefile @@ -0,0 +1,16 @@ +obj-$(CONFIG_MT76x2_COMMON) += mt76x2-common.o +obj-$(CONFIG_MT76x2E) += mt76x2e.o +obj-$(CONFIG_MT76x2U) += mt76x2u.o + +mt76x2-common-y := \ + eeprom.o mac.o init.o phy.o debugfs.o mcu.o + +mt76x2e-y := \ + pci.o pci_main.o pci_init.o pci_tx.o \ + pci_mac.o pci_mcu.o pci_phy.o pci_dfs.o + +mt76x2u-y := \ + usb.o usb_init.o usb_main.o usb_mac.o usb_mcu.o \ + usb_phy.o + +CFLAGS_pci_trace.o := -I$(src) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c b/drivers/net/wireless/mediatek/mt76/mt76x2/debugfs.c index 77b5ff1be05f..e8f8ccc0a5ed 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/debugfs.c @@ -20,7 +20,7 @@ static int mt76x2_ampdu_stat_read(struct seq_file *file, void *data) { - struct mt76x2_dev *dev = file->private; + struct mt76x02_dev *dev = file->private; int i, j; for (i = 0; i < 4; i++) { @@ -47,33 +47,14 @@ mt76x2_ampdu_stat_open(struct inode *inode, struct file *f) return single_open(f, mt76x2_ampdu_stat_read, inode->i_private); } -static void -seq_puts_array(struct seq_file *file, const char *str, s8 *val, int len) -{ - int i; - - seq_printf(file, "%10s:", str); - for (i = 0; i < len; i++) - seq_printf(file, " %2d", val[i]); - seq_puts(file, "\n"); -} - static int read_txpower(struct seq_file *file, void *data) { - struct mt76x2_dev *dev = dev_get_drvdata(file->private); + struct mt76x02_dev *dev = dev_get_drvdata(file->private); seq_printf(file, "Target power: %d\n", dev->target_power); - seq_puts_array(file, "Delta", dev->target_power_delta, - ARRAY_SIZE(dev->target_power_delta)); - seq_puts_array(file, "CCK", dev->rate_power.cck, - ARRAY_SIZE(dev->rate_power.cck)); - seq_puts_array(file, "OFDM", dev->rate_power.ofdm, - ARRAY_SIZE(dev->rate_power.ofdm)); - seq_puts_array(file, "HT", dev->rate_power.ht, - ARRAY_SIZE(dev->rate_power.ht)); - seq_puts_array(file, "VHT", dev->rate_power.vht, - ARRAY_SIZE(dev->rate_power.vht)); + mt76_seq_puts_array(file, "Delta", dev->target_power_delta, + ARRAY_SIZE(dev->target_power_delta)); return 0; } @@ -87,9 +68,9 @@ static const struct file_operations fops_ampdu_stat = { static int mt76x2_dfs_stat_read(struct seq_file *file, void *data) { + struct mt76x02_dev *dev = file->private; + struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; int i; - struct mt76x2_dev *dev = file->private; - struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; seq_printf(file, "allocated sequences:\t%d\n", dfs_pd->seq_stats.seq_pool_len); @@ -125,7 +106,7 @@ static const struct file_operations fops_dfs_stat = { static int read_agc(struct seq_file *file, void *data) { - struct mt76x2_dev *dev = dev_get_drvdata(file->private); + struct mt76x02_dev *dev = dev_get_drvdata(file->private); seq_printf(file, "avg_rssi: %d\n", dev->cal.avg_rssi_all); seq_printf(file, "low_gain: %d\n", dev->cal.low_gain); @@ -135,7 +116,7 @@ static int read_agc(struct seq_file *file, void *data) return 0; } -void mt76x2_init_debugfs(struct mt76x2_dev *dev) +void mt76x2_init_debugfs(struct mt76x02_dev *dev) { struct dentry *dir; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dma.h b/drivers/net/wireless/mediatek/mt76/mt76x2/dfs.h index da294558c268..3cb9d1864286 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_dma.h +++ 
b/drivers/net/wireless/mediatek/mt76/mt76x2/dfs.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> + * Copyright (C) 2016 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -14,16 +14,13 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ -#ifndef __MT76x2_DMA_H -#define __MT76x2_DMA_H +#ifndef __DFS_H +#define __DFS_H -#include "dma.h" +void mt76x2_dfs_init_params(struct mt76x02_dev *dev); +void mt76x2_dfs_init_detector(struct mt76x02_dev *dev); +void mt76x2_dfs_adjust_agc(struct mt76x02_dev *dev); +void mt76x2_dfs_set_domain(struct mt76x02_dev *dev, + enum nl80211_dfs_regions region); -enum mt76x2_qsel { - MT_QSEL_MGMT, - MT_QSEL_HCCA, - MT_QSEL_EDCA, - MT_QSEL_EDCA_2, -}; - -#endif +#endif /* __DFS_H */ diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c index 1753bcb36356..bbab021b5f1a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c @@ -14,14 +14,15 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ +#include <linux/module.h> #include <asm/unaligned.h> #include "mt76x2.h" -#include "mt76x2_eeprom.h" +#include "eeprom.h" #define EE_FIELD(_name, _value) [MT_EE_##_name] = (_value) | 1 static int -mt76x2_eeprom_copy(struct mt76x2_dev *dev, enum mt76x2_eeprom_field field, +mt76x2_eeprom_copy(struct mt76x02_dev *dev, enum mt76x02_eeprom_field field, void *dest, int len) { if (field + len > dev->mt76.eeprom.size) @@ -32,7 +33,7 @@ mt76x2_eeprom_copy(struct mt76x2_dev *dev, enum mt76x2_eeprom_field field, } static int -mt76x2_eeprom_get_macaddr(struct mt76x2_dev *dev) +mt76x2_eeprom_get_macaddr(struct mt76x02_dev *dev) { void *src = dev->mt76.eeprom.data + MT_EE_MAC_ADDR; @@ -40,73 +41,8 @@ mt76x2_eeprom_get_macaddr(struct mt76x2_dev *dev) return 0; } -void mt76x2_eeprom_parse_hw_cap(struct mt76x2_dev *dev) -{ - u16 val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_0); - - switch (FIELD_GET(MT_EE_NIC_CONF_0_BOARD_TYPE, val)) { - case BOARD_TYPE_5GHZ: - dev->mt76.cap.has_5ghz = true; - break; - case BOARD_TYPE_2GHZ: - dev->mt76.cap.has_2ghz = true; - break; - default: - dev->mt76.cap.has_2ghz = true; - dev->mt76.cap.has_5ghz = true; - break; - } -} -EXPORT_SYMBOL_GPL(mt76x2_eeprom_parse_hw_cap); - -static int -mt76x2_efuse_read(struct mt76x2_dev *dev, u16 addr, u8 *data) -{ - u32 val; - int i; - - val = mt76_rr(dev, MT_EFUSE_CTRL); - val &= ~(MT_EFUSE_CTRL_AIN | - MT_EFUSE_CTRL_MODE); - val |= FIELD_PREP(MT_EFUSE_CTRL_AIN, addr & ~0xf); - val |= MT_EFUSE_CTRL_KICK; - mt76_wr(dev, MT_EFUSE_CTRL, val); - - if (!mt76_poll(dev, MT_EFUSE_CTRL, MT_EFUSE_CTRL_KICK, 0, 1000)) - return -ETIMEDOUT; - - udelay(2); - - val = mt76_rr(dev, MT_EFUSE_CTRL); - if ((val & MT_EFUSE_CTRL_AOUT) == MT_EFUSE_CTRL_AOUT) { - memset(data, 0xff, 16); - return 0; - } - - for (i = 0; i < 4; i++) { - val = mt76_rr(dev, MT_EFUSE_DATA(i)); - put_unaligned_le32(val, data + 4 * i); - } - - return 0; -} - -static int -mt76x2_get_efuse_data(struct mt76x2_dev *dev, void *buf, int len) -{ - int ret, i; - - for (i = 0; i + 16 <= len; i += 16) { - ret = mt76x2_efuse_read(dev, i, buf + i); - if (ret) - return ret; - } - - return 0; -} - static bool -mt76x2_has_cal_free_data(struct mt76x2_dev *dev, u8 *efuse) +mt76x2_has_cal_free_data(struct mt76x02_dev *dev, u8 *efuse) { u16 *efuse_w = (u16 *) 
efuse; @@ -132,7 +68,7 @@ mt76x2_has_cal_free_data(struct mt76x2_dev *dev, u8 *efuse) } static void -mt76x2_apply_cal_free_data(struct mt76x2_dev *dev, u8 *efuse) +mt76x2_apply_cal_free_data(struct mt76x02_dev *dev, u8 *efuse) { #define GROUP_5G(_id) \ MT_EE_TX_POWER_0_START_5G + MT_TX_POWER_GROUP_SIZE_5G * (_id), \ @@ -201,7 +137,7 @@ mt76x2_apply_cal_free_data(struct mt76x2_dev *dev, u8 *efuse) eeprom[MT_EE_BT_PMUCFG] = val & 0xff; } -static int mt76x2_check_eeprom(struct mt76x2_dev *dev) +static int mt76x2_check_eeprom(struct mt76x02_dev *dev) { u16 val = get_unaligned_le16(dev->mt76.eeprom.data); @@ -219,7 +155,7 @@ static int mt76x2_check_eeprom(struct mt76x2_dev *dev) } static int -mt76x2_eeprom_load(struct mt76x2_dev *dev) +mt76x2_eeprom_load(struct mt76x02_dev *dev) { void *efuse; bool found; @@ -241,7 +177,8 @@ mt76x2_eeprom_load(struct mt76x2_dev *dev) efuse = dev->mt76.otp.data; - if (mt76x2_get_efuse_data(dev, efuse, MT7662_EEPROM_SIZE)) + if (mt76x02_get_efuse_data(&dev->mt76, 0, efuse, + MT7662_EEPROM_SIZE, MT_EE_READ)) goto out; if (found) { @@ -259,56 +196,32 @@ out: return 0; } -static inline int -mt76x2_sign_extend(u32 val, unsigned int size) -{ - bool sign = val & BIT(size - 1); - - val &= BIT(size - 1) - 1; - - return sign ? val : -val; -} - -static inline int -mt76x2_sign_extend_optional(u32 val, unsigned int size) -{ - bool enable = val & BIT(size); - - return enable ? mt76x2_sign_extend(val, size) : 0; -} - -static bool -field_valid(u8 val) -{ - return val != 0 && val != 0xff; -} - static void -mt76x2_set_rx_gain_group(struct mt76x2_dev *dev, u8 val) +mt76x2_set_rx_gain_group(struct mt76x02_dev *dev, u8 val) { s8 *dest = dev->cal.rx.high_gain; - if (!field_valid(val)) { + if (!mt76x02_field_valid(val)) { dest[0] = 0; dest[1] = 0; return; } - dest[0] = mt76x2_sign_extend(val, 4); - dest[1] = mt76x2_sign_extend(val >> 4, 4); + dest[0] = mt76x02_sign_extend(val, 4); + dest[1] = mt76x02_sign_extend(val >> 4, 4); } static void -mt76x2_set_rssi_offset(struct mt76x2_dev *dev, int chain, u8 val) +mt76x2_set_rssi_offset(struct mt76x02_dev *dev, int chain, u8 val) { s8 *dest = dev->cal.rx.rssi_offset; - if (!field_valid(val)) { + if (!mt76x02_field_valid(val)) { dest[chain] = 0; return; } - dest[chain] = mt76x2_sign_extend_optional(val, 7); + dest[chain] = mt76x02_sign_extend_optional(val, 7); } static enum mt76x2_cal_channel_group @@ -328,28 +241,34 @@ mt76x2_get_cal_channel_group(int channel) } static u8 -mt76x2_get_5g_rx_gain(struct mt76x2_dev *dev, u8 channel) +mt76x2_get_5g_rx_gain(struct mt76x02_dev *dev, u8 channel) { enum mt76x2_cal_channel_group group; group = mt76x2_get_cal_channel_group(channel); switch (group) { case MT_CH_5G_JAPAN: - return mt76x2_eeprom_get(dev, MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN); + return mt76x02_eeprom_get(&dev->mt76, + MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN); case MT_CH_5G_UNII_1: - return mt76x2_eeprom_get(dev, MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN) >> 8; + return mt76x02_eeprom_get(&dev->mt76, + MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN) >> 8; case MT_CH_5G_UNII_2: - return mt76x2_eeprom_get(dev, MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN); + return mt76x02_eeprom_get(&dev->mt76, + MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN); case MT_CH_5G_UNII_2E_1: - return mt76x2_eeprom_get(dev, MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN) >> 8; + return mt76x02_eeprom_get(&dev->mt76, + MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN) >> 8; case MT_CH_5G_UNII_2E_2: - return mt76x2_eeprom_get(dev, MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN); + return mt76x02_eeprom_get(&dev->mt76, + MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN); default: - return 
mt76x2_eeprom_get(dev, MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN) >> 8; + return mt76x02_eeprom_get(&dev->mt76, + MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN) >> 8; } } -void mt76x2_read_rx_gain(struct mt76x2_dev *dev) +void mt76x2_read_rx_gain(struct mt76x02_dev *dev) { struct ieee80211_channel *chan = dev->mt76.chandef.chan; int channel = chan->hw_value; @@ -358,75 +277,28 @@ void mt76x2_read_rx_gain(struct mt76x2_dev *dev) u16 val; if (chan->band == NL80211_BAND_2GHZ) - val = mt76x2_eeprom_get(dev, MT_EE_RF_2G_RX_HIGH_GAIN) >> 8; + val = mt76x02_eeprom_get(&dev->mt76, + MT_EE_RF_2G_RX_HIGH_GAIN) >> 8; else val = mt76x2_get_5g_rx_gain(dev, channel); mt76x2_set_rx_gain_group(dev, val); - if (chan->band == NL80211_BAND_2GHZ) { - val = mt76x2_eeprom_get(dev, MT_EE_RSSI_OFFSET_2G_0); - mt76x2_set_rssi_offset(dev, 0, val); - mt76x2_set_rssi_offset(dev, 1, val >> 8); - } else { - val = mt76x2_eeprom_get(dev, MT_EE_RSSI_OFFSET_5G_0); - mt76x2_set_rssi_offset(dev, 0, val); - mt76x2_set_rssi_offset(dev, 1, val >> 8); - } - - val = mt76x2_eeprom_get(dev, MT_EE_LNA_GAIN); - lna_2g = val & 0xff; - lna_5g[0] = val >> 8; - - val = mt76x2_eeprom_get(dev, MT_EE_RSSI_OFFSET_2G_1); - lna_5g[1] = val >> 8; - - val = mt76x2_eeprom_get(dev, MT_EE_RSSI_OFFSET_5G_1); - lna_5g[2] = val >> 8; - - if (!field_valid(lna_5g[1])) - lna_5g[1] = lna_5g[0]; - - if (!field_valid(lna_5g[2])) - lna_5g[2] = lna_5g[0]; + mt76x02_get_rx_gain(&dev->mt76, chan->band, &val, &lna_2g, lna_5g); + mt76x2_set_rssi_offset(dev, 0, val); + mt76x2_set_rssi_offset(dev, 1, val >> 8); dev->cal.rx.mcu_gain = (lna_2g & 0xff); dev->cal.rx.mcu_gain |= (lna_5g[0] & 0xff) << 8; dev->cal.rx.mcu_gain |= (lna_5g[1] & 0xff) << 16; dev->cal.rx.mcu_gain |= (lna_5g[2] & 0xff) << 24; - val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1); - if (val & MT_EE_NIC_CONF_1_LNA_EXT_2G) - lna_2g = 0; - if (val & MT_EE_NIC_CONF_1_LNA_EXT_5G) - memset(lna_5g, 0, sizeof(lna_5g)); - - if (chan->band == NL80211_BAND_2GHZ) - lna = lna_2g; - else if (channel <= 64) - lna = lna_5g[0]; - else if (channel <= 128) - lna = lna_5g[1]; - else - lna = lna_5g[2]; - - if (lna == 0xff) - lna = 0; - - dev->cal.rx.lna_gain = mt76x2_sign_extend(lna, 8); + lna = mt76x02_get_lna_gain(&dev->mt76, &lna_2g, lna_5g, chan); + dev->cal.rx.lna_gain = mt76x02_sign_extend(lna, 8); } EXPORT_SYMBOL_GPL(mt76x2_read_rx_gain); -static s8 -mt76x2_rate_power_val(u8 val) -{ - if (!field_valid(val)) - return 0; - - return mt76x2_sign_extend_optional(val, 7); -} - -void mt76x2_get_rate_power(struct mt76x2_dev *dev, struct mt76_rate_power *t, +void mt76x2_get_rate_power(struct mt76x02_dev *dev, struct mt76_rate_power *t, struct ieee80211_channel *chan) { bool is_5ghz; @@ -436,70 +308,68 @@ void mt76x2_get_rate_power(struct mt76x2_dev *dev, struct mt76_rate_power *t, memset(t, 0, sizeof(*t)); - val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_CCK); - t->cck[0] = t->cck[1] = mt76x2_rate_power_val(val); - t->cck[2] = t->cck[3] = mt76x2_rate_power_val(val >> 8); + val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_CCK); + t->cck[0] = t->cck[1] = mt76x02_rate_power_val(val); + t->cck[2] = t->cck[3] = mt76x02_rate_power_val(val >> 8); if (is_5ghz) - val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_OFDM_5G_6M); + val = mt76x02_eeprom_get(&dev->mt76, + MT_EE_TX_POWER_OFDM_5G_6M); else - val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_OFDM_2G_6M); - t->ofdm[0] = t->ofdm[1] = mt76x2_rate_power_val(val); - t->ofdm[2] = t->ofdm[3] = mt76x2_rate_power_val(val >> 8); + val = mt76x02_eeprom_get(&dev->mt76, + MT_EE_TX_POWER_OFDM_2G_6M); + t->ofdm[0] = 
t->ofdm[1] = mt76x02_rate_power_val(val); + t->ofdm[2] = t->ofdm[3] = mt76x02_rate_power_val(val >> 8); if (is_5ghz) - val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_OFDM_5G_24M); + val = mt76x02_eeprom_get(&dev->mt76, + MT_EE_TX_POWER_OFDM_5G_24M); else - val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_OFDM_2G_24M); - t->ofdm[4] = t->ofdm[5] = mt76x2_rate_power_val(val); - t->ofdm[6] = t->ofdm[7] = mt76x2_rate_power_val(val >> 8); + val = mt76x02_eeprom_get(&dev->mt76, + MT_EE_TX_POWER_OFDM_2G_24M); + t->ofdm[4] = t->ofdm[5] = mt76x02_rate_power_val(val); + t->ofdm[6] = t->ofdm[7] = mt76x02_rate_power_val(val >> 8); - val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS0); - t->ht[0] = t->ht[1] = mt76x2_rate_power_val(val); - t->ht[2] = t->ht[3] = mt76x2_rate_power_val(val >> 8); + val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_HT_MCS0); + t->ht[0] = t->ht[1] = mt76x02_rate_power_val(val); + t->ht[2] = t->ht[3] = mt76x02_rate_power_val(val >> 8); - val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS4); - t->ht[4] = t->ht[5] = mt76x2_rate_power_val(val); - t->ht[6] = t->ht[7] = mt76x2_rate_power_val(val >> 8); + val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_HT_MCS4); + t->ht[4] = t->ht[5] = mt76x02_rate_power_val(val); + t->ht[6] = t->ht[7] = mt76x02_rate_power_val(val >> 8); - val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS8); - t->ht[8] = t->ht[9] = mt76x2_rate_power_val(val); - t->ht[10] = t->ht[11] = mt76x2_rate_power_val(val >> 8); + val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_HT_MCS8); + t->ht[8] = t->ht[9] = mt76x02_rate_power_val(val); + t->ht[10] = t->ht[11] = mt76x02_rate_power_val(val >> 8); - val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS12); - t->ht[12] = t->ht[13] = mt76x2_rate_power_val(val); - t->ht[14] = t->ht[15] = mt76x2_rate_power_val(val >> 8); + val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_HT_MCS12); + t->ht[12] = t->ht[13] = mt76x02_rate_power_val(val); + t->ht[14] = t->ht[15] = mt76x02_rate_power_val(val >> 8); - val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_VHT_MCS0); - t->vht[0] = t->vht[1] = mt76x2_rate_power_val(val); - t->vht[2] = t->vht[3] = mt76x2_rate_power_val(val >> 8); + val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_VHT_MCS0); + t->vht[0] = t->vht[1] = mt76x02_rate_power_val(val); + t->vht[2] = t->vht[3] = mt76x02_rate_power_val(val >> 8); - val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_VHT_MCS4); - t->vht[4] = t->vht[5] = mt76x2_rate_power_val(val); - t->vht[6] = t->vht[7] = mt76x2_rate_power_val(val >> 8); + val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_VHT_MCS4); + t->vht[4] = t->vht[5] = mt76x02_rate_power_val(val); + t->vht[6] = t->vht[7] = mt76x02_rate_power_val(val >> 8); - val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_VHT_MCS8); + val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_VHT_MCS8); if (!is_5ghz) val >>= 8; - t->vht[8] = t->vht[9] = mt76x2_rate_power_val(val >> 8); -} -EXPORT_SYMBOL_GPL(mt76x2_get_rate_power); + t->vht[8] = t->vht[9] = mt76x02_rate_power_val(val >> 8); -int mt76x2_get_max_rate_power(struct mt76_rate_power *r) -{ - int i; - s8 ret = 0; - - for (i = 0; i < sizeof(r->all); i++) - ret = max(ret, r->all[i]); - - return ret; + memcpy(t->stbc, t->ht, sizeof(t->stbc[0]) * 8); + t->stbc[8] = t->vht[8]; + t->stbc[9] = t->vht[9]; } -EXPORT_SYMBOL_GPL(mt76x2_get_max_rate_power); +EXPORT_SYMBOL_GPL(mt76x2_get_rate_power); static void -mt76x2_get_power_info_2g(struct mt76x2_dev *dev, struct mt76x2_tx_power_info *t, - struct ieee80211_channel *chan, int chain, int offset) +mt76x2_get_power_info_2g(struct 
mt76x02_dev *dev, + struct mt76x2_tx_power_info *t, + struct ieee80211_channel *chan, + int chain, int offset) { int channel = chan->hw_value; int delta_idx; @@ -518,15 +388,17 @@ mt76x2_get_power_info_2g(struct mt76x2_dev *dev, struct mt76x2_tx_power_info *t, t->chain[chain].tssi_slope = data[0]; t->chain[chain].tssi_offset = data[1]; t->chain[chain].target_power = data[2]; - t->chain[chain].delta = mt76x2_sign_extend_optional(data[delta_idx], 7); + t->chain[chain].delta = mt76x02_sign_extend_optional(data[delta_idx], 7); - val = mt76x2_eeprom_get(dev, MT_EE_RF_2G_TSSI_OFF_TXPOWER); + val = mt76x02_eeprom_get(&dev->mt76, MT_EE_RF_2G_TSSI_OFF_TXPOWER); t->target_power = val >> 8; } static void -mt76x2_get_power_info_5g(struct mt76x2_dev *dev, struct mt76x2_tx_power_info *t, - struct ieee80211_channel *chan, int chain, int offset) +mt76x2_get_power_info_5g(struct mt76x02_dev *dev, + struct mt76x2_tx_power_info *t, + struct ieee80211_channel *chan, + int chain, int offset) { int channel = chan->hw_value; enum mt76x2_cal_channel_group group; @@ -567,13 +439,13 @@ mt76x2_get_power_info_5g(struct mt76x2_dev *dev, struct mt76x2_tx_power_info *t, t->chain[chain].tssi_slope = data[0]; t->chain[chain].tssi_offset = data[1]; t->chain[chain].target_power = data[2]; - t->chain[chain].delta = mt76x2_sign_extend_optional(data[delta_idx], 7); + t->chain[chain].delta = mt76x02_sign_extend_optional(data[delta_idx], 7); - val = mt76x2_eeprom_get(dev, MT_EE_RF_2G_RX_HIGH_GAIN); + val = mt76x02_eeprom_get(&dev->mt76, MT_EE_RF_2G_RX_HIGH_GAIN); t->target_power = val & 0xff; } -void mt76x2_get_power_info(struct mt76x2_dev *dev, +void mt76x2_get_power_info(struct mt76x02_dev *dev, struct mt76x2_tx_power_info *t, struct ieee80211_channel *chan) { @@ -581,8 +453,8 @@ void mt76x2_get_power_info(struct mt76x2_dev *dev, memset(t, 0, sizeof(*t)); - bw40 = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_DELTA_BW40); - bw80 = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_DELTA_BW80); + bw40 = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_DELTA_BW40); + bw80 = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_DELTA_BW80); if (chan->band == NL80211_BAND_5GHZ) { bw40 >>= 8; @@ -597,15 +469,16 @@ void mt76x2_get_power_info(struct mt76x2_dev *dev, MT_EE_TX_POWER_1_START_2G); } - if (mt76x2_tssi_enabled(dev) || !field_valid(t->target_power)) + if (mt76x02_tssi_enabled(&dev->mt76) || + !mt76x02_field_valid(t->target_power)) t->target_power = t->chain[0].target_power; - t->delta_bw40 = mt76x2_rate_power_val(bw40); - t->delta_bw80 = mt76x2_rate_power_val(bw80); + t->delta_bw40 = mt76x02_rate_power_val(bw40); + t->delta_bw80 = mt76x02_rate_power_val(bw80); } EXPORT_SYMBOL_GPL(mt76x2_get_power_info); -int mt76x2_get_temp_comp(struct mt76x2_dev *dev, struct mt76x2_temp_comp *t) +int mt76x2_get_temp_comp(struct mt76x02_dev *dev, struct mt76x2_temp_comp *t) { enum nl80211_band band = dev->mt76.chandef.chan->band; u16 val, slope; @@ -613,20 +486,24 @@ int mt76x2_get_temp_comp(struct mt76x2_dev *dev, struct mt76x2_temp_comp *t) memset(t, 0, sizeof(*t)); - if (!mt76x2_temp_tx_alc_enabled(dev)) + if (!mt76x02_temp_tx_alc_enabled(&dev->mt76)) return -EINVAL; - if (!mt76x2_ext_pa_enabled(dev, band)) + if (!mt76x02_ext_pa_enabled(&dev->mt76, band)) return -EINVAL; - val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_EXT_PA_5G) >> 8; + val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_EXT_PA_5G) >> 8; t->temp_25_ref = val & 0x7f; if (band == NL80211_BAND_5GHZ) { - slope = mt76x2_eeprom_get(dev, MT_EE_RF_TEMP_COMP_SLOPE_5G); - bounds = mt76x2_eeprom_get(dev, 
MT_EE_TX_POWER_EXT_PA_5G); + slope = mt76x02_eeprom_get(&dev->mt76, + MT_EE_RF_TEMP_COMP_SLOPE_5G); + bounds = mt76x02_eeprom_get(&dev->mt76, + MT_EE_TX_POWER_EXT_PA_5G); } else { - slope = mt76x2_eeprom_get(dev, MT_EE_RF_TEMP_COMP_SLOPE_2G); - bounds = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_DELTA_BW80) >> 8; + slope = mt76x02_eeprom_get(&dev->mt76, + MT_EE_RF_TEMP_COMP_SLOPE_2G); + bounds = mt76x02_eeprom_get(&dev->mt76, + MT_EE_TX_POWER_DELTA_BW80) >> 8; } t->high_slope = slope & 0xff; @@ -638,18 +515,7 @@ int mt76x2_get_temp_comp(struct mt76x2_dev *dev, struct mt76x2_temp_comp *t) } EXPORT_SYMBOL_GPL(mt76x2_get_temp_comp); -bool mt76x2_ext_pa_enabled(struct mt76x2_dev *dev, enum nl80211_band band) -{ - u16 conf0 = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_0); - - if (band == NL80211_BAND_5GHZ) - return !(conf0 & MT_EE_NIC_CONF_0_PA_INT_5G); - else - return !(conf0 & MT_EE_NIC_CONF_0_PA_INT_2G); -} -EXPORT_SYMBOL_GPL(mt76x2_ext_pa_enabled); - -int mt76x2_eeprom_init(struct mt76x2_dev *dev) +int mt76x2_eeprom_init(struct mt76x02_dev *dev) { int ret; @@ -657,7 +523,7 @@ int mt76x2_eeprom_init(struct mt76x2_dev *dev) if (ret) return ret; - mt76x2_eeprom_parse_hw_cap(dev); + mt76x02_eeprom_parse_hw_cap(&dev->mt76); mt76x2_eeprom_get_macaddr(dev); mt76_eeprom_override(&dev->mt76); dev->mt76.macaddr[0] &= ~BIT(1); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h new file mode 100644 index 000000000000..c97b31c77d83 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h @@ -0,0 +1,73 @@ +/* + * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef __MT76x2_EEPROM_H +#define __MT76x2_EEPROM_H + +#include "../mt76x02_eeprom.h" + +enum mt76x2_cal_channel_group { + MT_CH_5G_JAPAN, + MT_CH_5G_UNII_1, + MT_CH_5G_UNII_2, + MT_CH_5G_UNII_2E_1, + MT_CH_5G_UNII_2E_2, + MT_CH_5G_UNII_3, + __MT_CH_MAX +}; + +struct mt76x2_tx_power_info { + u8 target_power; + + s8 delta_bw40; + s8 delta_bw80; + + struct { + s8 tssi_slope; + s8 tssi_offset; + s8 target_power; + s8 delta; + } chain[MT_MAX_CHAINS]; +}; + +struct mt76x2_temp_comp { + u8 temp_25_ref; + int lower_bound; /* J */ + int upper_bound; /* J */ + unsigned int high_slope; /* J / dB */ + unsigned int low_slope; /* J / dB */ +}; + +void mt76x2_get_rate_power(struct mt76x02_dev *dev, struct mt76_rate_power *t, + struct ieee80211_channel *chan); +void mt76x2_get_power_info(struct mt76x02_dev *dev, + struct mt76x2_tx_power_info *t, + struct ieee80211_channel *chan); +int mt76x2_get_temp_comp(struct mt76x02_dev *dev, struct mt76x2_temp_comp *t); +void mt76x2_read_rx_gain(struct mt76x02_dev *dev); + +static inline bool +mt76x2_has_ext_lna(struct mt76x02_dev *dev) +{ + u32 val = mt76x02_eeprom_get(&dev->mt76, MT_EE_NIC_CONF_1); + + if (dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ) + return val & MT_EE_NIC_CONF_1_LNA_EXT_2G; + else + return val & MT_EE_NIC_CONF_1_LNA_EXT_5G; +} + +#endif diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_init_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2/init.c index 324b2a4b8b67..ccd9bc9d3e1e 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_init_common.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/init.c @@ -16,44 +16,11 @@ */ #include "mt76x2.h" -#include "mt76x2_eeprom.h" - -#define CCK_RATE(_idx, _rate) { \ - .bitrate = _rate, \ - .flags = IEEE80211_RATE_SHORT_PREAMBLE, \ - .hw_value = (MT_PHY_TYPE_CCK << 8) | _idx, \ - .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (8 + _idx), \ -} - -#define OFDM_RATE(_idx, _rate) { \ - .bitrate = _rate, \ - .hw_value = (MT_PHY_TYPE_OFDM << 8) | _idx, \ - .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | _idx, \ -} - -struct ieee80211_rate mt76x2_rates[] = { - CCK_RATE(0, 10), - CCK_RATE(1, 20), - CCK_RATE(2, 55), - CCK_RATE(3, 110), - OFDM_RATE(0, 60), - OFDM_RATE(1, 90), - OFDM_RATE(2, 120), - OFDM_RATE(3, 180), - OFDM_RATE(4, 240), - OFDM_RATE(5, 360), - OFDM_RATE(6, 480), - OFDM_RATE(7, 540), -}; -EXPORT_SYMBOL_GPL(mt76x2_rates); - -struct mt76x2_reg_pair { - u32 reg; - u32 value; -}; +#include "eeprom.h" +#include "../mt76x02_phy.h" static void -mt76x2_set_wlan_state(struct mt76x2_dev *dev, bool enable) +mt76x2_set_wlan_state(struct mt76x02_dev *dev, bool enable) { u32 val = mt76_rr(dev, MT_WLAN_FUN_CTRL); @@ -68,10 +35,13 @@ mt76x2_set_wlan_state(struct mt76x2_dev *dev, bool enable) udelay(20); } -void mt76x2_reset_wlan(struct mt76x2_dev *dev, bool enable) +void mt76x2_reset_wlan(struct mt76x02_dev *dev, bool enable) { u32 val; + if (!enable) + goto out; + val = mt76_rr(dev, MT_WLAN_FUN_CTRL); val &= ~MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL; @@ -87,22 +57,12 @@ void mt76x2_reset_wlan(struct mt76x2_dev *dev, bool enable) mt76_wr(dev, MT_WLAN_FUN_CTRL, val); udelay(20); +out: mt76x2_set_wlan_state(dev, enable); } EXPORT_SYMBOL_GPL(mt76x2_reset_wlan); -static void -mt76x2_write_reg_pairs(struct mt76x2_dev *dev, - const struct mt76x2_reg_pair *data, int len) -{ - while (len > 0) { - mt76_wr(dev, data->reg, data->value); - len--; - data++; - } -} - -void mt76_write_mac_initvals(struct mt76x2_dev *dev) +void mt76_write_mac_initvals(struct mt76x02_dev *dev) { #define DEFAULT_PROT_CFG_CCK \ 
(FIELD_PREP(MT_PROT_CFG_RATE, 0x3) | \ @@ -128,7 +88,7 @@ void mt76_write_mac_initvals(struct mt76x2_dev *dev) FIELD_PREP(MT_PROT_CFG_NAV, 1) | \ FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x3f)) - static const struct mt76x2_reg_pair vals[] = { + static const struct mt76_reg_pair vals[] = { /* Copied from MediaTek reference source */ { MT_PBF_SYS_CTRL, 0x00080c00 }, { MT_PBF_CFG, 0x1efebcff }, @@ -184,7 +144,7 @@ void mt76_write_mac_initvals(struct mt76x2_dev *dev) { MT_PROT_AUTO_TX_CFG, 0x00830083 }, { MT_HT_CTRL_CFG, 0x000001ff }, }; - struct mt76x2_reg_pair prot_vals[] = { + struct mt76_reg_pair prot_vals[] = { { MT_CCK_PROT_CFG, DEFAULT_PROT_CFG_CCK }, { MT_OFDM_PROT_CFG, DEFAULT_PROT_CFG_OFDM }, { MT_MM20_PROT_CFG, DEFAULT_PROT_CFG_20 }, @@ -193,12 +153,12 @@ void mt76_write_mac_initvals(struct mt76x2_dev *dev) { MT_GF40_PROT_CFG, DEFAULT_PROT_CFG_40 }, }; - mt76x2_write_reg_pairs(dev, vals, ARRAY_SIZE(vals)); - mt76x2_write_reg_pairs(dev, prot_vals, ARRAY_SIZE(prot_vals)); + mt76_wr_rp(dev, 0, vals, ARRAY_SIZE(vals)); + mt76_wr_rp(dev, 0, prot_vals, ARRAY_SIZE(prot_vals)); } EXPORT_SYMBOL_GPL(mt76_write_mac_initvals); -void mt76x2_init_device(struct mt76x2_dev *dev) +void mt76x2_init_device(struct mt76x02_dev *dev) { struct ieee80211_hw *hw = mt76_hw(dev); @@ -208,8 +168,8 @@ void mt76x2_init_device(struct mt76x2_dev *dev) hw->max_rate_tries = 1; hw->extra_tx_headroom = 2; - hw->sta_data_size = sizeof(struct mt76x2_sta); - hw->vif_data_size = sizeof(struct mt76x2_vif); + hw->sta_data_size = sizeof(struct mt76x02_sta); + hw->vif_data_size = sizeof(struct mt76x02_vif); ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES); ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER); @@ -217,9 +177,9 @@ void mt76x2_init_device(struct mt76x2_dev *dev) dev->mt76.sband_2g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING; dev->mt76.sband_5g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING; - dev->chainmask = 0x202; - dev->global_wcid.idx = 255; - dev->global_wcid.hw_key_idx = -1; + dev->mt76.chainmask = 0x202; + dev->mt76.global_wcid.idx = 255; + dev->mt76.global_wcid.hw_key_idx = -1; dev->slottime = 9; /* init antenna configuration */ @@ -227,7 +187,7 @@ void mt76x2_init_device(struct mt76x2_dev *dev) } EXPORT_SYMBOL_GPL(mt76x2_init_device); -void mt76x2_init_txpower(struct mt76x2_dev *dev, +void mt76x2_init_txpower(struct mt76x02_dev *dev, struct ieee80211_supported_band *sband) { struct ieee80211_channel *chan; @@ -248,7 +208,7 @@ void mt76x2_init_txpower(struct mt76x2_dev *dev, mt76x2_get_rate_power(dev, &t, chan); - chan->max_power = mt76x2_get_max_rate_power(&t) + + chan->max_power = mt76x02_get_max_rate_power(&t) + target_power; chan->max_power /= 2; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2/mac.c new file mode 100644 index 000000000000..e25905c91ee2 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mac.c @@ -0,0 +1,54 @@ +/* + * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> + * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "mt76x2.h" + +void mt76x2_mac_stop(struct mt76x02_dev *dev, bool force) +{ + bool stopped = false; + u32 rts_cfg; + int i; + + mt76_wr(dev, MT_MAC_SYS_CTRL, 0); + + rts_cfg = mt76_rr(dev, MT_TX_RTS_CFG); + mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg & ~MT_TX_RTS_CFG_RETRY_LIMIT); + + /* Wait for MAC to become idle */ + for (i = 0; i < 300; i++) { + if ((mt76_rr(dev, MT_MAC_STATUS) & + (MT_MAC_STATUS_RX | MT_MAC_STATUS_TX)) || + mt76_rr(dev, MT_BBP(IBI, 12))) { + udelay(1); + continue; + } + + stopped = true; + break; + } + + if (force && !stopped) { + mt76_set(dev, MT_BBP(CORE, 4), BIT(1)); + mt76_clear(dev, MT_BBP(CORE, 4), BIT(1)); + + mt76_set(dev, MT_BBP(CORE, 4), BIT(0)); + mt76_clear(dev, MT_BBP(CORE, 4), BIT(0)); + } + + mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg); +} +EXPORT_SYMBOL_GPL(mt76x2_mac_stop); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mac.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mac.h new file mode 100644 index 000000000000..a31bd49ae6cb --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mac.h @@ -0,0 +1,37 @@ +/* + * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __MT76x2_MAC_H +#define __MT76x2_MAC_H + +#include "mt76x2.h" + +struct mt76x02_dev; +struct mt76x2_sta; +struct mt76x02_vif; + +int mt76x2_mac_start(struct mt76x02_dev *dev); +void mt76x2_mac_stop(struct mt76x02_dev *dev, bool force); +void mt76x2_mac_resume(struct mt76x02_dev *dev); +void mt76x2_mac_set_bssid(struct mt76x02_dev *dev, u8 idx, const u8 *addr); + +int mt76x2_mac_set_beacon(struct mt76x02_dev *dev, u8 vif_idx, + struct sk_buff *skb); +void mt76x2_mac_set_beacon_enable(struct mt76x02_dev *dev, u8 vif_idx, bool val); + +void mt76x2_mac_work(struct work_struct *work); + +#endif diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c new file mode 100644 index 000000000000..134037a227d7 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c @@ -0,0 +1,124 @@ +/* + * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> + * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include <linux/kernel.h> +#include <linux/firmware.h> +#include <linux/delay.h> + +#include "mt76x2.h" +#include "mcu.h" +#include "eeprom.h" + +int mt76x2_mcu_set_channel(struct mt76x02_dev *dev, u8 channel, u8 bw, + u8 bw_index, bool scan) +{ + struct sk_buff *skb; + struct { + u8 idx; + u8 scan; + u8 bw; + u8 _pad0; + + __le16 chainmask; + u8 ext_chan; + u8 _pad1; + + } __packed __aligned(4) msg = { + .idx = channel, + .scan = scan, + .bw = bw, + .chainmask = cpu_to_le16(dev->mt76.chainmask), + }; + + /* first set the channel without the extension channel info */ + skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg)); + mt76_mcu_send_msg(dev, skb, CMD_SWITCH_CHANNEL_OP, true); + + usleep_range(5000, 10000); + + msg.ext_chan = 0xe0 + bw_index; + skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg)); + return mt76_mcu_send_msg(dev, skb, CMD_SWITCH_CHANNEL_OP, true); +} +EXPORT_SYMBOL_GPL(mt76x2_mcu_set_channel); + +int mt76x2_mcu_load_cr(struct mt76x02_dev *dev, u8 type, u8 temp_level, + u8 channel) +{ + struct mt76_dev *mdev = &dev->mt76; + struct sk_buff *skb; + struct { + u8 cr_mode; + u8 temp; + u8 ch; + u8 _pad0; + + __le32 cfg; + } __packed __aligned(4) msg = { + .cr_mode = type, + .temp = temp_level, + .ch = channel, + }; + u32 val; + + val = BIT(31); + val |= (mt76x02_eeprom_get(mdev, MT_EE_NIC_CONF_0) >> 8) & 0x00ff; + val |= (mt76x02_eeprom_get(mdev, MT_EE_NIC_CONF_1) << 8) & 0xff00; + msg.cfg = cpu_to_le32(val); + + /* first set the channel without the extension channel info */ + skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg)); + return mt76_mcu_send_msg(dev, skb, CMD_LOAD_CR, true); +} +EXPORT_SYMBOL_GPL(mt76x2_mcu_load_cr); + +int mt76x2_mcu_init_gain(struct mt76x02_dev *dev, u8 channel, u32 gain, + bool force) +{ + struct sk_buff *skb; + struct { + __le32 channel; + __le32 gain_val; + } __packed __aligned(4) msg = { + .channel = cpu_to_le32(channel), + .gain_val = cpu_to_le32(gain), + }; + + if (force) + msg.channel |= cpu_to_le32(BIT(31)); + + skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg)); + return mt76_mcu_send_msg(dev, skb, CMD_INIT_GAIN_OP, true); +} +EXPORT_SYMBOL_GPL(mt76x2_mcu_init_gain); + +int mt76x2_mcu_tssi_comp(struct mt76x02_dev *dev, + struct mt76x2_tssi_comp *tssi_data) +{ + struct sk_buff *skb; + struct { + __le32 id; + struct mt76x2_tssi_comp data; + } __packed __aligned(4) msg = { + .id = cpu_to_le32(MCU_CAL_TSSI_COMP), + .data = *tssi_data, + }; + + skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg)); + return mt76_mcu_send_msg(dev, skb, CMD_CALIBRATION_OP, true); +} +EXPORT_SYMBOL_GPL(mt76x2_mcu_tssi_comp); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.h index e40293f21417..acfa2b570c7c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.h @@ -17,19 +17,14 @@ #ifndef __MT76x2_MCU_H #define __MT76x2_MCU_H +#include "../mt76x02_mcu.h" + /* Register definitions */ #define MT_MCU_CPU_CTL 0x0704 #define MT_MCU_CLOCK_CTL 0x0708 
-#define MT_MCU_RESET_CTL 0x070C -#define MT_MCU_INT_LEVEL 0x0718 -#define MT_MCU_COM_REG0 0x0730 -#define MT_MCU_COM_REG1 0x0734 -#define MT_MCU_COM_REG2 0x0738 -#define MT_MCU_COM_REG3 0x073C #define MT_MCU_PCIE_REMAP_BASE1 0x0740 #define MT_MCU_PCIE_REMAP_BASE2 0x0744 #define MT_MCU_PCIE_REMAP_BASE3 0x0748 -#define MT_MCU_PCIE_REMAP_BASE4 0x074C #define MT_LED_CTRL 0x0770 #define MT_LED_CTRL_REPLAY(_n) BIT(0 + (8 * (_n))) @@ -54,62 +49,15 @@ #define MT_LED_STATUS_DURATION(_v) (((_v) << __ffs(MT_LED_STATUS_DURATION_MASK)) & \ MT_LED_STATUS_DURATION_MASK) -#define MT_MCU_SEMAPHORE_00 0x07B0 -#define MT_MCU_SEMAPHORE_01 0x07B4 -#define MT_MCU_SEMAPHORE_02 0x07B8 -#define MT_MCU_SEMAPHORE_03 0x07BC - #define MT_MCU_ROM_PATCH_OFFSET 0x80000 #define MT_MCU_ROM_PATCH_ADDR 0x90000 #define MT_MCU_ILM_OFFSET 0x80000 -#define MT_MCU_ILM_ADDR 0x80000 #define MT_MCU_DLM_OFFSET 0x100000 #define MT_MCU_DLM_ADDR 0x90000 #define MT_MCU_DLM_ADDR_E3 0x90800 -enum mcu_cmd { - CMD_FUN_SET_OP = 1, - CMD_LOAD_CR = 2, - CMD_INIT_GAIN_OP = 3, - CMD_DYNC_VGA_OP = 6, - CMD_TDLS_CH_SW = 7, - CMD_BURST_WRITE = 8, - CMD_READ_MODIFY_WRITE = 9, - CMD_RANDOM_READ = 10, - CMD_BURST_READ = 11, - CMD_RANDOM_WRITE = 12, - CMD_LED_MODE_OP = 16, - CMD_POWER_SAVING_OP = 20, - CMD_WOW_CONFIG = 21, - CMD_WOW_QUERY = 22, - CMD_WOW_FEATURE = 24, - CMD_CARRIER_DETECT_OP = 28, - CMD_RADOR_DETECT_OP = 29, - CMD_SWITCH_CHANNEL_OP = 30, - CMD_CALIBRATION_OP = 31, - CMD_BEACON_OP = 32, - CMD_ANTENNA_OP = 33, -}; - -enum mcu_function { - Q_SELECT = 1, - BW_SETTING = 2, - USB2_SW_DISCONNECT = 2, - USB3_SW_DISCONNECT = 3, - LOG_FW_DEBUG_MSG = 4, - GET_FW_VERSION = 5, -}; - -enum mcu_power_mode { - RADIO_OFF = 0x30, - RADIO_ON = 0x31, - RADIO_OFF_AUTO_WAKEUP = 0x32, - RADIO_OFF_ADVANCE = 0x33, - RADIO_ON_ADVANCE = 0x34, -}; - enum mcu_calibration { MCU_CAL_R = 1, MCU_CAL_TEMP_SENSOR, @@ -146,27 +94,8 @@ struct mt76x2_tssi_comp { u8 offset1; } __packed __aligned(4); -struct mt76x2_fw_header { - __le32 ilm_len; - __le32 dlm_len; - __le16 build_ver; - __le16 fw_ver; - u8 pad[4]; - char build_time[16]; -}; - -struct mt76x2_patch_header { - char build_time[16]; - char platform[4]; - char hw_version[4]; - char patch_version[4]; - u8 pad[2]; -}; - -int mt76x2_mcu_calibrate(struct mt76x2_dev *dev, enum mcu_calibration type, - u32 param); -int mt76x2_mcu_tssi_comp(struct mt76x2_dev *dev, struct mt76x2_tssi_comp *tssi_data); -int mt76x2_mcu_init_gain(struct mt76x2_dev *dev, u8 channel, u32 gain, +int mt76x2_mcu_tssi_comp(struct mt76x02_dev *dev, struct mt76x2_tssi_comp *tssi_data); +int mt76x2_mcu_init_gain(struct mt76x02_dev *dev, u8 channel, u32 gain, bool force); #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h new file mode 100644 index 000000000000..cbec8c6f1b2d --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h @@ -0,0 +1,107 @@ +/* + * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __MT76x2_H +#define __MT76x2_H + +#include <linux/device.h> +#include <linux/dma-mapping.h> +#include <linux/spinlock.h> +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include <linux/irq.h> +#include <linux/interrupt.h> +#include <linux/mutex.h> +#include <linux/bitops.h> + +#define MT7662_FIRMWARE "mt7662.bin" +#define MT7662_ROM_PATCH "mt7662_rom_patch.bin" +#define MT7662_EEPROM_SIZE 512 + +#define MT7662U_FIRMWARE "mediatek/mt7662u.bin" +#define MT7662U_ROM_PATCH "mediatek/mt7662u_rom_patch.bin" + +#define MT_CALIBRATE_INTERVAL HZ + +#include "../mt76x02.h" +#include "mac.h" +#include "dfs.h" + +static inline bool is_mt7612(struct mt76x02_dev *dev) +{ + return mt76_chip(&dev->mt76) == 0x7612; +} + +static inline bool mt76x2_channel_silent(struct mt76x02_dev *dev) +{ + struct ieee80211_channel *chan = dev->mt76.chandef.chan; + + return ((chan->flags & IEEE80211_CHAN_RADAR) && + chan->dfs_state != NL80211_DFS_AVAILABLE); +} + +extern const struct ieee80211_ops mt76x2_ops; + +struct mt76x02_dev *mt76x2_alloc_device(struct device *pdev); +int mt76x2_register_device(struct mt76x02_dev *dev); +void mt76x2_init_debugfs(struct mt76x02_dev *dev); +void mt76x2_init_device(struct mt76x02_dev *dev); + +void mt76x2_phy_power_on(struct mt76x02_dev *dev); +int mt76x2_init_hardware(struct mt76x02_dev *dev); +void mt76x2_stop_hardware(struct mt76x02_dev *dev); +int mt76x2_eeprom_init(struct mt76x02_dev *dev); +int mt76x2_apply_calibration_data(struct mt76x02_dev *dev, int channel); +void mt76x2_set_tx_ackto(struct mt76x02_dev *dev); + +void mt76x2_phy_set_antenna(struct mt76x02_dev *dev); +int mt76x2_phy_start(struct mt76x02_dev *dev); +int mt76x2_phy_set_channel(struct mt76x02_dev *dev, + struct cfg80211_chan_def *chandef); +void mt76x2_phy_calibrate(struct work_struct *work); +void mt76x2_phy_set_txpower(struct mt76x02_dev *dev); + +int mt76x2_mcu_init(struct mt76x02_dev *dev); +int mt76x2_mcu_set_channel(struct mt76x02_dev *dev, u8 channel, u8 bw, + u8 bw_index, bool scan); +int mt76x2_mcu_load_cr(struct mt76x02_dev *dev, u8 type, u8 temp_level, + u8 channel); + +void mt76x2_cleanup(struct mt76x02_dev *dev); + +void mt76x2_mac_set_tx_protection(struct mt76x02_dev *dev, u32 val); + +void mt76x2_pre_tbtt_tasklet(unsigned long arg); + +void mt76x2_sta_ps(struct mt76_dev *dev, struct ieee80211_sta *sta, bool ps); + +void mt76x2_update_channel(struct mt76_dev *mdev); + +void mt76x2_reset_wlan(struct mt76x02_dev *dev, bool enable); +void mt76x2_init_txpower(struct mt76x02_dev *dev, + struct ieee80211_supported_band *sband); +void mt76_write_mac_initvals(struct mt76x02_dev *dev); + +void mt76x2_phy_tssi_compensate(struct mt76x02_dev *dev, bool wait); +void mt76x2_phy_set_txpower_regs(struct mt76x02_dev *dev, + enum nl80211_band band); +void mt76x2_configure_tx_delay(struct mt76x02_dev *dev, + enum nl80211_band band, u8 bw); +void mt76x2_phy_set_bw(struct mt76x02_dev *dev, int width, u8 ctrl); +void mt76x2_phy_set_band(struct mt76x02_dev *dev, int band, bool primary_upper); +void mt76x2_apply_gain_adj(struct mt76x02_dev *dev); + +#endif diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2u.h 
b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2u.h new file mode 100644 index 000000000000..6e932b5010ef --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2u.h @@ -0,0 +1,60 @@ +/* + * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __MT76x2U_H +#define __MT76x2U_H + +#include <linux/device.h> + +#include "mt76x2.h" +#include "mcu.h" + +#define MT7612U_EEPROM_SIZE 512 + +#define MT_USB_AGGR_SIZE_LIMIT 21 /* 1024B unit */ +#define MT_USB_AGGR_TIMEOUT 0x80 /* 33ns unit */ + +extern const struct ieee80211_ops mt76x2u_ops; + +struct mt76x02_dev *mt76x2u_alloc_device(struct device *pdev); +int mt76x2u_register_device(struct mt76x02_dev *dev); +int mt76x2u_init_hardware(struct mt76x02_dev *dev); +void mt76x2u_cleanup(struct mt76x02_dev *dev); +void mt76x2u_stop_hw(struct mt76x02_dev *dev); + +int mt76x2u_mac_reset(struct mt76x02_dev *dev); +void mt76x2u_mac_resume(struct mt76x02_dev *dev); +int mt76x2u_mac_start(struct mt76x02_dev *dev); +int mt76x2u_mac_stop(struct mt76x02_dev *dev); + +int mt76x2u_phy_set_channel(struct mt76x02_dev *dev, + struct cfg80211_chan_def *chandef); +void mt76x2u_phy_calibrate(struct work_struct *work); +void mt76x2u_phy_channel_calibrate(struct mt76x02_dev *dev); + +void mt76x2u_mcu_complete_urb(struct urb *urb); +int mt76x2u_mcu_set_dynamic_vga(struct mt76x02_dev *dev, u8 channel, bool ap, + bool ext, int rssi, u32 false_cca); +int mt76x2u_mcu_init(struct mt76x02_dev *dev); +int mt76x2u_mcu_fw_init(struct mt76x02_dev *dev); + +int mt76x2u_alloc_queues(struct mt76x02_dev *dev); +void mt76x2u_queues_deinit(struct mt76x02_dev *dev); +void mt76x2u_stop_queues(struct mt76x02_dev *dev); +int mt76x2u_skb_dma_info(struct sk_buff *skb, enum dma_msg_port port, + u32 flags); + +#endif /* __MT76x2U_H */ diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c index e66f047ea448..92432fe97312 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c @@ -19,7 +19,6 @@ #include <linux/pci.h> #include "mt76x2.h" -#include "mt76x2_trace.h" static const struct pci_device_id mt76pci_device_table[] = { { PCI_DEVICE(0x14c3, 0x7662) }, @@ -31,7 +30,7 @@ static const struct pci_device_id mt76pci_device_table[] = { static int mt76pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { - struct mt76x2_dev *dev; + struct mt76x02_dev *dev; int ret; ret = pcim_enable_device(pdev); @@ -53,11 +52,12 @@ mt76pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) return -ENOMEM; mt76_mmio_init(&dev->mt76, pcim_iomap_table(pdev)[0]); + mt76x2_reset_wlan(dev, false); dev->mt76.rev = mt76_rr(dev, MT_ASIC_VERSION); dev_info(dev->mt76.dev, "ASIC revision: %08x\n", dev->mt76.rev); - ret = 
devm_request_irq(dev->mt76.dev, pdev->irq, mt76x2_irq_handler, + ret = devm_request_irq(dev->mt76.dev, pdev->irq, mt76x02_irq_handler, IRQF_SHARED, KBUILD_MODNAME, dev); if (ret) goto error; @@ -88,7 +88,7 @@ static void mt76pci_remove(struct pci_dev *pdev) { struct mt76_dev *mdev = pci_get_drvdata(pdev); - struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76); + struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76); mt76_unregister_device(mdev); mt76x2_cleanup(dev); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_dfs.c index 374cc655c11d..b56febae8945 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_dfs.c @@ -36,7 +36,7 @@ .pwr_jmp = power_jmp \ } -static const struct mt76x2_radar_specs etsi_radar_specs[] = { +static const struct mt76x02_radar_specs etsi_radar_specs[] = { /* 20MHz */ RADAR_SPEC(0, 8, 2, 15, 106, 150, 10, 4900, 100096, 10, 0, 0x7fffffff, 0x155cc0, 0x19cc), @@ -66,7 +66,7 @@ static const struct mt76x2_radar_specs etsi_radar_specs[] = { 0x7fffffff, 0x2191c0, 0x15cc) }; -static const struct mt76x2_radar_specs fcc_radar_specs[] = { +static const struct mt76x02_radar_specs fcc_radar_specs[] = { /* 20MHz */ RADAR_SPEC(0, 8, 2, 12, 106, 150, 5, 2900, 80100, 5, 0, 0x7fffffff, 0xfe808, 0x13dc), @@ -96,7 +96,7 @@ static const struct mt76x2_radar_specs fcc_radar_specs[] = { 0x3938700, 0x57bcf00, 0x1289) }; -static const struct mt76x2_radar_specs jp_w56_radar_specs[] = { +static const struct mt76x02_radar_specs jp_w56_radar_specs[] = { /* 20MHz */ RADAR_SPEC(0, 8, 2, 7, 106, 150, 5, 2900, 80100, 5, 0, 0x7fffffff, 0x14c080, 0x13dc), @@ -126,7 +126,7 @@ static const struct mt76x2_radar_specs jp_w56_radar_specs[] = { 0x3938700, 0X57bcf00, 0x1289) }; -static const struct mt76x2_radar_specs jp_w53_radar_specs[] = { +static const struct mt76x02_radar_specs jp_w53_radar_specs[] = { /* 20MHz */ RADAR_SPEC(0, 8, 2, 9, 106, 150, 20, 28400, 77000, 20, 0, 0x7fffffff, 0x14c080, 0x16cc), @@ -150,8 +150,9 @@ static const struct mt76x2_radar_specs jp_w53_radar_specs[] = { { 0 } }; -static void mt76x2_dfs_set_capture_mode_ctrl(struct mt76x2_dev *dev, - u8 enable) +static void +mt76x2_dfs_set_capture_mode_ctrl(struct mt76x02_dev *dev, + u8 enable) { u32 data; @@ -159,10 +160,10 @@ static void mt76x2_dfs_set_capture_mode_ctrl(struct mt76x2_dev *dev, mt76_wr(dev, MT_BBP(DFS, 36), data); } -static void mt76x2_dfs_seq_pool_put(struct mt76x2_dev *dev, - struct mt76x2_dfs_sequence *seq) +static void mt76x2_dfs_seq_pool_put(struct mt76x02_dev *dev, + struct mt76x02_dfs_sequence *seq) { - struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; + struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; list_add(&seq->head, &dfs_pd->seq_pool); @@ -170,17 +171,17 @@ static void mt76x2_dfs_seq_pool_put(struct mt76x2_dev *dev, dfs_pd->seq_stats.seq_len--; } -static -struct mt76x2_dfs_sequence *mt76x2_dfs_seq_pool_get(struct mt76x2_dev *dev) +static struct mt76x02_dfs_sequence * +mt76x2_dfs_seq_pool_get(struct mt76x02_dev *dev) { - struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; - struct mt76x2_dfs_sequence *seq; + struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; + struct mt76x02_dfs_sequence *seq; if (list_empty(&dfs_pd->seq_pool)) { seq = devm_kzalloc(dev->mt76.dev, sizeof(*seq), GFP_ATOMIC); } else { seq = list_first_entry(&dfs_pd->seq_pool, - struct mt76x2_dfs_sequence, + struct mt76x02_dfs_sequence, head); list_del(&seq->head); 
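	/*
	 * (Illustrative aside, not part of the patch:) the sequence pool is
	 * a plain free list: mt76x2_dfs_seq_pool_put() pushes retired
	 * sequences onto seq_pool and this function pops one back off,
	 * falling back to devm_kzalloc() only when the list is empty, which
	 * keeps allocations out of the radar-detection hot path. The same
	 * pattern in isolation, with hypothetical names:
	 *
	 *	struct obj { struct list_head head; };
	 *
	 *	static struct obj *pool_get(struct device *dev,
	 *				    struct list_head *pool)
	 *	{
	 *		struct obj *o;
	 *
	 *		if (list_empty(pool))
	 *			return devm_kzalloc(dev, sizeof(*o),
	 *					    GFP_ATOMIC);
	 *		o = list_first_entry(pool, struct obj, head);
	 *		list_del(&o->head);
	 *		return o;
	 *	}
	 */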
dfs_pd->seq_stats.seq_pool_len--; @@ -213,10 +214,10 @@ static int mt76x2_dfs_get_multiple(int val, int frac, int margin) return factor; } -static void mt76x2_dfs_detector_reset(struct mt76x2_dev *dev) +static void mt76x2_dfs_detector_reset(struct mt76x02_dev *dev) { - struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; - struct mt76x2_dfs_sequence *seq, *tmp_seq; + struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; + struct mt76x02_dfs_sequence *seq, *tmp_seq; int i; /* reset hw detector */ @@ -234,11 +235,11 @@ static void mt76x2_dfs_detector_reset(struct mt76x2_dev *dev) } } -static bool mt76x2_dfs_check_chirp(struct mt76x2_dev *dev) +static bool mt76x2_dfs_check_chirp(struct mt76x02_dev *dev) { bool ret = false; u32 current_ts, delta_ts; - struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; + struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; current_ts = mt76_rr(dev, MT_PBF_LIFE_TIMER); delta_ts = current_ts - dfs_pd->chirp_pulse_ts; @@ -255,8 +256,8 @@ static bool mt76x2_dfs_check_chirp(struct mt76x2_dev *dev) return ret; } -static void mt76x2_dfs_get_hw_pulse(struct mt76x2_dev *dev, - struct mt76x2_dfs_hw_pulse *pulse) +static void mt76x2_dfs_get_hw_pulse(struct mt76x02_dev *dev, + struct mt76x02_dfs_hw_pulse *pulse) { u32 data; @@ -275,8 +276,8 @@ static void mt76x2_dfs_get_hw_pulse(struct mt76x2_dev *dev, pulse->burst = mt76_rr(dev, MT_BBP(DFS, 22)); } -static bool mt76x2_dfs_check_hw_pulse(struct mt76x2_dev *dev, - struct mt76x2_dfs_hw_pulse *pulse) +static bool mt76x2_dfs_check_hw_pulse(struct mt76x02_dev *dev, + struct mt76x02_dfs_hw_pulse *pulse) { bool ret = false; @@ -370,8 +371,8 @@ static bool mt76x2_dfs_check_hw_pulse(struct mt76x2_dev *dev, return ret; } -static bool mt76x2_dfs_fetch_event(struct mt76x2_dev *dev, - struct mt76x2_dfs_event *event) +static bool mt76x2_dfs_fetch_event(struct mt76x02_dev *dev, + struct mt76x02_dfs_event *event) { u32 data; @@ -397,12 +398,12 @@ static bool mt76x2_dfs_fetch_event(struct mt76x2_dev *dev, return true; } -static bool mt76x2_dfs_check_event(struct mt76x2_dev *dev, - struct mt76x2_dfs_event *event) +static bool mt76x2_dfs_check_event(struct mt76x02_dev *dev, + struct mt76x02_dfs_event *event) { if (event->engine == 2) { - struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; - struct mt76x2_dfs_event_rb *event_buff = &dfs_pd->event_rb[1]; + struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; + struct mt76x02_dfs_event_rb *event_buff = &dfs_pd->event_rb[1]; u16 last_event_idx; u32 delta_ts; @@ -416,11 +417,11 @@ static bool mt76x2_dfs_check_event(struct mt76x2_dev *dev, return true; } -static void mt76x2_dfs_queue_event(struct mt76x2_dev *dev, - struct mt76x2_dfs_event *event) +static void mt76x2_dfs_queue_event(struct mt76x02_dev *dev, + struct mt76x02_dfs_event *event) { - struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; - struct mt76x2_dfs_event_rb *event_buff; + struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; + struct mt76x02_dfs_event_rb *event_buff; /* add radar event to ring buffer */ event_buff = event->engine == 2 ? 
&dfs_pd->event_rb[1] @@ -434,16 +435,16 @@ static void mt76x2_dfs_queue_event(struct mt76x2_dev *dev, MT_DFS_EVENT_BUFLEN); } -static int mt76x2_dfs_create_sequence(struct mt76x2_dev *dev, - struct mt76x2_dfs_event *event, +static int mt76x2_dfs_create_sequence(struct mt76x02_dev *dev, + struct mt76x02_dfs_event *event, u16 cur_len) { - struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; - struct mt76x2_dfs_sw_detector_params *sw_params; + struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; + struct mt76x02_dfs_sw_detector_params *sw_params; u32 width_delta, with_sum, factor, cur_pri; - struct mt76x2_dfs_sequence seq, *seq_p; - struct mt76x2_dfs_event_rb *event_rb; - struct mt76x2_dfs_event *cur_event; + struct mt76x02_dfs_sequence seq, *seq_p; + struct mt76x02_dfs_event_rb *event_rb; + struct mt76x02_dfs_event *cur_event; int i, j, end, pri; event_rb = event->engine == 2 ? &dfs_pd->event_rb[1] @@ -521,12 +522,12 @@ next: return 0; } -static u16 mt76x2_dfs_add_event_to_sequence(struct mt76x2_dev *dev, - struct mt76x2_dfs_event *event) +static u16 mt76x2_dfs_add_event_to_sequence(struct mt76x02_dev *dev, + struct mt76x02_dfs_event *event) { - struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; - struct mt76x2_dfs_sw_detector_params *sw_params; - struct mt76x2_dfs_sequence *seq, *tmp_seq; + struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; + struct mt76x02_dfs_sw_detector_params *sw_params; + struct mt76x02_dfs_sequence *seq, *tmp_seq; u16 max_seq_len = 0; u32 factor, pri; @@ -553,10 +554,10 @@ static u16 mt76x2_dfs_add_event_to_sequence(struct mt76x2_dev *dev, return max_seq_len; } -static bool mt76x2_dfs_check_detection(struct mt76x2_dev *dev) +static bool mt76x2_dfs_check_detection(struct mt76x02_dev *dev) { - struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; - struct mt76x2_dfs_sequence *seq; + struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; + struct mt76x02_dfs_sequence *seq; if (list_empty(&dfs_pd->sequences)) return false; @@ -570,10 +571,10 @@ static bool mt76x2_dfs_check_detection(struct mt76x2_dev *dev) return false; } -static void mt76x2_dfs_add_events(struct mt76x2_dev *dev) +static void mt76x2_dfs_add_events(struct mt76x02_dev *dev) { - struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; - struct mt76x2_dfs_event event; + struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; + struct mt76x02_dfs_event event; int i, seq_len; /* disable debug mode */ @@ -597,11 +598,11 @@ static void mt76x2_dfs_add_events(struct mt76x2_dev *dev) mt76x2_dfs_set_capture_mode_ctrl(dev, true); } -static void mt76x2_dfs_check_event_window(struct mt76x2_dev *dev) +static void mt76x2_dfs_check_event_window(struct mt76x02_dev *dev) { - struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; - struct mt76x2_dfs_event_rb *event_buff; - struct mt76x2_dfs_event *event; + struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; + struct mt76x02_dfs_event_rb *event_buff; + struct mt76x02_dfs_event *event; int i; for (i = 0; i < ARRAY_SIZE(dfs_pd->event_rb); i++) { @@ -622,8 +623,8 @@ static void mt76x2_dfs_check_event_window(struct mt76x2_dev *dev) static void mt76x2_dfs_tasklet(unsigned long arg) { - struct mt76x2_dev *dev = (struct mt76x2_dev *)arg; - struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; + struct mt76x02_dev *dev = (struct mt76x02_dev *)arg; + struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; u32 engine_mask; int i; @@ -653,7 +654,7 @@ static void mt76x2_dfs_tasklet(unsigned long arg) goto out; for (i = 0; 
i < MT_DFS_NUM_ENGINES; i++) { - struct mt76x2_dfs_hw_pulse pulse; + struct mt76x02_dfs_hw_pulse pulse; if (!(engine_mask & (1 << i))) continue; @@ -678,12 +679,12 @@ static void mt76x2_dfs_tasklet(unsigned long arg) mt76_wr(dev, MT_BBP(DFS, 1), 0xf); out: - mt76x2_irq_enable(dev, MT_INT_GPTIMER); + mt76x02_irq_enable(dev, MT_INT_GPTIMER); } -static void mt76x2_dfs_init_sw_detector(struct mt76x2_dev *dev) +static void mt76x2_dfs_init_sw_detector(struct mt76x02_dev *dev) { - struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; + struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; switch (dev->dfs_pd.region) { case NL80211_DFS_FCC: @@ -707,11 +708,11 @@ static void mt76x2_dfs_init_sw_detector(struct mt76x2_dev *dev) } } -static void mt76x2_dfs_set_bbp_params(struct mt76x2_dev *dev) +static void mt76x2_dfs_set_bbp_params(struct mt76x02_dev *dev) { - u32 data; + const struct mt76x02_radar_specs *radar_specs; u8 i, shift; - const struct mt76x2_radar_specs *radar_specs; + u32 data; switch (dev->mt76.chandef.width) { case NL80211_CHAN_WIDTH_40: @@ -802,7 +803,7 @@ static void mt76x2_dfs_set_bbp_params(struct mt76x2_dev *dev) mt76_wr(dev, 0x212c, 0x0c350001); } -void mt76x2_dfs_adjust_agc(struct mt76x2_dev *dev) +void mt76x2_dfs_adjust_agc(struct mt76x02_dev *dev) { u32 agc_r8, agc_r4, val_r8, val_r4, dfs_r31; @@ -823,7 +824,7 @@ void mt76x2_dfs_adjust_agc(struct mt76x2_dev *dev) mt76_wr(dev, MT_BBP(DFS, 32), 0x00040071); } -void mt76x2_dfs_init_params(struct mt76x2_dev *dev) +void mt76x2_dfs_init_params(struct mt76x02_dev *dev) { struct cfg80211_chan_def *chandef = &dev->mt76.chandef; @@ -834,7 +835,7 @@ void mt76x2_dfs_init_params(struct mt76x2_dev *dev) /* enable debug mode */ mt76x2_dfs_set_capture_mode_ctrl(dev, true); - mt76x2_irq_enable(dev, MT_INT_GPTIMER); + mt76x02_irq_enable(dev, MT_INT_GPTIMER); mt76_rmw_field(dev, MT_INT_TIMER_EN, MT_INT_TIMER_EN_GP_TIMER_EN, 1); } else { @@ -844,15 +845,15 @@ void mt76x2_dfs_init_params(struct mt76x2_dev *dev) mt76_wr(dev, MT_BBP(DFS, 1), 0xf); mt76_wr(dev, 0x212c, 0); - mt76x2_irq_disable(dev, MT_INT_GPTIMER); + mt76x02_irq_disable(dev, MT_INT_GPTIMER); mt76_rmw_field(dev, MT_INT_TIMER_EN, MT_INT_TIMER_EN_GP_TIMER_EN, 0); } } -void mt76x2_dfs_init_detector(struct mt76x2_dev *dev) +void mt76x2_dfs_init_detector(struct mt76x02_dev *dev) { - struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; + struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; INIT_LIST_HEAD(&dfs_pd->sequences); INIT_LIST_HEAD(&dfs_pd->seq_pool); @@ -862,10 +863,10 @@ void mt76x2_dfs_init_detector(struct mt76x2_dev *dev) (unsigned long)dev); } -void mt76x2_dfs_set_domain(struct mt76x2_dev *dev, +void mt76x2_dfs_set_domain(struct mt76x02_dev *dev, enum nl80211_dfs_regions region) { - struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; + struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; if (dfs_pd->region != region) { tasklet_disable(&dfs_pd->dfs_tasklet); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c index b814391f79ac..f229c6eb65dc 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c @@ -16,11 +16,11 @@ #include <linux/delay.h> #include "mt76x2.h" -#include "mt76x2_eeprom.h" -#include "mt76x2_mcu.h" +#include "eeprom.h" +#include "mcu.h" static void -mt76x2_mac_pbf_init(struct mt76x2_dev *dev) +mt76x2_mac_pbf_init(struct mt76x02_dev *dev) { u32 val; @@ -38,12 +38,12 @@ mt76x2_mac_pbf_init(struct 
mt76x2_dev *dev) } static void -mt76x2_fixup_xtal(struct mt76x2_dev *dev) +mt76x2_fixup_xtal(struct mt76x02_dev *dev) { u16 eep_val; s8 offset = 0; - eep_val = mt76x2_eeprom_get(dev, MT_EE_XTAL_TRIM_2); + eep_val = mt76x02_eeprom_get(&dev->mt76, MT_EE_XTAL_TRIM_2); offset = eep_val & 0x7f; if ((eep_val & 0xff) == 0xff) @@ -53,7 +53,7 @@ mt76x2_fixup_xtal(struct mt76x2_dev *dev) eep_val >>= 8; if (eep_val == 0x00 || eep_val == 0xff) { - eep_val = mt76x2_eeprom_get(dev, MT_EE_XTAL_TRIM_1); + eep_val = mt76x02_eeprom_get(&dev->mt76, MT_EE_XTAL_TRIM_1); eep_val &= 0xff; if (eep_val == 0x00 || eep_val == 0xff) @@ -64,7 +64,7 @@ mt76x2_fixup_xtal(struct mt76x2_dev *dev) mt76_rmw_field(dev, MT_XO_CTRL5, MT_XO_CTRL5_C2_VAL, eep_val + offset); mt76_set(dev, MT_XO_CTRL6, MT_XO_CTRL6_C2_CTRL); - eep_val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_2); + eep_val = mt76x02_eeprom_get(&dev->mt76, MT_EE_NIC_CONF_2); switch (FIELD_GET(MT_EE_NIC_CONF_2_XTAL_OPTION, eep_val)) { case 0: mt76_wr(dev, MT_XO_CTRL7, 0x5c1fee80); @@ -77,31 +77,14 @@ mt76x2_fixup_xtal(struct mt76x2_dev *dev) } } -static void -mt76x2_init_beacon_offsets(struct mt76x2_dev *dev) -{ - u16 base = MT_BEACON_BASE; - u32 regs[4] = {}; - int i; - - for (i = 0; i < 16; i++) { - u16 addr = dev->beacon_offsets[i]; - - regs[i / 4] |= ((addr - base) / 64) << (8 * (i % 4)); - } - - for (i = 0; i < 4; i++) - mt76_wr(dev, MT_BCN_OFFSET(i), regs[i]); -} - -static int mt76x2_mac_reset(struct mt76x2_dev *dev, bool hard) +static int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard) { static const u8 null_addr[ETH_ALEN] = {}; const u8 *macaddr = dev->mt76.macaddr; u32 val; int i, k; - if (!mt76x2_wait_for_mac(dev)) + if (!mt76x02_wait_for_mac(&dev->mt76)) return -ETIMEDOUT; val = mt76_rr(dev, MT_WPDMA_GLO_CFG); @@ -160,14 +143,14 @@ static int mt76x2_mac_reset(struct mt76x2_dev *dev, bool hard) mt76_wr(dev, MT_WCID_DROP_BASE + i * 4, 0); for (i = 0; i < 256; i++) - mt76x2_mac_wcid_setup(dev, i, 0, NULL); + mt76x02_mac_wcid_setup(&dev->mt76, i, 0, NULL); for (i = 0; i < MT_MAX_VIFS; i++) - mt76x2_mac_wcid_setup(dev, MT_VIF_WCID(i), i, NULL); + mt76x02_mac_wcid_setup(&dev->mt76, MT_VIF_WCID(i), i, NULL); for (i = 0; i < 16; i++) for (k = 0; k < 4; k++) - mt76x2_mac_shared_key_setup(dev, i, k, NULL); + mt76x02_mac_shared_key_setup(&dev->mt76, i, k, NULL); for (i = 0; i < 8; i++) { mt76x2_mac_set_bssid(dev, i, null_addr); @@ -185,14 +168,14 @@ static int mt76x2_mac_reset(struct mt76x2_dev *dev, bool hard) MT_CH_TIME_CFG_EIFS_AS_BUSY | FIELD_PREP(MT_CH_TIME_CFG_CH_TIMER_CLR, 1)); - mt76x2_init_beacon_offsets(dev); + mt76x02_set_beacon_offsets(&dev->mt76); mt76x2_set_tx_ackto(dev); return 0; } -int mt76x2_mac_start(struct mt76x2_dev *dev) +int mt76x2_mac_start(struct mt76x02_dev *dev) { int i; @@ -203,30 +186,12 @@ int mt76x2_mac_start(struct mt76x2_dev *dev) mt76_rr(dev, MT_TX_STAT_FIFO); memset(dev->aggr_stats, 0, sizeof(dev->aggr_stats)); - - mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX); - wait_for_wpdma(dev); - usleep_range(50, 100); - - mt76_set(dev, MT_WPDMA_GLO_CFG, - MT_WPDMA_GLO_CFG_TX_DMA_EN | - MT_WPDMA_GLO_CFG_RX_DMA_EN); - - mt76_clear(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE); - - mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter); - - mt76_wr(dev, MT_MAC_SYS_CTRL, - MT_MAC_SYS_CTRL_ENABLE_TX | - MT_MAC_SYS_CTRL_ENABLE_RX); - - mt76x2_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL | - MT_INT_TX_STAT); + mt76x02_mac_start(dev); return 0; } -void mt76x2_mac_resume(struct mt76x2_dev *dev) +void 
mt76x2_mac_resume(struct mt76x02_dev *dev) { mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX | @@ -234,7 +199,7 @@ void mt76x2_mac_resume(struct mt76x2_dev *dev) } static void -mt76x2_power_on_rf_patch(struct mt76x2_dev *dev) +mt76x2_power_on_rf_patch(struct mt76x02_dev *dev) { mt76_set(dev, 0x10130, BIT(0) | BIT(16)); udelay(1); @@ -255,7 +220,7 @@ mt76x2_power_on_rf_patch(struct mt76x2_dev *dev) } static void -mt76x2_power_on_rf(struct mt76x2_dev *dev, int unit) +mt76x2_power_on_rf(struct mt76x02_dev *dev, int unit) { int shift = unit ? 8 : 0; @@ -277,7 +242,7 @@ mt76x2_power_on_rf(struct mt76x2_dev *dev, int unit) } static void -mt76x2_power_on(struct mt76x2_dev *dev) +mt76x2_power_on(struct mt76x02_dev *dev) { u32 val; @@ -312,7 +277,7 @@ mt76x2_power_on(struct mt76x2_dev *dev) mt76x2_power_on_rf(dev, 1); } -void mt76x2_set_tx_ackto(struct mt76x2_dev *dev) +void mt76x2_set_tx_ackto(struct mt76x02_dev *dev) { u8 ackto, sifs, slottime = dev->slottime; @@ -329,43 +294,14 @@ void mt76x2_set_tx_ackto(struct mt76x2_dev *dev) MT_TX_TIMEOUT_CFG_ACKTO, ackto); } -int mt76x2_init_hardware(struct mt76x2_dev *dev) +int mt76x2_init_hardware(struct mt76x02_dev *dev) { - static const u16 beacon_offsets[16] = { - /* 1024 byte per beacon */ - 0xc000, - 0xc400, - 0xc800, - 0xcc00, - 0xd000, - 0xd400, - 0xd800, - 0xdc00, - - /* BSS idx 8-15 not used for beacons */ - 0xc000, - 0xc000, - 0xc000, - 0xc000, - 0xc000, - 0xc000, - 0xc000, - 0xc000, - }; - u32 val; int ret; - dev->beacon_offsets = beacon_offsets; tasklet_init(&dev->pre_tbtt_tasklet, mt76x2_pre_tbtt_tasklet, (unsigned long) dev); - val = mt76_rr(dev, MT_WPDMA_GLO_CFG); - val &= MT_WPDMA_GLO_CFG_DMA_BURST_SIZE | - MT_WPDMA_GLO_CFG_BIG_ENDIAN | - MT_WPDMA_GLO_CFG_HDR_SEG_LEN; - val |= MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE; - mt76_wr(dev, MT_WPDMA_GLO_CFG, val); - + mt76x02_dma_disable(dev); mt76x2_reset_wlan(dev, true); mt76x2_power_on(dev); @@ -377,9 +313,9 @@ int mt76x2_init_hardware(struct mt76x2_dev *dev) if (ret) return ret; - dev->rxfilter = mt76_rr(dev, MT_RX_FILTR_CFG); + dev->mt76.rxfilter = mt76_rr(dev, MT_RX_FILTR_CFG); - ret = mt76x2_dma_init(dev); + ret = mt76x02_dma_init(dev); if (ret) return ret; @@ -397,46 +333,44 @@ int mt76x2_init_hardware(struct mt76x2_dev *dev) return 0; } -void mt76x2_stop_hardware(struct mt76x2_dev *dev) +void mt76x2_stop_hardware(struct mt76x02_dev *dev) { cancel_delayed_work_sync(&dev->cal_work); cancel_delayed_work_sync(&dev->mac_work); - mt76x2_mcu_set_radio_state(dev, false); + mt76x02_mcu_set_radio_state(&dev->mt76, false, true); mt76x2_mac_stop(dev, false); } -void mt76x2_cleanup(struct mt76x2_dev *dev) +void mt76x2_cleanup(struct mt76x02_dev *dev) { tasklet_disable(&dev->dfs_pd.dfs_tasklet); tasklet_disable(&dev->pre_tbtt_tasklet); mt76x2_stop_hardware(dev); - mt76x2_dma_cleanup(dev); - mt76x2_mcu_cleanup(dev); + mt76x02_dma_cleanup(dev); + mt76x02_mcu_cleanup(&dev->mt76); } -struct mt76x2_dev *mt76x2_alloc_device(struct device *pdev) +struct mt76x02_dev *mt76x2_alloc_device(struct device *pdev) { static const struct mt76_driver_ops drv_ops = { - .txwi_size = sizeof(struct mt76x2_txwi), + .txwi_size = sizeof(struct mt76x02_txwi), .update_survey = mt76x2_update_channel, - .tx_prepare_skb = mt76x2_tx_prepare_skb, - .tx_complete_skb = mt76x2_tx_complete_skb, - .rx_skb = mt76x2_queue_rx_skb, - .rx_poll_complete = mt76x2_rx_poll_complete, + .tx_prepare_skb = mt76x02_tx_prepare_skb, + .tx_complete_skb = mt76x02_tx_complete_skb, + .rx_skb = mt76x02_queue_rx_skb, + .rx_poll_complete = 
mt76x02_rx_poll_complete, .sta_ps = mt76x2_sta_ps, }; - struct mt76x2_dev *dev; + struct mt76x02_dev *dev; struct mt76_dev *mdev; mdev = mt76_alloc_device(sizeof(*dev), &mt76x2_ops); if (!mdev) return NULL; - dev = container_of(mdev, struct mt76x2_dev, mt76); + dev = container_of(mdev, struct mt76x02_dev, mt76); mdev->dev = pdev; mdev->drv = &drv_ops; - mutex_init(&dev->mutex); - spin_lock_init(&dev->irq_lock); return dev; } @@ -445,7 +379,7 @@ static void mt76x2_regd_notifier(struct wiphy *wiphy, struct regulatory_request *request) { struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); - struct mt76x2_dev *dev = hw->priv; + struct mt76x02_dev *dev = hw->priv; mt76x2_dfs_set_domain(dev, request->dfs_region); } @@ -481,8 +415,8 @@ static const struct ieee80211_iface_combination if_comb[] = { static void mt76x2_led_set_config(struct mt76_dev *mt76, u8 delay_on, u8 delay_off) { - struct mt76x2_dev *dev = container_of(mt76, struct mt76x2_dev, - mt76); + struct mt76x02_dev *dev = container_of(mt76, struct mt76x02_dev, + mt76); u32 val; val = MT_LED_STATUS_DURATION(0xff) | @@ -526,20 +460,12 @@ static void mt76x2_led_set_brightness(struct led_classdev *led_cdev, mt76x2_led_set_config(mt76, 0xff, 0); } -int mt76x2_register_device(struct mt76x2_dev *dev) +int mt76x2_register_device(struct mt76x02_dev *dev) { struct ieee80211_hw *hw = mt76_hw(dev); struct wiphy *wiphy = hw->wiphy; - void *status_fifo; - int fifo_size; int i, ret; - fifo_size = roundup_pow_of_two(32 * sizeof(struct mt76x2_tx_status)); - status_fifo = devm_kzalloc(dev->mt76.dev, fifo_size, GFP_KERNEL); - if (!status_fifo) - return -ENOMEM; - - kfifo_init(&dev->txstatus_fifo, status_fifo, fifo_size); INIT_DELAYED_WORK(&dev->cal_work, mt76x2_phy_calibrate); INIT_DELAYED_WORK(&dev->mac_work, mt76x2_mac_work); @@ -584,8 +510,8 @@ int mt76x2_register_device(struct mt76x2_dev *dev) dev->mt76.led_cdev.brightness_set = mt76x2_led_set_brightness; dev->mt76.led_cdev.blink_set = mt76x2_led_set_blink; - ret = mt76_register_device(&dev->mt76, true, mt76x2_rates, - ARRAY_SIZE(mt76x2_rates)); + ret = mt76_register_device(&dev->mt76, true, mt76x02_rates, + ARRAY_SIZE(mt76x02_rates)); if (ret) goto fail; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mac.c index 23cf437d14f9..08366c5988ea 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mac.c @@ -16,11 +16,10 @@ #include <linux/delay.h> #include "mt76x2.h" -#include "mt76x2_mcu.h" -#include "mt76x2_eeprom.h" -#include "mt76x2_trace.h" +#include "mcu.h" +#include "eeprom.h" -void mt76x2_mac_set_bssid(struct mt76x2_dev *dev, u8 idx, const u8 *addr) +void mt76x2_mac_set_bssid(struct mt76x02_dev *dev, u8 idx, const u8 *addr) { idx &= 7; mt76_wr(dev, MT_MAC_APC_BSSID_L(idx), get_unaligned_le32(addr)); @@ -28,84 +27,16 @@ void mt76x2_mac_set_bssid(struct mt76x2_dev *dev, u8 idx, const u8 *addr) get_unaligned_le16(addr + 4)); } -void mt76x2_mac_poll_tx_status(struct mt76x2_dev *dev, bool irq) -{ - struct mt76x2_tx_status stat = {}; - unsigned long flags; - u8 update = 1; - bool ret; - - if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state)) - return; - - trace_mac_txstat_poll(dev); - - while (!irq || !kfifo_is_full(&dev->txstatus_fifo)) { - spin_lock_irqsave(&dev->irq_lock, flags); - ret = mt76x2_mac_load_tx_status(dev, &stat); - spin_unlock_irqrestore(&dev->irq_lock, flags); - - if (!ret) - break; - - trace_mac_txstat_fetch(dev, &stat); - - if (!irq) { - mt76x2_send_tx_status(dev, 
&stat, &update); - continue; - } - - kfifo_put(&dev->txstatus_fifo, stat); - } -} - -static void -mt76x2_mac_queue_txdone(struct mt76x2_dev *dev, struct sk_buff *skb, - void *txwi_ptr) -{ - struct mt76x2_tx_info *txi = mt76x2_skb_tx_info(skb); - struct mt76x2_txwi *txwi = txwi_ptr; - - mt76x2_mac_poll_tx_status(dev, false); - - txi->tries = 0; - txi->jiffies = jiffies; - txi->wcid = txwi->wcid; - txi->pktid = txwi->pktid; - trace_mac_txdone_add(dev, txwi->wcid, txwi->pktid); - mt76x2_tx_complete(dev, skb); -} - -void mt76x2_mac_process_tx_status_fifo(struct mt76x2_dev *dev) -{ - struct mt76x2_tx_status stat; - u8 update = 1; - - while (kfifo_get(&dev->txstatus_fifo, &stat)) - mt76x2_send_tx_status(dev, &stat, &update); -} - -void mt76x2_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q, - struct mt76_queue_entry *e, bool flush) -{ - struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76); - - if (e->txwi) - mt76x2_mac_queue_txdone(dev, e->skb, &e->txwi->txwi); - else - dev_kfree_skb_any(e->skb); -} - static int -mt76_write_beacon(struct mt76x2_dev *dev, int offset, struct sk_buff *skb) +mt76_write_beacon(struct mt76x02_dev *dev, int offset, struct sk_buff *skb) { - int beacon_len = dev->beacon_offsets[1] - dev->beacon_offsets[0]; - struct mt76x2_txwi txwi; + int beacon_len = mt76x02_beacon_offsets[1] - mt76x02_beacon_offsets[0]; + struct mt76x02_txwi txwi; - if (WARN_ON_ONCE(beacon_len < skb->len + sizeof(struct mt76x2_txwi))) + if (WARN_ON_ONCE(beacon_len < skb->len + sizeof(struct mt76x02_txwi))) return -ENOSPC; - mt76x2_mac_write_txwi(dev, &txwi, skb, NULL, NULL, skb->len); + mt76x02_mac_write_txwi(&dev->mt76, &txwi, skb, NULL, NULL, skb->len); mt76_wr_copy(dev, offset, &txwi, sizeof(txwi)); offset += sizeof(txwi); @@ -115,10 +46,10 @@ mt76_write_beacon(struct mt76x2_dev *dev, int offset, struct sk_buff *skb) } static int -__mt76x2_mac_set_beacon(struct mt76x2_dev *dev, u8 bcn_idx, struct sk_buff *skb) +__mt76x2_mac_set_beacon(struct mt76x02_dev *dev, u8 bcn_idx, struct sk_buff *skb) { - int beacon_len = dev->beacon_offsets[1] - dev->beacon_offsets[0]; - int beacon_addr = dev->beacon_offsets[bcn_idx]; + int beacon_len = mt76x02_beacon_offsets[1] - mt76x02_beacon_offsets[0]; + int beacon_addr = mt76x02_beacon_offsets[bcn_idx]; int ret = 0; int i; @@ -128,8 +59,7 @@ __mt76x2_mac_set_beacon(struct mt76x2_dev *dev, u8 bcn_idx, struct sk_buff *skb) if (skb) { ret = mt76_write_beacon(dev, beacon_addr, skb); if (!ret) - dev->beacon_data_mask |= BIT(bcn_idx) & - dev->beacon_mask; + dev->beacon_data_mask |= BIT(bcn_idx); } else { dev->beacon_data_mask &= ~BIT(bcn_idx); for (i = 0; i < beacon_len; i += 4) @@ -141,7 +71,7 @@ __mt76x2_mac_set_beacon(struct mt76x2_dev *dev, u8 bcn_idx, struct sk_buff *skb) return ret; } -int mt76x2_mac_set_beacon(struct mt76x2_dev *dev, u8 vif_idx, +int mt76x2_mac_set_beacon(struct mt76x02_dev *dev, u8 vif_idx, struct sk_buff *skb) { bool force_update = false; @@ -176,7 +106,8 @@ int mt76x2_mac_set_beacon(struct mt76x2_dev *dev, u8 vif_idx, return 0; } -void mt76x2_mac_set_beacon_enable(struct mt76x2_dev *dev, u8 vif_idx, bool val) +void mt76x2_mac_set_beacon_enable(struct mt76x02_dev *dev, + u8 vif_idx, bool val) { u8 old_mask = dev->beacon_mask; bool en; @@ -201,14 +132,14 @@ void mt76x2_mac_set_beacon_enable(struct mt76x2_dev *dev, u8 vif_idx, bool val) mt76_rmw(dev, MT_BEACON_TIME_CFG, reg, reg * en); if (en) - mt76x2_irq_enable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT); + mt76x02_irq_enable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT); else - 
mt76x2_irq_disable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT); + mt76x02_irq_disable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT); } void mt76x2_update_channel(struct mt76_dev *mdev) { - struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76); + struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76); struct mt76_channel_state *state; u32 active, busy; @@ -225,8 +156,8 @@ void mt76x2_update_channel(struct mt76_dev *mdev) void mt76x2_mac_work(struct work_struct *work) { - struct mt76x2_dev *dev = container_of(work, struct mt76x2_dev, - mac_work.work); + struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev, + mac_work.work); int i, idx; mt76x2_update_channel(&dev->mt76); @@ -241,7 +172,7 @@ void mt76x2_mac_work(struct work_struct *work) MT_CALIBRATE_INTERVAL); } -void mt76x2_mac_set_tx_protection(struct mt76x2_dev *dev, u32 val) +void mt76x2_mac_set_tx_protection(struct mt76x02_dev *dev, u32 val) { u32 data = 0; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c index 680a89f8aa87..65fef082e7cc 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_main.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c @@ -19,10 +19,10 @@ static int mt76x2_start(struct ieee80211_hw *hw) { - struct mt76x2_dev *dev = hw->priv; + struct mt76x02_dev *dev = hw->priv; int ret; - mutex_lock(&dev->mutex); + mutex_lock(&dev->mt76.mutex); ret = mt76x2_mac_start(dev); if (ret) @@ -38,57 +38,23 @@ mt76x2_start(struct ieee80211_hw *hw) set_bit(MT76_STATE_RUNNING, &dev->mt76.state); out: - mutex_unlock(&dev->mutex); + mutex_unlock(&dev->mt76.mutex); return ret; } static void mt76x2_stop(struct ieee80211_hw *hw) { - struct mt76x2_dev *dev = hw->priv; + struct mt76x02_dev *dev = hw->priv; - mutex_lock(&dev->mutex); + mutex_lock(&dev->mt76.mutex); clear_bit(MT76_STATE_RUNNING, &dev->mt76.state); mt76x2_stop_hardware(dev); - mutex_unlock(&dev->mutex); -} - -static int -mt76x2_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) -{ - struct mt76x2_dev *dev = hw->priv; - struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv; - unsigned int idx = 0; - - if (vif->addr[0] & BIT(1)) - idx = 1 + (((dev->mt76.macaddr[0] ^ vif->addr[0]) >> 2) & 7); - - /* - * Client mode typically only has one configurable BSSID register, - * which is used for bssidx=0. This is linked to the MAC address. - * Since mac80211 allows changing interface types, and we cannot - * force the use of the primary MAC address for a station mode - * interface, we need some other way of configuring a per-interface - * remote BSSID. - * The hardware provides an AP-Client feature, where bssidx 0-7 are - * used for AP mode and bssidx 8-15 for client mode. - * We shift the station interface bss index by 8 to force the - * hardware to recognize the BSSID. - * The resulting bssidx mismatch for unicast frames is ignored by hw. 
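 *
 * (Worked example, added here for illustration; it is not part of the
 * original comment: with dev->mt76.macaddr[0] == 0x00 and a locally
 * administered vif address whose first byte is 0x06, the lookup gives
 * idx = 1 + (((0x00 ^ 0x06) >> 2) & 7) = 2; a station interface is then
 * shifted to bssidx 2 + 8 = 10 and its group wcid set to MT_VIF_WCID(10).)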
- */ - if (vif->type == NL80211_IFTYPE_STATION) - idx += 8; - - mvif->idx = idx; - mvif->group_wcid.idx = MT_VIF_WCID(idx); - mvif->group_wcid.hw_key_idx = -1; - mt76x2_txq_init(dev, vif->txq); - - return 0; + mutex_unlock(&dev->mt76.mutex); } static int -mt76x2_set_channel(struct mt76x2_dev *dev, struct cfg80211_chan_def *chandef) +mt76x2_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef) { int ret; @@ -124,29 +90,29 @@ mt76x2_set_channel(struct mt76x2_dev *dev, struct cfg80211_chan_def *chandef) static int mt76x2_config(struct ieee80211_hw *hw, u32 changed) { - struct mt76x2_dev *dev = hw->priv; + struct mt76x02_dev *dev = hw->priv; int ret = 0; - mutex_lock(&dev->mutex); + mutex_lock(&dev->mt76.mutex); if (changed & IEEE80211_CONF_CHANGE_MONITOR) { if (!(hw->conf.flags & IEEE80211_CONF_MONITOR)) - dev->rxfilter |= MT_RX_FILTR_CFG_PROMISC; + dev->mt76.rxfilter |= MT_RX_FILTR_CFG_PROMISC; else - dev->rxfilter &= ~MT_RX_FILTR_CFG_PROMISC; + dev->mt76.rxfilter &= ~MT_RX_FILTR_CFG_PROMISC; - mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter); + mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter); } if (changed & IEEE80211_CONF_CHANGE_POWER) { - dev->txpower_conf = hw->conf.power_level * 2; + dev->mt76.txpower_conf = hw->conf.power_level * 2; /* convert to per-chain power for 2x2 devices */ - dev->txpower_conf -= 6; + dev->mt76.txpower_conf -= 6; if (test_bit(MT76_STATE_RUNNING, &dev->mt76.state)) { mt76x2_phy_set_txpower(dev); - mt76x2_tx_set_txpwr_auto(dev, dev->txpower_conf); + mt76x02_tx_set_txpwr_auto(dev, dev->mt76.txpower_conf); } } @@ -156,7 +122,7 @@ mt76x2_config(struct ieee80211_hw *hw, u32 changed) ieee80211_wake_queues(hw); } - mutex_unlock(&dev->mutex); + mutex_unlock(&dev->mt76.mutex); return ret; } @@ -165,10 +131,10 @@ static void mt76x2_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *info, u32 changed) { - struct mt76x2_dev *dev = hw->priv; - struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv; + struct mt76x02_dev *dev = hw->priv; + struct mt76x02_vif *mvif = (struct mt76x02_vif *) vif->drv_priv; - mutex_lock(&dev->mutex); + mutex_lock(&dev->mt76.mutex); if (changed & BSS_CHANGED_BSSID) mt76x2_mac_set_bssid(dev, mvif->idx, info->bssid); @@ -195,25 +161,25 @@ mt76x2_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, mt76x2_set_tx_ackto(dev); } - mutex_unlock(&dev->mutex); + mutex_unlock(&dev->mt76.mutex); } void mt76x2_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps) { - struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv; - struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76); + struct mt76x02_sta *msta = (struct mt76x02_sta *) sta->drv_priv; + struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76); int idx = msta->wcid.idx; mt76_stop_tx_queues(&dev->mt76, sta, true); - mt76x2_mac_wcid_set_drop(dev, idx, ps); + mt76x02_mac_wcid_set_drop(&dev->mt76, idx, ps); } static void mt76x2_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const u8 *mac) { - struct mt76x2_dev *dev = hw->priv; + struct mt76x02_dev *dev = hw->priv; tasklet_disable(&dev->pre_tbtt_tasklet); set_bit(MT76_SCANNING, &dev->mt76.state); @@ -222,7 +188,7 @@ mt76x2_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, static void mt76x2_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { - struct mt76x2_dev *dev = hw->priv; + struct mt76x02_dev *dev = hw->priv; clear_bit(MT76_SCANNING, &dev->mt76.state); 
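	/*
	 * (Worked example, not from the patch:) the power math in
	 * mt76x2_config() above is done in half-dB steps. For a requested
	 * hw->conf.power_level of 20 dBm:
	 *
	 *	int per_chain = 20 * 2 - 6;	   /* 34 half-dB = 17 dBm/chain */
	 *	int reported  = per_chain / 2 + 3; /* back to 20 dBm combined */
	 *
	 * The subtracted 6 half-dB (3 dB) accounts for the array gain of
	 * combining two chains on a 2x2 device; mt76x2_get_txpower()
	 * applies the inverse when reporting to mac80211.
	 */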
tasklet_enable(&dev->pre_tbtt_tasklet); @@ -237,9 +203,9 @@ mt76x2_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, static int mt76x2_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int *dbm) { - struct mt76x2_dev *dev = hw->priv; + struct mt76x02_dev *dev = hw->priv; - *dbm = dev->txpower_cur / 2; + *dbm = dev->mt76.txpower_cur / 2; /* convert from per-chain power to combined output on 2x2 devices */ *dbm += 3; @@ -250,12 +216,12 @@ mt76x2_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int *dbm) static void mt76x2_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class) { - struct mt76x2_dev *dev = hw->priv; + struct mt76x02_dev *dev = hw->priv; - mutex_lock(&dev->mutex); + mutex_lock(&dev->mt76.mutex); dev->coverage_class = coverage_class; mt76x2_set_tx_ackto(dev); - mutex_unlock(&dev->mutex); + mutex_unlock(&dev->mt76.mutex); } static int @@ -267,20 +233,20 @@ mt76x2_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set) static int mt76x2_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant) { - struct mt76x2_dev *dev = hw->priv; + struct mt76x02_dev *dev = hw->priv; if (!tx_ant || tx_ant > 3 || tx_ant != rx_ant) return -EINVAL; - mutex_lock(&dev->mutex); + mutex_lock(&dev->mt76.mutex); - dev->chainmask = (tx_ant == 3) ? 0x202 : 0x101; + dev->mt76.chainmask = (tx_ant == 3) ? 0x202 : 0x101; dev->mt76.antenna_mask = tx_ant; mt76_set_stream_caps(&dev->mt76, true); mt76x2_phy_set_antenna(dev); - mutex_unlock(&dev->mutex); + mutex_unlock(&dev->mt76.mutex); return 0; } @@ -288,12 +254,12 @@ static int mt76x2_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, static int mt76x2_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant) { - struct mt76x2_dev *dev = hw->priv; + struct mt76x02_dev *dev = hw->priv; - mutex_lock(&dev->mutex); + mutex_lock(&dev->mt76.mutex); *tx_ant = dev->mt76.antenna_mask; *rx_ant = dev->mt76.antenna_mask; - mutex_unlock(&dev->mutex); + mutex_unlock(&dev->mt76.mutex); return 0; } @@ -301,7 +267,7 @@ static int mt76x2_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, static int mt76x2_set_rts_threshold(struct ieee80211_hw *hw, u32 val) { - struct mt76x2_dev *dev = hw->priv; + struct mt76x02_dev *dev = hw->priv; if (val != ~0 && val > 0xffff) return -EINVAL; @@ -314,25 +280,25 @@ mt76x2_set_rts_threshold(struct ieee80211_hw *hw, u32 val) } const struct ieee80211_ops mt76x2_ops = { - .tx = mt76x2_tx, + .tx = mt76x02_tx, .start = mt76x2_start, .stop = mt76x2_stop, - .add_interface = mt76x2_add_interface, - .remove_interface = mt76x2_remove_interface, + .add_interface = mt76x02_add_interface, + .remove_interface = mt76x02_remove_interface, .config = mt76x2_config, - .configure_filter = mt76x2_configure_filter, + .configure_filter = mt76x02_configure_filter, .bss_info_changed = mt76x2_bss_info_changed, - .sta_add = mt76x2_sta_add, - .sta_remove = mt76x2_sta_remove, - .set_key = mt76x2_set_key, - .conf_tx = mt76x2_conf_tx, + .sta_add = mt76x02_sta_add, + .sta_remove = mt76x02_sta_remove, + .set_key = mt76x02_set_key, + .conf_tx = mt76x02_conf_tx, .sw_scan_start = mt76x2_sw_scan, .sw_scan_complete = mt76x2_sw_scan_complete, .flush = mt76x2_flush, - .ampdu_action = mt76x2_ampdu_action, + .ampdu_action = mt76x02_ampdu_action, .get_txpower = mt76x2_get_txpower, .wake_tx_queue = mt76_wake_tx_queue, - .sta_rate_tbl_update = mt76x2_sta_rate_tbl_update, + .sta_rate_tbl_update = mt76x02_sta_rate_tbl_update, .release_buffered_frames = mt76_release_buffered_frames, .set_coverage_class = 
mt76x2_set_coverage_class, .get_survey = mt76_get_survey, diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c new file mode 100644 index 000000000000..898aa229671c --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c @@ -0,0 +1,188 @@ +/* + * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include <linux/kernel.h> +#include <linux/firmware.h> +#include <linux/delay.h> + +#include "mt76x2.h" +#include "mcu.h" +#include "eeprom.h" + +static int +mt76pci_load_rom_patch(struct mt76x02_dev *dev) +{ + const struct firmware *fw = NULL; + struct mt76x02_patch_header *hdr; + bool rom_protect = !is_mt7612(dev); + int len, ret = 0; + __le32 *cur; + u32 patch_mask, patch_reg; + + if (rom_protect && !mt76_poll(dev, MT_MCU_SEMAPHORE_03, 1, 1, 600)) { + dev_err(dev->mt76.dev, + "Could not get hardware semaphore for ROM PATCH\n"); + return -ETIMEDOUT; + } + + if (mt76xx_rev(dev) >= MT76XX_REV_E3) { + patch_mask = BIT(0); + patch_reg = MT_MCU_CLOCK_CTL; + } else { + patch_mask = BIT(1); + patch_reg = MT_MCU_COM_REG0; + } + + if (rom_protect && (mt76_rr(dev, patch_reg) & patch_mask)) { + dev_info(dev->mt76.dev, "ROM patch already applied\n"); + goto out; + } + + ret = request_firmware(&fw, MT7662_ROM_PATCH, dev->mt76.dev); + if (ret) + goto out; + + if (!fw || !fw->data || fw->size <= sizeof(*hdr)) { + ret = -EIO; + dev_err(dev->mt76.dev, "Failed to load firmware\n"); + goto out; + } + + hdr = (struct mt76x02_patch_header *)fw->data; + dev_info(dev->mt76.dev, "ROM patch build: %.15s\n", hdr->build_time); + + mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_ROM_PATCH_OFFSET); + + cur = (__le32 *) (fw->data + sizeof(*hdr)); + len = fw->size - sizeof(*hdr); + mt76_wr_copy(dev, MT_MCU_ROM_PATCH_ADDR, cur, len); + + mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, 0); + + /* Trigger ROM */ + mt76_wr(dev, MT_MCU_INT_LEVEL, 4); + + if (!mt76_poll_msec(dev, patch_reg, patch_mask, patch_mask, 2000)) { + dev_err(dev->mt76.dev, "Failed to load ROM patch\n"); + ret = -ETIMEDOUT; + } + +out: + /* release semaphore */ + if (rom_protect) + mt76_wr(dev, MT_MCU_SEMAPHORE_03, 1); + release_firmware(fw); + return ret; +} + +static int +mt76pci_load_firmware(struct mt76x02_dev *dev) +{ + const struct firmware *fw; + const struct mt76x02_fw_header *hdr; + int len, ret; + __le32 *cur; + u32 offset, val; + + ret = request_firmware(&fw, MT7662_FIRMWARE, dev->mt76.dev); + if (ret) + return ret; + + if (!fw || !fw->data || fw->size < sizeof(*hdr)) + goto error; + + hdr = (const struct mt76x02_fw_header *)fw->data; + + len = sizeof(*hdr); + len += le32_to_cpu(hdr->ilm_len); + len += le32_to_cpu(hdr->dlm_len); + + if (fw->size != len) + goto error; + + val = le16_to_cpu(hdr->fw_ver); + dev_info(dev->mt76.dev, "Firmware 
Version: %d.%d.%02d\n", + (val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf); + + val = le16_to_cpu(hdr->build_ver); + dev_info(dev->mt76.dev, "Build: %x\n", val); + dev_info(dev->mt76.dev, "Build Time: %.16s\n", hdr->build_time); + + cur = (__le32 *) (fw->data + sizeof(*hdr)); + len = le32_to_cpu(hdr->ilm_len); + + mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_ILM_OFFSET); + mt76_wr_copy(dev, MT_MCU_ILM_ADDR, cur, len); + + cur += len / sizeof(*cur); + len = le32_to_cpu(hdr->dlm_len); + + if (mt76xx_rev(dev) >= MT76XX_REV_E3) + offset = MT_MCU_DLM_ADDR_E3; + else + offset = MT_MCU_DLM_ADDR; + + mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_DLM_OFFSET); + mt76_wr_copy(dev, offset, cur, len); + + mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, 0); + + val = mt76x02_eeprom_get(&dev->mt76, MT_EE_NIC_CONF_2); + if (FIELD_GET(MT_EE_NIC_CONF_2_XTAL_OPTION, val) == 1) + mt76_set(dev, MT_MCU_COM_REG0, BIT(30)); + + /* trigger firmware */ + mt76_wr(dev, MT_MCU_INT_LEVEL, 2); + if (!mt76_poll_msec(dev, MT_MCU_COM_REG0, 1, 1, 200)) { + dev_err(dev->mt76.dev, "Firmware failed to start\n"); + release_firmware(fw); + return -ETIMEDOUT; + } + + dev_info(dev->mt76.dev, "Firmware running!\n"); + mt76x02_set_ethtool_fwver(&dev->mt76, hdr); + + release_firmware(fw); + + return ret; + +error: + dev_err(dev->mt76.dev, "Invalid firmware\n"); + release_firmware(fw); + return -ENOENT; +} + +int mt76x2_mcu_init(struct mt76x02_dev *dev) +{ + static const struct mt76_mcu_ops mt76x2_mcu_ops = { + .mcu_msg_alloc = mt76x02_mcu_msg_alloc, + .mcu_send_msg = mt76x02_mcu_msg_send, + }; + int ret; + + dev->mt76.mcu_ops = &mt76x2_mcu_ops; + + ret = mt76pci_load_rom_patch(dev); + if (ret) + return ret; + + ret = mt76pci_load_firmware(dev); + if (ret) + return ret; + + mt76x02_mcu_function_select(&dev->mt76, Q_SELECT, 1, true); + return 0; +} diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c index 84c96c0415b6..40ea5f7480fb 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c @@ -16,16 +16,17 @@ #include <linux/delay.h> #include "mt76x2.h" -#include "mt76x2_mcu.h" -#include "mt76x2_eeprom.h" +#include "mcu.h" +#include "eeprom.h" +#include "../mt76x02_phy.h" static bool -mt76x2_phy_tssi_init_cal(struct mt76x2_dev *dev) +mt76x2_phy_tssi_init_cal(struct mt76x02_dev *dev) { struct ieee80211_channel *chan = dev->mt76.chandef.chan; u32 flag = 0; - if (!mt76x2_tssi_enabled(dev)) + if (!mt76x02_tssi_enabled(&dev->mt76)) return false; if (mt76x2_channel_silent(dev)) @@ -34,16 +35,16 @@ mt76x2_phy_tssi_init_cal(struct mt76x2_dev *dev) if (chan->band == NL80211_BAND_5GHZ) flag |= BIT(0); - if (mt76x2_ext_pa_enabled(dev, chan->band)) + if (mt76x02_ext_pa_enabled(&dev->mt76, chan->band)) flag |= BIT(8); - mt76x2_mcu_calibrate(dev, MCU_CAL_TSSI, flag); + mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TSSI, flag, true); dev->cal.tssi_cal_done = true; return true; } static void -mt76x2_phy_channel_calibrate(struct mt76x2_dev *dev, bool mac_stopped) +mt76x2_phy_channel_calibrate(struct mt76x02_dev *dev, bool mac_stopped) { struct ieee80211_channel *chan = dev->mt76.chandef.chan; bool is_5ghz = chan->band == NL80211_BAND_5GHZ; @@ -61,13 +62,13 @@ mt76x2_phy_channel_calibrate(struct mt76x2_dev *dev, bool mac_stopped) mt76x2_mac_stop(dev, false); if (is_5ghz) - mt76x2_mcu_calibrate(dev, MCU_CAL_LC, 0); + mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_LC, 0, true); - mt76x2_mcu_calibrate(dev, MCU_CAL_TX_LOFT, is_5ghz); - 
mt76x2_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz); - mt76x2_mcu_calibrate(dev, MCU_CAL_RXIQC_FI, is_5ghz); - mt76x2_mcu_calibrate(dev, MCU_CAL_TEMP_SENSOR, 0); - mt76x2_mcu_calibrate(dev, MCU_CAL_TX_SHAPING, 0); + mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TX_LOFT, is_5ghz, true); + mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TXIQ, is_5ghz, true); + mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RXIQC_FI, is_5ghz, true); + mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TEMP_SENSOR, 0, true); + mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TX_SHAPING, 0, true); if (!mac_stopped) mt76x2_mac_resume(dev); @@ -77,7 +78,7 @@ mt76x2_phy_channel_calibrate(struct mt76x2_dev *dev, bool mac_stopped) dev->cal.channel_cal_done = true; } -void mt76x2_phy_set_antenna(struct mt76x2_dev *dev) +void mt76x2_phy_set_antenna(struct mt76x02_dev *dev) { u32 val; @@ -124,14 +125,14 @@ void mt76x2_phy_set_antenna(struct mt76x2_dev *dev) } static void -mt76x2_get_agc_gain(struct mt76x2_dev *dev, u8 *dest) +mt76x2_get_agc_gain(struct mt76x02_dev *dev, u8 *dest) { dest[0] = mt76_get_field(dev, MT_BBP(AGC, 8), MT_BBP_AGC_GAIN); dest[1] = mt76_get_field(dev, MT_BBP(AGC, 9), MT_BBP_AGC_GAIN); } static int -mt76x2_get_rssi_gain_thresh(struct mt76x2_dev *dev) +mt76x2_get_rssi_gain_thresh(struct mt76x02_dev *dev) { switch (dev->mt76.chandef.width) { case NL80211_CHAN_WIDTH_80: @@ -144,7 +145,7 @@ mt76x2_get_rssi_gain_thresh(struct mt76x2_dev *dev) } static int -mt76x2_get_low_rssi_gain_thresh(struct mt76x2_dev *dev) +mt76x2_get_low_rssi_gain_thresh(struct mt76x02_dev *dev) { switch (dev->mt76.chandef.width) { case NL80211_CHAN_WIDTH_80: @@ -157,7 +158,7 @@ mt76x2_get_low_rssi_gain_thresh(struct mt76x2_dev *dev) } static void -mt76x2_phy_set_gain_val(struct mt76x2_dev *dev) +mt76x2_phy_set_gain_val(struct mt76x02_dev *dev) { u32 val; u8 gain_val[2]; @@ -182,7 +183,7 @@ mt76x2_phy_set_gain_val(struct mt76x2_dev *dev) } static void -mt76x2_phy_adjust_vga_gain(struct mt76x2_dev *dev) +mt76x2_phy_adjust_vga_gain(struct mt76x02_dev *dev) { u32 false_cca; u8 limit = dev->cal.low_gain > 0 ? 
16 : 4; @@ -201,7 +202,7 @@ mt76x2_phy_adjust_vga_gain(struct mt76x2_dev *dev) } static void -mt76x2_phy_update_channel_gain(struct mt76x2_dev *dev) +mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev) { u8 *gain = dev->cal.agc_gain_init; u8 low_gain_delta, gain_delta; @@ -209,7 +210,7 @@ mt76x2_phy_update_channel_gain(struct mt76x2_dev *dev) int low_gain; u32 val; - dev->cal.avg_rssi_all = mt76x2_phy_get_min_avg_rssi(dev); + dev->cal.avg_rssi_all = mt76x02_phy_get_min_avg_rssi(&dev->mt76); low_gain = (dev->cal.avg_rssi_all > mt76x2_get_rssi_gain_thresh(dev)) + (dev->cal.avg_rssi_all > mt76x2_get_low_rssi_gain_thresh(dev)); @@ -264,7 +265,7 @@ mt76x2_phy_update_channel_gain(struct mt76x2_dev *dev) mt76_rr(dev, MT_RX_STAT_1); } -int mt76x2_phy_set_channel(struct mt76x2_dev *dev, +int mt76x2_phy_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef) { struct ieee80211_channel *chan = chandef->chan; @@ -360,17 +361,17 @@ int mt76x2_phy_set_channel(struct mt76x2_dev *dev, mt76_set(dev, MT_BBP(RXO, 13), BIT(10)); if (!dev->cal.init_cal_done) { - u8 val = mt76x2_eeprom_get(dev, MT_EE_BT_RCAL_RESULT); + u8 val = mt76x02_eeprom_get(&dev->mt76, MT_EE_BT_RCAL_RESULT); if (val != 0xff) - mt76x2_mcu_calibrate(dev, MCU_CAL_R, 0); + mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_R, 0, true); } - mt76x2_mcu_calibrate(dev, MCU_CAL_RXDCOC, channel); + mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RXDCOC, channel, true); /* Rx LPF calibration */ if (!dev->cal.init_cal_done) - mt76x2_mcu_calibrate(dev, MCU_CAL_RC, 0); + mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RC, 0, true); dev->cal.init_cal_done = true; @@ -390,7 +391,7 @@ int mt76x2_phy_set_channel(struct mt76x2_dev *dev, sizeof(dev->cal.agc_gain_cur)); /* init default values for temp compensation */ - if (mt76x2_tssi_enabled(dev)) { + if (mt76x02_tssi_enabled(&dev->mt76)) { mt76_rmw_field(dev, MT_TX_ALC_CFG_1, MT_TX_ALC_CFG_1_TEMP_COMP, 0x38); mt76_rmw_field(dev, MT_TX_ALC_CFG_2, MT_TX_ALC_CFG_2_TEMP_COMP, @@ -404,48 +405,7 @@ int mt76x2_phy_set_channel(struct mt76x2_dev *dev, } static void -mt76x2_phy_tssi_compensate(struct mt76x2_dev *dev) -{ - struct ieee80211_channel *chan = dev->mt76.chandef.chan; - struct mt76x2_tx_power_info txp; - struct mt76x2_tssi_comp t = {}; - - if (!dev->cal.tssi_cal_done) - return; - - if (!dev->cal.tssi_comp_pending) { - /* TSSI trigger */ - t.cal_mode = BIT(0); - mt76x2_mcu_tssi_comp(dev, &t); - dev->cal.tssi_comp_pending = true; - } else { - if (mt76_rr(dev, MT_BBP(CORE, 34)) & BIT(4)) - return; - - dev->cal.tssi_comp_pending = false; - mt76x2_get_power_info(dev, &txp, chan); - - if (mt76x2_ext_pa_enabled(dev, chan->band)) - t.pa_mode = 1; - - t.cal_mode = BIT(1); - t.slope0 = txp.chain[0].tssi_slope; - t.offset0 = txp.chain[0].tssi_offset; - t.slope1 = txp.chain[1].tssi_slope; - t.offset1 = txp.chain[1].tssi_offset; - mt76x2_mcu_tssi_comp(dev, &t); - - if (t.pa_mode || dev->cal.dpd_cal_done) - return; - - usleep_range(10000, 20000); - mt76x2_mcu_calibrate(dev, MCU_CAL_DPD, chan->hw_value); - dev->cal.dpd_cal_done = true; - } -} - -static void -mt76x2_phy_temp_compensate(struct mt76x2_dev *dev) +mt76x2_phy_temp_compensate(struct mt76x02_dev *dev) { struct mt76x2_temp_comp t; int temp, db_diff; @@ -474,22 +434,22 @@ mt76x2_phy_temp_compensate(struct mt76x2_dev *dev) void mt76x2_phy_calibrate(struct work_struct *work) { - struct mt76x2_dev *dev; + struct mt76x02_dev *dev; - dev = container_of(work, struct mt76x2_dev, cal_work.work); + dev = container_of(work, struct mt76x02_dev, cal_work.work); 
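	/*
	 * (Illustrative note, not part of the patch:) one of the periodic
	 * jobs run below, mt76x2_phy_update_channel_gain(), buckets the
	 * link into three gain regimes by summing two boolean comparisons:
	 *
	 *	low_gain = (avg_rssi > rssi_thresh) +
	 *		   (avg_rssi > low_rssi_thresh);
	 *
	 * With hypothetical thresholds of -68 and -82 dBm (the real values
	 * are elided from this hunk), an average RSSI of -90 dBm gives 0
	 * (keep full gain), -70 dBm gives 1, and -60 dBm gives 2 (strong
	 * signal, so the AGC gain is backed off).
	 */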
mt76x2_phy_channel_calibrate(dev, false); - mt76x2_phy_tssi_compensate(dev); + mt76x2_phy_tssi_compensate(dev, true); mt76x2_phy_temp_compensate(dev); mt76x2_phy_update_channel_gain(dev); ieee80211_queue_delayed_work(mt76_hw(dev), &dev->cal_work, MT_CALIBRATE_INTERVAL); } -int mt76x2_phy_start(struct mt76x2_dev *dev) +int mt76x2_phy_start(struct mt76x02_dev *dev) { int ret; - ret = mt76x2_mcu_set_radio_state(dev, true); + ret = mt76x02_mcu_set_radio_state(&dev->mt76, true, true); if (ret) return ret; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_tx.c index 4c907882e8b0..3a2ec86d3e88 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_tx.c @@ -15,50 +15,18 @@ */ #include "mt76x2.h" -#include "mt76x2_dma.h" struct beacon_bc_data { - struct mt76x2_dev *dev; + struct mt76x02_dev *dev; struct sk_buff_head q; struct sk_buff *tail[8]; }; -int mt76x2_tx_prepare_skb(struct mt76_dev *mdev, void *txwi, - struct sk_buff *skb, struct mt76_queue *q, - struct mt76_wcid *wcid, struct ieee80211_sta *sta, - u32 *tx_info) -{ - struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76); - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - int qsel = MT_QSEL_EDCA; - int ret; - - if (q == &dev->mt76.q_tx[MT_TXQ_PSD] && wcid && wcid->idx < 128) - mt76x2_mac_wcid_set_drop(dev, wcid->idx, false); - - mt76x2_mac_write_txwi(dev, txwi, skb, wcid, sta, skb->len); - - ret = mt76x2_insert_hdr_pad(skb); - if (ret < 0) - return ret; - - if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) - qsel = MT_QSEL_MGMT; - - *tx_info = FIELD_PREP(MT_TXD_INFO_QSEL, qsel) | - MT_TXD_INFO_80211; - - if (!wcid || wcid->hw_key_idx == 0xff || wcid->sw_iv) - *tx_info |= MT_TXD_INFO_WIV; - - return 0; -} - static void mt76x2_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif) { - struct mt76x2_dev *dev = (struct mt76x2_dev *) priv; - struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv; + struct mt76x02_dev *dev = (struct mt76x02_dev *) priv; + struct mt76x02_vif *mvif = (struct mt76x02_vif *) vif->drv_priv; struct sk_buff *skb = NULL; if (!(dev->beacon_mask & BIT(mvif->idx))) @@ -75,8 +43,8 @@ static void mt76x2_add_buffered_bc(void *priv, u8 *mac, struct ieee80211_vif *vif) { struct beacon_bc_data *data = priv; - struct mt76x2_dev *dev = data->dev; - struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv; + struct mt76x02_dev *dev = data->dev; + struct mt76x02_vif *mvif = (struct mt76x02_vif *) vif->drv_priv; struct ieee80211_tx_info *info; struct sk_buff *skb; @@ -96,7 +64,7 @@ mt76x2_add_buffered_bc(void *priv, u8 *mac, struct ieee80211_vif *vif) } static void -mt76x2_resync_beacon_timer(struct mt76x2_dev *dev) +mt76x2_resync_beacon_timer(struct mt76x02_dev *dev) { u32 timer_val = dev->beacon_int << 4; @@ -128,7 +96,7 @@ mt76x2_resync_beacon_timer(struct mt76x2_dev *dev) void mt76x2_pre_tbtt_tasklet(unsigned long arg) { - struct mt76x2_dev *dev = (struct mt76x2_dev *) arg; + struct mt76x02_dev *dev = (struct mt76x02_dev *) arg; struct mt76_queue *q = &dev->mt76.q_tx[MT_TXQ_PSD]; struct beacon_bc_data data = {}; struct sk_buff *skb; @@ -164,7 +132,7 @@ void mt76x2_pre_tbtt_tasklet(unsigned long arg) while ((skb = __skb_dequeue(&data.q)) != NULL) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_vif *vif = info->control.vif; - struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv; + struct mt76x02_vif *mvif = (struct mt76x02_vif *) 
vif->drv_priv; mt76_dma_tx_queue_skb(&dev->mt76, q, skb, &mvif->group_wcid, NULL); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_phy_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c index 9fd6ab4cbb94..f00aed915ee8 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_phy_common.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c @@ -16,10 +16,12 @@ */ #include "mt76x2.h" -#include "mt76x2_eeprom.h" +#include "eeprom.h" +#include "mcu.h" +#include "../mt76x02_phy.h" static void -mt76x2_adjust_high_lna_gain(struct mt76x2_dev *dev, int reg, s8 offset) +mt76x2_adjust_high_lna_gain(struct mt76x02_dev *dev, int reg, s8 offset) { s8 gain; @@ -29,7 +31,7 @@ mt76x2_adjust_high_lna_gain(struct mt76x2_dev *dev, int reg, s8 offset) } static void -mt76x2_adjust_agc_gain(struct mt76x2_dev *dev, int reg, s8 offset) +mt76x2_adjust_agc_gain(struct mt76x02_dev *dev, int reg, s8 offset) { s8 gain; @@ -38,7 +40,7 @@ mt76x2_adjust_agc_gain(struct mt76x2_dev *dev, int reg, s8 offset) mt76_rmw_field(dev, MT_BBP(AGC, reg), MT_BBP_AGC_GAIN, gain); } -void mt76x2_apply_gain_adj(struct mt76x2_dev *dev) +void mt76x2_apply_gain_adj(struct mt76x02_dev *dev) { s8 *gain_adj = dev->cal.rx.high_gain; @@ -50,7 +52,7 @@ void mt76x2_apply_gain_adj(struct mt76x2_dev *dev) } EXPORT_SYMBOL_GPL(mt76x2_apply_gain_adj); -void mt76x2_phy_set_txpower_regs(struct mt76x2_dev *dev, +void mt76x2_phy_set_txpower_regs(struct mt76x02_dev *dev, enum nl80211_band band) { u32 pa_mode[2]; @@ -63,7 +65,7 @@ void mt76x2_phy_set_txpower_regs(struct mt76x2_dev *dev, mt76_wr(dev, MT_TX_ALC_CFG_2, 0x35160a00); mt76_wr(dev, MT_TX_ALC_CFG_3, 0x35160a06); - if (mt76x2_ext_pa_enabled(dev, band)) { + if (mt76x02_ext_pa_enabled(&dev->mt76, band)) { mt76_wr(dev, MT_RF_PA_MODE_ADJ0, 0x0000ec00); mt76_wr(dev, MT_RF_PA_MODE_ADJ1, 0x0000ec00); } else { @@ -74,7 +76,7 @@ void mt76x2_phy_set_txpower_regs(struct mt76x2_dev *dev, pa_mode[0] = 0x0000ffff; pa_mode[1] = 0x00ff00ff; - if (mt76x2_ext_pa_enabled(dev, band)) { + if (mt76x02_ext_pa_enabled(&dev->mt76, band)) { mt76_wr(dev, MT_TX_ALC_CFG_2, 0x2f0f0400); mt76_wr(dev, MT_TX_ALC_CFG_3, 0x2f0f0476); } else { @@ -82,7 +84,7 @@ void mt76x2_phy_set_txpower_regs(struct mt76x2_dev *dev, mt76_wr(dev, MT_TX_ALC_CFG_3, 0x1b0f0476); } - if (mt76x2_ext_pa_enabled(dev, band)) + if (mt76x02_ext_pa_enabled(&dev->mt76, band)) pa_mode_adj = 0x04000000; else pa_mode_adj = 0; @@ -96,7 +98,7 @@ void mt76x2_phy_set_txpower_regs(struct mt76x2_dev *dev, mt76_wr(dev, MT_RF_PA_MODE_CFG0, pa_mode[0]); mt76_wr(dev, MT_RF_PA_MODE_CFG1, pa_mode[1]); - if (mt76x2_ext_pa_enabled(dev, band)) { + if (mt76x02_ext_pa_enabled(&dev->mt76, band)) { u32 val; if (band == NL80211_BAND_2GHZ) @@ -123,37 +125,6 @@ void mt76x2_phy_set_txpower_regs(struct mt76x2_dev *dev, } EXPORT_SYMBOL_GPL(mt76x2_phy_set_txpower_regs); -static void -mt76x2_limit_rate_power(struct mt76_rate_power *r, int limit) -{ - int i; - - for (i = 0; i < sizeof(r->all); i++) - if (r->all[i] > limit) - r->all[i] = limit; -} - -static u32 -mt76x2_tx_power_mask(u8 v1, u8 v2, u8 v3, u8 v4) -{ - u32 val = 0; - - val |= (v1 & (BIT(6) - 1)) << 0; - val |= (v2 & (BIT(6) - 1)) << 8; - val |= (v3 & (BIT(6) - 1)) << 16; - val |= (v4 & (BIT(6) - 1)) << 24; - return val; -} - -static void -mt76x2_add_rate_power_offset(struct mt76_rate_power *r, int offset) -{ - int i; - - for (i = 0; i < sizeof(r->all); i++) - r->all[i] += offset; -} - static int mt76x2_get_min_rate_power(struct mt76_rate_power *r) { @@ -173,7 +144,7 @@ mt76x2_get_min_rate_power(struct mt76_rate_power 
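
The deleted mt76x2_tx_power_mask() helper (its replacement now lives in the shared mt76x02 code) packs four per-rate power values into one 32-bit register, keeping the low 6 bits of each value at byte offsets 0/8/16/24. A standalone rendering of the same arithmetic:

#include <stdint.h>
#include <stdio.h>

/* keep the low 6 bits of each value, one value per byte lane */
static uint32_t tx_power_mask(uint8_t v1, uint8_t v2, uint8_t v3, uint8_t v4)
{
        uint32_t val = 0;

        val |= (uint32_t)(v1 & 0x3f) << 0;
        val |= (uint32_t)(v2 & 0x3f) << 8;
        val |= (uint32_t)(v3 & 0x3f) << 16;
        val |= (uint32_t)(v4 & 0x3f) << 24;
        return val;
}

int main(void)
{
        printf("0x%08x\n", (unsigned)tx_power_mask(1, 2, 3, 4)); /* -> 0x04030201 */
        return 0;
}
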
*r) return ret; } -void mt76x2_phy_set_txpower(struct mt76x2_dev *dev) +void mt76x2_phy_set_txpower(struct mt76x02_dev *dev) { enum nl80211_chan_width width = dev->mt76.chandef.width; struct ieee80211_channel *chan = dev->mt76.chandef.chan; @@ -190,9 +161,9 @@ void mt76x2_phy_set_txpower(struct mt76x2_dev *dev) delta = txp.delta_bw80; mt76x2_get_rate_power(dev, &t, chan); - mt76x2_add_rate_power_offset(&t, txp.chain[0].target_power); - mt76x2_limit_rate_power(&t, dev->txpower_conf); - dev->txpower_cur = mt76x2_get_max_rate_power(&t); + mt76x02_add_rate_power_offset(&t, txp.chain[0].target_power); + mt76x02_limit_rate_power(&t, dev->mt76.txpower_conf); + dev->mt76.txpower_cur = mt76x02_get_max_rate_power(&t); base_power = mt76x2_get_min_rate_power(&t); delta += base_power - txp.chain[0].target_power; @@ -210,40 +181,22 @@ void mt76x2_phy_set_txpower(struct mt76x2_dev *dev) txp_1 = 0x2f; } - mt76x2_add_rate_power_offset(&t, -base_power); + mt76x02_add_rate_power_offset(&t, -base_power); dev->target_power = txp.chain[0].target_power; dev->target_power_delta[0] = txp_0 - txp.chain[0].target_power; dev->target_power_delta[1] = txp_1 - txp.chain[0].target_power; - dev->rate_power = t; - - mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_0, txp_0); - mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_1, txp_1); - - mt76_wr(dev, MT_TX_PWR_CFG_0, - mt76x2_tx_power_mask(t.cck[0], t.cck[2], t.ofdm[0], t.ofdm[2])); - mt76_wr(dev, MT_TX_PWR_CFG_1, - mt76x2_tx_power_mask(t.ofdm[4], t.ofdm[6], t.ht[0], t.ht[2])); - mt76_wr(dev, MT_TX_PWR_CFG_2, - mt76x2_tx_power_mask(t.ht[4], t.ht[6], t.ht[8], t.ht[10])); - mt76_wr(dev, MT_TX_PWR_CFG_3, - mt76x2_tx_power_mask(t.ht[12], t.ht[14], t.ht[0], t.ht[2])); - mt76_wr(dev, MT_TX_PWR_CFG_4, - mt76x2_tx_power_mask(t.ht[4], t.ht[6], 0, 0)); - mt76_wr(dev, MT_TX_PWR_CFG_7, - mt76x2_tx_power_mask(t.ofdm[6], t.vht[8], t.ht[6], t.vht[8])); - mt76_wr(dev, MT_TX_PWR_CFG_8, - mt76x2_tx_power_mask(t.ht[14], t.vht[8], t.vht[8], 0)); - mt76_wr(dev, MT_TX_PWR_CFG_9, - mt76x2_tx_power_mask(t.ht[6], t.vht[8], t.vht[8], 0)); + dev->mt76.rate_power = t; + + mt76x02_phy_set_txpower(&dev->mt76, txp_0, txp_1); } EXPORT_SYMBOL_GPL(mt76x2_phy_set_txpower); -void mt76x2_configure_tx_delay(struct mt76x2_dev *dev, +void mt76x2_configure_tx_delay(struct mt76x02_dev *dev, enum nl80211_band band, u8 bw) { u32 cfg0, cfg1; - if (mt76x2_ext_pa_enabled(dev, band)) { + if (mt76x02_ext_pa_enabled(&dev->mt76, band)) { cfg0 = bw ? 
0x000b0c01 : 0x00101101; cfg1 = 0x00011414; } else { @@ -257,7 +210,7 @@ void mt76x2_configure_tx_delay(struct mt76x2_dev *dev, } EXPORT_SYMBOL_GPL(mt76x2_configure_tx_delay); -void mt76x2_phy_set_bw(struct mt76x2_dev *dev, int width, u8 ctrl) +void mt76x2_phy_set_bw(struct mt76x02_dev *dev, int width, u8 ctrl) { int core_val, agc_val; @@ -283,7 +236,7 @@ void mt76x2_phy_set_bw(struct mt76x2_dev *dev, int width, u8 ctrl) } EXPORT_SYMBOL_GPL(mt76x2_phy_set_bw); -void mt76x2_phy_set_band(struct mt76x2_dev *dev, int band, bool primary_upper) +void mt76x2_phy_set_band(struct mt76x02_dev *dev, int band, bool primary_upper) { switch (band) { case NL80211_BAND_2GHZ: @@ -301,49 +254,44 @@ void mt76x2_phy_set_band(struct mt76x2_dev *dev, int band, bool primary_upper) } EXPORT_SYMBOL_GPL(mt76x2_phy_set_band); -int mt76x2_phy_get_min_avg_rssi(struct mt76x2_dev *dev) +void mt76x2_phy_tssi_compensate(struct mt76x02_dev *dev, bool wait) { - struct mt76x2_sta *sta; - struct mt76_wcid *wcid; - int i, j, min_rssi = 0; - s8 cur_rssi; + struct ieee80211_channel *chan = dev->mt76.chandef.chan; + struct mt76x2_tx_power_info txp; + struct mt76x2_tssi_comp t = {}; - local_bh_disable(); - rcu_read_lock(); + if (!dev->cal.tssi_cal_done) + return; - for (i = 0; i < ARRAY_SIZE(dev->wcid_mask); i++) { - unsigned long mask = dev->wcid_mask[i]; + if (!dev->cal.tssi_comp_pending) { + /* TSSI trigger */ + t.cal_mode = BIT(0); + mt76x2_mcu_tssi_comp(dev, &t); + dev->cal.tssi_comp_pending = true; + } else { + if (mt76_rr(dev, MT_BBP(CORE, 34)) & BIT(4)) + return; - if (!mask) - continue; + dev->cal.tssi_comp_pending = false; + mt76x2_get_power_info(dev, &txp, chan); - for (j = i * BITS_PER_LONG; mask; j++, mask >>= 1) { - if (!(mask & 1)) - continue; + if (mt76x02_ext_pa_enabled(&dev->mt76, chan->band)) + t.pa_mode = 1; - wcid = rcu_dereference(dev->wcid[j]); - if (!wcid) - continue; + t.cal_mode = BIT(1); + t.slope0 = txp.chain[0].tssi_slope; + t.offset0 = txp.chain[0].tssi_offset; + t.slope1 = txp.chain[1].tssi_slope; + t.offset1 = txp.chain[1].tssi_offset; + mt76x2_mcu_tssi_comp(dev, &t); - sta = container_of(wcid, struct mt76x2_sta, wcid); - spin_lock(&dev->mt76.rx_lock); - if (sta->inactive_count++ < 5) - cur_rssi = ewma_signal_read(&sta->rssi); - else - cur_rssi = 0; - spin_unlock(&dev->mt76.rx_lock); + if (t.pa_mode || dev->cal.dpd_cal_done) + return; - if (cur_rssi < min_rssi) - min_rssi = cur_rssi; - } + usleep_range(10000, 20000); + mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_DPD, + chan->hw_value, wait); + dev->cal.dpd_cal_done = true; } - - rcu_read_unlock(); - local_bh_enable(); - - if (!min_rssi) - return -75; - - return min_rssi; } -EXPORT_SYMBOL_GPL(mt76x2_phy_get_min_avg_rssi); +EXPORT_SYMBOL_GPL(mt76x2_phy_tssi_compensate); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c index 1428cfdee579..57baf8d1c830 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c @@ -17,9 +17,11 @@ #include <linux/kernel.h> #include <linux/module.h> +#include "../mt76x02_usb.h" #include "mt76x2u.h" static const struct usb_device_id mt76x2u_device_table[] = { + { USB_DEVICE(0x0e8d, 0x7612) }, /* Alfa AWUS036ACM */ { USB_DEVICE(0x0b05, 0x1833) }, /* Asus USB-AC54 */ { USB_DEVICE(0x0b05, 0x17eb) }, /* Asus USB-AC55 */ { USB_DEVICE(0x0b05, 0x180b) }, /* Asus USB-N53 B1 */ @@ -35,7 +37,7 @@ static int mt76x2u_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *udev = 
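
The removed mt76x2_phy_get_min_avg_rssi() above (superseded by mt76x02_phy_get_min_avg_rssi() in the shared code) walks the wcid bitmap, reads each active station's averaged RSSI, and falls back to -75 dBm when nothing has been heard. The bit-scan idiom, reduced to a runnable sketch with the per-station EWMA state replaced by a made-up rssi[] table:

#include <limits.h>
#include <stdio.h>

#define N_WCID 128
#define BITS_PER_LONG (int)(sizeof(unsigned long) * CHAR_BIT)

static signed char rssi[N_WCID]; /* per-station averaged RSSI, sample data */

static int get_min_avg_rssi(const unsigned long *wcid_mask)
{
        int i, j, min_rssi = 0;

        for (i = 0; i < N_WCID / BITS_PER_LONG; i++) {
                unsigned long mask = wcid_mask[i];

                for (j = i * BITS_PER_LONG; mask; j++, mask >>= 1) {
                        if (!(mask & 1))
                                continue;
                        if (rssi[j] < min_rssi)
                                min_rssi = rssi[j];
                }
        }
        /* no station heard from: assume a usable default */
        return min_rssi ? min_rssi : -75;
}

int main(void)
{
        unsigned long wcid_mask[N_WCID / BITS_PER_LONG] = { 0 };

        wcid_mask[0] = 0x5; /* stations 0 and 2 active */
        rssi[0] = -60;
        rssi[2] = -72;
        printf("%d\n", get_min_avg_rssi(wcid_mask)); /* -> -72 */
        return 0;
}
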
interface_to_usbdev(intf); - struct mt76x2_dev *dev; + struct mt76x02_dev *dev; int err; dev = mt76x2u_alloc_device(&intf->dev); @@ -45,6 +47,7 @@ static int mt76x2u_probe(struct usb_interface *intf, udev = usb_get_dev(udev); usb_reset_device(udev); + mt76x02u_init_mcu(&dev->mt76); err = mt76u_init(&dev->mt76, intf); if (err < 0) goto err; @@ -69,7 +72,7 @@ err: static void mt76x2u_disconnect(struct usb_interface *intf) { struct usb_device *udev = interface_to_usbdev(intf); - struct mt76x2_dev *dev = usb_get_intfdata(intf); + struct mt76x02_dev *dev = usb_get_intfdata(intf); struct ieee80211_hw *hw = mt76_hw(dev); set_bit(MT76_REMOVED, &dev->mt76.state); @@ -84,7 +87,7 @@ static void mt76x2u_disconnect(struct usb_interface *intf) static int __maybe_unused mt76x2u_suspend(struct usb_interface *intf, pm_message_t state) { - struct mt76x2_dev *dev = usb_get_intfdata(intf); + struct mt76x02_dev *dev = usb_get_intfdata(intf); struct mt76_usb *usb = &dev->mt76.usb; mt76u_stop_queues(&dev->mt76); @@ -96,7 +99,7 @@ static int __maybe_unused mt76x2u_suspend(struct usb_interface *intf, static int __maybe_unused mt76x2u_resume(struct usb_interface *intf) { - struct mt76x2_dev *dev = usb_get_intfdata(intf); + struct mt76x02_dev *dev = usb_get_intfdata(intf); struct mt76_usb *usb = &dev->mt76.usb; int err; @@ -107,16 +110,24 @@ static int __maybe_unused mt76x2u_resume(struct usb_interface *intf) mt76u_mcu_complete_urb, &usb->mcu.cmpl); if (err < 0) - return err; + goto err; err = mt76u_submit_rx_buffers(&dev->mt76); if (err < 0) - return err; + goto err; tasklet_enable(&usb->rx_tasklet); tasklet_enable(&usb->tx_tasklet); - return mt76x2u_init_hardware(dev); + err = mt76x2u_init_hardware(dev); + if (err < 0) + goto err; + + return 0; + +err: + mt76x2u_cleanup(dev); + return err; } MODULE_DEVICE_TABLE(usb, mt76x2u_device_table); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c index 9b81e7641c06..c82f16efa327 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2u_init.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c @@ -17,9 +17,11 @@ #include <linux/delay.h> #include "mt76x2u.h" -#include "mt76x2_eeprom.h" +#include "eeprom.h" +#include "../mt76x02_phy.h" +#include "../mt76x02_usb.h" -static void mt76x2u_init_dma(struct mt76x2_dev *dev) +static void mt76x2u_init_dma(struct mt76x02_dev *dev) { u32 val = mt76_rr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG)); @@ -34,7 +36,7 @@ static void mt76x2u_init_dma(struct mt76x2_dev *dev) mt76_wr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG), val); } -static void mt76x2u_power_on_rf_patch(struct mt76x2_dev *dev) +static void mt76x2u_power_on_rf_patch(struct mt76x02_dev *dev) { mt76_set(dev, MT_VEND_ADDR(CFG, 0x130), BIT(0) | BIT(16)); udelay(1); @@ -54,7 +56,7 @@ static void mt76x2u_power_on_rf_patch(struct mt76x2_dev *dev) mt76_set(dev, MT_VEND_ADDR(CFG, 0x14c), BIT(19) | BIT(20)); } -static void mt76x2u_power_on_rf(struct mt76x2_dev *dev, int unit) +static void mt76x2u_power_on_rf(struct mt76x02_dev *dev, int unit) { int shift = unit ? 
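
mt76x2u_resume() previously returned early on URB or hardware-init failures, leaving the already-restarted queues dangling; it now funnels every failure through a single err label that calls mt76x2u_cleanup(). The shape of that unwinding as a generic sketch; setup_a(), setup_b() and teardown() are placeholders:

#include <stdio.h>

static int setup_a(void) { return 0; }
static int setup_b(void) { return -1; } /* pretend this step fails */
static void teardown(void) { puts("cleaned up"); }

static int resume(void)
{
        int err;

        err = setup_a();
        if (err < 0)
                goto err;

        err = setup_b();
        if (err < 0)
                goto err;

        return 0;

err:
        /* one exit path undoes everything, whichever step failed */
        teardown();
        return err;
}

int main(void)
{
        printf("resume: %d\n", resume());
        return 0;
}
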
8 : 0; u32 val = (BIT(1) | BIT(3) | BIT(4) | BIT(5)) << shift; @@ -76,7 +78,7 @@ static void mt76x2u_power_on_rf(struct mt76x2_dev *dev, int unit) mt76_set(dev, 0x530, 0xf); } -static void mt76x2u_power_on(struct mt76x2_dev *dev) +static void mt76x2u_power_on(struct mt76x02_dev *dev) { u32 val; @@ -112,7 +114,7 @@ static void mt76x2u_power_on(struct mt76x2_dev *dev) mt76x2u_power_on_rf(dev, 1); } -static int mt76x2u_init_eeprom(struct mt76x2_dev *dev) +static int mt76x2u_init_eeprom(struct mt76x02_dev *dev) { u32 val, i; @@ -128,35 +130,33 @@ static int mt76x2u_init_eeprom(struct mt76x2_dev *dev) put_unaligned_le32(val, dev->mt76.eeprom.data + i); } - mt76x2_eeprom_parse_hw_cap(dev); + mt76x02_eeprom_parse_hw_cap(&dev->mt76); return 0; } -struct mt76x2_dev *mt76x2u_alloc_device(struct device *pdev) +struct mt76x02_dev *mt76x2u_alloc_device(struct device *pdev) { static const struct mt76_driver_ops drv_ops = { - .tx_prepare_skb = mt76x2u_tx_prepare_skb, - .tx_complete_skb = mt76x2u_tx_complete_skb, - .tx_status_data = mt76x2u_tx_status_data, - .rx_skb = mt76x2_queue_rx_skb, + .tx_prepare_skb = mt76x02u_tx_prepare_skb, + .tx_complete_skb = mt76x02u_tx_complete_skb, + .tx_status_data = mt76x02_tx_status_data, + .rx_skb = mt76x02_queue_rx_skb, }; - struct mt76x2_dev *dev; + struct mt76x02_dev *dev; struct mt76_dev *mdev; mdev = mt76_alloc_device(sizeof(*dev), &mt76x2u_ops); if (!mdev) return NULL; - dev = container_of(mdev, struct mt76x2_dev, mt76); + dev = container_of(mdev, struct mt76x02_dev, mt76); mdev->dev = pdev; mdev->drv = &drv_ops; - mutex_init(&dev->mutex); - return dev; } -static void mt76x2u_init_beacon_offsets(struct mt76x2_dev *dev) +static void mt76x2u_init_beacon_offsets(struct mt76x02_dev *dev) { mt76_wr(dev, MT_BCN_OFFSET(0), 0x18100800); mt76_wr(dev, MT_BCN_OFFSET(1), 0x38302820); @@ -164,27 +164,18 @@ static void mt76x2u_init_beacon_offsets(struct mt76x2_dev *dev) mt76_wr(dev, MT_BCN_OFFSET(3), 0x78706860); } -int mt76x2u_init_hardware(struct mt76x2_dev *dev) +int mt76x2u_init_hardware(struct mt76x02_dev *dev) { - static const u16 beacon_offsets[] = { - /* 512 byte per beacon */ - 0xc000, 0xc200, 0xc400, 0xc600, - 0xc800, 0xca00, 0xcc00, 0xce00, - 0xd000, 0xd200, 0xd400, 0xd600, - 0xd800, 0xda00, 0xdc00, 0xde00 - }; const struct mt76_wcid_addr addr = { .macaddr = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, .ba_mask = 0, }; int i, err; - dev->beacon_offsets = beacon_offsets; - mt76x2_reset_wlan(dev, true); mt76x2u_power_on(dev); - if (!mt76x2_wait_for_mac(dev)) + if (!mt76x02_wait_for_mac(&dev->mt76)) return -ETIMEDOUT; err = mt76x2u_mcu_fw_init(dev); @@ -197,7 +188,7 @@ int mt76x2u_init_hardware(struct mt76x2_dev *dev) return -EIO; /* wait for asic ready after fw load. 
*/ - if (!mt76x2_wait_for_mac(dev)) + if (!mt76x02_wait_for_mac(&dev->mt76)) return -ETIMEDOUT; mt76_wr(dev, MT_HEADER_TRANS_CTRL_REG, 0); @@ -213,12 +204,13 @@ int mt76x2u_init_hardware(struct mt76x2_dev *dev) if (err < 0) return err; - mt76x2u_mac_setaddr(dev, dev->mt76.eeprom.data + MT_EE_MAC_ADDR); - dev->rxfilter = mt76_rr(dev, MT_RX_FILTR_CFG); + mt76x02_mac_setaddr(&dev->mt76, + dev->mt76.eeprom.data + MT_EE_MAC_ADDR); + dev->mt76.rxfilter = mt76_rr(dev, MT_RX_FILTR_CFG); mt76x2u_init_beacon_offsets(dev); - if (!mt76x2_wait_for_bbp(dev)) + if (!mt76x02_wait_for_txrx_idle(&dev->mt76)) return -ETIMEDOUT; /* reset wcid table */ @@ -241,17 +233,17 @@ int mt76x2u_init_hardware(struct mt76x2_dev *dev) mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e); mt76_wr(dev, MT_TXOP_CTRL_CFG, 0x583f); - err = mt76x2u_mcu_load_cr(dev, MT_RF_BBP_CR, 0, 0); + err = mt76x2_mcu_load_cr(dev, MT_RF_BBP_CR, 0, 0); if (err < 0) return err; - mt76x2u_phy_set_rxpath(dev); - mt76x2u_phy_set_txdac(dev); + mt76x02_phy_set_rxpath(&dev->mt76); + mt76x02_phy_set_txdac(&dev->mt76); return mt76x2u_mac_stop(dev); } -int mt76x2u_register_device(struct mt76x2_dev *dev) +int mt76x2u_register_device(struct mt76x02_dev *dev) { struct ieee80211_hw *hw = mt76_hw(dev); struct wiphy *wiphy = hw->wiphy; @@ -264,11 +256,11 @@ int mt76x2u_register_device(struct mt76x2_dev *dev) if (err < 0) return err; - err = mt76u_mcu_init_rx(&dev->mt76); + err = mt76u_alloc_queues(&dev->mt76); if (err < 0) - return err; + goto fail; - err = mt76u_alloc_queues(&dev->mt76); + err = mt76u_mcu_init_rx(&dev->mt76); if (err < 0) goto fail; @@ -278,8 +270,8 @@ int mt76x2u_register_device(struct mt76x2_dev *dev) wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); - err = mt76_register_device(&dev->mt76, true, mt76x2_rates, - ARRAY_SIZE(mt76x2_rates)); + err = mt76_register_device(&dev->mt76, true, mt76x02_rates, + ARRAY_SIZE(mt76x02_rates)); if (err) goto fail; @@ -302,17 +294,17 @@ fail: return err; } -void mt76x2u_stop_hw(struct mt76x2_dev *dev) +void mt76x2u_stop_hw(struct mt76x02_dev *dev) { mt76u_stop_stat_wk(&dev->mt76); cancel_delayed_work_sync(&dev->cal_work); mt76x2u_mac_stop(dev); } -void mt76x2u_cleanup(struct mt76x2_dev *dev) +void mt76x2u_cleanup(struct mt76x02_dev *dev) { - mt76x2u_mcu_set_radio_state(dev, false); + mt76x02_mcu_set_radio_state(&dev->mt76, false, false); mt76x2u_stop_hw(dev); mt76u_queues_deinit(&dev->mt76); - mt76x2u_mcu_deinit(dev); + mt76u_mcu_deinit(&dev->mt76); } diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c index eab7ab297aa6..dbd635aa763b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2u_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c @@ -15,9 +15,9 @@ */ #include "mt76x2u.h" -#include "mt76x2_eeprom.h" +#include "eeprom.h" -static void mt76x2u_mac_reset_counters(struct mt76x2_dev *dev) +static void mt76x2u_mac_reset_counters(struct mt76x02_dev *dev) { mt76_rr(dev, MT_RX_STAT_0); mt76_rr(dev, MT_RX_STAT_1); @@ -27,12 +27,12 @@ static void mt76x2u_mac_reset_counters(struct mt76x2_dev *dev) mt76_rr(dev, MT_TX_STA_2); } -static void mt76x2u_mac_fixup_xtal(struct mt76x2_dev *dev) +static void mt76x2u_mac_fixup_xtal(struct mt76x02_dev *dev) { s8 offset = 0; u16 eep_val; - eep_val = mt76x2_eeprom_get(dev, MT_EE_XTAL_TRIM_2); + eep_val = mt76x02_eeprom_get(&dev->mt76, MT_EE_XTAL_TRIM_2); offset = eep_val & 0x7f; if ((eep_val & 0xff) == 0xff) @@ -42,7 +42,7 @@ static void mt76x2u_mac_fixup_xtal(struct mt76x2_dev *dev) eep_val >>= 
8; if (eep_val == 0x00 || eep_val == 0xff) { - eep_val = mt76x2_eeprom_get(dev, MT_EE_XTAL_TRIM_1); + eep_val = mt76x02_eeprom_get(&dev->mt76, MT_EE_XTAL_TRIM_1); eep_val &= 0xff; if (eep_val == 0x00 || eep_val == 0xff) @@ -67,7 +67,7 @@ static void mt76x2u_mac_fixup_xtal(struct mt76x2_dev *dev) /* init fce */ mt76_clear(dev, MT_FCE_L2_STUFF, MT_FCE_L2_STUFF_WR_MPDU_LEN_EN); - eep_val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_2); + eep_val = mt76x02_eeprom_get(&dev->mt76, MT_EE_NIC_CONF_2); switch (FIELD_GET(MT_EE_NIC_CONF_2_XTAL_OPTION, eep_val)) { case 0: mt76_wr(dev, MT_XO_CTRL7, 0x5c1fee80); @@ -80,7 +80,7 @@ static void mt76x2u_mac_fixup_xtal(struct mt76x2_dev *dev) } } -int mt76x2u_mac_reset(struct mt76x2_dev *dev) +int mt76x2u_mac_reset(struct mt76x02_dev *dev) { mt76_wr(dev, MT_WPDMA_GLO_CFG, BIT(4) | BIT(5)); @@ -114,15 +114,15 @@ int mt76x2u_mac_reset(struct mt76x2_dev *dev) return 0; } -int mt76x2u_mac_start(struct mt76x2_dev *dev) +int mt76x2u_mac_start(struct mt76x02_dev *dev) { mt76x2u_mac_reset_counters(dev); mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX); - wait_for_wpdma(dev); + mt76x02_wait_for_wpdma(&dev->mt76, 1000); usleep_range(50, 100); - mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter); + mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter); mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX | @@ -131,7 +131,7 @@ int mt76x2u_mac_start(struct mt76x2_dev *dev) return 0; } -int mt76x2u_mac_stop(struct mt76x2_dev *dev) +int mt76x2u_mac_stop(struct mt76x02_dev *dev) { int i, count = 0, val; bool stopped = false; @@ -212,7 +212,7 @@ int mt76x2u_mac_stop(struct mt76x2_dev *dev) return 0; } -void mt76x2u_mac_resume(struct mt76x2_dev *dev) +void mt76x2u_mac_resume(struct mt76x02_dev *dev) { mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX | @@ -220,21 +220,3 @@ void mt76x2u_mac_resume(struct mt76x2_dev *dev) mt76_set(dev, MT_TXOP_CTRL_CFG, BIT(20)); mt76_set(dev, MT_TXOP_HLDR_ET, BIT(1)); } - -void mt76x2u_mac_setaddr(struct mt76x2_dev *dev, u8 *addr) -{ - ether_addr_copy(dev->mt76.macaddr, addr); - - if (!is_valid_ether_addr(dev->mt76.macaddr)) { - eth_random_addr(dev->mt76.macaddr); - dev_info(dev->mt76.dev, - "Invalid MAC address, using random address %pM\n", - dev->mt76.macaddr); - } - - mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dev->mt76.macaddr)); - mt76_wr(dev, MT_MAC_ADDR_DW1, - get_unaligned_le16(dev->mt76.macaddr + 4) | - FIELD_PREP(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff)); -} - diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c index 7367ba111119..224609d6915f 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2u_main.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c @@ -18,10 +18,10 @@ static int mt76x2u_start(struct ieee80211_hw *hw) { - struct mt76x2_dev *dev = hw->priv; + struct mt76x02_dev *dev = hw->priv; int ret; - mutex_lock(&dev->mutex); + mutex_lock(&dev->mt76.mutex); ret = mt76x2u_mac_start(dev); if (ret) @@ -30,40 +30,34 @@ static int mt76x2u_start(struct ieee80211_hw *hw) set_bit(MT76_STATE_RUNNING, &dev->mt76.state); out: - mutex_unlock(&dev->mutex); + mutex_unlock(&dev->mt76.mutex); return ret; } static void mt76x2u_stop(struct ieee80211_hw *hw) { - struct mt76x2_dev *dev = hw->priv; + struct mt76x02_dev *dev = hw->priv; - mutex_lock(&dev->mutex); + mutex_lock(&dev->mt76.mutex); clear_bit(MT76_STATE_RUNNING, &dev->mt76.state); mt76x2u_stop_hw(dev); - mutex_unlock(&dev->mutex); + mutex_unlock(&dev->mt76.mutex); } static int mt76x2u_add_interface(struct 
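
The removed mt76x2u_mac_setaddr() just above (now the shared mt76x02_mac_setaddr()) sanity-checks the EEPROM address, substitutes a random one if it is invalid, and splits the six bytes across the two MAC_ADDR_DW registers. A self-contained sketch of the splitting step; the little-endian loaders stand in for get_unaligned_le32()/le16(), and the U2ME mask position is assumed, not taken from the register spec:

#include <stdint.h>
#include <stdio.h>

static uint32_t get_le32(const uint8_t *p)
{
        return p[0] | p[1] << 8 | (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

static uint32_t get_le16(const uint8_t *p)
{
        return p[0] | p[1] << 8;
}

int main(void)
{
        uint8_t addr[6] = { 0x00, 0x0c, 0x43, 0x26, 0x60, 0x00 };
        uint32_t dw0, dw1;

        dw0 = get_le32(addr);          /* first four bytes */
        dw1 = get_le16(addr + 4)       /* last two bytes */
            | 0xff << 16;              /* U2ME wildcard; bit layout assumed */

        printf("DW0 %08x DW1 %08x\n", (unsigned)dw0, (unsigned)dw1);
        return 0;
}
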
ieee80211_hw *hw, struct ieee80211_vif *vif) { - struct mt76x2_dev *dev = hw->priv; - struct mt76x2_vif *mvif = (struct mt76x2_vif *)vif->drv_priv; - unsigned int idx = 0; + struct mt76x02_dev *dev = hw->priv; if (!ether_addr_equal(dev->mt76.macaddr, vif->addr)) - mt76x2u_mac_setaddr(dev, vif->addr); - - mvif->idx = idx; - mvif->group_wcid.idx = MT_VIF_WCID(idx); - mvif->group_wcid.hw_key_idx = -1; - mt76x2_txq_init(dev, vif->txq); + mt76x02_mac_setaddr(&dev->mt76, vif->addr); + mt76x02_vif_init(&dev->mt76, vif, 0); return 0; } static int -mt76x2u_set_channel(struct mt76x2_dev *dev, +mt76x2u_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef) { int err; @@ -91,9 +85,9 @@ static void mt76x2u_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *info, u32 changed) { - struct mt76x2_dev *dev = hw->priv; + struct mt76x02_dev *dev = hw->priv; - mutex_lock(&dev->mutex); + mutex_lock(&dev->mt76.mutex); if (changed & BSS_CHANGED_ASSOC) { mt76x2u_phy_channel_calibrate(dev); @@ -107,23 +101,23 @@ mt76x2u_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, get_unaligned_le16(info->bssid + 4)); } - mutex_unlock(&dev->mutex); + mutex_unlock(&dev->mt76.mutex); } static int mt76x2u_config(struct ieee80211_hw *hw, u32 changed) { - struct mt76x2_dev *dev = hw->priv; + struct mt76x02_dev *dev = hw->priv; int err = 0; - mutex_lock(&dev->mutex); + mutex_lock(&dev->mt76.mutex); if (changed & IEEE80211_CONF_CHANGE_MONITOR) { if (!(hw->conf.flags & IEEE80211_CONF_MONITOR)) - dev->rxfilter |= MT_RX_FILTR_CFG_PROMISC; + dev->mt76.rxfilter |= MT_RX_FILTR_CFG_PROMISC; else - dev->rxfilter &= ~MT_RX_FILTR_CFG_PROMISC; - mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter); + dev->mt76.rxfilter &= ~MT_RX_FILTR_CFG_PROMISC; + mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter); } if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { @@ -133,16 +127,16 @@ mt76x2u_config(struct ieee80211_hw *hw, u32 changed) } if (changed & IEEE80211_CONF_CHANGE_POWER) { - dev->txpower_conf = hw->conf.power_level * 2; + dev->mt76.txpower_conf = hw->conf.power_level * 2; /* convert to per-chain power for 2x2 devices */ - dev->txpower_conf -= 6; + dev->mt76.txpower_conf -= 6; if (test_bit(MT76_STATE_RUNNING, &dev->mt76.state)) mt76x2_phy_set_txpower(dev); } - mutex_unlock(&dev->mutex); + mutex_unlock(&dev->mt76.mutex); return err; } @@ -151,7 +145,7 @@ static void mt76x2u_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const u8 *mac) { - struct mt76x2_dev *dev = hw->priv; + struct mt76x02_dev *dev = hw->priv; set_bit(MT76_SCANNING, &dev->mt76.state); } @@ -159,27 +153,27 @@ mt76x2u_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, static void mt76x2u_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { - struct mt76x2_dev *dev = hw->priv; + struct mt76x02_dev *dev = hw->priv; clear_bit(MT76_SCANNING, &dev->mt76.state); } const struct ieee80211_ops mt76x2u_ops = { - .tx = mt76x2_tx, + .tx = mt76x02_tx, .start = mt76x2u_start, .stop = mt76x2u_stop, .add_interface = mt76x2u_add_interface, - .remove_interface = mt76x2_remove_interface, - .sta_add = mt76x2_sta_add, - .sta_remove = mt76x2_sta_remove, - .set_key = mt76x2_set_key, - .ampdu_action = mt76x2_ampdu_action, + .remove_interface = mt76x02_remove_interface, + .sta_add = mt76x02_sta_add, + .sta_remove = mt76x02_sta_remove, + .set_key = mt76x02_set_key, + .ampdu_action = mt76x02_ampdu_action, .config = mt76x2u_config, .wake_tx_queue = mt76_wake_tx_queue, .bss_info_changed = 
mt76x2u_bss_info_changed, - .configure_filter = mt76x2_configure_filter, - .conf_tx = mt76x2_conf_tx, + .configure_filter = mt76x02_configure_filter, + .conf_tx = mt76x02_conf_tx, .sw_scan_start = mt76x2u_sw_scan, .sw_scan_complete = mt76x2u_sw_scan_complete, - .sta_rate_tbl_update = mt76x2_sta_rate_tbl_update, + .sta_rate_tbl_update = mt76x02_sta_rate_tbl_update, }; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c index 22c16d638baa..259ceae2a3a9 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2u_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c @@ -17,11 +17,10 @@ #include <linux/firmware.h> #include "mt76x2u.h" -#include "mt76x2_eeprom.h" +#include "eeprom.h" +#include "../mt76x02_usb.h" #define MT_CMD_HDR_LEN 4 -#define MT_INBAND_PACKET_MAX_LEN 192 -#define MT_MCU_MEMMAP_WLAN 0x410000 #define MCU_FW_URB_MAX_PAYLOAD 0x3900 #define MCU_ROM_PATCH_MAX_PAYLOAD 2048 @@ -30,151 +29,7 @@ #define MT76U_MCU_DLM_OFFSET 0x110000 #define MT76U_MCU_ROM_PATCH_OFFSET 0x90000 -static int -mt76x2u_mcu_function_select(struct mt76x2_dev *dev, enum mcu_function func, - u32 val) -{ - struct { - __le32 id; - __le32 value; - } __packed __aligned(4) msg = { - .id = cpu_to_le32(func), - .value = cpu_to_le32(val), - }; - struct sk_buff *skb; - - skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg)); - if (!skb) - return -ENOMEM; - return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_FUN_SET_OP, - func != Q_SELECT); -} - -int mt76x2u_mcu_set_radio_state(struct mt76x2_dev *dev, bool val) -{ - struct { - __le32 mode; - __le32 level; - } __packed __aligned(4) msg = { - .mode = cpu_to_le32(val ? RADIO_ON : RADIO_OFF), - .level = cpu_to_le32(0), - }; - struct sk_buff *skb; - - skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg)); - if (!skb) - return -ENOMEM; - return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_POWER_SAVING_OP, - false); -} - -int mt76x2u_mcu_load_cr(struct mt76x2_dev *dev, u8 type, u8 temp_level, - u8 channel) -{ - struct { - u8 cr_mode; - u8 temp; - u8 ch; - u8 _pad0; - __le32 cfg; - } __packed __aligned(4) msg = { - .cr_mode = type, - .temp = temp_level, - .ch = channel, - }; - struct sk_buff *skb; - u32 val; - - val = BIT(31); - val |= (mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_0) >> 8) & 0x00ff; - val |= (mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1) << 8) & 0xff00; - msg.cfg = cpu_to_le32(val); - - /* first set the channel without the extension channel info */ - skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg)); - if (!skb) - return -ENOMEM; - return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_LOAD_CR, true); -} - -int mt76x2u_mcu_set_channel(struct mt76x2_dev *dev, u8 channel, u8 bw, - u8 bw_index, bool scan) -{ - struct { - u8 idx; - u8 scan; - u8 bw; - u8 _pad0; - - __le16 chainmask; - u8 ext_chan; - u8 _pad1; - - } __packed __aligned(4) msg = { - .idx = channel, - .scan = scan, - .bw = bw, - .chainmask = cpu_to_le16(dev->chainmask), - }; - struct sk_buff *skb; - - /* first set the channel without the extension channel info */ - skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg)); - if (!skb) - return -ENOMEM; - - mt76u_mcu_send_msg(&dev->mt76, skb, CMD_SWITCH_CHANNEL_OP, true); - - usleep_range(5000, 10000); - - msg.ext_chan = 0xe0 + bw_index; - skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg)); - if (!skb) - return -ENOMEM; - - return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_SWITCH_CHANNEL_OP, true); -} - -int mt76x2u_mcu_calibrate(struct mt76x2_dev *dev, enum mcu_calibration type, - u32 val) -{ - struct { - __le32 id; - __le32 value; - } __packed __aligned(4) 
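
Most of the MCU command builders deleted below share one wire format: a packed pair of little-endian 32-bit words (a function or calibration id, plus a value), built with cpu_to_le32() so the layout stays stable on big-endian hosts. The same packing in user-space C, with htole32() playing the role of cpu_to_le32():

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

struct mcu_msg {
        uint32_t id;    /* command-specific function or calibration id */
        uint32_t value; /* argument, also little-endian on the wire */
} __attribute__((packed, aligned(4)));

int main(void)
{
        struct mcu_msg msg = {
                .id    = htole32(7),
                .value = htole32(1),
        };
        const uint8_t *p = (const uint8_t *)&msg;

        /* first byte on the wire is the low byte of id, on any host */
        printf("%02x %02x\n", p[0], p[4]); /* -> 07 01 */
        return 0;
}
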
msg = { - .id = cpu_to_le32(type), - .value = cpu_to_le32(val), - }; - struct sk_buff *skb; - - skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg)); - if (!skb) - return -ENOMEM; - return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_CALIBRATION_OP, true); -} - -int mt76x2u_mcu_init_gain(struct mt76x2_dev *dev, u8 channel, u32 gain, - bool force) -{ - struct { - __le32 channel; - __le32 gain_val; - } __packed __aligned(4) msg = { - .channel = cpu_to_le32(channel), - .gain_val = cpu_to_le32(gain), - }; - struct sk_buff *skb; - - if (force) - msg.channel |= cpu_to_le32(BIT(31)); - - skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg)); - if (!skb) - return -ENOMEM; - return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_INIT_GAIN_OP, true); -} - -int mt76x2u_mcu_set_dynamic_vga(struct mt76x2_dev *dev, u8 channel, bool ap, +int mt76x2u_mcu_set_dynamic_vga(struct mt76x02_dev *dev, u8 channel, bool ap, bool ext, int rssi, u32 false_cca) { struct { @@ -194,38 +49,18 @@ int mt76x2u_mcu_set_dynamic_vga(struct mt76x2_dev *dev, u8 channel, bool ap, val |= BIT(30); msg.channel = cpu_to_le32(val); - skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg)); - if (!skb) - return -ENOMEM; - return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_DYNC_VGA_OP, true); + skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg)); + return mt76_mcu_send_msg(dev, skb, CMD_DYNC_VGA_OP, true); } -int mt76x2u_mcu_tssi_comp(struct mt76x2_dev *dev, - struct mt76x2_tssi_comp *tssi_data) -{ - struct { - __le32 id; - struct mt76x2_tssi_comp data; - } __packed __aligned(4) msg = { - .id = cpu_to_le32(MCU_CAL_TSSI_COMP), - .data = *tssi_data, - }; - struct sk_buff *skb; - - skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg)); - if (!skb) - return -ENOMEM; - return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_CALIBRATION_OP, true); -} - -static void mt76x2u_mcu_load_ivb(struct mt76x2_dev *dev) +static void mt76x2u_mcu_load_ivb(struct mt76x02_dev *dev) { mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE, USB_DIR_OUT | USB_TYPE_VENDOR, 0x12, 0, NULL, 0); } -static void mt76x2u_mcu_enable_patch(struct mt76x2_dev *dev) +static void mt76x2u_mcu_enable_patch(struct mt76x02_dev *dev) { struct mt76_usb *usb = &dev->mt76.usb; const u8 data[] = { @@ -240,7 +75,7 @@ static void mt76x2u_mcu_enable_patch(struct mt76x2_dev *dev) 0x12, 0, usb->data, sizeof(data)); } -static void mt76x2u_mcu_reset_wmt(struct mt76x2_dev *dev) +static void mt76x2u_mcu_reset_wmt(struct mt76x02_dev *dev) { struct mt76_usb *usb = &dev->mt76.usb; u8 data[] = { @@ -254,10 +89,10 @@ static void mt76x2u_mcu_reset_wmt(struct mt76x2_dev *dev) 0x12, 0, usb->data, sizeof(data)); } -static int mt76x2u_mcu_load_rom_patch(struct mt76x2_dev *dev) +static int mt76x2u_mcu_load_rom_patch(struct mt76x02_dev *dev) { bool rom_protect = !is_mt7612(dev); - struct mt76x2_patch_header *hdr; + struct mt76x02_patch_header *hdr; u32 val, patch_mask, patch_reg; const struct firmware *fw; int err; @@ -292,7 +127,7 @@ static int mt76x2u_mcu_load_rom_patch(struct mt76x2_dev *dev) goto out; } - hdr = (struct mt76x2_patch_header *)fw->data; + hdr = (struct mt76x02_patch_header *)fw->data; dev_info(dev->mt76.dev, "ROM patch build: %.15s\n", hdr->build_time); /* enable USB_DMA_CFG */ @@ -302,7 +137,7 @@ static int mt76x2u_mcu_load_rom_patch(struct mt76x2_dev *dev) mt76_wr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG), val); /* vendor reset */ - mt76u_mcu_fw_reset(&dev->mt76); + mt76x02u_mcu_fw_reset(&dev->mt76); usleep_range(5000, 10000); /* enable FCE to send in-band cmd */ @@ -316,10 +151,10 @@ static int mt76x2u_mcu_load_rom_patch(struct mt76x2_dev *dev) /* FCE 
skip_fs_en */ mt76_wr(dev, MT_FCE_SKIP_FS, 0x3); - err = mt76u_mcu_fw_send_data(&dev->mt76, fw->data + sizeof(*hdr), - fw->size - sizeof(*hdr), - MCU_ROM_PATCH_MAX_PAYLOAD, - MT76U_MCU_ROM_PATCH_OFFSET); + err = mt76x02u_mcu_fw_send_data(&dev->mt76, fw->data + sizeof(*hdr), + fw->size - sizeof(*hdr), + MCU_ROM_PATCH_MAX_PAYLOAD, + MT76U_MCU_ROM_PATCH_OFFSET); if (err < 0) { err = -EIO; goto out; @@ -341,10 +176,10 @@ out: return err; } -static int mt76x2u_mcu_load_firmware(struct mt76x2_dev *dev) +static int mt76x2u_mcu_load_firmware(struct mt76x02_dev *dev) { u32 val, dlm_offset = MT76U_MCU_DLM_OFFSET; - const struct mt76x2_fw_header *hdr; + const struct mt76x02_fw_header *hdr; int err, len, ilm_len, dlm_len; const struct firmware *fw; @@ -357,7 +192,7 @@ static int mt76x2u_mcu_load_firmware(struct mt76x2_dev *dev) goto out; } - hdr = (const struct mt76x2_fw_header *)fw->data; + hdr = (const struct mt76x02_fw_header *)fw->data; ilm_len = le32_to_cpu(hdr->ilm_len); dlm_len = le32_to_cpu(hdr->dlm_len); len = sizeof(*hdr) + ilm_len + dlm_len; @@ -375,7 +210,7 @@ static int mt76x2u_mcu_load_firmware(struct mt76x2_dev *dev) dev_info(dev->mt76.dev, "Build Time: %.16s\n", hdr->build_time); /* vendor reset */ - mt76u_mcu_fw_reset(&dev->mt76); + mt76x02u_mcu_fw_reset(&dev->mt76); usleep_range(5000, 10000); /* enable USB_DMA_CFG */ @@ -395,9 +230,9 @@ static int mt76x2u_mcu_load_firmware(struct mt76x2_dev *dev) mt76_wr(dev, MT_FCE_SKIP_FS, 0x3); /* load ILM */ - err = mt76u_mcu_fw_send_data(&dev->mt76, fw->data + sizeof(*hdr), - ilm_len, MCU_FW_URB_MAX_PAYLOAD, - MT76U_MCU_ILM_OFFSET); + err = mt76x02u_mcu_fw_send_data(&dev->mt76, fw->data + sizeof(*hdr), + ilm_len, MCU_FW_URB_MAX_PAYLOAD, + MT76U_MCU_ILM_OFFSET); if (err < 0) { err = -EIO; goto out; @@ -406,10 +241,10 @@ static int mt76x2u_mcu_load_firmware(struct mt76x2_dev *dev) /* load DLM */ if (mt76xx_rev(dev) >= MT76XX_REV_E3) dlm_offset += 0x800; - err = mt76u_mcu_fw_send_data(&dev->mt76, - fw->data + sizeof(*hdr) + ilm_len, - dlm_len, MCU_FW_URB_MAX_PAYLOAD, - dlm_offset); + err = mt76x02u_mcu_fw_send_data(&dev->mt76, + fw->data + sizeof(*hdr) + ilm_len, + dlm_len, MCU_FW_URB_MAX_PAYLOAD, + dlm_offset); if (err < 0) { err = -EIO; goto out; @@ -426,13 +261,14 @@ static int mt76x2u_mcu_load_firmware(struct mt76x2_dev *dev) /* enable FCE to send in-band cmd */ mt76_wr(dev, MT_FCE_PSE_CTRL, 0x1); dev_dbg(dev->mt76.dev, "firmware running\n"); + mt76x02_set_ethtool_fwver(&dev->mt76, hdr); out: release_firmware(fw); return err; } -int mt76x2u_mcu_fw_init(struct mt76x2_dev *dev) +int mt76x2u_mcu_fw_init(struct mt76x02_dev *dev) { int err; @@ -443,21 +279,14 @@ int mt76x2u_mcu_fw_init(struct mt76x2_dev *dev) return mt76x2u_mcu_load_firmware(dev); } -int mt76x2u_mcu_init(struct mt76x2_dev *dev) +int mt76x2u_mcu_init(struct mt76x02_dev *dev) { int err; - err = mt76x2u_mcu_function_select(dev, Q_SELECT, 1); + err = mt76x02_mcu_function_select(&dev->mt76, Q_SELECT, + 1, false); if (err < 0) return err; - return mt76x2u_mcu_set_radio_state(dev, true); -} - -void mt76x2u_mcu_deinit(struct mt76x2_dev *dev) -{ - struct mt76_usb *usb = &dev->mt76.usb; - - usb_kill_urb(usb->mcu.res.urb); - mt76u_buf_free(&usb->mcu.res); + return mt76x02_mcu_set_radio_state(&dev->mt76, true, false); } diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c index 5158063d0c2e..b11f8a6a6254 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2u_phy.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c @@ -15,42 
+15,10 @@ */ #include "mt76x2u.h" -#include "mt76x2_eeprom.h" +#include "eeprom.h" +#include "../mt76x02_phy.h" -void mt76x2u_phy_set_rxpath(struct mt76x2_dev *dev) -{ - u32 val; - - val = mt76_rr(dev, MT_BBP(AGC, 0)); - val &= ~BIT(4); - - switch (dev->chainmask & 0xf) { - case 2: - val |= BIT(3); - break; - default: - val &= ~BIT(3); - break; - } - mt76_wr(dev, MT_BBP(AGC, 0), val); -} - -void mt76x2u_phy_set_txdac(struct mt76x2_dev *dev) -{ - int txpath; - - txpath = (dev->chainmask >> 8) & 0xf; - switch (txpath) { - case 2: - mt76_set(dev, MT_BBP(TXBE, 5), 0x3); - break; - default: - mt76_clear(dev, MT_BBP(TXBE, 5), 0x3); - break; - } -} - -void mt76x2u_phy_channel_calibrate(struct mt76x2_dev *dev) +void mt76x2u_phy_channel_calibrate(struct mt76x02_dev *dev) { struct ieee80211_channel *chan = dev->mt76.chandef.chan; bool is_5ghz = chan->band == NL80211_BAND_5GHZ; @@ -61,59 +29,18 @@ void mt76x2u_phy_channel_calibrate(struct mt76x2_dev *dev) mt76x2u_mac_stop(dev); if (is_5ghz) - mt76x2u_mcu_calibrate(dev, MCU_CAL_LC, 0); + mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_LC, 0, false); - mt76x2u_mcu_calibrate(dev, MCU_CAL_TX_LOFT, is_5ghz); - mt76x2u_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz); - mt76x2u_mcu_calibrate(dev, MCU_CAL_RXIQC_FI, is_5ghz); - mt76x2u_mcu_calibrate(dev, MCU_CAL_TEMP_SENSOR, 0); + mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TX_LOFT, is_5ghz, false); + mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TXIQ, is_5ghz, false); + mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RXIQC_FI, is_5ghz, false); + mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TEMP_SENSOR, 0, false); mt76x2u_mac_resume(dev); } static void -mt76x2u_phy_tssi_compensate(struct mt76x2_dev *dev) -{ - struct ieee80211_channel *chan = dev->mt76.chandef.chan; - struct mt76x2_tx_power_info txp; - struct mt76x2_tssi_comp t = {}; - - if (!dev->cal.tssi_cal_done) - return; - - if (!dev->cal.tssi_comp_pending) { - /* TSSI trigger */ - t.cal_mode = BIT(0); - mt76x2u_mcu_tssi_comp(dev, &t); - dev->cal.tssi_comp_pending = true; - } else { - if (mt76_rr(dev, MT_BBP(CORE, 34)) & BIT(4)) - return; - - dev->cal.tssi_comp_pending = false; - mt76x2_get_power_info(dev, &txp, chan); - - if (mt76x2_ext_pa_enabled(dev, chan->band)) - t.pa_mode = 1; - - t.cal_mode = BIT(1); - t.slope0 = txp.chain[0].tssi_slope; - t.offset0 = txp.chain[0].tssi_offset; - t.slope1 = txp.chain[1].tssi_slope; - t.offset1 = txp.chain[1].tssi_offset; - mt76x2u_mcu_tssi_comp(dev, &t); - - if (t.pa_mode || dev->cal.dpd_cal_done) - return; - - usleep_range(10000, 20000); - mt76x2u_mcu_calibrate(dev, MCU_CAL_DPD, chan->hw_value); - dev->cal.dpd_cal_done = true; - } -} - -static void -mt76x2u_phy_update_channel_gain(struct mt76x2_dev *dev) +mt76x2u_phy_update_channel_gain(struct mt76x02_dev *dev) { u8 channel = dev->mt76.chandef.chan->hw_value; int freq, freq1; @@ -142,7 +69,7 @@ mt76x2u_phy_update_channel_gain(struct mt76x2_dev *dev) break; } - dev->cal.avg_rssi_all = mt76x2_phy_get_min_avg_rssi(dev); + dev->cal.avg_rssi_all = mt76x02_phy_get_min_avg_rssi(&dev->mt76); false_cca = FIELD_GET(MT_RX_STAT_1_CCA_ERRORS, mt76_rr(dev, MT_RX_STAT_1)); @@ -152,17 +79,17 @@ mt76x2u_phy_update_channel_gain(struct mt76x2_dev *dev) void mt76x2u_phy_calibrate(struct work_struct *work) { - struct mt76x2_dev *dev; + struct mt76x02_dev *dev; - dev = container_of(work, struct mt76x2_dev, cal_work.work); - mt76x2u_phy_tssi_compensate(dev); + dev = container_of(work, struct mt76x02_dev, cal_work.work); + mt76x2_phy_tssi_compensate(dev, false); mt76x2u_phy_update_channel_gain(dev); 
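
Both bus back-ends now share mt76x2_phy_tssi_compensate(); it runs as a two-phase state machine across successive calibration work runs: the first pass fires the TSSI trigger and sets tssi_comp_pending, the next pass polls the busy bit and, once it clears, programs the measured slopes and offsets. A reduced model of that pending-flag pattern, with the hardware busy bit simulated:

#include <stdbool.h>
#include <stdio.h>

static bool pending;
static int busy_polls = 1; /* pretend hardware stays busy for one pass */

static bool hw_busy(void)
{
        return busy_polls-- > 0;
}

static void tssi_compensate(void)
{
        if (!pending) {
                puts("trigger TSSI measurement");
                pending = true;
        } else {
                if (hw_busy())
                        return; /* try again on the next work run */
                pending = false;
                puts("apply slope/offset compensation");
        }
}

int main(void)
{
        /* three periodic calibration runs: trigger, still busy, apply */
        tssi_compensate();
        tssi_compensate();
        tssi_compensate();
        return 0;
}
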
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->cal_work, MT_CALIBRATE_INTERVAL); } -int mt76x2u_phy_set_channel(struct mt76x2_dev *dev, +int mt76x2u_phy_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef) { u32 ext_cca_chan[4] = { @@ -239,28 +166,29 @@ int mt76x2u_phy_set_channel(struct mt76x2_dev *dev, MT_EXT_CCA_CFG_CCA_MASK), ext_cca_chan[ch_group_index]); - ret = mt76x2u_mcu_set_channel(dev, channel, bw, bw_index, scan); + ret = mt76x2_mcu_set_channel(dev, channel, bw, bw_index, scan); if (ret) return ret; - mt76x2u_mcu_init_gain(dev, channel, dev->cal.rx.mcu_gain, true); + mt76x2_mcu_init_gain(dev, channel, dev->cal.rx.mcu_gain, true); /* Enable LDPC Rx */ if (mt76xx_rev(dev) >= MT76XX_REV_E3) mt76_set(dev, MT_BBP(RXO, 13), BIT(10)); if (!dev->cal.init_cal_done) { - u8 val = mt76x2_eeprom_get(dev, MT_EE_BT_RCAL_RESULT); + u8 val = mt76x02_eeprom_get(&dev->mt76, MT_EE_BT_RCAL_RESULT); if (val != 0xff) - mt76x2u_mcu_calibrate(dev, MCU_CAL_R, 0); + mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_R, + 0, false); } - mt76x2u_mcu_calibrate(dev, MCU_CAL_RXDCOC, channel); + mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RXDCOC, channel, false); /* Rx LPF calibration */ if (!dev->cal.init_cal_done) - mt76x2u_mcu_calibrate(dev, MCU_CAL_RC, 0); + mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RC, 0, false); dev->cal.init_cal_done = true; mt76_wr(dev, MT_BBP(AGC, 61), 0xff64a4e2); @@ -275,7 +203,7 @@ int mt76x2u_phy_set_channel(struct mt76x2_dev *dev, if (scan) return 0; - if (mt76x2_tssi_enabled(dev)) { + if (mt76x02_tssi_enabled(&dev->mt76)) { /* init default values for temp compensation */ mt76_rmw_field(dev, MT_TX_ALC_CFG_1, MT_TX_ALC_CFG_1_TEMP_COMP, 0x38); @@ -290,9 +218,10 @@ int mt76x2u_phy_set_channel(struct mt76x2_dev *dev, chan = dev->mt76.chandef.chan; if (chan->band == NL80211_BAND_5GHZ) flag |= BIT(0); - if (mt76x2_ext_pa_enabled(dev, chan->band)) + if (mt76x02_ext_pa_enabled(&dev->mt76, chan->band)) flag |= BIT(8); - mt76x2u_mcu_calibrate(dev, MCU_CAL_TSSI, flag); + mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TSSI, + flag, false); dev->cal.tssi_cal_done = true; } } diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2_common.c deleted file mode 100644 index a2338ba139b4..000000000000 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_common.c +++ /dev/null @@ -1,350 +0,0 @@ -/* - * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> - * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- */ - -#include "mt76x2.h" - -void mt76x2_txq_init(struct mt76x2_dev *dev, struct ieee80211_txq *txq) -{ - struct mt76_txq *mtxq; - - if (!txq) - return; - - mtxq = (struct mt76_txq *) txq->drv_priv; - if (txq->sta) { - struct mt76x2_sta *sta; - - sta = (struct mt76x2_sta *) txq->sta->drv_priv; - mtxq->wcid = &sta->wcid; - } else { - struct mt76x2_vif *mvif; - - mvif = (struct mt76x2_vif *) txq->vif->drv_priv; - mtxq->wcid = &mvif->group_wcid; - } - - mt76_txq_init(&dev->mt76, txq); -} -EXPORT_SYMBOL_GPL(mt76x2_txq_init); - -int mt76x2_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_ampdu_params *params) -{ - enum ieee80211_ampdu_mlme_action action = params->action; - struct ieee80211_sta *sta = params->sta; - struct mt76x2_dev *dev = hw->priv; - struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv; - struct ieee80211_txq *txq = sta->txq[params->tid]; - u16 tid = params->tid; - u16 *ssn = ¶ms->ssn; - struct mt76_txq *mtxq; - - if (!txq) - return -EINVAL; - - mtxq = (struct mt76_txq *)txq->drv_priv; - - switch (action) { - case IEEE80211_AMPDU_RX_START: - mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, *ssn, params->buf_size); - mt76_set(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid)); - break; - case IEEE80211_AMPDU_RX_STOP: - mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid); - mt76_clear(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, - BIT(16 + tid)); - break; - case IEEE80211_AMPDU_TX_OPERATIONAL: - mtxq->aggr = true; - mtxq->send_bar = false; - ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn); - break; - case IEEE80211_AMPDU_TX_STOP_FLUSH: - case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: - mtxq->aggr = false; - ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn); - break; - case IEEE80211_AMPDU_TX_START: - mtxq->agg_ssn = *ssn << 4; - ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); - break; - case IEEE80211_AMPDU_TX_STOP_CONT: - mtxq->aggr = false; - ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); - break; - } - - return 0; -} -EXPORT_SYMBOL_GPL(mt76x2_ampdu_action); - -int mt76x2_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_sta *sta) -{ - struct mt76x2_dev *dev = hw->priv; - struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv; - struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv; - int ret = 0; - int idx = 0; - int i; - - mutex_lock(&dev->mutex); - - idx = mt76_wcid_alloc(dev->wcid_mask, ARRAY_SIZE(dev->wcid)); - if (idx < 0) { - ret = -ENOSPC; - goto out; - } - - msta->vif = mvif; - msta->wcid.sta = 1; - msta->wcid.idx = idx; - msta->wcid.hw_key_idx = -1; - mt76x2_mac_wcid_setup(dev, idx, mvif->idx, sta->addr); - mt76x2_mac_wcid_set_drop(dev, idx, false); - for (i = 0; i < ARRAY_SIZE(sta->txq); i++) - mt76x2_txq_init(dev, sta->txq[i]); - - if (vif->type == NL80211_IFTYPE_AP) - set_bit(MT_WCID_FLAG_CHECK_PS, &msta->wcid.flags); - - ewma_signal_init(&msta->rssi); - - rcu_assign_pointer(dev->wcid[idx], &msta->wcid); - -out: - mutex_unlock(&dev->mutex); - - return ret; -} -EXPORT_SYMBOL_GPL(mt76x2_sta_add); - -int mt76x2_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_sta *sta) -{ - struct mt76x2_dev *dev = hw->priv; - struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv; - int idx = msta->wcid.idx; - int i; - - mutex_lock(&dev->mutex); - rcu_assign_pointer(dev->wcid[idx], NULL); - for (i = 0; i < ARRAY_SIZE(sta->txq); i++) - mt76_txq_remove(&dev->mt76, sta->txq[i]); - mt76x2_mac_wcid_set_drop(dev, idx, true); - 
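
The deleted mt76x2_sta_add()/mt76x2_sta_remove() pair manages station slots with mt76_wcid_alloc()/mt76_wcid_free() over a bitmap, returning -ENOSPC when the table is full. The real allocator lives in the mt76 core; this is an illustrative find-first-zero analogue:

#include <limits.h>
#include <stdio.h>

#define N_WCID 128
#define BITS_PER_LONG (int)(sizeof(unsigned long) * CHAR_BIT)

static unsigned long wcid_mask[N_WCID / BITS_PER_LONG];

/* find-first-zero allocator over the station bitmap */
static int wcid_alloc(void)
{
        int idx;

        for (idx = 0; idx < N_WCID; idx++) {
                unsigned long *w = &wcid_mask[idx / BITS_PER_LONG];
                unsigned long bit = 1UL << (idx % BITS_PER_LONG);

                if (!(*w & bit)) {
                        *w |= bit;
                        return idx;
                }
        }
        return -1; /* -ENOSPC in the driver */
}

static void wcid_free(int idx)
{
        wcid_mask[idx / BITS_PER_LONG] &= ~(1UL << (idx % BITS_PER_LONG));
}

int main(void)
{
        int a = wcid_alloc(), b = wcid_alloc();

        wcid_free(a);
        printf("%d %d %d\n", a, b, wcid_alloc()); /* -> 0 1 0 */
        return 0;
}
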
mt76_wcid_free(dev->wcid_mask, idx); - mt76x2_mac_wcid_setup(dev, idx, 0, NULL); - mutex_unlock(&dev->mutex); - - return 0; -} -EXPORT_SYMBOL_GPL(mt76x2_sta_remove); - -void mt76x2_remove_interface(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) -{ - struct mt76x2_dev *dev = hw->priv; - - mt76_txq_remove(&dev->mt76, vif->txq); -} -EXPORT_SYMBOL_GPL(mt76x2_remove_interface); - -int mt76x2_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, - struct ieee80211_vif *vif, struct ieee80211_sta *sta, - struct ieee80211_key_conf *key) -{ - struct mt76x2_dev *dev = hw->priv; - struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv; - struct mt76x2_sta *msta; - struct mt76_wcid *wcid; - int idx = key->keyidx; - int ret; - - /* fall back to sw encryption for unsupported ciphers */ - switch (key->cipher) { - case WLAN_CIPHER_SUITE_WEP40: - case WLAN_CIPHER_SUITE_WEP104: - case WLAN_CIPHER_SUITE_TKIP: - case WLAN_CIPHER_SUITE_CCMP: - break; - default: - return -EOPNOTSUPP; - } - - /* - * The hardware does not support per-STA RX GTK, fall back - * to software mode for these. - */ - if ((vif->type == NL80211_IFTYPE_ADHOC || - vif->type == NL80211_IFTYPE_MESH_POINT) && - (key->cipher == WLAN_CIPHER_SUITE_TKIP || - key->cipher == WLAN_CIPHER_SUITE_CCMP) && - !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) - return -EOPNOTSUPP; - - msta = sta ? (struct mt76x2_sta *) sta->drv_priv : NULL; - wcid = msta ? &msta->wcid : &mvif->group_wcid; - - if (cmd == SET_KEY) { - key->hw_key_idx = wcid->idx; - wcid->hw_key_idx = idx; - if (key->flags & IEEE80211_KEY_FLAG_RX_MGMT) { - key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX; - wcid->sw_iv = true; - } - } else { - if (idx == wcid->hw_key_idx) { - wcid->hw_key_idx = -1; - wcid->sw_iv = true; - } - - key = NULL; - } - mt76_wcid_key_setup(&dev->mt76, wcid, key); - - if (!msta) { - if (key || wcid->hw_key_idx == idx) { - ret = mt76x2_mac_wcid_set_key(dev, wcid->idx, key); - if (ret) - return ret; - } - - return mt76x2_mac_shared_key_setup(dev, mvif->idx, idx, key); - } - - return mt76x2_mac_wcid_set_key(dev, msta->wcid.idx, key); -} -EXPORT_SYMBOL_GPL(mt76x2_set_key); - -int mt76x2_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - u16 queue, const struct ieee80211_tx_queue_params *params) -{ - struct mt76x2_dev *dev = hw->priv; - u8 cw_min = 5, cw_max = 10, qid; - u32 val; - - qid = dev->mt76.q_tx[queue].hw_idx; - - if (params->cw_min) - cw_min = fls(params->cw_min); - if (params->cw_max) - cw_max = fls(params->cw_max); - - val = FIELD_PREP(MT_EDCA_CFG_TXOP, params->txop) | - FIELD_PREP(MT_EDCA_CFG_AIFSN, params->aifs) | - FIELD_PREP(MT_EDCA_CFG_CWMIN, cw_min) | - FIELD_PREP(MT_EDCA_CFG_CWMAX, cw_max); - mt76_wr(dev, MT_EDCA_CFG_AC(qid), val); - - val = mt76_rr(dev, MT_WMM_TXOP(qid)); - val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(qid)); - val |= params->txop << MT_WMM_TXOP_SHIFT(qid); - mt76_wr(dev, MT_WMM_TXOP(qid), val); - - val = mt76_rr(dev, MT_WMM_AIFSN); - val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(qid)); - val |= params->aifs << MT_WMM_AIFSN_SHIFT(qid); - mt76_wr(dev, MT_WMM_AIFSN, val); - - val = mt76_rr(dev, MT_WMM_CWMIN); - val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(qid)); - val |= cw_min << MT_WMM_CWMIN_SHIFT(qid); - mt76_wr(dev, MT_WMM_CWMIN, val); - - val = mt76_rr(dev, MT_WMM_CWMAX); - val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(qid)); - val |= cw_max << MT_WMM_CWMAX_SHIFT(qid); - mt76_wr(dev, MT_WMM_CWMAX, val); - - return 0; -} -EXPORT_SYMBOL_GPL(mt76x2_conf_tx); - -void mt76x2_configure_filter(struct ieee80211_hw 
*hw, - unsigned int changed_flags, - unsigned int *total_flags, u64 multicast) -{ - struct mt76x2_dev *dev = hw->priv; - u32 flags = 0; - -#define MT76_FILTER(_flag, _hw) do { \ - flags |= *total_flags & FIF_##_flag; \ - dev->rxfilter &= ~(_hw); \ - dev->rxfilter |= !(flags & FIF_##_flag) * (_hw); \ - } while (0) - - mutex_lock(&dev->mutex); - - dev->rxfilter &= ~MT_RX_FILTR_CFG_OTHER_BSS; - - MT76_FILTER(FCSFAIL, MT_RX_FILTR_CFG_CRC_ERR); - MT76_FILTER(PLCPFAIL, MT_RX_FILTR_CFG_PHY_ERR); - MT76_FILTER(CONTROL, MT_RX_FILTR_CFG_ACK | - MT_RX_FILTR_CFG_CTS | - MT_RX_FILTR_CFG_CFEND | - MT_RX_FILTR_CFG_CFACK | - MT_RX_FILTR_CFG_BA | - MT_RX_FILTR_CFG_CTRL_RSV); - MT76_FILTER(PSPOLL, MT_RX_FILTR_CFG_PSPOLL); - - *total_flags = flags; - mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter); - - mutex_unlock(&dev->mutex); -} -EXPORT_SYMBOL_GPL(mt76x2_configure_filter); - -void mt76x2_sta_rate_tbl_update(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta) -{ - struct mt76x2_dev *dev = hw->priv; - struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv; - struct ieee80211_sta_rates *rates = rcu_dereference(sta->rates); - struct ieee80211_tx_rate rate = {}; - - if (!rates) - return; - - rate.idx = rates->rate[0].idx; - rate.flags = rates->rate[0].flags; - mt76x2_mac_wcid_set_rate(dev, &msta->wcid, &rate); - msta->wcid.max_txpwr_adj = mt76x2_tx_get_max_txpwr_adj(dev, &rate); -} -EXPORT_SYMBOL_GPL(mt76x2_sta_rate_tbl_update); - -void mt76x2_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, - struct sk_buff *skb) -{ - struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76); - void *rxwi = skb->data; - - if (q == MT_RXQ_MCU) { - skb_queue_tail(&dev->mcu.res_q, skb); - wake_up(&dev->mcu.wait); - return; - } - - skb_pull(skb, sizeof(struct mt76x2_rxwi)); - if (mt76x2_mac_process_rx(dev, skb, rxwi)) { - dev_kfree_skb(skb); - return; - } - - mt76_rx(&dev->mt76, q, skb); -} -EXPORT_SYMBOL_GPL(mt76x2_queue_rx_skb); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_core.c b/drivers/net/wireless/mediatek/mt76/mt76x2_core.c deleted file mode 100644 index 2629779e8d3e..000000000000 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_core.c +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- */ - -#include <linux/delay.h> -#include "mt76x2.h" -#include "mt76x2_trace.h" - -void mt76x2_set_irq_mask(struct mt76x2_dev *dev, u32 clear, u32 set) -{ - unsigned long flags; - - spin_lock_irqsave(&dev->irq_lock, flags); - dev->irqmask &= ~clear; - dev->irqmask |= set; - mt76_wr(dev, MT_INT_MASK_CSR, dev->irqmask); - spin_unlock_irqrestore(&dev->irq_lock, flags); -} - -void mt76x2_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q) -{ - struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76); - - mt76x2_irq_enable(dev, MT_INT_RX_DONE(q)); -} - -irqreturn_t mt76x2_irq_handler(int irq, void *dev_instance) -{ - struct mt76x2_dev *dev = dev_instance; - u32 intr; - - intr = mt76_rr(dev, MT_INT_SOURCE_CSR); - mt76_wr(dev, MT_INT_SOURCE_CSR, intr); - - if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state)) - return IRQ_NONE; - - trace_dev_irq(dev, intr, dev->irqmask); - - intr &= dev->irqmask; - - if (intr & MT_INT_TX_DONE_ALL) { - mt76x2_irq_disable(dev, MT_INT_TX_DONE_ALL); - tasklet_schedule(&dev->tx_tasklet); - } - - if (intr & MT_INT_RX_DONE(0)) { - mt76x2_irq_disable(dev, MT_INT_RX_DONE(0)); - napi_schedule(&dev->mt76.napi[0]); - } - - if (intr & MT_INT_RX_DONE(1)) { - mt76x2_irq_disable(dev, MT_INT_RX_DONE(1)); - napi_schedule(&dev->mt76.napi[1]); - } - - if (intr & MT_INT_PRE_TBTT) - tasklet_schedule(&dev->pre_tbtt_tasklet); - - /* send buffered multicast frames now */ - if (intr & MT_INT_TBTT) - mt76_queue_kick(dev, &dev->mt76.q_tx[MT_TXQ_PSD]); - - if (intr & MT_INT_TX_STAT) { - mt76x2_mac_poll_tx_status(dev, true); - tasklet_schedule(&dev->tx_tasklet); - } - - if (intr & MT_INT_GPTIMER) { - mt76x2_irq_disable(dev, MT_INT_GPTIMER); - tasklet_schedule(&dev->dfs_pd.dfs_tasklet); - } - - return IRQ_HANDLED; -} - diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dma.c b/drivers/net/wireless/mediatek/mt76/mt76x2_dma.c deleted file mode 100644 index 6720a6a1313f..000000000000 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_dma.c +++ /dev/null @@ -1,163 +0,0 @@ -/* - * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- */ - -#include "mt76x2.h" -#include "mt76x2_dma.h" - -int -mt76x2_tx_queue_mcu(struct mt76x2_dev *dev, enum mt76_txq_id qid, - struct sk_buff *skb, int cmd, int seq) -{ - struct mt76_queue *q = &dev->mt76.q_tx[qid]; - struct mt76_queue_buf buf; - dma_addr_t addr; - u32 tx_info; - - tx_info = MT_MCU_MSG_TYPE_CMD | - FIELD_PREP(MT_MCU_MSG_CMD_TYPE, cmd) | - FIELD_PREP(MT_MCU_MSG_CMD_SEQ, seq) | - FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) | - FIELD_PREP(MT_MCU_MSG_LEN, skb->len); - - addr = dma_map_single(dev->mt76.dev, skb->data, skb->len, - DMA_TO_DEVICE); - if (dma_mapping_error(dev->mt76.dev, addr)) - return -ENOMEM; - - buf.addr = addr; - buf.len = skb->len; - spin_lock_bh(&q->lock); - mt76_queue_add_buf(dev, q, &buf, 1, tx_info, skb, NULL); - mt76_queue_kick(dev, q); - spin_unlock_bh(&q->lock); - - return 0; -} - -static int -mt76x2_init_tx_queue(struct mt76x2_dev *dev, struct mt76_queue *q, - int idx, int n_desc) -{ - int ret; - - q->regs = dev->mt76.regs + MT_TX_RING_BASE + idx * MT_RING_SIZE; - q->ndesc = n_desc; - q->hw_idx = idx; - - ret = mt76_queue_alloc(dev, q); - if (ret) - return ret; - - mt76x2_irq_enable(dev, MT_INT_TX_DONE(idx)); - - return 0; -} - -static int -mt76x2_init_rx_queue(struct mt76x2_dev *dev, struct mt76_queue *q, - int idx, int n_desc, int bufsize) -{ - int ret; - - q->regs = dev->mt76.regs + MT_RX_RING_BASE + idx * MT_RING_SIZE; - q->ndesc = n_desc; - q->buf_size = bufsize; - - ret = mt76_queue_alloc(dev, q); - if (ret) - return ret; - - mt76x2_irq_enable(dev, MT_INT_RX_DONE(idx)); - - return 0; -} - -static void -mt76x2_tx_tasklet(unsigned long data) -{ - struct mt76x2_dev *dev = (struct mt76x2_dev *) data; - int i; - - mt76x2_mac_process_tx_status_fifo(dev); - - for (i = MT_TXQ_MCU; i >= 0; i--) - mt76_queue_tx_cleanup(dev, i, false); - - mt76x2_mac_poll_tx_status(dev, false); - mt76x2_irq_enable(dev, MT_INT_TX_DONE_ALL); -} - -int mt76x2_dma_init(struct mt76x2_dev *dev) -{ - static const u8 wmm_queue_map[] = { - [IEEE80211_AC_BE] = 0, - [IEEE80211_AC_BK] = 1, - [IEEE80211_AC_VI] = 2, - [IEEE80211_AC_VO] = 3, - }; - int ret; - int i; - struct mt76_txwi_cache __maybe_unused *t; - struct mt76_queue *q; - - BUILD_BUG_ON(sizeof(t->txwi) < sizeof(struct mt76x2_txwi)); - BUILD_BUG_ON(sizeof(struct mt76x2_rxwi) > MT_RX_HEADROOM); - - mt76_dma_attach(&dev->mt76); - - init_waitqueue_head(&dev->mcu.wait); - skb_queue_head_init(&dev->mcu.res_q); - - tasklet_init(&dev->tx_tasklet, mt76x2_tx_tasklet, (unsigned long) dev); - - mt76_wr(dev, MT_WPDMA_RST_IDX, ~0); - - for (i = 0; i < ARRAY_SIZE(wmm_queue_map); i++) { - ret = mt76x2_init_tx_queue(dev, &dev->mt76.q_tx[i], - wmm_queue_map[i], MT_TX_RING_SIZE); - if (ret) - return ret; - } - - ret = mt76x2_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_PSD], - MT_TX_HW_QUEUE_MGMT, MT_TX_RING_SIZE); - if (ret) - return ret; - - ret = mt76x2_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU], - MT_TX_HW_QUEUE_MCU, MT_MCU_RING_SIZE); - if (ret) - return ret; - - ret = mt76x2_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1, - MT_MCU_RING_SIZE, MT_RX_BUF_SIZE); - if (ret) - return ret; - - q = &dev->mt76.q_rx[MT_RXQ_MAIN]; - q->buf_offset = MT_RX_HEADROOM - sizeof(struct mt76x2_rxwi); - ret = mt76x2_init_rx_queue(dev, q, 0, MT76x2_RX_RING_SIZE, MT_RX_BUF_SIZE); - if (ret) - return ret; - - return mt76_init_queues(dev); -} - -void mt76x2_dma_cleanup(struct mt76x2_dev *dev) -{ - tasklet_kill(&dev->tx_tasklet); - mt76_dma_cleanup(&dev->mt76); -} diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c 
b/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c deleted file mode 100644 index 743da57760dc..000000000000 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c +++ /dev/null @@ -1,427 +0,0 @@ -/* - * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -#include <linux/kernel.h> -#include <linux/firmware.h> -#include <linux/delay.h> - -#include "mt76x2.h" -#include "mt76x2_mcu.h" -#include "mt76x2_dma.h" -#include "mt76x2_eeprom.h" - -static struct sk_buff *mt76x2_mcu_msg_alloc(const void *data, int len) -{ - struct sk_buff *skb; - - skb = alloc_skb(len, GFP_KERNEL); - if (!skb) - return NULL; - memcpy(skb_put(skb, len), data, len); - - return skb; -} - -static struct sk_buff * -mt76x2_mcu_get_response(struct mt76x2_dev *dev, unsigned long expires) -{ - unsigned long timeout; - - if (!time_is_after_jiffies(expires)) - return NULL; - - timeout = expires - jiffies; - wait_event_timeout(dev->mcu.wait, !skb_queue_empty(&dev->mcu.res_q), - timeout); - return skb_dequeue(&dev->mcu.res_q); -} - -static int -mt76x2_mcu_msg_send(struct mt76x2_dev *dev, struct sk_buff *skb, - enum mcu_cmd cmd) -{ - unsigned long expires = jiffies + HZ; - int ret; - u8 seq; - - if (!skb) - return -EINVAL; - - mutex_lock(&dev->mcu.mutex); - - seq = ++dev->mcu.msg_seq & 0xf; - if (!seq) - seq = ++dev->mcu.msg_seq & 0xf; - - ret = mt76x2_tx_queue_mcu(dev, MT_TXQ_MCU, skb, cmd, seq); - if (ret) - goto out; - - while (1) { - u32 *rxfce; - bool check_seq = false; - - skb = mt76x2_mcu_get_response(dev, expires); - if (!skb) { - dev_err(dev->mt76.dev, - "MCU message %d (seq %d) timed out\n", cmd, - seq); - ret = -ETIMEDOUT; - break; - } - - rxfce = (u32 *) skb->cb; - - if (seq == FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, *rxfce)) - check_seq = true; - - dev_kfree_skb(skb); - if (check_seq) - break; - } - -out: - mutex_unlock(&dev->mcu.mutex); - - return ret; -} - -static int -mt76pci_load_rom_patch(struct mt76x2_dev *dev) -{ - const struct firmware *fw = NULL; - struct mt76x2_patch_header *hdr; - bool rom_protect = !is_mt7612(dev); - int len, ret = 0; - __le32 *cur; - u32 patch_mask, patch_reg; - - if (rom_protect && !mt76_poll(dev, MT_MCU_SEMAPHORE_03, 1, 1, 600)) { - dev_err(dev->mt76.dev, - "Could not get hardware semaphore for ROM PATCH\n"); - return -ETIMEDOUT; - } - - if (mt76xx_rev(dev) >= MT76XX_REV_E3) { - patch_mask = BIT(0); - patch_reg = MT_MCU_CLOCK_CTL; - } else { - patch_mask = BIT(1); - patch_reg = MT_MCU_COM_REG0; - } - - if (rom_protect && (mt76_rr(dev, patch_reg) & patch_mask)) { - dev_info(dev->mt76.dev, "ROM patch already applied\n"); - goto out; - } - - ret = request_firmware(&fw, MT7662_ROM_PATCH, dev->mt76.dev); - if (ret) - goto out; - - if (!fw || !fw->data || fw->size <= sizeof(*hdr)) { - ret = -EIO; - dev_err(dev->mt76.dev, "Failed to load firmware\n"); - goto out; - } - 
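The mt76x2_mcu_msg_send() code above shows the driver's 4-bit MCU command sequence counter: it wraps modulo 16 and deliberately skips zero (the USB MCU path removed later in this patch leaves seq at 0 for commands that expect no response). A minimal standalone sketch of that counter, assuming nothing beyond what the hunk shows; next_seq() and main() are illustrative names, not driver code:

/* illustrative sketch of the seq wrap in mt76x2_mcu_msg_send() */
#include <stdio.h>

static unsigned int msg_seq;

static unsigned char next_seq(void)
{
	unsigned char seq = ++msg_seq & 0xf;

	if (!seq)			/* 0 is skipped, as in the driver */
		seq = ++msg_seq & 0xf;
	return seq;
}

int main(void)
{
	int i;

	for (i = 0; i < 20; i++)	/* prints 1..15, then wraps to 1 */
		printf("%u ", next_seq());
	printf("\n");
	return 0;
}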
- hdr = (struct mt76x2_patch_header *) fw->data; - dev_info(dev->mt76.dev, "ROM patch build: %.15s\n", hdr->build_time); - - mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_ROM_PATCH_OFFSET); - - cur = (__le32 *) (fw->data + sizeof(*hdr)); - len = fw->size - sizeof(*hdr); - mt76_wr_copy(dev, MT_MCU_ROM_PATCH_ADDR, cur, len); - - mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, 0); - - /* Trigger ROM */ - mt76_wr(dev, MT_MCU_INT_LEVEL, 4); - - if (!mt76_poll_msec(dev, patch_reg, patch_mask, patch_mask, 2000)) { - dev_err(dev->mt76.dev, "Failed to load ROM patch\n"); - ret = -ETIMEDOUT; - } - -out: - /* release semaphore */ - if (rom_protect) - mt76_wr(dev, MT_MCU_SEMAPHORE_03, 1); - release_firmware(fw); - return ret; -} - -static int -mt76pci_load_firmware(struct mt76x2_dev *dev) -{ - const struct firmware *fw; - const struct mt76x2_fw_header *hdr; - int len, ret; - __le32 *cur; - u32 offset, val; - - ret = request_firmware(&fw, MT7662_FIRMWARE, dev->mt76.dev); - if (ret) - return ret; - - if (!fw || !fw->data || fw->size < sizeof(*hdr)) - goto error; - - hdr = (const struct mt76x2_fw_header *) fw->data; - - len = sizeof(*hdr); - len += le32_to_cpu(hdr->ilm_len); - len += le32_to_cpu(hdr->dlm_len); - - if (fw->size != len) - goto error; - - val = le16_to_cpu(hdr->fw_ver); - dev_info(dev->mt76.dev, "Firmware Version: %d.%d.%02d\n", - (val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf); - - val = le16_to_cpu(hdr->build_ver); - dev_info(dev->mt76.dev, "Build: %x\n", val); - dev_info(dev->mt76.dev, "Build Time: %.16s\n", hdr->build_time); - - cur = (__le32 *) (fw->data + sizeof(*hdr)); - len = le32_to_cpu(hdr->ilm_len); - - mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_ILM_OFFSET); - mt76_wr_copy(dev, MT_MCU_ILM_ADDR, cur, len); - - cur += len / sizeof(*cur); - len = le32_to_cpu(hdr->dlm_len); - - if (mt76xx_rev(dev) >= MT76XX_REV_E3) - offset = MT_MCU_DLM_ADDR_E3; - else - offset = MT_MCU_DLM_ADDR; - - mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_DLM_OFFSET); - mt76_wr_copy(dev, offset, cur, len); - - mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, 0); - - val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_2); - if (FIELD_GET(MT_EE_NIC_CONF_2_XTAL_OPTION, val) == 1) - mt76_set(dev, MT_MCU_COM_REG0, BIT(30)); - - /* trigger firmware */ - mt76_wr(dev, MT_MCU_INT_LEVEL, 2); - if (!mt76_poll_msec(dev, MT_MCU_COM_REG0, 1, 1, 200)) { - dev_err(dev->mt76.dev, "Firmware failed to start\n"); - release_firmware(fw); - return -ETIMEDOUT; - } - - dev_info(dev->mt76.dev, "Firmware running!\n"); - - release_firmware(fw); - - return ret; - -error: - dev_err(dev->mt76.dev, "Invalid firmware\n"); - release_firmware(fw); - return -ENOENT; -} - -static int -mt76x2_mcu_function_select(struct mt76x2_dev *dev, enum mcu_function func, - u32 val) -{ - struct sk_buff *skb; - struct { - __le32 id; - __le32 value; - } __packed __aligned(4) msg = { - .id = cpu_to_le32(func), - .value = cpu_to_le32(val), - }; - - skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg)); - return mt76x2_mcu_msg_send(dev, skb, CMD_FUN_SET_OP); -} - -int mt76x2_mcu_load_cr(struct mt76x2_dev *dev, u8 type, u8 temp_level, - u8 channel) -{ - struct sk_buff *skb; - struct { - u8 cr_mode; - u8 temp; - u8 ch; - u8 _pad0; - - __le32 cfg; - } __packed __aligned(4) msg = { - .cr_mode = type, - .temp = temp_level, - .ch = channel, - }; - u32 val; - - val = BIT(31); - val |= (mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_0) >> 8) & 0x00ff; - val |= (mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1) << 8) & 0xff00; - msg.cfg = cpu_to_le32(val); - - /* first set the channel without the extension channel info */ - 
skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg)); - return mt76x2_mcu_msg_send(dev, skb, CMD_LOAD_CR); -} - -int mt76x2_mcu_set_channel(struct mt76x2_dev *dev, u8 channel, u8 bw, - u8 bw_index, bool scan) -{ - struct sk_buff *skb; - struct { - u8 idx; - u8 scan; - u8 bw; - u8 _pad0; - - __le16 chainmask; - u8 ext_chan; - u8 _pad1; - - } __packed __aligned(4) msg = { - .idx = channel, - .scan = scan, - .bw = bw, - .chainmask = cpu_to_le16(dev->chainmask), - }; - - /* first set the channel without the extension channel info */ - skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg)); - mt76x2_mcu_msg_send(dev, skb, CMD_SWITCH_CHANNEL_OP); - - usleep_range(5000, 10000); - - msg.ext_chan = 0xe0 + bw_index; - skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg)); - return mt76x2_mcu_msg_send(dev, skb, CMD_SWITCH_CHANNEL_OP); -} - -int mt76x2_mcu_set_radio_state(struct mt76x2_dev *dev, bool on) -{ - struct sk_buff *skb; - struct { - __le32 mode; - __le32 level; - } __packed __aligned(4) msg = { - .mode = cpu_to_le32(on ? RADIO_ON : RADIO_OFF), - .level = cpu_to_le32(0), - }; - - skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg)); - return mt76x2_mcu_msg_send(dev, skb, CMD_POWER_SAVING_OP); -} - -int mt76x2_mcu_calibrate(struct mt76x2_dev *dev, enum mcu_calibration type, - u32 param) -{ - struct sk_buff *skb; - struct { - __le32 id; - __le32 value; - } __packed __aligned(4) msg = { - .id = cpu_to_le32(type), - .value = cpu_to_le32(param), - }; - int ret; - - mt76_clear(dev, MT_MCU_COM_REG0, BIT(31)); - - skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg)); - ret = mt76x2_mcu_msg_send(dev, skb, CMD_CALIBRATION_OP); - if (ret) - return ret; - - if (WARN_ON(!mt76_poll_msec(dev, MT_MCU_COM_REG0, - BIT(31), BIT(31), 100))) - return -ETIMEDOUT; - - return 0; -} - -int mt76x2_mcu_tssi_comp(struct mt76x2_dev *dev, - struct mt76x2_tssi_comp *tssi_data) -{ - struct sk_buff *skb; - struct { - __le32 id; - struct mt76x2_tssi_comp data; - } __packed __aligned(4) msg = { - .id = cpu_to_le32(MCU_CAL_TSSI_COMP), - .data = *tssi_data, - }; - - skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg)); - return mt76x2_mcu_msg_send(dev, skb, CMD_CALIBRATION_OP); -} - -int mt76x2_mcu_init_gain(struct mt76x2_dev *dev, u8 channel, u32 gain, - bool force) -{ - struct sk_buff *skb; - struct { - __le32 channel; - __le32 gain_val; - } __packed __aligned(4) msg = { - .channel = cpu_to_le32(channel), - .gain_val = cpu_to_le32(gain), - }; - - if (force) - msg.channel |= cpu_to_le32(BIT(31)); - - skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg)); - return mt76x2_mcu_msg_send(dev, skb, CMD_INIT_GAIN_OP); -} - -int mt76x2_mcu_init(struct mt76x2_dev *dev) -{ - int ret; - - mutex_init(&dev->mcu.mutex); - - ret = mt76pci_load_rom_patch(dev); - if (ret) - return ret; - - ret = mt76pci_load_firmware(dev); - if (ret) - return ret; - - mt76x2_mcu_function_select(dev, Q_SELECT, 1); - return 0; -} - -int mt76x2_mcu_cleanup(struct mt76x2_dev *dev) -{ - struct sk_buff *skb; - - mt76_wr(dev, MT_MCU_INT_LEVEL, 1); - usleep_range(20000, 30000); - - while ((skb = skb_dequeue(&dev->mcu.res_q)) != NULL) - dev_kfree_skb(skb); - - return 0; -} diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_tx_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2_tx_common.c deleted file mode 100644 index 36afb166fa3f..000000000000 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_tx_common.c +++ /dev/null @@ -1,149 +0,0 @@ -/* - * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> - * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> - * - * Permission to use, copy, modify, and/or 
distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -#include "mt76x2.h" -#include "mt76x2_dma.h" - -void mt76x2_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, - struct sk_buff *skb) -{ - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - struct mt76x2_dev *dev = hw->priv; - struct ieee80211_vif *vif = info->control.vif; - struct mt76_wcid *wcid = &dev->global_wcid; - - if (control->sta) { - struct mt76x2_sta *msta; - - msta = (struct mt76x2_sta *)control->sta->drv_priv; - wcid = &msta->wcid; - /* sw encrypted frames */ - if (!info->control.hw_key && wcid->hw_key_idx != -1) - control->sta = NULL; - } - - if (vif && !control->sta) { - struct mt76x2_vif *mvif; - - mvif = (struct mt76x2_vif *)vif->drv_priv; - wcid = &mvif->group_wcid; - } - - mt76_tx(&dev->mt76, control->sta, wcid, skb); -} -EXPORT_SYMBOL_GPL(mt76x2_tx); - -int mt76x2_insert_hdr_pad(struct sk_buff *skb) -{ - int len = ieee80211_get_hdrlen_from_skb(skb); - - if (len % 4 == 0) - return 0; - - skb_push(skb, 2); - memmove(skb->data, skb->data + 2, len); - - skb->data[len] = 0; - skb->data[len + 1] = 0; - return 2; -} -EXPORT_SYMBOL_GPL(mt76x2_insert_hdr_pad); - -s8 mt76x2_tx_get_max_txpwr_adj(struct mt76x2_dev *dev, - const struct ieee80211_tx_rate *rate) -{ - s8 max_txpwr; - - if (rate->flags & IEEE80211_TX_RC_VHT_MCS) { - u8 mcs = ieee80211_rate_get_vht_mcs(rate); - - if (mcs == 8 || mcs == 9) { - max_txpwr = dev->rate_power.vht[8]; - } else { - u8 nss, idx; - - nss = ieee80211_rate_get_vht_nss(rate); - idx = ((nss - 1) << 3) + mcs; - max_txpwr = dev->rate_power.ht[idx & 0xf]; - } - } else if (rate->flags & IEEE80211_TX_RC_MCS) { - max_txpwr = dev->rate_power.ht[rate->idx & 0xf]; - } else { - enum nl80211_band band = dev->mt76.chandef.chan->band; - - if (band == NL80211_BAND_2GHZ) { - const struct ieee80211_rate *r; - struct wiphy *wiphy = mt76_hw(dev)->wiphy; - struct mt76_rate_power *rp = &dev->rate_power; - - r = &wiphy->bands[band]->bitrates[rate->idx]; - if (r->flags & IEEE80211_RATE_SHORT_PREAMBLE) - max_txpwr = rp->cck[r->hw_value & 0x3]; - else - max_txpwr = rp->ofdm[r->hw_value & 0x7]; - } else { - max_txpwr = dev->rate_power.ofdm[rate->idx & 0x7]; - } - } - - return max_txpwr; -} -EXPORT_SYMBOL_GPL(mt76x2_tx_get_max_txpwr_adj); - -s8 mt76x2_tx_get_txpwr_adj(struct mt76x2_dev *dev, s8 txpwr, s8 max_txpwr_adj) -{ - txpwr = min_t(s8, txpwr, dev->txpower_conf); - txpwr -= (dev->target_power + dev->target_power_delta[0]); - txpwr = min_t(s8, txpwr, max_txpwr_adj); - - if (!dev->enable_tpc) - return 0; - else if (txpwr >= 0) - return min_t(s8, txpwr, 7); - else - return (txpwr < -16) ? 
8 : (txpwr + 32) / 2; -} -EXPORT_SYMBOL_GPL(mt76x2_tx_get_txpwr_adj); - -void mt76x2_tx_set_txpwr_auto(struct mt76x2_dev *dev, s8 txpwr) -{ - s8 txpwr_adj; - - txpwr_adj = mt76x2_tx_get_txpwr_adj(dev, txpwr, - dev->rate_power.ofdm[4]); - mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG, - MT_PROT_AUTO_TX_CFG_PROT_PADJ, txpwr_adj); - mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG, - MT_PROT_AUTO_TX_CFG_AUTO_PADJ, txpwr_adj); -} -EXPORT_SYMBOL_GPL(mt76x2_tx_set_txpwr_auto); - -void mt76x2_tx_complete(struct mt76x2_dev *dev, struct sk_buff *skb) -{ - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - - if (info->flags & IEEE80211_TX_CTL_AMPDU) { - ieee80211_free_txskb(mt76_hw(dev), skb); - } else { - ieee80211_tx_info_clear_status(info); - info->status.rates[0].idx = -1; - info->flags |= IEEE80211_TX_STAT_ACK; - ieee80211_tx_status(mt76_hw(dev), skb); - } -} -EXPORT_SYMBOL_GPL(mt76x2_tx_complete); - diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u.h b/drivers/net/wireless/mediatek/mt76/mt76x2u.h deleted file mode 100644 index 008092f0cd8a..000000000000 --- a/drivers/net/wireless/mediatek/mt76/mt76x2u.h +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- */ - -#ifndef __MT76x2U_H -#define __MT76x2U_H - -#include <linux/device.h> - -#include "mt76x2.h" -#include "mt76x2_dma.h" -#include "mt76x2_mcu.h" - -#define MT7612U_EEPROM_SIZE 512 - -#define MT_USB_AGGR_SIZE_LIMIT 21 /* 1024B unit */ -#define MT_USB_AGGR_TIMEOUT 0x80 /* 33ns unit */ - -extern const struct ieee80211_ops mt76x2u_ops; - -struct mt76x2_dev *mt76x2u_alloc_device(struct device *pdev); -int mt76x2u_register_device(struct mt76x2_dev *dev); -int mt76x2u_init_hardware(struct mt76x2_dev *dev); -void mt76x2u_cleanup(struct mt76x2_dev *dev); -void mt76x2u_stop_hw(struct mt76x2_dev *dev); - -void mt76x2u_mac_setaddr(struct mt76x2_dev *dev, u8 *addr); -int mt76x2u_mac_reset(struct mt76x2_dev *dev); -void mt76x2u_mac_resume(struct mt76x2_dev *dev); -int mt76x2u_mac_start(struct mt76x2_dev *dev); -int mt76x2u_mac_stop(struct mt76x2_dev *dev); - -int mt76x2u_phy_set_channel(struct mt76x2_dev *dev, - struct cfg80211_chan_def *chandef); -void mt76x2u_phy_calibrate(struct work_struct *work); -void mt76x2u_phy_channel_calibrate(struct mt76x2_dev *dev); -void mt76x2u_phy_set_txdac(struct mt76x2_dev *dev); -void mt76x2u_phy_set_rxpath(struct mt76x2_dev *dev); - -void mt76x2u_mcu_complete_urb(struct urb *urb); -int mt76x2u_mcu_set_channel(struct mt76x2_dev *dev, u8 channel, u8 bw, - u8 bw_index, bool scan); -int mt76x2u_mcu_calibrate(struct mt76x2_dev *dev, enum mcu_calibration type, - u32 val); -int mt76x2u_mcu_tssi_comp(struct mt76x2_dev *dev, - struct mt76x2_tssi_comp *tssi_data); -int mt76x2u_mcu_init_gain(struct mt76x2_dev *dev, u8 channel, u32 gain, - bool force); -int mt76x2u_mcu_set_dynamic_vga(struct mt76x2_dev *dev, u8 channel, bool ap, - bool ext, int rssi, u32 false_cca); -int mt76x2u_mcu_set_radio_state(struct mt76x2_dev *dev, bool val); -int mt76x2u_mcu_load_cr(struct mt76x2_dev *dev, u8 type, - u8 temp_level, u8 channel); -int mt76x2u_mcu_init(struct mt76x2_dev *dev); -int mt76x2u_mcu_fw_init(struct mt76x2_dev *dev); -void mt76x2u_mcu_deinit(struct mt76x2_dev *dev); - -int mt76x2u_alloc_queues(struct mt76x2_dev *dev); -void mt76x2u_queues_deinit(struct mt76x2_dev *dev); -void mt76x2u_stop_queues(struct mt76x2_dev *dev); -bool mt76x2u_tx_status_data(struct mt76_dev *mdev, u8 *update); -int mt76x2u_tx_prepare_skb(struct mt76_dev *mdev, void *data, - struct sk_buff *skb, struct mt76_queue *q, - struct mt76_wcid *wcid, struct ieee80211_sta *sta, - u32 *tx_info); -void mt76x2u_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q, - struct mt76_queue_entry *e, bool flush); -int mt76x2u_skb_dma_info(struct sk_buff *skb, enum dma_msg_port port, - u32 flags); - -#endif /* __MT76x2U_H */ diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_core.c b/drivers/net/wireless/mediatek/mt76/mt76x2u_core.c deleted file mode 100644 index 1ca5dd05b265..000000000000 --- a/drivers/net/wireless/mediatek/mt76/mt76x2u_core.c +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -#include "mt76x2u.h" -#include "dma.h" - -static void mt76x2u_remove_dma_hdr(struct sk_buff *skb) -{ - int hdr_len; - - skb_pull(skb, sizeof(struct mt76x2_txwi) + MT_DMA_HDR_LEN); - hdr_len = ieee80211_get_hdrlen_from_skb(skb); - if (hdr_len % 4) { - memmove(skb->data + 2, skb->data, hdr_len); - skb_pull(skb, 2); - } -} - -static int -mt76x2u_check_skb_rooms(struct sk_buff *skb) -{ - int hdr_len = ieee80211_get_hdrlen_from_skb(skb); - u32 need_head; - - need_head = sizeof(struct mt76x2_txwi) + MT_DMA_HDR_LEN; - if (hdr_len % 4) - need_head += 2; - return skb_cow(skb, need_head); -} - -static int -mt76x2u_set_txinfo(struct sk_buff *skb, - struct mt76_wcid *wcid, u8 ep) -{ - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - enum mt76x2_qsel qsel; - u32 flags; - - if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) || - ep == MT_EP_OUT_HCCA) - qsel = MT_QSEL_MGMT; - else - qsel = MT_QSEL_EDCA; - - flags = FIELD_PREP(MT_TXD_INFO_QSEL, qsel) | - MT_TXD_INFO_80211; - if (!wcid || wcid->hw_key_idx == 0xff || wcid->sw_iv) - flags |= MT_TXD_INFO_WIV; - - return mt76u_skb_dma_info(skb, WLAN_PORT, flags); -} - -bool mt76x2u_tx_status_data(struct mt76_dev *mdev, u8 *update) -{ - struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76); - struct mt76x2_tx_status stat; - - if (!mt76x2_mac_load_tx_status(dev, &stat)) - return false; - - mt76x2_send_tx_status(dev, &stat, update); - - return true; -} - -int mt76x2u_tx_prepare_skb(struct mt76_dev *mdev, void *data, - struct sk_buff *skb, struct mt76_queue *q, - struct mt76_wcid *wcid, struct ieee80211_sta *sta, - u32 *tx_info) -{ - struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76); - struct mt76x2_txwi *txwi; - int err, len = skb->len; - - err = mt76x2u_check_skb_rooms(skb); - if (err < 0) - return -ENOMEM; - - mt76x2_insert_hdr_pad(skb); - - txwi = skb_push(skb, sizeof(struct mt76x2_txwi)); - mt76x2_mac_write_txwi(dev, txwi, skb, wcid, sta, len); - - return mt76x2u_set_txinfo(skb, wcid, q2ep(q->hw_idx)); -} - -void mt76x2u_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q, - struct mt76_queue_entry *e, bool flush) -{ - struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76); - - mt76x2u_remove_dma_hdr(e->skb); - mt76x2_tx_complete(dev, e->skb); -} - diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c index af48d43bb7dc..bf0e9e666bc4 100644 --- a/drivers/net/wireless/mediatek/mt76/tx.c +++ b/drivers/net/wireless/mediatek/mt76/tx.c @@ -91,11 +91,23 @@ mt76_txq_get_qid(struct ieee80211_txq *txq) return txq->ac; } +static void +mt76_check_agg_ssn(struct mt76_txq *mtxq, struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + + if (!ieee80211_is_data_qos(hdr->frame_control)) + return; + + mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10; +} + void mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta, struct mt76_wcid *wcid, struct sk_buff *skb) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; struct mt76_queue *q; int qid = skb_get_queue_mapping(skb); @@ -108,6 +120,19 @@ mt76_tx(struct mt76_dev *dev, struct 
ieee80211_sta *sta, ieee80211_get_tx_rates(info->control.vif, sta, skb, info->control.rates, 1); + if (sta && ieee80211_is_data_qos(hdr->frame_control)) { + struct ieee80211_txq *txq; + struct mt76_txq *mtxq; + u8 tid; + + tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK; + txq = sta->txq[tid]; + mtxq = (struct mt76_txq *) txq->drv_priv; + + if (mtxq->aggr) + mt76_check_agg_ssn(mtxq, skb); + } + q = &dev->q_tx[qid]; spin_lock_bh(&q->lock); @@ -144,17 +169,6 @@ mt76_txq_dequeue(struct mt76_dev *dev, struct mt76_txq *mtxq, bool ps) } static void -mt76_check_agg_ssn(struct mt76_txq *mtxq, struct sk_buff *skb) -{ - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; - - if (!ieee80211_is_data_qos(hdr->frame_control)) - return; - - mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10; -} - -static void mt76_queue_ps_skb(struct mt76_dev *dev, struct ieee80211_sta *sta, struct sk_buff *skb, bool last) { @@ -442,3 +456,19 @@ void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq) mtxq->hwq = &dev->q_tx[mt76_txq_get_qid(txq)]; } EXPORT_SYMBOL_GPL(mt76_txq_init); + +u8 mt76_ac_to_hwq(u8 ac) +{ + static const u8 wmm_queue_map[] = { + [IEEE80211_AC_BE] = 0, + [IEEE80211_AC_BK] = 1, + [IEEE80211_AC_VI] = 2, + [IEEE80211_AC_VO] = 3, + }; + + if (WARN_ON(ac >= IEEE80211_NUM_ACS)) + return 0; + + return wmm_queue_map[ac]; +} +EXPORT_SYMBOL_GPL(mt76_ac_to_hwq); diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c index 79e59f2379a2..6a255643c1f0 100644 --- a/drivers/net/wireless/mediatek/mt76/usb.c +++ b/drivers/net/wireless/mediatek/mt76/usb.c @@ -14,6 +14,7 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ +#include <linux/module.h> #include "mt76.h" #include "usb_trace.h" #include "dma.h" @@ -109,6 +110,7 @@ u32 mt76u_rr(struct mt76_dev *dev, u32 addr) return ret; } +EXPORT_SYMBOL_GPL(mt76u_rr); /* should be called with usb_ctrl_mtx locked */ static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val) @@ -140,6 +142,7 @@ void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val) __mt76u_wr(dev, addr, val); mutex_unlock(&dev->usb.usb_ctrl_mtx); } +EXPORT_SYMBOL_GPL(mt76u_wr); static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr, u32 mask, u32 val) @@ -187,6 +190,60 @@ void mt76u_single_wr(struct mt76_dev *dev, const u8 req, EXPORT_SYMBOL_GPL(mt76u_single_wr); static int +mt76u_req_wr_rp(struct mt76_dev *dev, u32 base, + const struct mt76_reg_pair *data, int len) +{ + struct mt76_usb *usb = &dev->usb; + + mutex_lock(&usb->usb_ctrl_mtx); + while (len > 0) { + __mt76u_wr(dev, base + data->reg, data->value); + len--; + data++; + } + mutex_unlock(&usb->usb_ctrl_mtx); + + return 0; +} + +static int +mt76u_wr_rp(struct mt76_dev *dev, u32 base, + const struct mt76_reg_pair *data, int n) +{ + if (test_bit(MT76_STATE_MCU_RUNNING, &dev->state)) + return dev->mcu_ops->mcu_wr_rp(dev, base, data, n); + else + return mt76u_req_wr_rp(dev, base, data, n); +} + +static int +mt76u_req_rd_rp(struct mt76_dev *dev, u32 base, struct mt76_reg_pair *data, + int len) +{ + struct mt76_usb *usb = &dev->usb; + + mutex_lock(&usb->usb_ctrl_mtx); + while (len > 0) { + data->value = __mt76u_rr(dev, base + data->reg); + len--; + data++; + } + mutex_unlock(&usb->usb_ctrl_mtx); + + return 0; +} + +static int +mt76u_rd_rp(struct mt76_dev *dev, u32 base, + struct mt76_reg_pair *data, int n) +{ + if (test_bit(MT76_STATE_MCU_RUNNING, &dev->state)) + return dev->mcu_ops->mcu_rd_rp(dev, base, data, n); + else + return mt76u_req_rd_rp(dev, base, data, n); +} + 
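This patch consolidates the access-category-to-hardware-queue mapping into the new mt76_ac_to_hwq() helper added to tx.c above (the same table previously open-coded as wmm_queue_map in mt76x2_dma.c, and used below to replace q2hwq() in mt76u_alloc_tx()). A standalone sketch of that mapping; the table is copied from the hunk, while the enum (mirroring mac80211's IEEE80211_AC_* ordering) and main() are illustrative only:

/* standalone sketch of the mt76_ac_to_hwq() mapping */
#include <stdio.h>

enum { AC_VO, AC_VI, AC_BE, AC_BK, NUM_ACS };	/* mac80211 ordering */

static unsigned char ac_to_hwq(unsigned char ac)
{
	static const unsigned char wmm_queue_map[NUM_ACS] = {
		[AC_BE] = 0,
		[AC_BK] = 1,
		[AC_VI] = 2,
		[AC_VO] = 3,
	};

	if (ac >= NUM_ACS)	/* the driver WARNs and falls back to 0 */
		return 0;
	return wmm_queue_map[ac];
}

int main(void)
{
	int ac;

	for (ac = 0; ac < NUM_ACS; ac++)
		printf("AC %d -> hw queue %u\n", ac, ac_to_hwq(ac));
	return 0;
}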
+static int mt76u_set_endpoints(struct usb_interface *intf, struct mt76_usb *usb) { @@ -219,15 +276,17 @@ static int mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76u_buf *buf, int nsgs, int len, int sglen) { + struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN]; struct urb *urb = buf->urb; int i; + spin_lock_bh(&q->rx_page_lock); for (i = 0; i < nsgs; i++) { struct page *page; void *data; int offset; - data = netdev_alloc_frag(len); + data = page_frag_alloc(&q->rx_page, len, GFP_ATOMIC); if (!data) break; @@ -235,6 +294,7 @@ mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76u_buf *buf, offset = data - page_address(page); sg_set_page(&urb->sg[i], page, sglen, offset); } + spin_unlock_bh(&q->rx_page_lock); if (i < nsgs) { int j; @@ -326,9 +386,9 @@ static int mt76u_get_rx_entry_len(u8 *data, u32 data_len) min_len = MT_DMA_HDR_LEN + MT_RX_RXWI_LEN + MT_FCE_INFO_LEN; - if (data_len < min_len || WARN_ON(!dma_len) || - WARN_ON(dma_len + MT_DMA_HDR_LEN > data_len) || - WARN_ON(dma_len & 0x3)) + if (data_len < min_len || !dma_len || + dma_len + MT_DMA_HDR_LEN > data_len || + (dma_len & 0x3)) return -EINVAL; return dma_len; } @@ -463,6 +523,7 @@ static int mt76u_alloc_rx(struct mt76_dev *dev) struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN]; int i, err, nsgs; + spin_lock_init(&q->rx_page_lock); spin_lock_init(&q->lock); q->entry = devm_kcalloc(dev->dev, MT_NUM_RX_ENTRIES, sizeof(*q->entry), @@ -494,10 +555,21 @@ static int mt76u_alloc_rx(struct mt76_dev *dev) static void mt76u_free_rx(struct mt76_dev *dev) { struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN]; + struct page *page; int i; for (i = 0; i < q->ndesc; i++) mt76u_buf_free(&q->entry[i].ubuf); + + spin_lock_bh(&q->rx_page_lock); + if (!q->rx_page.va) + goto out; + + page = virt_to_page(q->rx_page.va); + __page_frag_cache_drain(page, q->rx_page.pagecnt_bias); + memset(&q->rx_page, 0, sizeof(q->rx_page)); +out: + spin_unlock_bh(&q->rx_page_lock); } static void mt76u_stop_rx(struct mt76_dev *dev) @@ -509,40 +581,6 @@ static void mt76u_stop_rx(struct mt76_dev *dev) usb_kill_urb(q->entry[i].ubuf.urb); } -int mt76u_skb_dma_info(struct sk_buff *skb, int port, u32 flags) -{ - struct sk_buff *iter, *last = skb; - u32 info, pad; - - /* Buffer layout: - * | 4B | xfer len | pad | 4B | - * | TXINFO | pkt/cmd | zero pad to 4B | zero | - * - * length field of TXINFO should be set to 'xfer len'. 
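For reference, a worked instance of the buffer layout described in the mt76u_skb_dma_info() comment above, assuming only the arithmetic shown in the rest of that function: after the 4-byte TXINFO header is pushed, the frame is zero-padded to a 4-byte boundary and followed by 4 more zero bytes. The sketch below is illustrative and not part of the patch:

/* worked instance of the pad computation in mt76u_skb_dma_info() */
#include <stdio.h>

static unsigned int round_up4(unsigned int v)
{
	return (v + 3) & ~3u;
}

int main(void)
{
	unsigned int pkt_len;

	for (pkt_len = 60; pkt_len <= 63; pkt_len++) {
		unsigned int len = pkt_len + 4;	/* after TXINFO push */
		unsigned int pad = round_up4(len) + 4 - len;

		/* e.g. pkt 61 -> xfer len 64, pad 7, total 72 */
		printf("pkt %u -> xfer len %u, pad %u, total %u\n",
		       pkt_len, round_up4(pkt_len), pad, len + pad);
	}
	return 0;
}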
- */ - info = FIELD_PREP(MT_TXD_INFO_LEN, round_up(skb->len, 4)) | - FIELD_PREP(MT_TXD_INFO_DPORT, port) | flags; - put_unaligned_le32(info, skb_push(skb, sizeof(info))); - - pad = round_up(skb->len, 4) + 4 - skb->len; - skb_walk_frags(skb, iter) { - last = iter; - if (!iter->next) { - skb->data_len += pad; - skb->len += pad; - break; - } - } - - if (unlikely(pad)) { - if (__skb_pad(last, pad, true)) - return -ENOMEM; - __skb_put(last, pad); - } - return 0; -} -EXPORT_SYMBOL_GPL(mt76u_skb_dma_info); - static void mt76u_tx_tasklet(unsigned long data) { struct mt76_dev *dev = (struct mt76_dev *)data; @@ -715,7 +753,7 @@ static int mt76u_alloc_tx(struct mt76_dev *dev) q = &dev->q_tx[i]; spin_lock_init(&q->lock); INIT_LIST_HEAD(&q->swq); - q->hw_idx = q2hwq(i); + q->hw_idx = mt76_ac_to_hwq(i); q->entry = devm_kcalloc(dev->dev, MT_NUM_TX_ENTRIES, sizeof(*q->entry), @@ -822,6 +860,8 @@ int mt76u_init(struct mt76_dev *dev, .wr = mt76u_wr, .rmw = mt76u_rmw, .copy = mt76u_copy, + .wr_rp = mt76u_wr_rp, + .rd_rp = mt76u_rd_rp, }; struct mt76_usb *usb = &dev->usb; diff --git a/drivers/net/wireless/mediatek/mt76/usb_mcu.c b/drivers/net/wireless/mediatek/mt76/usb_mcu.c index 070be803d463..036be4163e69 100644 --- a/drivers/net/wireless/mediatek/mt76/usb_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/usb_mcu.c @@ -14,32 +14,7 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ -#include <linux/firmware.h> - #include "mt76.h" -#include "dma.h" - -#define MT_CMD_HDR_LEN 4 - -#define MT_FCE_DMA_ADDR 0x0230 -#define MT_FCE_DMA_LEN 0x0234 - -#define MT_TX_CPU_FROM_FCE_CPU_DESC_IDX 0x09a8 - -struct sk_buff *mt76u_mcu_msg_alloc(const void *data, int len) -{ - struct sk_buff *skb; - - skb = alloc_skb(MT_CMD_HDR_LEN + len + 8, GFP_KERNEL); - if (!skb) - return NULL; - - skb_reserve(skb, MT_CMD_HDR_LEN); - skb_put_data(skb, data, len); - - return skb; -} -EXPORT_SYMBOL_GPL(mt76u_mcu_msg_alloc); void mt76u_mcu_complete_urb(struct urb *urb) { @@ -49,176 +24,6 @@ void mt76u_mcu_complete_urb(struct urb *urb) } EXPORT_SYMBOL_GPL(mt76u_mcu_complete_urb); -static int mt76u_mcu_wait_resp(struct mt76_dev *dev, u8 seq) -{ - struct mt76_usb *usb = &dev->usb; - struct mt76u_buf *buf = &usb->mcu.res; - int i, ret; - u32 rxfce; - - for (i = 0; i < 5; i++) { - if (!wait_for_completion_timeout(&usb->mcu.cmpl, - msecs_to_jiffies(300))) - continue; - - if (buf->urb->status) - return -EIO; - - rxfce = get_unaligned_le32(sg_virt(&buf->urb->sg[0])); - ret = mt76u_submit_buf(dev, USB_DIR_IN, - MT_EP_IN_CMD_RESP, - buf, GFP_KERNEL, - mt76u_mcu_complete_urb, - &usb->mcu.cmpl); - if (ret) - return ret; - - if (seq == FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, rxfce)) - return 0; - - dev_err(dev->dev, "error: MCU resp evt:%lx seq:%hhx-%lx\n", - FIELD_GET(MT_RX_FCE_INFO_EVT_TYPE, rxfce), - seq, FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, rxfce)); - } - - dev_err(dev->dev, "error: %s timed out\n", __func__); - return -ETIMEDOUT; -} - -int mt76u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb, - int cmd, bool wait_resp) -{ - struct usb_interface *intf = to_usb_interface(dev->dev); - struct usb_device *udev = interface_to_usbdev(intf); - struct mt76_usb *usb = &dev->usb; - unsigned int pipe; - int ret, sent; - u8 seq = 0; - u32 info; - - if (test_bit(MT76_REMOVED, &dev->state)) - return 0; - - mutex_lock(&usb->mcu.mutex); - - pipe = usb_sndbulkpipe(udev, usb->out_ep[MT_EP_OUT_INBAND_CMD]); - if (wait_resp) { - seq = ++usb->mcu.msg_seq & 0xf; - if (!seq) - seq = ++usb->mcu.msg_seq & 0xf; - } - - info = FIELD_PREP(MT_MCU_MSG_CMD_SEQ, seq) 
| - FIELD_PREP(MT_MCU_MSG_CMD_TYPE, cmd) | - MT_MCU_MSG_TYPE_CMD; - ret = mt76u_skb_dma_info(skb, CPU_TX_PORT, info); - if (ret) - goto out; - - ret = usb_bulk_msg(udev, pipe, skb->data, skb->len, &sent, 500); - if (ret) - goto out; - - if (wait_resp) - ret = mt76u_mcu_wait_resp(dev, seq); - -out: - mutex_unlock(&usb->mcu.mutex); - - consume_skb(skb); - - return ret; -} -EXPORT_SYMBOL_GPL(mt76u_mcu_send_msg); - -void mt76u_mcu_fw_reset(struct mt76_dev *dev) -{ - mt76u_vendor_request(dev, MT_VEND_DEV_MODE, - USB_DIR_OUT | USB_TYPE_VENDOR, - 0x1, 0, NULL, 0); -} -EXPORT_SYMBOL_GPL(mt76u_mcu_fw_reset); - -static int -__mt76u_mcu_fw_send_data(struct mt76_dev *dev, struct mt76u_buf *buf, - const void *fw_data, int len, u32 dst_addr) -{ - u8 *data = sg_virt(&buf->urb->sg[0]); - DECLARE_COMPLETION_ONSTACK(cmpl); - __le32 info; - u32 val; - int err; - - info = cpu_to_le32(FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) | - FIELD_PREP(MT_MCU_MSG_LEN, len) | - MT_MCU_MSG_TYPE_CMD); - - memcpy(data, &info, sizeof(info)); - memcpy(data + sizeof(info), fw_data, len); - memset(data + sizeof(info) + len, 0, 4); - - mt76u_single_wr(dev, MT_VEND_WRITE_FCE, - MT_FCE_DMA_ADDR, dst_addr); - len = roundup(len, 4); - mt76u_single_wr(dev, MT_VEND_WRITE_FCE, - MT_FCE_DMA_LEN, len << 16); - - buf->len = MT_CMD_HDR_LEN + len + sizeof(info); - err = mt76u_submit_buf(dev, USB_DIR_OUT, - MT_EP_OUT_INBAND_CMD, - buf, GFP_KERNEL, - mt76u_mcu_complete_urb, &cmpl); - if (err < 0) - return err; - - if (!wait_for_completion_timeout(&cmpl, - msecs_to_jiffies(1000))) { - dev_err(dev->dev, "firmware upload timed out\n"); - usb_kill_urb(buf->urb); - return -ETIMEDOUT; - } - - if (mt76u_urb_error(buf->urb)) { - dev_err(dev->dev, "firmware upload failed: %d\n", - buf->urb->status); - return buf->urb->status; - } - - val = mt76u_rr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX); - val++; - mt76u_wr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX, val); - - return 0; -} - -int mt76u_mcu_fw_send_data(struct mt76_dev *dev, const void *data, - int data_len, u32 max_payload, u32 offset) -{ - int err, len, pos = 0, max_len = max_payload - 8; - struct mt76u_buf buf; - - err = mt76u_buf_alloc(dev, &buf, 1, max_payload, max_payload, - GFP_KERNEL); - if (err < 0) - return err; - - while (data_len > 0) { - len = min_t(int, data_len, max_len); - err = __mt76u_mcu_fw_send_data(dev, &buf, data + pos, - len, offset + pos); - if (err < 0) - break; - - data_len -= len; - pos += len; - usleep_range(5000, 10000); - } - mt76u_buf_free(&buf); - - return err; -} -EXPORT_SYMBOL_GPL(mt76u_mcu_fw_send_data); - int mt76u_mcu_init_rx(struct mt76_dev *dev) { struct mt76_usb *usb = &dev->usb; @@ -240,3 +45,12 @@ int mt76u_mcu_init_rx(struct mt76_dev *dev) return err; } EXPORT_SYMBOL_GPL(mt76u_mcu_init_rx); + +void mt76u_mcu_deinit(struct mt76_dev *dev) +{ + struct mt76_usb *usb = &dev->usb; + + usb_kill_urb(usb->mcu.res.urb); + mt76u_buf_free(&usb->mcu.res); +} +EXPORT_SYMBOL_GPL(mt76u_mcu_deinit); diff --git a/drivers/net/wireless/quantenna/qtnfmac/Makefile b/drivers/net/wireless/quantenna/qtnfmac/Makefile index 97f760a3d599..17cd7adb4109 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/Makefile +++ b/drivers/net/wireless/quantenna/qtnfmac/Makefile @@ -23,6 +23,7 @@ obj-$(CONFIG_QTNFMAC_PEARL_PCIE) += qtnfmac_pearl_pcie.o qtnfmac_pearl_pcie-objs += \ shm_ipc.o \ - pearl/pcie.o + pcie/pcie.o \ + pcie/pearl_pcie.o qtnfmac_pearl_pcie-$(CONFIG_DEBUG_FS) += debug.o diff --git a/drivers/net/wireless/quantenna/qtnfmac/bus.h b/drivers/net/wireless/quantenna/qtnfmac/bus.h index 
323e47cea1e2..528ca7f5e070 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/bus.h +++ b/drivers/net/wireless/quantenna/qtnfmac/bus.h @@ -20,6 +20,9 @@ #include <linux/netdevice.h> #include <linux/workqueue.h> +#include "trans.h" +#include "core.h" + #define QTNF_MAX_MAC 3 enum qtnf_fw_state { @@ -57,10 +60,8 @@ struct qtnf_bus { struct qtnf_wmac *mac[QTNF_MAX_MAC]; struct qtnf_qlink_transport trans; struct qtnf_hw_info hw_info; - char fwname[32]; struct napi_struct mux_napi; struct net_device mux_dev; - struct completion firmware_init_complete; struct workqueue_struct *workqueue; struct work_struct fw_work; struct work_struct event_work; diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c index 4aa332f4646b..51b33ec78fac 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c +++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c @@ -141,8 +141,8 @@ qtnf_change_virtual_intf(struct wiphy *wiphy, ret = qtnf_cmd_send_change_intf_type(vif, type, mac_addr); if (ret) { - pr_err("VIF%u.%u: failed to change VIF type: %d\n", - vif->mac->macid, vif->vifid, ret); + pr_err("VIF%u.%u: failed to change type to %d\n", + vif->mac->macid, vif->vifid, type); return ret; } @@ -216,7 +216,6 @@ static struct wireless_dev *qtnf_add_virtual_intf(struct wiphy *wiphy, eth_zero_addr(vif->mac_addr); eth_zero_addr(vif->bssid); vif->bss_priority = QTNF_DEF_BSS_PRIORITY; - vif->sta_state = QTNF_STA_DISCONNECTED; memset(&vif->wdev, 0, sizeof(vif->wdev)); vif->wdev.wiphy = wiphy; vif->wdev.iftype = type; @@ -229,18 +228,22 @@ static struct wireless_dev *qtnf_add_virtual_intf(struct wiphy *wiphy, if (params) mac_addr = params->macaddr; - if (qtnf_cmd_send_add_intf(vif, type, mac_addr)) { - pr_err("VIF%u.%u: failed to add VIF\n", mac->macid, vif->vifid); + ret = qtnf_cmd_send_add_intf(vif, type, mac_addr); + if (ret) { + pr_err("VIF%u.%u: failed to add VIF %pM\n", + mac->macid, vif->vifid, mac_addr); goto err_cmd; } if (!is_valid_ether_addr(vif->mac_addr)) { pr_err("VIF%u.%u: FW reported bad MAC: %pM\n", mac->macid, vif->vifid, vif->mac_addr); + ret = -EINVAL; goto err_mac; } - if (qtnf_core_net_attach(mac, vif, name, name_assign_t)) { + ret = qtnf_core_net_attach(mac, vif, name, name_assign_t); + if (ret) { pr_err("VIF%u.%u: failed to attach netdev\n", mac->macid, vif->vifid); goto err_net; @@ -256,7 +259,7 @@ err_mac: err_cmd: vif->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED; - return ERR_PTR(-EFAULT); + return ERR_PTR(ret); } static int qtnf_mgmt_set_appie(struct qtnf_vif *vif, @@ -335,12 +338,11 @@ static int qtnf_stop_ap(struct wiphy *wiphy, struct net_device *dev) qtnf_scan_done(vif->mac, true); ret = qtnf_cmd_send_stop_ap(vif); - if (ret) { + if (ret) pr_err("VIF%u.%u: failed to stop AP operation in FW\n", vif->mac->macid, vif->vifid); - netif_carrier_off(vif->netdev); - } + netif_carrier_off(vif->netdev); return ret; } @@ -478,19 +480,31 @@ qtnf_dump_station(struct wiphy *wiphy, struct net_device *dev, const struct qtnf_sta_node *sta_node; int ret; - sta_node = qtnf_sta_list_lookup_index(&vif->sta_list, idx); + switch (vif->wdev.iftype) { + case NL80211_IFTYPE_STATION: + if (idx != 0 || !vif->wdev.current_bss) + return -ENOENT; - if (unlikely(!sta_node)) - return -ENOENT; + ether_addr_copy(mac, vif->bssid); + break; + case NL80211_IFTYPE_AP: + sta_node = qtnf_sta_list_lookup_index(&vif->sta_list, idx); + if (unlikely(!sta_node)) + return -ENOENT; - ether_addr_copy(mac, sta_node->mac_addr); + ether_addr_copy(mac, sta_node->mac_addr); + break; + 
default: + return -ENOTSUPP; + } - ret = qtnf_cmd_get_sta_info(vif, sta_node->mac_addr, sinfo); + ret = qtnf_cmd_get_sta_info(vif, mac, sinfo); - if (unlikely(ret == -ENOENT)) { - qtnf_sta_list_del(vif, mac); - cfg80211_del_sta(vif->netdev, mac, GFP_KERNEL); - sinfo->filled = 0; + if (vif->wdev.iftype == NL80211_IFTYPE_AP) { + if (ret == -ENOENT) { + cfg80211_del_sta(vif->netdev, mac, GFP_KERNEL); + sinfo->filled = 0; + } } sinfo->generation = vif->generation; @@ -521,9 +535,16 @@ static int qtnf_del_key(struct wiphy *wiphy, struct net_device *dev, int ret; ret = qtnf_cmd_send_del_key(vif, key_index, pairwise, mac_addr); - if (ret) - pr_err("VIF%u.%u: failed to delete key: idx=%u pw=%u\n", - vif->mac->macid, vif->vifid, key_index, pairwise); + if (ret) { + if (ret == -ENOENT) { + pr_debug("VIF%u.%u: key index %d out of bounds\n", + vif->mac->macid, vif->vifid, key_index); + } else { + pr_err("VIF%u.%u: failed to delete key: idx=%u pw=%u\n", + vif->mac->macid, vif->vifid, + key_index, pairwise); + } + } return ret; } @@ -590,6 +611,7 @@ qtnf_del_station(struct wiphy *wiphy, struct net_device *dev, if (ret) pr_err("VIF%u.%u: failed to delete STA %pM\n", vif->mac->macid, vif->vifid, params->mac); + return ret; } @@ -597,21 +619,25 @@ static int qtnf_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request) { struct qtnf_wmac *mac = wiphy_priv(wiphy); + int ret; cancel_delayed_work_sync(&mac->scan_timeout); mac->scan_req = request; - if (qtnf_cmd_send_scan(mac)) { + ret = qtnf_cmd_send_scan(mac); + if (ret) { pr_err("MAC%u: failed to start scan\n", mac->macid); mac->scan_req = NULL; - return -EFAULT; + goto out; } + pr_debug("MAC%u: scan started\n", mac->macid); queue_delayed_work(mac->bus->workqueue, &mac->scan_timeout, QTNF_SCAN_TIMEOUT_SEC * HZ); - return 0; +out: + return ret; } static int @@ -624,9 +650,6 @@ qtnf_connect(struct wiphy *wiphy, struct net_device *dev, if (vif->wdev.iftype != NL80211_IFTYPE_STATION) return -EOPNOTSUPP; - if (vif->sta_state != QTNF_STA_DISCONNECTED) - return -EBUSY; - if (sme->bssid) ether_addr_copy(vif->bssid, sme->bssid); else @@ -634,13 +657,13 @@ qtnf_connect(struct wiphy *wiphy, struct net_device *dev, ret = qtnf_cmd_send_connect(vif, sme); if (ret) { - pr_err("VIF%u.%u: failed to connect\n", vif->mac->macid, - vif->vifid); - return ret; + pr_err("VIF%u.%u: failed to connect\n", + vif->mac->macid, vif->vifid); + goto out; } - vif->sta_state = QTNF_STA_CONNECTING; - return 0; +out: + return ret; } static int @@ -662,22 +685,18 @@ qtnf_disconnect(struct wiphy *wiphy, struct net_device *dev, goto out; } - qtnf_scan_done(mac, true); - - if (vif->sta_state == QTNF_STA_DISCONNECTED) - goto out; - ret = qtnf_cmd_send_disconnect(vif, reason_code); - if (ret) { - pr_err("VIF%u.%u: failed to disconnect\n", mac->macid, - vif->vifid); - goto out; + if (ret) + pr_err("VIF%u.%u: failed to disconnect\n", + mac->macid, vif->vifid); + + if (vif->wdev.current_bss) { + netif_carrier_off(vif->netdev); + cfg80211_disconnected(vif->netdev, reason_code, + NULL, 0, true, GFP_KERNEL); } out: - if (vif->sta_state == QTNF_STA_CONNECTING) - vif->sta_state = QTNF_STA_DISCONNECTED; - return ret; } @@ -691,11 +710,8 @@ qtnf_dump_survey(struct wiphy *wiphy, struct net_device *dev, const struct cfg80211_chan_def *chandef = &wdev->chandef; struct ieee80211_channel *chan; struct qtnf_chan_stats stats; - struct qtnf_vif *vif; int ret; - vif = qtnf_netdev_get_priv(dev); - sband = wiphy->bands[NL80211_BAND_2GHZ]; if (sband && idx >= sband->n_channels) { idx -= sband->n_channels; @@ 
-750,7 +766,6 @@ qtnf_dump_survey(struct wiphy *wiphy, struct net_device *dev, default: pr_debug("failed to get chan(%d) stats from card\n", chan->hw_value); - ret = -EINVAL; break; } @@ -773,6 +788,7 @@ qtnf_get_channel(struct wiphy *wiphy, struct wireless_dev *wdev, ret = qtnf_cmd_get_channel(vif, chandef); if (ret) { pr_err("%s: failed to get channel: %d\n", ndev->name, ret); + ret = -ENODATA; goto out; } @@ -782,6 +798,7 @@ qtnf_get_channel(struct wiphy *wiphy, struct wireless_dev *wdev, chandef->center_freq1, chandef->center_freq2, chandef->width); ret = -ENODATA; + goto out; } out: @@ -851,10 +868,8 @@ static int qtnf_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev, ret = qtnf_cmd_send_pm_set(vif, enabled ? QLINK_PM_AUTO_STANDBY : QLINK_PM_OFF, timeout); - if (ret) { + if (ret) pr_err("%s: failed to set PM mode ret=%d\n", dev->name, ret); - return ret; - } return ret; } @@ -974,9 +989,16 @@ static void qtnf_cfg80211_reg_notifier(struct wiphy *wiphy_in, ret = qtnf_cmd_reg_notify(bus, req); if (ret) { - if (ret != -EOPNOTSUPP && ret != -EALREADY) + if (ret == -EOPNOTSUPP) { + pr_warn("reg update not supported\n"); + } else if (ret == -EALREADY) { + pr_info("regulatory domain is already set to %c%c", + req->alpha2[0], req->alpha2[1]); + } else { pr_err("failed to update reg domain to %c%c\n", req->alpha2[0], req->alpha2[1]); + } + return; } @@ -1091,6 +1113,10 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac) if (hw_info->hw_capab & QLINK_HW_CAPAB_DFS_OFFLOAD) wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_DFS_OFFLOAD); + if (hw_info->hw_capab & QLINK_HW_CAPAB_SCAN_DWELL) + wiphy_ext_feature_set(wiphy, + NL80211_EXT_FEATURE_SET_SCAN_DWELL); + wiphy->probe_resp_offload = NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS | NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2; @@ -1109,6 +1135,9 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac) if (hw_info->hw_capab & QLINK_HW_CAPAB_SCAN_RANDOM_MAC_ADDR) wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR; + if (!(hw_info->hw_capab & QLINK_HW_CAPAB_OBSS_SCAN)) + wiphy->features |= NL80211_FEATURE_NEED_OBSS_SCAN; + #ifdef CONFIG_PM if (macinfo->wowlan) wiphy->wowlan = macinfo->wowlan; @@ -1123,6 +1152,15 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac) wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED; } + if (mac->macinfo.extended_capabilities_len) { + wiphy->extended_capabilities = + mac->macinfo.extended_capabilities; + wiphy->extended_capabilities_mask = + mac->macinfo.extended_capabilities_mask; + wiphy->extended_capabilities_len = + mac->macinfo.extended_capabilities_len; + } + strlcpy(wiphy->fw_version, hw_info->fw_version, sizeof(wiphy->fw_version)); wiphy->hw_version = hw_info->hw_version; @@ -1146,7 +1184,8 @@ void qtnf_netdev_updown(struct net_device *ndev, bool up) struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev); if (qtnf_cmd_send_updown_intf(vif, up)) - pr_err("failed to send up/down command to FW\n"); + pr_err("failed to send %s command to VIF%u.%u\n", + up ? 
"UP" : "DOWN", vif->mac->macid, vif->vifid); } void qtnf_virtual_intf_cleanup(struct net_device *ndev) @@ -1154,57 +1193,20 @@ void qtnf_virtual_intf_cleanup(struct net_device *ndev) struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev); struct qtnf_wmac *mac = wiphy_priv(vif->wdev.wiphy); - if (vif->wdev.iftype == NL80211_IFTYPE_STATION) { - switch (vif->sta_state) { - case QTNF_STA_DISCONNECTED: - break; - case QTNF_STA_CONNECTING: - cfg80211_connect_result(vif->netdev, - vif->bssid, NULL, 0, - NULL, 0, - WLAN_STATUS_UNSPECIFIED_FAILURE, - GFP_KERNEL); - qtnf_disconnect(vif->wdev.wiphy, ndev, - WLAN_REASON_DEAUTH_LEAVING); - break; - case QTNF_STA_CONNECTED: - cfg80211_disconnected(vif->netdev, - WLAN_REASON_DEAUTH_LEAVING, - NULL, 0, 1, GFP_KERNEL); - qtnf_disconnect(vif->wdev.wiphy, ndev, - WLAN_REASON_DEAUTH_LEAVING); - break; - } - - vif->sta_state = QTNF_STA_DISCONNECTED; - } + if (vif->wdev.iftype == NL80211_IFTYPE_STATION) + qtnf_disconnect(vif->wdev.wiphy, ndev, + WLAN_REASON_DEAUTH_LEAVING); qtnf_scan_done(mac, true); } void qtnf_cfg80211_vif_reset(struct qtnf_vif *vif) { - if (vif->wdev.iftype == NL80211_IFTYPE_STATION) { - switch (vif->sta_state) { - case QTNF_STA_CONNECTING: - cfg80211_connect_result(vif->netdev, - vif->bssid, NULL, 0, - NULL, 0, - WLAN_STATUS_UNSPECIFIED_FAILURE, - GFP_KERNEL); - break; - case QTNF_STA_CONNECTED: - cfg80211_disconnected(vif->netdev, - WLAN_REASON_DEAUTH_LEAVING, - NULL, 0, 1, GFP_KERNEL); - break; - case QTNF_STA_DISCONNECTED: - break; - } - } + if (vif->wdev.iftype == NL80211_IFTYPE_STATION) + cfg80211_disconnected(vif->netdev, WLAN_REASON_DEAUTH_LEAVING, + NULL, 0, 1, GFP_KERNEL); cfg80211_shutdown_all_interfaces(vif->wdev.wiphy); - vif->sta_state = QTNF_STA_DISCONNECTED; } void qtnf_band_init_rates(struct ieee80211_supported_band *band) diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c index ae9e77300533..bfdc1ad30c13 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/commands.c +++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c @@ -80,7 +80,6 @@ static int qtnf_cmd_resp_result_decode(enum qlink_cmd_result qcode) static int qtnf_cmd_send_with_reply(struct qtnf_bus *bus, struct sk_buff *cmd_skb, struct sk_buff **response_skb, - u16 *result_code, size_t const_resp_size, size_t *var_resp_size) { @@ -88,7 +87,8 @@ static int qtnf_cmd_send_with_reply(struct qtnf_bus *bus, const struct qlink_resp *resp; struct sk_buff *resp_skb = NULL; u16 cmd_id; - u8 mac_id, vif_id; + u8 mac_id; + u8 vif_id; int ret; cmd = (struct qlink_cmd *)cmd_skb->data; @@ -97,8 +97,11 @@ static int qtnf_cmd_send_with_reply(struct qtnf_bus *bus, vif_id = cmd->vifid; cmd->mhdr.len = cpu_to_le16(cmd_skb->len); - if (unlikely(bus->fw_state != QTNF_FW_STATE_ACTIVE && - le16_to_cpu(cmd->cmd_id) != QLINK_CMD_FW_INIT)) { + pr_debug("VIF%u.%u cmd=0x%.4X\n", mac_id, vif_id, + le16_to_cpu(cmd->cmd_id)); + + if (bus->fw_state != QTNF_FW_STATE_ACTIVE && + le16_to_cpu(cmd->cmd_id) != QLINK_CMD_FW_INIT) { pr_warn("VIF%u.%u: drop cmd 0x%.4X in fw state %d\n", mac_id, vif_id, le16_to_cpu(cmd->cmd_id), bus->fw_state); @@ -106,24 +109,16 @@ static int qtnf_cmd_send_with_reply(struct qtnf_bus *bus, return -ENODEV; } - pr_debug("VIF%u.%u cmd=0x%.4X\n", mac_id, vif_id, - le16_to_cpu(cmd->cmd_id)); - ret = qtnf_trans_send_cmd_with_resp(bus, cmd_skb, &resp_skb); - - if (unlikely(ret)) + if (ret) goto out; resp = (const struct qlink_resp *)resp_skb->data; ret = qtnf_cmd_check_reply_header(resp, cmd_id, mac_id, vif_id, 
const_resp_size); - - if (unlikely(ret)) + if (ret) goto out; - if (likely(result_code)) - *result_code = le16_to_cpu(resp->result); - /* Return length of variable part of response */ if (response_skb && var_resp_size) *var_resp_size = le16_to_cpu(resp->mhdr.len) - const_resp_size; @@ -134,14 +129,18 @@ out: else consume_skb(resp_skb); + if (!ret && resp) + return qtnf_cmd_resp_result_decode(le16_to_cpu(resp->result)); + + pr_warn("VIF%u.%u: cmd 0x%.4X failed: %d\n", + mac_id, vif_id, le16_to_cpu(cmd->cmd_id), ret); + return ret; } -static inline int qtnf_cmd_send(struct qtnf_bus *bus, - struct sk_buff *cmd_skb, - u16 *result_code) +static inline int qtnf_cmd_send(struct qtnf_bus *bus, struct sk_buff *cmd_skb) { - return qtnf_cmd_send_with_reply(bus, cmd_skb, NULL, result_code, + return qtnf_cmd_send_with_reply(bus, cmd_skb, NULL, sizeof(struct qlink_resp), NULL); } @@ -228,7 +227,6 @@ int qtnf_cmd_send_start_ap(struct qtnf_vif *vif, struct sk_buff *cmd_skb; struct qlink_cmd_start_ap *cmd; struct qlink_auth_encr *aen; - u16 res_code = QLINK_CMD_RESULT_OK; int ret; int i; @@ -329,30 +327,21 @@ int qtnf_cmd_send_start_ap(struct qtnf_vif *vif, } qtnf_bus_lock(vif->mac->bus); - - ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code); - - if (unlikely(ret)) - goto out; - - if (unlikely(res_code != QLINK_CMD_RESULT_OK)) { - pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid, - vif->vifid, res_code); - ret = -EFAULT; + ret = qtnf_cmd_send(vif->mac->bus, cmd_skb); + if (ret) goto out; - } netif_carrier_on(vif->netdev); out: qtnf_bus_unlock(vif->mac->bus); + return ret; } int qtnf_cmd_send_stop_ap(struct qtnf_vif *vif) { struct sk_buff *cmd_skb; - u16 res_code = QLINK_CMD_RESULT_OK; int ret; cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid, @@ -362,23 +351,13 @@ int qtnf_cmd_send_stop_ap(struct qtnf_vif *vif) return -ENOMEM; qtnf_bus_lock(vif->mac->bus); - - ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code); - - if (unlikely(ret)) - goto out; - - if (unlikely(res_code != QLINK_CMD_RESULT_OK)) { - pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid, - vif->vifid, res_code); - ret = -EFAULT; + ret = qtnf_cmd_send(vif->mac->bus, cmd_skb); + if (ret) goto out; - } - - netif_carrier_off(vif->netdev); out: qtnf_bus_unlock(vif->mac->bus); + return ret; } @@ -386,7 +365,6 @@ int qtnf_cmd_send_register_mgmt(struct qtnf_vif *vif, u16 frame_type, bool reg) { struct sk_buff *cmd_skb; struct qlink_cmd_mgmt_frame_register *cmd; - u16 res_code = QLINK_CMD_RESULT_OK; int ret; cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid, @@ -401,20 +379,13 @@ int qtnf_cmd_send_register_mgmt(struct qtnf_vif *vif, u16 frame_type, bool reg) cmd->frame_type = cpu_to_le16(frame_type); cmd->do_register = reg; - ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code); - - if (unlikely(ret)) - goto out; - - if (unlikely(res_code != QLINK_CMD_RESULT_OK)) { - pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid, - vif->vifid, res_code); - ret = -EFAULT; + ret = qtnf_cmd_send(vif->mac->bus, cmd_skb); + if (ret) goto out; - } out: qtnf_bus_unlock(vif->mac->bus); + return ret; } @@ -423,7 +394,6 @@ int qtnf_cmd_send_mgmt_frame(struct qtnf_vif *vif, u32 cookie, u16 flags, { struct sk_buff *cmd_skb; struct qlink_cmd_mgmt_frame_tx *cmd; - u16 res_code = QLINK_CMD_RESULT_OK; int ret; if (sizeof(*cmd) + len > QTNF_MAX_CMD_BUF_SIZE) { @@ -448,20 +418,13 @@ int qtnf_cmd_send_mgmt_frame(struct qtnf_vif *vif, u32 cookie, u16 flags, if (len && buf) qtnf_cmd_skb_put_buffer(cmd_skb, buf, len); - ret = 
qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code); - - if (unlikely(ret)) + ret = qtnf_cmd_send(vif->mac->bus, cmd_skb); + if (ret) goto out; - if (unlikely(res_code != QLINK_CMD_RESULT_OK)) { - pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid, - vif->vifid, res_code); - ret = -EFAULT; - goto out; - } - out: qtnf_bus_unlock(vif->mac->bus); + return ret; } @@ -469,7 +432,6 @@ int qtnf_cmd_send_mgmt_set_appie(struct qtnf_vif *vif, u8 frame_type, const u8 *buf, size_t len) { struct sk_buff *cmd_skb; - u16 res_code = QLINK_CMD_RESULT_OK; int ret; if (len > QTNF_MAX_CMD_BUF_SIZE) { @@ -487,21 +449,13 @@ int qtnf_cmd_send_mgmt_set_appie(struct qtnf_vif *vif, u8 frame_type, qtnf_cmd_tlv_ie_set_add(cmd_skb, frame_type, buf, len); qtnf_bus_lock(vif->mac->bus); - - ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code); - - if (unlikely(ret)) - goto out; - - if (unlikely(res_code != QLINK_CMD_RESULT_OK)) { - pr_err("VIF%u.%u frame %u: CMD failed: %u\n", vif->mac->macid, - vif->vifid, frame_type, res_code); - ret = -EFAULT; + ret = qtnf_cmd_send(vif->mac->bus, cmd_skb); + if (ret) goto out; - } out: qtnf_bus_unlock(vif->mac->bus); + return ret; } @@ -544,6 +498,9 @@ qtnf_sta_info_parse_rate(struct rate_info *rate_dst, rate_dst->flags |= RATE_INFO_FLAGS_MCS; else if (rate_src->flags & QLINK_STA_INFO_RATE_FLAG_VHT_MCS) rate_dst->flags |= RATE_INFO_FLAGS_VHT_MCS; + + if (rate_src->flags & QLINK_STA_INFO_RATE_FLAG_SHORT_GI) + rate_dst->flags |= RATE_INFO_FLAGS_SHORT_GI; } static void @@ -730,7 +687,6 @@ int qtnf_cmd_get_sta_info(struct qtnf_vif *vif, const u8 *sta_mac, struct qlink_cmd_get_sta_info *cmd; const struct qlink_resp_get_sta_info *resp; size_t var_resp_len; - u16 res_code = QLINK_CMD_RESULT_OK; int ret = 0; cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid, @@ -745,31 +701,13 @@ int qtnf_cmd_get_sta_info(struct qtnf_vif *vif, const u8 *sta_mac, ether_addr_copy(cmd->sta_addr, sta_mac); ret = qtnf_cmd_send_with_reply(vif->mac->bus, cmd_skb, &resp_skb, - &res_code, sizeof(*resp), - &var_resp_len); - - if (unlikely(ret)) - goto out; - - if (unlikely(res_code != QLINK_CMD_RESULT_OK)) { - switch (res_code) { - case QLINK_CMD_RESULT_ENOTFOUND: - pr_warn("VIF%u.%u: %pM STA not found\n", - vif->mac->macid, vif->vifid, sta_mac); - ret = -ENOENT; - break; - default: - pr_err("VIF%u.%u: can't get info for %pM: %u\n", - vif->mac->macid, vif->vifid, sta_mac, res_code); - ret = -EFAULT; - break; - } + sizeof(*resp), &var_resp_len); + if (ret) goto out; - } resp = (const struct qlink_resp_get_sta_info *)resp_skb->data; - if (unlikely(!ether_addr_equal(sta_mac, resp->sta_addr))) { + if (!ether_addr_equal(sta_mac, resp->sta_addr)) { pr_err("VIF%u.%u: wrong mac in reply: %pM != %pM\n", vif->mac->macid, vif->vifid, resp->sta_addr, sta_mac); ret = -EINVAL; @@ -795,7 +733,6 @@ static int qtnf_cmd_send_add_change_intf(struct qtnf_vif *vif, struct sk_buff *cmd_skb, *resp_skb = NULL; struct qlink_cmd_manage_intf *cmd; const struct qlink_resp_manage_intf *resp; - u16 res_code = QLINK_CMD_RESULT_OK; int ret = 0; cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid, @@ -828,17 +765,9 @@ static int qtnf_cmd_send_add_change_intf(struct qtnf_vif *vif, eth_zero_addr(cmd->intf_info.mac_addr); ret = qtnf_cmd_send_with_reply(vif->mac->bus, cmd_skb, &resp_skb, - &res_code, sizeof(*resp), NULL); - - if (unlikely(ret)) - goto out; - - ret = qtnf_cmd_resp_result_decode(res_code); - if (ret) { - pr_err("VIF%u.%u: CMD %d failed: %u\n", vif->mac->macid, - vif->vifid, cmd_type, res_code); + sizeof(*resp), 
NULL); + if (ret) goto out; - } resp = (const struct qlink_resp_manage_intf *)resp_skb->data; ether_addr_copy(vif->mac_addr, resp->intf_info.mac_addr); @@ -868,7 +797,6 @@ int qtnf_cmd_send_del_intf(struct qtnf_vif *vif) { struct sk_buff *cmd_skb; struct qlink_cmd_manage_intf *cmd; - u16 res_code = QLINK_CMD_RESULT_OK; int ret = 0; cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid, @@ -897,17 +825,9 @@ int qtnf_cmd_send_del_intf(struct qtnf_vif *vif) eth_zero_addr(cmd->intf_info.mac_addr); - ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code); - - if (unlikely(ret)) - goto out; - - if (unlikely(res_code != QLINK_CMD_RESULT_OK)) { - pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid, - vif->vifid, res_code); - ret = -EFAULT; + ret = qtnf_cmd_send(vif->mac->bus, cmd_skb); + if (ret) goto out; - } out: qtnf_bus_unlock(vif->mac->bus); @@ -1353,8 +1273,7 @@ static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac, ext_capa_mask = NULL; } - kfree(mac->macinfo.extended_capabilities); - kfree(mac->macinfo.extended_capabilities_mask); + qtnf_mac_ext_caps_free(mac); mac->macinfo.extended_capabilities = ext_capa; mac->macinfo.extended_capabilities_mask = ext_capa_mask; mac->macinfo.extended_capabilities_len = ext_capa_len; @@ -1732,7 +1651,6 @@ int qtnf_cmd_get_mac_info(struct qtnf_wmac *mac) struct sk_buff *cmd_skb, *resp_skb = NULL; const struct qlink_resp_get_mac_info *resp; size_t var_data_len; - u16 res_code = QLINK_CMD_RESULT_OK; int ret = 0; cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, QLINK_VIFID_RSVD, @@ -1742,18 +1660,11 @@ int qtnf_cmd_get_mac_info(struct qtnf_wmac *mac) return -ENOMEM; qtnf_bus_lock(mac->bus); - - ret = qtnf_cmd_send_with_reply(mac->bus, cmd_skb, &resp_skb, &res_code, + ret = qtnf_cmd_send_with_reply(mac->bus, cmd_skb, &resp_skb, sizeof(*resp), &var_data_len); - if (unlikely(ret)) + if (ret) goto out; - if (unlikely(res_code != QLINK_CMD_RESULT_OK)) { - pr_err("MAC%u: CMD failed: %u\n", mac->macid, res_code); - ret = -EFAULT; - goto out; - } - resp = (const struct qlink_resp_get_mac_info *)resp_skb->data; qtnf_cmd_resp_proc_mac_info(mac, resp); ret = qtnf_parse_variable_mac_info(mac, resp->var_info, var_data_len); @@ -1769,7 +1680,6 @@ int qtnf_cmd_get_hw_info(struct qtnf_bus *bus) { struct sk_buff *cmd_skb, *resp_skb = NULL; const struct qlink_resp_get_hw_info *resp; - u16 res_code = QLINK_CMD_RESULT_OK; int ret = 0; size_t info_len; @@ -1780,18 +1690,10 @@ int qtnf_cmd_get_hw_info(struct qtnf_bus *bus) return -ENOMEM; qtnf_bus_lock(bus); - - ret = qtnf_cmd_send_with_reply(bus, cmd_skb, &resp_skb, &res_code, + ret = qtnf_cmd_send_with_reply(bus, cmd_skb, &resp_skb, sizeof(*resp), &info_len); - - if (unlikely(ret)) - goto out; - - if (unlikely(res_code != QLINK_CMD_RESULT_OK)) { - pr_err("cmd exec failed: 0x%.4X\n", res_code); - ret = -EFAULT; + if (ret) goto out; - } resp = (const struct qlink_resp_get_hw_info *)resp_skb->data; ret = qtnf_cmd_resp_proc_hw_info(bus, resp, info_len); @@ -1810,7 +1712,6 @@ int qtnf_cmd_band_info_get(struct qtnf_wmac *mac, size_t info_len; struct qlink_cmd_band_info_get *cmd; struct qlink_resp_band_info_get *resp; - u16 res_code = QLINK_CMD_RESULT_OK; int ret = 0; u8 qband; @@ -1838,18 +1739,10 @@ int qtnf_cmd_band_info_get(struct qtnf_wmac *mac, cmd->band = qband; qtnf_bus_lock(mac->bus); - - ret = qtnf_cmd_send_with_reply(mac->bus, cmd_skb, &resp_skb, &res_code, + ret = qtnf_cmd_send_with_reply(mac->bus, cmd_skb, &resp_skb, sizeof(*resp), &info_len); - - if (unlikely(ret)) - goto out; - - if (unlikely(res_code != 
QLINK_CMD_RESULT_OK)) { - pr_err("MAC%u: CMD failed: %u\n", mac->macid, res_code); - ret = -EFAULT; + if (ret) goto out; - } resp = (struct qlink_resp_band_info_get *)resp_skb->data; if (resp->band != qband) { @@ -1873,7 +1766,6 @@ int qtnf_cmd_send_get_phy_params(struct qtnf_wmac *mac) struct sk_buff *cmd_skb, *resp_skb = NULL; size_t response_size; struct qlink_resp_phy_params *resp; - u16 res_code = QLINK_CMD_RESULT_OK; int ret = 0; cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, 0, @@ -1883,19 +1775,11 @@ int qtnf_cmd_send_get_phy_params(struct qtnf_wmac *mac) return -ENOMEM; qtnf_bus_lock(mac->bus); - - ret = qtnf_cmd_send_with_reply(mac->bus, cmd_skb, &resp_skb, &res_code, + ret = qtnf_cmd_send_with_reply(mac->bus, cmd_skb, &resp_skb, sizeof(*resp), &response_size); - - if (unlikely(ret)) + if (ret) goto out; - if (unlikely(res_code != QLINK_CMD_RESULT_OK)) { - pr_err("MAC%u: CMD failed: %u\n", mac->macid, res_code); - ret = -EFAULT; - goto out; - } - resp = (struct qlink_resp_phy_params *)resp_skb->data; ret = qtnf_cmd_resp_proc_phy_params(mac, resp->info, response_size); @@ -1910,7 +1794,6 @@ int qtnf_cmd_send_update_phy_params(struct qtnf_wmac *mac, u32 changed) { struct wiphy *wiphy = priv_to_wiphy(mac); struct sk_buff *cmd_skb; - u16 res_code = QLINK_CMD_RESULT_OK; int ret = 0; cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, 0, @@ -1931,26 +1814,19 @@ int qtnf_cmd_send_update_phy_params(struct qtnf_wmac *mac, u32 changed) qtnf_cmd_skb_put_tlv_u8(cmd_skb, QTN_TLV_ID_COVERAGE_CLASS, wiphy->coverage_class); - ret = qtnf_cmd_send(mac->bus, cmd_skb, &res_code); - - if (unlikely(ret)) - goto out; - - if (unlikely(res_code != QLINK_CMD_RESULT_OK)) { - pr_err("MAC%u: CMD failed: %u\n", mac->macid, res_code); - ret = -EFAULT; + ret = qtnf_cmd_send(mac->bus, cmd_skb); + if (ret) goto out; - } out: qtnf_bus_unlock(mac->bus); + return ret; } int qtnf_cmd_send_init_fw(struct qtnf_bus *bus) { struct sk_buff *cmd_skb; - u16 res_code = QLINK_CMD_RESULT_OK; int ret = 0; cmd_skb = qtnf_cmd_alloc_new_cmdskb(QLINK_MACID_RSVD, QLINK_VIFID_RSVD, @@ -1960,20 +1836,13 @@ int qtnf_cmd_send_init_fw(struct qtnf_bus *bus) return -ENOMEM; qtnf_bus_lock(bus); - - ret = qtnf_cmd_send(bus, cmd_skb, &res_code); - - if (unlikely(ret)) + ret = qtnf_cmd_send(bus, cmd_skb); + if (ret) goto out; - if (unlikely(res_code != QLINK_CMD_RESULT_OK)) { - pr_err("cmd exec failed: 0x%.4X\n", res_code); - ret = -EFAULT; - goto out; - } - out: qtnf_bus_unlock(bus); + return ret; } @@ -1988,9 +1857,7 @@ void qtnf_cmd_send_deinit_fw(struct qtnf_bus *bus) return; qtnf_bus_lock(bus); - - qtnf_cmd_send(bus, cmd_skb, NULL); - + qtnf_cmd_send(bus, cmd_skb); qtnf_bus_unlock(bus); } @@ -1999,7 +1866,6 @@ int qtnf_cmd_send_add_key(struct qtnf_vif *vif, u8 key_index, bool pairwise, { struct sk_buff *cmd_skb; struct qlink_cmd_add_key *cmd; - u16 res_code = QLINK_CMD_RESULT_OK; int ret = 0; cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid, @@ -2031,19 +1897,13 @@ int qtnf_cmd_send_add_key(struct qtnf_vif *vif, u8 key_index, bool pairwise, params->seq, params->seq_len); - ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code); - if (unlikely(ret)) + ret = qtnf_cmd_send(vif->mac->bus, cmd_skb); + if (ret) goto out; - if (unlikely(res_code != QLINK_CMD_RESULT_OK)) { - pr_err("VIF%u.%u: CMD failed: %u\n", - vif->mac->macid, vif->vifid, res_code); - ret = -EFAULT; - goto out; - } - out: qtnf_bus_unlock(vif->mac->bus); + return ret; } @@ -2052,7 +1912,6 @@ int qtnf_cmd_send_del_key(struct qtnf_vif *vif, u8 key_index, bool 
pairwise, { struct sk_buff *cmd_skb; struct qlink_cmd_del_key *cmd; - u16 res_code = QLINK_CMD_RESULT_OK; int ret = 0; cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid, @@ -2072,19 +1931,14 @@ int qtnf_cmd_send_del_key(struct qtnf_vif *vif, u8 key_index, bool pairwise, cmd->key_index = key_index; cmd->pairwise = pairwise; - ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code); - if (unlikely(ret)) - goto out; - if (unlikely(res_code != QLINK_CMD_RESULT_OK)) { - pr_err("VIF%u.%u: CMD failed: %u\n", - vif->mac->macid, vif->vifid, res_code); - ret = -EFAULT; + ret = qtnf_cmd_send(vif->mac->bus, cmd_skb); + if (ret) goto out; - } out: qtnf_bus_unlock(vif->mac->bus); + return ret; } @@ -2093,7 +1947,6 @@ int qtnf_cmd_send_set_default_key(struct qtnf_vif *vif, u8 key_index, { struct sk_buff *cmd_skb; struct qlink_cmd_set_def_key *cmd; - u16 res_code = QLINK_CMD_RESULT_OK; int ret = 0; cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid, @@ -2108,19 +1961,14 @@ int qtnf_cmd_send_set_default_key(struct qtnf_vif *vif, u8 key_index, cmd->key_index = key_index; cmd->unicast = unicast; cmd->multicast = multicast; - ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code); - if (unlikely(ret)) - goto out; - if (unlikely(res_code != QLINK_CMD_RESULT_OK)) { - pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid, - vif->vifid, res_code); - ret = -EFAULT; + ret = qtnf_cmd_send(vif->mac->bus, cmd_skb); + if (ret) goto out; - } out: qtnf_bus_unlock(vif->mac->bus); + return ret; } @@ -2128,7 +1976,6 @@ int qtnf_cmd_send_set_default_mgmt_key(struct qtnf_vif *vif, u8 key_index) { struct sk_buff *cmd_skb; struct qlink_cmd_set_def_mgmt_key *cmd; - u16 res_code = QLINK_CMD_RESULT_OK; int ret = 0; cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid, @@ -2141,19 +1988,14 @@ int qtnf_cmd_send_set_default_mgmt_key(struct qtnf_vif *vif, u8 key_index) cmd = (struct qlink_cmd_set_def_mgmt_key *)cmd_skb->data; cmd->key_index = key_index; - ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code); - if (unlikely(ret)) - goto out; - if (unlikely(res_code != QLINK_CMD_RESULT_OK)) { - pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid, - vif->vifid, res_code); - ret = -EFAULT; + ret = qtnf_cmd_send(vif->mac->bus, cmd_skb); + if (ret) goto out; - } out: qtnf_bus_unlock(vif->mac->bus); + return ret; } @@ -2183,7 +2025,6 @@ int qtnf_cmd_send_change_sta(struct qtnf_vif *vif, const u8 *mac, { struct sk_buff *cmd_skb; struct qlink_cmd_change_sta *cmd; - u16 res_code = QLINK_CMD_RESULT_OK; int ret = 0; cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid, @@ -2214,19 +2055,13 @@ int qtnf_cmd_send_change_sta(struct qtnf_vif *vif, const u8 *mac, goto out; } - ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code); - if (unlikely(ret)) - goto out; - - if (unlikely(res_code != QLINK_CMD_RESULT_OK)) { - pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid, - vif->vifid, res_code); - ret = -EFAULT; + ret = qtnf_cmd_send(vif->mac->bus, cmd_skb); + if (ret) goto out; - } out: qtnf_bus_unlock(vif->mac->bus); + return ret; } @@ -2235,7 +2070,6 @@ int qtnf_cmd_send_del_sta(struct qtnf_vif *vif, { struct sk_buff *cmd_skb; struct qlink_cmd_del_sta *cmd; - u16 res_code = QLINK_CMD_RESULT_OK; int ret = 0; cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid, @@ -2256,19 +2090,13 @@ int qtnf_cmd_send_del_sta(struct qtnf_vif *vif, cmd->subtype = params->subtype; cmd->reason_code = cpu_to_le16(params->reason_code); - ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code); - if 
(unlikely(ret)) + ret = qtnf_cmd_send(vif->mac->bus, cmd_skb); + if (ret) goto out; - if (unlikely(res_code != QLINK_CMD_RESULT_OK)) { - pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid, - vif->vifid, res_code); - ret = -EFAULT; - goto out; - } - out: qtnf_bus_unlock(vif->mac->bus); + return ret; } @@ -2312,7 +2140,6 @@ static void qtnf_cmd_randmac_tlv_add(struct sk_buff *cmd_skb, int qtnf_cmd_send_scan(struct qtnf_wmac *mac) { struct sk_buff *cmd_skb; - u16 res_code = QLINK_CMD_RESULT_OK; struct ieee80211_channel *sc; struct cfg80211_scan_request *scan_req = mac->scan_req; int n_channels; @@ -2370,20 +2197,28 @@ int qtnf_cmd_send_scan(struct qtnf_wmac *mac) scan_req->mac_addr_mask); } - ret = qtnf_cmd_send(mac->bus, cmd_skb, &res_code); + if (scan_req->flags & NL80211_SCAN_FLAG_FLUSH) { + pr_debug("MAC%u: flush cache before scan\n", mac->macid); - if (unlikely(ret)) - goto out; + qtnf_cmd_skb_put_tlv_tag(cmd_skb, QTN_TLV_ID_SCAN_FLUSH); + } - pr_debug("MAC%u: scan started\n", mac->macid); + if (scan_req->duration) { + pr_debug("MAC%u: %s scan duration %u\n", mac->macid, + scan_req->duration_mandatory ? "mandatory" : "max", + scan_req->duration); - if (unlikely(res_code != QLINK_CMD_RESULT_OK)) { - pr_err("MAC%u: CMD failed: %u\n", mac->macid, res_code); - ret = -EFAULT; - goto out; + qtnf_cmd_skb_put_tlv_u16(cmd_skb, QTN_TLV_ID_SCAN_DWELL, + scan_req->duration); } + + ret = qtnf_cmd_send(mac->bus, cmd_skb); + if (ret) + goto out; + out: qtnf_bus_unlock(mac->bus); + return ret; } @@ -2393,7 +2228,6 @@ int qtnf_cmd_send_connect(struct qtnf_vif *vif, struct sk_buff *cmd_skb; struct qlink_cmd_connect *cmd; struct qlink_auth_encr *aen; - u16 res_code = QLINK_CMD_RESULT_OK; int ret; int i; u32 connect_flags = 0; @@ -2474,20 +2308,13 @@ int qtnf_cmd_send_connect(struct qtnf_vif *vif, qtnf_cmd_channel_tlv_add(cmd_skb, sme->channel); qtnf_bus_lock(vif->mac->bus); - - ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code); - - if (unlikely(ret)) + ret = qtnf_cmd_send(vif->mac->bus, cmd_skb); + if (ret) goto out; - if (unlikely(res_code != QLINK_CMD_RESULT_OK)) { - pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid, - vif->vifid, res_code); - ret = -EFAULT; - goto out; - } out: qtnf_bus_unlock(vif->mac->bus); + return ret; } @@ -2495,7 +2322,6 @@ int qtnf_cmd_send_disconnect(struct qtnf_vif *vif, u16 reason_code) { struct sk_buff *cmd_skb; struct qlink_cmd_disconnect *cmd; - u16 res_code = QLINK_CMD_RESULT_OK; int ret; cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid, @@ -2509,19 +2335,13 @@ int qtnf_cmd_send_disconnect(struct qtnf_vif *vif, u16 reason_code) cmd = (struct qlink_cmd_disconnect *)cmd_skb->data; cmd->reason = cpu_to_le16(reason_code); - ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code); - - if (unlikely(ret)) + ret = qtnf_cmd_send(vif->mac->bus, cmd_skb); + if (ret) goto out; - if (unlikely(res_code != QLINK_CMD_RESULT_OK)) { - pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid, - vif->vifid, res_code); - ret = -EFAULT; - goto out; - } out: qtnf_bus_unlock(vif->mac->bus); + return ret; } @@ -2529,7 +2349,6 @@ int qtnf_cmd_send_updown_intf(struct qtnf_vif *vif, bool up) { struct sk_buff *cmd_skb; struct qlink_cmd_updown *cmd; - u16 res_code = QLINK_CMD_RESULT_OK; int ret; cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid, @@ -2542,20 +2361,13 @@ int qtnf_cmd_send_updown_intf(struct qtnf_vif *vif, bool up) cmd->if_up = !!up; qtnf_bus_lock(vif->mac->bus); - - ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code); - - if (unlikely(ret)) + ret = 
qtnf_cmd_send(vif->mac->bus, cmd_skb); + if (ret) goto out; - if (unlikely(res_code != QLINK_CMD_RESULT_OK)) { - pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid, - vif->vifid, res_code); - ret = -EFAULT; - goto out; - } out: qtnf_bus_unlock(vif->mac->bus); + return ret; } @@ -2563,7 +2375,6 @@ int qtnf_cmd_reg_notify(struct qtnf_bus *bus, struct regulatory_request *req) { struct sk_buff *cmd_skb; int ret; - u16 res_code; struct qlink_cmd_reg_notify *cmd; cmd_skb = qtnf_cmd_alloc_new_cmdskb(QLINK_MACID_RSVD, QLINK_VIFID_RSVD, @@ -2604,29 +2415,10 @@ int qtnf_cmd_reg_notify(struct qtnf_bus *bus, struct regulatory_request *req) } qtnf_bus_lock(bus); - - ret = qtnf_cmd_send(bus, cmd_skb, &res_code); + ret = qtnf_cmd_send(bus, cmd_skb); if (ret) goto out; - switch (res_code) { - case QLINK_CMD_RESULT_ENOTSUPP: - pr_warn("reg update not supported\n"); - ret = -EOPNOTSUPP; - break; - case QLINK_CMD_RESULT_EALREADY: - pr_info("regulatory domain is already set to %c%c", - req->alpha2[0], req->alpha2[1]); - ret = -EALREADY; - break; - case QLINK_CMD_RESULT_OK: - ret = 0; - break; - default: - ret = -EFAULT; - break; - } - out: qtnf_bus_unlock(bus); @@ -2640,7 +2432,6 @@ int qtnf_cmd_get_chan_stats(struct qtnf_wmac *mac, u16 channel, struct qlink_cmd_get_chan_stats *cmd; struct qlink_resp_get_chan_stats *resp; size_t var_data_len; - u16 res_code = QLINK_CMD_RESULT_OK; int ret = 0; cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, QLINK_VIFID_RSVD, @@ -2654,25 +2445,10 @@ int qtnf_cmd_get_chan_stats(struct qtnf_wmac *mac, u16 channel, cmd = (struct qlink_cmd_get_chan_stats *)cmd_skb->data; cmd->channel = cpu_to_le16(channel); - ret = qtnf_cmd_send_with_reply(mac->bus, cmd_skb, &resp_skb, &res_code, + ret = qtnf_cmd_send_with_reply(mac->bus, cmd_skb, &resp_skb, sizeof(*resp), &var_data_len); - if (unlikely(ret)) { - qtnf_bus_unlock(mac->bus); - return ret; - } - - if (unlikely(res_code != QLINK_CMD_RESULT_OK)) { - switch (res_code) { - case QLINK_CMD_RESULT_ENOTFOUND: - ret = -ENOENT; - break; - default: - pr_err("cmd exec failed: 0x%.4X\n", res_code); - ret = -EFAULT; - break; - } + if (ret) goto out; - } resp = (struct qlink_resp_get_chan_stats *)resp_skb->data; ret = qtnf_cmd_resp_proc_chan_stat_info(stats, resp->info, @@ -2681,6 +2457,7 @@ int qtnf_cmd_get_chan_stats(struct qtnf_wmac *mac, u16 channel, out: qtnf_bus_unlock(mac->bus); consume_skb(resp_skb); + return ret; } @@ -2690,7 +2467,6 @@ int qtnf_cmd_send_chan_switch(struct qtnf_vif *vif, struct qtnf_wmac *mac = vif->mac; struct qlink_cmd_chan_switch *cmd; struct sk_buff *cmd_skb; - u16 res_code = QLINK_CMD_RESULT_OK; int ret; cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, vif->vifid, @@ -2707,32 +2483,13 @@ int qtnf_cmd_send_chan_switch(struct qtnf_vif *vif, cmd->block_tx = params->block_tx; cmd->beacon_count = params->count; - ret = qtnf_cmd_send(mac->bus, cmd_skb, &res_code); - - if (unlikely(ret)) + ret = qtnf_cmd_send(mac->bus, cmd_skb); + if (ret) goto out; - switch (res_code) { - case QLINK_CMD_RESULT_OK: - ret = 0; - break; - case QLINK_CMD_RESULT_ENOTFOUND: - ret = -ENOENT; - break; - case QLINK_CMD_RESULT_ENOTSUPP: - ret = -EOPNOTSUPP; - break; - case QLINK_CMD_RESULT_EALREADY: - ret = -EALREADY; - break; - case QLINK_CMD_RESULT_INVALID: - default: - ret = -EFAULT; - break; - } - out: qtnf_bus_unlock(mac->bus); + return ret; } @@ -2742,7 +2499,6 @@ int qtnf_cmd_get_channel(struct qtnf_vif *vif, struct cfg80211_chan_def *chdef) const struct qlink_resp_channel_get *resp; struct sk_buff *cmd_skb; struct sk_buff *resp_skb = NULL; - 
u16 res_code = QLINK_CMD_RESULT_OK; int ret; cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid, @@ -2752,25 +2508,18 @@ int qtnf_cmd_get_channel(struct qtnf_vif *vif, struct cfg80211_chan_def *chdef) return -ENOMEM; qtnf_bus_lock(bus); - - ret = qtnf_cmd_send_with_reply(bus, cmd_skb, &resp_skb, &res_code, + ret = qtnf_cmd_send_with_reply(bus, cmd_skb, &resp_skb, sizeof(*resp), NULL); - - qtnf_bus_unlock(bus); - - if (unlikely(ret)) + if (ret) goto out; - if (unlikely(res_code != QLINK_CMD_RESULT_OK)) { - ret = -ENODATA; - goto out; - } - resp = (const struct qlink_resp_channel_get *)resp_skb->data; qlink_chandef_q2cfg(priv_to_wiphy(vif->mac), &resp->chan, chdef); out: + qtnf_bus_unlock(bus); consume_skb(resp_skb); + return ret; } @@ -2782,7 +2531,6 @@ int qtnf_cmd_start_cac(const struct qtnf_vif *vif, struct sk_buff *cmd_skb; struct qlink_cmd_start_cac *cmd; int ret; - u16 res_code; cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid, QLINK_CMD_START_CAC, @@ -2795,19 +2543,12 @@ int qtnf_cmd_start_cac(const struct qtnf_vif *vif, qlink_chandef_cfg2q(chdef, &cmd->chan); qtnf_bus_lock(bus); - ret = qtnf_cmd_send(bus, cmd_skb, &res_code); - qtnf_bus_unlock(bus); - + ret = qtnf_cmd_send(bus, cmd_skb); if (ret) - return ret; + goto out; - switch (res_code) { - case QLINK_CMD_RESULT_OK: - break; - default: - ret = -EOPNOTSUPP; - break; - } +out: + qtnf_bus_unlock(bus); return ret; } @@ -2819,7 +2560,6 @@ int qtnf_cmd_set_mac_acl(const struct qtnf_vif *vif, struct sk_buff *cmd_skb; struct qlink_tlv_hdr *tlv; size_t acl_size = qtnf_cmd_acl_data_size(params); - u16 res_code; int ret; cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid, @@ -2834,22 +2574,12 @@ int qtnf_cmd_set_mac_acl(const struct qtnf_vif *vif, qlink_acl_data_cfg2q(params, (struct qlink_acl_data *)tlv->val); qtnf_bus_lock(bus); - ret = qtnf_cmd_send(bus, cmd_skb, &res_code); - qtnf_bus_unlock(bus); - - if (unlikely(ret)) - return ret; + ret = qtnf_cmd_send(bus, cmd_skb); + if (ret) + goto out; - switch (res_code) { - case QLINK_CMD_RESULT_OK: - break; - case QLINK_CMD_RESULT_INVALID: - ret = -EINVAL; - break; - default: - ret = -EOPNOTSUPP; - break; - } +out: + qtnf_bus_unlock(bus); return ret; } @@ -2858,7 +2588,6 @@ int qtnf_cmd_send_pm_set(const struct qtnf_vif *vif, u8 pm_mode, int timeout) { struct qtnf_bus *bus = vif->mac->bus; struct sk_buff *cmd_skb; - u16 res_code = QLINK_CMD_RESULT_OK; struct qlink_cmd_pm_set *cmd; int ret = 0; @@ -2873,18 +2602,13 @@ int qtnf_cmd_send_pm_set(const struct qtnf_vif *vif, u8 pm_mode, int timeout) qtnf_bus_lock(bus); - ret = qtnf_cmd_send(bus, cmd_skb, &res_code); - - if (unlikely(ret)) + ret = qtnf_cmd_send(bus, cmd_skb); + if (ret) goto out; - if (unlikely(res_code != QLINK_CMD_RESULT_OK)) { - pr_err("cmd exec failed: 0x%.4X\n", res_code); - ret = -EFAULT; - } - out: qtnf_bus_unlock(bus); + return ret; } @@ -2893,7 +2617,6 @@ int qtnf_cmd_send_wowlan_set(const struct qtnf_vif *vif, { struct qtnf_bus *bus = vif->mac->bus; struct sk_buff *cmd_skb; - u16 res_code = QLINK_CMD_RESULT_OK; struct qlink_cmd_wowlan_set *cmd; u32 triggers = 0; int count = 0; @@ -2929,16 +2652,10 @@ int qtnf_cmd_send_wowlan_set(const struct qtnf_vif *vif, cmd->triggers = cpu_to_le32(triggers); - ret = qtnf_cmd_send(bus, cmd_skb, &res_code); - - if (unlikely(ret)) + ret = qtnf_cmd_send(bus, cmd_skb); + if (ret) goto out; - if (unlikely(res_code != QLINK_CMD_RESULT_OK)) { - pr_err("cmd exec failed: 0x%.4X\n", res_code); - ret = -EFAULT; - } - out: qtnf_bus_unlock(bus); return ret; 
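The commands.c hunks above all apply one transformation: instead of every caller fetching a raw qlink u16 result code and hand-translating it into an errno, qtnf_cmd_send_with_reply() now decodes resp->result itself through qtnf_cmd_resp_result_decode() and returns an ordinary errno, so each call site collapses to "ret = qtnf_cmd_send(...); if (ret) goto out;". The decode helper's body is not part of this diff; a minimal sketch of the mapping, reassembled from the per-command switch statements deleted above (the exact table in commands.c may differ), is:

	/*
	 * Map a firmware qlink result code onto a kernel errno.
	 * Illustrative reconstruction from the switches removed above;
	 * assumes the enum qlink_cmd_result values declared in qlink.h.
	 */
	static int qtnf_cmd_resp_result_decode(enum qlink_cmd_result qcode)
	{
		switch (qcode) {
		case QLINK_CMD_RESULT_OK:
			return 0;
		case QLINK_CMD_RESULT_INVALID:
			return -EINVAL;
		case QLINK_CMD_RESULT_ENOTSUPP:
			return -EOPNOTSUPP;
		case QLINK_CMD_RESULT_ENOTFOUND:
			return -ENOENT;
		case QLINK_CMD_RESULT_EALREADY:
			return -EALREADY;
		default:
			return -EFAULT;
		}
	}

Centralizing the table keeps errno selection consistent (previously qtnf_cmd_get_sta_info turned ENOTFOUND into -ENOENT while most other callers collapsed every failure to -EFAULT), and the single pr_warn in qtnf_cmd_send_with_reply() replaces the near-identical pr_err calls removed from each command.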
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.c b/drivers/net/wireless/quantenna/qtnfmac/core.c index 19abbc4e23e0..5d18a4a917c9 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/core.c +++ b/drivers/net/wireless/quantenna/qtnfmac/core.c @@ -304,6 +304,19 @@ void qtnf_mac_iface_comb_free(struct qtnf_wmac *mac) } } +void qtnf_mac_ext_caps_free(struct qtnf_wmac *mac) +{ + if (mac->macinfo.extended_capabilities_len) { + kfree(mac->macinfo.extended_capabilities); + mac->macinfo.extended_capabilities = NULL; + + kfree(mac->macinfo.extended_capabilities_mask); + mac->macinfo.extended_capabilities_mask = NULL; + + mac->macinfo.extended_capabilities_len = 0; + } +} + static void qtnf_vif_reset_handler(struct work_struct *work) { struct qtnf_vif *vif = container_of(work, struct qtnf_vif, reset_work); @@ -370,6 +383,7 @@ static void qtnf_mac_scan_timeout(struct work_struct *work) static struct qtnf_wmac *qtnf_core_mac_alloc(struct qtnf_bus *bus, unsigned int macid) { + struct qtnf_vif *vif; struct wiphy *wiphy; struct qtnf_wmac *mac; unsigned int i; @@ -382,18 +396,20 @@ static struct qtnf_wmac *qtnf_core_mac_alloc(struct qtnf_bus *bus, mac->macid = macid; mac->bus = bus; + mutex_init(&mac->mac_lock); + INIT_DELAYED_WORK(&mac->scan_timeout, qtnf_mac_scan_timeout); for (i = 0; i < QTNF_MAX_INTF; i++) { - memset(&mac->iflist[i], 0, sizeof(struct qtnf_vif)); - mac->iflist[i].wdev.iftype = NL80211_IFTYPE_UNSPECIFIED; - mac->iflist[i].mac = mac; - mac->iflist[i].vifid = i; - qtnf_sta_list_init(&mac->iflist[i].sta_list); - mutex_init(&mac->mac_lock); - INIT_DELAYED_WORK(&mac->scan_timeout, qtnf_mac_scan_timeout); - mac->iflist[i].stats64 = - netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); - if (!mac->iflist[i].stats64) + vif = &mac->iflist[i]; + + memset(vif, 0, sizeof(*vif)); + vif->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED; + vif->mac = mac; + vif->vifid = i; + qtnf_sta_list_init(&vif->sta_list); + + vif->stats64 = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); + if (!vif->stats64) pr_warn("VIF%u.%u: per cpu stats allocation failed\n", macid, i); } @@ -493,8 +509,7 @@ static void qtnf_core_mac_detach(struct qtnf_bus *bus, unsigned int macid) } qtnf_mac_iface_comb_free(mac); - kfree(mac->macinfo.extended_capabilities); - kfree(mac->macinfo.extended_capabilities_mask); + qtnf_mac_ext_caps_free(mac); kfree(mac->macinfo.wowlan); wiphy_free(wiphy); bus->mac[macid] = NULL; diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.h b/drivers/net/wireless/quantenna/qtnfmac/core.h index a1e338a1f055..293055049caa 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/core.h +++ b/drivers/net/wireless/quantenna/qtnfmac/core.h @@ -64,12 +64,6 @@ struct qtnf_sta_list { atomic_t size; }; -enum qtnf_sta_state { - QTNF_STA_DISCONNECTED, - QTNF_STA_CONNECTING, - QTNF_STA_CONNECTED -}; - struct qtnf_vif { struct wireless_dev wdev; u8 bssid[ETH_ALEN]; @@ -77,7 +71,6 @@ struct qtnf_vif { u8 vifid; u8 bss_priority; u8 bss_status; - enum qtnf_sta_state sta_state; u16 mgmt_frames_bitmask; struct net_device *netdev; struct qtnf_wmac *mac; @@ -151,6 +144,7 @@ struct qtnf_hw_info { struct qtnf_vif *qtnf_mac_get_free_vif(struct qtnf_wmac *mac); struct qtnf_vif *qtnf_mac_get_base_vif(struct qtnf_wmac *mac); void qtnf_mac_iface_comb_free(struct qtnf_wmac *mac); +void qtnf_mac_ext_caps_free(struct qtnf_wmac *mac); struct wiphy *qtnf_wiphy_allocate(struct qtnf_bus *bus); int qtnf_core_net_attach(struct qtnf_wmac *mac, struct qtnf_vif *priv, const char *name, unsigned char name_assign_type); diff --git 
a/drivers/net/wireless/quantenna/qtnfmac/event.c b/drivers/net/wireless/quantenna/qtnfmac/event.c index 68da81bec4e9..8b542b431b75 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/event.c +++ b/drivers/net/wireless/quantenna/qtnfmac/event.c @@ -171,24 +171,14 @@ qtnf_event_handle_bss_join(struct qtnf_vif *vif, return -EPROTO; } - if (vif->sta_state != QTNF_STA_CONNECTING) { - pr_err("VIF%u.%u: BSS_JOIN event when STA is not connecting\n", - vif->mac->macid, vif->vifid); - return -EPROTO; - } - pr_debug("VIF%u.%u: BSSID:%pM\n", vif->mac->macid, vif->vifid, join_info->bssid); cfg80211_connect_result(vif->netdev, join_info->bssid, NULL, 0, NULL, 0, le16_to_cpu(join_info->status), GFP_KERNEL); - if (le16_to_cpu(join_info->status) == WLAN_STATUS_SUCCESS) { - vif->sta_state = QTNF_STA_CONNECTED; + if (le16_to_cpu(join_info->status) == WLAN_STATUS_SUCCESS) netif_carrier_on(vif->netdev); - } else { - vif->sta_state = QTNF_STA_DISCONNECTED; - } return 0; } @@ -211,16 +201,10 @@ qtnf_event_handle_bss_leave(struct qtnf_vif *vif, return -EPROTO; } - if (vif->sta_state != QTNF_STA_CONNECTED) - pr_warn("VIF%u.%u: BSS_LEAVE event when STA is not connected\n", - vif->mac->macid, vif->vifid); - pr_debug("VIF%u.%u: disconnected\n", vif->mac->macid, vif->vifid); cfg80211_disconnected(vif->netdev, le16_to_cpu(leave_info->reason), NULL, 0, 0, GFP_KERNEL); - - vif->sta_state = QTNF_STA_DISCONNECTED; netif_carrier_off(vif->netdev); return 0; diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c new file mode 100644 index 000000000000..16795dbe475b --- /dev/null +++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c @@ -0,0 +1,392 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2018 Quantenna Communications, Inc. All rights reserved. 
*/ + +#include <linux/printk.h> +#include <linux/pci.h> +#include <linux/spinlock.h> +#include <linux/mutex.h> +#include <linux/netdevice.h> +#include <linux/seq_file.h> +#include <linux/workqueue.h> +#include <linux/completion.h> + +#include "pcie_priv.h" +#include "bus.h" +#include "shm_ipc.h" +#include "core.h" +#include "debug.h" + +#undef pr_fmt +#define pr_fmt(fmt) "qtnf_pcie: %s: " fmt, __func__ + +#define QTN_SYSCTL_BAR 0 +#define QTN_SHMEM_BAR 2 +#define QTN_DMA_BAR 3 + +int qtnf_pcie_control_tx(struct qtnf_bus *bus, struct sk_buff *skb) +{ + struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus); + int ret; + + ret = qtnf_shm_ipc_send(&priv->shm_ipc_ep_in, skb->data, skb->len); + + if (ret == -ETIMEDOUT) { + pr_err("EP firmware is dead\n"); + bus->fw_state = QTNF_FW_STATE_EP_DEAD; + } + + return ret; +} + +int qtnf_pcie_alloc_skb_array(struct qtnf_pcie_bus_priv *priv) +{ + struct sk_buff **vaddr; + int len; + + len = priv->tx_bd_num * sizeof(*priv->tx_skb) + + priv->rx_bd_num * sizeof(*priv->rx_skb); + vaddr = devm_kzalloc(&priv->pdev->dev, len, GFP_KERNEL); + + if (!vaddr) + return -ENOMEM; + + priv->tx_skb = vaddr; + + vaddr += priv->tx_bd_num; + priv->rx_skb = vaddr; + + return 0; +} + +void qtnf_pcie_bringup_fw_async(struct qtnf_bus *bus) +{ + struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus); + struct pci_dev *pdev = priv->pdev; + + get_device(&pdev->dev); + schedule_work(&bus->fw_work); +} + +static int qtnf_dbg_mps_show(struct seq_file *s, void *data) +{ + struct qtnf_bus *bus = dev_get_drvdata(s->private); + struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus); + + seq_printf(s, "%d\n", priv->mps); + + return 0; +} + +static int qtnf_dbg_msi_show(struct seq_file *s, void *data) +{ + struct qtnf_bus *bus = dev_get_drvdata(s->private); + struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus); + + seq_printf(s, "%u\n", priv->msi_enabled); + + return 0; +} + +static int qtnf_dbg_shm_stats(struct seq_file *s, void *data) +{ + struct qtnf_bus *bus = dev_get_drvdata(s->private); + struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus); + + seq_printf(s, "shm_ipc_ep_in.tx_packet_count(%zu)\n", + priv->shm_ipc_ep_in.tx_packet_count); + seq_printf(s, "shm_ipc_ep_in.rx_packet_count(%zu)\n", + priv->shm_ipc_ep_in.rx_packet_count); + seq_printf(s, "shm_ipc_ep_out.tx_packet_count(%zu)\n", + priv->shm_ipc_ep_out.tx_packet_count); + seq_printf(s, "shm_ipc_ep_out.rx_packet_count(%zu)\n", + priv->shm_ipc_ep_out.rx_packet_count); + + return 0; +} + +void qtnf_pcie_fw_boot_done(struct qtnf_bus *bus, bool boot_success, + const char *drv_name) +{ + struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus); + struct pci_dev *pdev = priv->pdev; + int ret; + + if (boot_success) { + bus->fw_state = QTNF_FW_STATE_FW_DNLD_DONE; + + ret = qtnf_core_attach(bus); + if (ret) { + pr_err("failed to attach core\n"); + boot_success = false; + } + } + + if (boot_success) { + qtnf_debugfs_init(bus, drv_name); + qtnf_debugfs_add_entry(bus, "mps", qtnf_dbg_mps_show); + qtnf_debugfs_add_entry(bus, "msi_enabled", qtnf_dbg_msi_show); + qtnf_debugfs_add_entry(bus, "shm_stats", qtnf_dbg_shm_stats); + } else { + bus->fw_state = QTNF_FW_STATE_DETACHED; + } + + put_device(&pdev->dev); +} + +static void qtnf_tune_pcie_mps(struct qtnf_pcie_bus_priv *priv) +{ + struct pci_dev *pdev = priv->pdev; + struct pci_dev *parent; + int mps_p, mps_o, mps_m, mps; + int ret; + + /* current mps */ + mps_o = pcie_get_mps(pdev); + + /* maximum supported mps */ + mps_m = 128 << pdev->pcie_mpss; + + /* suggested new mps value */ + mps = mps_m; + + if 
(pdev->bus && pdev->bus->self) { + /* parent (bus) mps */ + parent = pdev->bus->self; + + if (pci_is_pcie(parent)) { + mps_p = pcie_get_mps(parent); + mps = min(mps_m, mps_p); + } + } + + ret = pcie_set_mps(pdev, mps); + if (ret) { + pr_err("failed to set mps to %d, keep using current %d\n", + mps, mps_o); + priv->mps = mps_o; + return; + } + + pr_debug("set mps to %d (was %d, max %d)\n", mps, mps_o, mps_m); + priv->mps = mps; +} + +static void qtnf_pcie_init_irq(struct qtnf_pcie_bus_priv *priv, bool use_msi) +{ + struct pci_dev *pdev = priv->pdev; + + /* fall back to legacy INTx interrupts by default */ + priv->msi_enabled = 0; + + /* check if MSI capability is available */ + if (use_msi) { + if (!pci_enable_msi(pdev)) { + pr_debug("enabled MSI interrupt\n"); + priv->msi_enabled = 1; + } else { + pr_warn("failed to enable MSI interrupts\n"); + } + } + + if (!priv->msi_enabled) { + pr_warn("legacy PCIE interrupts enabled\n"); + pci_intx(pdev, 1); + } +} + +static void __iomem *qtnf_map_bar(struct qtnf_pcie_bus_priv *priv, u8 index) +{ + void __iomem *vaddr; + dma_addr_t busaddr; + size_t len; + int ret; + + ret = pcim_iomap_regions(priv->pdev, 1 << index, "qtnfmac_pcie"); + if (ret) + return IOMEM_ERR_PTR(ret); + + busaddr = pci_resource_start(priv->pdev, index); + len = pci_resource_len(priv->pdev, index); + vaddr = pcim_iomap_table(priv->pdev)[index]; + if (!vaddr) + return IOMEM_ERR_PTR(-ENOMEM); + + pr_debug("BAR%u vaddr=0x%p busaddr=%pad len=%u\n", + index, vaddr, &busaddr, (int)len); + + return vaddr; +} + +static int qtnf_pcie_init_memory(struct qtnf_pcie_bus_priv *priv) +{ + int ret = -ENOMEM; + + priv->sysctl_bar = qtnf_map_bar(priv, QTN_SYSCTL_BAR); + if (IS_ERR(priv->sysctl_bar)) { + pr_err("failed to map BAR%u\n", QTN_SYSCTL_BAR); + return ret; + } + + priv->dmareg_bar = qtnf_map_bar(priv, QTN_DMA_BAR); + if (IS_ERR(priv->dmareg_bar)) { + pr_err("failed to map BAR%u\n", QTN_DMA_BAR); + return ret; + } + + priv->epmem_bar = qtnf_map_bar(priv, QTN_SHMEM_BAR); + if (IS_ERR(priv->epmem_bar)) { + pr_err("failed to map BAR%u\n", QTN_SHMEM_BAR); + return ret; + } + + return 0; +} + +static void qtnf_pcie_control_rx_callback(void *arg, const u8 __iomem *buf, + size_t len) +{ + struct qtnf_pcie_bus_priv *priv = arg; + struct qtnf_bus *bus = pci_get_drvdata(priv->pdev); + struct sk_buff *skb; + + if (unlikely(len == 0)) { + pr_warn("zero length packet received\n"); + return; + } + + skb = __dev_alloc_skb(len, GFP_KERNEL); + + if (unlikely(!skb)) { + pr_err("failed to allocate skb\n"); + return; + } + + memcpy_fromio(skb_put(skb, len), buf, len); + + qtnf_trans_handle_rx_ctl_packet(bus, skb); +} + +void qtnf_pcie_init_shm_ipc(struct qtnf_pcie_bus_priv *priv, + struct qtnf_shm_ipc_region __iomem *ipc_tx_reg, + struct qtnf_shm_ipc_region __iomem *ipc_rx_reg, + const struct qtnf_shm_ipc_int *ipc_int) +{ + const struct qtnf_shm_ipc_rx_callback rx_callback = { + qtnf_pcie_control_rx_callback, priv }; + + qtnf_shm_ipc_init(&priv->shm_ipc_ep_in, QTNF_SHM_IPC_OUTBOUND, + ipc_tx_reg, priv->workqueue, + ipc_int, &rx_callback); + qtnf_shm_ipc_init(&priv->shm_ipc_ep_out, QTNF_SHM_IPC_INBOUND, + ipc_rx_reg, priv->workqueue, + ipc_int, &rx_callback); +} + +int qtnf_pcie_probe(struct pci_dev *pdev, size_t priv_size, + const struct qtnf_bus_ops *bus_ops, u64 dma_mask, + bool use_msi) +{ + struct qtnf_pcie_bus_priv *pcie_priv; + struct qtnf_bus *bus; + int ret; + + bus = devm_kzalloc(&pdev->dev, + sizeof(*bus) + priv_size, GFP_KERNEL); + if (!bus) + return -ENOMEM; + + pcie_priv = get_bus_priv(bus); + + 
pci_set_drvdata(pdev, bus); + bus->bus_ops = bus_ops; + bus->dev = &pdev->dev; + bus->fw_state = QTNF_FW_STATE_RESET; + pcie_priv->pdev = pdev; + pcie_priv->tx_stopped = 0; + + mutex_init(&bus->bus_lock); + spin_lock_init(&pcie_priv->tx_lock); + spin_lock_init(&pcie_priv->tx_reclaim_lock); + + pcie_priv->tx_full_count = 0; + pcie_priv->tx_done_count = 0; + pcie_priv->pcie_irq_count = 0; + pcie_priv->tx_reclaim_done = 0; + pcie_priv->tx_reclaim_req = 0; + + pcie_priv->workqueue = create_singlethread_workqueue("QTNF_PCIE"); + if (!pcie_priv->workqueue) { + pr_err("failed to alloc bus workqueue\n"); + ret = -ENODEV; + goto err_init; + } + + init_dummy_netdev(&bus->mux_dev); + + if (!pci_is_pcie(pdev)) { + pr_err("device %s is not PCI Express\n", pci_name(pdev)); + ret = -EIO; + goto err_base; + } + + qtnf_tune_pcie_mps(pcie_priv); + + ret = pcim_enable_device(pdev); + if (ret) { + pr_err("failed to init PCI device %x\n", pdev->device); + goto err_base; + } else { + pr_debug("successful init of PCI device %x\n", pdev->device); + } + + ret = dma_set_mask_and_coherent(&pdev->dev, dma_mask); + if (ret) { + pr_err("PCIE DMA coherent mask init failed\n"); + goto err_base; + } + + pci_set_master(pdev); + qtnf_pcie_init_irq(pcie_priv, use_msi); + + ret = qtnf_pcie_init_memory(pcie_priv); + if (ret < 0) { + pr_err("PCIE memory init failed\n"); + goto err_base; + } + + pci_save_state(pdev); + + return 0; + +err_base: + flush_workqueue(pcie_priv->workqueue); + destroy_workqueue(pcie_priv->workqueue); +err_init: + pci_set_drvdata(pdev, NULL); + + return ret; +} + +static void qtnf_pcie_free_shm_ipc(struct qtnf_pcie_bus_priv *priv) +{ + qtnf_shm_ipc_free(&priv->shm_ipc_ep_in); + qtnf_shm_ipc_free(&priv->shm_ipc_ep_out); +} + +void qtnf_pcie_remove(struct qtnf_bus *bus, struct qtnf_pcie_bus_priv *priv) +{ + cancel_work_sync(&bus->fw_work); + + if (bus->fw_state == QTNF_FW_STATE_ACTIVE || + bus->fw_state == QTNF_FW_STATE_EP_DEAD) + qtnf_core_detach(bus); + + netif_napi_del(&bus->mux_napi); + flush_workqueue(priv->workqueue); + destroy_workqueue(priv->workqueue); + tasklet_kill(&priv->reclaim_tq); + + qtnf_pcie_free_shm_ipc(priv); + qtnf_debugfs_remove(bus); + pci_set_drvdata(priv->pdev, NULL); +} diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie_priv.h b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie_priv.h new file mode 100644 index 000000000000..5c70fb4c0f92 --- /dev/null +++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie_priv.h @@ -0,0 +1,85 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2018 Quantenna Communications, Inc. All rights reserved. 
*/ + +#ifndef _QTN_FMAC_PCIE_H_ +#define _QTN_FMAC_PCIE_H_ + +#include <linux/pci.h> +#include <linux/spinlock.h> +#include <linux/io.h> +#include <linux/skbuff.h> +#include <linux/workqueue.h> +#include <linux/interrupt.h> + +#include "shm_ipc.h" +#include "bus.h" + +#define SKB_BUF_SIZE 2048 + +#define QTN_FW_DL_TIMEOUT_MS 3000 +#define QTN_FW_QLINK_TIMEOUT_MS 30000 +#define QTN_EP_RESET_WAIT_MS 1000 + +struct qtnf_pcie_bus_priv { + struct pci_dev *pdev; + + spinlock_t tx_reclaim_lock; + spinlock_t tx_lock; + int mps; + + struct workqueue_struct *workqueue; + struct tasklet_struct reclaim_tq; + + void __iomem *sysctl_bar; + void __iomem *epmem_bar; + void __iomem *dmareg_bar; + + struct qtnf_shm_ipc shm_ipc_ep_in; + struct qtnf_shm_ipc shm_ipc_ep_out; + + u16 tx_bd_num; + u16 rx_bd_num; + + struct sk_buff **tx_skb; + struct sk_buff **rx_skb; + + u32 rx_bd_w_index; + u32 rx_bd_r_index; + + u32 tx_bd_w_index; + u32 tx_bd_r_index; + + /* diagnostics stats */ + u32 pcie_irq_count; + u32 tx_full_count; + u32 tx_done_count; + u32 tx_reclaim_done; + u32 tx_reclaim_req; + + u8 msi_enabled; + u8 tx_stopped; +}; + +int qtnf_pcie_control_tx(struct qtnf_bus *bus, struct sk_buff *skb); +int qtnf_pcie_alloc_skb_array(struct qtnf_pcie_bus_priv *priv); +void qtnf_pcie_bringup_fw_async(struct qtnf_bus *bus); +void qtnf_pcie_fw_boot_done(struct qtnf_bus *bus, bool boot_success, + const char *drv_name); +void qtnf_pcie_init_shm_ipc(struct qtnf_pcie_bus_priv *priv, + struct qtnf_shm_ipc_region __iomem *ipc_tx_reg, + struct qtnf_shm_ipc_region __iomem *ipc_rx_reg, + const struct qtnf_shm_ipc_int *ipc_int); +int qtnf_pcie_probe(struct pci_dev *pdev, size_t priv_size, + const struct qtnf_bus_ops *bus_ops, u64 dma_mask, + bool use_msi); +void qtnf_pcie_remove(struct qtnf_bus *bus, struct qtnf_pcie_bus_priv *priv); + +static inline void qtnf_non_posted_write(u32 val, void __iomem *basereg) +{ + writel(val, basereg); + + /* flush posted write */ + readl(basereg); +} + +#endif /* _QTN_FMAC_PCIE_H_ */ diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c new file mode 100644 index 000000000000..5aca12a51fe3 --- /dev/null +++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c @@ -0,0 +1,1262 @@ +/* + * Copyright (c) 2015-2016 Quantenna Communications, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/firmware.h> +#include <linux/pci.h> +#include <linux/vmalloc.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/sched.h> +#include <linux/completion.h> +#include <linux/crc32.h> +#include <linux/spinlock.h> +#include <linux/circ_buf.h> +#include <linux/log2.h> + +#include "pcie_priv.h" +#include "pearl_pcie_regs.h" +#include "pearl_pcie_ipc.h" +#include "qtn_hw_ids.h" +#include "core.h" +#include "bus.h" +#include "shm_ipc.h" +#include "debug.h" + +static bool use_msi = true; +module_param(use_msi, bool, 0644); +MODULE_PARM_DESC(use_msi, "set 0 to use legacy interrupt"); + +static unsigned int tx_bd_size_param = 32; +module_param(tx_bd_size_param, uint, 0644); +MODULE_PARM_DESC(tx_bd_size_param, "Tx descriptors queue size, power of two"); + +static unsigned int rx_bd_size_param = 256; +module_param(rx_bd_size_param, uint, 0644); +MODULE_PARM_DESC(rx_bd_size_param, "Rx descriptors queue size, power of two"); + +static u8 flashboot = 1; +module_param(flashboot, byte, 0644); +MODULE_PARM_DESC(flashboot, "set to 0 to use FW binary file on FS"); + +#define DRV_NAME "qtnfmac_pearl_pcie" + +struct qtnf_pearl_bda { + __le16 bda_len; + __le16 bda_version; + __le32 bda_pci_endian; + __le32 bda_ep_state; + __le32 bda_rc_state; + __le32 bda_dma_mask; + __le32 bda_msi_addr; + __le32 bda_flashsz; + u8 bda_boardname[PCIE_BDA_NAMELEN]; + __le32 bda_rc_msi_enabled; + u8 bda_hhbm_list[PCIE_HHBM_MAX_SIZE]; + __le32 bda_dsbw_start_index; + __le32 bda_dsbw_end_index; + __le32 bda_dsbw_total_bytes; + __le32 bda_rc_tx_bd_base; + __le32 bda_rc_tx_bd_num; + u8 bda_pcie_mac[QTN_ENET_ADDR_LENGTH]; + struct qtnf_shm_ipc_region bda_shm_reg1 __aligned(4096); /* host TX */ + struct qtnf_shm_ipc_region bda_shm_reg2 __aligned(4096); /* host RX */ +} __packed; + +struct qtnf_pearl_tx_bd { + __le32 addr; + __le32 addr_h; + __le32 info; + __le32 info_h; +} __packed; + +struct qtnf_pearl_rx_bd { + __le32 addr; + __le32 addr_h; + __le32 info; + __le32 info_h; + __le32 next_ptr; + __le32 next_ptr_h; +} __packed; + +struct qtnf_pearl_fw_hdr { + u8 boardflg[8]; + __le32 fwsize; + __le32 seqnum; + __le32 type; + __le32 pktlen; + __le32 crc; +} __packed; + +struct qtnf_pcie_pearl_state { + struct qtnf_pcie_bus_priv base; + + /* lock for irq configuration changes */ + spinlock_t irq_lock; + + struct qtnf_pearl_bda __iomem *bda; + void __iomem *pcie_reg_base; + + struct qtnf_pearl_tx_bd *tx_bd_vbase; + dma_addr_t tx_bd_pbase; + + struct qtnf_pearl_rx_bd *rx_bd_vbase; + dma_addr_t rx_bd_pbase; + + dma_addr_t bd_table_paddr; + void *bd_table_vaddr; + u32 bd_table_len; + u32 pcie_irq_mask; + u32 pcie_irq_rx_count; + u32 pcie_irq_tx_count; + u32 pcie_irq_uf_count; +}; + +static inline void qtnf_init_hdp_irqs(struct qtnf_pcie_pearl_state *ps) +{ + unsigned long flags; + + spin_lock_irqsave(&ps->irq_lock, flags); + ps->pcie_irq_mask = (PCIE_HDP_INT_RX_BITS | PCIE_HDP_INT_TX_BITS); + spin_unlock_irqrestore(&ps->irq_lock, flags); +} + +static inline void qtnf_enable_hdp_irqs(struct qtnf_pcie_pearl_state *ps) +{ + unsigned long flags; + + spin_lock_irqsave(&ps->irq_lock, flags); + writel(ps->pcie_irq_mask, PCIE_HDP_INT_EN(ps->pcie_reg_base)); + spin_unlock_irqrestore(&ps->irq_lock, flags); +} + +static inline void qtnf_disable_hdp_irqs(struct qtnf_pcie_pearl_state *ps) +{ + unsigned long flags; + + spin_lock_irqsave(&ps->irq_lock, flags); + writel(0x0, PCIE_HDP_INT_EN(ps->pcie_reg_base)); + spin_unlock_irqrestore(&ps->irq_lock, flags); 
+} + +static inline void qtnf_en_rxdone_irq(struct qtnf_pcie_pearl_state *ps) +{ + unsigned long flags; + + spin_lock_irqsave(&ps->irq_lock, flags); + ps->pcie_irq_mask |= PCIE_HDP_INT_RX_BITS; + writel(ps->pcie_irq_mask, PCIE_HDP_INT_EN(ps->pcie_reg_base)); + spin_unlock_irqrestore(&ps->irq_lock, flags); +} + +static inline void qtnf_dis_rxdone_irq(struct qtnf_pcie_pearl_state *ps) +{ + unsigned long flags; + + spin_lock_irqsave(&ps->irq_lock, flags); + ps->pcie_irq_mask &= ~PCIE_HDP_INT_RX_BITS; + writel(ps->pcie_irq_mask, PCIE_HDP_INT_EN(ps->pcie_reg_base)); + spin_unlock_irqrestore(&ps->irq_lock, flags); +} + +static inline void qtnf_en_txdone_irq(struct qtnf_pcie_pearl_state *ps) +{ + unsigned long flags; + + spin_lock_irqsave(&ps->irq_lock, flags); + ps->pcie_irq_mask |= PCIE_HDP_INT_TX_BITS; + writel(ps->pcie_irq_mask, PCIE_HDP_INT_EN(ps->pcie_reg_base)); + spin_unlock_irqrestore(&ps->irq_lock, flags); +} + +static inline void qtnf_dis_txdone_irq(struct qtnf_pcie_pearl_state *ps) +{ + unsigned long flags; + + spin_lock_irqsave(&ps->irq_lock, flags); + ps->pcie_irq_mask &= ~PCIE_HDP_INT_TX_BITS; + writel(ps->pcie_irq_mask, PCIE_HDP_INT_EN(ps->pcie_reg_base)); + spin_unlock_irqrestore(&ps->irq_lock, flags); +} + +static void qtnf_deassert_intx(struct qtnf_pcie_pearl_state *ps) +{ + void __iomem *reg = ps->base.sysctl_bar + PEARL_PCIE_CFG0_OFFSET; + u32 cfg; + + cfg = readl(reg); + cfg &= ~PEARL_ASSERT_INTX; + qtnf_non_posted_write(cfg, reg); +} + +static void qtnf_pearl_reset_ep(struct qtnf_pcie_pearl_state *ps) +{ + const u32 data = QTN_PEARL_IPC_IRQ_WORD(QTN_PEARL_LHOST_EP_RESET); + void __iomem *reg = ps->base.sysctl_bar + + QTN_PEARL_SYSCTL_LHOST_IRQ_OFFSET; + + qtnf_non_posted_write(data, reg); + msleep(QTN_EP_RESET_WAIT_MS); + pci_restore_state(ps->base.pdev); +} + +static void qtnf_pcie_pearl_ipc_gen_ep_int(void *arg) +{ + const struct qtnf_pcie_pearl_state *ps = arg; + const u32 data = QTN_PEARL_IPC_IRQ_WORD(QTN_PEARL_LHOST_IPC_IRQ); + void __iomem *reg = ps->base.sysctl_bar + + QTN_PEARL_SYSCTL_LHOST_IRQ_OFFSET; + + qtnf_non_posted_write(data, reg); +} + +static int qtnf_is_state(__le32 __iomem *reg, u32 state) +{ + u32 s = readl(reg); + + return s & state; +} + +static void qtnf_set_state(__le32 __iomem *reg, u32 state) +{ + u32 s = readl(reg); + + qtnf_non_posted_write(state | s, reg); +} + +static void qtnf_clear_state(__le32 __iomem *reg, u32 state) +{ + u32 s = readl(reg); + + qtnf_non_posted_write(s & ~state, reg); +} + +static int qtnf_poll_state(__le32 __iomem *reg, u32 state, u32 delay_in_ms) +{ + u32 timeout = 0; + + while ((qtnf_is_state(reg, state) == 0)) { + usleep_range(1000, 1200); + if (++timeout > delay_in_ms) + return -1; + } + + return 0; +} + +static int pearl_alloc_bd_table(struct qtnf_pcie_pearl_state *ps) +{ + struct qtnf_pcie_bus_priv *priv = &ps->base; + dma_addr_t paddr; + void *vaddr; + int len; + + len = priv->tx_bd_num * sizeof(struct qtnf_pearl_tx_bd) + + priv->rx_bd_num * sizeof(struct qtnf_pearl_rx_bd); + + vaddr = dmam_alloc_coherent(&priv->pdev->dev, len, &paddr, GFP_KERNEL); + if (!vaddr) + return -ENOMEM; + + /* tx bd */ + + memset(vaddr, 0, len); + + ps->bd_table_vaddr = vaddr; + ps->bd_table_paddr = paddr; + ps->bd_table_len = len; + + ps->tx_bd_vbase = vaddr; + ps->tx_bd_pbase = paddr; + + pr_debug("TX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr); + + priv->tx_bd_r_index = 0; + priv->tx_bd_w_index = 0; + + /* rx bd */ + + vaddr = ((struct qtnf_pearl_tx_bd *)vaddr) + priv->tx_bd_num; + paddr += priv->tx_bd_num * 
sizeof(struct qtnf_pearl_tx_bd); + + ps->rx_bd_vbase = vaddr; + ps->rx_bd_pbase = paddr; + +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT + writel(QTN_HOST_HI32(paddr), + PCIE_HDP_TX_HOST_Q_BASE_H(ps->pcie_reg_base)); +#endif + writel(QTN_HOST_LO32(paddr), + PCIE_HDP_TX_HOST_Q_BASE_L(ps->pcie_reg_base)); + writel(priv->rx_bd_num | (sizeof(struct qtnf_pearl_rx_bd)) << 16, + PCIE_HDP_TX_HOST_Q_SZ_CTRL(ps->pcie_reg_base)); + + pr_debug("RX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr); + + return 0; +} + +static int pearl_skb2rbd_attach(struct qtnf_pcie_pearl_state *ps, u16 index) +{ + struct qtnf_pcie_bus_priv *priv = &ps->base; + struct qtnf_pearl_rx_bd *rxbd; + struct sk_buff *skb; + dma_addr_t paddr; + + skb = __netdev_alloc_skb_ip_align(NULL, SKB_BUF_SIZE, GFP_ATOMIC); + if (!skb) { + priv->rx_skb[index] = NULL; + return -ENOMEM; + } + + priv->rx_skb[index] = skb; + rxbd = &ps->rx_bd_vbase[index]; + + paddr = pci_map_single(priv->pdev, skb->data, + SKB_BUF_SIZE, PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(priv->pdev, paddr)) { + pr_err("skb DMA mapping error: %pad\n", &paddr); + return -ENOMEM; + } + + /* keep rx skb paddrs in rx buffer descriptors for cleanup purposes */ + rxbd->addr = cpu_to_le32(QTN_HOST_LO32(paddr)); + rxbd->addr_h = cpu_to_le32(QTN_HOST_HI32(paddr)); + rxbd->info = 0x0; + + priv->rx_bd_w_index = index; + + /* sync up all descriptor updates */ + wmb(); + +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT + writel(QTN_HOST_HI32(paddr), + PCIE_HDP_HHBM_BUF_PTR_H(ps->pcie_reg_base)); +#endif + writel(QTN_HOST_LO32(paddr), + PCIE_HDP_HHBM_BUF_PTR(ps->pcie_reg_base)); + + writel(index, PCIE_HDP_TX_HOST_Q_WR_PTR(ps->pcie_reg_base)); + return 0; +} + +static int pearl_alloc_rx_buffers(struct qtnf_pcie_pearl_state *ps) +{ + u16 i; + int ret = 0; + + memset(ps->rx_bd_vbase, 0x0, + ps->base.rx_bd_num * sizeof(struct qtnf_pearl_rx_bd)); + + for (i = 0; i < ps->base.rx_bd_num; i++) { + ret = pearl_skb2rbd_attach(ps, i); + if (ret) + break; + } + + return ret; +} + +/* all rx/tx activity should have ceased before calling this function */ +static void qtnf_pearl_free_xfer_buffers(struct qtnf_pcie_pearl_state *ps) +{ + struct qtnf_pcie_bus_priv *priv = &ps->base; + struct qtnf_pearl_tx_bd *txbd; + struct qtnf_pearl_rx_bd *rxbd; + struct sk_buff *skb; + dma_addr_t paddr; + int i; + + /* free rx buffers */ + for (i = 0; i < priv->rx_bd_num; i++) { + if (priv->rx_skb && priv->rx_skb[i]) { + rxbd = &ps->rx_bd_vbase[i]; + skb = priv->rx_skb[i]; + paddr = QTN_HOST_ADDR(le32_to_cpu(rxbd->addr_h), + le32_to_cpu(rxbd->addr)); + pci_unmap_single(priv->pdev, paddr, SKB_BUF_SIZE, + PCI_DMA_FROMDEVICE); + dev_kfree_skb_any(skb); + priv->rx_skb[i] = NULL; + } + } + + /* free tx buffers */ + for (i = 0; i < priv->tx_bd_num; i++) { + if (priv->tx_skb && priv->tx_skb[i]) { + txbd = &ps->tx_bd_vbase[i]; + skb = priv->tx_skb[i]; + paddr = QTN_HOST_ADDR(le32_to_cpu(txbd->addr_h), + le32_to_cpu(txbd->addr)); + pci_unmap_single(priv->pdev, paddr, skb->len, + PCI_DMA_TODEVICE); + dev_kfree_skb_any(skb); + priv->tx_skb[i] = NULL; + } + } +} + +static int pearl_hhbm_init(struct qtnf_pcie_pearl_state *ps) +{ + u32 val; + + val = readl(PCIE_HHBM_CONFIG(ps->pcie_reg_base)); + val |= HHBM_CONFIG_SOFT_RESET; + writel(val, PCIE_HHBM_CONFIG(ps->pcie_reg_base)); + usleep_range(50, 100); + val &= ~HHBM_CONFIG_SOFT_RESET; +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT + val |= HHBM_64BIT; +#endif + writel(val, PCIE_HHBM_CONFIG(ps->pcie_reg_base)); + writel(ps->base.rx_bd_num, PCIE_HHBM_Q_LIMIT_REG(ps->pcie_reg_base)); + + return 0; 
+} + +static int qtnf_pcie_pearl_init_xfer(struct qtnf_pcie_pearl_state *ps) +{ + struct qtnf_pcie_bus_priv *priv = &ps->base; + int ret; + u32 val; + + priv->tx_bd_num = tx_bd_size_param; + priv->rx_bd_num = rx_bd_size_param; + priv->rx_bd_w_index = 0; + priv->rx_bd_r_index = 0; + + if (!priv->tx_bd_num || !is_power_of_2(priv->tx_bd_num)) { + pr_err("tx_bd_size_param %u is not power of two\n", + priv->tx_bd_num); + return -EINVAL; + } + + val = priv->tx_bd_num * sizeof(struct qtnf_pearl_tx_bd); + if (val > PCIE_HHBM_MAX_SIZE) { + pr_err("tx_bd_size_param %u is too large\n", + priv->tx_bd_num); + return -EINVAL; + } + + if (!priv->rx_bd_num || !is_power_of_2(priv->rx_bd_num)) { + pr_err("rx_bd_size_param %u is not power of two\n", + priv->rx_bd_num); + return -EINVAL; + } + + val = priv->rx_bd_num * sizeof(dma_addr_t); + if (val > PCIE_HHBM_MAX_SIZE) { + pr_err("rx_bd_size_param %u is too large\n", + priv->rx_bd_num); + return -EINVAL; + } + + ret = pearl_hhbm_init(ps); + if (ret) { + pr_err("failed to init h/w queues\n"); + return ret; + } + + ret = qtnf_pcie_alloc_skb_array(priv); + if (ret) { + pr_err("failed to allocate skb array\n"); + return ret; + } + + ret = pearl_alloc_bd_table(ps); + if (ret) { + pr_err("failed to allocate bd table\n"); + return ret; + } + + ret = pearl_alloc_rx_buffers(ps); + if (ret) { + pr_err("failed to allocate rx buffers\n"); + return ret; + } + + return ret; +} + +static void qtnf_pearl_data_tx_reclaim(struct qtnf_pcie_pearl_state *ps) +{ + struct qtnf_pcie_bus_priv *priv = &ps->base; + struct qtnf_pearl_tx_bd *txbd; + struct sk_buff *skb; + unsigned long flags; + dma_addr_t paddr; + u32 tx_done_index; + int count = 0; + int i; + + spin_lock_irqsave(&priv->tx_reclaim_lock, flags); + + tx_done_index = readl(PCIE_HDP_RX0DMA_CNT(ps->pcie_reg_base)) + & (priv->tx_bd_num - 1); + + i = priv->tx_bd_r_index; + + while (CIRC_CNT(tx_done_index, i, priv->tx_bd_num)) { + skb = priv->tx_skb[i]; + if (likely(skb)) { + txbd = &ps->tx_bd_vbase[i]; + paddr = QTN_HOST_ADDR(le32_to_cpu(txbd->addr_h), + le32_to_cpu(txbd->addr)); + pci_unmap_single(priv->pdev, paddr, skb->len, + PCI_DMA_TODEVICE); + + if (skb->dev) { + qtnf_update_tx_stats(skb->dev, skb); + if (unlikely(priv->tx_stopped)) { + qtnf_wake_all_queues(skb->dev); + priv->tx_stopped = 0; + } + } + + dev_kfree_skb_any(skb); + } + + priv->tx_skb[i] = NULL; + count++; + + if (++i >= priv->tx_bd_num) + i = 0; + } + + priv->tx_reclaim_done += count; + priv->tx_reclaim_req++; + priv->tx_bd_r_index = i; + + spin_unlock_irqrestore(&priv->tx_reclaim_lock, flags); +} + +static int qtnf_tx_queue_ready(struct qtnf_pcie_pearl_state *ps) +{ + struct qtnf_pcie_bus_priv *priv = &ps->base; + + if (!CIRC_SPACE(priv->tx_bd_w_index, priv->tx_bd_r_index, + priv->tx_bd_num)) { + qtnf_pearl_data_tx_reclaim(ps); + + if (!CIRC_SPACE(priv->tx_bd_w_index, priv->tx_bd_r_index, + priv->tx_bd_num)) { + pr_warn_ratelimited("reclaim full Tx queue\n"); + priv->tx_full_count++; + return 0; + } + } + + return 1; +} + +static int qtnf_pcie_data_tx(struct qtnf_bus *bus, struct sk_buff *skb) +{ + struct qtnf_pcie_pearl_state *ps = get_bus_priv(bus); + struct qtnf_pcie_bus_priv *priv = &ps->base; + dma_addr_t txbd_paddr, skb_paddr; + struct qtnf_pearl_tx_bd *txbd; + unsigned long flags; + int len, i; + u32 info; + int ret = 0; + + spin_lock_irqsave(&priv->tx_lock, flags); + + if (!qtnf_tx_queue_ready(ps)) { + if (skb->dev) { + netif_tx_stop_all_queues(skb->dev); + priv->tx_stopped = 1; + } + + spin_unlock_irqrestore(&priv->tx_lock, flags); + return 
NETDEV_TX_BUSY; + } + + i = priv->tx_bd_w_index; + priv->tx_skb[i] = skb; + len = skb->len; + + skb_paddr = pci_map_single(priv->pdev, skb->data, + skb->len, PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(priv->pdev, skb_paddr)) { + pr_err("skb DMA mapping error: %pad\n", &skb_paddr); + ret = -ENOMEM; + goto tx_done; + } + + txbd = &ps->tx_bd_vbase[i]; + txbd->addr = cpu_to_le32(QTN_HOST_LO32(skb_paddr)); + txbd->addr_h = cpu_to_le32(QTN_HOST_HI32(skb_paddr)); + + info = (len & QTN_PCIE_TX_DESC_LEN_MASK) << QTN_PCIE_TX_DESC_LEN_SHIFT; + txbd->info = cpu_to_le32(info); + + /* sync up all descriptor updates before passing them to EP */ + dma_wmb(); + + /* write new TX descriptor to PCIE_RX_FIFO on EP */ + txbd_paddr = ps->tx_bd_pbase + i * sizeof(struct qtnf_pearl_tx_bd); + +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT + writel(QTN_HOST_HI32(txbd_paddr), + PCIE_HDP_HOST_WR_DESC0_H(ps->pcie_reg_base)); +#endif + writel(QTN_HOST_LO32(txbd_paddr), + PCIE_HDP_HOST_WR_DESC0(ps->pcie_reg_base)); + + if (++i >= priv->tx_bd_num) + i = 0; + + priv->tx_bd_w_index = i; + +tx_done: + if (ret && skb) { + pr_err_ratelimited("drop skb\n"); + if (skb->dev) + skb->dev->stats.tx_dropped++; + dev_kfree_skb_any(skb); + } + + priv->tx_done_count++; + spin_unlock_irqrestore(&priv->tx_lock, flags); + + qtnf_pearl_data_tx_reclaim(ps); + + return NETDEV_TX_OK; +} + +static irqreturn_t qtnf_pcie_pearl_interrupt(int irq, void *data) +{ + struct qtnf_bus *bus = (struct qtnf_bus *)data; + struct qtnf_pcie_pearl_state *ps = get_bus_priv(bus); + struct qtnf_pcie_bus_priv *priv = &ps->base; + u32 status; + + priv->pcie_irq_count++; + status = readl(PCIE_HDP_INT_STATUS(ps->pcie_reg_base)); + + qtnf_shm_ipc_irq_handler(&priv->shm_ipc_ep_in); + qtnf_shm_ipc_irq_handler(&priv->shm_ipc_ep_out); + + if (!(status & ps->pcie_irq_mask)) + goto irq_done; + + if (status & PCIE_HDP_INT_RX_BITS) + ps->pcie_irq_rx_count++; + + if (status & PCIE_HDP_INT_TX_BITS) + ps->pcie_irq_tx_count++; + + if (status & PCIE_HDP_INT_HHBM_UF) + ps->pcie_irq_uf_count++; + + if (status & PCIE_HDP_INT_RX_BITS) { + qtnf_dis_rxdone_irq(ps); + napi_schedule(&bus->mux_napi); + } + + if (status & PCIE_HDP_INT_TX_BITS) { + qtnf_dis_txdone_irq(ps); + tasklet_hi_schedule(&priv->reclaim_tq); + } + +irq_done: + /* H/W workaround: clean all bits, not only enabled */ + qtnf_non_posted_write(~0U, PCIE_HDP_INT_STATUS(ps->pcie_reg_base)); + + if (!priv->msi_enabled) + qtnf_deassert_intx(ps); + + return IRQ_HANDLED; +} + +static int qtnf_rx_data_ready(struct qtnf_pcie_pearl_state *ps) +{ + u16 index = ps->base.rx_bd_r_index; + struct qtnf_pearl_rx_bd *rxbd; + u32 descw; + + rxbd = &ps->rx_bd_vbase[index]; + descw = le32_to_cpu(rxbd->info); + + if (descw & QTN_TXDONE_MASK) + return 1; + + return 0; +} + +static int qtnf_pcie_pearl_rx_poll(struct napi_struct *napi, int budget) +{ + struct qtnf_bus *bus = container_of(napi, struct qtnf_bus, mux_napi); + struct qtnf_pcie_pearl_state *ps = get_bus_priv(bus); + struct qtnf_pcie_bus_priv *priv = &ps->base; + struct net_device *ndev = NULL; + struct sk_buff *skb = NULL; + int processed = 0; + struct qtnf_pearl_rx_bd *rxbd; + dma_addr_t skb_paddr; + int consume; + u32 descw; + u32 psize; + u16 r_idx; + u16 w_idx; + int ret; + + while (processed < budget) { + if (!qtnf_rx_data_ready(ps)) + goto rx_out; + + r_idx = priv->rx_bd_r_index; + rxbd = &ps->rx_bd_vbase[r_idx]; + descw = le32_to_cpu(rxbd->info); + + skb = priv->rx_skb[r_idx]; + psize = QTN_GET_LEN(descw); + consume = 1; + + if (!(descw & QTN_TXDONE_MASK)) { + pr_warn("skip invalid 
rxbd[%d]\n", r_idx); + consume = 0; + } + + if (!skb) { + pr_warn("skip missing rx_skb[%d]\n", r_idx); + consume = 0; + } + + if (skb && (skb_tailroom(skb) < psize)) { + pr_err("skip packet with invalid length: %u > %u\n", + psize, skb_tailroom(skb)); + consume = 0; + } + + if (skb) { + skb_paddr = QTN_HOST_ADDR(le32_to_cpu(rxbd->addr_h), + le32_to_cpu(rxbd->addr)); + pci_unmap_single(priv->pdev, skb_paddr, SKB_BUF_SIZE, + PCI_DMA_FROMDEVICE); + } + + if (consume) { + skb_put(skb, psize); + ndev = qtnf_classify_skb(bus, skb); + if (likely(ndev)) { + qtnf_update_rx_stats(ndev, skb); + skb->protocol = eth_type_trans(skb, ndev); + napi_gro_receive(napi, skb); + } else { + pr_debug("drop untagged skb\n"); + bus->mux_dev.stats.rx_dropped++; + dev_kfree_skb_any(skb); + } + } else { + if (skb) { + bus->mux_dev.stats.rx_dropped++; + dev_kfree_skb_any(skb); + } + } + + priv->rx_skb[r_idx] = NULL; + if (++r_idx >= priv->rx_bd_num) + r_idx = 0; + + priv->rx_bd_r_index = r_idx; + + /* replace processed buffer with a new one */ + w_idx = priv->rx_bd_w_index; + while (CIRC_SPACE(priv->rx_bd_w_index, priv->rx_bd_r_index, + priv->rx_bd_num) > 0) { + if (++w_idx >= priv->rx_bd_num) + w_idx = 0; + + ret = pearl_skb2rbd_attach(ps, w_idx); + if (ret) { + pr_err("failed to allocate new rx_skb[%d]\n", + w_idx); + break; + } + } + + processed++; + } + +rx_out: + if (processed < budget) { + napi_complete(napi); + qtnf_en_rxdone_irq(ps); + } + + return processed; +} + +static void +qtnf_pcie_data_tx_timeout(struct qtnf_bus *bus, struct net_device *ndev) +{ + struct qtnf_pcie_pearl_state *ps = (void *)get_bus_priv(bus); + + tasklet_hi_schedule(&ps->base.reclaim_tq); +} + +static void qtnf_pcie_data_rx_start(struct qtnf_bus *bus) +{ + struct qtnf_pcie_pearl_state *ps = (void *)get_bus_priv(bus); + + qtnf_enable_hdp_irqs(ps); + napi_enable(&bus->mux_napi); +} + +static void qtnf_pcie_data_rx_stop(struct qtnf_bus *bus) +{ + struct qtnf_pcie_pearl_state *ps = (void *)get_bus_priv(bus); + + napi_disable(&bus->mux_napi); + qtnf_disable_hdp_irqs(ps); +} + +static const struct qtnf_bus_ops qtnf_pcie_pearl_bus_ops = { + /* control path methods */ + .control_tx = qtnf_pcie_control_tx, + + /* data path methods */ + .data_tx = qtnf_pcie_data_tx, + .data_tx_timeout = qtnf_pcie_data_tx_timeout, + .data_rx_start = qtnf_pcie_data_rx_start, + .data_rx_stop = qtnf_pcie_data_rx_stop, +}; + +static int qtnf_dbg_irq_stats(struct seq_file *s, void *data) +{ + struct qtnf_bus *bus = dev_get_drvdata(s->private); + struct qtnf_pcie_pearl_state *ps = get_bus_priv(bus); + u32 reg = readl(PCIE_HDP_INT_EN(ps->pcie_reg_base)); + u32 status; + + seq_printf(s, "pcie_irq_count(%u)\n", ps->base.pcie_irq_count); + seq_printf(s, "pcie_irq_tx_count(%u)\n", ps->pcie_irq_tx_count); + status = reg & PCIE_HDP_INT_TX_BITS; + seq_printf(s, "pcie_irq_tx_status(%s)\n", + (status == PCIE_HDP_INT_TX_BITS) ? "EN" : "DIS"); + seq_printf(s, "pcie_irq_rx_count(%u)\n", ps->pcie_irq_rx_count); + status = reg & PCIE_HDP_INT_RX_BITS; + seq_printf(s, "pcie_irq_rx_status(%s)\n", + (status == PCIE_HDP_INT_RX_BITS) ? "EN" : "DIS"); + seq_printf(s, "pcie_irq_uf_count(%u)\n", ps->pcie_irq_uf_count); + status = reg & PCIE_HDP_INT_HHBM_UF; + seq_printf(s, "pcie_irq_hhbm_uf_status(%s)\n", + (status == PCIE_HDP_INT_HHBM_UF) ?
"EN" : "DIS"); + + return 0; +} + +static int qtnf_dbg_hdp_stats(struct seq_file *s, void *data) +{ + struct qtnf_bus *bus = dev_get_drvdata(s->private); + struct qtnf_pcie_pearl_state *ps = get_bus_priv(bus); + struct qtnf_pcie_bus_priv *priv = &ps->base; + + seq_printf(s, "tx_full_count(%u)\n", priv->tx_full_count); + seq_printf(s, "tx_done_count(%u)\n", priv->tx_done_count); + seq_printf(s, "tx_reclaim_done(%u)\n", priv->tx_reclaim_done); + seq_printf(s, "tx_reclaim_req(%u)\n", priv->tx_reclaim_req); + + seq_printf(s, "tx_bd_r_index(%u)\n", priv->tx_bd_r_index); + seq_printf(s, "tx_bd_p_index(%u)\n", + readl(PCIE_HDP_RX0DMA_CNT(ps->pcie_reg_base)) + & (priv->tx_bd_num - 1)); + seq_printf(s, "tx_bd_w_index(%u)\n", priv->tx_bd_w_index); + seq_printf(s, "tx queue len(%u)\n", + CIRC_CNT(priv->tx_bd_w_index, priv->tx_bd_r_index, + priv->tx_bd_num)); + + seq_printf(s, "rx_bd_r_index(%u)\n", priv->rx_bd_r_index); + seq_printf(s, "rx_bd_p_index(%u)\n", + readl(PCIE_HDP_TX0DMA_CNT(ps->pcie_reg_base)) + & (priv->rx_bd_num - 1)); + seq_printf(s, "rx_bd_w_index(%u)\n", priv->rx_bd_w_index); + seq_printf(s, "rx alloc queue len(%u)\n", + CIRC_SPACE(priv->rx_bd_w_index, priv->rx_bd_r_index, + priv->rx_bd_num)); + + return 0; +} + +static int qtnf_ep_fw_send(struct pci_dev *pdev, uint32_t size, + int blk, const u8 *pblk, const u8 *fw) +{ + struct qtnf_bus *bus = pci_get_drvdata(pdev); + + struct qtnf_pearl_fw_hdr *hdr; + u8 *pdata; + + int hds = sizeof(*hdr); + struct sk_buff *skb = NULL; + int len = 0; + int ret; + + skb = __dev_alloc_skb(QTN_PCIE_FW_BUFSZ, GFP_KERNEL); + if (!skb) + return -ENOMEM; + + skb->len = QTN_PCIE_FW_BUFSZ; + skb->dev = NULL; + + hdr = (struct qtnf_pearl_fw_hdr *)skb->data; + memcpy(hdr->boardflg, QTN_PCIE_BOARDFLG, strlen(QTN_PCIE_BOARDFLG)); + hdr->fwsize = cpu_to_le32(size); + hdr->seqnum = cpu_to_le32(blk); + + if (blk) + hdr->type = cpu_to_le32(QTN_FW_DSUB); + else + hdr->type = cpu_to_le32(QTN_FW_DBEGIN); + + pdata = skb->data + hds; + + len = QTN_PCIE_FW_BUFSZ - hds; + if (pblk >= (fw + size - len)) { + len = fw + size - pblk; + hdr->type = cpu_to_le32(QTN_FW_DEND); + } + + hdr->pktlen = cpu_to_le32(len); + memcpy(pdata, pblk, len); + hdr->crc = cpu_to_le32(~crc32(0, pdata, len)); + + ret = qtnf_pcie_data_tx(bus, skb); + + return (ret == NETDEV_TX_OK) ? len : 0; +} + +static int +qtnf_ep_fw_load(struct qtnf_pcie_pearl_state *ps, const u8 *fw, u32 fw_size) +{ + int blk_size = QTN_PCIE_FW_BUFSZ - sizeof(struct qtnf_pearl_fw_hdr); + int blk_count = fw_size / blk_size + ((fw_size % blk_size) ? 
1 : 0); + const u8 *pblk = fw; + int threshold = 0; + int blk = 0; + int len; + + pr_debug("FW upload started: fw_addr=0x%p size=%d\n", fw, fw_size); + + while (blk < blk_count) { + if (++threshold > 10000) { + pr_err("FW upload failed: too many retries\n"); + return -ETIMEDOUT; + } + + len = qtnf_ep_fw_send(ps->base.pdev, fw_size, blk, pblk, fw); + if (len <= 0) + continue; + + if (!((blk + 1) & QTN_PCIE_FW_DLMASK) || + (blk == (blk_count - 1))) { + qtnf_set_state(&ps->bda->bda_rc_state, + QTN_RC_FW_SYNC); + if (qtnf_poll_state(&ps->bda->bda_ep_state, + QTN_EP_FW_SYNC, + QTN_FW_DL_TIMEOUT_MS)) { + pr_err("FW upload failed: SYNC timed out\n"); + return -ETIMEDOUT; + } + + qtnf_clear_state(&ps->bda->bda_ep_state, + QTN_EP_FW_SYNC); + + if (qtnf_is_state(&ps->bda->bda_ep_state, + QTN_EP_FW_RETRY)) { + if (blk == (blk_count - 1)) { + int last_round = + blk_count & QTN_PCIE_FW_DLMASK; + blk -= last_round; + pblk -= ((last_round - 1) * + blk_size + len); + } else { + blk -= QTN_PCIE_FW_DLMASK; + pblk -= QTN_PCIE_FW_DLMASK * blk_size; + } + + qtnf_clear_state(&ps->bda->bda_ep_state, + QTN_EP_FW_RETRY); + + pr_warn("FW upload retry: block #%d\n", blk); + continue; + } + + qtnf_pearl_data_tx_reclaim(ps); + } + + pblk += len; + blk++; + } + + pr_debug("FW upload completed: sent %d blocks in total\n", blk); + return 0; +} + +static void qtnf_pearl_fw_work_handler(struct work_struct *work) +{ + struct qtnf_bus *bus = container_of(work, struct qtnf_bus, fw_work); + struct qtnf_pcie_pearl_state *ps = (void *)get_bus_priv(bus); + struct pci_dev *pdev = ps->base.pdev; + const struct firmware *fw; + int ret; + u32 state = QTN_RC_FW_LOADRDY | QTN_RC_FW_QLINK; + const char *fwname = QTN_PCI_PEARL_FW_NAME; + bool fw_boot_success = false; + + if (flashboot) { + state |= QTN_RC_FW_FLASHBOOT; + } else { + ret = request_firmware(&fw, fwname, &pdev->dev); + if (ret < 0) { + pr_err("failed to get firmware %s\n", fwname); + goto fw_load_exit; + } + } + + qtnf_set_state(&ps->bda->bda_rc_state, state); + + if (qtnf_poll_state(&ps->bda->bda_ep_state, QTN_EP_FW_LOADRDY, + QTN_FW_DL_TIMEOUT_MS)) { + pr_err("card is not ready\n"); + + if (!flashboot) + release_firmware(fw); + + goto fw_load_exit; + } + + qtnf_clear_state(&ps->bda->bda_ep_state, QTN_EP_FW_LOADRDY); + + if (flashboot) { + pr_info("booting firmware from flash\n"); + } else { + pr_info("starting firmware upload: %s\n", fwname); + + ret = qtnf_ep_fw_load(ps, fw->data, fw->size); + release_firmware(fw); + if (ret) { + pr_err("firmware upload error\n"); + goto fw_load_exit; + } + } + + if (qtnf_poll_state(&ps->bda->bda_ep_state, QTN_EP_FW_DONE, + QTN_FW_DL_TIMEOUT_MS)) { + pr_err("firmware bringup timed out\n"); + goto fw_load_exit; + } + + pr_info("firmware is up and running\n"); + + if (qtnf_poll_state(&ps->bda->bda_ep_state, + QTN_EP_FW_QLINK_DONE, QTN_FW_QLINK_TIMEOUT_MS)) { + pr_err("firmware runtime failure\n"); + goto fw_load_exit; + } + + fw_boot_success = true; + +fw_load_exit: + qtnf_pcie_fw_boot_done(bus, fw_boot_success, DRV_NAME); + + if (fw_boot_success) { + qtnf_debugfs_add_entry(bus, "hdp_stats", qtnf_dbg_hdp_stats); + qtnf_debugfs_add_entry(bus, "irq_stats", qtnf_dbg_irq_stats); + } +} + +static void qtnf_pearl_reclaim_tasklet_fn(unsigned long data) +{ + struct qtnf_pcie_pearl_state *ps = (void *)data; + + qtnf_pearl_data_tx_reclaim(ps); + qtnf_en_txdone_irq(ps); +} + +static int qtnf_pearl_check_chip_id(struct qtnf_pcie_pearl_state *ps) +{ + unsigned int chipid; + + chipid = qtnf_chip_id_get(ps->base.sysctl_bar); + + switch (chipid) { + case
QTN_CHIP_ID_PEARL: + case QTN_CHIP_ID_PEARL_B: + case QTN_CHIP_ID_PEARL_C: + pr_info("chip ID is 0x%x\n", chipid); + break; + default: + pr_err("incorrect chip ID 0x%x\n", chipid); + return -ENODEV; + } + + return 0; +} + +static int qtnf_pcie_pearl_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct qtnf_shm_ipc_int ipc_int; + struct qtnf_pcie_pearl_state *ps; + struct qtnf_bus *bus; + int ret; + u64 dma_mask; + +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT + dma_mask = DMA_BIT_MASK(64); +#else + dma_mask = DMA_BIT_MASK(32); +#endif + + ret = qtnf_pcie_probe(pdev, sizeof(*ps), &qtnf_pcie_pearl_bus_ops, + dma_mask, use_msi); + if (ret) + return ret; + + bus = pci_get_drvdata(pdev); + ps = get_bus_priv(bus); + + spin_lock_init(&ps->irq_lock); + + tasklet_init(&ps->base.reclaim_tq, qtnf_pearl_reclaim_tasklet_fn, + (unsigned long)ps); + netif_napi_add(&bus->mux_dev, &bus->mux_napi, + qtnf_pcie_pearl_rx_poll, 10); + INIT_WORK(&bus->fw_work, qtnf_pearl_fw_work_handler); + + ps->pcie_reg_base = ps->base.dmareg_bar; + ps->bda = ps->base.epmem_bar; + writel(ps->base.msi_enabled, &ps->bda->bda_rc_msi_enabled); + + ipc_int.fn = qtnf_pcie_pearl_ipc_gen_ep_int; + ipc_int.arg = ps; + qtnf_pcie_init_shm_ipc(&ps->base, &ps->bda->bda_shm_reg1, + &ps->bda->bda_shm_reg2, &ipc_int); + + ret = qtnf_pearl_check_chip_id(ps); + if (ret) + goto error; + + ret = qtnf_pcie_pearl_init_xfer(ps); + if (ret) { + pr_err("PCIE xfer init failed\n"); + goto error; + } + + /* init default irq settings */ + qtnf_init_hdp_irqs(ps); + + /* start with disabled irqs */ + qtnf_disable_hdp_irqs(ps); + + ret = devm_request_irq(&pdev->dev, pdev->irq, + &qtnf_pcie_pearl_interrupt, 0, + "qtnf_pcie_irq", (void *)bus); + if (ret) { + pr_err("failed to request pcie irq %d\n", pdev->irq); + goto err_xfer; + } + + qtnf_pcie_bringup_fw_async(bus); + + return 0; + +err_xfer: + qtnf_pearl_free_xfer_buffers(ps); +error: + qtnf_pcie_remove(bus, &ps->base); + + return ret; +} + +static void qtnf_pcie_pearl_remove(struct pci_dev *pdev) +{ + struct qtnf_pcie_pearl_state *ps; + struct qtnf_bus *bus; + + bus = pci_get_drvdata(pdev); + if (!bus) + return; + + ps = get_bus_priv(bus); + + qtnf_pcie_remove(bus, &ps->base); + qtnf_pearl_reset_ep(ps); + qtnf_pearl_free_xfer_buffers(ps); +} + +#ifdef CONFIG_PM_SLEEP +static int qtnf_pcie_pearl_suspend(struct device *dev) +{ + return -EOPNOTSUPP; +} + +static int qtnf_pcie_pearl_resume(struct device *dev) +{ + return 0; +} +#endif /* CONFIG_PM_SLEEP */ + +#ifdef CONFIG_PM_SLEEP +/* Power Management Hooks */ +static SIMPLE_DEV_PM_OPS(qtnf_pcie_pearl_pm_ops, qtnf_pcie_pearl_suspend, + qtnf_pcie_pearl_resume); +#endif + +static const struct pci_device_id qtnf_pcie_devid_table[] = { + { + PCIE_VENDOR_ID_QUANTENNA, PCIE_DEVICE_ID_QTN_PEARL, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + }, + { }, +}; + +MODULE_DEVICE_TABLE(pci, qtnf_pcie_devid_table); + +static struct pci_driver qtnf_pcie_pearl_drv_data = { + .name = DRV_NAME, + .id_table = qtnf_pcie_devid_table, + .probe = qtnf_pcie_pearl_probe, + .remove = qtnf_pcie_pearl_remove, +#ifdef CONFIG_PM_SLEEP + .driver = { + .pm = &qtnf_pcie_pearl_pm_ops, + }, +#endif +}; + +static int __init qtnf_pcie_pearl_register(void) +{ + pr_info("register Quantenna QSR10g FullMAC PCIE driver\n"); + return pci_register_driver(&qtnf_pcie_pearl_drv_data); +} + +static void __exit qtnf_pcie_pearl_exit(void) +{ + pr_info("unregister Quantenna QSR10g FullMAC PCIE driver\n"); + pci_unregister_driver(&qtnf_pcie_pearl_drv_data); +} + +module_init(qtnf_pcie_pearl_register); 
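The firmware upload implemented above by qtnf_ep_fw_load()/qtnf_ep_fw_send() is a simple framed chunking protocol: every block carries the total image size, its sequence number, a type tag (QTN_FW_DBEGIN for the first block, QTN_FW_DSUB for middle blocks, QTN_FW_DEND for the shorter tail) and an inverted CRC32 of the payload. The host-side sketch below reproduces only that framing decision; the header omits the boardflg magic, all names are hypothetical, and a trivial stub stands in for the kernel's crc32():

	#include <stdint.h>
	#include <stdio.h>

	enum fw_type { FW_DBEGIN, FW_DSUB, FW_DEND };

	/* illustrative stand-in for struct qtnf_pearl_fw_hdr, host byte order */
	struct fw_hdr {
		uint32_t fwsize;	/* total image size */
		uint32_t seqnum;	/* block number */
		uint32_t type;		/* DBEGIN/DSUB/DEND */
		uint32_t pktlen;	/* payload bytes in this block */
		uint32_t crc;		/* inverted CRC of the payload */
	};

	/* placeholder only; the driver uses the kernel's crc32() here */
	static uint32_t crc32_stub(const uint8_t *p, uint32_t len)
	{
		uint32_t crc = 0;

		while (len--)
			crc = (crc << 1) ^ *p++;
		return crc;
	}

	static void send_fw(const uint8_t *fw, uint32_t size, uint32_t blk_size)
	{
		uint32_t blk_count = size / blk_size + (size % blk_size ? 1 : 0);
		const uint8_t *pblk = fw;
		uint32_t blk;

		for (blk = 0; blk < blk_count; blk++) {
			struct fw_hdr hdr;
			uint32_t len = blk_size;

			hdr.type = blk ? FW_DSUB : FW_DBEGIN;
			if (pblk >= fw + size - len) {	/* shorter tail block */
				len = (uint32_t)(fw + size - pblk);
				hdr.type = FW_DEND;
			}

			hdr.fwsize = size;
			hdr.seqnum = blk;
			hdr.pktlen = len;
			hdr.crc = ~crc32_stub(pblk, len); /* same inversion as the driver */

			printf("blk %u: type=%u len=%u\n", blk, hdr.type, hdr.pktlen);
			pblk += len;	/* next block starts where this one ended */
		}
	}

	int main(void)
	{
		static uint8_t image[10000];	/* dummy firmware image */

		send_fw(image, sizeof(image), 2040);
		return 0;
	}

Batching QTN_PCIE_FW_DLMASK + 1 blocks between SYNC handshakes, as the upload loop does, lets the endpoint request a retry of a single group of blocks instead of restarting the whole image.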
+module_exit(qtnf_pcie_pearl_exit); + +MODULE_AUTHOR("Quantenna Communications"); +MODULE_DESCRIPTION("Quantenna QSR10g PCIe bus driver for 802.11 wireless LAN."); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_ipc.h b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_ipc.h index 00bb21a1c47a..f21e97ede090 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_ipc.h +++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_ipc.h @@ -43,11 +43,6 @@ #define QTN_RC_FW_LOADRDY BIT(8) #define QTN_RC_FW_SYNC BIT(9) -/* state transition timeouts */ -#define QTN_FW_DL_TIMEOUT_MS 3000 -#define QTN_FW_QLINK_TIMEOUT_MS 30000 -#define QTN_EP_RESET_WAIT_MS 1000 - #define PCIE_HDP_INT_RX_BITS (0 \ | PCIE_HDP_INT_EP_TXDMA \ | PCIE_HDP_INT_EP_TXEMPTY \ @@ -68,17 +63,11 @@ #define QTN_HOST_ADDR(h, l) ((u32)l) #endif -#define QTN_SYSCTL_BAR 0 -#define QTN_SHMEM_BAR 2 -#define QTN_DMA_BAR 3 - #define QTN_PCIE_BDA_VERSION 0x1002 #define PCIE_BDA_NAMELEN 32 #define PCIE_HHBM_MAX_SIZE 2048 -#define SKB_BUF_SIZE 2048 - #define QTN_PCIE_BOARDFLG "PCIEQTN" #define QTN_PCIE_FW_DLMASK 0xF #define QTN_PCIE_FW_BUFSZ 2048 @@ -101,44 +90,6 @@ enum qtnf_pcie_bda_ipc_flags { QTN_PCIE_IPC_FLAG_SHM_PIO = BIT(1), }; -struct qtnf_pcie_bda { - __le16 bda_len; - __le16 bda_version; - __le32 bda_pci_endian; - __le32 bda_ep_state; - __le32 bda_rc_state; - __le32 bda_dma_mask; - __le32 bda_msi_addr; - __le32 bda_flashsz; - u8 bda_boardname[PCIE_BDA_NAMELEN]; - __le32 bda_rc_msi_enabled; - u8 bda_hhbm_list[PCIE_HHBM_MAX_SIZE]; - __le32 bda_dsbw_start_index; - __le32 bda_dsbw_end_index; - __le32 bda_dsbw_total_bytes; - __le32 bda_rc_tx_bd_base; - __le32 bda_rc_tx_bd_num; - u8 bda_pcie_mac[QTN_ENET_ADDR_LENGTH]; - struct qtnf_shm_ipc_region bda_shm_reg1 __aligned(4096); /* host TX */ - struct qtnf_shm_ipc_region bda_shm_reg2 __aligned(4096); /* host RX */ -} __packed; - -struct qtnf_tx_bd { - __le32 addr; - __le32 addr_h; - __le32 info; - __le32 info_h; -} __packed; - -struct qtnf_rx_bd { - __le32 addr; - __le32 addr_h; - __le32 info; - __le32 info_h; - __le32 next_ptr; - __le32 next_ptr_h; -} __packed; - enum qtnf_fw_loadtype { QTN_FW_DBEGIN, QTN_FW_DSUB, @@ -146,13 +97,4 @@ enum qtnf_fw_loadtype { QTN_FW_CTRL }; -struct qtnf_pcie_fw_hdr { - u8 boardflg[8]; - __le32 fwsize; - __le32 seqnum; - __le32 type; - __le32 pktlen; - __le32 crc; -} __packed; - #endif /* _QTN_FMAC_PCIE_IPC_H_ */ diff --git a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_regs_pearl.h b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_regs.h index 0bfe285b6b48..0bfe285b6b48 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_regs_pearl.h +++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_regs.h diff --git a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c deleted file mode 100644 index 3120d49df565..000000000000 --- a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c +++ /dev/null @@ -1,1494 +0,0 @@ -/* - * Copyright (c) 2015-2016 Quantenna Communications, Inc. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/firmware.h> -#include <linux/pci.h> -#include <linux/vmalloc.h> -#include <linux/delay.h> -#include <linux/interrupt.h> -#include <linux/sched.h> -#include <linux/completion.h> -#include <linux/crc32.h> -#include <linux/spinlock.h> -#include <linux/circ_buf.h> -#include <linux/log2.h> - -#include "qtn_hw_ids.h" -#include "pcie_bus_priv.h" -#include "core.h" -#include "bus.h" -#include "debug.h" - -static bool use_msi = true; -module_param(use_msi, bool, 0644); -MODULE_PARM_DESC(use_msi, "set 0 to use legacy interrupt"); - -static unsigned int tx_bd_size_param = 32; -module_param(tx_bd_size_param, uint, 0644); -MODULE_PARM_DESC(tx_bd_size_param, "Tx descriptors queue size, power of two"); - -static unsigned int rx_bd_size_param = 256; -module_param(rx_bd_size_param, uint, 0644); -MODULE_PARM_DESC(rx_bd_size_param, "Rx descriptors queue size, power of two"); - -static u8 flashboot = 1; -module_param(flashboot, byte, 0644); -MODULE_PARM_DESC(flashboot, "set to 0 to use FW binary file on FS"); - -#define DRV_NAME "qtnfmac_pearl_pcie" - -static inline void qtnf_non_posted_write(u32 val, void __iomem *basereg) -{ - writel(val, basereg); - - /* flush posted write */ - readl(basereg); -} - -static inline void qtnf_init_hdp_irqs(struct qtnf_pcie_bus_priv *priv) -{ - unsigned long flags; - - spin_lock_irqsave(&priv->irq_lock, flags); - priv->pcie_irq_mask = (PCIE_HDP_INT_RX_BITS | PCIE_HDP_INT_TX_BITS); - spin_unlock_irqrestore(&priv->irq_lock, flags); -} - -static inline void qtnf_enable_hdp_irqs(struct qtnf_pcie_bus_priv *priv) -{ - unsigned long flags; - - spin_lock_irqsave(&priv->irq_lock, flags); - writel(priv->pcie_irq_mask, PCIE_HDP_INT_EN(priv->pcie_reg_base)); - spin_unlock_irqrestore(&priv->irq_lock, flags); -} - -static inline void qtnf_disable_hdp_irqs(struct qtnf_pcie_bus_priv *priv) -{ - unsigned long flags; - - spin_lock_irqsave(&priv->irq_lock, flags); - writel(0x0, PCIE_HDP_INT_EN(priv->pcie_reg_base)); - spin_unlock_irqrestore(&priv->irq_lock, flags); -} - -static inline void qtnf_en_rxdone_irq(struct qtnf_pcie_bus_priv *priv) -{ - unsigned long flags; - - spin_lock_irqsave(&priv->irq_lock, flags); - priv->pcie_irq_mask |= PCIE_HDP_INT_RX_BITS; - writel(priv->pcie_irq_mask, PCIE_HDP_INT_EN(priv->pcie_reg_base)); - spin_unlock_irqrestore(&priv->irq_lock, flags); -} - -static inline void qtnf_dis_rxdone_irq(struct qtnf_pcie_bus_priv *priv) -{ - unsigned long flags; - - spin_lock_irqsave(&priv->irq_lock, flags); - priv->pcie_irq_mask &= ~PCIE_HDP_INT_RX_BITS; - writel(priv->pcie_irq_mask, PCIE_HDP_INT_EN(priv->pcie_reg_base)); - spin_unlock_irqrestore(&priv->irq_lock, flags); -} - -static inline void qtnf_en_txdone_irq(struct qtnf_pcie_bus_priv *priv) -{ - unsigned long flags; - - spin_lock_irqsave(&priv->irq_lock, flags); - priv->pcie_irq_mask |= PCIE_HDP_INT_TX_BITS; - writel(priv->pcie_irq_mask, PCIE_HDP_INT_EN(priv->pcie_reg_base)); - spin_unlock_irqrestore(&priv->irq_lock, flags); -} - -static inline void qtnf_dis_txdone_irq(struct qtnf_pcie_bus_priv *priv) -{ - unsigned long flags; - - spin_lock_irqsave(&priv->irq_lock, flags); - priv->pcie_irq_mask &= ~PCIE_HDP_INT_TX_BITS; - writel(priv->pcie_irq_mask, PCIE_HDP_INT_EN(priv->pcie_reg_base)); - 
spin_unlock_irqrestore(&priv->irq_lock, flags); -} - -static void qtnf_pcie_init_irq(struct qtnf_pcie_bus_priv *priv) -{ - struct pci_dev *pdev = priv->pdev; - - /* fall back to legacy INTx interrupts by default */ - priv->msi_enabled = 0; - - /* check if MSI capability is available */ - if (use_msi) { - if (!pci_enable_msi(pdev)) { - pr_debug("MSI interrupt enabled\n"); - priv->msi_enabled = 1; - } else { - pr_warn("failed to enable MSI interrupts"); - } - } - - if (!priv->msi_enabled) { - pr_warn("legacy PCIE interrupts enabled\n"); - pci_intx(pdev, 1); - } -} - -static void qtnf_deassert_intx(struct qtnf_pcie_bus_priv *priv) -{ - void __iomem *reg = priv->sysctl_bar + PEARL_PCIE_CFG0_OFFSET; - u32 cfg; - - cfg = readl(reg); - cfg &= ~PEARL_ASSERT_INTX; - qtnf_non_posted_write(cfg, reg); -} - -static void qtnf_reset_card(struct qtnf_pcie_bus_priv *priv) -{ - const u32 data = QTN_PEARL_IPC_IRQ_WORD(QTN_PEARL_LHOST_EP_RESET); - void __iomem *reg = priv->sysctl_bar + - QTN_PEARL_SYSCTL_LHOST_IRQ_OFFSET; - - qtnf_non_posted_write(data, reg); - msleep(QTN_EP_RESET_WAIT_MS); - pci_restore_state(priv->pdev); -} - -static void qtnf_ipc_gen_ep_int(void *arg) -{ - const struct qtnf_pcie_bus_priv *priv = arg; - const u32 data = QTN_PEARL_IPC_IRQ_WORD(QTN_PEARL_LHOST_IPC_IRQ); - void __iomem *reg = priv->sysctl_bar + - QTN_PEARL_SYSCTL_LHOST_IRQ_OFFSET; - - qtnf_non_posted_write(data, reg); -} - -static void __iomem *qtnf_map_bar(struct qtnf_pcie_bus_priv *priv, u8 index) -{ - void __iomem *vaddr; - dma_addr_t busaddr; - size_t len; - int ret; - - ret = pcim_iomap_regions(priv->pdev, 1 << index, DRV_NAME); - if (ret) - return IOMEM_ERR_PTR(ret); - - busaddr = pci_resource_start(priv->pdev, index); - len = pci_resource_len(priv->pdev, index); - vaddr = pcim_iomap_table(priv->pdev)[index]; - if (!vaddr) - return IOMEM_ERR_PTR(-ENOMEM); - - pr_debug("BAR%u vaddr=0x%p busaddr=%pad len=%u\n", - index, vaddr, &busaddr, (int)len); - - return vaddr; -} - -static void qtnf_pcie_control_rx_callback(void *arg, const u8 *buf, size_t len) -{ - struct qtnf_pcie_bus_priv *priv = arg; - struct qtnf_bus *bus = pci_get_drvdata(priv->pdev); - struct sk_buff *skb; - - if (unlikely(len == 0)) { - pr_warn("zero length packet received\n"); - return; - } - - skb = __dev_alloc_skb(len, GFP_KERNEL); - - if (unlikely(!skb)) { - pr_err("failed to allocate skb\n"); - return; - } - - skb_put_data(skb, buf, len); - - qtnf_trans_handle_rx_ctl_packet(bus, skb); -} - -static int qtnf_pcie_init_shm_ipc(struct qtnf_pcie_bus_priv *priv) -{ - struct qtnf_shm_ipc_region __iomem *ipc_tx_reg; - struct qtnf_shm_ipc_region __iomem *ipc_rx_reg; - const struct qtnf_shm_ipc_int ipc_int = { qtnf_ipc_gen_ep_int, priv }; - const struct qtnf_shm_ipc_rx_callback rx_callback = { - qtnf_pcie_control_rx_callback, priv }; - - ipc_tx_reg = &priv->bda->bda_shm_reg1; - ipc_rx_reg = &priv->bda->bda_shm_reg2; - - qtnf_shm_ipc_init(&priv->shm_ipc_ep_in, QTNF_SHM_IPC_OUTBOUND, - ipc_tx_reg, priv->workqueue, - &ipc_int, &rx_callback); - qtnf_shm_ipc_init(&priv->shm_ipc_ep_out, QTNF_SHM_IPC_INBOUND, - ipc_rx_reg, priv->workqueue, - &ipc_int, &rx_callback); - - return 0; -} - -static void qtnf_pcie_free_shm_ipc(struct qtnf_pcie_bus_priv *priv) -{ - qtnf_shm_ipc_free(&priv->shm_ipc_ep_in); - qtnf_shm_ipc_free(&priv->shm_ipc_ep_out); -} - -static int qtnf_pcie_init_memory(struct qtnf_pcie_bus_priv *priv) -{ - int ret = -ENOMEM; - - priv->sysctl_bar = qtnf_map_bar(priv, QTN_SYSCTL_BAR); - if (IS_ERR(priv->sysctl_bar)) { - pr_err("failed to map BAR%u\n", 
QTN_SYSCTL_BAR); - return ret; - } - - priv->dmareg_bar = qtnf_map_bar(priv, QTN_DMA_BAR); - if (IS_ERR(priv->dmareg_bar)) { - pr_err("failed to map BAR%u\n", QTN_DMA_BAR); - return ret; - } - - priv->epmem_bar = qtnf_map_bar(priv, QTN_SHMEM_BAR); - if (IS_ERR(priv->epmem_bar)) { - pr_err("failed to map BAR%u\n", QTN_SHMEM_BAR); - return ret; - } - - priv->pcie_reg_base = priv->dmareg_bar; - priv->bda = priv->epmem_bar; - writel(priv->msi_enabled, &priv->bda->bda_rc_msi_enabled); - - return 0; -} - -static void qtnf_tune_pcie_mps(struct qtnf_pcie_bus_priv *priv) -{ - struct pci_dev *pdev = priv->pdev; - struct pci_dev *parent; - int mps_p, mps_o, mps_m, mps; - int ret; - - /* current mps */ - mps_o = pcie_get_mps(pdev); - - /* maximum supported mps */ - mps_m = 128 << pdev->pcie_mpss; - - /* suggested new mps value */ - mps = mps_m; - - if (pdev->bus && pdev->bus->self) { - /* parent (bus) mps */ - parent = pdev->bus->self; - - if (pci_is_pcie(parent)) { - mps_p = pcie_get_mps(parent); - mps = min(mps_m, mps_p); - } - } - - ret = pcie_set_mps(pdev, mps); - if (ret) { - pr_err("failed to set mps to %d, keep using current %d\n", - mps, mps_o); - priv->mps = mps_o; - return; - } - - pr_debug("set mps to %d (was %d, max %d)\n", mps, mps_o, mps_m); - priv->mps = mps; -} - -static int qtnf_is_state(__le32 __iomem *reg, u32 state) -{ - u32 s = readl(reg); - - return s & state; -} - -static void qtnf_set_state(__le32 __iomem *reg, u32 state) -{ - u32 s = readl(reg); - - qtnf_non_posted_write(state | s, reg); -} - -static void qtnf_clear_state(__le32 __iomem *reg, u32 state) -{ - u32 s = readl(reg); - - qtnf_non_posted_write(s & ~state, reg); -} - -static int qtnf_poll_state(__le32 __iomem *reg, u32 state, u32 delay_in_ms) -{ - u32 timeout = 0; - - while ((qtnf_is_state(reg, state) == 0)) { - usleep_range(1000, 1200); - if (++timeout > delay_in_ms) - return -1; - } - - return 0; -} - -static int alloc_skb_array(struct qtnf_pcie_bus_priv *priv) -{ - struct sk_buff **vaddr; - int len; - - len = priv->tx_bd_num * sizeof(*priv->tx_skb) + - priv->rx_bd_num * sizeof(*priv->rx_skb); - vaddr = devm_kzalloc(&priv->pdev->dev, len, GFP_KERNEL); - - if (!vaddr) - return -ENOMEM; - - priv->tx_skb = vaddr; - - vaddr += priv->tx_bd_num; - priv->rx_skb = vaddr; - - return 0; -} - -static int alloc_bd_table(struct qtnf_pcie_bus_priv *priv) -{ - dma_addr_t paddr; - void *vaddr; - int len; - - len = priv->tx_bd_num * sizeof(struct qtnf_tx_bd) + - priv->rx_bd_num * sizeof(struct qtnf_rx_bd); - - vaddr = dmam_alloc_coherent(&priv->pdev->dev, len, &paddr, GFP_KERNEL); - if (!vaddr) - return -ENOMEM; - - /* tx bd */ - - memset(vaddr, 0, len); - - priv->bd_table_vaddr = vaddr; - priv->bd_table_paddr = paddr; - priv->bd_table_len = len; - - priv->tx_bd_vbase = vaddr; - priv->tx_bd_pbase = paddr; - - pr_debug("TX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr); - - priv->tx_bd_r_index = 0; - priv->tx_bd_w_index = 0; - - /* rx bd */ - - vaddr = ((struct qtnf_tx_bd *)vaddr) + priv->tx_bd_num; - paddr += priv->tx_bd_num * sizeof(struct qtnf_tx_bd); - - priv->rx_bd_vbase = vaddr; - priv->rx_bd_pbase = paddr; - -#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT - writel(QTN_HOST_HI32(paddr), - PCIE_HDP_TX_HOST_Q_BASE_H(priv->pcie_reg_base)); -#endif - writel(QTN_HOST_LO32(paddr), - PCIE_HDP_TX_HOST_Q_BASE_L(priv->pcie_reg_base)); - writel(priv->rx_bd_num | (sizeof(struct qtnf_rx_bd)) << 16, - PCIE_HDP_TX_HOST_Q_SZ_CTRL(priv->pcie_reg_base)); - - pr_debug("RX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr); - - return 0; 
-} - -static int skb2rbd_attach(struct qtnf_pcie_bus_priv *priv, u16 index) -{ - struct qtnf_rx_bd *rxbd; - struct sk_buff *skb; - dma_addr_t paddr; - - skb = __netdev_alloc_skb_ip_align(NULL, SKB_BUF_SIZE, GFP_ATOMIC); - if (!skb) { - priv->rx_skb[index] = NULL; - return -ENOMEM; - } - - priv->rx_skb[index] = skb; - rxbd = &priv->rx_bd_vbase[index]; - - paddr = pci_map_single(priv->pdev, skb->data, - SKB_BUF_SIZE, PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(priv->pdev, paddr)) { - pr_err("skb DMA mapping error: %pad\n", &paddr); - return -ENOMEM; - } - - /* keep rx skb paddrs in rx buffer descriptors for cleanup purposes */ - rxbd->addr = cpu_to_le32(QTN_HOST_LO32(paddr)); - rxbd->addr_h = cpu_to_le32(QTN_HOST_HI32(paddr)); - rxbd->info = 0x0; - - priv->rx_bd_w_index = index; - - /* sync up all descriptor updates */ - wmb(); - -#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT - writel(QTN_HOST_HI32(paddr), - PCIE_HDP_HHBM_BUF_PTR_H(priv->pcie_reg_base)); -#endif - writel(QTN_HOST_LO32(paddr), - PCIE_HDP_HHBM_BUF_PTR(priv->pcie_reg_base)); - - writel(index, PCIE_HDP_TX_HOST_Q_WR_PTR(priv->pcie_reg_base)); - return 0; -} - -static int alloc_rx_buffers(struct qtnf_pcie_bus_priv *priv) -{ - u16 i; - int ret = 0; - - memset(priv->rx_bd_vbase, 0x0, - priv->rx_bd_num * sizeof(struct qtnf_rx_bd)); - - for (i = 0; i < priv->rx_bd_num; i++) { - ret = skb2rbd_attach(priv, i); - if (ret) - break; - } - - return ret; -} - -/* all rx/tx activity should have ceased before calling this function */ -static void qtnf_free_xfer_buffers(struct qtnf_pcie_bus_priv *priv) -{ - struct qtnf_tx_bd *txbd; - struct qtnf_rx_bd *rxbd; - struct sk_buff *skb; - dma_addr_t paddr; - int i; - - /* free rx buffers */ - for (i = 0; i < priv->rx_bd_num; i++) { - if (priv->rx_skb && priv->rx_skb[i]) { - rxbd = &priv->rx_bd_vbase[i]; - skb = priv->rx_skb[i]; - paddr = QTN_HOST_ADDR(le32_to_cpu(rxbd->addr_h), - le32_to_cpu(rxbd->addr)); - pci_unmap_single(priv->pdev, paddr, SKB_BUF_SIZE, - PCI_DMA_FROMDEVICE); - dev_kfree_skb_any(skb); - priv->rx_skb[i] = NULL; - } - } - - /* free tx buffers */ - for (i = 0; i < priv->tx_bd_num; i++) { - if (priv->tx_skb && priv->tx_skb[i]) { - txbd = &priv->tx_bd_vbase[i]; - skb = priv->tx_skb[i]; - paddr = QTN_HOST_ADDR(le32_to_cpu(txbd->addr_h), - le32_to_cpu(txbd->addr)); - pci_unmap_single(priv->pdev, paddr, skb->len, - PCI_DMA_TODEVICE); - dev_kfree_skb_any(skb); - priv->tx_skb[i] = NULL; - } - } -} - -static int qtnf_hhbm_init(struct qtnf_pcie_bus_priv *priv) -{ - u32 val; - - val = readl(PCIE_HHBM_CONFIG(priv->pcie_reg_base)); - val |= HHBM_CONFIG_SOFT_RESET; - writel(val, PCIE_HHBM_CONFIG(priv->pcie_reg_base)); - usleep_range(50, 100); - val &= ~HHBM_CONFIG_SOFT_RESET; -#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT - val |= HHBM_64BIT; -#endif - writel(val, PCIE_HHBM_CONFIG(priv->pcie_reg_base)); - writel(priv->rx_bd_num, PCIE_HHBM_Q_LIMIT_REG(priv->pcie_reg_base)); - - return 0; -} - -static int qtnf_pcie_init_xfer(struct qtnf_pcie_bus_priv *priv) -{ - int ret; - u32 val; - - priv->tx_bd_num = tx_bd_size_param; - priv->rx_bd_num = rx_bd_size_param; - priv->rx_bd_w_index = 0; - priv->rx_bd_r_index = 0; - - if (!priv->tx_bd_num || !is_power_of_2(priv->tx_bd_num)) { - pr_err("tx_bd_size_param %u is not power of two\n", - priv->tx_bd_num); - return -EINVAL; - } - - val = priv->tx_bd_num * sizeof(struct qtnf_tx_bd); - if (val > PCIE_HHBM_MAX_SIZE) { - pr_err("tx_bd_size_param %u is too large\n", - priv->tx_bd_num); - return -EINVAL; - } - - if (!priv->rx_bd_num || !is_power_of_2(priv->rx_bd_num)) { - 
pr_err("rx_bd_size_param %u is not power of two\n", - priv->rx_bd_num); - return -EINVAL; - } - - val = priv->rx_bd_num * sizeof(dma_addr_t); - if (val > PCIE_HHBM_MAX_SIZE) { - pr_err("rx_bd_size_param %u is too large\n", - priv->rx_bd_num); - return -EINVAL; - } - - ret = qtnf_hhbm_init(priv); - if (ret) { - pr_err("failed to init h/w queues\n"); - return ret; - } - - ret = alloc_skb_array(priv); - if (ret) { - pr_err("failed to allocate skb array\n"); - return ret; - } - - ret = alloc_bd_table(priv); - if (ret) { - pr_err("failed to allocate bd table\n"); - return ret; - } - - ret = alloc_rx_buffers(priv); - if (ret) { - pr_err("failed to allocate rx buffers\n"); - return ret; - } - - return ret; -} - -static void qtnf_pcie_data_tx_reclaim(struct qtnf_pcie_bus_priv *priv) -{ - struct qtnf_tx_bd *txbd; - struct sk_buff *skb; - unsigned long flags; - dma_addr_t paddr; - u32 tx_done_index; - int count = 0; - int i; - - spin_lock_irqsave(&priv->tx_reclaim_lock, flags); - - tx_done_index = readl(PCIE_HDP_RX0DMA_CNT(priv->pcie_reg_base)) - & (priv->tx_bd_num - 1); - - i = priv->tx_bd_r_index; - - while (CIRC_CNT(tx_done_index, i, priv->tx_bd_num)) { - skb = priv->tx_skb[i]; - if (likely(skb)) { - txbd = &priv->tx_bd_vbase[i]; - paddr = QTN_HOST_ADDR(le32_to_cpu(txbd->addr_h), - le32_to_cpu(txbd->addr)); - pci_unmap_single(priv->pdev, paddr, skb->len, - PCI_DMA_TODEVICE); - - if (skb->dev) { - qtnf_update_tx_stats(skb->dev, skb); - if (unlikely(priv->tx_stopped)) { - qtnf_wake_all_queues(skb->dev); - priv->tx_stopped = 0; - } - } - - dev_kfree_skb_any(skb); - } - - priv->tx_skb[i] = NULL; - count++; - - if (++i >= priv->tx_bd_num) - i = 0; - } - - priv->tx_reclaim_done += count; - priv->tx_reclaim_req++; - priv->tx_bd_r_index = i; - - spin_unlock_irqrestore(&priv->tx_reclaim_lock, flags); -} - -static int qtnf_tx_queue_ready(struct qtnf_pcie_bus_priv *priv) -{ - if (!CIRC_SPACE(priv->tx_bd_w_index, priv->tx_bd_r_index, - priv->tx_bd_num)) { - qtnf_pcie_data_tx_reclaim(priv); - - if (!CIRC_SPACE(priv->tx_bd_w_index, priv->tx_bd_r_index, - priv->tx_bd_num)) { - pr_warn_ratelimited("reclaim full Tx queue\n"); - priv->tx_full_count++; - return 0; - } - } - - return 1; -} - -static int qtnf_pcie_data_tx(struct qtnf_bus *bus, struct sk_buff *skb) -{ - struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus); - dma_addr_t txbd_paddr, skb_paddr; - struct qtnf_tx_bd *txbd; - unsigned long flags; - int len, i; - u32 info; - int ret = 0; - - spin_lock_irqsave(&priv->tx0_lock, flags); - - if (!qtnf_tx_queue_ready(priv)) { - if (skb->dev) { - netif_tx_stop_all_queues(skb->dev); - priv->tx_stopped = 1; - } - - spin_unlock_irqrestore(&priv->tx0_lock, flags); - return NETDEV_TX_BUSY; - } - - i = priv->tx_bd_w_index; - priv->tx_skb[i] = skb; - len = skb->len; - - skb_paddr = pci_map_single(priv->pdev, skb->data, - skb->len, PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(priv->pdev, skb_paddr)) { - pr_err("skb DMA mapping error: %pad\n", &skb_paddr); - ret = -ENOMEM; - goto tx_done; - } - - txbd = &priv->tx_bd_vbase[i]; - txbd->addr = cpu_to_le32(QTN_HOST_LO32(skb_paddr)); - txbd->addr_h = cpu_to_le32(QTN_HOST_HI32(skb_paddr)); - - info = (len & QTN_PCIE_TX_DESC_LEN_MASK) << QTN_PCIE_TX_DESC_LEN_SHIFT; - txbd->info = cpu_to_le32(info); - - /* sync up all descriptor updates before passing them to EP */ - dma_wmb(); - - /* write new TX descriptor to PCIE_RX_FIFO on EP */ - txbd_paddr = priv->tx_bd_pbase + i * sizeof(struct qtnf_tx_bd); - -#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT - writel(QTN_HOST_HI32(txbd_paddr), 
- PCIE_HDP_HOST_WR_DESC0_H(priv->pcie_reg_base)); -#endif - writel(QTN_HOST_LO32(txbd_paddr), - PCIE_HDP_HOST_WR_DESC0(priv->pcie_reg_base)); - - if (++i >= priv->tx_bd_num) - i = 0; - - priv->tx_bd_w_index = i; - -tx_done: - if (ret && skb) { - pr_err_ratelimited("drop skb\n"); - if (skb->dev) - skb->dev->stats.tx_dropped++; - dev_kfree_skb_any(skb); - } - - priv->tx_done_count++; - spin_unlock_irqrestore(&priv->tx0_lock, flags); - - qtnf_pcie_data_tx_reclaim(priv); - - return NETDEV_TX_OK; -} - -static int qtnf_pcie_control_tx(struct qtnf_bus *bus, struct sk_buff *skb) -{ - struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus); - int ret; - - ret = qtnf_shm_ipc_send(&priv->shm_ipc_ep_in, skb->data, skb->len); - - if (ret == -ETIMEDOUT) { - pr_err("EP firmware is dead\n"); - bus->fw_state = QTNF_FW_STATE_EP_DEAD; - } - - return ret; -} - -static irqreturn_t qtnf_interrupt(int irq, void *data) -{ - struct qtnf_bus *bus = (struct qtnf_bus *)data; - struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus); - u32 status; - - priv->pcie_irq_count++; - status = readl(PCIE_HDP_INT_STATUS(priv->pcie_reg_base)); - - qtnf_shm_ipc_irq_handler(&priv->shm_ipc_ep_in); - qtnf_shm_ipc_irq_handler(&priv->shm_ipc_ep_out); - - if (!(status & priv->pcie_irq_mask)) - goto irq_done; - - if (status & PCIE_HDP_INT_RX_BITS) - priv->pcie_irq_rx_count++; - - if (status & PCIE_HDP_INT_TX_BITS) - priv->pcie_irq_tx_count++; - - if (status & PCIE_HDP_INT_HHBM_UF) - priv->pcie_irq_uf_count++; - - if (status & PCIE_HDP_INT_RX_BITS) { - qtnf_dis_rxdone_irq(priv); - napi_schedule(&bus->mux_napi); - } - - if (status & PCIE_HDP_INT_TX_BITS) { - qtnf_dis_txdone_irq(priv); - tasklet_hi_schedule(&priv->reclaim_tq); - } - -irq_done: - /* H/W workaround: clean all bits, not only enabled */ - qtnf_non_posted_write(~0U, PCIE_HDP_INT_STATUS(priv->pcie_reg_base)); - - if (!priv->msi_enabled) - qtnf_deassert_intx(priv); - - return IRQ_HANDLED; -} - -static int qtnf_rx_data_ready(struct qtnf_pcie_bus_priv *priv) -{ - u16 index = priv->rx_bd_r_index; - struct qtnf_rx_bd *rxbd; - u32 descw; - - rxbd = &priv->rx_bd_vbase[index]; - descw = le32_to_cpu(rxbd->info); - - if (descw & QTN_TXDONE_MASK) - return 1; - - return 0; -} - -static int qtnf_rx_poll(struct napi_struct *napi, int budget) -{ - struct qtnf_bus *bus = container_of(napi, struct qtnf_bus, mux_napi); - struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus); - struct net_device *ndev = NULL; - struct sk_buff *skb = NULL; - int processed = 0; - struct qtnf_rx_bd *rxbd; - dma_addr_t skb_paddr; - int consume; - u32 descw; - u32 psize; - u16 r_idx; - u16 w_idx; - int ret; - - while (processed < budget) { - - - if (!qtnf_rx_data_ready(priv)) - goto rx_out; - - r_idx = priv->rx_bd_r_index; - rxbd = &priv->rx_bd_vbase[r_idx]; - descw = le32_to_cpu(rxbd->info); - - skb = priv->rx_skb[r_idx]; - psize = QTN_GET_LEN(descw); - consume = 1; - - if (!(descw & QTN_TXDONE_MASK)) { - pr_warn("skip invalid rxbd[%d]\n", r_idx); - consume = 0; - } - - if (!skb) { - pr_warn("skip missing rx_skb[%d]\n", r_idx); - consume = 0; - } - - if (skb && (skb_tailroom(skb) < psize)) { - pr_err("skip packet with invalid length: %u > %u\n", - psize, skb_tailroom(skb)); - consume = 0; - } - - if (skb) { - skb_paddr = QTN_HOST_ADDR(le32_to_cpu(rxbd->addr_h), - le32_to_cpu(rxbd->addr)); - pci_unmap_single(priv->pdev, skb_paddr, SKB_BUF_SIZE, - PCI_DMA_FROMDEVICE); - } - - if (consume) { - skb_put(skb, psize); - ndev = qtnf_classify_skb(bus, skb); - if (likely(ndev)) { - qtnf_update_rx_stats(ndev, 
skb); - skb->protocol = eth_type_trans(skb, ndev); - napi_gro_receive(napi, skb); - } else { - pr_debug("drop untagged skb\n"); - bus->mux_dev.stats.rx_dropped++; - dev_kfree_skb_any(skb); - } - } else { - if (skb) { - bus->mux_dev.stats.rx_dropped++; - dev_kfree_skb_any(skb); - } - } - - priv->rx_skb[r_idx] = NULL; - if (++r_idx >= priv->rx_bd_num) - r_idx = 0; - - priv->rx_bd_r_index = r_idx; - - /* repalce processed buffer by a new one */ - w_idx = priv->rx_bd_w_index; - while (CIRC_SPACE(priv->rx_bd_w_index, priv->rx_bd_r_index, - priv->rx_bd_num) > 0) { - if (++w_idx >= priv->rx_bd_num) - w_idx = 0; - - ret = skb2rbd_attach(priv, w_idx); - if (ret) { - pr_err("failed to allocate new rx_skb[%d]\n", - w_idx); - break; - } - } - - processed++; - } - -rx_out: - if (processed < budget) { - napi_complete(napi); - qtnf_en_rxdone_irq(priv); - } - - return processed; -} - -static void -qtnf_pcie_data_tx_timeout(struct qtnf_bus *bus, struct net_device *ndev) -{ - struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus); - - tasklet_hi_schedule(&priv->reclaim_tq); -} - -static void qtnf_pcie_data_rx_start(struct qtnf_bus *bus) -{ - struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus); - - qtnf_enable_hdp_irqs(priv); - napi_enable(&bus->mux_napi); -} - -static void qtnf_pcie_data_rx_stop(struct qtnf_bus *bus) -{ - struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus); - - napi_disable(&bus->mux_napi); - qtnf_disable_hdp_irqs(priv); -} - -static const struct qtnf_bus_ops qtnf_pcie_bus_ops = { - /* control path methods */ - .control_tx = qtnf_pcie_control_tx, - - /* data path methods */ - .data_tx = qtnf_pcie_data_tx, - .data_tx_timeout = qtnf_pcie_data_tx_timeout, - .data_rx_start = qtnf_pcie_data_rx_start, - .data_rx_stop = qtnf_pcie_data_rx_stop, -}; - -static int qtnf_dbg_mps_show(struct seq_file *s, void *data) -{ - struct qtnf_bus *bus = dev_get_drvdata(s->private); - struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus); - - seq_printf(s, "%d\n", priv->mps); - - return 0; -} - -static int qtnf_dbg_msi_show(struct seq_file *s, void *data) -{ - struct qtnf_bus *bus = dev_get_drvdata(s->private); - struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus); - - seq_printf(s, "%u\n", priv->msi_enabled); - - return 0; -} - -static int qtnf_dbg_irq_stats(struct seq_file *s, void *data) -{ - struct qtnf_bus *bus = dev_get_drvdata(s->private); - struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus); - u32 reg = readl(PCIE_HDP_INT_EN(priv->pcie_reg_base)); - u32 status; - - seq_printf(s, "pcie_irq_count(%u)\n", priv->pcie_irq_count); - seq_printf(s, "pcie_irq_tx_count(%u)\n", priv->pcie_irq_tx_count); - status = reg & PCIE_HDP_INT_TX_BITS; - seq_printf(s, "pcie_irq_tx_status(%s)\n", - (status == PCIE_HDP_INT_TX_BITS) ? "EN" : "DIS"); - seq_printf(s, "pcie_irq_rx_count(%u)\n", priv->pcie_irq_rx_count); - status = reg & PCIE_HDP_INT_RX_BITS; - seq_printf(s, "pcie_irq_rx_status(%s)\n", - (status == PCIE_HDP_INT_RX_BITS) ? "EN" : "DIS"); - seq_printf(s, "pcie_irq_uf_count(%u)\n", priv->pcie_irq_uf_count); - status = reg & PCIE_HDP_INT_HHBM_UF; - seq_printf(s, "pcie_irq_hhbm_uf_status(%s)\n", - (status == PCIE_HDP_INT_HHBM_UF) ? 
"EN" : "DIS"); - - return 0; -} - -static int qtnf_dbg_hdp_stats(struct seq_file *s, void *data) -{ - struct qtnf_bus *bus = dev_get_drvdata(s->private); - struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus); - - seq_printf(s, "tx_full_count(%u)\n", priv->tx_full_count); - seq_printf(s, "tx_done_count(%u)\n", priv->tx_done_count); - seq_printf(s, "tx_reclaim_done(%u)\n", priv->tx_reclaim_done); - seq_printf(s, "tx_reclaim_req(%u)\n", priv->tx_reclaim_req); - - seq_printf(s, "tx_bd_r_index(%u)\n", priv->tx_bd_r_index); - seq_printf(s, "tx_bd_p_index(%u)\n", - readl(PCIE_HDP_RX0DMA_CNT(priv->pcie_reg_base)) - & (priv->tx_bd_num - 1)); - seq_printf(s, "tx_bd_w_index(%u)\n", priv->tx_bd_w_index); - seq_printf(s, "tx queue len(%u)\n", - CIRC_CNT(priv->tx_bd_w_index, priv->tx_bd_r_index, - priv->tx_bd_num)); - - seq_printf(s, "rx_bd_r_index(%u)\n", priv->rx_bd_r_index); - seq_printf(s, "rx_bd_p_index(%u)\n", - readl(PCIE_HDP_TX0DMA_CNT(priv->pcie_reg_base)) - & (priv->rx_bd_num - 1)); - seq_printf(s, "rx_bd_w_index(%u)\n", priv->rx_bd_w_index); - seq_printf(s, "rx alloc queue len(%u)\n", - CIRC_SPACE(priv->rx_bd_w_index, priv->rx_bd_r_index, - priv->rx_bd_num)); - - return 0; -} - -static int qtnf_dbg_shm_stats(struct seq_file *s, void *data) -{ - struct qtnf_bus *bus = dev_get_drvdata(s->private); - struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus); - - seq_printf(s, "shm_ipc_ep_in.tx_packet_count(%zu)\n", - priv->shm_ipc_ep_in.tx_packet_count); - seq_printf(s, "shm_ipc_ep_in.rx_packet_count(%zu)\n", - priv->shm_ipc_ep_in.rx_packet_count); - seq_printf(s, "shm_ipc_ep_out.tx_packet_count(%zu)\n", - priv->shm_ipc_ep_out.tx_timeout_count); - seq_printf(s, "shm_ipc_ep_out.rx_packet_count(%zu)\n", - priv->shm_ipc_ep_out.rx_packet_count); - - return 0; -} - -static int qtnf_ep_fw_send(struct qtnf_pcie_bus_priv *priv, uint32_t size, - int blk, const u8 *pblk, const u8 *fw) -{ - struct pci_dev *pdev = priv->pdev; - struct qtnf_bus *bus = pci_get_drvdata(pdev); - - struct qtnf_pcie_fw_hdr *hdr; - u8 *pdata; - - int hds = sizeof(*hdr); - struct sk_buff *skb = NULL; - int len = 0; - int ret; - - skb = __dev_alloc_skb(QTN_PCIE_FW_BUFSZ, GFP_KERNEL); - if (!skb) - return -ENOMEM; - - skb->len = QTN_PCIE_FW_BUFSZ; - skb->dev = NULL; - - hdr = (struct qtnf_pcie_fw_hdr *)skb->data; - memcpy(hdr->boardflg, QTN_PCIE_BOARDFLG, strlen(QTN_PCIE_BOARDFLG)); - hdr->fwsize = cpu_to_le32(size); - hdr->seqnum = cpu_to_le32(blk); - - if (blk) - hdr->type = cpu_to_le32(QTN_FW_DSUB); - else - hdr->type = cpu_to_le32(QTN_FW_DBEGIN); - - pdata = skb->data + hds; - - len = QTN_PCIE_FW_BUFSZ - hds; - if (pblk >= (fw + size - len)) { - len = fw + size - pblk; - hdr->type = cpu_to_le32(QTN_FW_DEND); - } - - hdr->pktlen = cpu_to_le32(len); - memcpy(pdata, pblk, len); - hdr->crc = cpu_to_le32(~crc32(0, pdata, len)); - - ret = qtnf_pcie_data_tx(bus, skb); - - return (ret == NETDEV_TX_OK) ? len : 0; -} - -static int -qtnf_ep_fw_load(struct qtnf_pcie_bus_priv *priv, const u8 *fw, u32 fw_size) -{ - int blk_size = QTN_PCIE_FW_BUFSZ - sizeof(struct qtnf_pcie_fw_hdr); - int blk_count = fw_size / blk_size + ((fw_size % blk_size) ? 
1 : 0); - const u8 *pblk = fw; - int threshold = 0; - int blk = 0; - int len; - - pr_debug("FW upload started: fw_addr=0x%p size=%d\n", fw, fw_size); - - while (blk < blk_count) { - if (++threshold > 10000) { - pr_err("FW upload failed: too many retries\n"); - return -ETIMEDOUT; - } - - len = qtnf_ep_fw_send(priv, fw_size, blk, pblk, fw); - if (len <= 0) - continue; - - if (!((blk + 1) & QTN_PCIE_FW_DLMASK) || - (blk == (blk_count - 1))) { - qtnf_set_state(&priv->bda->bda_rc_state, - QTN_RC_FW_SYNC); - if (qtnf_poll_state(&priv->bda->bda_ep_state, - QTN_EP_FW_SYNC, - QTN_FW_DL_TIMEOUT_MS)) { - pr_err("FW upload failed: SYNC timed out\n"); - return -ETIMEDOUT; - } - - qtnf_clear_state(&priv->bda->bda_ep_state, - QTN_EP_FW_SYNC); - - if (qtnf_is_state(&priv->bda->bda_ep_state, - QTN_EP_FW_RETRY)) { - if (blk == (blk_count - 1)) { - int last_round = - blk_count & QTN_PCIE_FW_DLMASK; - blk -= last_round; - pblk -= ((last_round - 1) * - blk_size + len); - } else { - blk -= QTN_PCIE_FW_DLMASK; - pblk -= QTN_PCIE_FW_DLMASK * blk_size; - } - - qtnf_clear_state(&priv->bda->bda_ep_state, - QTN_EP_FW_RETRY); - - pr_warn("FW upload retry: block #%d\n", blk); - continue; - } - - qtnf_pcie_data_tx_reclaim(priv); - } - - pblk += len; - blk++; - } - - pr_debug("FW upload completed: totally sent %d blocks\n", blk); - return 0; -} - -static void qtnf_fw_work_handler(struct work_struct *work) -{ - struct qtnf_bus *bus = container_of(work, struct qtnf_bus, fw_work); - struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus); - struct pci_dev *pdev = priv->pdev; - const struct firmware *fw; - int ret; - u32 state = QTN_RC_FW_LOADRDY | QTN_RC_FW_QLINK; - - if (flashboot) { - state |= QTN_RC_FW_FLASHBOOT; - } else { - ret = request_firmware(&fw, bus->fwname, &pdev->dev); - if (ret < 0) { - pr_err("failed to get firmware %s\n", bus->fwname); - goto fw_load_fail; - } - } - - qtnf_set_state(&priv->bda->bda_rc_state, state); - - if (qtnf_poll_state(&priv->bda->bda_ep_state, QTN_EP_FW_LOADRDY, - QTN_FW_DL_TIMEOUT_MS)) { - pr_err("card is not ready\n"); - - if (!flashboot) - release_firmware(fw); - - goto fw_load_fail; - } - - qtnf_clear_state(&priv->bda->bda_ep_state, QTN_EP_FW_LOADRDY); - - if (flashboot) { - pr_info("booting firmware from flash\n"); - } else { - pr_info("starting firmware upload: %s\n", bus->fwname); - - ret = qtnf_ep_fw_load(priv, fw->data, fw->size); - release_firmware(fw); - if (ret) { - pr_err("firmware upload error\n"); - goto fw_load_fail; - } - } - - if (qtnf_poll_state(&priv->bda->bda_ep_state, QTN_EP_FW_DONE, - QTN_FW_DL_TIMEOUT_MS)) { - pr_err("firmware bringup timed out\n"); - goto fw_load_fail; - } - - bus->fw_state = QTNF_FW_STATE_FW_DNLD_DONE; - pr_info("firmware is up and running\n"); - - if (qtnf_poll_state(&priv->bda->bda_ep_state, - QTN_EP_FW_QLINK_DONE, QTN_FW_QLINK_TIMEOUT_MS)) { - pr_err("firmware runtime failure\n"); - goto fw_load_fail; - } - - ret = qtnf_core_attach(bus); - if (ret) { - pr_err("failed to attach core\n"); - goto fw_load_fail; - } - - qtnf_debugfs_init(bus, DRV_NAME); - qtnf_debugfs_add_entry(bus, "mps", qtnf_dbg_mps_show); - qtnf_debugfs_add_entry(bus, "msi_enabled", qtnf_dbg_msi_show); - qtnf_debugfs_add_entry(bus, "hdp_stats", qtnf_dbg_hdp_stats); - qtnf_debugfs_add_entry(bus, "irq_stats", qtnf_dbg_irq_stats); - qtnf_debugfs_add_entry(bus, "shm_stats", qtnf_dbg_shm_stats); - - goto fw_load_exit; - -fw_load_fail: - bus->fw_state = QTNF_FW_STATE_DETACHED; - -fw_load_exit: - complete(&bus->firmware_init_complete); - put_device(&pdev->dev); -} - -static void 
qtnf_bringup_fw_async(struct qtnf_bus *bus) -{ - struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus); - struct pci_dev *pdev = priv->pdev; - - get_device(&pdev->dev); - INIT_WORK(&bus->fw_work, qtnf_fw_work_handler); - schedule_work(&bus->fw_work); -} - -static void qtnf_reclaim_tasklet_fn(unsigned long data) -{ - struct qtnf_pcie_bus_priv *priv = (void *)data; - - qtnf_pcie_data_tx_reclaim(priv); - qtnf_en_txdone_irq(priv); -} - -static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) -{ - struct qtnf_pcie_bus_priv *pcie_priv; - struct qtnf_bus *bus; - int ret; - - bus = devm_kzalloc(&pdev->dev, - sizeof(*bus) + sizeof(*pcie_priv), GFP_KERNEL); - if (!bus) - return -ENOMEM; - - pcie_priv = get_bus_priv(bus); - - pci_set_drvdata(pdev, bus); - bus->bus_ops = &qtnf_pcie_bus_ops; - bus->dev = &pdev->dev; - bus->fw_state = QTNF_FW_STATE_RESET; - pcie_priv->pdev = pdev; - - strcpy(bus->fwname, QTN_PCI_PEARL_FW_NAME); - init_completion(&bus->firmware_init_complete); - mutex_init(&bus->bus_lock); - spin_lock_init(&pcie_priv->tx0_lock); - spin_lock_init(&pcie_priv->irq_lock); - spin_lock_init(&pcie_priv->tx_reclaim_lock); - - /* init stats */ - pcie_priv->tx_full_count = 0; - pcie_priv->tx_done_count = 0; - pcie_priv->pcie_irq_count = 0; - pcie_priv->pcie_irq_rx_count = 0; - pcie_priv->pcie_irq_tx_count = 0; - pcie_priv->pcie_irq_uf_count = 0; - pcie_priv->tx_reclaim_done = 0; - pcie_priv->tx_reclaim_req = 0; - - tasklet_init(&pcie_priv->reclaim_tq, qtnf_reclaim_tasklet_fn, - (unsigned long)pcie_priv); - - init_dummy_netdev(&bus->mux_dev); - netif_napi_add(&bus->mux_dev, &bus->mux_napi, - qtnf_rx_poll, 10); - - pcie_priv->workqueue = create_singlethread_workqueue("QTNF_PEARL_PCIE"); - if (!pcie_priv->workqueue) { - pr_err("failed to alloc bus workqueue\n"); - ret = -ENODEV; - goto err_init; - } - - if (!pci_is_pcie(pdev)) { - pr_err("device %s is not PCI Express\n", pci_name(pdev)); - ret = -EIO; - goto err_base; - } - - qtnf_tune_pcie_mps(pcie_priv); - - ret = pcim_enable_device(pdev); - if (ret) { - pr_err("failed to init PCI device %x\n", pdev->device); - goto err_base; - } else { - pr_debug("successful init of PCI device %x\n", pdev->device); - } - -#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT - ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); -#else - ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); -#endif - if (ret) { - pr_err("PCIE DMA coherent mask init failed\n"); - goto err_base; - } - - pci_set_master(pdev); - qtnf_pcie_init_irq(pcie_priv); - - ret = qtnf_pcie_init_memory(pcie_priv); - if (ret < 0) { - pr_err("PCIE memory init failed\n"); - goto err_base; - } - - pci_save_state(pdev); - - ret = qtnf_pcie_init_shm_ipc(pcie_priv); - if (ret < 0) { - pr_err("PCIE SHM IPC init failed\n"); - goto err_base; - } - - ret = qtnf_pcie_init_xfer(pcie_priv); - if (ret) { - pr_err("PCIE xfer init failed\n"); - goto err_ipc; - } - - /* init default irq settings */ - qtnf_init_hdp_irqs(pcie_priv); - - /* start with disabled irqs */ - qtnf_disable_hdp_irqs(pcie_priv); - - ret = devm_request_irq(&pdev->dev, pdev->irq, &qtnf_interrupt, 0, - "qtnf_pcie_irq", (void *)bus); - if (ret) { - pr_err("failed to request pcie irq %d\n", pdev->irq); - goto err_xfer; - } - - qtnf_bringup_fw_async(bus); - - return 0; - -err_xfer: - qtnf_free_xfer_buffers(pcie_priv); - -err_ipc: - qtnf_pcie_free_shm_ipc(pcie_priv); - -err_base: - flush_workqueue(pcie_priv->workqueue); - destroy_workqueue(pcie_priv->workqueue); - netif_napi_del(&bus->mux_napi); - -err_init: - 
tasklet_kill(&pcie_priv->reclaim_tq); - pci_set_drvdata(pdev, NULL); - - return ret; -} - -static void qtnf_pcie_remove(struct pci_dev *pdev) -{ - struct qtnf_pcie_bus_priv *priv; - struct qtnf_bus *bus; - - bus = pci_get_drvdata(pdev); - if (!bus) - return; - - wait_for_completion(&bus->firmware_init_complete); - - if (bus->fw_state == QTNF_FW_STATE_ACTIVE || - bus->fw_state == QTNF_FW_STATE_EP_DEAD) - qtnf_core_detach(bus); - - priv = get_bus_priv(bus); - - netif_napi_del(&bus->mux_napi); - flush_workqueue(priv->workqueue); - destroy_workqueue(priv->workqueue); - tasklet_kill(&priv->reclaim_tq); - - qtnf_free_xfer_buffers(priv); - qtnf_debugfs_remove(bus); - - qtnf_pcie_free_shm_ipc(priv); - qtnf_reset_card(priv); -} - -#ifdef CONFIG_PM_SLEEP -static int qtnf_pcie_suspend(struct device *dev) -{ - return -EOPNOTSUPP; -} - -static int qtnf_pcie_resume(struct device *dev) -{ - return 0; -} -#endif /* CONFIG_PM_SLEEP */ - -#ifdef CONFIG_PM_SLEEP -/* Power Management Hooks */ -static SIMPLE_DEV_PM_OPS(qtnf_pcie_pm_ops, qtnf_pcie_suspend, - qtnf_pcie_resume); -#endif - -static const struct pci_device_id qtnf_pcie_devid_table[] = { - { - PCIE_VENDOR_ID_QUANTENNA, PCIE_DEVICE_ID_QTN_PEARL, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, - }, - { }, -}; - -MODULE_DEVICE_TABLE(pci, qtnf_pcie_devid_table); - -static struct pci_driver qtnf_pcie_drv_data = { - .name = DRV_NAME, - .id_table = qtnf_pcie_devid_table, - .probe = qtnf_pcie_probe, - .remove = qtnf_pcie_remove, -#ifdef CONFIG_PM_SLEEP - .driver = { - .pm = &qtnf_pcie_pm_ops, - }, -#endif -}; - -static int __init qtnf_pcie_register(void) -{ - pr_info("register Quantenna QSR10g FullMAC PCIE driver\n"); - return pci_register_driver(&qtnf_pcie_drv_data); -} - -static void __exit qtnf_pcie_exit(void) -{ - pr_info("unregister Quantenna QSR10g FullMAC PCIE driver\n"); - pci_unregister_driver(&qtnf_pcie_drv_data); -} - -module_init(qtnf_pcie_register); -module_exit(qtnf_pcie_exit); - -MODULE_AUTHOR("Quantenna Communications"); -MODULE_DESCRIPTION("Quantenna QSR10g PCIe bus driver for 802.11 wireless LAN."); -MODULE_LICENSE("GPL"); diff --git a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h deleted file mode 100644 index 397875a50fc2..000000000000 --- a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Copyright (c) 2015-2016 Quantenna Communications, Inc. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - */ - -#ifndef _QTN_FMAC_PCIE_H_ -#define _QTN_FMAC_PCIE_H_ - -#include <linux/dma-mapping.h> -#include <linux/io.h> - -#include "pcie_regs_pearl.h" -#include "pcie_ipc.h" -#include "shm_ipc.h" - -struct bus; - -struct qtnf_pcie_bus_priv { - struct pci_dev *pdev; - - /* lock for irq configuration changes */ - spinlock_t irq_lock; - - /* lock for tx reclaim operations */ - spinlock_t tx_reclaim_lock; - /* lock for tx0 operations */ - spinlock_t tx0_lock; - u8 msi_enabled; - u8 tx_stopped; - int mps; - - struct workqueue_struct *workqueue; - struct tasklet_struct reclaim_tq; - - void __iomem *sysctl_bar; - void __iomem *epmem_bar; - void __iomem *dmareg_bar; - - struct qtnf_shm_ipc shm_ipc_ep_in; - struct qtnf_shm_ipc shm_ipc_ep_out; - - struct qtnf_pcie_bda __iomem *bda; - void __iomem *pcie_reg_base; - - u16 tx_bd_num; - u16 rx_bd_num; - - struct sk_buff **tx_skb; - struct sk_buff **rx_skb; - - struct qtnf_tx_bd *tx_bd_vbase; - dma_addr_t tx_bd_pbase; - - struct qtnf_rx_bd *rx_bd_vbase; - dma_addr_t rx_bd_pbase; - - dma_addr_t bd_table_paddr; - void *bd_table_vaddr; - u32 bd_table_len; - - u32 rx_bd_w_index; - u32 rx_bd_r_index; - - u32 tx_bd_w_index; - u32 tx_bd_r_index; - - u32 pcie_irq_mask; - - /* diagnostics stats */ - u32 pcie_irq_count; - u32 pcie_irq_rx_count; - u32 pcie_irq_tx_count; - u32 pcie_irq_uf_count; - u32 tx_full_count; - u32 tx_done_count; - u32 tx_reclaim_done; - u32 tx_reclaim_req; -}; - -#endif /* _QTN_FMAC_PCIE_H_ */ diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink.h b/drivers/net/wireless/quantenna/qtnfmac/qlink.h index 99d37e3efba6..8d62addea895 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/qlink.h +++ b/drivers/net/wireless/quantenna/qtnfmac/qlink.h @@ -71,6 +71,7 @@ struct qlink_msg_header { * @QLINK_HW_CAPAB_DFS_OFFLOAD: device implements DFS offload functionality * @QLINK_HW_CAPAB_SCAN_RANDOM_MAC_ADDR: device supports MAC Address * Randomization in probe requests. + * @QLINK_HW_CAPAB_OBSS_SCAN: device can perform OBSS scanning. 
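The private state deleted above tracks each DMA descriptor ring with a write index, a read (reclaim) index, and a fixed element count (tx_bd_num/rx_bd_num). The occupancy arithmetic such counters imply is the usual wrap-around computation; a hypothetical helper, assuming both indices stay below bd_num:

    /* Illustrative only; u16 matches the tx_bd_num/rx_bd_num fields above.
     * Requires <linux/types.h> in kernel code.
     */
    static inline u16 ring_in_flight(u16 w_index, u16 r_index, u16 bd_num)
    {
    	if (w_index >= r_index)
    		return w_index - r_index;	/* no wrap */
    	return bd_num - (r_index - w_index);	/* writer wrapped past end */
    }

    /* Keeping one slot unused lets "full" and "empty" stay distinguishable */
    static inline bool ring_full(u16 w_index, u16 r_index, u16 bd_num)
    {
    	return ring_in_flight(w_index, r_index, bd_num) == bd_num - 1;
    }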
*/ enum qlink_hw_capab { QLINK_HW_CAPAB_REG_UPDATE = BIT(0), @@ -78,6 +79,8 @@ enum qlink_hw_capab { QLINK_HW_CAPAB_DFS_OFFLOAD = BIT(2), QLINK_HW_CAPAB_SCAN_RANDOM_MAC_ADDR = BIT(3), QLINK_HW_CAPAB_PWR_MGMT = BIT(4), + QLINK_HW_CAPAB_OBSS_SCAN = BIT(5), + QLINK_HW_CAPAB_SCAN_DWELL = BIT(6), }; enum qlink_iface_type { @@ -1149,6 +1152,8 @@ enum qlink_tlv_id { QTN_TLV_ID_MAX_SCAN_SSIDS = 0x0409, QTN_TLV_ID_WOWLAN_CAPAB = 0x0410, QTN_TLV_ID_WOWLAN_PATTERN = 0x0411, + QTN_TLV_ID_SCAN_FLUSH = 0x0412, + QTN_TLV_ID_SCAN_DWELL = 0x0413, }; struct qlink_tlv_hdr { diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h index 54caeb38917c..960d5d97492f 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h +++ b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h @@ -40,6 +40,14 @@ static inline void qtnf_cmd_skb_put_tlv_arr(struct sk_buff *skb, memcpy(hdr->val, arr, arr_len); } +static inline void qtnf_cmd_skb_put_tlv_tag(struct sk_buff *skb, u16 tlv_id) +{ + struct qlink_tlv_hdr *hdr = skb_put(skb, sizeof(*hdr)); + + hdr->type = cpu_to_le16(tlv_id); + hdr->len = cpu_to_le16(0); +} + static inline void qtnf_cmd_skb_put_tlv_u8(struct sk_buff *skb, u16 tlv_id, u8 value) { diff --git a/drivers/net/wireless/quantenna/qtnfmac/qtn_hw_ids.h b/drivers/net/wireless/quantenna/qtnfmac/qtn_hw_ids.h index c4ad40d59085..1fe798a9a667 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/qtn_hw_ids.h +++ b/drivers/net/wireless/quantenna/qtnfmac/qtn_hw_ids.h @@ -25,8 +25,22 @@ #define PCIE_DEVICE_ID_QTN_PEARL (0x0008) +#define QTN_REG_SYS_CTRL_CSR 0x14 +#define QTN_CHIP_ID_MASK 0xF0 +#define QTN_CHIP_ID_TOPAZ 0x40 +#define QTN_CHIP_ID_PEARL 0x50 +#define QTN_CHIP_ID_PEARL_B 0x60 +#define QTN_CHIP_ID_PEARL_C 0x70 + /* FW names */ #define QTN_PCI_PEARL_FW_NAME "qtn/fmac_qsr10g.img" +static inline unsigned int qtnf_chip_id_get(const void __iomem *regs_base) +{ + u32 board_rev = readl(regs_base + QTN_REG_SYS_CTRL_CSR); + + return board_rev & QTN_CHIP_ID_MASK; +} + #endif /* _QTN_HW_IDS_H_ */ diff --git a/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.c b/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.c index aa106dd0a14b..2ec334199c2b 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.c +++ b/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.c @@ -42,19 +42,18 @@ static void qtnf_shm_handle_new_data(struct qtnf_shm_ipc *ipc) if (unlikely(size == 0 || size > QTN_IPC_MAX_DATA_SZ)) { pr_err("wrong rx packet size: %zu\n", size); rx_buff_ok = false; - } else { - memcpy_fromio(ipc->rx_data, ipc->shm_region->data, size); + } + + if (likely(rx_buff_ok)) { + ipc->rx_packet_count++; + ipc->rx_callback.fn(ipc->rx_callback.arg, + ipc->shm_region->data, size); } writel(QTNF_SHM_IPC_ACK, &shm_reg_hdr->flags); readl(&shm_reg_hdr->flags); /* flush PCIe write */ ipc->interrupt.fn(ipc->interrupt.arg); - - if (likely(rx_buff_ok)) { - ipc->rx_packet_count++; - ipc->rx_callback.fn(ipc->rx_callback.arg, ipc->rx_data, size); - } } static void qtnf_shm_ipc_irq_work(struct work_struct *work) diff --git a/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.h b/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.h index 453dd6477b12..c2a3702a9ee7 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.h +++ b/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.h @@ -32,7 +32,7 @@ struct qtnf_shm_ipc_int { }; struct qtnf_shm_ipc_rx_callback { - void (*fn)(void *arg, const u8 *buf, size_t len); + void (*fn)(void *arg, const u8 __iomem *buf, size_t len); void *arg; }; @@ -51,8 +51,6 
@@ struct qtnf_shm_ipc { u8 waiting_for_ack; - u8 rx_data[QTN_IPC_MAX_DATA_SZ] __aligned(sizeof(u32)); - struct qtnf_shm_ipc_int interrupt; struct qtnf_shm_ipc_rx_callback rx_callback; diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c index a567bc273ffc..9e7b8933d30c 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c @@ -957,6 +957,47 @@ static void rt2800_rate_from_status(struct skb_frame_desc *skbdesc, skbdesc->tx_rate_flags = flags; } +static bool rt2800_txdone_entry_check(struct queue_entry *entry, u32 reg) +{ + __le32 *txwi; + u32 word; + int wcid, ack, pid; + int tx_wcid, tx_ack, tx_pid, is_agg; + + /* + * This frame has returned with an IO error, + * so the status report is not intended for this + * frame. + */ + if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags)) + return false; + + wcid = rt2x00_get_field32(reg, TX_STA_FIFO_WCID); + ack = rt2x00_get_field32(reg, TX_STA_FIFO_TX_ACK_REQUIRED); + pid = rt2x00_get_field32(reg, TX_STA_FIFO_PID_TYPE); + is_agg = rt2x00_get_field32(reg, TX_STA_FIFO_TX_AGGRE); + + /* + * Validate if this TX status report is intended for + * this entry by comparing the WCID/ACK/PID fields. + */ + txwi = rt2800_drv_get_txwi(entry); + + word = rt2x00_desc_read(txwi, 1); + tx_wcid = rt2x00_get_field32(word, TXWI_W1_WIRELESS_CLI_ID); + tx_ack = rt2x00_get_field32(word, TXWI_W1_ACK); + tx_pid = rt2x00_get_field32(word, TXWI_W1_PACKETID); + + if (wcid != tx_wcid || ack != tx_ack || (!is_agg && pid != tx_pid)) { + rt2x00_dbg(entry->queue->rt2x00dev, + "TX status report missed for queue %d entry %d\n", + entry->queue->qid, entry->entry_idx); + return false; + } + + return true; +} + void rt2800_txdone_entry(struct queue_entry *entry, u32 status, __le32 *txwi, bool match) { @@ -1059,6 +1100,119 @@ void rt2800_txdone_entry(struct queue_entry *entry, u32 status, __le32 *txwi, } EXPORT_SYMBOL_GPL(rt2800_txdone_entry); +void rt2800_txdone(struct rt2x00_dev *rt2x00dev) +{ + struct data_queue *queue; + struct queue_entry *entry; + u32 reg; + u8 qid; + bool match; + + while (kfifo_get(&rt2x00dev->txstatus_fifo, &reg)) { + /* + * TX_STA_FIFO_PID_QUEUE is a 2-bit field, thus qid is + * guaranteed to be one of the TX QIDs.
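The shm_ipc rework above deserves a note: the RX callback now receives a pointer straight into the shared PCIe memory window (const u8 __iomem *) rather than a pre-copied bounce buffer, which is why the rx_data array could be dropped from struct qtnf_shm_ipc. Any consumer must copy the bytes out with memcpy_fromio() before treating them as normal memory; a hedged sketch of a conforming callback (handler name invented):

    #include <linux/io.h>
    #include <linux/slab.h>

    static void foo_shm_rx(void *arg, const u8 __iomem *buf, size_t len)
    {
    	/* GFP_ATOMIC: stay safe even if the calling context cannot sleep */
    	u8 *copy = kmalloc(len, GFP_ATOMIC);

    	if (!copy)
    		return;

    	memcpy_fromio(copy, buf, len); /* plain memcpy() is wrong on __iomem */
    	/* ... parse 'copy' as an ordinary buffer ... */
    	kfree(copy);
    }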
+ */ + qid = rt2x00_get_field32(reg, TX_STA_FIFO_PID_QUEUE); + queue = rt2x00queue_get_tx_queue(rt2x00dev, qid); + + if (unlikely(rt2x00queue_empty(queue))) { + rt2x00_dbg(rt2x00dev, "Got TX status for an empty queue %u, dropping\n", + qid); + break; + } + + entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE); + + if (unlikely(test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) || + !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))) { + rt2x00_warn(rt2x00dev, "Data pending for entry %u in queue %u\n", + entry->entry_idx, qid); + break; + } + + match = rt2800_txdone_entry_check(entry, reg); + rt2800_txdone_entry(entry, reg, rt2800_drv_get_txwi(entry), match); + } +} +EXPORT_SYMBOL_GPL(rt2800_txdone); + +static inline bool rt2800_entry_txstatus_timeout(struct rt2x00_dev *rt2x00dev, + struct queue_entry *entry) +{ + bool ret; + unsigned long tout; + + if (!test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags)) + return false; + + if (test_bit(DEVICE_STATE_FLUSHING, &rt2x00dev->flags)) + tout = msecs_to_jiffies(50); + else + tout = msecs_to_jiffies(2000); + + ret = time_after(jiffies, entry->last_action + tout); + if (unlikely(ret)) + rt2x00_dbg(entry->queue->rt2x00dev, + "TX status timeout for entry %d in queue %d\n", + entry->entry_idx, entry->queue->qid); + return ret; +} + +bool rt2800_txstatus_timeout(struct rt2x00_dev *rt2x00dev) +{ + struct data_queue *queue; + struct queue_entry *entry; + + if (!test_bit(DEVICE_STATE_FLUSHING, &rt2x00dev->flags)) { + unsigned long tout = msecs_to_jiffies(1000); + + if (time_before(jiffies, rt2x00dev->last_nostatus_check + tout)) + return false; + } + + rt2x00dev->last_nostatus_check = jiffies; + + tx_queue_for_each(rt2x00dev, queue) { + entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE); + if (rt2800_entry_txstatus_timeout(rt2x00dev, entry)) + return true; + } + + return false; +} +EXPORT_SYMBOL_GPL(rt2800_txstatus_timeout); + +void rt2800_txdone_nostatus(struct rt2x00_dev *rt2x00dev) +{ + struct data_queue *queue; + struct queue_entry *entry; + + /* + * Process any trailing TX status reports for IO failures, + * we loop until we find the first non-IO error entry. This + * can either be a frame which is free, is being uploaded, + * or has completed the upload but didn't have an entry + * in the TX_STAT_FIFO register yet. 
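The comment about TX_STA_FIFO_PID_QUEUE being a 2-bit field is what makes the unchecked rt2x00queue_get_tx_queue() call safe: two bits can only encode 0..3, exactly the four TX access categories, so no out-of-range qid can reach the lookup. Schematically (mask and shift are invented for illustration; the driver uses rt2x00_get_field32() with the real field definition):

    /* u32/u8 come from <linux/types.h> in kernel code */
    #define FOO_PID_QUEUE_SHIFT	1
    #define FOO_PID_QUEUE_MASK	(0x3u << FOO_PID_QUEUE_SHIFT) /* 2 bits */

    static inline u8 foo_status_to_qid(u32 reg)
    {
    	/* result is always 0..3, i.e. a valid TX queue id */
    	return (reg & FOO_PID_QUEUE_MASK) >> FOO_PID_QUEUE_SHIFT;
    }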
+ */ + tx_queue_for_each(rt2x00dev, queue) { + while (!rt2x00queue_empty(queue)) { + entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE); + + if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) || + !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags)) + break; + + if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags) || + rt2800_entry_txstatus_timeout(rt2x00dev, entry)) + rt2x00lib_txdone_noinfo(entry, TXDONE_FAILURE); + else + break; + } + } +} +EXPORT_SYMBOL_GPL(rt2800_txdone_nostatus); + static unsigned int rt2800_hw_beacon_base(struct rt2x00_dev *rt2x00dev, unsigned int index) { diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.h b/drivers/net/wireless/ralink/rt2x00/rt2800lib.h index 51d9c2a932cc..0dff2c7b3010 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.h @@ -195,6 +195,9 @@ void rt2800_process_rxwi(struct queue_entry *entry, struct rxdone_entry_desc *tx void rt2800_txdone_entry(struct queue_entry *entry, u32 status, __le32 *txwi, bool match); +void rt2800_txdone(struct rt2x00_dev *rt2x00dev); +void rt2800_txdone_nostatus(struct rt2x00_dev *rt2x00dev); +bool rt2800_txstatus_timeout(struct rt2x00_dev *rt2x00dev); void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc); void rt2800_clear_beacon(struct queue_entry *entry); diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c b/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c index e1a7ed7e4892..ddb88cfeace2 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c @@ -175,161 +175,6 @@ static void rt2800mmio_wakeup(struct rt2x00_dev *rt2x00dev) rt2800_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS); } -static bool rt2800mmio_txdone_entry_check(struct queue_entry *entry, u32 status) -{ - __le32 *txwi; - u32 word; - int wcid, tx_wcid; - - wcid = rt2x00_get_field32(status, TX_STA_FIFO_WCID); - - txwi = rt2800_drv_get_txwi(entry); - word = rt2x00_desc_read(txwi, 1); - tx_wcid = rt2x00_get_field32(word, TXWI_W1_WIRELESS_CLI_ID); - - return (tx_wcid == wcid); -} - -static bool rt2800mmio_txdone_find_entry(struct queue_entry *entry, void *data) -{ - u32 status = *(u32 *)data; - - /* - * rt2800pci hardware might reorder frames when exchanging traffic - * with multiple BA enabled STAs. - * - * For example, a tx queue - * [ STA1 | STA2 | STA1 | STA2 ] - * can result in tx status reports - * [ STA1 | STA1 | STA2 | STA2 ] - * when the hw decides to aggregate the frames for STA1 into one AMPDU. - * - * To mitigate this effect, associate the tx status to the first frame - * in the tx queue with a matching wcid. - */ - if (rt2800mmio_txdone_entry_check(entry, status) && - !test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) { - /* - * Got a matching frame, associate the tx status with - * the frame - */ - entry->status = status; - set_bit(ENTRY_DATA_STATUS_SET, &entry->flags); - return true; - } - - /* Check the next frame */ - return false; -} - -static bool rt2800mmio_txdone_match_first(struct queue_entry *entry, void *data) -{ - u32 status = *(u32 *)data; - - /* - * Find the first frame without tx status and assign this status to it - * regardless if it matches or not. 
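Worth noting in rt2800_entry_txstatus_timeout() above is the two-tier deadline: 50 ms while DEVICE_STATE_FLUSHING is set (mac80211 is actively flushing the queues) versus 2 s in steady state, with rt2800_txstatus_timeout() additionally rate-limiting the full-queue scan to once per second via last_nostatus_check. The jiffies arithmetic follows the standard wrap-safe pattern; a sketch using the constants from the patch, where 'flushing' stands in for test_bit(DEVICE_STATE_FLUSHING, ...):

    #include <linux/jiffies.h>

    static bool entry_timed_out(unsigned long last_action, bool flushing)
    {
    	unsigned long tout = msecs_to_jiffies(flushing ? 50 : 2000);

    	/* time_after() stays correct across jiffies wraparound */
    	return time_after(jiffies, last_action + tout);
    }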
- */ - if (!test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) { - /* - * Got a matching frame, associate the tx status with - * the frame - */ - entry->status = status; - set_bit(ENTRY_DATA_STATUS_SET, &entry->flags); - return true; - } - - /* Check the next frame */ - return false; -} -static bool rt2800mmio_txdone_release_entries(struct queue_entry *entry, - void *data) -{ - if (test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) { - rt2800_txdone_entry(entry, entry->status, - rt2800mmio_get_txwi(entry), true); - return false; - } - - /* No more frames to release */ - return true; -} - -static bool rt2800mmio_txdone(struct rt2x00_dev *rt2x00dev) -{ - struct data_queue *queue; - u32 status; - u8 qid; - int max_tx_done = 16; - - while (kfifo_get(&rt2x00dev->txstatus_fifo, &status)) { - qid = rt2x00_get_field32(status, TX_STA_FIFO_PID_QUEUE); - if (unlikely(qid >= QID_RX)) { - /* - * Unknown queue, this shouldn't happen. Just drop - * this tx status. - */ - rt2x00_warn(rt2x00dev, "Got TX status report with unexpected pid %u, dropping\n", - qid); - break; - } - - queue = rt2x00queue_get_tx_queue(rt2x00dev, qid); - if (unlikely(queue == NULL)) { - /* - * The queue is NULL, this shouldn't happen. Stop - * processing here and drop the tx status - */ - rt2x00_warn(rt2x00dev, "Got TX status for an unavailable queue %u, dropping\n", - qid); - break; - } - - if (unlikely(rt2x00queue_empty(queue))) { - /* - * The queue is empty. Stop processing here - * and drop the tx status. - */ - rt2x00_warn(rt2x00dev, "Got TX status for an empty queue %u, dropping\n", - qid); - break; - } - - /* - * Let's associate this tx status with the first - * matching frame. - */ - if (!rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, - Q_INDEX, &status, - rt2800mmio_txdone_find_entry)) { - /* - * We cannot match the tx status to any frame, so just - * use the first one. - */ - if (!rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, - Q_INDEX, &status, - rt2800mmio_txdone_match_first)) { - rt2x00_warn(rt2x00dev, "No frame found for TX status on queue %u, dropping\n", - qid); - break; - } - } - - /* - * Release all frames with a valid tx status. - */ - rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, - Q_INDEX, NULL, - rt2800mmio_txdone_release_entries); - - if (--max_tx_done == 0) - break; - } - - return !max_tx_done; -} - static inline void rt2800mmio_enable_interrupt(struct rt2x00_dev *rt2x00dev, struct rt2x00_field32 irq_field) { @@ -346,20 +191,6 @@ static inline void rt2800mmio_enable_interrupt(struct rt2x00_dev *rt2x00dev, spin_unlock_irq(&rt2x00dev->irqmask_lock); } -void rt2800mmio_txstatus_tasklet(unsigned long data) -{ - struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data; - if (rt2800mmio_txdone(rt2x00dev)) - tasklet_schedule(&rt2x00dev->txstatus_tasklet); - - /* - * No need to enable the tx status interrupt here as we always - * leave it enabled to minimize the possibility of a tx status - * register overflow. See comment in interrupt handler. 
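The mmio-only code removed here implemented a two-pass policy for status reports that the hardware may deliver out of order when it aggregates frames for multiple BA peers: first try to attach the report to the oldest unmatched frame with the same WCID, and only then fall back to the oldest frame without any status. A condensed restatement (struct and helper are invented; the driver iterated via rt2x00queue_for_each_entry() callbacks):

    struct tx_frame {
    	bool has_status;	/* stand-in for ENTRY_DATA_STATUS_SET */
    	u8 wcid;		/* WCID recorded in the frame's TXWI */
    };

    /* 'q' is an oldest-to-newest view of the TX queue, length n */
    static struct tx_frame *match_status(struct tx_frame *q, int n, u8 wcid)
    {
    	int i;

    	/* pass 1: oldest unmatched frame with the same WCID */
    	for (i = 0; i < n; i++)
    		if (!q[i].has_status && q[i].wcid == wcid)
    			return &q[i];

    	/* pass 2: no match at all, take the oldest unmatched frame */
    	for (i = 0; i < n; i++)
    		if (!q[i].has_status)
    			return &q[i];

    	return NULL;	/* report cannot be attributed; caller drops it */
    }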
- */ -} -EXPORT_SYMBOL_GPL(rt2800mmio_txstatus_tasklet); - void rt2800mmio_pretbtt_tasklet(unsigned long data) { struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data; @@ -424,12 +255,26 @@ void rt2800mmio_autowake_tasklet(unsigned long data) } EXPORT_SYMBOL_GPL(rt2800mmio_autowake_tasklet); -static void rt2800mmio_txstatus_interrupt(struct rt2x00_dev *rt2x00dev) +static void rt2800mmio_txdone(struct rt2x00_dev *rt2x00dev) +{ + bool timeout = false; + + while (!kfifo_is_empty(&rt2x00dev->txstatus_fifo) || + (timeout = rt2800_txstatus_timeout(rt2x00dev))) { + + rt2800_txdone(rt2x00dev); + + if (timeout) + rt2800_txdone_nostatus(rt2x00dev); + } +} + +static bool rt2800mmio_fetch_txstatus(struct rt2x00_dev *rt2x00dev) { u32 status; - int i; + bool more = false; - /* + /* FIXME: rewrite this comment * The TX_FIFO_STATUS interrupt needs special care. We should * read TX_STA_FIFO but we should do it immediately as otherwise * the register can overflow and we would lose status reports. @@ -440,28 +285,36 @@ static void rt2800mmio_txstatus_interrupt(struct rt2x00_dev *rt2x00dev) * because we can schedule the tasklet multiple times (when the * interrupt fires again during tx status processing). * - * Furthermore we don't disable the TX_FIFO_STATUS - * interrupt here but leave it enabled so that the TX_STA_FIFO - * can also be read while the tx status tasklet gets executed. - * - * Since we have only one producer and one consumer we don't + * txstatus tasklet is called with INT_SOURCE_CSR_TX_FIFO_STATUS + * disabled so we have only one producer and one consumer - we don't + * need to lock the kfifo. */ - for (i = 0; i < rt2x00dev->tx->limit; i++) { + while (!kfifo_is_full(&rt2x00dev->txstatus_fifo)) { status = rt2x00mmio_register_read(rt2x00dev, TX_STA_FIFO); - if (!rt2x00_get_field32(status, TX_STA_FIFO_VALID)) break; - if (!kfifo_put(&rt2x00dev->txstatus_fifo, status)) { - rt2x00_warn(rt2x00dev, "TX status FIFO overrun, drop tx status report\n"); - break; - } + kfifo_put(&rt2x00dev->txstatus_fifo, status); + more = true; } - /* Schedule the tasklet for processing the tx status. */ - tasklet_schedule(&rt2x00dev->txstatus_tasklet); + return more; +} + +void rt2800mmio_txstatus_tasklet(unsigned long data) +{ + struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data; + + do { + rt2800mmio_txdone(rt2x00dev); + + } while (rt2800mmio_fetch_txstatus(rt2x00dev)); + + if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) + rt2800mmio_enable_interrupt(rt2x00dev, + INT_SOURCE_CSR_TX_FIFO_STATUS); } +EXPORT_SYMBOL_GPL(rt2800mmio_txstatus_tasklet); irqreturn_t rt2800mmio_interrupt(int irq, void *dev_instance) { @@ -486,11 +339,8 @@ irqreturn_t rt2800mmio_interrupt(int irq, void *dev_instance) mask = ~reg; if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS)) { - rt2800mmio_txstatus_interrupt(rt2x00dev); - /* - * Never disable the TX_FIFO_STATUS interrupt.
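The restructured flow above is a classic lockless single-producer/single-consumer handoff: the hard-IRQ handler drains TX_STA_FIFO into the kfifo and schedules the tasklet, and the tasklet consumes with the TX_FIFO_STATUS interrupt masked until it finishes, so the kfifo needs no locking. A self-contained skeleton of the pattern (device specifics stubbed; DECLARE_TASKLET uses its pre-5.9 three-argument form, matching this kernel):

    #include <linux/interrupt.h>
    #include <linux/kfifo.h>

    /* Stand-ins for the real register read and rt2800_txdone() work: */
    static bool read_hw_status(u32 *status);
    static void process_status(u32 status);

    static DEFINE_KFIFO(status_fifo, u32, 16); /* size must be a power of two */

    static void foo_tasklet_fn(unsigned long data)
    {
    	u32 status;

    	/* sole consumer; runs with the status interrupt still masked */
    	while (kfifo_get(&status_fifo, &status))
    		process_status(status);
    	/* re-enable the status interrupt here, as the patch does */
    }
    static DECLARE_TASKLET(foo_tasklet, foo_tasklet_fn, 0);

    static irqreturn_t foo_irq(int irq, void *dev)
    {
    	u32 status;

    	/* sole producer; hard-IRQ context, so read the FIFO immediately */
    	while (!kfifo_is_full(&status_fifo) && read_hw_status(&status))
    		kfifo_put(&status_fifo, status);

    	tasklet_schedule(&foo_tasklet);
    	return IRQ_HANDLED;
    }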
- */ - rt2x00_set_field32(&mask, INT_MASK_CSR_TX_FIFO_STATUS, 1); + rt2800mmio_fetch_txstatus(rt2x00dev); + tasklet_schedule(&rt2x00dev->txstatus_tasklet); } if (rt2x00_get_field32(reg, INT_SOURCE_CSR_PRE_TBTT)) @@ -616,6 +466,53 @@ void rt2800mmio_kick_queue(struct data_queue *queue) } EXPORT_SYMBOL_GPL(rt2800mmio_kick_queue); +void rt2800mmio_flush_queue(struct data_queue *queue, bool drop) +{ + struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; + bool tx_queue = false; + unsigned int i; + + switch (queue->qid) { + case QID_AC_VO: + case QID_AC_VI: + case QID_AC_BE: + case QID_AC_BK: + tx_queue = true; + break; + case QID_RX: + break; + default: + return; + } + + for (i = 0; i < 5; i++) { + /* + * Check if the driver is already done, otherwise we + * have to sleep a little while to give the driver/hw + * the opportunity to complete interrupt process itself. + */ + if (rt2x00queue_empty(queue)) + break; + + /* + * For TX queues schedule completion tasklet to catch + * tx status timeouts, otherwise just wait. + */ + if (tx_queue) { + tasklet_disable(&rt2x00dev->txstatus_tasklet); + rt2800mmio_txdone(rt2x00dev); + tasklet_enable(&rt2x00dev->txstatus_tasklet); + } + + /* + * Wait for a little while to give the driver + * the opportunity to recover itself. + */ + msleep(50); + } +} +EXPORT_SYMBOL_GPL(rt2800mmio_flush_queue); + void rt2800mmio_stop_queue(struct data_queue *queue) { struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800mmio.h b/drivers/net/wireless/ralink/rt2x00/rt2800mmio.h index b63312ce3f27..3a513273f414 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800mmio.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2800mmio.h @@ -148,6 +148,7 @@ void rt2800mmio_toggle_irq(struct rt2x00_dev *rt2x00dev, /* Queue handlers */ void rt2800mmio_start_queue(struct data_queue *queue); void rt2800mmio_kick_queue(struct data_queue *queue); +void rt2800mmio_flush_queue(struct data_queue *queue, bool drop); void rt2800mmio_stop_queue(struct data_queue *queue); void rt2800mmio_queue_init(struct data_queue *queue); diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800pci.c b/drivers/net/wireless/ralink/rt2x00/rt2800pci.c index 71b1affc3885..0291441ac548 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800pci.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800pci.c @@ -364,7 +364,7 @@ static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = { .start_queue = rt2800mmio_start_queue, .kick_queue = rt2800mmio_kick_queue, .stop_queue = rt2800mmio_stop_queue, - .flush_queue = rt2x00mmio_flush_queue, + .flush_queue = rt2800mmio_flush_queue, .write_tx_desc = rt2800mmio_write_tx_desc, .write_tx_data = rt2800_write_tx_data, .write_beacon = rt2800_write_beacon, diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c index 98a7313fea4a..19eabf16147b 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c @@ -116,35 +116,6 @@ static bool rt2800usb_txstatus_pending(struct rt2x00_dev *rt2x00dev) return false; } -static inline bool rt2800usb_entry_txstatus_timeout(struct queue_entry *entry) -{ - bool tout; - - if (!test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags)) - return false; - - tout = time_after(jiffies, entry->last_action + msecs_to_jiffies(500)); - if (unlikely(tout)) - rt2x00_dbg(entry->queue->rt2x00dev, - "TX status timeout for entry %d in queue %d\n", - entry->entry_idx, entry->queue->qid); - return tout; - -} - -static bool
rt2800usb_txstatus_timeout(struct rt2x00_dev *rt2x00dev) -{ - struct data_queue *queue; - struct queue_entry *entry; - - tx_queue_for_each(rt2x00dev, queue) { - entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE); - if (rt2800usb_entry_txstatus_timeout(entry)) - return true; - } - return false; -} - #define TXSTATUS_READ_INTERVAL 1000000 static bool rt2800usb_tx_sta_fifo_read_completed(struct rt2x00_dev *rt2x00dev, @@ -171,7 +142,7 @@ static bool rt2800usb_tx_sta_fifo_read_completed(struct rt2x00_dev *rt2x00dev, } /* Check if there is any entry that timedout waiting on TX status */ - if (rt2800usb_txstatus_timeout(rt2x00dev)) + if (rt2800_txstatus_timeout(rt2x00dev)) queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work); if (rt2800usb_txstatus_pending(rt2x00dev)) { @@ -501,123 +472,17 @@ static int rt2800usb_get_tx_data_len(struct queue_entry *entry) /* * TX control handlers */ -static bool rt2800usb_txdone_entry_check(struct queue_entry *entry, u32 reg) -{ - __le32 *txwi; - u32 word; - int wcid, ack, pid; - int tx_wcid, tx_ack, tx_pid, is_agg; - - /* - * This frames has returned with an IO error, - * so the status report is not intended for this - * frame. - */ - if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags)) - return false; - - wcid = rt2x00_get_field32(reg, TX_STA_FIFO_WCID); - ack = rt2x00_get_field32(reg, TX_STA_FIFO_TX_ACK_REQUIRED); - pid = rt2x00_get_field32(reg, TX_STA_FIFO_PID_TYPE); - is_agg = rt2x00_get_field32(reg, TX_STA_FIFO_TX_AGGRE); - - /* - * Validate if this TX status report is intended for - * this entry by comparing the WCID/ACK/PID fields. - */ - txwi = rt2800usb_get_txwi(entry); - - word = rt2x00_desc_read(txwi, 1); - tx_wcid = rt2x00_get_field32(word, TXWI_W1_WIRELESS_CLI_ID); - tx_ack = rt2x00_get_field32(word, TXWI_W1_ACK); - tx_pid = rt2x00_get_field32(word, TXWI_W1_PACKETID); - - if (wcid != tx_wcid || ack != tx_ack || (!is_agg && pid != tx_pid)) { - rt2x00_dbg(entry->queue->rt2x00dev, - "TX status report missed for queue %d entry %d\n", - entry->queue->qid, entry->entry_idx); - return false; - } - - return true; -} - -static void rt2800usb_txdone(struct rt2x00_dev *rt2x00dev) -{ - struct data_queue *queue; - struct queue_entry *entry; - u32 reg; - u8 qid; - bool match; - - while (kfifo_get(&rt2x00dev->txstatus_fifo, &reg)) { - /* - * TX_STA_FIFO_PID_QUEUE is a 2-bit field, thus qid is - * guaranteed to be one of the TX QIDs . - */ - qid = rt2x00_get_field32(reg, TX_STA_FIFO_PID_QUEUE); - queue = rt2x00queue_get_tx_queue(rt2x00dev, qid); - - if (unlikely(rt2x00queue_empty(queue))) { - rt2x00_dbg(rt2x00dev, "Got TX status for an empty queue %u, dropping\n", - qid); - break; - } - - entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE); - - if (unlikely(test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) || - !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))) { - rt2x00_warn(rt2x00dev, "Data pending for entry %u in queue %u\n", - entry->entry_idx, qid); - break; - } - - match = rt2800usb_txdone_entry_check(entry, reg); - rt2800_txdone_entry(entry, reg, rt2800usb_get_txwi(entry), match); - } -} - -static void rt2800usb_txdone_nostatus(struct rt2x00_dev *rt2x00dev) -{ - struct data_queue *queue; - struct queue_entry *entry; - - /* - * Process any trailing TX status reports for IO failures, - * we loop until we find the first non-IO error entry. This - * can either be a frame which is free, is being uploaded, - * or has completed the upload but didn't have an entry - * in the TX_STAT_FIFO register yet.
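With the USB-specific duplicates gone, rt2800usb keeps only the scheduling glue: the completion path runs in atomic context, so it merely checks the shared rt2800_txstatus_timeout() and defers the actual reclaim to the txdone_work item, which may sleep. The general shape, with invented names:

    #include <linux/workqueue.h>

    struct foo_dev {
    	struct workqueue_struct *wq;
    	struct work_struct txdone_work;	/* INIT_WORK()ed during probe */
    };

    static bool foo_txstatus_timeout(struct foo_dev *dev);	/* cheap check */
    static void foo_process_txdone(struct foo_dev *dev);	/* heavy lifting */

    /* called from URB completion (atomic): no sleeping allowed here */
    static void foo_on_completion(struct foo_dev *dev)
    {
    	if (foo_txstatus_timeout(dev))
    		queue_work(dev->wq, &dev->txdone_work);
    }

    /* runs in process context: free to sleep, take mutexes, etc. */
    static void foo_txdone_work(struct work_struct *work)
    {
    	struct foo_dev *dev = container_of(work, struct foo_dev,
    					   txdone_work);

    	foo_process_txdone(dev);
    }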
- */ - tx_queue_for_each(rt2x00dev, queue) { - while (!rt2x00queue_empty(queue)) { - entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE); - - if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) || - !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags)) - break; - - if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags) || - rt2800usb_entry_txstatus_timeout(entry)) - rt2x00lib_txdone_noinfo(entry, TXDONE_FAILURE); - else - break; - } - } -} - static void rt2800usb_work_txdone(struct work_struct *work) { struct rt2x00_dev *rt2x00dev = container_of(work, struct rt2x00_dev, txdone_work); while (!kfifo_is_empty(&rt2x00dev->txstatus_fifo) || - rt2800usb_txstatus_timeout(rt2x00dev)) { + rt2800_txstatus_timeout(rt2x00dev)) { - rt2800usb_txdone(rt2x00dev); + rt2800_txdone(rt2x00dev); - rt2800usb_txdone_nostatus(rt2x00dev); + rt2800_txdone_nostatus(rt2x00dev); /* * The hw may delay sending the packet after DMA complete diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h index a279a4363bc1..4b1744e9fb78 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h @@ -665,6 +665,7 @@ enum rt2x00_state_flags { DEVICE_STATE_STARTED, DEVICE_STATE_ENABLED_RADIO, DEVICE_STATE_SCANNING, + DEVICE_STATE_FLUSHING, /* * Driver configuration @@ -980,6 +981,8 @@ struct rt2x00_dev { */ DECLARE_KFIFO_PTR(txstatus_fifo, u32); + unsigned long last_nostatus_check; + /* * Timer to ensure tx status reports are read (rt2800usb). */ diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c b/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c index acc399b5574e..61ba573e8bf1 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c @@ -464,11 +464,7 @@ static ssize_t rt2x00debug_read_##__name(struct file *file, \ \ size = sprintf(line, __format, value); \ \ - if (copy_to_user(buf, line, size)) \ - return -EFAULT; \ - \ - *offset += size; \ - return size; \ + return simple_read_from_buffer(buf, length, offset, line, size); \ } #define RT2X00DEBUGFS_OPS_WRITE(__name, __type) \ @@ -545,11 +541,7 @@ static ssize_t rt2x00debug_read_dev_flags(struct file *file, size = sprintf(line, "0x%.8x\n", (unsigned int)intf->rt2x00dev->flags); - if (copy_to_user(buf, line, size)) - return -EFAULT; - - *offset += size; - return size; + return simple_read_from_buffer(buf, length, offset, line, size); } static const struct file_operations rt2x00debug_fop_dev_flags = { @@ -574,11 +566,7 @@ static ssize_t rt2x00debug_read_cap_flags(struct file *file, size = sprintf(line, "0x%.8x\n", (unsigned int)intf->rt2x00dev->cap_flags); - if (copy_to_user(buf, line, size)) - return -EFAULT; - - *offset += size; - return size; + return simple_read_from_buffer(buf, length, offset, line, size); } static const struct file_operations rt2x00debug_fop_cap_flags = { diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c index fa2fd64084ac..2825560e2424 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c @@ -720,8 +720,12 @@ void rt2x00mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) return; + set_bit(DEVICE_STATE_FLUSHING, &rt2x00dev->flags); + tx_queue_for_each(rt2x00dev, queue) rt2x00queue_flush_queue(queue, drop); + + clear_bit(DEVICE_STATE_FLUSHING, &rt2x00dev->flags); } EXPORT_SYMBOL_GPL(rt2x00mac_flush); diff --git 
a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c index 710e9641552e..92ddc19e7bf7 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c @@ -113,6 +113,7 @@ int rt2x00queue_map_txskb(struct queue_entry *entry) return -ENOMEM; skbdesc->flags |= SKBDESC_DMA_MAPPED_TX; + rt2x00lib_dmadone(entry); return 0; } EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb); @@ -1038,6 +1039,7 @@ void rt2x00queue_start_queues(struct rt2x00_dev *rt2x00dev) */ tx_queue_for_each(rt2x00dev, queue) rt2x00queue_start_queue(queue); + rt2x00dev->last_nostatus_check = jiffies; rt2x00queue_start_queue(rt2x00dev->rx); } diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c index 9a1d15b3ce45..cec37787ecf8 100644 --- a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c +++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c @@ -499,7 +499,7 @@ static void rtl8187b_status_cb(struct urb *urb) if (cmd_type == 1) { unsigned int pkt_rc, seq_no; bool tok; - struct sk_buff *skb; + struct sk_buff *skb, *iter; struct ieee80211_hdr *ieee80211hdr; unsigned long flags; @@ -508,8 +508,9 @@ static void rtl8187b_status_cb(struct urb *urb) seq_no = (val >> 16) & 0xFFF; spin_lock_irqsave(&priv->b_tx_status.queue.lock, flags); - skb_queue_reverse_walk(&priv->b_tx_status.queue, skb) { - ieee80211hdr = (struct ieee80211_hdr *)skb->data; + skb = NULL; + skb_queue_reverse_walk(&priv->b_tx_status.queue, iter) { + ieee80211hdr = (struct ieee80211_hdr *)iter->data; /* * While testing, it was discovered that the seq_no @@ -522,10 +523,12 @@ static void rtl8187b_status_cb(struct urb *urb) * it's unlikely we wrongly ack some sent data */ if ((le16_to_cpu(ieee80211hdr->seq_ctrl) - & 0xFFF) == seq_no) + & 0xFFF) == seq_no) { + skb = iter; break; + } } - if (skb != (struct sk_buff *) &priv->b_tx_status.queue) { + if (skb) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); __skb_unlink(skb, &priv->b_tx_status.queue); diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c index c2d5b495c179..c089540116fa 100644 --- a/drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c +++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c @@ -146,7 +146,7 @@ static int rtl8187_register_led(struct ieee80211_hw *dev, led->dev = dev; led->ledpin = ledpin; led->is_radio = is_radio; - strncpy(led->name, name, sizeof(led->name)); + strlcpy(led->name, name, sizeof(led->name)); led->led_dev.name = led->name; led->led_dev.default_trigger = default_trigger; diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c index 505ab1b055ff..73f6fc0d4a01 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c @@ -6231,6 +6231,8 @@ static const struct usb_device_id dev_table[] = { {USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3308, 0xff, 0xff, 0xff), .driver_info = (unsigned long)&rtl8192cu_fops}, /* Currently untested 8188 series devices */ +{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x018a, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, {USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8191, 0xff, 0xff, 0xff), .driver_info = (unsigned long)&rtl8192cu_fops}, {USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8170, 0xff, 0xff, 0xff), diff --git 
a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c index b026e80940a4..6fbf8845a2ab 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c @@ -1324,13 +1324,13 @@ bool exhalbtc_initlize_variables_wifi_only(struct rtl_priv *rtlpriv) switch (rtlpriv->rtlhal.interface) { case INTF_PCI: - wifionly_cfg->chip_interface = BTC_INTF_PCI; + wifionly_cfg->chip_interface = WIFIONLY_INTF_PCI; break; case INTF_USB: - wifionly_cfg->chip_interface = BTC_INTF_USB; + wifionly_cfg->chip_interface = WIFIONLY_INTF_USB; break; default: - wifionly_cfg->chip_interface = BTC_INTF_UNKNOWN; + wifionly_cfg->chip_interface = WIFIONLY_INTF_UNKNOWN; break; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c index 988d5ac57d02..cfc8762c55f4 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c @@ -951,12 +951,8 @@ static bool _rtl88ee_init_mac(struct ieee80211_hw *hw) static void _rtl88ee_hw_configure(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - u8 reg_bw_opmode; - u32 reg_ratr, reg_prsr; + u32 reg_prsr; - reg_bw_opmode = BW_OPMODE_20MHZ; - reg_ratr = RATE_ALL_CCK | RATE_ALL_OFDM_AG | - RATE_ALL_OFDM_1SS | RATE_ALL_OFDM_2SS; reg_prsr = RATE_ALL_CCK | RATE_ALL_OFDM_AG; rtl_write_dword(rtlpriv, REG_RRSR, reg_prsr); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c index 545115db507e..f783e4a8083d 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c @@ -799,11 +799,9 @@ static void _rtl8723e_hw_configure(struct ieee80211_hw *hw) struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_priv *rtlpriv = rtl_priv(hw); u8 reg_bw_opmode; - u32 reg_ratr, reg_prsr; + u32 reg_prsr; reg_bw_opmode = BW_OPMODE_20MHZ; - reg_ratr = RATE_ALL_CCK | RATE_ALL_OFDM_AG | - RATE_ALL_OFDM_1SS | RATE_ALL_OFDM_2SS; reg_prsr = RATE_ALL_CCK | RATE_ALL_OFDM_AG; rtl_write_byte(rtlpriv, REG_INIRTS_RATE_SEL, 0x8); diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h index 0f3b98c5227f..87bc21bb5e8b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/wifi.h +++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h @@ -1905,10 +1905,6 @@ struct rtl_efuse { u8 efuse_map[2][EFUSE_MAX_LOGICAL_SIZE]; u16 efuse_usedbytes; u8 efuse_usedpercentage; -#ifdef EFUSE_REPG_WORKAROUND - bool efuse_re_pg_sec1flag; - u8 efuse_re_pg_data[8]; -#endif u8 autoload_failflag; u8 autoload_status; diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c index 01edf960ff3c..182b06629371 100644 --- a/drivers/net/wireless/rsi/rsi_91x_hal.c +++ b/drivers/net/wireless/rsi/rsi_91x_hal.c @@ -282,10 +282,8 @@ int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb) struct rsi_hw *adapter = common->priv; struct ieee80211_vif *vif; struct ieee80211_tx_info *info; - struct skb_info *tx_params; struct ieee80211_bss_conf *bss; int status = -EINVAL; - u8 header_size; if (!skb) return 0; @@ -297,8 +295,6 @@ int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb) goto err; vif = info->control.vif; bss = &vif->bss_conf; - tx_params = (struct skb_info *)info->driver_data; - header_size = tx_params->internal_hdr_size; if 
(((vif->type == NL80211_IFTYPE_STATION) || (vif->type == NL80211_IFTYPE_P2P_CLIENT)) && diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c index 4e510cbe0a89..e56fc83faf0e 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c +++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c @@ -924,7 +924,7 @@ static int rsi_hal_key_config(struct ieee80211_hw *hw, if (status) return status; - if (vif->type == NL80211_IFTYPE_STATION && key->key && + if (vif->type == NL80211_IFTYPE_STATION && (key->cipher == WLAN_CIPHER_SUITE_WEP104 || key->cipher == WLAN_CIPHER_SUITE_WEP40)) { if (!rsi_send_block_unblock_frame(adapter->priv, false)) diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c index c0a163e40402..f360690396dd 100644 --- a/drivers/net/wireless/rsi/rsi_91x_usb.c +++ b/drivers/net/wireless/rsi/rsi_91x_usb.c @@ -266,15 +266,17 @@ static void rsi_rx_done_handler(struct urb *urb) if (urb->status) goto out; - if (urb->actual_length <= 0) { - rsi_dbg(INFO_ZONE, "%s: Zero length packet\n", __func__); + if (urb->actual_length <= 0 || + urb->actual_length > rx_cb->rx_skb->len) { + rsi_dbg(INFO_ZONE, "%s: Invalid packet length = %d\n", + __func__, urb->actual_length); goto out; } if (skb_queue_len(&dev->rx_q) >= RSI_MAX_RX_PKTS) { rsi_dbg(INFO_ZONE, "Max RX packets reached\n"); goto out; } - skb_put(rx_cb->rx_skb, urb->actual_length); + skb_trim(rx_cb->rx_skb, urb->actual_length); skb_queue_tail(&dev->rx_q, rx_cb->rx_skb); rsi_set_event(&dev->rx_thread.event); @@ -308,6 +310,7 @@ static int rsi_rx_urb_submit(struct rsi_hw *adapter, u8 ep_num) if (!skb) return -ENOMEM; skb_reserve(skb, MAX_DWORD_ALIGN_BYTES); + skb_put(skb, RSI_MAX_RX_USB_PKT_SIZE - MAX_DWORD_ALIGN_BYTES); dword_align_bytes = (unsigned long)skb->data & 0x3f; if (dword_align_bytes > 0) skb_push(skb, dword_align_bytes); @@ -319,7 +322,7 @@ static int rsi_rx_urb_submit(struct rsi_hw *adapter, u8 ep_num) usb_rcvbulkpipe(dev->usbdev, dev->bulkin_endpoint_addr[ep_num - 1]), urb->transfer_buffer, - RSI_MAX_RX_USB_PKT_SIZE, + skb->len, rsi_rx_done_handler, rx_cb); diff --git a/drivers/net/wireless/rsi/rsi_common.h b/drivers/net/wireless/rsi/rsi_common.h index d9ff3b8be86e..60f1f286b030 100644 --- a/drivers/net/wireless/rsi/rsi_common.h +++ b/drivers/net/wireless/rsi/rsi_common.h @@ -75,7 +75,6 @@ static inline int rsi_kill_thread(struct rsi_thread *handle) atomic_inc(&handle->thread_done); rsi_set_event(&handle->event); - wait_for_completion(&handle->completion); return kthread_stop(handle->task); } diff --git a/drivers/net/wireless/st/cw1200/txrx.c b/drivers/net/wireless/st/cw1200/txrx.c index f7b1b0062db3..8c800ef23159 100644 --- a/drivers/net/wireless/st/cw1200/txrx.c +++ b/drivers/net/wireless/st/cw1200/txrx.c @@ -624,9 +624,9 @@ cw1200_tx_h_bt(struct cw1200_common *priv, priority = WSM_EPTA_PRIORITY_ACTION; else if (ieee80211_is_mgmt(t->hdr->frame_control)) priority = WSM_EPTA_PRIORITY_MGT; - else if ((wsm->queue_id == WSM_QUEUE_VOICE)) + else if (wsm->queue_id == WSM_QUEUE_VOICE) priority = WSM_EPTA_PRIORITY_VOICE; - else if ((wsm->queue_id == WSM_QUEUE_VIDEO)) + else if (wsm->queue_id == WSM_QUEUE_VIDEO) priority = WSM_EPTA_PRIORITY_VIDEO; else priority = WSM_EPTA_PRIORITY_DATA; diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index 89b0d0fade9f..26b187336875 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -27,6 +27,7 @@ #include <linux/interrupt.h> #include 
<linux/irq.h> #include <linux/pm_runtime.h> +#include <linux/pm_wakeirq.h> #include "wlcore.h" #include "debug.h" @@ -957,6 +958,8 @@ static void wl1271_recovery_work(struct work_struct *work) BUG_ON(wl->conf.recovery.bug_on_recovery && !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)); + clear_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags); + if (wl->conf.recovery.no_recovery) { wl1271_info("No recovery (chosen on module load). Fw will remain stuck."); goto out_unlock; @@ -6625,13 +6628,25 @@ static void wlcore_nvs_cb(const struct firmware *fw, void *context) } #ifdef CONFIG_PM + device_init_wakeup(wl->dev, true); + ret = enable_irq_wake(wl->irq); if (!ret) { wl->irq_wake_enabled = true; - device_init_wakeup(wl->dev, 1); if (pdev_data->pwr_in_suspend) wl->hw->wiphy->wowlan = &wlcore_wowlan_support; } + + res = platform_get_resource(pdev, IORESOURCE_IRQ, 1); + if (res) { + wl->wakeirq = res->start; + wl->wakeirq_flags = res->flags & IRQF_TRIGGER_MASK; + ret = dev_pm_set_dedicated_wake_irq(wl->dev, wl->wakeirq); + if (ret) + wl->wakeirq = -ENODEV; + } else { + wl->wakeirq = -ENODEV; + } #endif disable_irq(wl->irq); wl1271_power_off(wl); @@ -6659,6 +6674,9 @@ out_unreg: wl1271_unregister_hw(wl); out_irq: + if (wl->wakeirq >= 0) + dev_pm_clear_wake_irq(wl->dev); + device_init_wakeup(wl->dev, false); free_irq(wl->irq, wl); out_free_nvs: @@ -6710,6 +6728,7 @@ static int __maybe_unused wlcore_runtime_resume(struct device *dev) int ret; unsigned long start_time = jiffies; bool pending = false; + bool recovery = false; /* Nothing to do if no ELP mode requested */ if (!test_bit(WL1271_FLAG_IN_ELP, &wl->flags)) @@ -6726,7 +6745,7 @@ static int __maybe_unused wlcore_runtime_resume(struct device *dev) ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP); if (ret < 0) { - wl12xx_queue_recovery_work(wl); + recovery = true; goto err; } @@ -6734,11 +6753,12 @@ static int __maybe_unused wlcore_runtime_resume(struct device *dev) ret = wait_for_completion_timeout(&compl, msecs_to_jiffies(WL1271_WAKEUP_TIMEOUT)); if (ret == 0) { - wl1271_error("ELP wakeup timeout!"); - wl12xx_queue_recovery_work(wl); + wl1271_warning("ELP wakeup timeout!"); /* Return no error for runtime PM for recovery */ - return 0; + ret = 0; + recovery = true; + goto err; } } @@ -6753,6 +6773,12 @@ err: spin_lock_irqsave(&wl->wl_lock, flags); wl->elp_compl = NULL; spin_unlock_irqrestore(&wl->wl_lock, flags); + + if (recovery) { + set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags); + wl12xx_queue_recovery_work(wl); + } + return ret; } @@ -6815,10 +6841,16 @@ int wlcore_remove(struct platform_device *pdev) if (!wl->initialized) return 0; - if (wl->irq_wake_enabled) { - device_init_wakeup(wl->dev, 0); - disable_irq_wake(wl->irq); + if (wl->wakeirq >= 0) { + dev_pm_clear_wake_irq(wl->dev); + wl->wakeirq = -ENODEV; } + + device_init_wakeup(wl->dev, false); + + if (wl->irq_wake_enabled) + disable_irq_wake(wl->irq); + wl1271_unregister_hw(wl); pm_runtime_put_sync(wl->dev); diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c index 750bea3574ee..4c2154b9e6a3 100644 --- a/drivers/net/wireless/ti/wlcore/sdio.c +++ b/drivers/net/wireless/ti/wlcore/sdio.c @@ -241,7 +241,7 @@ static const struct of_device_id wlcore_sdio_of_match_table[] = { { } }; -static int wlcore_probe_of(struct device *dev, int *irq, +static int wlcore_probe_of(struct device *dev, int *irq, int *wakeirq, struct wlcore_platdev_data *pdev_data) { struct device_node *np = dev->of_node; @@ -259,6 +259,8 @@ static int 
wlcore_probe_of(struct device *dev, int *irq, return -EINVAL; } + *wakeirq = irq_of_parse_and_map(np, 1); + /* optional clock frequency params */ of_property_read_u32(np, "ref-clock-frequency", &pdev_data->ref_clock_freq); @@ -268,7 +270,7 @@ static int wlcore_probe_of(struct device *dev, int *irq, return 0; } #else -static int wlcore_probe_of(struct device *dev, int *irq, +static int wlcore_probe_of(struct device *dev, int *irq, int *wakeirq, struct wlcore_platdev_data *pdev_data) { return -ENODATA; @@ -280,10 +282,10 @@ static int wl1271_probe(struct sdio_func *func, { struct wlcore_platdev_data *pdev_data; struct wl12xx_sdio_glue *glue; - struct resource res[1]; + struct resource res[2]; mmc_pm_flag_t mmcflags; int ret = -ENOMEM; - int irq; + int irq, wakeirq; const char *chip_family; /* We are only able to handle the wlan function */ @@ -308,7 +310,7 @@ static int wl1271_probe(struct sdio_func *func, /* Use block mode for transferring over one block size of data */ func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE; - ret = wlcore_probe_of(&func->dev, &irq, pdev_data); + ret = wlcore_probe_of(&func->dev, &irq, &wakeirq, pdev_data); if (ret) goto out; @@ -351,6 +353,11 @@ static int wl1271_probe(struct sdio_func *func, irqd_get_trigger_type(irq_get_irq_data(irq)); res[0].name = "irq"; + res[1].start = wakeirq; + res[1].flags = IORESOURCE_IRQ | + irqd_get_trigger_type(irq_get_irq_data(wakeirq)); + res[1].name = "wakeirq"; + ret = platform_device_add_resources(glue->core, res, ARRAY_SIZE(res)); if (ret) { dev_err(glue->dev, "can't add resources\n"); diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h index d4b1f66ef457..dd14850b0603 100644 --- a/drivers/net/wireless/ti/wlcore/wlcore.h +++ b/drivers/net/wireless/ti/wlcore/wlcore.h @@ -199,8 +199,10 @@ struct wl1271 { struct wl1271_if_operations *if_ops; int irq; + int wakeirq; int irq_flags; + int wakeirq_flags; spinlock_t wl_lock; diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c index 1f6d9f357e57..9ccd780695f0 100644 --- a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c +++ b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c @@ -235,7 +235,7 @@ void zd_mac_clear(struct zd_mac *mac) { flush_workqueue(zd_workqueue); zd_chip_clear(&mac->chip); - ZD_ASSERT(!spin_is_locked(&mac->lock)); + lockdep_assert_held(&mac->lock); ZD_MEMCLEAR(mac, sizeof(struct zd_mac)); } diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index f6ae23fc3f6b..182d6770f102 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -166,7 +166,8 @@ static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb, [skb_get_hash_raw(skb) % size]; } -static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t +xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct xenvif *vif = netdev_priv(dev); struct xenvif_queue *queue = NULL; diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 3621e05a7494..80aae3a32c2a 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -1660,8 +1660,7 @@ module_init(netback_init); static void __exit netback_fini(void) { #ifdef CONFIG_DEBUG_FS - if (!IS_ERR_OR_NULL(xen_netback_dbg_root)) - debugfs_remove_recursive(xen_netback_dbg_root); + debugfs_remove_recursive(xen_netback_dbg_root); #endif /* CONFIG_DEBUG_FS */ xenvif_xenbus_fini(); } diff --git 
a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c index cd51492ae6c2..fe1d52247bbe 100644 --- a/drivers/net/xen-netback/xenbus.c +++ b/drivers/net/xen-netback/xenbus.c @@ -254,8 +254,7 @@ static void xenvif_debugfs_delif(struct xenvif *vif) if (IS_ERR_OR_NULL(xen_netback_dbg_root)) return; - if (!IS_ERR_OR_NULL(vif->xenvif_dbg_root)) - debugfs_remove_recursive(vif->xenvif_dbg_root); + debugfs_remove_recursive(vif->xenvif_dbg_root); vif->xenvif_dbg_root = NULL; } #endif /* CONFIG_DEBUG_FS */ diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig index 5c8d452e35e2..c89d3effd99d 100644 --- a/drivers/phy/Kconfig +++ b/drivers/phy/Kconfig @@ -48,6 +48,7 @@ source "drivers/phy/lantiq/Kconfig" source "drivers/phy/marvell/Kconfig" source "drivers/phy/mediatek/Kconfig" source "drivers/phy/motorola/Kconfig" +source "drivers/phy/mscc/Kconfig" source "drivers/phy/qualcomm/Kconfig" source "drivers/phy/ralink/Kconfig" source "drivers/phy/renesas/Kconfig" diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile index 84e3bd9c5665..ce8339ff0022 100644 --- a/drivers/phy/Makefile +++ b/drivers/phy/Makefile @@ -18,6 +18,7 @@ obj-y += broadcom/ \ hisilicon/ \ marvell/ \ motorola/ \ + mscc/ \ qualcomm/ \ ralink/ \ samsung/ \ diff --git a/drivers/phy/mscc/Kconfig b/drivers/phy/mscc/Kconfig new file mode 100644 index 000000000000..2e2a466efd66 --- /dev/null +++ b/drivers/phy/mscc/Kconfig @@ -0,0 +1,11 @@ +# +# Phy drivers for Microsemi devices +# + +config PHY_OCELOT_SERDES + tristate "SerDes PHY driver for Microsemi Ocelot" + select GENERIC_PHY + depends on OF + depends on MFD_SYSCON + help + Enable this for supporting SerDes muxing with Microsemi Ocelot. diff --git a/drivers/phy/mscc/Makefile b/drivers/phy/mscc/Makefile new file mode 100644 index 000000000000..e14749170fc9 --- /dev/null +++ b/drivers/phy/mscc/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the Microsemi phy drivers. 
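On the consumer side, a MAC driver would obtain one of these SerDes instances through the generic PHY framework and select its mode before use; the serdes_set_mode() hook below is what ultimately backs phy_set_mode(). A hedged sketch of such a consumer, using the 4.20-era API (function name and phy-names entry are hypothetical):

    #include <linux/phy/phy.h>
    #include <linux/platform_device.h>

    static int foo_mac_attach_serdes(struct platform_device *pdev)
    {
    	struct phy *serdes;
    	int ret;

    	/* matches a "serdes" entry in the node's phy-names property */
    	serdes = devm_phy_get(&pdev->dev, "serdes");
    	if (IS_ERR(serdes))
    		return PTR_ERR(serdes);

    	ret = phy_init(serdes);
    	if (ret)
    		return ret;

    	/* lands in serdes_set_mode(); returns -EINVAL unless the
    	 * port/macro pair exists in ocelot_serdes_muxes[]
    	 */
    	return phy_set_mode(serdes, PHY_MODE_SGMII);
    }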
+# + +obj-$(CONFIG_PHY_OCELOT_SERDES) := phy-ocelot-serdes.o diff --git a/drivers/phy/mscc/phy-ocelot-serdes.c b/drivers/phy/mscc/phy-ocelot-serdes.c new file mode 100644 index 000000000000..b2be54680cf7 --- /dev/null +++ b/drivers/phy/mscc/phy-ocelot-serdes.c @@ -0,0 +1,295 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* + * SerDes PHY driver for Microsemi Ocelot + * + * Copyright (c) 2018 Microsemi + * + */ + +#include <linux/err.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/phy/phy.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <soc/mscc/ocelot_hsio.h> +#include <dt-bindings/phy/phy-ocelot-serdes.h> + +struct serdes_ctrl { + struct regmap *regs; + struct device *dev; + struct phy *phys[SERDES_MAX]; +}; + +struct serdes_macro { + u8 idx; + /* Not used when in QSGMII or PCIe mode */ + int port; + struct serdes_ctrl *ctrl; +}; + +#define MCB_S1G_CFG_TIMEOUT 50 + +static int __serdes_write_mcb_s1g(struct regmap *regmap, u8 macro, u32 op) +{ + unsigned int regval; + + regmap_write(regmap, HSIO_MCB_S1G_ADDR_CFG, op | + HSIO_MCB_S1G_ADDR_CFG_SERDES1G_ADDR(BIT(macro))); + + return regmap_read_poll_timeout(regmap, HSIO_MCB_S1G_ADDR_CFG, regval, + (regval & op) != op, 100, + MCB_S1G_CFG_TIMEOUT * 1000); +} + +static int serdes_commit_mcb_s1g(struct regmap *regmap, u8 macro) +{ + return __serdes_write_mcb_s1g(regmap, macro, + HSIO_MCB_S1G_ADDR_CFG_SERDES1G_WR_ONE_SHOT); +} + +static int serdes_update_mcb_s1g(struct regmap *regmap, u8 macro) +{ + return __serdes_write_mcb_s1g(regmap, macro, + HSIO_MCB_S1G_ADDR_CFG_SERDES1G_RD_ONE_SHOT); +} + +static int serdes_init_s1g(struct regmap *regmap, u8 serdes) +{ + int ret; + + ret = serdes_update_mcb_s1g(regmap, serdes); + if (ret) + return ret; + + regmap_update_bits(regmap, HSIO_S1G_COMMON_CFG, + HSIO_S1G_COMMON_CFG_SYS_RST | + HSIO_S1G_COMMON_CFG_ENA_LANE | + HSIO_S1G_COMMON_CFG_ENA_ELOOP | + HSIO_S1G_COMMON_CFG_ENA_FLOOP, + HSIO_S1G_COMMON_CFG_ENA_LANE); + + regmap_update_bits(regmap, HSIO_S1G_PLL_CFG, + HSIO_S1G_PLL_CFG_PLL_FSM_ENA | + HSIO_S1G_PLL_CFG_PLL_FSM_CTRL_DATA_M, + HSIO_S1G_PLL_CFG_PLL_FSM_CTRL_DATA(200) | + HSIO_S1G_PLL_CFG_PLL_FSM_ENA); + + regmap_update_bits(regmap, HSIO_S1G_MISC_CFG, + HSIO_S1G_MISC_CFG_DES_100FX_CPMD_ENA | + HSIO_S1G_MISC_CFG_LANE_RST, + HSIO_S1G_MISC_CFG_LANE_RST); + + ret = serdes_commit_mcb_s1g(regmap, serdes); + if (ret) + return ret; + + regmap_update_bits(regmap, HSIO_S1G_COMMON_CFG, + HSIO_S1G_COMMON_CFG_SYS_RST, + HSIO_S1G_COMMON_CFG_SYS_RST); + + regmap_update_bits(regmap, HSIO_S1G_MISC_CFG, + HSIO_S1G_MISC_CFG_LANE_RST, 0); + + ret = serdes_commit_mcb_s1g(regmap, serdes); + if (ret) + return ret; + + return 0; +} + +struct serdes_mux { + u8 idx; + u8 port; + enum phy_mode mode; + u32 mask; + u32 mux; +}; + +#define SERDES_MUX(_idx, _port, _mode, _mask, _mux) { \ + .idx = _idx, \ + .port = _port, \ + .mode = _mode, \ + .mask = _mask, \ + .mux = _mux, \ +} + +#define SERDES_MUX_SGMII(i, p, m, c) SERDES_MUX(i, p, PHY_MODE_SGMII, m, c) +#define SERDES_MUX_QSGMII(i, p, m, c) SERDES_MUX(i, p, PHY_MODE_QSGMII, m, c) + +static const struct serdes_mux ocelot_serdes_muxes[] = { + SERDES_MUX_SGMII(SERDES1G(0), 0, 0, 0), + SERDES_MUX_SGMII(SERDES1G(1), 1, HSIO_HW_CFG_DEV1G_5_MODE, 0), + SERDES_MUX_SGMII(SERDES1G(1), 5, HSIO_HW_CFG_QSGMII_ENA | + HSIO_HW_CFG_DEV1G_5_MODE, HSIO_HW_CFG_DEV1G_5_MODE), + SERDES_MUX_SGMII(SERDES1G(2), 2, HSIO_HW_CFG_DEV1G_4_MODE, 0), + SERDES_MUX_SGMII(SERDES1G(2), 4, 
HSIO_HW_CFG_QSGMII_ENA | + HSIO_HW_CFG_DEV1G_4_MODE, HSIO_HW_CFG_DEV1G_4_MODE), + SERDES_MUX_SGMII(SERDES1G(3), 3, HSIO_HW_CFG_DEV1G_6_MODE, 0), + SERDES_MUX_SGMII(SERDES1G(3), 6, HSIO_HW_CFG_QSGMII_ENA | + HSIO_HW_CFG_DEV1G_6_MODE, HSIO_HW_CFG_DEV1G_6_MODE), + SERDES_MUX_SGMII(SERDES1G(4), 4, HSIO_HW_CFG_QSGMII_ENA | + HSIO_HW_CFG_DEV1G_4_MODE | HSIO_HW_CFG_DEV1G_9_MODE, + 0), + SERDES_MUX_SGMII(SERDES1G(4), 9, HSIO_HW_CFG_DEV1G_4_MODE | + HSIO_HW_CFG_DEV1G_9_MODE, HSIO_HW_CFG_DEV1G_4_MODE | + HSIO_HW_CFG_DEV1G_9_MODE), + SERDES_MUX_SGMII(SERDES1G(5), 5, HSIO_HW_CFG_QSGMII_ENA | + HSIO_HW_CFG_DEV1G_5_MODE | HSIO_HW_CFG_DEV2G5_10_MODE, + 0), + SERDES_MUX_SGMII(SERDES1G(5), 10, HSIO_HW_CFG_PCIE_ENA | + HSIO_HW_CFG_DEV1G_5_MODE | HSIO_HW_CFG_DEV2G5_10_MODE, + HSIO_HW_CFG_DEV1G_5_MODE | HSIO_HW_CFG_DEV2G5_10_MODE), + SERDES_MUX_QSGMII(SERDES6G(0), 4, HSIO_HW_CFG_QSGMII_ENA, + HSIO_HW_CFG_QSGMII_ENA), + SERDES_MUX_QSGMII(SERDES6G(0), 5, HSIO_HW_CFG_QSGMII_ENA, + HSIO_HW_CFG_QSGMII_ENA), + SERDES_MUX_QSGMII(SERDES6G(0), 6, HSIO_HW_CFG_QSGMII_ENA, + HSIO_HW_CFG_QSGMII_ENA), + SERDES_MUX_SGMII(SERDES6G(0), 7, HSIO_HW_CFG_QSGMII_ENA, 0), + SERDES_MUX_QSGMII(SERDES6G(0), 7, HSIO_HW_CFG_QSGMII_ENA, + HSIO_HW_CFG_QSGMII_ENA), + SERDES_MUX_SGMII(SERDES6G(1), 8, 0, 0), + SERDES_MUX_SGMII(SERDES6G(2), 10, HSIO_HW_CFG_PCIE_ENA | + HSIO_HW_CFG_DEV2G5_10_MODE, 0), + SERDES_MUX(SERDES6G(2), 10, PHY_MODE_PCIE, HSIO_HW_CFG_PCIE_ENA, + HSIO_HW_CFG_PCIE_ENA), +}; + +static int serdes_set_mode(struct phy *phy, enum phy_mode mode) +{ + struct serdes_macro *macro = phy_get_drvdata(phy); + unsigned int i; + int ret; + + for (i = 0; i < ARRAY_SIZE(ocelot_serdes_muxes); i++) { + if (macro->idx != ocelot_serdes_muxes[i].idx || + mode != ocelot_serdes_muxes[i].mode) + continue; + + if (mode != PHY_MODE_QSGMII && + macro->port != ocelot_serdes_muxes[i].port) + continue; + + ret = regmap_update_bits(macro->ctrl->regs, HSIO_HW_CFG, + ocelot_serdes_muxes[i].mask, + ocelot_serdes_muxes[i].mux); + if (ret) + return ret; + + if (macro->idx <= SERDES1G_MAX) + return serdes_init_s1g(macro->ctrl->regs, macro->idx); + + /* SERDES6G and PCIe not supported yet */ + return -EOPNOTSUPP; + } + + return -EINVAL; +} + +static const struct phy_ops serdes_ops = { + .set_mode = serdes_set_mode, + .owner = THIS_MODULE, +}; + +static struct phy *serdes_simple_xlate(struct device *dev, + struct of_phandle_args *args) +{ + struct serdes_ctrl *ctrl = dev_get_drvdata(dev); + unsigned int port, idx, i; + + if (args->args_count != 2) + return ERR_PTR(-EINVAL); + + port = args->args[0]; + idx = args->args[1]; + + for (i = 0; i <= SERDES_MAX; i++) { + struct serdes_macro *macro = phy_get_drvdata(ctrl->phys[i]); + + if (idx != macro->idx) + continue; + + /* SERDES6G(0) is the only SerDes capable of QSGMII */ + if (idx != SERDES6G(0) && macro->port >= 0) + return ERR_PTR(-EBUSY); + + macro->port = port; + return ctrl->phys[i]; + } + + return ERR_PTR(-ENODEV); +} + +static int serdes_phy_create(struct serdes_ctrl *ctrl, u8 idx, struct phy **phy) +{ + struct serdes_macro *macro; + + *phy = devm_phy_create(ctrl->dev, NULL, &serdes_ops); + if (IS_ERR(*phy)) + return PTR_ERR(*phy); + + macro = devm_kzalloc(ctrl->dev, sizeof(*macro), GFP_KERNEL); + if (!macro) + return -ENOMEM; + + macro->idx = idx; + macro->ctrl = ctrl; + macro->port = -1; + + phy_set_drvdata(*phy, macro); + + return 0; +} + +static int serdes_probe(struct platform_device *pdev) +{ + struct phy_provider *provider; + struct serdes_ctrl *ctrl; + unsigned int i; + int ret; + + ctrl = 
devm_kzalloc(&pdev->dev, sizeof(*ctrl), GFP_KERNEL); + if (!ctrl) + return -ENOMEM; + + ctrl->dev = &pdev->dev; + ctrl->regs = syscon_node_to_regmap(pdev->dev.parent->of_node); + if (IS_ERR(ctrl->regs)) + return PTR_ERR(ctrl->regs); + + for (i = 0; i <= SERDES_MAX; i++) { + ret = serdes_phy_create(ctrl, i, &ctrl->phys[i]); + if (ret) + return ret; + } + + dev_set_drvdata(&pdev->dev, ctrl); + + provider = devm_of_phy_provider_register(ctrl->dev, + serdes_simple_xlate); + + return PTR_ERR_OR_ZERO(provider); +} + +static const struct of_device_id serdes_ids[] = { + { .compatible = "mscc,vsc7514-serdes", }, + {}, +}; +MODULE_DEVICE_TABLE(of, serdes_ids); + +static struct platform_driver mscc_ocelot_serdes = { + .probe = serdes_probe, + .driver = { + .name = "mscc,ocelot-serdes", + .of_match_table = of_match_ptr(serdes_ids), + }, +}; + +module_platform_driver(mscc_ocelot_serdes); + +MODULE_AUTHOR("Quentin Schulz <quentin.schulz@bootlin.com>"); +MODULE_DESCRIPTION("SerDes driver for Microsemi Ocelot"); +MODULE_LICENSE("Dual MIT/GPL"); diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 34e0d476c5c6..6843bc7ee9f2 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -26,6 +26,7 @@ #include <net/ipv6.h> #include <net/if_inet6.h> #include <net/addrconf.h> +#include <net/tcp.h> #include <asm/debug.h> #include <asm/qdio.h> @@ -389,8 +390,9 @@ enum qeth_layer2_frame_flags { enum qeth_header_ids { QETH_HEADER_TYPE_LAYER3 = 0x01, QETH_HEADER_TYPE_LAYER2 = 0x02, - QETH_HEADER_TYPE_TSO = 0x03, + QETH_HEADER_TYPE_L3_TSO = 0x03, QETH_HEADER_TYPE_OSN = 0x04, + QETH_HEADER_TYPE_L2_TSO = 0x06, }; /* flags for qeth_hdr.ext_flags */ #define QETH_HDR_EXT_VLAN_FRAME 0x01 @@ -581,7 +583,8 @@ struct qeth_cmd_buffer { struct qeth_channel *channel; unsigned char *data; int rc; - void (*callback) (struct qeth_channel *, struct qeth_cmd_buffer *); + void (*callback)(struct qeth_card *card, struct qeth_channel *channel, + struct qeth_cmd_buffer *iob); }; static inline struct qeth_ipa_cmd *__ipa_cmd(struct qeth_cmd_buffer *iob) @@ -638,7 +641,6 @@ struct qeth_reply { atomic_t received; int rc; void *param; - struct qeth_card *card; refcount_t refcnt; }; @@ -671,6 +673,12 @@ struct qeth_card_info { __u32 hwtrap; }; +enum qeth_discipline_id { + QETH_DISCIPLINE_UNDETERMINED = -1, + QETH_DISCIPLINE_LAYER3 = 0, + QETH_DISCIPLINE_LAYER2 = 1, +}; + struct qeth_card_options { struct qeth_routing_info route4; struct qeth_ipa_info ipa4; @@ -680,7 +688,7 @@ struct qeth_card_options { struct qeth_sbp_info sbp; /* SETBRIDGEPORT options */ struct qeth_vnicc_info vnicc; /* VNICC options */ int fake_broadcast; - int layer2; + enum qeth_discipline_id layer; int performance_stats; int rx_sg_cb; enum qeth_ipa_isolation_modes isolation; @@ -690,6 +698,9 @@ struct qeth_card_options { char hsuid[9]; }; +#define IS_LAYER2(card) ((card)->options.layer == QETH_DISCIPLINE_LAYER2) +#define IS_LAYER3(card) ((card)->options.layer == QETH_DISCIPLINE_LAYER3) + /* * thread bits for qeth_card thread masks */ @@ -702,12 +713,6 @@ struct qeth_osn_info { int (*data_cb)(struct sk_buff *skb); }; -enum qeth_discipline_id { - QETH_DISCIPLINE_UNDETERMINED = -1, - QETH_DISCIPLINE_LAYER3 = 0, - QETH_DISCIPLINE_LAYER2 = 1, -}; - struct qeth_discipline { const struct device_type *devtype; int (*process_rx_buffer)(struct qeth_card *card, int budget, int *done); @@ -759,7 +764,6 @@ struct qeth_switch_info { struct qeth_card { struct list_head list; enum qeth_card_states state; - int lan_online; spinlock_t lock; 
struct ccwgroup_device *gdev; struct qeth_channel read; @@ -892,11 +896,6 @@ static inline void qeth_tx_csum(struct sk_buff *skb, u8 *flags, int ipv) if ((ipv == 4 && ip_hdr(skb)->protocol == IPPROTO_UDP) || (ipv == 6 && ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)) *flags |= QETH_HDR_EXT_UDP; - if (ipv == 4) { - /* some HW requires combined L3+L4 csum offload: */ - *flags |= QETH_HDR_EXT_CSUM_HDR_REQ; - ip_hdr(skb)->check = 0; - } } static inline void qeth_put_buffer_pool_entry(struct qeth_card *card, @@ -1007,9 +1006,7 @@ int qeth_query_switch_attributes(struct qeth_card *card, int qeth_send_control_data(struct qeth_card *, int, struct qeth_cmd_buffer *, int (*reply_cb)(struct qeth_card *, struct qeth_reply*, unsigned long), void *reply_param); -int qeth_get_elements_no(struct qeth_card *card, struct sk_buff *skb, - int extra_elems, int data_offset); -int qeth_get_elements_for_frags(struct sk_buff *); +unsigned int qeth_count_elements(struct sk_buff *skb, unsigned int data_offset); int qeth_do_send_packet_fast(struct qeth_qdio_out_q *queue, struct sk_buff *skb, struct qeth_hdr *hdr, unsigned int offset, unsigned int hd_len); @@ -1027,7 +1024,6 @@ void qeth_dbf_longtext(debug_info_t *id, int level, char *text, ...); int qeth_core_ethtool_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd); int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback); -int qeth_hdr_chk_and_bounce(struct sk_buff *, struct qeth_hdr **, int); int qeth_configure_cq(struct qeth_card *, enum qeth_cq); int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action); void qeth_trace_features(struct qeth_card *); @@ -1052,6 +1048,13 @@ int qeth_vm_request_mac(struct qeth_card *card); int qeth_add_hw_header(struct qeth_card *card, struct sk_buff *skb, struct qeth_hdr **hdr, unsigned int hdr_len, unsigned int proto_len, unsigned int *elements); +void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr, unsigned int payload_len, + struct sk_buff *skb, unsigned int proto_len); +int qeth_xmit(struct qeth_card *card, struct sk_buff *skb, + struct qeth_qdio_out_q *queue, int ipv, int cast_type, + void (*fill_header)(struct qeth_card *card, struct qeth_hdr *hdr, + struct sk_buff *skb, int ipv, int cast_type, + unsigned int data_len)); /* exports for OSN */ int qeth_osn_assist(struct net_device *, void *, int); diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index ffce6f39828a..3274f13aad57 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -16,6 +16,7 @@ #include <linux/string.h> #include <linux/errno.h> #include <linux/kernel.h> +#include <linux/log2.h> #include <linux/ip.h> #include <linux/tcp.h> #include <linux/mii.h> @@ -61,10 +62,10 @@ static struct kmem_cache *qeth_qdio_outbuf_cache; static struct device *qeth_core_root_dev; static struct lock_class_key qdio_out_skb_queue_key; -static struct mutex qeth_mod_mutex; -static void qeth_send_control_data_cb(struct qeth_channel *, - struct qeth_cmd_buffer *); +static void qeth_send_control_data_cb(struct qeth_card *card, + struct qeth_channel *channel, + struct qeth_cmd_buffer *iob); static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *); static void qeth_free_buffer_pool(struct qeth_card *); static int qeth_qdio_establish(struct qeth_card *); @@ -591,7 +592,6 @@ static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card) if (reply) { refcount_set(&reply->refcnt, 1); atomic_set(&reply->received, 0); - reply->card = card; } return reply; } @@ -626,80 
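qeth_xmit() and the fill_header hook declared above pull the common transmit path out of the two disciplines; each discipline keeps only its header-filling logic. A sketch of a conforming hook, with an illustrative body (the real layer-2 variant appears in qeth_l2_main.c further down):

static void my_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
                           struct sk_buff *skb, int ipv, int cast_type,
                           unsigned int data_len)
{
        /* qeth_xmit() zeroes the header before invoking the hook */
        hdr->hdr.l2.id = QETH_HEADER_TYPE_LAYER2;
        hdr->hdr.l2.pkt_length = data_len;
        if (cast_type == RTN_MULTICAST)
                hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_MULTICAST;
}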
+626,61 @@ static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc, } static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card, - struct qeth_cmd_buffer *iob) + struct qeth_ipa_cmd *cmd) { - struct qeth_ipa_cmd *cmd = NULL; - QETH_CARD_TEXT(card, 5, "chkipad"); - if (IS_IPA(iob->data)) { - cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data); - if (IS_IPA_REPLY(cmd)) { - if (cmd->hdr.command != IPA_CMD_SETCCID && - cmd->hdr.command != IPA_CMD_DELCCID && - cmd->hdr.command != IPA_CMD_MODCCID && - cmd->hdr.command != IPA_CMD_SET_DIAG_ASS) - qeth_issue_ipa_msg(cmd, - cmd->hdr.return_code, card); - return cmd; + + if (IS_IPA_REPLY(cmd)) { + if (cmd->hdr.command != IPA_CMD_SETCCID && + cmd->hdr.command != IPA_CMD_DELCCID && + cmd->hdr.command != IPA_CMD_MODCCID && + cmd->hdr.command != IPA_CMD_SET_DIAG_ASS) + qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card); + return cmd; + } + + /* handle unsolicited event: */ + switch (cmd->hdr.command) { + case IPA_CMD_STOPLAN: + if (cmd->hdr.return_code == IPA_RC_VEPA_TO_VEB_TRANSITION) { + dev_err(&card->gdev->dev, + "Interface %s is down because the adjacent port is no longer in reflective relay mode\n", + QETH_CARD_IFNAME(card)); + qeth_close_dev(card); } else { - switch (cmd->hdr.command) { - case IPA_CMD_STOPLAN: - if (cmd->hdr.return_code == - IPA_RC_VEPA_TO_VEB_TRANSITION) { - dev_err(&card->gdev->dev, - "Interface %s is down because the " - "adjacent port is no longer in " - "reflective relay mode\n", - QETH_CARD_IFNAME(card)); - qeth_close_dev(card); - } else { - dev_warn(&card->gdev->dev, - "The link for interface %s on CHPID" - " 0x%X failed\n", - QETH_CARD_IFNAME(card), - card->info.chpid); - qeth_issue_ipa_msg(cmd, - cmd->hdr.return_code, card); - } - card->lan_online = 0; - netif_carrier_off(card->dev); - return NULL; - case IPA_CMD_STARTLAN: - dev_info(&card->gdev->dev, - "The link for %s on CHPID 0x%X has" - " been restored\n", - QETH_CARD_IFNAME(card), - card->info.chpid); - netif_carrier_on(card->dev); - card->lan_online = 1; - if (card->info.hwtrap) - card->info.hwtrap = 2; - qeth_schedule_recovery(card); - return NULL; - case IPA_CMD_SETBRIDGEPORT_IQD: - case IPA_CMD_SETBRIDGEPORT_OSA: - case IPA_CMD_ADDRESS_CHANGE_NOTIF: - if (card->discipline->control_event_handler - (card, cmd)) - return cmd; - else - return NULL; - case IPA_CMD_MODCCID: - return cmd; - case IPA_CMD_REGISTER_LOCAL_ADDR: - QETH_CARD_TEXT(card, 3, "irla"); - break; - case IPA_CMD_UNREGISTER_LOCAL_ADDR: - QETH_CARD_TEXT(card, 3, "urla"); - break; - default: - QETH_DBF_MESSAGE(2, "Received data is IPA " - "but not a reply!\n"); - break; - } + dev_warn(&card->gdev->dev, + "The link for interface %s on CHPID 0x%X failed\n", + QETH_CARD_IFNAME(card), card->info.chpid); + qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card); + netif_carrier_off(card->dev); } + return NULL; + case IPA_CMD_STARTLAN: + dev_info(&card->gdev->dev, + "The link for %s on CHPID 0x%X has been restored\n", + QETH_CARD_IFNAME(card), card->info.chpid); + if (card->info.hwtrap) + card->info.hwtrap = 2; + qeth_schedule_recovery(card); + return NULL; + case IPA_CMD_SETBRIDGEPORT_IQD: + case IPA_CMD_SETBRIDGEPORT_OSA: + case IPA_CMD_ADDRESS_CHANGE_NOTIF: + if (card->discipline->control_event_handler(card, cmd)) + return cmd; + return NULL; + case IPA_CMD_MODCCID: + return cmd; + case IPA_CMD_REGISTER_LOCAL_ADDR: + QETH_CARD_TEXT(card, 3, "irla"); + return NULL; + case IPA_CMD_UNREGISTER_LOCAL_ADDR: + QETH_CARD_TEXT(card, 3, "urla"); + return NULL; + default: + 
QETH_DBF_MESSAGE(2, "Received data is IPA but not a reply!\n"); + return cmd; } - return cmd; } void qeth_clear_ipacmd_list(struct qeth_card *card) @@ -746,18 +727,10 @@ static int qeth_check_idx_response(struct qeth_card *card, return 0; } -static struct qeth_card *CARD_FROM_CDEV(struct ccw_device *cdev) -{ - struct qeth_card *card = dev_get_drvdata(&((struct ccwgroup_device *) - dev_get_drvdata(&cdev->dev))->dev); - return card; -} - static struct qeth_cmd_buffer *__qeth_get_buffer(struct qeth_channel *channel) { __u8 index; - QETH_CARD_TEXT(CARD_FROM_CDEV(channel->ccwdev), 6, "getbuff"); index = channel->io_buf_no; do { if (channel->iob[index].state == BUF_STATE_FREE) { @@ -778,9 +751,7 @@ void qeth_release_buffer(struct qeth_channel *channel, { unsigned long flags; - QETH_CARD_TEXT(CARD_FROM_CDEV(channel->ccwdev), 6, "relbuff"); spin_lock_irqsave(&channel->iob_lock, flags); - memset(iob->data, 0, QETH_BUFSIZE); iob->state = BUF_STATE_FREE; iob->callback = qeth_send_control_data_cb; iob->rc = 0; @@ -789,6 +760,13 @@ void qeth_release_buffer(struct qeth_channel *channel, } EXPORT_SYMBOL_GPL(qeth_release_buffer); +static void qeth_release_buffer_cb(struct qeth_card *card, + struct qeth_channel *channel, + struct qeth_cmd_buffer *iob) +{ + qeth_release_buffer(channel, iob); +} + static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *channel) { struct qeth_cmd_buffer *buffer = NULL; @@ -819,17 +797,16 @@ void qeth_clear_cmd_buffers(struct qeth_channel *channel) } EXPORT_SYMBOL_GPL(qeth_clear_cmd_buffers); -static void qeth_send_control_data_cb(struct qeth_channel *channel, - struct qeth_cmd_buffer *iob) +static void qeth_send_control_data_cb(struct qeth_card *card, + struct qeth_channel *channel, + struct qeth_cmd_buffer *iob) { - struct qeth_card *card; + struct qeth_ipa_cmd *cmd = NULL; struct qeth_reply *reply, *r; - struct qeth_ipa_cmd *cmd; unsigned long flags; int keep_reply; int rc = 0; - card = CARD_FROM_CDEV(channel->ccwdev); QETH_CARD_TEXT(card, 4, "sndctlcb"); rc = qeth_check_idx_response(card, iob->data); switch (rc) { @@ -843,16 +820,20 @@ static void qeth_send_control_data_cb(struct qeth_channel *channel, goto out; } - cmd = qeth_check_ipa_data(card, iob); - if ((cmd == NULL) && (card->state != CARD_STATE_DOWN)) - goto out; - /*in case of OSN : check if cmd is set */ - if (card->info.type == QETH_CARD_TYPE_OSN && - cmd && - cmd->hdr.command != IPA_CMD_STARTLAN && - card->osn_info.assist_cb != NULL) { - card->osn_info.assist_cb(card->dev, cmd); - goto out; + if (IS_IPA(iob->data)) { + cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data); + cmd = qeth_check_ipa_data(card, cmd); + if (!cmd) + goto out; + if (IS_OSN(card) && card->osn_info.assist_cb && + cmd->hdr.command != IPA_CMD_STARTLAN) { + card->osn_info.assist_cb(card->dev, cmd); + goto out; + } + } else { + /* non-IPA commands should only flow during initialization */ + if (card->state != CARD_STATE_DOWN) + goto out; } spin_lock_irqsave(&card->lock, flags); @@ -900,44 +881,6 @@ out: qeth_release_buffer(channel, iob); } -static int qeth_setup_channel(struct qeth_channel *channel, bool alloc_buffers) -{ - int cnt; - - QETH_DBF_TEXT(SETUP, 2, "setupch"); - - channel->ccw = kmalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA); - if (!channel->ccw) - return -ENOMEM; - channel->state = CH_STATE_DOWN; - atomic_set(&channel->irq_pending, 0); - init_waitqueue_head(&channel->wait_q); - - if (!alloc_buffers) - return 0; - - for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) { - channel->iob[cnt].data = - 
kzalloc(QETH_BUFSIZE, GFP_DMA|GFP_KERNEL); - if (channel->iob[cnt].data == NULL) - break; - channel->iob[cnt].state = BUF_STATE_FREE; - channel->iob[cnt].channel = channel; - channel->iob[cnt].callback = qeth_send_control_data_cb; - channel->iob[cnt].rc = 0; - } - if (cnt < QETH_CMD_BUFFER_NO) { - kfree(channel->ccw); - while (cnt-- > 0) - kfree(channel->iob[cnt].data); - return -ENOMEM; - } - channel->io_buf_no = 0; - spin_lock_init(&channel->iob_lock); - - return 0; -} - static int qeth_set_thread_start_bit(struct qeth_card *card, unsigned long thread) { @@ -1013,16 +956,15 @@ void qeth_schedule_recovery(struct qeth_card *card) } EXPORT_SYMBOL_GPL(qeth_schedule_recovery); -static int qeth_get_problem(struct ccw_device *cdev, struct irb *irb) +static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev, + struct irb *irb) { int dstat, cstat; char *sense; - struct qeth_card *card; sense = (char *) irb->ecw; cstat = irb->scsw.cmd.cstat; dstat = irb->scsw.cmd.dstat; - card = CARD_FROM_CDEV(cdev); if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK | SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK | @@ -1062,14 +1004,11 @@ static int qeth_get_problem(struct ccw_device *cdev, struct irb *irb) return 0; } -static long __qeth_check_irb_error(struct ccw_device *cdev, - unsigned long intparm, struct irb *irb) +static long qeth_check_irb_error(struct qeth_card *card, + struct ccw_device *cdev, unsigned long intparm, + struct irb *irb) { - struct qeth_card *card; - - card = CARD_FROM_CDEV(cdev); - - if (!card || !IS_ERR(irb)) + if (!IS_ERR(irb)) return 0; switch (PTR_ERR(irb)) { @@ -1106,10 +1045,13 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, int rc; int cstat, dstat; struct qeth_cmd_buffer *iob = NULL; + struct ccwgroup_device *gdev; struct qeth_channel *channel; struct qeth_card *card; - card = CARD_FROM_CDEV(cdev); + /* while we hold the ccwdev lock, this stays valid: */ + gdev = dev_get_drvdata(&cdev->dev); + card = dev_get_drvdata(&gdev->dev); if (!card) return; @@ -1129,7 +1071,7 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, if (qeth_intparm_is_iob(intparm)) iob = (struct qeth_cmd_buffer *) __va((addr_t)intparm); - if (__qeth_check_irb_error(cdev, intparm, irb)) { + if (qeth_check_irb_error(card, cdev, intparm, irb)) { /* IO was terminated, free its resources. 
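With CARD_FROM_CDEV() removed, qeth_irq() is the one remaining spot that must map a ccw_device back to its card, and it does so through the group device while holding the ccwdev lock. Written out as a helper purely for illustration (the patch deliberately open-codes it, since there is a single caller):

static struct qeth_card *card_from_cdev(struct ccw_device *cdev)
{
        /* both drvdata pointers stay valid under get_ccwdev_lock(cdev) */
        struct ccwgroup_device *gdev = dev_get_drvdata(&cdev->dev);

        return gdev ? dev_get_drvdata(&gdev->dev) : NULL;
}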
*/ if (iob) qeth_release_buffer(iob->channel, iob); @@ -1184,7 +1126,7 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, channel->state = CH_STATE_DOWN; goto out; } - rc = qeth_get_problem(cdev, irb); + rc = qeth_get_problem(card, cdev, irb); if (rc) { card->read_or_write_problem = 1; qeth_clear_ipacmd_list(card); @@ -1204,7 +1146,7 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, __qeth_issue_next_read(card); if (iob && iob->callback) - iob->callback(iob->channel, iob); + iob->callback(card, iob->channel, iob); out: wake_up(&card->wait_q); @@ -1217,54 +1159,23 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *q, { struct sk_buff *skb; - if (skb_queue_empty(&buf->skb_list)) - goto out; - skb = skb_peek(&buf->skb_list); - while (skb) { + skb_queue_walk(&buf->skb_list, skb) { QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification); QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb); - if (be16_to_cpu(skb->protocol) == ETH_P_AF_IUCV) { - if (skb->sk) { - struct iucv_sock *iucv = iucv_sk(skb->sk); - iucv->sk_txnotify(skb, notification); - } - } - if (skb_queue_is_last(&buf->skb_list, skb)) - skb = NULL; - else - skb = skb_queue_next(&buf->skb_list, skb); + if (skb->protocol == htons(ETH_P_AF_IUCV) && skb->sk) + iucv_sk(skb->sk)->sk_txnotify(skb, notification); } -out: - return; } static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf) { - struct sk_buff *skb; - struct iucv_sock *iucv; - int notify_general_error = 0; - - if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING) - notify_general_error = 1; - /* release may never happen from within CQ tasklet scope */ WARN_ON_ONCE(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ); - skb = skb_dequeue(&buf->skb_list); - while (skb) { - QETH_CARD_TEXT(buf->q->card, 5, "skbr"); - QETH_CARD_TEXT_(buf->q->card, 5, "%lx", (long) skb); - if (notify_general_error && - be16_to_cpu(skb->protocol) == ETH_P_AF_IUCV) { - if (skb->sk) { - iucv = iucv_sk(skb->sk); - iucv->sk_txnotify(skb, TX_NOTIFY_GENERALERROR); - } - } - refcount_dec(&skb->users); - dev_kfree_skb_any(skb); - skb = skb_dequeue(&buf->skb_list); - } + if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING) + qeth_notify_skbs(buf->q, buf, TX_NOTIFY_GENERALERROR); + + __skb_queue_purge(&buf->skb_list); } static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, @@ -1336,14 +1247,61 @@ static void qeth_free_buffer_pool(struct qeth_card *card) static void qeth_clean_channel(struct qeth_channel *channel) { + struct ccw_device *cdev = channel->ccwdev; int cnt; QETH_DBF_TEXT(SETUP, 2, "freech"); + + spin_lock_irq(get_ccwdev_lock(cdev)); + cdev->handler = NULL; + spin_unlock_irq(get_ccwdev_lock(cdev)); + for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) kfree(channel->iob[cnt].data); kfree(channel->ccw); } +static int qeth_setup_channel(struct qeth_channel *channel, bool alloc_buffers) +{ + struct ccw_device *cdev = channel->ccwdev; + int cnt; + + QETH_DBF_TEXT(SETUP, 2, "setupch"); + + channel->ccw = kmalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA); + if (!channel->ccw) + return -ENOMEM; + channel->state = CH_STATE_DOWN; + atomic_set(&channel->irq_pending, 0); + init_waitqueue_head(&channel->wait_q); + + spin_lock_irq(get_ccwdev_lock(cdev)); + cdev->handler = qeth_irq; + spin_unlock_irq(get_ccwdev_lock(cdev)); + + if (!alloc_buffers) + return 0; + + for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) { + channel->iob[cnt].data = kmalloc(QETH_BUFSIZE, + GFP_KERNEL | GFP_DMA); + if (channel->iob[cnt].data == NULL) + break; + channel->iob[cnt].state = 
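The skb-list handling above pairs two idioms: skb_queue_walk() visits every queued skb without unlinking it, and __skb_queue_purge() then releases the whole list in one pass. Combined with the later change that queues skbs via __skb_queue_tail() without taking an extra reference, the purge becomes the single point of release. The pattern in isolation:

struct sk_buff *skb;

skb_queue_walk(&buf->skb_list, skb) {
        /* read-only visit: must not free or unlink skb here */
}
__skb_queue_purge(&buf->skb_list);      /* frees every queued skb */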
BUF_STATE_FREE; + channel->iob[cnt].channel = channel; + channel->iob[cnt].callback = qeth_send_control_data_cb; + channel->iob[cnt].rc = 0; + } + if (cnt < QETH_CMD_BUFFER_NO) { + qeth_clean_channel(channel); + return -ENOMEM; + } + channel->io_buf_no = 0; + spin_lock_init(&channel->iob_lock); + + return 0; +} + static void qeth_set_single_write_queues(struct qeth_card *card) { if ((atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED) && @@ -1421,6 +1379,7 @@ static void qeth_set_initial_options(struct qeth_card *card) card->options.rx_sg_cb = QETH_RX_SG_CB; card->options.isolation = ISOLATION_MODE_NONE; card->options.cq = QETH_CQ_DISABLED; + card->options.layer = QETH_DISCIPLINE_UNDETERMINED; } static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread) @@ -1494,7 +1453,7 @@ static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr) CARD_BUS_ID(card), card->info.mcl_level); } -static struct qeth_card *qeth_alloc_card(void) +static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev) { struct qeth_card *card; @@ -1503,13 +1462,18 @@ static struct qeth_card *qeth_alloc_card(void) if (!card) goto out; QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); + + card->gdev = gdev; + dev_set_drvdata(&gdev->dev, card); + CARD_RDEV(card) = gdev->cdev[0]; + CARD_WDEV(card) = gdev->cdev[1]; + CARD_DDEV(card) = gdev->cdev[2]; if (qeth_setup_channel(&card->read, true)) goto out_ip; if (qeth_setup_channel(&card->write, true)) goto out_channel; if (qeth_setup_channel(&card->data, false)) goto out_data; - card->options.layer2 = -1; card->qeth_service_level.seq_print = qeth_core_sl_print; register_service_level(&card->qeth_service_level); return card; @@ -1519,22 +1483,21 @@ out_data: out_channel: qeth_clean_channel(&card->read); out_ip: + dev_set_drvdata(&gdev->dev, NULL); kfree(card); out: return NULL; } -static int qeth_clear_channel(struct qeth_channel *channel) +static int qeth_clear_channel(struct qeth_card *card, + struct qeth_channel *channel) { - unsigned long flags; - struct qeth_card *card; int rc; - card = CARD_FROM_CDEV(channel->ccwdev); QETH_CARD_TEXT(card, 3, "clearch"); - spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); + spin_lock_irq(get_ccwdev_lock(channel->ccwdev)); rc = ccw_device_clear(channel->ccwdev, QETH_CLEAR_CHANNEL_PARM); - spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); + spin_unlock_irq(get_ccwdev_lock(channel->ccwdev)); if (rc) return rc; @@ -1548,17 +1511,15 @@ static int qeth_clear_channel(struct qeth_channel *channel) return 0; } -static int qeth_halt_channel(struct qeth_channel *channel) +static int qeth_halt_channel(struct qeth_card *card, + struct qeth_channel *channel) { - unsigned long flags; - struct qeth_card *card; int rc; - card = CARD_FROM_CDEV(channel->ccwdev); QETH_CARD_TEXT(card, 3, "haltch"); - spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); + spin_lock_irq(get_ccwdev_lock(channel->ccwdev)); rc = ccw_device_halt(channel->ccwdev, QETH_HALT_CHANNEL_PARM); - spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); + spin_unlock_irq(get_ccwdev_lock(channel->ccwdev)); if (rc) return rc; @@ -1576,9 +1537,9 @@ static int qeth_halt_channels(struct qeth_card *card) int rc1 = 0, rc2 = 0, rc3 = 0; QETH_CARD_TEXT(card, 3, "haltchs"); - rc1 = qeth_halt_channel(&card->read); - rc2 = qeth_halt_channel(&card->write); - rc3 = qeth_halt_channel(&card->data); + rc1 = qeth_halt_channel(card, &card->read); + rc2 = qeth_halt_channel(card, &card->write); + rc3 = qeth_halt_channel(card, 
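The irqsave-to-irq lock conversions in this hunk and the ones that follow are all the same mechanical pattern: these setup and recovery paths run in process context with interrupts enabled, so saving and restoring the interrupt state is unnecessary. Side by side:

unsigned long flags;

spin_lock_irqsave(get_ccwdev_lock(cdev), flags);        /* old: any context */
spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);

spin_lock_irq(get_ccwdev_lock(cdev));                   /* new: process context */
spin_unlock_irq(get_ccwdev_lock(cdev));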
&card->data); if (rc1) return rc1; if (rc2) @@ -1591,9 +1552,9 @@ static int qeth_clear_channels(struct qeth_card *card) int rc1 = 0, rc2 = 0, rc3 = 0; QETH_CARD_TEXT(card, 3, "clearchs"); - rc1 = qeth_clear_channel(&card->read); - rc2 = qeth_clear_channel(&card->write); - rc3 = qeth_clear_channel(&card->data); + rc1 = qeth_clear_channel(card, &card->read); + rc2 = qeth_clear_channel(card, &card->write); + rc3 = qeth_clear_channel(card, &card->data); if (rc1) return rc1; if (rc2) @@ -1652,7 +1613,6 @@ static int qeth_read_conf_data(struct qeth_card *card, void **buffer, char *rcd_buf; int ret; struct qeth_channel *channel = &card->data; - unsigned long flags; /* * scan for RCD command in extended SenseID data @@ -1666,11 +1626,11 @@ static int qeth_read_conf_data(struct qeth_card *card, void **buffer, qeth_setup_ccw(channel->ccw, ciw->cmd, ciw->count, rcd_buf); channel->state = CH_STATE_RCD; - spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); + spin_lock_irq(get_ccwdev_lock(channel->ccwdev)); ret = ccw_device_start_timeout(channel->ccwdev, channel->ccw, QETH_RCD_PARM, LPM_ANYPATH, 0, QETH_RCD_TIMEOUT); - spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); + spin_unlock_irq(get_ccwdev_lock(channel->ccwdev)); if (!ret) wait_event(card->wait_q, (channel->state == CH_STATE_RCD_DONE || @@ -1823,30 +1783,29 @@ static void qeth_init_func_level(struct qeth_card *card) } } -static int qeth_idx_activate_get_answer(struct qeth_channel *channel, - void (*idx_reply_cb)(struct qeth_channel *, - struct qeth_cmd_buffer *)) +static int qeth_idx_activate_get_answer(struct qeth_card *card, + struct qeth_channel *channel, + void (*reply_cb)(struct qeth_card *, + struct qeth_channel *, + struct qeth_cmd_buffer *)) { struct qeth_cmd_buffer *iob; - unsigned long flags; int rc; - struct qeth_card *card; QETH_DBF_TEXT(SETUP, 2, "idxanswr"); - card = CARD_FROM_CDEV(channel->ccwdev); iob = qeth_get_buffer(channel); if (!iob) return -ENOMEM; - iob->callback = idx_reply_cb; + iob->callback = reply_cb; qeth_setup_ccw(channel->ccw, CCW_CMD_READ, QETH_BUFSIZE, iob->data); wait_event(card->wait_q, atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0); QETH_DBF_TEXT(SETUP, 6, "noirqpnd"); - spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); + spin_lock_irq(get_ccwdev_lock(channel->ccwdev)); rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw, (addr_t) iob, 0, 0, QETH_TIMEOUT); - spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); + spin_unlock_irq(get_ccwdev_lock(channel->ccwdev)); if (rc) { QETH_DBF_MESSAGE(2, "Error2 in activating channel rc=%d\n", rc); @@ -1867,26 +1826,24 @@ static int qeth_idx_activate_get_answer(struct qeth_channel *channel, return rc; } -static int qeth_idx_activate_channel(struct qeth_channel *channel, - void (*idx_reply_cb)(struct qeth_channel *, - struct qeth_cmd_buffer *)) +static int qeth_idx_activate_channel(struct qeth_card *card, + struct qeth_channel *channel, + void (*reply_cb)(struct qeth_card *, + struct qeth_channel *, + struct qeth_cmd_buffer *)) { - struct qeth_card *card; struct qeth_cmd_buffer *iob; - unsigned long flags; __u16 temp; __u8 tmp; int rc; struct ccw_dev_id temp_devid; - card = CARD_FROM_CDEV(channel->ccwdev); - QETH_DBF_TEXT(SETUP, 2, "idxactch"); iob = qeth_get_buffer(channel); if (!iob) return -ENOMEM; - iob->callback = idx_reply_cb; + iob->callback = reply_cb; qeth_setup_ccw(channel->ccw, CCW_CMD_WRITE, IDX_ACTIVATE_SIZE, iob->data); if (channel == &card->write) { @@ -1913,10 +1870,10 @@ static int 
qeth_idx_activate_channel(struct qeth_channel *channel, wait_event(card->wait_q, atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0); QETH_DBF_TEXT(SETUP, 6, "noirqpnd"); - spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); + spin_lock_irq(get_ccwdev_lock(channel->ccwdev)); rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw, (addr_t) iob, 0, 0, QETH_TIMEOUT); - spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); + spin_unlock_irq(get_ccwdev_lock(channel->ccwdev)); if (rc) { QETH_DBF_MESSAGE(2, "Error1 in activating channel. rc=%d\n", @@ -1938,7 +1895,7 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel, QETH_DBF_TEXT_(SETUP, 2, "2err%d", -ETIME); return -ETIME; } - return qeth_idx_activate_get_answer(channel, idx_reply_cb); + return qeth_idx_activate_get_answer(card, channel, reply_cb); } static int qeth_peer_func_level(int level) @@ -1950,10 +1907,10 @@ static int qeth_peer_func_level(int level) return level; } -static void qeth_idx_write_cb(struct qeth_channel *channel, - struct qeth_cmd_buffer *iob) +static void qeth_idx_write_cb(struct qeth_card *card, + struct qeth_channel *channel, + struct qeth_cmd_buffer *iob) { - struct qeth_card *card; __u16 temp; QETH_DBF_TEXT(SETUP , 2, "idxwrcb"); @@ -1962,7 +1919,6 @@ static void qeth_idx_write_cb(struct qeth_channel *channel, channel->state = CH_STATE_ACTIVATING; goto out; } - card = CARD_FROM_CDEV(channel->ccwdev); if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) { if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == QETH_IDX_ACT_ERR_EXCL) @@ -1988,10 +1944,10 @@ out: qeth_release_buffer(channel, iob); } -static void qeth_idx_read_cb(struct qeth_channel *channel, - struct qeth_cmd_buffer *iob) +static void qeth_idx_read_cb(struct qeth_card *card, + struct qeth_channel *channel, + struct qeth_cmd_buffer *iob) { - struct qeth_card *card; __u16 temp; QETH_DBF_TEXT(SETUP , 2, "idxrdcb"); @@ -2000,7 +1956,6 @@ static void qeth_idx_read_cb(struct qeth_channel *channel, goto out; } - card = CARD_FROM_CDEV(channel->ccwdev); if (qeth_check_idx_response(card, iob->data)) goto out; @@ -2049,7 +2004,7 @@ void qeth_prepare_control_data(struct qeth_card *card, int len, struct qeth_cmd_buffer *iob) { qeth_setup_ccw(iob->channel->ccw, CCW_CMD_WRITE, len, iob->data); - iob->callback = qeth_release_buffer; + iob->callback = qeth_release_buffer_cb; memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH); @@ -2097,7 +2052,6 @@ int qeth_send_control_data(struct qeth_card *card, int len, { struct qeth_channel *channel = iob->channel; int rc; - unsigned long flags; struct qeth_reply *reply = NULL; unsigned long timeout, event_timeout; struct qeth_ipa_cmd *cmd = NULL; @@ -2130,26 +2084,26 @@ int qeth_send_control_data(struct qeth_card *card, int len, } qeth_prepare_control_data(card, len, iob); - spin_lock_irqsave(&card->lock, flags); + spin_lock_irq(&card->lock); list_add_tail(&reply->list, &card->cmd_waiter_list); - spin_unlock_irqrestore(&card->lock, flags); + spin_unlock_irq(&card->lock); timeout = jiffies + event_timeout; QETH_CARD_TEXT(card, 6, "noirqpnd"); - spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); + spin_lock_irq(get_ccwdev_lock(channel->ccwdev)); rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw, (addr_t) iob, 0, 0, event_timeout); - spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); + spin_unlock_irq(get_ccwdev_lock(channel->ccwdev)); if (rc) { QETH_DBF_MESSAGE(2, "%s qeth_send_control_data: " "ccw_device_start rc = %i\n", 
dev_name(&channel->ccwdev->dev), rc); QETH_CARD_TEXT_(card, 2, " err%d", rc); - spin_lock_irqsave(&card->lock, flags); + spin_lock_irq(&card->lock); list_del_init(&reply->list); qeth_put_reply(reply); - spin_unlock_irqrestore(&card->lock, flags); + spin_unlock_irq(&card->lock); qeth_release_buffer(channel, iob); atomic_set(&channel->irq_pending, 0); wake_up(&card->wait_q); @@ -2177,9 +2131,9 @@ int qeth_send_control_data(struct qeth_card *card, int len, time_err: reply->rc = -ETIME; - spin_lock_irqsave(&reply->card->lock, flags); + spin_lock_irq(&card->lock); list_del_init(&reply->list); - spin_unlock_irqrestore(&reply->card->lock, flags); + spin_unlock_irq(&card->lock); atomic_inc(&reply->received); rc = reply->rc; qeth_put_reply(reply); @@ -2198,7 +2152,6 @@ static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply, memcpy(&card->token.cm_filter_r, QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data), QETH_MPC_TOKEN_LENGTH); - QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc); return 0; } @@ -2224,7 +2177,6 @@ static int qeth_cm_enable(struct qeth_card *card) static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { - struct qeth_cmd_buffer *iob; QETH_DBF_TEXT(SETUP, 2, "cmsetpcb"); @@ -2233,7 +2185,6 @@ static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply, memcpy(&card->token.cm_connection_r, QETH_CM_SETUP_RESP_DEST_ADDR(iob->data), QETH_MPC_TOKEN_LENGTH); - QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc); return 0; } @@ -2255,7 +2206,6 @@ static int qeth_cm_setup(struct qeth_card *card) rc = qeth_send_control_data(card, CM_SETUP_SIZE, iob, qeth_cm_setup_cb, NULL); return rc; - } static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu) @@ -2284,7 +2234,7 @@ static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu) if (dev->mtu) new_mtu = dev->mtu; /* default MTUs for first setup: */ - else if (card->options.layer2) + else if (IS_LAYER2(card)) new_mtu = ETH_DATA_LEN; else new_mtu = ETH_DATA_LEN - 8; /* allow for LLC + SNAP */ @@ -2315,7 +2265,6 @@ static int qeth_get_mtu_outof_framesize(int framesize) static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { - __u16 mtu, framesize; __u16 len; __u8 link_type; @@ -2343,7 +2292,6 @@ static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply, } else card->info.link_type = 0; QETH_DBF_TEXT_(SETUP, 2, "link%d", card->info.link_type); - QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc); return 0; } @@ -2351,7 +2299,7 @@ static u8 qeth_mpc_select_prot_type(struct qeth_card *card) { if (IS_OSN(card)) return QETH_PROT_OSN2; - return (card->options.layer2 == 1) ? QETH_PROT_LAYER2 : QETH_PROT_TCPIP; + return IS_LAYER2(card) ? 
QETH_PROT_LAYER2 : QETH_PROT_TCPIP; } static int qeth_ulp_enable(struct qeth_card *card) @@ -2880,23 +2828,18 @@ static __u8 qeth_get_ipa_adp_type(enum qeth_link_types link_type) } static void qeth_fill_ipacmd_header(struct qeth_card *card, - struct qeth_ipa_cmd *cmd, __u8 command, - enum qeth_prot_versions prot) + struct qeth_ipa_cmd *cmd, + enum qeth_ipa_cmds command, + enum qeth_prot_versions prot) { - memset(cmd, 0, sizeof(struct qeth_ipa_cmd)); cmd->hdr.command = command; cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST; /* cmd->hdr.seqno is set by qeth_send_control_data() */ cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type); cmd->hdr.rel_adapter_no = (u8) card->dev->dev_port; - if (card->options.layer2) - cmd->hdr.prim_version_no = 2; - else - cmd->hdr.prim_version_no = 1; + cmd->hdr.prim_version_no = IS_LAYER2(card) ? 2 : 1; cmd->hdr.param_count = 1; cmd->hdr.prot_version = prot; - cmd->hdr.ipa_supported = 0; - cmd->hdr.ipa_enabled = 0; } struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *card, @@ -3043,7 +2986,7 @@ static int qeth_query_ipassists_cb(struct qeth_card *card, QETH_DBF_TEXT(SETUP, 2, "ipaunsup"); card->options.ipa4.supported_funcs |= IPA_SETADAPTERPARMS; card->options.ipa6.supported_funcs |= IPA_SETADAPTERPARMS; - return -0; + return 0; default: if (cmd->hdr.return_code) { QETH_DBF_MESSAGE(1, "%s IPA_CMD_QIPASSIST: Unhandled " @@ -3787,7 +3730,7 @@ EXPORT_SYMBOL_GPL(qeth_get_priority_queue); * Returns the number of pages, and thus QDIO buffer elements, needed to cover * fragmented part of the SKB. Returns zero for linear SKB. */ -int qeth_get_elements_for_frags(struct sk_buff *skb) +static int qeth_get_elements_for_frags(struct sk_buff *skb) { int cnt, elements = 0; @@ -3800,9 +3743,17 @@ int qeth_get_elements_for_frags(struct sk_buff *skb) } return elements; } -EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags); -static unsigned int qeth_count_elements(struct sk_buff *skb, int data_offset) +/** + * qeth_count_elements() - Counts the number of QDIO buffer elements needed + * to transmit an skb. + * @skb: the skb to operate on. + * @data_offset: skip this part of the skb's linear data + * + * Returns the number of pages, and thus QDIO buffer elements, needed to map the + * skb's data (both its linear part and paged fragments). + */ +unsigned int qeth_count_elements(struct sk_buff *skb, unsigned int data_offset) { unsigned int elements = qeth_get_elements_for_frags(skb); addr_t end = (addr_t)skb->data + skb_headlen(skb); @@ -3812,54 +3763,10 @@ static unsigned int qeth_count_elements(struct sk_buff *skb, int data_offset) elements += qeth_get_elements_for_range(start, end); return elements; } +EXPORT_SYMBOL_GPL(qeth_count_elements); -/** - * qeth_get_elements_no() - find number of SBALEs for skb data, inc. frags. - * @card: qeth card structure, to check max. elems. - * @skb: SKB address - * @extra_elems: extra elems needed, to check against max. - * @data_offset: range starts at skb->data + data_offset - * - * Returns the number of pages, and thus QDIO buffer elements, needed to cover - * skb data, including linear part and fragments. Checks if the result plus - * extra_elems fits under the limit for the card. Returns 0 if it does not. - * Note: extra_elems is not included in the returned result. 
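With qeth_get_elements_no() about to be deleted, its limit check moves out to the callers; the replacement pattern is simply:

elements = qeth_count_elements(skb, data_offset);
if (elements > QETH_MAX_BUFFER_ELEMENTS(card))
        return -E2BIG;  /* was: return 0, mapped to -E2BIG by the caller */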
- */ -int qeth_get_elements_no(struct qeth_card *card, - struct sk_buff *skb, int extra_elems, int data_offset) -{ - int elements = qeth_count_elements(skb, data_offset); - - if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) { - QETH_DBF_MESSAGE(2, "Invalid size of IP packet " - "(Number=%d / Length=%d). Discarded.\n", - elements + extra_elems, skb->len); - return 0; - } - return elements; -} -EXPORT_SYMBOL_GPL(qeth_get_elements_no); - -int qeth_hdr_chk_and_bounce(struct sk_buff *skb, struct qeth_hdr **hdr, int len) -{ - int hroom, inpage, rest; - - if (((unsigned long)skb->data & PAGE_MASK) != - (((unsigned long)skb->data + len - 1) & PAGE_MASK)) { - hroom = skb_headroom(skb); - inpage = PAGE_SIZE - ((unsigned long) skb->data % PAGE_SIZE); - rest = len - inpage; - if (rest > hroom) - return 1; - memmove(skb->data - rest, skb->data, skb_headlen(skb)); - skb->data -= rest; - skb->tail -= rest; - *hdr = (struct qeth_hdr *)skb->data; - QETH_DBF_MESSAGE(2, "skb bounce len: %d rest: %d\n", len, rest); - } - return 0; -} -EXPORT_SYMBOL_GPL(qeth_hdr_chk_and_bounce); +#define QETH_HDR_CACHE_OBJ_SIZE (sizeof(struct qeth_hdr_tso) + \ + MAX_TCP_HEADER) /** * qeth_add_hw_header() - add a HW header to an skb. @@ -3894,7 +3801,11 @@ check_layout: if (qeth_get_elements_for_range(start, end + contiguous) == 1) { /* Push HW header into same page as first protocol header. */ push_ok = true; - __elements = qeth_count_elements(skb, 0); + /* ... but TSO always needs a separate element for headers: */ + if (skb_is_gso(skb)) + __elements = 1 + qeth_count_elements(skb, proto_len); + else + __elements = qeth_count_elements(skb, 0); } else if (!proto_len && qeth_get_elements_for_range(start, end) == 1) { /* Push HW header into a new page. */ push_ok = true; @@ -3935,6 +3846,8 @@ check_layout: return hdr_len; } /* fall back */ + if (hdr_len + proto_len > QETH_HDR_CACHE_OBJ_SIZE) + return -E2BIG; *hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC); if (!*hdr) return -ENOMEM; @@ -4026,8 +3939,7 @@ static int qeth_fill_buffer(struct qeth_qdio_out_q *queue, bool is_first_elem = true; int flush_cnt = 0; - refcount_inc(&skb->users); - skb_queue_tail(&buf->skb_list, skb); + __skb_queue_tail(&buf->skb_list, skb); /* build dedicated header element */ if (hd_len) { @@ -4176,6 +4088,97 @@ out: } EXPORT_SYMBOL_GPL(qeth_do_send_packet); +void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr, unsigned int payload_len, + struct sk_buff *skb, unsigned int proto_len) +{ + struct qeth_hdr_ext_tso *ext = &hdr->ext; + + ext->hdr_tot_len = sizeof(*ext); + ext->imb_hdr_no = 1; + ext->hdr_type = 1; + ext->hdr_version = 1; + ext->hdr_len = 28; + ext->payload_len = payload_len; + ext->mss = skb_shinfo(skb)->gso_size; + ext->dg_hdr_len = proto_len; +} +EXPORT_SYMBOL_GPL(qeth_fill_tso_ext); + +int qeth_xmit(struct qeth_card *card, struct sk_buff *skb, + struct qeth_qdio_out_q *queue, int ipv, int cast_type, + void (*fill_header)(struct qeth_card *card, struct qeth_hdr *hdr, + struct sk_buff *skb, int ipv, int cast_type, + unsigned int data_len)) +{ + unsigned int proto_len, hw_hdr_len; + unsigned int frame_len = skb->len; + bool is_tso = skb_is_gso(skb); + unsigned int data_offset = 0; + struct qeth_hdr *hdr = NULL; + unsigned int hd_len = 0; + unsigned int elements; + int push_len, rc; + bool is_sg; + + if (is_tso) { + hw_hdr_len = sizeof(struct qeth_hdr_tso); + proto_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + } else { + hw_hdr_len = sizeof(struct qeth_hdr); + proto_len = IS_IQD(card) ? 
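For orientation, the TSO header split in qeth_xmit() works out as follows on a standard untagged IPv4/TCP frame without options (illustrative numbers, not taken from the patch):

/*
 * skb_transport_offset(skb) = ETH_HLEN (14) + IPv4 header (20) = 34
 * tcp_hdrlen(skb)           = 20
 * proto_len                 = 34 + 20 = 54
 * hd_len                    = sizeof(struct qeth_hdr_tso) + 54
 * ext->payload_len          = skb->len - 54   (via qeth_fill_tso_ext())
 */

Those 54 bytes of protocol headers ride in the same dedicated buffer element as the HW header, which is why the TSO case always takes the hd_len path even when the header could have been pushed into the skb.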
ETH_HLEN : 0; + } + + rc = skb_cow_head(skb, hw_hdr_len); + if (rc) + return rc; + + push_len = qeth_add_hw_header(card, skb, &hdr, hw_hdr_len, proto_len, + &elements); + if (push_len < 0) + return push_len; + if (is_tso || !push_len) { + /* HW header needs its own buffer element. */ + hd_len = hw_hdr_len + proto_len; + data_offset = push_len + proto_len; + } + memset(hdr, 0, hw_hdr_len); + fill_header(card, hdr, skb, ipv, cast_type, frame_len); + if (is_tso) + qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr, + frame_len - proto_len, skb, proto_len); + + is_sg = skb_is_nonlinear(skb); + if (IS_IQD(card)) { + rc = qeth_do_send_packet_fast(queue, skb, hdr, data_offset, + hd_len); + } else { + /* TODO: drop skb_orphan() once TX completion is fast enough */ + skb_orphan(skb); + rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset, + hd_len, elements); + } + + if (!rc) { + if (card->options.performance_stats) { + card->perf_stats.buf_elements_sent += elements; + if (is_sg) + card->perf_stats.sg_skbs_sent++; + if (is_tso) { + card->perf_stats.large_send_bytes += frame_len; + card->perf_stats.large_send_cnt++; + } + } + } else { + if (!push_len) + kmem_cache_free(qeth_core_header_cache, hdr); + if (rc == -EBUSY) + /* roll back to ETH header */ + skb_pull(skb, push_len); + } + return rc; +} +EXPORT_SYMBOL_GPL(qeth_xmit); + static int qeth_setadp_promisc_mode_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { @@ -4243,8 +4246,7 @@ static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card, if (qeth_setadpparms_inspect_rc(cmd)) return 0; - if (!card->options.layer2 || - !(card->info.mac_bits & QETH_LAYER2_MAC_READ)) { + if (IS_LAYER3(card) || !(card->info.mac_bits & QETH_LAYER2_MAC_READ)) { ether_addr_copy(card->dev->dev_addr, cmd->data.setadapterparms.data.change_addr.addr); card->info.mac_bits |= QETH_LAYER2_MAC_READ; @@ -4598,9 +4600,9 @@ static int qeth_snmp_command(struct qeth_card *card, char __user *udata) return -EOPNOTSUPP; if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) && - (!card->options.layer2)) { + IS_LAYER3(card)) return -EOPNOTSUPP; - } + /* skip 4 bytes (data_len struct member) to get req_len */ if (copy_from_user(&req_len, udata + sizeof(int), sizeof(int))) return -EFAULT; @@ -5044,6 +5046,7 @@ static void qeth_core_free_card(struct qeth_card *card) qeth_clean_channel(&card->data); qeth_free_qdio_buffers(card); unregister_service_level(&card->qeth_service_level); + dev_set_drvdata(&card->gdev->dev, NULL); kfree(card); } @@ -5123,7 +5126,7 @@ retriable: qeth_determine_capabilities(card); qeth_init_tokens(card); qeth_init_func_level(card); - rc = qeth_idx_activate_channel(&card->read, qeth_idx_read_cb); + rc = qeth_idx_activate_channel(card, &card->read, qeth_idx_read_cb); if (rc == -ERESTARTSYS) { QETH_DBF_TEXT(SETUP, 2, "break2"); return rc; @@ -5134,7 +5137,7 @@ retriable: else goto retry; } - rc = qeth_idx_activate_channel(&card->write, qeth_idx_write_cb); + rc = qeth_idx_activate_channel(card, &card->write, qeth_idx_write_cb); if (rc == -ERESTARTSYS) { QETH_DBF_TEXT(SETUP, 2, "break3"); return rc; @@ -5158,13 +5161,14 @@ retriable: if (rc == IPA_RC_LAN_OFFLINE) { dev_warn(&card->gdev->dev, "The LAN is offline\n"); - card->lan_online = 0; + netif_carrier_off(card->dev); } else { rc = -ENODEV; goto out; } - } else - card->lan_online = 1; + } else { + netif_carrier_on(card->dev); + } card->options.ipa4.supported_funcs = 0; card->options.ipa6.supported_funcs = 0; @@ -5421,6 +5425,21 @@ static int qeth_setassparms_inspect_rc(struct 
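The error path at the end of qeth_xmit() is worth spelling out, since it must undo two different kinds of header placement:

/*
 * Rollback contract on failure, as implemented above:
 *  push_len == 0: header came from qeth_core_header_cache -> free it
 *  push_len > 0:  header lives in the skb's headroom; on -EBUSY the
 *                 stack may requeue the skb, so skb_pull() restores
 *                 the original Ethernet frame first
 */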
qeth_ipa_cmd *cmd) return cmd->hdr.return_code; } +static int qeth_setassparms_get_caps_cb(struct qeth_card *card, + struct qeth_reply *reply, + unsigned long data) +{ + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; + struct qeth_ipa_caps *caps = reply->param; + + if (qeth_setassparms_inspect_rc(cmd)) + return 0; + + caps->supported = cmd->data.setassparms.data.caps.supported; + caps->enabled = cmd->data.setassparms.data.caps.enabled; + return 0; +} + int qeth_setassparms_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { @@ -5456,8 +5475,6 @@ struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card, cmd->data.setassparms.hdr.assist_no = ipa_func; cmd->data.setassparms.hdr.length = 8 + len; cmd->data.setassparms.hdr.command_code = cmd_code; - cmd->data.setassparms.hdr.return_code = 0; - cmd->data.setassparms.hdr.seq_no = 0; } return iob; @@ -5560,11 +5577,11 @@ static int qeth_register_dbf_views(void) return 0; } +static DEFINE_MUTEX(qeth_mod_mutex); /* for synchronized module loading */ + int qeth_core_load_discipline(struct qeth_card *card, enum qeth_discipline_id discipline) { - int rc = 0; - mutex_lock(&qeth_mod_mutex); switch (discipline) { case QETH_DISCIPLINE_LAYER3: @@ -5578,22 +5595,25 @@ int qeth_core_load_discipline(struct qeth_card *card, default: break; } + mutex_unlock(&qeth_mod_mutex); if (!card->discipline) { dev_err(&card->gdev->dev, "There is no kernel module to " "support discipline %d\n", discipline); - rc = -EINVAL; + return -EINVAL; } - mutex_unlock(&qeth_mod_mutex); - return rc; + + card->options.layer = discipline; + return 0; } void qeth_core_free_discipline(struct qeth_card *card) { - if (card->options.layer2) + if (IS_LAYER2(card)) symbol_put(qeth_l2_discipline); else symbol_put(qeth_l3_discipline); + card->options.layer = QETH_DISCIPLINE_UNDETERMINED; card->discipline = NULL; } @@ -5731,7 +5751,6 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev) struct device *dev; int rc; enum qeth_discipline_id enforced_disc; - unsigned long flags; char dbf_name[DBF_NAME_LEN]; QETH_DBF_TEXT(SETUP, 2, "probedev"); @@ -5742,7 +5761,7 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev) QETH_DBF_TEXT_(SETUP, 2, "%s", dev_name(&gdev->dev)); - card = qeth_alloc_card(); + card = qeth_alloc_card(gdev); if (!card) { QETH_DBF_TEXT_(SETUP, 2, "1err%d", -ENOMEM); rc = -ENOMEM; @@ -5758,15 +5777,6 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev) goto err_card; } - card->read.ccwdev = gdev->cdev[0]; - card->write.ccwdev = gdev->cdev[1]; - card->data.ccwdev = gdev->cdev[2]; - dev_set_drvdata(&gdev->dev, card); - card->gdev = gdev; - gdev->cdev[0]->handler = qeth_irq; - gdev->cdev[1]->handler = qeth_irq; - gdev->cdev[2]->handler = qeth_irq; - qeth_setup_card(card); qeth_update_from_chp_desc(card); @@ -5797,9 +5807,9 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev) break; } - write_lock_irqsave(&qeth_core_card_list.rwlock, flags); + write_lock_irq(&qeth_core_card_list.rwlock); list_add_tail(&card->list, &qeth_core_card_list.list); - write_unlock_irqrestore(&qeth_core_card_list.rwlock, flags); + write_unlock_irq(&qeth_core_card_list.rwlock); return 0; err_disc: @@ -5815,7 +5825,6 @@ err_dev: static void qeth_core_remove_device(struct ccwgroup_device *gdev) { - unsigned long flags; struct qeth_card *card = dev_get_drvdata(&gdev->dev); QETH_DBF_TEXT(SETUP, 2, "removedv"); @@ -5825,12 +5834,11 @@ static void qeth_core_remove_device(struct ccwgroup_device *gdev) 
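qeth_setassparms_get_caps_cb() above simply copies the device's answer into a qeth_ipa_caps pair; callers then test it with the qeth_ipa_caps_supported()/qeth_ipa_caps_enabled() helpers added in qeth_core_mpc.h further down. An illustrative check, mirroring what the TSO code does with the reply:

static bool large_send_acked(struct qeth_ipa_caps *caps)
{
        /* both bits must be set: the device knows the function AND turned it on */
        return qeth_ipa_caps_supported(caps, QETH_IPA_LARGE_SEND_TCP) &&
               qeth_ipa_caps_enabled(caps, QETH_IPA_LARGE_SEND_TCP);
}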
qeth_core_free_discipline(card); } - write_lock_irqsave(&qeth_core_card_list.rwlock, flags); + write_lock_irq(&qeth_core_card_list.rwlock); list_del(&card->list); - write_unlock_irqrestore(&qeth_core_card_list.rwlock, flags); + write_unlock_irq(&qeth_core_card_list.rwlock); free_netdev(card->dev); qeth_core_free_card(card); - dev_set_drvdata(&gdev->dev, NULL); put_device(&gdev->dev); } @@ -6123,7 +6131,7 @@ void qeth_core_get_drvinfo(struct net_device *dev, { struct qeth_card *card = dev->ml_priv; - strlcpy(info->driver, card->options.layer2 ? "qeth_l2" : "qeth_l3", + strlcpy(info->driver, IS_LAYER2(card) ? "qeth_l2" : "qeth_l3", sizeof(info->driver)); strlcpy(info->version, "1.0", sizeof(info->version)); strlcpy(info->fw_version, card->info.mcl_level, @@ -6434,27 +6442,85 @@ static int qeth_set_ipa_csum(struct qeth_card *card, bool on, int cstype, return rc ? -EIO : 0; } -static int qeth_set_ipa_tso(struct qeth_card *card, int on) +static int qeth_start_tso_cb(struct qeth_card *card, struct qeth_reply *reply, + unsigned long data) +{ + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; + struct qeth_tso_start_data *tso_data = reply->param; + + if (qeth_setassparms_inspect_rc(cmd)) + return 0; + + tso_data->mss = cmd->data.setassparms.data.tso.mss; + tso_data->supported = cmd->data.setassparms.data.tso.supported; + return 0; +} + +static int qeth_set_tso_off(struct qeth_card *card, + enum qeth_prot_versions prot) { + return qeth_send_simple_setassparms_prot(card, IPA_OUTBOUND_TSO, + IPA_CMD_ASS_STOP, 0, prot); +} + +static int qeth_set_tso_on(struct qeth_card *card, + enum qeth_prot_versions prot) +{ + struct qeth_tso_start_data tso_data; + struct qeth_cmd_buffer *iob; + struct qeth_ipa_caps caps; int rc; - QETH_CARD_TEXT(card, 3, "sttso"); + iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO, + IPA_CMD_ASS_START, 0, prot); + if (!iob) + return -ENOMEM; - if (on) { - rc = qeth_send_simple_setassparms(card, IPA_OUTBOUND_TSO, - IPA_CMD_ASS_START, 0); - if (rc) { - dev_warn(&card->gdev->dev, - "Starting outbound TCP segmentation offload for %s failed\n", - QETH_CARD_IFNAME(card)); - return -EIO; - } - dev_info(&card->gdev->dev, "Outbound TSO enabled\n"); - } else { - rc = qeth_send_simple_setassparms(card, IPA_OUTBOUND_TSO, - IPA_CMD_ASS_STOP, 0); + rc = qeth_send_setassparms(card, iob, 0, 0 /* unused */, + qeth_start_tso_cb, &tso_data); + if (rc) + return rc; + + if (!tso_data.mss || !(tso_data.supported & QETH_IPA_LARGE_SEND_TCP)) { + qeth_set_tso_off(card, prot); + return -EOPNOTSUPP; } - return rc; + + iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO, + IPA_CMD_ASS_ENABLE, sizeof(caps), prot); + if (!iob) { + qeth_set_tso_off(card, prot); + return -ENOMEM; + } + + /* enable TSO capability */ + caps.supported = 0; + caps.enabled = QETH_IPA_LARGE_SEND_TCP; + rc = qeth_send_setassparms(card, iob, sizeof(caps), (long) &caps, + qeth_setassparms_get_caps_cb, &caps); + if (rc) { + qeth_set_tso_off(card, prot); + return rc; + } + + if (!qeth_ipa_caps_supported(&caps, QETH_IPA_LARGE_SEND_TCP) || + !qeth_ipa_caps_enabled(&caps, QETH_IPA_LARGE_SEND_TCP)) { + qeth_set_tso_off(card, prot); + return -EOPNOTSUPP; + } + + dev_info(&card->gdev->dev, "TSOv%u enabled (MSS: %u)\n", prot, + tso_data.mss); + return 0; +} + +static int qeth_set_ipa_tso(struct qeth_card *card, bool on, + enum qeth_prot_versions prot) +{ + int rc = on ? qeth_set_tso_on(card, prot) : + qeth_set_tso_off(card, prot); + + return rc ? 
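qeth_set_tso_on() above is a three-step negotiation, and every failure path drops back to qeth_set_tso_off() so the device never stays half-configured:

/*
 * 1. ASS_START   -> device replies with tso.mss and tso.supported
 * 2. sanity      -> bail out unless mss != 0 and LARGE_SEND_TCP is offered
 * 3. ASS_ENABLE  -> request LARGE_SEND_TCP, then re-check that the reply
 *                   reports it both supported and enabled
 */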
-EIO : 0; } static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on) @@ -6481,7 +6547,7 @@ static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on) } #define QETH_HW_FEATURES (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_TSO | \ - NETIF_F_IPV6_CSUM) + NETIF_F_IPV6_CSUM | NETIF_F_TSO6) /** * qeth_enable_hw_features() - (Re-)Enable HW functions for device features * @dev: a net_device @@ -6531,11 +6597,18 @@ int qeth_set_features(struct net_device *dev, netdev_features_t features) if (rc) changed ^= NETIF_F_RXCSUM; } - if ((changed & NETIF_F_TSO)) { - rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO ? 1 : 0); + if (changed & NETIF_F_TSO) { + rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO, + QETH_PROT_IPV4); if (rc) changed ^= NETIF_F_TSO; } + if (changed & NETIF_F_TSO6) { + rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO6, + QETH_PROT_IPV6); + if (rc) + changed ^= NETIF_F_TSO6; + } /* everything changed successfully? */ if ((dev->features ^ features) == changed) @@ -6561,6 +6634,8 @@ netdev_features_t qeth_fix_features(struct net_device *dev, features &= ~NETIF_F_RXCSUM; if (!qeth_is_supported(card, IPA_OUTBOUND_TSO)) features &= ~NETIF_F_TSO; + if (!qeth_is_supported6(card, IPA_OUTBOUND_TSO)) + features &= ~NETIF_F_TSO6; /* if the card isn't up, remove features that require hw changes */ if (card->state == CARD_STATE_DOWN || card->state == CARD_STATE_RECOVER) @@ -6602,9 +6677,7 @@ static int __init qeth_core_init(void) pr_info("loading core functions\n"); INIT_LIST_HEAD(&qeth_core_card_list.list); - INIT_LIST_HEAD(&qeth_dbf_list); rwlock_init(&qeth_core_card_list.rwlock); - mutex_init(&qeth_mod_mutex); qeth_wq = create_singlethread_workqueue("qeth_wq"); if (!qeth_wq) { @@ -6619,8 +6692,10 @@ static int __init qeth_core_init(void) rc = PTR_ERR_OR_ZERO(qeth_core_root_dev); if (rc) goto register_err; - qeth_core_header_cache = kmem_cache_create("qeth_hdr", - sizeof(struct qeth_hdr) + ETH_HLEN, 64, 0, NULL); + qeth_core_header_cache = + kmem_cache_create("qeth_hdr", QETH_HDR_CACHE_OBJ_SIZE, + roundup_pow_of_two(QETH_HDR_CACHE_OBJ_SIZE), + 0, NULL); if (!qeth_core_header_cache) { rc = -ENOMEM; goto slab_err; diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h index aa5de1fe01e1..e85090467afe 100644 --- a/drivers/s390/net/qeth_core_mpc.h +++ b/drivers/s390/net/qeth_core_mpc.h @@ -56,6 +56,21 @@ static inline bool qeth_intparm_is_iob(unsigned long intparm) #define IPA_CMD_INITIATOR_OSA_REPLY 0x81 #define IPA_CMD_PRIM_VERSION_NO 0x01 +struct qeth_ipa_caps { + u32 supported; + u32 enabled; +}; + +static inline bool qeth_ipa_caps_supported(struct qeth_ipa_caps *caps, u32 mask) +{ + return (caps->supported & mask) == mask; +} + +static inline bool qeth_ipa_caps_enabled(struct qeth_ipa_caps *caps, u32 mask) +{ + return (caps->enabled & mask) == mask; +} + enum qeth_card_types { QETH_CARD_TYPE_OSD = 1, QETH_CARD_TYPE_IQD = 5, @@ -405,14 +420,25 @@ struct qeth_checksum_cmd { __u32 enabled; } __packed; +enum qeth_ipa_large_send_caps { + QETH_IPA_LARGE_SEND_TCP = 0x00000001, +}; + +struct qeth_tso_start_data { + u32 mss; + u32 supported; +}; + /* SETASSPARMS IPA Command: */ struct qeth_ipacmd_setassparms { struct qeth_ipacmd_setassparms_hdr hdr; union { __u32 flags_32bit; + struct qeth_ipa_caps caps; struct qeth_checksum_cmd chksum; struct qeth_arp_cache_entry add_arp_entry; struct qeth_arp_query_data query_arp; + struct qeth_tso_start_data tso; __u8 ip[16]; } data; } __attribute__ ((packed)); diff --git a/drivers/s390/net/qeth_core_sys.c 
b/drivers/s390/net/qeth_core_sys.c index 25d0be25bcb3..30f61608fa22 100644 --- a/drivers/s390/net/qeth_core_sys.c +++ b/drivers/s390/net/qeth_core_sys.c @@ -31,10 +31,9 @@ static ssize_t qeth_dev_state_show(struct device *dev, case CARD_STATE_SOFTSETUP: return sprintf(buf, "SOFTSETUP\n"); case CARD_STATE_UP: - if (card->lan_online) - return sprintf(buf, "UP (LAN ONLINE)\n"); - else - return sprintf(buf, "UP (LAN OFFLINE)\n"); + return sprintf(buf, "UP (LAN %s)\n", + netif_carrier_ok(card->dev) ? "ONLINE" : + "OFFLINE"); case CARD_STATE_RECOVER: return sprintf(buf, "RECOVER\n"); default: @@ -228,7 +227,7 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev, card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_TOS; card->qdio.default_out_queue = QETH_DEFAULT_QUEUE; } else if (sysfs_streq(buf, "prio_queueing_vlan")) { - if (!card->options.layer2) { + if (IS_LAYER3(card)) { rc = -ENOTSUPP; goto out; } @@ -379,7 +378,7 @@ static ssize_t qeth_dev_layer2_show(struct device *dev, if (!card) return -EINVAL; - return sprintf(buf, "%i\n", card->options.layer2); + return sprintf(buf, "%i\n", card->options.layer); } static ssize_t qeth_dev_layer2_store(struct device *dev, @@ -413,7 +412,7 @@ static ssize_t qeth_dev_layer2_store(struct device *dev, goto out; } - if (card->options.layer2 == newdis) + if (card->options.layer == newdis) goto out; if (card->info.layer_enforced) { /* fixed layer, can't switch */ @@ -432,8 +431,6 @@ static ssize_t qeth_dev_layer2_store(struct device *dev, card->discipline->remove(card->gdev); qeth_core_free_discipline(card); - card->options.layer2 = -1; - free_netdev(card->dev); card->dev = ndev; } diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index b5e38531733f..23aaf373f631 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -193,15 +193,25 @@ static int qeth_l2_get_cast_type(struct qeth_card *card, struct sk_buff *skb) return RTN_UNICAST; } -static void qeth_l2_fill_header(struct qeth_hdr *hdr, struct sk_buff *skb, - int cast_type, unsigned int data_len) +static void qeth_l2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, + struct sk_buff *skb, int ipv, int cast_type, + unsigned int data_len) { - struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb_mac_header(skb); + struct vlan_ethhdr *veth = vlan_eth_hdr(skb); - memset(hdr, 0, sizeof(struct qeth_hdr)); - hdr->hdr.l2.id = QETH_HEADER_TYPE_LAYER2; hdr->hdr.l2.pkt_length = data_len; + if (skb_is_gso(skb)) { + hdr->hdr.l2.id = QETH_HEADER_TYPE_L2_TSO; + } else { + hdr->hdr.l2.id = QETH_HEADER_TYPE_LAYER2; + if (skb->ip_summed == CHECKSUM_PARTIAL) { + qeth_tx_csum(skb, &hdr->hdr.l2.flags[1], ipv); + if (card->options.performance_stats) + card->perf_stats.tx_csum++; + } + } + /* set byte byte 3 to casting flags */ if (cast_type == RTN_MULTICAST) hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_MULTICAST; @@ -641,84 +651,43 @@ static void qeth_l2_set_rx_mode(struct net_device *dev) qeth_promisc_to_bridge(card); } -static int qeth_l2_xmit(struct qeth_card *card, struct sk_buff *skb, - struct qeth_qdio_out_q *queue, int cast_type, int ipv) +static int qeth_l2_xmit_osn(struct qeth_card *card, struct sk_buff *skb, + struct qeth_qdio_out_q *queue) { - const unsigned int proto_len = IS_IQD(card) ? 
ETH_HLEN : 0; - const unsigned int hw_hdr_len = sizeof(struct qeth_hdr); - unsigned int frame_len = skb->len; - unsigned int data_offset = 0; - struct qeth_hdr *hdr = NULL; + struct qeth_hdr *hdr = (struct qeth_hdr *)skb->data; + addr_t end = (addr_t)(skb->data + sizeof(*hdr)); + addr_t start = (addr_t)skb->data; + unsigned int elements = 0; unsigned int hd_len = 0; - unsigned int elements; - int push_len, rc; - bool is_sg; + int rc; - rc = skb_cow_head(skb, hw_hdr_len); - if (rc) - return rc; + if (skb->protocol == htons(ETH_P_IPV6)) + return -EPROTONOSUPPORT; - push_len = qeth_add_hw_header(card, skb, &hdr, hw_hdr_len, proto_len, - &elements); - if (push_len < 0) - return push_len; - if (!push_len) { - /* HW header needs its own buffer element. */ - hd_len = hw_hdr_len + proto_len; - data_offset = proto_len; - } - qeth_l2_fill_header(hdr, skb, cast_type, frame_len); - if (skb->ip_summed == CHECKSUM_PARTIAL) { - qeth_tx_csum(skb, &hdr->hdr.l2.flags[1], ipv); - if (card->options.performance_stats) - card->perf_stats.tx_csum++; + if (qeth_get_elements_for_range(start, end) > 1) { + /* Misaligned HW header, move it to its own buffer element. */ + hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC); + if (!hdr) + return -ENOMEM; + hd_len = sizeof(*hdr); + skb_copy_from_linear_data(skb, (char *)hdr, hd_len); + elements++; } - is_sg = skb_is_nonlinear(skb); - if (IS_IQD(card)) { - rc = qeth_do_send_packet_fast(queue, skb, hdr, data_offset, - hd_len); - } else { - /* TODO: drop skb_orphan() once TX completion is fast enough */ - skb_orphan(skb); - rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset, - hd_len, elements); + elements += qeth_count_elements(skb, hd_len); + if (elements > QETH_MAX_BUFFER_ELEMENTS(card)) { + rc = -E2BIG; + goto out; } - if (!rc) { - if (card->options.performance_stats) { - card->perf_stats.buf_elements_sent += elements; - if (is_sg) - card->perf_stats.sg_skbs_sent++; - } - } else { - if (!push_len) - kmem_cache_free(qeth_core_header_cache, hdr); - if (rc == -EBUSY) - /* roll back to ETH header */ - skb_pull(skb, push_len); - } + rc = qeth_do_send_packet(card, queue, skb, hdr, hd_len, hd_len, + elements); +out: + if (rc && hd_len) + kmem_cache_free(qeth_core_header_cache, hdr); return rc; } -static int qeth_l2_xmit_osn(struct qeth_card *card, struct sk_buff *skb, - struct qeth_qdio_out_q *queue) -{ - unsigned int elements; - struct qeth_hdr *hdr; - - if (skb->protocol == htons(ETH_P_IPV6)) - return -EPROTONOSUPPORT; - - hdr = (struct qeth_hdr *)skb->data; - elements = qeth_get_elements_no(card, skb, 0, 0); - if (!elements) - return -E2BIG; - if (qeth_hdr_chk_and_bounce(skb, &hdr, sizeof(*hdr))) - return -EINVAL; - return qeth_do_send_packet(card, queue, skb, hdr, 0, 0, elements); -} - static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) { @@ -729,7 +698,7 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb, int tx_bytes = skb->len; int rc; - if ((card->state != CARD_STATE_UP) || !card->lan_online) { + if (card->state != CARD_STATE_UP) { card->stats.tx_carrier_errors++; goto tx_drop; } @@ -745,7 +714,8 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb, if (IS_OSN(card)) rc = qeth_l2_xmit_osn(card, skb, queue); else - rc = qeth_l2_xmit(card, skb, queue, cast_type, ipv); + rc = qeth_xmit(card, skb, queue, ipv, cast_type, + qeth_l2_fill_header); if (!rc) { card->stats.tx_packets++; @@ -789,7 +759,10 @@ static int __qeth_l2_open(struct net_device *dev) if (qdio_stop_irq(card->data.ccwdev, 0) >= 
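The open-path tweak that follows is easy to miss: napi_schedule() from process context only marks NET_RX_SOFTIRQ pending, and nothing would run it until the next interrupt or a ksoftirqd pass. Bracketing it with BH disable/enable forces pending softirqs to execute on the enable, so RX polling starts immediately:

local_bh_disable();
napi_schedule(&card->napi);
local_bh_enable();      /* pending softirqs, including ours, run here */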
0) { napi_enable(&card->napi); + local_bh_disable(); napi_schedule(&card->napi); + /* kick-start the NAPI softirq: */ + local_bh_enable(); } else rc = -EIO; return rc; @@ -837,7 +810,6 @@ static int qeth_l2_probe_device(struct ccwgroup_device *gdev) } INIT_LIST_HEAD(&card->vid_list); hash_init(card->mac_htable); - card->options.layer2 = 1; card->info.hwtrap = 0; qeth_l2_vnicc_set_defaults(card); return 0; @@ -929,6 +901,20 @@ static int qeth_l2_setup_netdev(struct qeth_card *card) card->dev->hw_features |= NETIF_F_RXCSUM; card->dev->vlan_features |= NETIF_F_RXCSUM; } + if (qeth_is_supported(card, IPA_OUTBOUND_TSO)) { + card->dev->hw_features |= NETIF_F_TSO; + card->dev->vlan_features |= NETIF_F_TSO; + } + if (qeth_is_supported6(card, IPA_OUTBOUND_TSO)) { + card->dev->hw_features |= NETIF_F_TSO6; + card->dev->vlan_features |= NETIF_F_TSO6; + } + + if (card->dev->hw_features & (NETIF_F_TSO | NETIF_F_TSO6)) { + card->dev->needed_headroom = sizeof(struct qeth_hdr_tso); + netif_set_gso_max_size(card->dev, + PAGE_SIZE * (QDIO_MAX_ELEMENTS_PER_BUFFER - 1)); + } qeth_l2_request_initial_mac(card); netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT); @@ -1029,10 +1015,6 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) goto out_remove; } card->state = CARD_STATE_SOFTSETUP; - if (card->lan_online) - netif_carrier_on(card->dev); - else - netif_carrier_off(card->dev); qeth_set_allowed_threads(card, 0xffffffff, 0); @@ -1178,9 +1160,6 @@ static int qeth_l2_pm_resume(struct ccwgroup_device *gdev) struct qeth_card *card = dev_get_drvdata(&gdev->dev); int rc = 0; - if (gdev->state == CCWGROUP_OFFLINE) - goto out; - if (card->state == CARD_STATE_RECOVER) { rc = __qeth_l2_set_online(card->gdev, 1); if (rc) { @@ -1190,7 +1169,7 @@ static int qeth_l2_pm_resume(struct ccwgroup_device *gdev) } } else rc = __qeth_l2_set_online(card->gdev, 0); -out: + qeth_set_allowed_threads(card, 0xffffffff, 0); netif_device_attach(card->dev); if (rc) @@ -1240,7 +1219,6 @@ static int qeth_osn_send_control_data(struct qeth_card *card, int len, struct qeth_cmd_buffer *iob) { struct qeth_channel *channel = iob->channel; - unsigned long flags; int rc = 0; QETH_CARD_TEXT(card, 5, "osndctrd"); @@ -1249,10 +1227,10 @@ static int qeth_osn_send_control_data(struct qeth_card *card, int len, atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0); qeth_prepare_control_data(card, len, iob); QETH_CARD_TEXT(card, 6, "osnoirqp"); - spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); + spin_lock_irq(get_ccwdev_lock(channel->ccwdev)); rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw, (addr_t) iob, 0, 0, QETH_IPA_TIMEOUT); - spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); + spin_unlock_irq(get_ccwdev_lock(channel->ccwdev)); if (rc) { QETH_DBF_MESSAGE(2, "qeth_osn_send_control_data: " "ccw_device_start rc = %i\n", rc); diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index ada258c01a08..0b161cc1fd2e 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -33,7 +33,6 @@ #include <net/ipv6.h> #include <net/ip6_route.h> #include <net/ip6_fib.h> -#include <net/ip6_checksum.h> #include <net/iucv/af_iucv.h> #include <linux/hashtable.h> @@ -1349,6 +1348,7 @@ static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb, static int qeth_l3_process_inbound_buffer(struct qeth_card *card, int budget, int *done) { + struct net_device *dev = card->dev; int work_done = 0; struct sk_buff *skb; struct qeth_hdr 
*hdr; @@ -1370,11 +1370,10 @@ static int qeth_l3_process_inbound_buffer(struct qeth_card *card, magic = *(__u16 *)skb->data; if ((card->info.type == QETH_CARD_TYPE_IQD) && (magic == ETH_P_AF_IUCV)) { - skb->protocol = cpu_to_be16(ETH_P_AF_IUCV); len = skb->len; - card->dev->header_ops->create(skb, card->dev, 0, - card->dev->dev_addr, "FAKELL", len); - skb_reset_mac_header(skb); + dev_hard_header(skb, dev, ETH_P_AF_IUCV, + dev->dev_addr, "FAKELL", len); + skb->protocol = eth_type_trans(skb, dev); netif_receive_skb(skb); } else { qeth_l3_rebuild_skb(card, skb, hdr); @@ -1983,39 +1982,38 @@ static int qeth_l3_get_cast_type(struct sk_buff *skb) rcu_read_unlock(); /* no neighbour (eg AF_PACKET), fall back to target's IP address ... */ - if (be16_to_cpu(skb->protocol) == ETH_P_IPV6) - return ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) ? - RTN_MULTICAST : RTN_UNICAST; - else if (be16_to_cpu(skb->protocol) == ETH_P_IP) + switch (qeth_get_ip_version(skb)) { + case 4: return ipv4_is_multicast(ip_hdr(skb)->daddr) ? RTN_MULTICAST : RTN_UNICAST; - - /* ... and MAC address */ - if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, skb->dev->broadcast)) - return RTN_BROADCAST; - if (is_multicast_ether_addr(eth_hdr(skb)->h_dest)) - return RTN_MULTICAST; - - /* default to unicast */ - return RTN_UNICAST; + case 6: + return ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) ? + RTN_MULTICAST : RTN_UNICAST; + default: + /* ... and MAC address */ + if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, + skb->dev->broadcast)) + return RTN_BROADCAST; + if (is_multicast_ether_addr(eth_hdr(skb)->h_dest)) + return RTN_MULTICAST; + /* default to unicast */ + return RTN_UNICAST; + } } static void qeth_l3_fill_af_iucv_hdr(struct qeth_hdr *hdr, struct sk_buff *skb, unsigned int data_len) { char daddr[16]; - struct af_iucv_trans_hdr *iucv_hdr; - memset(hdr, 0, sizeof(struct qeth_hdr)); hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3; hdr->hdr.l3.length = data_len; hdr->hdr.l3.flags = QETH_HDR_IPV6 | QETH_CAST_UNICAST; - iucv_hdr = (struct af_iucv_trans_hdr *)(skb_mac_header(skb) + ETH_HLEN); memset(daddr, 0, sizeof(daddr)); daddr[0] = 0xfe; daddr[1] = 0x80; - memcpy(&daddr[8], iucv_hdr->destUserID, 8); + memcpy(&daddr[8], iucv_trans_hdr(skb)->destUserID, 8); memcpy(hdr->hdr.l3.next_hop.ipv6_addr, daddr, 16); } @@ -2034,26 +2032,33 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, struct sk_buff *skb, int ipv, int cast_type, unsigned int data_len) { - memset(hdr, 0, sizeof(struct qeth_hdr)); - hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3; + struct vlan_ethhdr *veth = vlan_eth_hdr(skb); + hdr->hdr.l3.length = data_len; - /* - * before we're going to overwrite this location with next hop ip. - * v6 uses passthrough, v4 sets the tag in the QDIO header. 
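[Editor's sketch, illustrative only and not part of the patch: the AF_IUCV receive hunk above switches from calling header_ops->create() directly to dev_hard_header() plus eth_type_trans(). dev_hard_header() prepends the fake Ethernet header through the device's header_ops, and eth_type_trans() then derives skb->protocol and skb->pkt_type from that header and advances skb->data past it, so the frame enters the stack fully classified. Condensed restatement of the new path:]

	/* illustrative restatement of the hunk above, not new driver code */
	static void sketch_rx_with_fake_header(struct sk_buff *skb,
					       struct net_device *dev)
	{
		unsigned int len = skb->len;

		/* dst MAC = our MAC, src = "FAKELL", type = AF_IUCV */
		dev_hard_header(skb, dev, ETH_P_AF_IUCV, dev->dev_addr,
				"FAKELL", len);
		skb->protocol = eth_type_trans(skb, dev);
		netif_receive_skb(skb);
	}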
- */ - if (skb_vlan_tag_present(skb)) { - if ((ipv == 4) || (card->info.type == QETH_CARD_TYPE_IQD)) - hdr->hdr.l3.ext_flags = QETH_HDR_EXT_VLAN_FRAME; - else - hdr->hdr.l3.ext_flags = QETH_HDR_EXT_INCLUDE_VLAN_TAG; - hdr->hdr.l3.vlan_id = skb_vlan_tag_get(skb); + if (skb_is_gso(skb)) { + hdr->hdr.l3.id = QETH_HEADER_TYPE_L3_TSO; + } else { + hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3; + if (skb->ip_summed == CHECKSUM_PARTIAL) { + qeth_tx_csum(skb, &hdr->hdr.l3.ext_flags, ipv); + /* some HW requires combined L3+L4 csum offload: */ + if (ipv == 4) + hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_CSUM_HDR_REQ; + if (card->options.performance_stats) + card->perf_stats.tx_csum++; + } } - if (!skb_is_gso(skb) && skb->ip_summed == CHECKSUM_PARTIAL) { - qeth_tx_csum(skb, &hdr->hdr.l3.ext_flags, ipv); - if (card->options.performance_stats) - card->perf_stats.tx_csum++; + if (ipv == 4 || IS_IQD(card)) { + /* NETIF_F_HW_VLAN_CTAG_TX */ + if (skb_vlan_tag_present(skb)) { + hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_VLAN_FRAME; + hdr->hdr.l3.vlan_id = skb_vlan_tag_get(skb); + } + } else if (veth->h_vlan_proto == htons(ETH_P_8021Q)) { + hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_INCLUDE_VLAN_TAG; + hdr->hdr.l3.vlan_id = ntohs(veth->h_vlan_TCI); } /* OSA only: */ @@ -2094,85 +2099,41 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, rcu_read_unlock(); } -static void qeth_tso_fill_header(struct qeth_card *card, - struct qeth_hdr *qhdr, struct sk_buff *skb) +static void qeth_l3_fixup_headers(struct sk_buff *skb) { - struct qeth_hdr_tso *hdr = (struct qeth_hdr_tso *)qhdr; - struct tcphdr *tcph = tcp_hdr(skb); struct iphdr *iph = ip_hdr(skb); - struct ipv6hdr *ip6h = ipv6_hdr(skb); - - /*fix header to TSO values ...*/ - hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO; - /*set values which are fix for the first approach ...*/ - hdr->ext.hdr_tot_len = (__u16) sizeof(struct qeth_hdr_ext_tso); - hdr->ext.imb_hdr_no = 1; - hdr->ext.hdr_type = 1; - hdr->ext.hdr_version = 1; - hdr->ext.hdr_len = 28; - /*insert non-fix values */ - hdr->ext.mss = skb_shinfo(skb)->gso_size; - hdr->ext.dg_hdr_len = (__u16)(ip_hdrlen(skb) + tcp_hdrlen(skb)); - hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len - - sizeof(struct qeth_hdr_tso)); - tcph->check = 0; - if (be16_to_cpu(skb->protocol) == ETH_P_IPV6) { - ip6h->payload_len = 0; - tcph->check = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, - 0, IPPROTO_TCP, 0); - } else { - /*OSA want us to set these values ...*/ - tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, - 0, IPPROTO_TCP, 0); - iph->tot_len = 0; - iph->check = 0; - } -} -/** - * qeth_l3_get_elements_no_tso() - find number of SBALEs for skb data for tso - * @card: qeth card structure, to check max. elems. - * @skb: SKB address - * @extra_elems: extra elems needed, to check against max. - * - * Returns the number of pages, and thus QDIO buffer elements, needed to cover - * skb data, including linear part and fragments, but excluding TCP header. - * (Exclusion of TCP header distinguishes it from qeth_get_elements_no().) - * Checks if the result plus extra_elems fits under the limit for the card. - * Returns 0 if it does not. - * Note: extra_elems is not included in the returned result. 
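[Editor's sketch, illustrative only: the reworked qeth_l3_fill_header() above handles two distinct VLAN cases — a tag carried out-of-band in skb metadata (the NETIF_F_HW_VLAN_CTAG_TX accelerated path, flagged QETH_HDR_EXT_VLAN_FRAME) versus a tag already inlined in the frame's Ethernet header (flagged QETH_HDR_EXT_INCLUDE_VLAN_TAG). A minimal helper showing where each TCI comes from, assuming <linux/if_vlan.h> and a valid mac header:]

	static u16 sketch_vlan_tci(const struct sk_buff *skb, bool *tag_in_frame)
	{
		const struct vlan_ethhdr *veth = vlan_eth_hdr(skb);

		if (skb_vlan_tag_present(skb)) {
			*tag_in_frame = false;
			return skb_vlan_tag_get(skb);	/* from skb->vlan_tci */
		}
		if (veth->h_vlan_proto == htons(ETH_P_8021Q)) {
			*tag_in_frame = true;
			return ntohs(veth->h_vlan_TCI);	/* from the frame itself */
		}
		*tag_in_frame = false;
		return 0;
	}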
- */ -static int qeth_l3_get_elements_no_tso(struct qeth_card *card, - struct sk_buff *skb, int extra_elems) -{ - addr_t start = (addr_t)tcp_hdr(skb) + tcp_hdrlen(skb); - addr_t end = (addr_t)skb->data + skb_headlen(skb); - int elements = qeth_get_elements_for_frags(skb); - - if (start != end) - elements += qeth_get_elements_for_range(start, end); - - if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) { - QETH_DBF_MESSAGE(2, - "Invalid size of TSO IP packet (Number=%d / Length=%d). Discarded.\n", - elements + extra_elems, skb->len); - return 0; + /* this is safe, IPv6 traffic takes a different path */ + if (skb->ip_summed == CHECKSUM_PARTIAL) + iph->check = 0; + if (skb_is_gso(skb)) { + iph->tot_len = 0; + tcp_hdr(skb)->check = ~tcp_v4_check(0, iph->saddr, + iph->daddr, 0); } - return elements; } -static int qeth_l3_xmit_offload(struct qeth_card *card, struct sk_buff *skb, - struct qeth_qdio_out_q *queue, int ipv, - int cast_type) +static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb, + struct qeth_qdio_out_q *queue, int ipv, int cast_type) { - const unsigned int hw_hdr_len = sizeof(struct qeth_hdr); - unsigned int frame_len, elements; + unsigned int hw_hdr_len, proto_len, frame_len, elements; unsigned char eth_hdr[ETH_HLEN]; + bool is_tso = skb_is_gso(skb); + unsigned int data_offset = 0; struct qeth_hdr *hdr = NULL; unsigned int hd_len = 0; int push_len, rc; bool is_sg; + if (is_tso) { + hw_hdr_len = sizeof(struct qeth_hdr_tso); + proto_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - + ETH_HLEN; + } else { + hw_hdr_len = sizeof(struct qeth_hdr); + proto_len = 0; + } + /* re-use the L2 header area for the HW header: */ rc = skb_cow_head(skb, hw_hdr_len - ETH_HLEN); if (rc) @@ -2181,28 +2142,37 @@ static int qeth_l3_xmit_offload(struct qeth_card *card, struct sk_buff *skb, skb_pull(skb, ETH_HLEN); frame_len = skb->len; - push_len = qeth_add_hw_header(card, skb, &hdr, hw_hdr_len, 0, + qeth_l3_fixup_headers(skb); + push_len = qeth_add_hw_header(card, skb, &hdr, hw_hdr_len, proto_len, &elements); if (push_len < 0) return push_len; - if (!push_len) { - /* hdr was added discontiguous from skb->data */ - hd_len = hw_hdr_len; + if (is_tso || !push_len) { + /* HW header needs its own buffer element. 
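[Editor's sketch, illustrative only: the new qeth_l3_fixup_headers() above prepares an IPv4 TSO skb for hardware segmentation — tot_len is cleared because the device fills in per-segment lengths, and the TCP checksum field is seeded with the pseudo-header sum over a zero length (tcp_v4_check() from <net/tcp.h>) so the device can complete it per segment. Restated in isolation:]

	static void sketch_tso_fixup_ipv4(struct sk_buff *skb)
	{
		struct iphdr *iph = ip_hdr(skb);

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			iph->check = 0;		/* recomputed by hardware */
		if (skb_is_gso(skb)) {
			iph->tot_len = 0;	/* per-segment, device fills it */
			tcp_hdr(skb)->check =
				~tcp_v4_check(0, iph->saddr, iph->daddr, 0);
		}
	}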
*/ + hd_len = hw_hdr_len + proto_len; + data_offset = push_len + proto_len; } + memset(hdr, 0, hw_hdr_len); - if (skb->protocol == htons(ETH_P_AF_IUCV)) + if (skb->protocol == htons(ETH_P_AF_IUCV)) { qeth_l3_fill_af_iucv_hdr(hdr, skb, frame_len); - else + } else { qeth_l3_fill_header(card, hdr, skb, ipv, cast_type, frame_len); + if (is_tso) + qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr, + frame_len - proto_len, skb, + proto_len); + } is_sg = skb_is_nonlinear(skb); if (IS_IQD(card)) { - rc = qeth_do_send_packet_fast(queue, skb, hdr, 0, hd_len); + rc = qeth_do_send_packet_fast(queue, skb, hdr, data_offset, + hd_len); } else { /* TODO: drop skb_orphan() once TX completion is fast enough */ skb_orphan(skb); - rc = qeth_do_send_packet(card, queue, skb, hdr, 0, hd_len, - elements); + rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset, + hd_len, elements); } if (!rc) { @@ -2210,6 +2180,10 @@ static int qeth_l3_xmit_offload(struct qeth_card *card, struct sk_buff *skb, card->perf_stats.buf_elements_sent += elements; if (is_sg) card->perf_stats.sg_skbs_sent++; + if (is_tso) { + card->perf_stats.large_send_bytes += frame_len; + card->perf_stats.large_send_cnt++; + } } } else { if (!push_len) @@ -2224,118 +2198,6 @@ static int qeth_l3_xmit_offload(struct qeth_card *card, struct sk_buff *skb, return rc; } -static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb, - struct qeth_qdio_out_q *queue, int ipv, int cast_type) -{ - int elements, len, rc; - __be16 *tag; - struct qeth_hdr *hdr = NULL; - int hdr_elements = 0; - struct sk_buff *new_skb = NULL; - int tx_bytes = skb->len; - unsigned int hd_len; - bool use_tso, is_sg; - - /* Ignore segment size from skb_is_gso(), 1 page is always used. */ - use_tso = skb_is_gso(skb) && - (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4); - - /* create a clone with writeable headroom */ - new_skb = skb_realloc_headroom(skb, sizeof(struct qeth_hdr_tso) + - VLAN_HLEN); - if (!new_skb) - return -ENOMEM; - - if (ipv == 4) { - skb_pull(new_skb, ETH_HLEN); - } else if (skb_vlan_tag_present(new_skb)) { - skb_push(new_skb, VLAN_HLEN); - skb_copy_to_linear_data(new_skb, new_skb->data + 4, 4); - skb_copy_to_linear_data_offset(new_skb, 4, - new_skb->data + 8, 4); - skb_copy_to_linear_data_offset(new_skb, 8, - new_skb->data + 12, 4); - tag = (__be16 *)(new_skb->data + 12); - *tag = cpu_to_be16(ETH_P_8021Q); - *(tag + 1) = cpu_to_be16(skb_vlan_tag_get(new_skb)); - } - - /* fix hardware limitation: as long as we do not have sbal - * chaining we can not send long frag lists - */ - if ((use_tso && !qeth_l3_get_elements_no_tso(card, new_skb, 1)) || - (!use_tso && !qeth_get_elements_no(card, new_skb, 0, 0))) { - rc = skb_linearize(new_skb); - - if (card->options.performance_stats) { - if (rc) - card->perf_stats.tx_linfail++; - else - card->perf_stats.tx_lin++; - } - if (rc) - goto out; - } - - if (use_tso) { - hdr = skb_push(new_skb, sizeof(struct qeth_hdr_tso)); - memset(hdr, 0, sizeof(struct qeth_hdr_tso)); - qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type, - new_skb->len - sizeof(struct qeth_hdr_tso)); - qeth_tso_fill_header(card, hdr, new_skb); - hdr_elements++; - } else { - hdr = skb_push(new_skb, sizeof(struct qeth_hdr)); - qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type, - new_skb->len - sizeof(struct qeth_hdr)); - } - - elements = use_tso ? 
- qeth_l3_get_elements_no_tso(card, new_skb, hdr_elements) : - qeth_get_elements_no(card, new_skb, hdr_elements, 0); - if (!elements) { - rc = -E2BIG; - goto out; - } - elements += hdr_elements; - - if (use_tso) { - hd_len = sizeof(struct qeth_hdr_tso) + - ip_hdrlen(new_skb) + tcp_hdrlen(new_skb); - len = hd_len; - } else { - hd_len = 0; - len = sizeof(struct qeth_hdr_layer3); - } - - if (qeth_hdr_chk_and_bounce(new_skb, &hdr, len)) { - rc = -EINVAL; - goto out; - } - - is_sg = skb_is_nonlinear(new_skb); - rc = qeth_do_send_packet(card, queue, new_skb, hdr, hd_len, hd_len, - elements); -out: - if (!rc) { - if (new_skb != skb) - dev_kfree_skb_any(skb); - if (card->options.performance_stats) { - card->perf_stats.buf_elements_sent += elements; - if (is_sg) - card->perf_stats.sg_skbs_sent++; - if (use_tso) { - card->perf_stats.large_send_bytes += tx_bytes; - card->perf_stats.large_send_cnt++; - } - } - } else { - if (new_skb != skb) - dev_kfree_skb_any(new_skb); - } - return rc; -} - static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) { @@ -2355,7 +2217,7 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, goto tx_drop; } - if (card->state != CARD_STATE_UP || !card->lan_online) { + if (card->state != CARD_STATE_UP) { card->stats.tx_carrier_errors++; goto tx_drop; } @@ -2371,10 +2233,11 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, } netif_stop_queue(dev); - if (IS_IQD(card) || (!skb_is_gso(skb) && ipv == 4)) - rc = qeth_l3_xmit_offload(card, skb, queue, ipv, cast_type); - else + if (ipv == 4 || IS_IQD(card)) rc = qeth_l3_xmit(card, skb, queue, ipv, cast_type); + else + rc = qeth_xmit(card, skb, queue, ipv, cast_type, + qeth_l3_fill_header); if (!rc) { card->stats.tx_packets++; @@ -2412,7 +2275,10 @@ static int __qeth_l3_open(struct net_device *dev) if (qdio_stop_irq(card->data.ccwdev, 0) >= 0) { napi_enable(&card->napi); + local_bh_disable(); napi_schedule(&card->napi); + /* kick-start the NAPI softirq: */ + local_bh_enable(); } else rc = -EIO; return rc; @@ -2476,6 +2342,15 @@ qeth_l3_neigh_setup(struct net_device *dev, struct neigh_parms *np) return 0; } +static netdev_features_t qeth_l3_osa_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + if (qeth_get_ip_version(skb) != 4) + features &= ~NETIF_F_HW_VLAN_CTAG_TX; + return qeth_features_check(skb, dev, features); +} + static const struct net_device_ops qeth_l3_netdev_ops = { .ndo_open = qeth_l3_open, .ndo_stop = qeth_l3_stop, @@ -2496,7 +2371,7 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = { .ndo_stop = qeth_l3_stop, .ndo_get_stats = qeth_get_stats, .ndo_start_xmit = qeth_l3_hard_start_xmit, - .ndo_features_check = qeth_features_check, + .ndo_features_check = qeth_l3_osa_features_check, .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = qeth_l3_set_rx_mode, .ndo_do_ioctl = qeth_do_ioctl, @@ -2510,6 +2385,7 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = { static int qeth_l3_setup_netdev(struct qeth_card *card) { + unsigned int headroom; int rc; if (card->dev->netdev_ops) @@ -2542,9 +2418,22 @@ static int qeth_l3_setup_netdev(struct qeth_card *card) card->dev->hw_features |= NETIF_F_IPV6_CSUM; card->dev->vlan_features |= NETIF_F_IPV6_CSUM; } + if (qeth_is_supported6(card, IPA_OUTBOUND_TSO)) { + card->dev->hw_features |= NETIF_F_TSO6; + card->dev->vlan_features |= NETIF_F_TSO6; + } + + /* allow for de-acceleration of NETIF_F_HW_VLAN_CTAG_TX: */ + if (card->dev->hw_features & 
NETIF_F_TSO6) + headroom = sizeof(struct qeth_hdr_tso) + VLAN_HLEN; + else if (card->dev->hw_features & NETIF_F_TSO) + headroom = sizeof(struct qeth_hdr_tso); + else + headroom = sizeof(struct qeth_hdr) + VLAN_HLEN; } else if (card->info.type == QETH_CARD_TYPE_IQD) { card->dev->flags |= IFF_NOARP; card->dev->netdev_ops = &qeth_l3_netdev_ops; + headroom = sizeof(struct qeth_hdr) - ETH_HLEN; rc = qeth_l3_iqd_read_initial_mac(card); if (rc) @@ -2555,14 +2444,14 @@ static int qeth_l3_setup_netdev(struct qeth_card *card) } else return -ENODEV; + card->dev->needed_headroom = headroom; card->dev->ethtool_ops = &qeth_l3_ethtool_ops; - card->dev->needed_headroom = sizeof(struct qeth_hdr) - ETH_HLEN; card->dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER; netif_keep_dst(card->dev); - if (card->dev->hw_features & NETIF_F_TSO) + if (card->dev->hw_features & (NETIF_F_TSO | NETIF_F_TSO6)) netif_set_gso_max_size(card->dev, PAGE_SIZE * (QETH_MAX_BUFFER_ELEMENTS(card) - 1)); @@ -2591,7 +2480,6 @@ static int qeth_l3_probe_device(struct ccwgroup_device *gdev) } hash_init(card->ip_htable); hash_init(card->ip_mc_htable); - card->options.layer2 = 0; card->info.hwtrap = 0; return 0; } @@ -2678,10 +2566,6 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) qeth_set_allowed_threads(card, 0xffffffff, 0); qeth_l3_recover_ip(card); - if (card->lan_online) - netif_carrier_on(card->dev); - else - netif_carrier_off(card->dev); qeth_enable_hw_features(card->dev); if (recover_flag == CARD_STATE_RECOVER) { @@ -2819,9 +2703,6 @@ static int qeth_l3_pm_resume(struct ccwgroup_device *gdev) struct qeth_card *card = dev_get_drvdata(&gdev->dev); int rc = 0; - if (gdev->state == CCWGROUP_OFFLINE) - goto out; - if (card->state == CARD_STATE_RECOVER) { rc = __qeth_l3_set_online(card->gdev, 1); if (rc) { @@ -2831,7 +2712,7 @@ static int qeth_l3_pm_resume(struct ccwgroup_device *gdev) } } else rc = __qeth_l3_set_online(card->gdev, 0); -out: + qeth_set_allowed_threads(card, 0xffffffff, 0); netif_device_attach(card->dev); if (rc) diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c index f00045813378..27c8d6ba05bb 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c @@ -150,15 +150,11 @@ static void bnx2fc_clean_rx_queue(struct fc_lport *lp) struct fcoe_rcv_info *fr; struct sk_buff_head *list; struct sk_buff *skb, *next; - struct sk_buff *head; bg = &bnx2fc_global; spin_lock_bh(&bg->fcoe_rx_list.lock); list = &bg->fcoe_rx_list; - head = list->next; - for (skb = head; skb != (struct sk_buff *)list; - skb = next) { - next = skb->next; + skb_queue_walk_safe(list, skb, next) { fr = fcoe_dev_from_skb(skb); if (fr->fr_dev == lp) { __skb_unlink(skb, list); diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c index 3f3af5e74a07..75f876409fb9 100644 --- a/drivers/scsi/cxgbi/libcxgbi.c +++ b/drivers/scsi/cxgbi/libcxgbi.c @@ -784,7 +784,8 @@ cxgbi_check_route6(struct sockaddr *dst_addr, int ifindex) csk->mtu = mtu; csk->dst = dst; - if (ipv6_addr_any(&rt->rt6i_prefsrc.addr)) { + rt6_get_prefsrc(rt, &pref_saddr); + if (ipv6_addr_any(&pref_saddr)) { struct inet6_dev *idev = ip6_dst_idev((struct dst_entry *)rt); err = ipv6_dev_get_saddr(&init_net, idev ? 
idev->dev : NULL, @@ -794,8 +795,6 @@ cxgbi_check_route6(struct sockaddr *dst_addr, int ifindex) &daddr6->sin6_addr); goto rel_rt; } - } else { - pref_saddr = rt->rt6i_prefsrc.addr; } csk->csk_family = AF_INET6; diff --git a/drivers/ssb/driver_chipcommon.c b/drivers/ssb/driver_chipcommon.c index 99a4656d113d..3861cb659cb9 100644 --- a/drivers/ssb/driver_chipcommon.c +++ b/drivers/ssb/driver_chipcommon.c @@ -425,7 +425,7 @@ void ssb_chipco_get_clockcontrol(struct ssb_chipcommon *cc, *m = chipco_read32(cc, SSB_CHIPCO_CLOCK_M2); break; } - /* Fallthough */ + /* Fall through */ default: *m = chipco_read32(cc, SSB_CHIPCO_CLOCK_SB); } diff --git a/drivers/staging/fsl-dpaa2/Kconfig b/drivers/staging/fsl-dpaa2/Kconfig index a4c4b83ddc9c..991e154c0eca 100644 --- a/drivers/staging/fsl-dpaa2/Kconfig +++ b/drivers/staging/fsl-dpaa2/Kconfig @@ -9,14 +9,6 @@ config FSL_DPAA2 Build drivers for Freescale DataPath Acceleration Architecture (DPAA2) family of SoCs. -config FSL_DPAA2_ETH - tristate "Freescale DPAA2 Ethernet" - depends on FSL_DPAA2 && FSL_MC_DPIO - depends on NETDEVICES && ETHERNET - ---help--- - Ethernet driver for Freescale DPAA2 SoCs, using the - Freescale MC bus driver - config FSL_DPAA2_ETHSW tristate "Freescale DPAA2 Ethernet Switch" depends on FSL_DPAA2 @@ -24,11 +16,3 @@ config FSL_DPAA2_ETHSW ---help--- Driver for Freescale DPAA2 Ethernet Switch. Select BRIDGE to have support for bridge tools. - -config FSL_DPAA2_PTP_CLOCK - tristate "Freescale DPAA2 PTP Clock" - depends on FSL_DPAA2_ETH && POSIX_TIMERS - select PTP_1588_CLOCK - help - This driver adds support for using the DPAA2 1588 timer module - as a PTP clock. diff --git a/drivers/staging/fsl-dpaa2/Makefile b/drivers/staging/fsl-dpaa2/Makefile index 9c7062945758..c92ab98c27d9 100644 --- a/drivers/staging/fsl-dpaa2/Makefile +++ b/drivers/staging/fsl-dpaa2/Makefile @@ -2,6 +2,4 @@ # Freescale DataPath Acceleration Architecture Gen2 (DPAA2) drivers # -obj-$(CONFIG_FSL_DPAA2_ETH) += ethernet/ obj-$(CONFIG_FSL_DPAA2_ETHSW) += ethsw/ -obj-$(CONFIG_FSL_DPAA2_PTP_CLOCK) += rtc/ diff --git a/drivers/staging/fsl-dpaa2/ethernet/Makefile b/drivers/staging/fsl-dpaa2/ethernet/Makefile deleted file mode 100644 index 9315ecdba612..000000000000 --- a/drivers/staging/fsl-dpaa2/ethernet/Makefile +++ /dev/null @@ -1,11 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -# -# Makefile for the Freescale DPAA2 Ethernet controller -# - -obj-$(CONFIG_FSL_DPAA2_ETH) += fsl-dpaa2-eth.o - -fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o - -# Needed by the tracing framework -CFLAGS_dpaa2-eth.o := -I$(src) diff --git a/drivers/staging/fsl-dpaa2/ethernet/TODO b/drivers/staging/fsl-dpaa2/ethernet/TODO deleted file mode 100644 index e400a5e427a5..000000000000 --- a/drivers/staging/fsl-dpaa2/ethernet/TODO +++ /dev/null @@ -1,18 +0,0 @@ -* Add a DPAA2 MAC kernel driver in order to allow PHY management; currently - the DPMAC objects and their link to DPNIs are handled by MC internally - and all PHYs are seen as fixed-link -* add more debug support: decide how to expose detailed debug statistics, - add ingress error queue support -* MC firmware uprev; the DPAA2 objects used by the Ethernet driver need to - be kept in sync with binary interface changes in MC -* refine README file -* cleanup - -NOTE: None of the above is must-have before getting the DPAA2 Ethernet driver -out of staging. The main requirement for that is to have the drivers it -depends on, fsl-mc bus and DPIO driver, moved to drivers/bus and drivers/soc -respectively. 
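[Editor's sketch, illustrative only: the bnx2fc hunk above replaces an open-coded next-pointer walk with skb_queue_walk_safe(), which iterates an skb list while permitting the current entry to be unlinked. A minimal sketch of the same pattern, assuming the caller already holds the queue lock as bnx2fc_clean_rx_queue() does:]

	static void sketch_purge_for_dev(struct sk_buff_head *list,
					 struct net_device *dev)
	{
		struct sk_buff *skb, *next;

		/* "safe" variant: current skb may be unlinked mid-walk */
		skb_queue_walk_safe(list, skb, next) {
			if (skb->dev == dev) {	/* placeholder predicate */
				__skb_unlink(skb, list);
				kfree_skb(skb);
			}
		}
	}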
- - Please send any patches to Greg Kroah-Hartman <gregkh@linuxfoundation.org>, - ruxandra.radulescu@nxp.com, devel@driverdev.osuosl.org, - linux-kernel@vger.kernel.org diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c deleted file mode 100644 index 8056a95e1265..000000000000 --- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c +++ /dev/null @@ -1,280 +0,0 @@ -// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) -/* Copyright 2014-2016 Freescale Semiconductor Inc. - * Copyright 2016 NXP - */ - -#include <linux/net_tstamp.h> - -#include "dpni.h" /* DPNI_LINK_OPT_* */ -#include "dpaa2-eth.h" - -/* To be kept in sync with DPNI statistics */ -static char dpaa2_ethtool_stats[][ETH_GSTRING_LEN] = { - "[hw] rx frames", - "[hw] rx bytes", - "[hw] rx mcast frames", - "[hw] rx mcast bytes", - "[hw] rx bcast frames", - "[hw] rx bcast bytes", - "[hw] tx frames", - "[hw] tx bytes", - "[hw] tx mcast frames", - "[hw] tx mcast bytes", - "[hw] tx bcast frames", - "[hw] tx bcast bytes", - "[hw] rx filtered frames", - "[hw] rx discarded frames", - "[hw] rx nobuffer discards", - "[hw] tx discarded frames", - "[hw] tx confirmed frames", -}; - -#define DPAA2_ETH_NUM_STATS ARRAY_SIZE(dpaa2_ethtool_stats) - -static char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = { - /* per-cpu stats */ - "[drv] tx conf frames", - "[drv] tx conf bytes", - "[drv] tx sg frames", - "[drv] tx sg bytes", - "[drv] tx realloc frames", - "[drv] rx sg frames", - "[drv] rx sg bytes", - "[drv] enqueue portal busy", - /* Channel stats */ - "[drv] dequeue portal busy", - "[drv] channel pull errors", - "[drv] cdan", -}; - -#define DPAA2_ETH_NUM_EXTRA_STATS ARRAY_SIZE(dpaa2_ethtool_extras) - -static void dpaa2_eth_get_drvinfo(struct net_device *net_dev, - struct ethtool_drvinfo *drvinfo) -{ - struct dpaa2_eth_priv *priv = netdev_priv(net_dev); - - strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver)); - - snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), - "%u.%u", priv->dpni_ver_major, priv->dpni_ver_minor); - - strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent), - sizeof(drvinfo->bus_info)); -} - -static int -dpaa2_eth_get_link_ksettings(struct net_device *net_dev, - struct ethtool_link_ksettings *link_settings) -{ - struct dpni_link_state state = {0}; - int err = 0; - struct dpaa2_eth_priv *priv = netdev_priv(net_dev); - - err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state); - if (err) { - netdev_err(net_dev, "ERROR %d getting link state\n", err); - goto out; - } - - /* At the moment, we have no way of interrogating the DPMAC - * from the DPNI side - and for that matter there may exist - * no DPMAC at all. So for now we just don't report anything - * beyond the DPNI attributes. - */ - if (state.options & DPNI_LINK_OPT_AUTONEG) - link_settings->base.autoneg = AUTONEG_ENABLE; - if (!(state.options & DPNI_LINK_OPT_HALF_DUPLEX)) - link_settings->base.duplex = DUPLEX_FULL; - link_settings->base.speed = state.rate; - -out: - return err; -} - -#define DPNI_DYNAMIC_LINK_SET_VER_MAJOR 7 -#define DPNI_DYNAMIC_LINK_SET_VER_MINOR 1 -static int -dpaa2_eth_set_link_ksettings(struct net_device *net_dev, - const struct ethtool_link_ksettings *link_settings) -{ - struct dpni_link_cfg cfg = {0}; - struct dpaa2_eth_priv *priv = netdev_priv(net_dev); - int err = 0; - - /* If using an older MC version, the DPNI must be down - * in order to be able to change link settings. Taking steps to let - * the user know that. 
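[Editor's sketch, illustrative only: the deleted (moved out of staging) dpaa2 ethtool code above shows the standard ethtool stats contract — get_sset_count() and get_strings() must agree on the number and order of counters that get_ethtool_stats() later fills. A minimal version of that pairing with hypothetical counter names:]

	static const char sketch_stats[][ETH_GSTRING_LEN] = {
		"rx frames", "tx frames",	/* hypothetical counters */
	};

	static int sketch_get_sset_count(struct net_device *dev, int sset)
	{
		return sset == ETH_SS_STATS ? ARRAY_SIZE(sketch_stats)
					    : -EOPNOTSUPP;
	}

	static void sketch_get_strings(struct net_device *dev, u32 sset, u8 *data)
	{
		int i;

		if (sset != ETH_SS_STATS)
			return;
		for (i = 0; i < ARRAY_SIZE(sketch_stats); i++)
			strlcpy(data + i * ETH_GSTRING_LEN, sketch_stats[i],
				ETH_GSTRING_LEN);
	}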
- */ - if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_DYNAMIC_LINK_SET_VER_MAJOR, - DPNI_DYNAMIC_LINK_SET_VER_MINOR) < 0) { - if (netif_running(net_dev)) { - netdev_info(net_dev, "Interface must be brought down first.\n"); - return -EACCES; - } - } - - cfg.rate = link_settings->base.speed; - if (link_settings->base.autoneg == AUTONEG_ENABLE) - cfg.options |= DPNI_LINK_OPT_AUTONEG; - else - cfg.options &= ~DPNI_LINK_OPT_AUTONEG; - if (link_settings->base.duplex == DUPLEX_HALF) - cfg.options |= DPNI_LINK_OPT_HALF_DUPLEX; - else - cfg.options &= ~DPNI_LINK_OPT_HALF_DUPLEX; - - err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg); - if (err) - /* ethtool will be loud enough if we return an error; no point - * in putting our own error message on the console by default - */ - netdev_dbg(net_dev, "ERROR %d setting link cfg\n", err); - - return err; -} - -static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset, - u8 *data) -{ - u8 *p = data; - int i; - - switch (stringset) { - case ETH_SS_STATS: - for (i = 0; i < DPAA2_ETH_NUM_STATS; i++) { - strlcpy(p, dpaa2_ethtool_stats[i], ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - } - for (i = 0; i < DPAA2_ETH_NUM_EXTRA_STATS; i++) { - strlcpy(p, dpaa2_ethtool_extras[i], ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - } - break; - } -} - -static int dpaa2_eth_get_sset_count(struct net_device *net_dev, int sset) -{ - switch (sset) { - case ETH_SS_STATS: /* ethtool_get_stats(), ethtool_get_drvinfo() */ - return DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS; - default: - return -EOPNOTSUPP; - } -} - -/** Fill in hardware counters, as returned by MC. - */ -static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev, - struct ethtool_stats *stats, - u64 *data) -{ - int i = 0; - int j, k, err; - int num_cnt; - union dpni_statistics dpni_stats; - u64 cdan = 0; - u64 portal_busy = 0, pull_err = 0; - struct dpaa2_eth_priv *priv = netdev_priv(net_dev); - struct dpaa2_eth_drv_stats *extras; - struct dpaa2_eth_ch_stats *ch_stats; - - memset(data, 0, - sizeof(u64) * (DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS)); - - /* Print standard counters, from DPNI statistics */ - for (j = 0; j <= 2; j++) { - err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token, - j, &dpni_stats); - if (err != 0) - netdev_warn(net_dev, "dpni_get_stats(%d) failed\n", j); - switch (j) { - case 0: - num_cnt = sizeof(dpni_stats.page_0) / sizeof(u64); - break; - case 1: - num_cnt = sizeof(dpni_stats.page_1) / sizeof(u64); - break; - case 2: - num_cnt = sizeof(dpni_stats.page_2) / sizeof(u64); - break; - } - for (k = 0; k < num_cnt; k++) - *(data + i++) = dpni_stats.raw.counter[k]; - } - - /* Print per-cpu extra stats */ - for_each_online_cpu(k) { - extras = per_cpu_ptr(priv->percpu_extras, k); - for (j = 0; j < sizeof(*extras) / sizeof(__u64); j++) - *((__u64 *)data + i + j) += *((__u64 *)extras + j); - } - i += j; - - for (j = 0; j < priv->num_channels; j++) { - ch_stats = &priv->channel[j]->stats; - cdan += ch_stats->cdan; - portal_busy += ch_stats->dequeue_portal_busy; - pull_err += ch_stats->pull_err; - } - - *(data + i++) = portal_busy; - *(data + i++) = pull_err; - *(data + i++) = cdan; -} - -static int dpaa2_eth_get_rxnfc(struct net_device *net_dev, - struct ethtool_rxnfc *rxnfc, u32 *rule_locs) -{ - struct dpaa2_eth_priv *priv = netdev_priv(net_dev); - - switch (rxnfc->cmd) { - case ETHTOOL_GRXFH: - /* we purposely ignore cmd->flow_type for now, because the - * classifier only supports a single set of fields for all - * protocols - */ - rxnfc->data = 
priv->rx_hash_fields; - break; - case ETHTOOL_GRXRINGS: - rxnfc->data = dpaa2_eth_queue_count(priv); - break; - default: - return -EOPNOTSUPP; - } - - return 0; -} - -int dpaa2_phc_index = -1; -EXPORT_SYMBOL(dpaa2_phc_index); - -static int dpaa2_eth_get_ts_info(struct net_device *dev, - struct ethtool_ts_info *info) -{ - info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE | - SOF_TIMESTAMPING_RX_HARDWARE | - SOF_TIMESTAMPING_RAW_HARDWARE; - - info->phc_index = dpaa2_phc_index; - - info->tx_types = (1 << HWTSTAMP_TX_OFF) | - (1 << HWTSTAMP_TX_ON); - - info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | - (1 << HWTSTAMP_FILTER_ALL); - return 0; -} - -const struct ethtool_ops dpaa2_ethtool_ops = { - .get_drvinfo = dpaa2_eth_get_drvinfo, - .get_link = ethtool_op_get_link, - .get_link_ksettings = dpaa2_eth_get_link_ksettings, - .set_link_ksettings = dpaa2_eth_set_link_ksettings, - .get_sset_count = dpaa2_eth_get_sset_count, - .get_ethtool_stats = dpaa2_eth_get_ethtool_stats, - .get_strings = dpaa2_eth_get_strings, - .get_rxnfc = dpaa2_eth_get_rxnfc, - .get_ts_info = dpaa2_eth_get_ts_info, -}; diff --git a/drivers/staging/fsl-dpaa2/rtc/Makefile b/drivers/staging/fsl-dpaa2/rtc/Makefile deleted file mode 100644 index 5468da071163..000000000000 --- a/drivers/staging/fsl-dpaa2/rtc/Makefile +++ /dev/null @@ -1,7 +0,0 @@ -# -# Makefile for the Freescale DPAA2 PTP clock -# - -obj-$(CONFIG_FSL_DPAA2_PTP_CLOCK) += dpaa2-rtc.o - -dpaa2-rtc-objs := rtc.o dprtc.o diff --git a/drivers/staging/fsl-dpaa2/rtc/dprtc-cmd.h b/drivers/staging/fsl-dpaa2/rtc/dprtc-cmd.h deleted file mode 100644 index db6a473430cc..000000000000 --- a/drivers/staging/fsl-dpaa2/rtc/dprtc-cmd.h +++ /dev/null @@ -1,137 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright 2013-2016 Freescale Semiconductor Inc. 
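[Editor's sketch, illustrative only: the deleted dpaa2_eth_get_ethtool_stats() above folds per-CPU driver counters into the flat u64 array ethtool expects by treating the per-CPU struct as an array of u64. Minimal sketch of that fold, with a hypothetical counter struct:]

	struct sketch_pcpu_stats {	/* hypothetical, all fields u64 */
		u64 tx_frames;
		u64 rx_frames;
	};

	static void sketch_fold_pcpu(struct sketch_pcpu_stats __percpu *pcpu,
				     u64 *out)
	{
		struct sketch_pcpu_stats *s;
		int cpu, i;

		for_each_online_cpu(cpu) {
			s = per_cpu_ptr(pcpu, cpu);
			/* sum field-by-field across CPUs, as the code above does */
			for (i = 0; i < sizeof(*s) / sizeof(u64); i++)
				out[i] += *((u64 *)s + i);
		}
	}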
- * Copyright 2016-2018 NXP - */ - -#ifndef _FSL_DPRTC_CMD_H -#define _FSL_DPRTC_CMD_H - -/* DPRTC Version */ -#define DPRTC_VER_MAJOR 2 -#define DPRTC_VER_MINOR 0 - -/* Command versioning */ -#define DPRTC_CMD_BASE_VERSION 1 -#define DPRTC_CMD_ID_OFFSET 4 - -#define DPRTC_CMD(id) (((id) << DPRTC_CMD_ID_OFFSET) | DPRTC_CMD_BASE_VERSION) - -/* Command IDs */ -#define DPRTC_CMDID_CLOSE DPRTC_CMD(0x800) -#define DPRTC_CMDID_OPEN DPRTC_CMD(0x810) -#define DPRTC_CMDID_CREATE DPRTC_CMD(0x910) -#define DPRTC_CMDID_DESTROY DPRTC_CMD(0x990) -#define DPRTC_CMDID_GET_API_VERSION DPRTC_CMD(0xa10) - -#define DPRTC_CMDID_ENABLE DPRTC_CMD(0x002) -#define DPRTC_CMDID_DISABLE DPRTC_CMD(0x003) -#define DPRTC_CMDID_GET_ATTR DPRTC_CMD(0x004) -#define DPRTC_CMDID_RESET DPRTC_CMD(0x005) -#define DPRTC_CMDID_IS_ENABLED DPRTC_CMD(0x006) - -#define DPRTC_CMDID_SET_IRQ_ENABLE DPRTC_CMD(0x012) -#define DPRTC_CMDID_GET_IRQ_ENABLE DPRTC_CMD(0x013) -#define DPRTC_CMDID_SET_IRQ_MASK DPRTC_CMD(0x014) -#define DPRTC_CMDID_GET_IRQ_MASK DPRTC_CMD(0x015) -#define DPRTC_CMDID_GET_IRQ_STATUS DPRTC_CMD(0x016) -#define DPRTC_CMDID_CLEAR_IRQ_STATUS DPRTC_CMD(0x017) - -#define DPRTC_CMDID_SET_CLOCK_OFFSET DPRTC_CMD(0x1d0) -#define DPRTC_CMDID_SET_FREQ_COMPENSATION DPRTC_CMD(0x1d1) -#define DPRTC_CMDID_GET_FREQ_COMPENSATION DPRTC_CMD(0x1d2) -#define DPRTC_CMDID_GET_TIME DPRTC_CMD(0x1d3) -#define DPRTC_CMDID_SET_TIME DPRTC_CMD(0x1d4) -#define DPRTC_CMDID_SET_ALARM DPRTC_CMD(0x1d5) -#define DPRTC_CMDID_SET_PERIODIC_PULSE DPRTC_CMD(0x1d6) -#define DPRTC_CMDID_CLEAR_PERIODIC_PULSE DPRTC_CMD(0x1d7) -#define DPRTC_CMDID_SET_EXT_TRIGGER DPRTC_CMD(0x1d8) -#define DPRTC_CMDID_CLEAR_EXT_TRIGGER DPRTC_CMD(0x1d9) -#define DPRTC_CMDID_GET_EXT_TRIGGER_TIMESTAMP DPRTC_CMD(0x1dA) - -/* Macros for accessing command fields smaller than 1byte */ -#define DPRTC_MASK(field) \ - GENMASK(DPRTC_##field##_SHIFT + DPRTC_##field##_SIZE - 1, \ - DPRTC_##field##_SHIFT) -#define dprtc_get_field(var, field) \ - (((var) & DPRTC_MASK(field)) >> DPRTC_##field##_SHIFT) - -#pragma pack(push, 1) -struct dprtc_cmd_open { - __le32 dprtc_id; -}; - -struct dprtc_cmd_destroy { - __le32 object_id; -}; - -#define DPRTC_ENABLE_SHIFT 0 -#define DPRTC_ENABLE_SIZE 1 - -struct dprtc_rsp_is_enabled { - u8 en; -}; - -struct dprtc_cmd_get_irq { - __le32 pad; - u8 irq_index; -}; - -struct dprtc_cmd_set_irq_enable { - u8 en; - u8 pad[3]; - u8 irq_index; -}; - -struct dprtc_rsp_get_irq_enable { - u8 en; -}; - -struct dprtc_cmd_set_irq_mask { - __le32 mask; - u8 irq_index; -}; - -struct dprtc_rsp_get_irq_mask { - __le32 mask; -}; - -struct dprtc_cmd_get_irq_status { - __le32 status; - u8 irq_index; -}; - -struct dprtc_rsp_get_irq_status { - __le32 status; -}; - -struct dprtc_cmd_clear_irq_status { - __le32 status; - u8 irq_index; -}; - -struct dprtc_rsp_get_attributes { - __le32 pad; - __le32 id; -}; - -struct dprtc_cmd_set_clock_offset { - __le64 offset; -}; - -struct dprtc_get_freq_compensation { - __le32 freq_compensation; -}; - -struct dprtc_time { - __le64 time; -}; - -struct dprtc_rsp_get_api_version { - __le16 major; - __le16 minor; -}; - -#pragma pack(pop) - -#endif /* _FSL_DPRTC_CMD_H */ diff --git a/drivers/staging/fsl-dpaa2/rtc/dprtc.c b/drivers/staging/fsl-dpaa2/rtc/dprtc.c deleted file mode 100644 index 68ae6ffefbf5..000000000000 --- a/drivers/staging/fsl-dpaa2/rtc/dprtc.c +++ /dev/null @@ -1,701 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright 2013-2016 Freescale Semiconductor Inc. 
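[Editor's sketch, illustrative only: the DPRTC_CMD() macro in the deleted dprtc-cmd.h above encodes an interface version into the low nibble of every command ID, letting firmware dispatch old and new layouts of the same command. Generic form of the scheme:]

	#define SKETCH_CMD_BASE_VERSION	1
	#define SKETCH_CMD_ID_OFFSET	4
	#define SKETCH_CMD(id) \
		(((id) << SKETCH_CMD_ID_OFFSET) | SKETCH_CMD_BASE_VERSION)

	/* SKETCH_CMD(0x810) == 0x8101: command 0x810, interface version 1 */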
- * Copyright 2016-2018 NXP - */ - -#include <linux/fsl/mc.h> - -#include "dprtc.h" -#include "dprtc-cmd.h" - -/** - * dprtc_open() - Open a control session for the specified object. - * @mc_io: Pointer to MC portal's I/O object - * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' - * @dprtc_id: DPRTC unique ID - * @token: Returned token; use in subsequent API calls - * - * This function can be used to open a control session for an - * already created object; an object may have been declared in - * the DPL or by calling the dprtc_create function. - * This function returns a unique authentication token, - * associated with the specific object ID and the specific MC - * portal; this token must be used in all subsequent commands for - * this specific object - * - * Return: '0' on Success; Error code otherwise. - */ -int dprtc_open(struct fsl_mc_io *mc_io, - u32 cmd_flags, - int dprtc_id, - u16 *token) -{ - struct dprtc_cmd_open *cmd_params; - struct fsl_mc_command cmd = { 0 }; - int err; - - cmd.header = mc_encode_cmd_header(DPRTC_CMDID_OPEN, - cmd_flags, - 0); - cmd_params = (struct dprtc_cmd_open *)cmd.params; - cmd_params->dprtc_id = cpu_to_le32(dprtc_id); - - err = mc_send_command(mc_io, &cmd); - if (err) - return err; - - *token = mc_cmd_hdr_read_token(&cmd); - - return 0; -} - -/** - * dprtc_close() - Close the control session of the object - * @mc_io: Pointer to MC portal's I/O object - * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' - * @token: Token of DPRTC object - * - * After this function is called, no further operations are - * allowed on the object without opening a new control session. - * - * Return: '0' on Success; Error code otherwise. - */ -int dprtc_close(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token) -{ - struct fsl_mc_command cmd = { 0 }; - - cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CLOSE, cmd_flags, - token); - - return mc_send_command(mc_io, &cmd); -} - -/** - * dprtc_create() - Create the DPRTC object. - * @mc_io: Pointer to MC portal's I/O object - * @dprc_token: Parent container token; '0' for default container - * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' - * @cfg: Configuration structure - * @obj_id: Returned object id - * - * Create the DPRTC object, allocate required resources and - * perform required initialization. - * - * The function accepts an authentication token of a parent - * container that this object should be assigned to. The token - * can be '0' so the object will be assigned to the default container. - * The newly created object can be opened with the returned - * object id and using the container's associated tokens and MC portals. - * - * Return: '0' on Success; Error code otherwise. - */ -int dprtc_create(struct fsl_mc_io *mc_io, - u16 dprc_token, - u32 cmd_flags, - const struct dprtc_cfg *cfg, - u32 *obj_id) -{ - struct fsl_mc_command cmd = { 0 }; - int err; - - cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CREATE, - cmd_flags, - dprc_token); - - err = mc_send_command(mc_io, &cmd); - if (err) - return err; - - *obj_id = mc_cmd_read_object_id(&cmd); - - return 0; -} - -/** - * dprtc_destroy() - Destroy the DPRTC object and release all its resources. 
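[Editor's sketch, illustrative only: every wrapper in the deleted dprtc.c above follows the same MC-portal round trip — build a struct fsl_mc_command, overlay a little-endian wire struct on cmd.params, send it, then read the response out of the same buffer. Condensed restatement using dprtc_open() as the example; dprtc_cmd_open is the wire struct from dprtc-cmd.h above:]

	static int sketch_dprtc_open(struct fsl_mc_io *mc_io, u32 cmd_flags,
				     int dprtc_id, u16 *token)
	{
		struct fsl_mc_command cmd = { 0 };
		struct dprtc_cmd_open *params;
		int err;

		cmd.header = mc_encode_cmd_header(DPRTC_CMDID_OPEN, cmd_flags, 0);
		params = (struct dprtc_cmd_open *)cmd.params;
		params->dprtc_id = cpu_to_le32(dprtc_id);	/* LE on the wire */

		err = mc_send_command(mc_io, &cmd);	/* blocking portal I/O */
		if (err)
			return err;

		*token = mc_cmd_hdr_read_token(&cmd);	/* auth token for later cmds */
		return 0;
	}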
- * @mc_io: Pointer to MC portal's I/O object - * @dprc_token: Parent container token; '0' for default container - * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' - * @object_id: The object id; it must be a valid id within the container that - * created this object; - * - * The function accepts the authentication token of the parent container that - * created the object (not the one that currently owns the object). The object - * is searched within parent using the provided 'object_id'. - * All tokens to the object must be closed before calling destroy. - * - * Return: '0' on Success; error code otherwise. - */ -int dprtc_destroy(struct fsl_mc_io *mc_io, - u16 dprc_token, - u32 cmd_flags, - u32 object_id) -{ - struct dprtc_cmd_destroy *cmd_params; - struct fsl_mc_command cmd = { 0 }; - - cmd.header = mc_encode_cmd_header(DPRTC_CMDID_DESTROY, - cmd_flags, - dprc_token); - cmd_params = (struct dprtc_cmd_destroy *)cmd.params; - cmd_params->object_id = cpu_to_le32(object_id); - - return mc_send_command(mc_io, &cmd); -} - -/** - * dprtc_enable() - Enable the DPRTC. - * @mc_io: Pointer to MC portal's I/O object - * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' - * @token: Token of DPRTC object - * - * Return: '0' on Success; Error code otherwise. - */ -int dprtc_enable(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token) -{ - struct fsl_mc_command cmd = { 0 }; - - cmd.header = mc_encode_cmd_header(DPRTC_CMDID_ENABLE, cmd_flags, - token); - - return mc_send_command(mc_io, &cmd); -} - -/** - * dprtc_disable() - Disable the DPRTC. - * @mc_io: Pointer to MC portal's I/O object - * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' - * @token: Token of DPRTC object - * - * Return: '0' on Success; Error code otherwise. - */ -int dprtc_disable(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token) -{ - struct fsl_mc_command cmd = { 0 }; - - cmd.header = mc_encode_cmd_header(DPRTC_CMDID_DISABLE, - cmd_flags, - token); - - return mc_send_command(mc_io, &cmd); -} - -/** - * dprtc_is_enabled() - Check if the DPRTC is enabled. - * @mc_io: Pointer to MC portal's I/O object - * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' - * @token: Token of DPRTC object - * @en: Returns '1' if object is enabled; '0' otherwise - * - * Return: '0' on Success; Error code otherwise. - */ -int dprtc_is_enabled(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - int *en) -{ - struct dprtc_rsp_is_enabled *rsp_params; - struct fsl_mc_command cmd = { 0 }; - int err; - - cmd.header = mc_encode_cmd_header(DPRTC_CMDID_IS_ENABLED, cmd_flags, - token); - - err = mc_send_command(mc_io, &cmd); - if (err) - return err; - - rsp_params = (struct dprtc_rsp_is_enabled *)cmd.params; - *en = dprtc_get_field(rsp_params->en, ENABLE); - - return 0; -} - -/** - * dprtc_reset() - Reset the DPRTC, returns the object to initial state. - * @mc_io: Pointer to MC portal's I/O object - * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' - * @token: Token of DPRTC object - * - * Return: '0' on Success; Error code otherwise. - */ -int dprtc_reset(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token) -{ - struct fsl_mc_command cmd = { 0 }; - - cmd.header = mc_encode_cmd_header(DPRTC_CMDID_RESET, - cmd_flags, - token); - - return mc_send_command(mc_io, &cmd); -} - -/** - * dprtc_set_irq_enable() - Set overall interrupt state. 
- * @mc_io: Pointer to MC portal's I/O object - * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' - * @token: Token of DPRTC object - * @irq_index: The interrupt index to configure - * @en: Interrupt state - enable = 1, disable = 0 - * - * Allows GPP software to control when interrupts are generated. - * Each interrupt can have up to 32 causes. The enable/disable control's the - * overall interrupt state. if the interrupt is disabled no causes will cause - * an interrupt. - * - * Return: '0' on Success; Error code otherwise. - */ -int dprtc_set_irq_enable(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u8 irq_index, - u8 en) -{ - struct dprtc_cmd_set_irq_enable *cmd_params; - struct fsl_mc_command cmd = { 0 }; - - cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_IRQ_ENABLE, - cmd_flags, - token); - cmd_params = (struct dprtc_cmd_set_irq_enable *)cmd.params; - cmd_params->irq_index = irq_index; - cmd_params->en = en; - - return mc_send_command(mc_io, &cmd); -} - -/** - * dprtc_get_irq_enable() - Get overall interrupt state - * @mc_io: Pointer to MC portal's I/O object - * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' - * @token: Token of DPRTC object - * @irq_index: The interrupt index to configure - * @en: Returned interrupt state - enable = 1, disable = 0 - * - * Return: '0' on Success; Error code otherwise. - */ -int dprtc_get_irq_enable(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u8 irq_index, - u8 *en) -{ - struct dprtc_rsp_get_irq_enable *rsp_params; - struct dprtc_cmd_get_irq *cmd_params; - struct fsl_mc_command cmd = { 0 }; - int err; - - cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_IRQ_ENABLE, - cmd_flags, - token); - cmd_params = (struct dprtc_cmd_get_irq *)cmd.params; - cmd_params->irq_index = irq_index; - - err = mc_send_command(mc_io, &cmd); - if (err) - return err; - - rsp_params = (struct dprtc_rsp_get_irq_enable *)cmd.params; - *en = rsp_params->en; - - return 0; -} - -/** - * dprtc_set_irq_mask() - Set interrupt mask. - * @mc_io: Pointer to MC portal's I/O object - * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' - * @token: Token of DPRTC object - * @irq_index: The interrupt index to configure - * @mask: Event mask to trigger interrupt; - * each bit: - * 0 = ignore event - * 1 = consider event for asserting IRQ - * - * Every interrupt can have up to 32 causes and the interrupt model supports - * masking/unmasking each cause independently - * - * Return: '0' on Success; Error code otherwise. - */ -int dprtc_set_irq_mask(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u8 irq_index, - u32 mask) -{ - struct dprtc_cmd_set_irq_mask *cmd_params; - struct fsl_mc_command cmd = { 0 }; - - cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_IRQ_MASK, - cmd_flags, - token); - cmd_params = (struct dprtc_cmd_set_irq_mask *)cmd.params; - cmd_params->mask = cpu_to_le32(mask); - cmd_params->irq_index = irq_index; - - return mc_send_command(mc_io, &cmd); -} - -/** - * dprtc_get_irq_mask() - Get interrupt mask. - * @mc_io: Pointer to MC portal's I/O object - * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' - * @token: Token of DPRTC object - * @irq_index: The interrupt index to configure - * @mask: Returned event mask to trigger interrupt - * - * Every interrupt can have up to 32 causes and the interrupt model supports - * masking/unmasking each cause independently - * - * Return: '0' on Success; Error code otherwise. 
- */ -int dprtc_get_irq_mask(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u8 irq_index, - u32 *mask) -{ - struct dprtc_rsp_get_irq_mask *rsp_params; - struct dprtc_cmd_get_irq *cmd_params; - struct fsl_mc_command cmd = { 0 }; - int err; - - cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_IRQ_MASK, - cmd_flags, - token); - cmd_params = (struct dprtc_cmd_get_irq *)cmd.params; - cmd_params->irq_index = irq_index; - - err = mc_send_command(mc_io, &cmd); - if (err) - return err; - - rsp_params = (struct dprtc_rsp_get_irq_mask *)cmd.params; - *mask = le32_to_cpu(rsp_params->mask); - - return 0; -} - -/** - * dprtc_get_irq_status() - Get the current status of any pending interrupts. - * - * @mc_io: Pointer to MC portal's I/O object - * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' - * @token: Token of DPRTC object - * @irq_index: The interrupt index to configure - * @status: Returned interrupts status - one bit per cause: - * 0 = no interrupt pending - * 1 = interrupt pending - * - * Return: '0' on Success; Error code otherwise. - */ -int dprtc_get_irq_status(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u8 irq_index, - u32 *status) -{ - struct dprtc_cmd_get_irq_status *cmd_params; - struct dprtc_rsp_get_irq_status *rsp_params; - struct fsl_mc_command cmd = { 0 }; - int err; - - cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_IRQ_STATUS, - cmd_flags, - token); - cmd_params = (struct dprtc_cmd_get_irq_status *)cmd.params; - cmd_params->status = cpu_to_le32(*status); - cmd_params->irq_index = irq_index; - - err = mc_send_command(mc_io, &cmd); - if (err) - return err; - - rsp_params = (struct dprtc_rsp_get_irq_status *)cmd.params; - *status = le32_to_cpu(rsp_params->status); - - return 0; -} - -/** - * dprtc_clear_irq_status() - Clear a pending interrupt's status - * - * @mc_io: Pointer to MC portal's I/O object - * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' - * @token: Token of DPRTC object - * @irq_index: The interrupt index to configure - * @status: Bits to clear (W1C) - one bit per cause: - * 0 = don't change - * 1 = clear status bit - * - * Return: '0' on Success; Error code otherwise. - */ -int dprtc_clear_irq_status(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u8 irq_index, - u32 status) -{ - struct dprtc_cmd_clear_irq_status *cmd_params; - struct fsl_mc_command cmd = { 0 }; - - cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CLEAR_IRQ_STATUS, - cmd_flags, - token); - cmd_params = (struct dprtc_cmd_clear_irq_status *)cmd.params; - cmd_params->irq_index = irq_index; - cmd_params->status = cpu_to_le32(status); - - return mc_send_command(mc_io, &cmd); -} - -/** - * dprtc_get_attributes - Retrieve DPRTC attributes. - * - * @mc_io: Pointer to MC portal's I/O object - * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' - * @token: Token of DPRTC object - * @attr: Returned object's attributes - * - * Return: '0' on Success; Error code otherwise. - */ -int dprtc_get_attributes(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - struct dprtc_attr *attr) -{ - struct dprtc_rsp_get_attributes *rsp_params; - struct fsl_mc_command cmd = { 0 }; - int err; - - cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_ATTR, - cmd_flags, - token); - - err = mc_send_command(mc_io, &cmd); - if (err) - return err; - - rsp_params = (struct dprtc_rsp_get_attributes *)cmd.params; - attr->id = le32_to_cpu(rsp_params->id); - - return 0; -} - -/** - * dprtc_set_clock_offset() - Sets the clock's offset - * (usually relative to another clock). 
- * - * @mc_io: Pointer to MC portal's I/O object - * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' - * @token: Token of DPRTC object - * @offset: New clock offset (in nanoseconds). - * - * Return: '0' on Success; Error code otherwise. - */ -int dprtc_set_clock_offset(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - int64_t offset) -{ - struct dprtc_cmd_set_clock_offset *cmd_params; - struct fsl_mc_command cmd = { 0 }; - - cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_CLOCK_OFFSET, - cmd_flags, - token); - cmd_params = (struct dprtc_cmd_set_clock_offset *)cmd.params; - cmd_params->offset = cpu_to_le64(offset); - - return mc_send_command(mc_io, &cmd); -} - -/** - * dprtc_set_freq_compensation() - Sets a new frequency compensation value. - * - * @mc_io: Pointer to MC portal's I/O object - * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' - * @token: Token of DPRTC object - * @freq_compensation: The new frequency compensation value to set. - * - * Return: '0' on Success; Error code otherwise. - */ -int dprtc_set_freq_compensation(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u32 freq_compensation) -{ - struct dprtc_get_freq_compensation *cmd_params; - struct fsl_mc_command cmd = { 0 }; - - cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_FREQ_COMPENSATION, - cmd_flags, - token); - cmd_params = (struct dprtc_get_freq_compensation *)cmd.params; - cmd_params->freq_compensation = cpu_to_le32(freq_compensation); - - return mc_send_command(mc_io, &cmd); -} - -/** - * dprtc_get_freq_compensation() - Retrieves the frequency compensation value - * - * @mc_io: Pointer to MC portal's I/O object - * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' - * @token: Token of DPRTC object - * @freq_compensation: Frequency compensation value - * - * Return: '0' on Success; Error code otherwise. - */ -int dprtc_get_freq_compensation(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u32 *freq_compensation) -{ - struct dprtc_get_freq_compensation *rsp_params; - struct fsl_mc_command cmd = { 0 }; - int err; - - cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_FREQ_COMPENSATION, - cmd_flags, - token); - - err = mc_send_command(mc_io, &cmd); - if (err) - return err; - - rsp_params = (struct dprtc_get_freq_compensation *)cmd.params; - *freq_compensation = le32_to_cpu(rsp_params->freq_compensation); - - return 0; -} - -/** - * dprtc_get_time() - Returns the current RTC time. - * - * @mc_io: Pointer to MC portal's I/O object - * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' - * @token: Token of DPRTC object - * @time: Current RTC time. - * - * Return: '0' on Success; Error code otherwise. - */ -int dprtc_get_time(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - uint64_t *time) -{ - struct dprtc_time *rsp_params; - struct fsl_mc_command cmd = { 0 }; - int err; - - cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_TIME, - cmd_flags, - token); - - err = mc_send_command(mc_io, &cmd); - if (err) - return err; - - rsp_params = (struct dprtc_time *)cmd.params; - *time = le64_to_cpu(rsp_params->time); - - return 0; -} - -/** - * dprtc_set_time() - Updates current RTC time. - * - * @mc_io: Pointer to MC portal's I/O object - * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' - * @token: Token of DPRTC object - * @time: New RTC time. - * - * Return: '0' on Success; Error code otherwise. 
- */ -int dprtc_set_time(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - uint64_t time) -{ - struct dprtc_time *cmd_params; - struct fsl_mc_command cmd = { 0 }; - - cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_TIME, - cmd_flags, - token); - cmd_params = (struct dprtc_time *)cmd.params; - cmd_params->time = cpu_to_le64(time); - - return mc_send_command(mc_io, &cmd); -} - -/** - * dprtc_set_alarm() - Defines and sets alarm. - * - * @mc_io: Pointer to MC portal's I/O object - * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' - * @token: Token of DPRTC object - * @time: In nanoseconds, the time when the alarm - * should go off - must be a multiple of - * 1 microsecond - * - * Return: '0' on Success; Error code otherwise. - */ -int dprtc_set_alarm(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, uint64_t time) -{ - struct dprtc_time *cmd_params; - struct fsl_mc_command cmd = { 0 }; - - cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_ALARM, - cmd_flags, - token); - cmd_params = (struct dprtc_time *)cmd.params; - cmd_params->time = cpu_to_le64(time); - - return mc_send_command(mc_io, &cmd); -} - -/** - * dprtc_get_api_version() - Get Data Path Real Time Counter API version - * @mc_io: Pointer to MC portal's I/O object - * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' - * @major_ver: Major version of data path real time counter API - * @minor_ver: Minor version of data path real time counter API - * - * Return: '0' on Success; Error code otherwise. - */ -int dprtc_get_api_version(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 *major_ver, - u16 *minor_ver) -{ - struct dprtc_rsp_get_api_version *rsp_params; - struct fsl_mc_command cmd = { 0 }; - int err; - - cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_API_VERSION, - cmd_flags, - 0); - - err = mc_send_command(mc_io, &cmd); - if (err) - return err; - - rsp_params = (struct dprtc_rsp_get_api_version *)cmd.params; - *major_ver = le16_to_cpu(rsp_params->major); - *minor_ver = le16_to_cpu(rsp_params->minor); - - return 0; -} diff --git a/drivers/staging/fsl-dpaa2/rtc/dprtc.h b/drivers/staging/fsl-dpaa2/rtc/dprtc.h deleted file mode 100644 index 08f7c7bebbca..000000000000 --- a/drivers/staging/fsl-dpaa2/rtc/dprtc.h +++ /dev/null @@ -1,164 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright 2013-2016 Freescale Semiconductor Inc. 
- * Copyright 2016-2018 NXP - */ - -#ifndef __FSL_DPRTC_H -#define __FSL_DPRTC_H - -/* Data Path Real Time Counter API - * Contains initialization APIs and runtime control APIs for RTC - */ - -struct fsl_mc_io; - -/** - * Number of irq's - */ -#define DPRTC_MAX_IRQ_NUM 1 -#define DPRTC_IRQ_INDEX 0 - -/** - * Interrupt event masks: - */ - -/** - * Interrupt event mask indicating alarm event had occurred - */ -#define DPRTC_EVENT_ALARM 0x40000000 -/** - * Interrupt event mask indicating periodic pulse event had occurred - */ -#define DPRTC_EVENT_PPS 0x08000000 - -int dprtc_open(struct fsl_mc_io *mc_io, - u32 cmd_flags, - int dprtc_id, - u16 *token); - -int dprtc_close(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token); - -/** - * struct dprtc_cfg - Structure representing DPRTC configuration - * @options: place holder - */ -struct dprtc_cfg { - u32 options; -}; - -int dprtc_create(struct fsl_mc_io *mc_io, - u16 dprc_token, - u32 cmd_flags, - const struct dprtc_cfg *cfg, - u32 *obj_id); - -int dprtc_destroy(struct fsl_mc_io *mc_io, - u16 dprc_token, - u32 cmd_flags, - u32 object_id); - -int dprtc_enable(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token); - -int dprtc_disable(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token); - -int dprtc_is_enabled(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - int *en); - -int dprtc_reset(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token); - -int dprtc_set_clock_offset(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - int64_t offset); - -int dprtc_set_freq_compensation(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u32 freq_compensation); - -int dprtc_get_freq_compensation(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u32 *freq_compensation); - -int dprtc_get_time(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - uint64_t *time); - -int dprtc_set_time(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - uint64_t time); - -int dprtc_set_alarm(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - uint64_t time); - -int dprtc_set_irq_enable(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u8 irq_index, - u8 en); - -int dprtc_get_irq_enable(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u8 irq_index, - u8 *en); - -int dprtc_set_irq_mask(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u8 irq_index, - u32 mask); - -int dprtc_get_irq_mask(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u8 irq_index, - u32 *mask); - -int dprtc_get_irq_status(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u8 irq_index, - u32 *status); - -int dprtc_clear_irq_status(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u8 irq_index, - u32 status); - -/** - * struct dprtc_attr - Structure representing DPRTC attributes - * @id: DPRTC object ID - */ -struct dprtc_attr { - int id; -}; - -int dprtc_get_attributes(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - struct dprtc_attr *attr); - -int dprtc_get_api_version(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 *major_ver, - u16 *minor_ver); - -#endif /* __FSL_DPRTC_H */ diff --git a/drivers/staging/mt7621-eth/mdio.c b/drivers/staging/mt7621-eth/mdio.c index 7ad0c4141205..2c6e1800a3fd 100644 --- a/drivers/staging/mt7621-eth/mdio.c +++ b/drivers/staging/mt7621-eth/mdio.c @@ -112,7 +112,7 @@ static void phy_init(struct mtk_eth *eth, struct mtk_mac *mac, phy->autoneg = AUTONEG_ENABLE; phy->speed = 0; phy->duplex = 0; - phy->supported &= PHY_BASIC_FEATURES; + phy_set_max_speed(phy, SPEED_100); phy->advertising = 
phy->supported | ADVERTISED_Autoneg; phy_start_aneg(phy); diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c index d2605158546b..96f265eee007 100644 --- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c +++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c @@ -1149,7 +1149,7 @@ static enum reset_type _rtl92e_tx_check_stuck(struct net_device *dev) if (skb_queue_len(&ring->queue) == 0) { continue; } else { - skb = (&ring->queue)->next; + skb = __skb_peek(&ring->queue); tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE); tcb_desc->nStuckCount++; diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 4e656f89cb22..ab11b2bee273 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -116,6 +116,8 @@ struct vhost_net_virtqueue { * For RX, number of batched heads */ int done_idx; + /* Number of XDP frames batched */ + int batched_xdp; /* an array of userspace buffers info */ struct ubuf_info *ubuf_info; /* Reference counting for outstanding ubufs. @@ -123,6 +125,8 @@ struct vhost_net_virtqueue { struct vhost_net_ubuf_ref *ubufs; struct ptr_ring *rx_ring; struct vhost_net_buf rxq; + /* Batched XDP buffs */ + struct xdp_buff *xdp; }; struct vhost_net { @@ -338,6 +342,11 @@ static bool vhost_sock_zcopy(struct socket *sock) sock_flag(sock->sk, SOCK_ZEROCOPY); } +static bool vhost_sock_xdp(struct socket *sock) +{ + return sock_flag(sock->sk, SOCK_XDP); +} + /* In case of DMA done not in order in lower device driver for some reason. * upend_idx is used to track end of used idx, done_idx is used to track head * of used idx. Once lower device DMA done contiguously, we will signal KVM @@ -444,32 +453,120 @@ static void vhost_net_signal_used(struct vhost_net_virtqueue *nvq) nvq->done_idx = 0; } +static void vhost_tx_batch(struct vhost_net *net, + struct vhost_net_virtqueue *nvq, + struct socket *sock, + struct msghdr *msghdr) +{ + struct tun_msg_ctl ctl = { + .type = TUN_MSG_PTR, + .num = nvq->batched_xdp, + .ptr = nvq->xdp, + }; + int err; + + if (nvq->batched_xdp == 0) + goto signal_used; + + msghdr->msg_control = &ctl; + err = sock->ops->sendmsg(sock, msghdr, 0); + if (unlikely(err < 0)) { + vq_err(&nvq->vq, "Fail to batch sending packets\n"); + return; + } + +signal_used: + vhost_net_signal_used(nvq); + nvq->batched_xdp = 0; +} + +static int sock_has_rx_data(struct socket *sock) +{ + if (unlikely(!sock)) + return 0; + + if (sock->ops->peek_len) + return sock->ops->peek_len(sock); + + return skb_queue_empty(&sock->sk->sk_receive_queue); +} + +static void vhost_net_busy_poll_try_queue(struct vhost_net *net, + struct vhost_virtqueue *vq) +{ + if (!vhost_vq_avail_empty(&net->dev, vq)) { + vhost_poll_queue(&vq->poll); + } else if (unlikely(vhost_enable_notify(&net->dev, vq))) { + vhost_disable_notify(&net->dev, vq); + vhost_poll_queue(&vq->poll); + } +} + +static void vhost_net_busy_poll(struct vhost_net *net, + struct vhost_virtqueue *rvq, + struct vhost_virtqueue *tvq, + bool *busyloop_intr, + bool poll_rx) +{ + unsigned long busyloop_timeout; + unsigned long endtime; + struct socket *sock; + struct vhost_virtqueue *vq = poll_rx ? tvq : rvq; + + mutex_lock_nested(&vq->mutex, poll_rx ? VHOST_NET_VQ_TX: VHOST_NET_VQ_RX); + vhost_disable_notify(&net->dev, vq); + sock = rvq->private_data; + + busyloop_timeout = poll_rx ? 
rvq->busyloop_timeout: + tvq->busyloop_timeout; + + preempt_disable(); + endtime = busy_clock() + busyloop_timeout; + + while (vhost_can_busy_poll(endtime)) { + if (vhost_has_work(&net->dev)) { + *busyloop_intr = true; + break; + } + + if ((sock_has_rx_data(sock) && + !vhost_vq_avail_empty(&net->dev, rvq)) || + !vhost_vq_avail_empty(&net->dev, tvq)) + break; + + cpu_relax(); + } + + preempt_enable(); + + if (poll_rx || sock_has_rx_data(sock)) + vhost_net_busy_poll_try_queue(net, vq); + else if (!poll_rx) /* On tx here, sock has no rx data. */ + vhost_enable_notify(&net->dev, rvq); + + mutex_unlock(&vq->mutex); +} + static int vhost_net_tx_get_vq_desc(struct vhost_net *net, - struct vhost_net_virtqueue *nvq, + struct vhost_net_virtqueue *tnvq, unsigned int *out_num, unsigned int *in_num, - bool *busyloop_intr) + struct msghdr *msghdr, bool *busyloop_intr) { - struct vhost_virtqueue *vq = &nvq->vq; - unsigned long uninitialized_var(endtime); - int r = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov), + struct vhost_net_virtqueue *rnvq = &net->vqs[VHOST_NET_VQ_RX]; + struct vhost_virtqueue *rvq = &rnvq->vq; + struct vhost_virtqueue *tvq = &tnvq->vq; + + int r = vhost_get_vq_desc(tvq, tvq->iov, ARRAY_SIZE(tvq->iov), out_num, in_num, NULL, NULL); - if (r == vq->num && vq->busyloop_timeout) { - if (!vhost_sock_zcopy(vq->private_data)) - vhost_net_signal_used(nvq); - preempt_disable(); - endtime = busy_clock() + vq->busyloop_timeout; - while (vhost_can_busy_poll(endtime)) { - if (vhost_has_work(vq->dev)) { - *busyloop_intr = true; - break; - } - if (!vhost_vq_avail_empty(vq->dev, vq)) - break; - cpu_relax(); - } - preempt_enable(); - r = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov), + if (r == tvq->num && tvq->busyloop_timeout) { + /* Flush batched packets first */ + if (!vhost_sock_zcopy(tvq->private_data)) + vhost_tx_batch(net, tnvq, tvq->private_data, msghdr); + + vhost_net_busy_poll(net, rvq, tvq, busyloop_intr, false); + + r = vhost_get_vq_desc(tvq, tvq->iov, ARRAY_SIZE(tvq->iov), out_num, in_num, NULL, NULL); } @@ -512,7 +609,7 @@ static int get_tx_bufs(struct vhost_net *net, struct vhost_virtqueue *vq = &nvq->vq; int ret; - ret = vhost_net_tx_get_vq_desc(net, nvq, out, in, busyloop_intr); + ret = vhost_net_tx_get_vq_desc(net, nvq, out, in, msg, busyloop_intr); if (ret < 0 || ret == vq->num) return ret; @@ -540,6 +637,80 @@ static bool tx_can_batch(struct vhost_virtqueue *vq, size_t total_len) !vhost_vq_avail_empty(vq->dev, vq); } +#define VHOST_NET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD) + +static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq, + struct iov_iter *from) +{ + struct vhost_virtqueue *vq = &nvq->vq; + struct socket *sock = vq->private_data; + struct page_frag *alloc_frag = ¤t->task_frag; + struct virtio_net_hdr *gso; + struct xdp_buff *xdp = &nvq->xdp[nvq->batched_xdp]; + struct tun_xdp_hdr *hdr; + size_t len = iov_iter_count(from); + int headroom = vhost_sock_xdp(sock) ? 
XDP_PACKET_HEADROOM : 0; + int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + int pad = SKB_DATA_ALIGN(VHOST_NET_RX_PAD + headroom + nvq->sock_hlen); + int sock_hlen = nvq->sock_hlen; + void *buf; + int copied; + + if (unlikely(len < nvq->sock_hlen)) + return -EFAULT; + + if (SKB_DATA_ALIGN(len + pad) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE) + return -ENOSPC; + + buflen += SKB_DATA_ALIGN(len + pad); + alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES); + if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL))) + return -ENOMEM; + + buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset; + copied = copy_page_from_iter(alloc_frag->page, + alloc_frag->offset + + offsetof(struct tun_xdp_hdr, gso), + sock_hlen, from); + if (copied != sock_hlen) + return -EFAULT; + + hdr = buf; + gso = &hdr->gso; + + if ((gso->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && + vhost16_to_cpu(vq, gso->csum_start) + + vhost16_to_cpu(vq, gso->csum_offset) + 2 > + vhost16_to_cpu(vq, gso->hdr_len)) { + gso->hdr_len = cpu_to_vhost16(vq, + vhost16_to_cpu(vq, gso->csum_start) + + vhost16_to_cpu(vq, gso->csum_offset) + 2); + + if (vhost16_to_cpu(vq, gso->hdr_len) > len) + return -EINVAL; + } + + len -= sock_hlen; + copied = copy_page_from_iter(alloc_frag->page, + alloc_frag->offset + pad, + len, from); + if (copied != len) + return -EFAULT; + + xdp->data_hard_start = buf; + xdp->data = buf + pad; + xdp->data_end = xdp->data + len; + hdr->buflen = buflen; + + get_page(alloc_frag->page); + alloc_frag->offset += buflen; + + ++nvq->batched_xdp; + + return 0; +} + static void handle_tx_copy(struct vhost_net *net, struct socket *sock) { struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX]; @@ -556,10 +727,14 @@ static void handle_tx_copy(struct vhost_net *net, struct socket *sock) size_t len, total_len = 0; int err; int sent_pkts = 0; + bool sock_can_batch = (sock->sk->sk_sndbuf == INT_MAX); for (;;) { bool busyloop_intr = false; + if (nvq->done_idx == VHOST_NET_BATCH) + vhost_tx_batch(net, nvq, sock, &msg); + head = get_tx_bufs(net, nvq, &msg, &out, &in, &len, &busyloop_intr); /* On error, stop handling until the next kick. */ @@ -577,14 +752,34 @@ static void handle_tx_copy(struct vhost_net *net, struct socket *sock) break; } - vq->heads[nvq->done_idx].id = cpu_to_vhost32(vq, head); - vq->heads[nvq->done_idx].len = 0; - total_len += len; - if (tx_can_batch(vq, total_len)) - msg.msg_flags |= MSG_MORE; - else - msg.msg_flags &= ~MSG_MORE; + + /* For simplicity, TX batching is only enabled if + * sndbuf is unlimited. + */ + if (sock_can_batch) { + err = vhost_net_build_xdp(nvq, &msg.msg_iter); + if (!err) { + goto done; + } else if (unlikely(err != -ENOSPC)) { + vhost_tx_batch(net, nvq, sock, &msg); + vhost_discard_vq_desc(vq, 1); + vhost_net_enable_vq(net, vq); + break; + } + + /* We can't build XDP buff, go for single + * packet path but let's flush batched + * packets. + */ + vhost_tx_batch(net, nvq, sock, &msg); + msg.msg_control = NULL; + } else { + if (tx_can_batch(vq, total_len)) + msg.msg_flags |= MSG_MORE; + else + msg.msg_flags &= ~MSG_MORE; + } /* TODO: Check specific error and bomb out unless ENOBUFS? 
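/*
 * Condensed sketch of the batched-transmit contract that vhost_tx_batch()
 * above relies on, assuming a tun/tap backend that accepts TUN_MSG_PTR:
 * msg_control carries a tun_msg_ctl describing an array of prepared
 * xdp_buffs, and a single sendmsg() flushes the whole batch. The helper
 * name is illustrative.
 */
static int example_flush_xdp_batch(struct socket *sock, struct msghdr *msg,
				   struct xdp_buff *xdp, int n)
{
	struct tun_msg_ctl ctl = {
		.type = TUN_MSG_PTR,
		.num = n,
		.ptr = xdp,
	};

	msg->msg_control = &ctl;
	return sock->ops->sendmsg(sock, msg, 0);
}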
*/ err = sock->ops->sendmsg(sock, &msg, len); @@ -596,15 +791,17 @@ static void handle_tx_copy(struct vhost_net *net, struct socket *sock) if (err != len) pr_debug("Truncated TX packet: len %d != %zd\n", err, len); - if (++nvq->done_idx >= VHOST_NET_BATCH) - vhost_net_signal_used(nvq); +done: + vq->heads[nvq->done_idx].id = cpu_to_vhost32(vq, head); + vq->heads[nvq->done_idx].len = 0; + ++nvq->done_idx; if (vhost_exceeds_weight(++sent_pkts, total_len)) { vhost_poll_queue(&vq->poll); break; } } - vhost_net_signal_used(nvq); + vhost_tx_batch(net, nvq, sock, &msg); } static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock) @@ -620,6 +817,7 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock) .msg_controllen = 0, .msg_flags = MSG_DONTWAIT, }; + struct tun_msg_ctl ctl; size_t len, total_len = 0; int err; struct vhost_net_ubuf_ref *uninitialized_var(ubufs); @@ -664,8 +862,10 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock) ubuf->ctx = nvq->ubufs; ubuf->desc = nvq->upend_idx; refcount_set(&ubuf->refcnt, 1); - msg.msg_control = ubuf; - msg.msg_controllen = sizeof(ubuf); + msg.msg_control = &ctl; + ctl.type = TUN_MSG_UBUF; + ctl.ptr = ubuf; + msg.msg_controllen = sizeof(ctl); ubufs = nvq->ubufs; atomic_inc(&ubufs->refcount); nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV; @@ -716,7 +916,7 @@ static void handle_tx(struct vhost_net *net) struct vhost_virtqueue *vq = &nvq->vq; struct socket *sock; - mutex_lock(&vq->mutex); + mutex_lock_nested(&vq->mutex, VHOST_NET_VQ_TX); sock = vq->private_data; if (!sock) goto out; @@ -757,16 +957,6 @@ static int peek_head_len(struct vhost_net_virtqueue *rvq, struct sock *sk) return len; } -static int sk_has_rx_data(struct sock *sk) -{ - struct socket *sock = sk->sk_socket; - - if (sock->ops->peek_len) - return sock->ops->peek_len(sock); - - return skb_queue_empty(&sk->sk_receive_queue); -} - static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk, bool *busyloop_intr) { @@ -774,41 +964,13 @@ static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk, struct vhost_net_virtqueue *tnvq = &net->vqs[VHOST_NET_VQ_TX]; struct vhost_virtqueue *rvq = &rnvq->vq; struct vhost_virtqueue *tvq = &tnvq->vq; - unsigned long uninitialized_var(endtime); int len = peek_head_len(rnvq, sk); - if (!len && tvq->busyloop_timeout) { + if (!len && rvq->busyloop_timeout) { /* Flush batched heads first */ vhost_net_signal_used(rnvq); /* Both tx vq and rx socket were polled here */ - mutex_lock_nested(&tvq->mutex, 1); - vhost_disable_notify(&net->dev, tvq); - - preempt_disable(); - endtime = busy_clock() + tvq->busyloop_timeout; - - while (vhost_can_busy_poll(endtime)) { - if (vhost_has_work(&net->dev)) { - *busyloop_intr = true; - break; - } - if ((sk_has_rx_data(sk) && - !vhost_vq_avail_empty(&net->dev, rvq)) || - !vhost_vq_avail_empty(&net->dev, tvq)) - break; - cpu_relax(); - } - - preempt_enable(); - - if (!vhost_vq_avail_empty(&net->dev, tvq)) { - vhost_poll_queue(&tvq->poll); - } else if (unlikely(vhost_enable_notify(&net->dev, tvq))) { - vhost_disable_notify(&net->dev, tvq); - vhost_poll_queue(&tvq->poll); - } - - mutex_unlock(&tvq->mutex); + vhost_net_busy_poll(net, rvq, tvq, busyloop_intr, true); len = peek_head_len(rnvq, sk); } @@ -923,7 +1085,7 @@ static void handle_rx(struct vhost_net *net) __virtio16 num_buffers; int recv_pkts = 0; - mutex_lock_nested(&vq->mutex, 0); + mutex_lock_nested(&vq->mutex, VHOST_NET_VQ_RX); sock = vq->private_data; if (!sock) goto out; @@ 
-1078,6 +1240,7 @@ static int vhost_net_open(struct inode *inode, struct file *f) struct vhost_dev *dev; struct vhost_virtqueue **vqs; void **queue; + struct xdp_buff *xdp; int i; n = kvmalloc(sizeof *n, GFP_KERNEL | __GFP_RETRY_MAYFAIL); @@ -1098,6 +1261,15 @@ static int vhost_net_open(struct inode *inode, struct file *f) } n->vqs[VHOST_NET_VQ_RX].rxq.queue = queue; + xdp = kmalloc_array(VHOST_NET_BATCH, sizeof(*xdp), GFP_KERNEL); + if (!xdp) { + kfree(vqs); + kvfree(n); + kfree(queue); + return -ENOMEM; + } + n->vqs[VHOST_NET_VQ_TX].xdp = xdp; + dev = &n->dev; vqs[VHOST_NET_VQ_TX] = &n->vqs[VHOST_NET_VQ_TX].vq; vqs[VHOST_NET_VQ_RX] = &n->vqs[VHOST_NET_VQ_RX].vq; @@ -1108,6 +1280,7 @@ static int vhost_net_open(struct inode *inode, struct file *f) n->vqs[i].ubuf_info = NULL; n->vqs[i].upend_idx = 0; n->vqs[i].done_idx = 0; + n->vqs[i].batched_xdp = 0; n->vqs[i].vhost_hlen = 0; n->vqs[i].sock_hlen = 0; n->vqs[i].rx_ring = NULL; @@ -1191,6 +1364,7 @@ static int vhost_net_release(struct inode *inode, struct file *f) * since jobs can re-queue themselves. */ vhost_net_flush(n); kfree(n->vqs[VHOST_NET_VQ_RX].rxq.queue); + kfree(n->vqs[VHOST_NET_VQ_TX].xdp); kfree(n->dev.vqs); kvfree(n); return 0; diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index b13c6b4b2c66..f52008bb8df7 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -294,8 +294,11 @@ static void vhost_vq_meta_reset(struct vhost_dev *d) { int i; - for (i = 0; i < d->nvqs; ++i) + for (i = 0; i < d->nvqs; ++i) { + mutex_lock(&d->vqs[i]->mutex); __vhost_vq_meta_reset(d->vqs[i]); + mutex_unlock(&d->vqs[i]->mutex); + } } static void vhost_vq_reset(struct vhost_dev *dev, @@ -891,20 +894,6 @@ static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq, #define vhost_get_used(vq, x, ptr) \ vhost_get_user(vq, x, ptr, VHOST_ADDR_USED) -static void vhost_dev_lock_vqs(struct vhost_dev *d) -{ - int i = 0; - for (i = 0; i < d->nvqs; ++i) - mutex_lock_nested(&d->vqs[i]->mutex, i); -} - -static void vhost_dev_unlock_vqs(struct vhost_dev *d) -{ - int i = 0; - for (i = 0; i < d->nvqs; ++i) - mutex_unlock(&d->vqs[i]->mutex); -} - static int vhost_new_umem_range(struct vhost_umem *umem, u64 start, u64 size, u64 end, u64 userspace_addr, int perm) @@ -954,7 +943,10 @@ static void vhost_iotlb_notify_vq(struct vhost_dev *d, if (msg->iova <= vq_msg->iova && msg->iova + msg->size - 1 >= vq_msg->iova && vq_msg->type == VHOST_IOTLB_MISS) { + mutex_lock(&node->vq->mutex); vhost_poll_queue(&node->vq->poll); + mutex_unlock(&node->vq->mutex); + list_del(&node->node); kfree(node); } @@ -986,7 +978,6 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev, int ret = 0; mutex_lock(&dev->mutex); - vhost_dev_lock_vqs(dev); switch (msg->type) { case VHOST_IOTLB_UPDATE: if (!dev->iotlb) { @@ -1020,7 +1011,6 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev, break; } - vhost_dev_unlock_vqs(dev); mutex_unlock(&dev->mutex); return ret; diff --git a/fs/afs/addr_list.c b/fs/afs/addr_list.c index 025a9a5e1c32..55a756c60746 100644 --- a/fs/afs/addr_list.c +++ b/fs/afs/addr_list.c @@ -17,11 +17,6 @@ #include "internal.h" #include "afs_fs.h" -//#define AFS_MAX_ADDRESSES -// ((unsigned int)((PAGE_SIZE - sizeof(struct afs_addr_list)) / -// sizeof(struct sockaddr_rxrpc))) -#define AFS_MAX_ADDRESSES ((unsigned int)(sizeof(unsigned long) * 8)) - /* * Release an address list. 
*/ @@ -43,11 +38,15 @@ struct afs_addr_list *afs_alloc_addrlist(unsigned int nr, _enter("%u,%u,%u", nr, service, port); + if (nr > AFS_MAX_ADDRESSES) + nr = AFS_MAX_ADDRESSES; + alist = kzalloc(struct_size(alist, addrs, nr), GFP_KERNEL); if (!alist) return NULL; refcount_set(&alist->usage, 1); + alist->max_addrs = nr; for (i = 0; i < nr; i++) { struct sockaddr_rxrpc *srx = &alist->addrs[i]; @@ -109,8 +108,6 @@ struct afs_addr_list *afs_parse_text_addrs(const char *text, size_t len, } while (p < end); _debug("%u/%u addresses", nr, AFS_MAX_ADDRESSES); - if (nr > AFS_MAX_ADDRESSES) - nr = AFS_MAX_ADDRESSES; alist = afs_alloc_addrlist(nr, service, port); if (!alist) @@ -119,8 +116,10 @@ struct afs_addr_list *afs_parse_text_addrs(const char *text, size_t len, /* Extract the addresses */ p = text; do { - struct sockaddr_rxrpc *srx = &alist->addrs[alist->nr_addrs]; const char *q, *stop; + unsigned int xport = port; + __be32 x[4]; + int family; if (*p == delim) { p++; @@ -136,19 +135,12 @@ struct afs_addr_list *afs_parse_text_addrs(const char *text, size_t len, break; } - if (in4_pton(p, q - p, - (u8 *)&srx->transport.sin6.sin6_addr.s6_addr32[3], - -1, &stop)) { - srx->transport.sin6.sin6_addr.s6_addr32[0] = 0; - srx->transport.sin6.sin6_addr.s6_addr32[1] = 0; - srx->transport.sin6.sin6_addr.s6_addr32[2] = htonl(0xffff); - } else if (in6_pton(p, q - p, - srx->transport.sin6.sin6_addr.s6_addr, - -1, &stop)) { - /* Nothing to do */ - } else { + if (in4_pton(p, q - p, (u8 *)&x[0], -1, &stop)) + family = AF_INET; + else if (in6_pton(p, q - p, (u8 *)x, -1, &stop)) + family = AF_INET6; + else goto bad_address; - } if (stop != q) goto bad_address; @@ -160,7 +152,7 @@ struct afs_addr_list *afs_parse_text_addrs(const char *text, size_t len, if (p < end) { if (*p == '+') { /* Port number specification "+1234" */ - unsigned int xport = 0; + xport = 0; p++; if (p >= end || !isdigit(*p)) goto bad_address; @@ -171,7 +163,6 @@ struct afs_addr_list *afs_parse_text_addrs(const char *text, size_t len, goto bad_address; p++; } while (p < end && isdigit(*p)); - srx->transport.sin6.sin6_port = htons(xport); } else if (*p == delim) { p++; } else { @@ -179,8 +170,12 @@ struct afs_addr_list *afs_parse_text_addrs(const char *text, size_t len, } } - alist->nr_addrs++; - } while (p < end && alist->nr_addrs < AFS_MAX_ADDRESSES); + if (family == AF_INET) + afs_merge_fs_addr4(alist, x[0], xport); + else + afs_merge_fs_addr6(alist, x, xport); + + } while (p < end); _leave(" = [nr %u]", alist->nr_addrs); return alist; @@ -237,19 +232,23 @@ struct afs_addr_list *afs_dns_query(struct afs_cell *cell, time64_t *_expiry) */ void afs_merge_fs_addr4(struct afs_addr_list *alist, __be32 xdr, u16 port) { - struct sockaddr_in6 *a; - __be16 xport = htons(port); + struct sockaddr_rxrpc *srx; + u32 addr = ntohl(xdr); int i; + if (alist->nr_addrs >= alist->max_addrs) + return; + for (i = 0; i < alist->nr_ipv4; i++) { - a = &alist->addrs[i].transport.sin6; - if (xdr == a->sin6_addr.s6_addr32[3] && - xport == a->sin6_port) + struct sockaddr_in *a = &alist->addrs[i].transport.sin; + u32 a_addr = ntohl(a->sin_addr.s_addr); + u16 a_port = ntohs(a->sin_port); + + if (addr == a_addr && port == a_port) return; - if (xdr == a->sin6_addr.s6_addr32[3] && - (u16 __force)xport < (u16 __force)a->sin6_port) + if (addr == a_addr && port < a_port) break; - if ((u32 __force)xdr < (u32 __force)a->sin6_addr.s6_addr32[3]) + if (addr < a_addr) break; } @@ -258,12 +257,11 @@ void afs_merge_fs_addr4(struct afs_addr_list *alist, __be32 xdr, u16 port) alist->addrs + i, 
sizeof(alist->addrs[0]) * (alist->nr_addrs - i)); - a = &alist->addrs[i].transport.sin6; - a->sin6_port = xport; - a->sin6_addr.s6_addr32[0] = 0; - a->sin6_addr.s6_addr32[1] = 0; - a->sin6_addr.s6_addr32[2] = htonl(0xffff); - a->sin6_addr.s6_addr32[3] = xdr; + srx = &alist->addrs[i]; + srx->transport_len = sizeof(srx->transport.sin); + srx->transport.sin.sin_family = AF_INET; + srx->transport.sin.sin_port = htons(port); + srx->transport.sin.sin_addr.s_addr = xdr; alist->nr_ipv4++; alist->nr_addrs++; } @@ -273,18 +271,20 @@ void afs_merge_fs_addr4(struct afs_addr_list *alist, __be32 xdr, u16 port) */ void afs_merge_fs_addr6(struct afs_addr_list *alist, __be32 *xdr, u16 port) { - struct sockaddr_in6 *a; - __be16 xport = htons(port); + struct sockaddr_rxrpc *srx; int i, diff; + if (alist->nr_addrs >= alist->max_addrs) + return; + for (i = alist->nr_ipv4; i < alist->nr_addrs; i++) { - a = &alist->addrs[i].transport.sin6; + struct sockaddr_in6 *a = &alist->addrs[i].transport.sin6; + u16 a_port = ntohs(a->sin6_port); + diff = memcmp(xdr, &a->sin6_addr, 16); - if (diff == 0 && - xport == a->sin6_port) + if (diff == 0 && port == a_port) return; - if (diff == 0 && - (u16 __force)xport < (u16 __force)a->sin6_port) + if (diff == 0 && port < a_port) break; if (diff < 0) break; @@ -295,12 +295,11 @@ void afs_merge_fs_addr6(struct afs_addr_list *alist, __be32 *xdr, u16 port) alist->addrs + i, sizeof(alist->addrs[0]) * (alist->nr_addrs - i)); - a = &alist->addrs[i].transport.sin6; - a->sin6_port = xport; - a->sin6_addr.s6_addr32[0] = xdr[0]; - a->sin6_addr.s6_addr32[1] = xdr[1]; - a->sin6_addr.s6_addr32[2] = xdr[2]; - a->sin6_addr.s6_addr32[3] = xdr[3]; + srx = &alist->addrs[i]; + srx->transport_len = sizeof(srx->transport.sin6); + srx->transport.sin6.sin6_family = AF_INET6; + srx->transport.sin6.sin6_port = htons(port); + memcpy(&srx->transport.sin6.sin6_addr, xdr, 16); alist->nr_addrs++; } diff --git a/fs/afs/internal.h b/fs/afs/internal.h index 34c02fdcc25f..72de1f157d20 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h @@ -73,12 +73,14 @@ struct afs_addr_list { struct rcu_head rcu; /* Must be first */ refcount_t usage; u32 version; /* Version */ - unsigned short nr_addrs; - unsigned short index; /* Address currently in use */ - unsigned short nr_ipv4; /* Number of IPv4 addresses */ + unsigned char max_addrs; + unsigned char nr_addrs; + unsigned char index; /* Address currently in use */ + unsigned char nr_ipv4; /* Number of IPv4 addresses */ unsigned long probed; /* Mask of servers that have been probed */ unsigned long yfs; /* Mask of servers that are YFS */ struct sockaddr_rxrpc addrs[]; +#define AFS_MAX_ADDRESSES ((unsigned int)(sizeof(unsigned long) * 8)) }; /* diff --git a/include/dt-bindings/net/mscc-phy-vsc8531.h b/include/dt-bindings/net/mscc-phy-vsc8531.h index 697161f80eb5..9eb2ec2b2ea9 100644 --- a/include/dt-bindings/net/mscc-phy-vsc8531.h +++ b/include/dt-bindings/net/mscc-phy-vsc8531.h @@ -18,9 +18,11 @@ #define VSC8531_LINK_100_1000_ACTIVITY 4 #define VSC8531_LINK_10_1000_ACTIVITY 5 #define VSC8531_LINK_10_100_ACTIVITY 6 +#define VSC8584_LINK_100FX_1000X_ACTIVITY 7 #define VSC8531_DUPLEX_COLLISION 8 #define VSC8531_COLLISION 9 #define VSC8531_ACTIVITY 10 +#define VSC8584_100FX_1000X_ACTIVITY 11 #define VSC8531_AUTONEG_FAULT 12 #define VSC8531_SERIAL_MODE 13 #define VSC8531_FORCE_LED_OFF 14 diff --git a/include/dt-bindings/phy/phy-ocelot-serdes.h b/include/dt-bindings/phy/phy-ocelot-serdes.h new file mode 100644 index 000000000000..bd28f21206f6 --- /dev/null +++ 
b/include/dt-bindings/phy/phy-ocelot-serdes.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ +/* Copyright (c) 2018 Microsemi Corporation */ +#ifndef __PHY_OCELOT_SERDES_H__ +#define __PHY_OCELOT_SERDES_H__ + +#define SERDES1G(x) (x) +#define SERDES1G_MAX SERDES1G(5) +#define SERDES6G(x) (SERDES1G_MAX + 1 + (x)) +#define SERDES6G_MAX SERDES6G(2) +#define SERDES_MAX SERDES6G_MAX + +#endif diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h index 212b3822d180..2c9756bd9c4c 100644 --- a/include/linux/avf/virtchnl.h +++ b/include/linux/avf/virtchnl.h @@ -252,6 +252,8 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource); #define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM 0X00400000 #define VIRTCHNL_VF_OFFLOAD_ADQ 0X00800000 +/* Define below the capability flags that are not offloads */ +#define VIRTCHNL_VF_CAP_ADV_LINK_SPEED 0x00000080 #define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \ VIRTCHNL_VF_OFFLOAD_VLAN | \ VIRTCHNL_VF_OFFLOAD_RSS_PF) @@ -573,7 +575,7 @@ struct virtchnl_filter { enum virtchnl_flow_type flow_type; enum virtchnl_action action; u32 action_meta; - __u8 field_flags; + u8 field_flags; }; VIRTCHNL_CHECK_STRUCT_LEN(272, virtchnl_filter); @@ -596,10 +598,23 @@ enum virtchnl_event_codes { struct virtchnl_pf_event { enum virtchnl_event_codes event; union { + /* If the PF driver does not support the new speed reporting + * capabilities then use link_event else use link_event_adv to + * get the speed and link information. The ability to understand + * new speeds is indicated by setting the capability flag + * VIRTCHNL_VF_CAP_ADV_LINK_SPEED in vf_cap_flags parameter + * in virtchnl_vf_resource struct and can be used to determine + * which link event struct to use below. + */ struct { enum virtchnl_link_speed link_speed; bool link_status; } link_event; + struct { + /* link_speed provided in Mbps */ + u32 link_speed; + u8 link_status; + } link_event_adv; } event_data; int severity; diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h index f91b0f8ff3a9..588dd5f0bd85 100644 --- a/include/linux/bpf-cgroup.h +++ b/include/linux/bpf-cgroup.h @@ -2,6 +2,7 @@ #ifndef _BPF_CGROUP_H #define _BPF_CGROUP_H +#include <linux/bpf.h> #include <linux/errno.h> #include <linux/jump_label.h> #include <linux/percpu.h> @@ -22,7 +23,11 @@ struct bpf_cgroup_storage; extern struct static_key_false cgroup_bpf_enabled_key; #define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key) -DECLARE_PER_CPU(void*, bpf_cgroup_storage); +DECLARE_PER_CPU(struct bpf_cgroup_storage*, + bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]); + +#define for_each_cgroup_storage_type(stype) \ + for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++) struct bpf_cgroup_storage_map; @@ -32,7 +37,10 @@ struct bpf_storage_buffer { }; struct bpf_cgroup_storage { - struct bpf_storage_buffer *buf; + union { + struct bpf_storage_buffer *buf; + void __percpu *percpu_buf; + }; struct bpf_cgroup_storage_map *map; struct bpf_cgroup_storage_key key; struct list_head list; @@ -43,7 +51,7 @@ struct bpf_cgroup_storage { struct bpf_prog_list { struct list_head node; struct bpf_prog *prog; - struct bpf_cgroup_storage *storage; + struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]; }; struct bpf_prog_array; @@ -101,18 +109,26 @@ int __cgroup_bpf_run_filter_sock_ops(struct sock *sk, int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor, short access, enum bpf_attach_type type); -static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage 
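/*
 * Sketch of the capability-gated speed lookup described by the virtchnl
 * comment above: a VF that negotiated VIRTCHNL_VF_CAP_ADV_LINK_SPEED reads
 * the Mbps value from link_event_adv, anything else falls back to the
 * legacy enum. vf_cap_flags is assumed to come from the PF's
 * virtchnl_vf_resource reply; the helper name is illustrative.
 */
static u32 example_link_speed(struct virtchnl_pf_event *evt, u32 vf_cap_flags)
{
	if (vf_cap_flags & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
		return evt->event_data.link_event_adv.link_speed;

	/* legacy reporting: an enum virtchnl_link_speed, not Mbps */
	return (u32)evt->event_data.link_event.link_speed;
}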
*storage) +static inline enum bpf_cgroup_storage_type cgroup_storage_type( + struct bpf_map *map) { - struct bpf_storage_buffer *buf; + if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) + return BPF_CGROUP_STORAGE_PERCPU; + + return BPF_CGROUP_STORAGE_SHARED; +} - if (!storage) - return; +static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage + *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) +{ + enum bpf_cgroup_storage_type stype; - buf = READ_ONCE(storage->buf); - this_cpu_write(bpf_cgroup_storage, &buf->data[0]); + for_each_cgroup_storage_type(stype) + this_cpu_write(bpf_cgroup_storage[stype], storage[stype]); } -struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog); +struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog, + enum bpf_cgroup_storage_type stype); void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage); void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage, struct cgroup *cgroup, @@ -121,6 +137,10 @@ void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage); int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *map); void bpf_cgroup_storage_release(struct bpf_prog *prog, struct bpf_map *map); +int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value); +int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, + void *value, u64 flags); + /* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */ #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) \ ({ \ @@ -265,15 +285,24 @@ static inline int cgroup_bpf_prog_query(const union bpf_attr *attr, return -EINVAL; } -static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage *storage) {} +static inline void bpf_cgroup_storage_set( + struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) {} static inline int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *map) { return 0; } static inline void bpf_cgroup_storage_release(struct bpf_prog *prog, struct bpf_map *map) {} static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc( - struct bpf_prog *prog) { return 0; } + struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return 0; } static inline void bpf_cgroup_storage_free( struct bpf_cgroup_storage *storage) {} +static inline int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, + void *value) { + return 0; +} +static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map, + void *key, void *value, u64 flags) { + return 0; +} #define cgroup_bpf_enabled (0) #define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0) @@ -293,6 +322,8 @@ static inline void bpf_cgroup_storage_free( #define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; }) #define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type,major,minor,access) ({ 0; }) +#define for_each_cgroup_storage_type(stype) for (; false; ) + #endif /* CONFIG_CGROUP_BPF */ #endif /* _BPF_CGROUP_H */ diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 523481a3471b..9b558713447f 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -154,6 +154,7 @@ enum bpf_arg_type { ARG_PTR_TO_CTX, /* pointer to context */ ARG_ANYTHING, /* any (initialized) argument is ok */ + ARG_PTR_TO_SOCKET, /* pointer to bpf_sock */ }; /* type of values returned from helper functions */ @@ -162,6 +163,7 @@ enum bpf_return_type { RET_VOID, /* function doesn't return anything */ RET_PTR_TO_MAP_VALUE, /* returns a pointer to map elem value */ RET_PTR_TO_MAP_VALUE_OR_NULL, /* returns a pointer to map elem value or NULL 
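/*
 * Minimal sketch tying together cgroup_storage_type() and
 * bpf_cgroup_storage_set() from this hunk: the storage pointer is filed
 * under the slot its map type selects, then every per-type pointer is
 * published for the current CPU before the program runs. bpf_prog_array_item
 * is defined later in this patch; the helper name is illustrative.
 */
static void example_publish_storage(struct bpf_prog_array_item *item,
				    struct bpf_map *map,
				    struct bpf_cgroup_storage *storage)
{
	item->cgroup_storage[cgroup_storage_type(map)] = storage;
	bpf_cgroup_storage_set(item->cgroup_storage);
}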
*/ + RET_PTR_TO_SOCKET_OR_NULL, /* returns a pointer to a socket or NULL */ }; /* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs @@ -212,6 +214,9 @@ enum bpf_reg_type { PTR_TO_PACKET_META, /* skb->data - meta_len */ PTR_TO_PACKET, /* reg points to skb->data */ PTR_TO_PACKET_END, /* skb->data + headlen */ + PTR_TO_FLOW_KEYS, /* reg points to bpf_flow_keys */ + PTR_TO_SOCKET, /* reg points to struct bpf_sock */ + PTR_TO_SOCKET_OR_NULL, /* reg points to struct bpf_sock or NULL */ }; /* The information passed from prog-specific *_is_valid_access @@ -258,6 +263,7 @@ struct bpf_verifier_ops { struct bpf_prog_offload_ops { int (*insn_hook)(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx); + int (*finalize)(struct bpf_verifier_env *env); }; struct bpf_prog_offload { @@ -271,6 +277,14 @@ struct bpf_prog_offload { u32 jited_len; }; +enum bpf_cgroup_storage_type { + BPF_CGROUP_STORAGE_SHARED, + BPF_CGROUP_STORAGE_PERCPU, + __BPF_CGROUP_STORAGE_MAX +}; + +#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX + struct bpf_prog_aux { atomic_t refcnt; u32 used_map_cnt; @@ -288,7 +302,7 @@ struct bpf_prog_aux { struct bpf_prog *prog; struct user_struct *user; u64 load_time; /* ns since boottime */ - struct bpf_map *cgroup_storage; + struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]; char name[BPF_OBJ_NAME_LEN]; #ifdef CONFIG_SECURITY void *security; @@ -334,6 +348,11 @@ const struct bpf_func_proto *bpf_get_trace_printk_proto(void); typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src, unsigned long off, unsigned long len); +typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type type, + const struct bpf_insn *src, + struct bpf_insn *dst, + struct bpf_prog *prog, + u32 *target_size); u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy); @@ -357,7 +376,7 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr, */ struct bpf_prog_array_item { struct bpf_prog *prog; - struct bpf_cgroup_storage *cgroup_storage; + struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]; }; struct bpf_prog_array { @@ -827,4 +846,29 @@ extern const struct bpf_func_proto bpf_get_local_storage_proto; void bpf_user_rnd_init_once(void); u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); +#if defined(CONFIG_NET) +bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type, + struct bpf_insn_access_aux *info); +u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, + const struct bpf_insn *si, + struct bpf_insn *insn_buf, + struct bpf_prog *prog, + u32 *target_size); +#else +static inline bool bpf_sock_is_valid_access(int off, int size, + enum bpf_access_type type, + struct bpf_insn_access_aux *info) +{ + return false; +} +static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, + const struct bpf_insn *si, + struct bpf_insn *insn_buf, + struct bpf_prog *prog, + u32 *target_size) +{ + return 0; +} +#endif + #endif /* _LINUX_BPF_H */ diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h index cd26c090e7c0..5432f4c9f50e 100644 --- a/include/linux/bpf_types.h +++ b/include/linux/bpf_types.h @@ -16,6 +16,7 @@ BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_SEG6LOCAL, lwt_seg6local) BPF_PROG_TYPE(BPF_PROG_TYPE_SOCK_OPS, sock_ops) BPF_PROG_TYPE(BPF_PROG_TYPE_SK_SKB, sk_skb) BPF_PROG_TYPE(BPF_PROG_TYPE_SK_MSG, sk_msg) +BPF_PROG_TYPE(BPF_PROG_TYPE_FLOW_DISSECTOR, flow_dissector) #endif #ifdef 
CONFIG_BPF_EVENTS BPF_PROG_TYPE(BPF_PROG_TYPE_KPROBE, kprobe) @@ -42,6 +43,7 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_CGROUP_ARRAY, cgroup_array_map_ops) #endif #ifdef CONFIG_CGROUP_BPF BPF_MAP_TYPE(BPF_MAP_TYPE_CGROUP_STORAGE, cgroup_storage_map_ops) +BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE, cgroup_storage_map_ops) #endif BPF_MAP_TYPE(BPF_MAP_TYPE_HASH, htab_map_ops) BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_HASH, htab_percpu_map_ops) diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 38b04f559ad3..9e8056ec20fa 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -41,6 +41,7 @@ enum bpf_reg_liveness { }; struct bpf_reg_state { + /* Ordering of fields matters. See states_equal() */ enum bpf_reg_type type; union { /* valid when type == PTR_TO_PACKET */ @@ -57,9 +58,10 @@ struct bpf_reg_state { * offset, so they can share range knowledge. * For PTR_TO_MAP_VALUE_OR_NULL this is used to share which map value we * came from, when one is tested for != NULL. + * For PTR_TO_SOCKET this is used to share which pointers retain the + * same reference to the socket, to determine proper reference freeing. */ u32 id; - /* Ordering of fields matters. See states_equal() */ /* For scalar types (SCALAR_VALUE), this represents our knowledge of * the actual value. * For pointer types, this represents the variable part of the offset @@ -76,15 +78,15 @@ struct bpf_reg_state { s64 smax_value; /* maximum possible (s64)value */ u64 umin_value; /* minimum possible (u64)value */ u64 umax_value; /* maximum possible (u64)value */ + /* parentage chain for liveness checking */ + struct bpf_reg_state *parent; /* Inside the callee two registers can be both PTR_TO_STACK like * R1=fp-8 and R2=fp-8, but one of them points to this function stack * while another to the caller's stack. To differentiate them 'frameno' * is used which is an index in bpf_verifier_state->frame[] array * pointing to bpf_func_state. - * This field must be second to last, for states_equal() reasons. */ u32 frameno; - /* This field must be last, for states_equal() reasons. */ enum bpf_reg_liveness live; }; @@ -102,12 +104,22 @@ struct bpf_stack_state { u8 slot_type[BPF_REG_SIZE]; }; +struct bpf_reference_state { + /* Track each reference created with a unique id, even if the same + * instruction creates the reference multiple times (eg, via CALL). + */ + int id; + /* Instruction where the allocation of this reference occurred. This + * is used purely to inform the user of a reference leak. + */ + int insn_idx; +}; + /* state of the program: * type of all registers and stack info */ struct bpf_func_state { struct bpf_reg_state regs[MAX_BPF_REG]; - struct bpf_verifier_state *parent; /* index of call instruction that called into this func */ int callsite; /* stack frame number of this function state from pov of @@ -120,7 +132,9 @@ struct bpf_func_state { */ u32 subprogno; - /* should be second to last. See copy_func_state() */ + /* The following fields should be last. See copy_func_state() */ + int acquired_refs; + struct bpf_reference_state *refs; int allocated_stack; struct bpf_stack_state *stack; }; @@ -129,10 +143,20 @@ struct bpf_func_state { struct bpf_verifier_state { /* call stack tracking */ struct bpf_func_state *frame[MAX_CALL_FRAMES]; - struct bpf_verifier_state *parent; u32 curframe; }; +#define bpf_get_spilled_reg(slot, frame) \ + (((slot < frame->allocated_stack / BPF_REG_SIZE) && \ + (frame->stack[slot].slot_type[0] == STACK_SPILL)) \ + ? 
&frame->stack[slot].spilled_ptr : NULL) + +/* Iterate over 'frame', setting 'reg' to either NULL or a spilled register. */ +#define bpf_for_each_spilled_reg(iter, frame, reg) \ + for (iter = 0, reg = bpf_get_spilled_reg(iter, frame); \ + iter < frame->allocated_stack / BPF_REG_SIZE; \ + iter++, reg = bpf_get_spilled_reg(iter, frame)) + /* linked list of verifier states used to prune search */ struct bpf_verifier_state_list { struct bpf_verifier_state state; @@ -206,15 +230,21 @@ __printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log, __printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env, const char *fmt, ...); -static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env) +static inline struct bpf_func_state *cur_func(struct bpf_verifier_env *env) { struct bpf_verifier_state *cur = env->cur_state; - return cur->frame[cur->curframe]->regs; + return cur->frame[cur->curframe]; +} + +static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env) +{ + return cur_func(env)->regs; } int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env); int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx); +int bpf_prog_offload_finalize(struct bpf_verifier_env *env); #endif /* _LINUX_BPF_VERIFIER_H */ diff --git a/include/linux/dns_resolver.h b/include/linux/dns_resolver.h index 6ac3cad9aef1..34a744a1bafc 100644 --- a/include/linux/dns_resolver.h +++ b/include/linux/dns_resolver.h @@ -24,11 +24,9 @@ #ifndef _LINUX_DNS_RESOLVER_H #define _LINUX_DNS_RESOLVER_H -#ifdef __KERNEL__ +#include <uapi/linux/dns_resolver.h> extern int dns_query(const char *type, const char *name, size_t namelen, const char *options, char **_result, time64_t *_expiry); -#endif /* KERNEL */ - #endif /* _LINUX_DNS_RESOLVER_H */ diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index f8a2245b70ac..afd9596ce636 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -183,14 +183,6 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32, /** * struct ethtool_ops - optional netdev operations - * @get_settings: DEPRECATED, use %get_link_ksettings/%set_link_ksettings - * API. Get various device settings including Ethernet link - * settings. The @cmd parameter is expected to have been cleared - * before get_settings is called. Returns a negative error code - * or zero. - * @set_settings: DEPRECATED, use %get_link_ksettings/%set_link_ksettings - * API. Set various device settings including Ethernet link - * settings. Returns a negative error code or zero. * @get_drvinfo: Report driver/device information. Should only set the * @driver, @version, @fw_version and @bus_info fields. If not * implemented, the @driver and @bus_info fields will be filled in @@ -297,19 +289,16 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32, * a TX queue has this number, return -EINVAL. If only a RX queue or a TX * queue has this number, ignore the inapplicable fields. * Returns a negative error code or zero. - * @get_link_ksettings: When defined, takes precedence over the - * %get_settings method. Get various device settings - * including Ethernet link settings. The %cmd and - * %link_mode_masks_nwords fields should be ignored (use - * %__ETHTOOL_LINK_MODE_MASK_NBITS instead of the latter), any - * change to them will be overwritten by kernel. Returns a - * negative error code or zero. - * @set_link_ksettings: When defined, takes precedence over the - * %set_settings method. 
Set various device settings including - Ethernet link settings. The %cmd and %link_mode_masks_nwords - fields should be ignored (use %__ETHTOOL_LINK_MODE_MASK_NBITS - instead of the latter), any change to them will be overwritten - by kernel. Returns a negative error code or zero. + @get_link_ksettings: Get various device settings including Ethernet link + settings. The %cmd and %link_mode_masks_nwords fields should be + ignored (use %__ETHTOOL_LINK_MODE_MASK_NBITS instead of the latter), + any change to them will be overwritten by kernel. Returns a negative + error code or zero. + @set_link_ksettings: Set various device settings including Ethernet link + settings. The %cmd and %link_mode_masks_nwords fields should be + ignored (use %__ETHTOOL_LINK_MODE_MASK_NBITS instead of the latter), + any change to them will be overwritten by kernel. Returns a negative + error code or zero. * @get_fecparam: Get the network device Forward Error Correction parameters. * @set_fecparam: Set the network device Forward Error Correction parameters. * @get_ethtool_phy_stats: Return extended statistics about the PHY device. @@ -329,8 +318,6 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32, * of the generic netdev features interface. */ struct ethtool_ops { - int (*get_settings)(struct net_device *, struct ethtool_cmd *); - int (*set_settings)(struct net_device *, struct ethtool_cmd *); void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *); int (*get_regs_len)(struct net_device *); void (*get_regs)(struct net_device *, struct ethtool_regs *, void *); diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 9c03a7d5e400..0ef67f837ae1 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -1460,13 +1460,16 @@ struct ieee80211_ht_operation { * STA can receive. Rate expressed in units of 1 Mbps. * If this field is 0 this value should not be used to * consider the highest RX data rate supported. - * The top 3 bits of this field are reserved. + * The top 3 bits of this field indicate the Maximum NSTS, total + * (a beamformee capability). * @tx_mcs_map: TX MCS map 2 bits for each stream, total 8 streams * @tx_highest: Indicates highest long GI VHT PPDU data rate * STA can transmit. Rate expressed in units of 1 Mbps. * If this field is 0 this value should not be used to * consider the highest TX data rate supported. - * The top 3 bits of this field are reserved. + * The top 2 bits of this field are reserved, the + * 3rd bit from the top indicates VHT Extended NSS BW + * Capability.
*/ struct ieee80211_vht_mcs_info { __le16 rx_mcs_map; @@ -1475,6 +1478,13 @@ struct ieee80211_vht_mcs_info { __le16 tx_highest; } __packed; +/* for rx_highest */ +#define IEEE80211_VHT_MAX_NSTS_TOTAL_SHIFT 13 +#define IEEE80211_VHT_MAX_NSTS_TOTAL_MASK (7 << IEEE80211_VHT_MAX_NSTS_TOTAL_SHIFT) + +/* for tx_highest */ +#define IEEE80211_VHT_EXT_NSS_BW_CAPABLE (1 << 13) + /** * enum ieee80211_vht_mcs_support - VHT MCS support definitions * @IEEE80211_VHT_MCS_SUPPORT_0_7: MCSes 0-7 are supported for the @@ -1545,11 +1555,11 @@ struct ieee80211_vht_operation { * struct ieee80211_he_cap_elem - HE capabilities element * * This structure is the "HE capabilities element" fixed fields as - * described in P802.11ax_D2.0 section 9.4.2.237.2 and 9.4.2.237.3 + * described in P802.11ax_D3.0 section 9.4.2.237.2 and 9.4.2.237.3 */ struct ieee80211_he_cap_elem { - u8 mac_cap_info[5]; - u8 phy_cap_info[9]; + u8 mac_cap_info[6]; + u8 phy_cap_info[11]; } __packed; #define IEEE80211_TX_RX_MCS_NSS_DESC_MAX_LEN 5 @@ -1650,6 +1660,7 @@ struct ieee80211_mu_edca_param_set { #define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ 0x00000004 #define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ 0x00000008 #define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK 0x0000000C +#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_SHIFT 2 #define IEEE80211_VHT_CAP_RXLDPC 0x00000010 #define IEEE80211_VHT_CAP_SHORT_GI_80 0x00000020 #define IEEE80211_VHT_CAP_SHORT_GI_160 0x00000040 @@ -1659,6 +1670,7 @@ struct ieee80211_mu_edca_param_set { #define IEEE80211_VHT_CAP_RXSTBC_3 0x00000300 #define IEEE80211_VHT_CAP_RXSTBC_4 0x00000400 #define IEEE80211_VHT_CAP_RXSTBC_MASK 0x00000700 +#define IEEE80211_VHT_CAP_RXSTBC_SHIFT 8 #define IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE 0x00000800 #define IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE 0x00001000 #define IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT 13 @@ -1678,6 +1690,26 @@ struct ieee80211_mu_edca_param_set { #define IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_MRQ_MFB 0x0c000000 #define IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN 0x10000000 #define IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN 0x20000000 +#define IEEE80211_VHT_CAP_EXT_NSS_BW_SHIFT 30 +#define IEEE80211_VHT_CAP_EXT_NSS_BW_MASK 0xc0000000 + +/** + * ieee80211_get_vht_max_nss - return max NSS for a given bandwidth/MCS + * @cap: VHT capabilities of the peer + * @bw: bandwidth to use + * @mcs: MCS index to use + * @ext_nss_bw_capable: indicates whether or not the local transmitter + * (rate scaling algorithm) can deal with the new logic + * (dot11VHTExtendedNSSBWCapable) + * + * Due to the VHT Extended NSS Bandwidth Support, the maximum NSS can + * vary for a given BW/MCS. This function parses the data. + * + * Note: This function is exported by cfg80211. 
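/*
 * Example call into the helper declared just below, assuming a peer's VHT
 * capabilities and a local rate-control implementation that understands the
 * extended NSS bandwidth logic (ext_nss_bw_capable = true); MCS 7 at
 * 160 MHz are arbitrary illustration values.
 */
static int example_max_nss_160mhz(struct ieee80211_vht_cap *cap)
{
	return ieee80211_get_vht_max_nss(cap, IEEE80211_VHT_CHANWIDTH_160MHZ,
					 7, true);
}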
+ */ +int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap, + enum ieee80211_vht_chanwidth bw, + int mcs, bool ext_nss_bw_capable); /* 802.11ax HE MAC capabilities */ #define IEEE80211_HE_MAC_CAP0_HTC_HE 0x01 @@ -1707,15 +1739,15 @@ struct ieee80211_mu_edca_param_set { #define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_8US 0x04 #define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US 0x08 #define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_MASK 0x0c -#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_1 0x00 -#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_2 0x10 -#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_3 0x20 -#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_4 0x30 -#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_5 0x40 -#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_6 0x50 -#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_7 0x60 -#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_8 0x70 -#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_MASK 0x70 +#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_1 0x00 +#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_2 0x10 +#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_3 0x20 +#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_4 0x30 +#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_5 0x40 +#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_6 0x50 +#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_7 0x60 +#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8 0x70 +#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_MASK 0x70 /* Link adaptation is split between byte HE_MAC_CAP1 and * HE_MAC_CAP2. It should be set only if IEEE80211_HE_MAC_CAP0_HTC_HE @@ -1729,14 +1761,13 @@ struct ieee80211_mu_edca_param_set { #define IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION 0x01 #define IEEE80211_HE_MAC_CAP2_ALL_ACK 0x02 -#define IEEE80211_HE_MAC_CAP2_UL_MU_RESP_SCHED 0x04 +#define IEEE80211_HE_MAC_CAP2_TRS 0x04 #define IEEE80211_HE_MAC_CAP2_BSR 0x08 #define IEEE80211_HE_MAC_CAP2_BCAST_TWT 0x10 #define IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP 0x20 #define IEEE80211_HE_MAC_CAP2_MU_CASCADING 0x40 #define IEEE80211_HE_MAC_CAP2_ACK_EN 0x80 -#define IEEE80211_HE_MAC_CAP3_GRP_ADDR_MULTI_STA_BA_DL_MU 0x01 #define IEEE80211_HE_MAC_CAP3_OMI_CONTROL 0x02 #define IEEE80211_HE_MAC_CAP3_OFDMA_RA 0x04 @@ -1744,25 +1775,34 @@ struct ieee80211_mu_edca_param_set { * A-MPDU Length Exponent field in the HT capabilities, VHT capabilities and the * same field in the HE capabilities.
*/ -#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_USE_VHT 0x00 -#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_VHT_1 0x08 -#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_VHT_2 0x10 -#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_RESERVED 0x18 -#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_MASK 0x18 -#define IEEE80211_HE_MAC_CAP3_A_AMSDU_FRAG 0x20 +#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_USE_VHT 0x00 +#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_1 0x08 +#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2 0x10 +#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_RESERVED 0x18 +#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK 0x18 +#define IEEE80211_HE_MAC_CAP3_AMSDU_FRAG 0x20 #define IEEE80211_HE_MAC_CAP3_FLEX_TWT_SCHED 0x40 #define IEEE80211_HE_MAC_CAP3_RX_CTRL_FRAME_TO_MULTIBSS 0x80 #define IEEE80211_HE_MAC_CAP4_BSRP_BQRP_A_MPDU_AGG 0x01 #define IEEE80211_HE_MAC_CAP4_QTP 0x02 #define IEEE80211_HE_MAC_CAP4_BQR 0x04 -#define IEEE80211_HE_MAC_CAP4_SR_RESP 0x08 +#define IEEE80211_HE_MAC_CAP4_SRP_RESP 0x08 #define IEEE80211_HE_MAC_CAP4_NDP_FB_REP 0x10 #define IEEE80211_HE_MAC_CAP4_OPS 0x20 #define IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU 0x40 +/* Multi TID agg TX is split between byte #4 and #5 + * The value is a combination of B39,B40,B41 + */ +#define IEEE80211_HE_MAC_CAP4_MULTI_TID_AGG_TX_QOS_B39 0x80 + +#define IEEE80211_HE_MAC_CAP5_MULTI_TID_AGG_TX_QOS_B40 0x01 +#define IEEE80211_HE_MAC_CAP5_MULTI_TID_AGG_TX_QOS_B41 0x02 +#define IEEE80211_HE_MAC_CAP5_SUBCHAN_SELECVITE_TRANSMISSION 0x04 +#define IEEE80211_HE_MAC_CAP5_UL_2x996_TONE_RU 0x08 +#define IEEE80211_HE_MAC_CAP5_OM_CTRL_UL_MU_DATA_DIS_RX 0x10 /* 802.11ax HE PHY capabilities */ -#define IEEE80211_HE_PHY_CAP0_DUAL_BAND 0x01 #define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G 0x02 #define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G 0x04 #define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G 0x08 @@ -1779,10 +1819,10 @@ struct ieee80211_mu_edca_param_set { #define IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A 0x10 #define IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD 0x20 #define IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US 0x40 -/* Midamble RX Max NSTS is split between byte #2 and byte #3 */ -#define IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_MAX_NSTS 0x80 +/* Midamble RX/TX Max NSTS is split between byte #2 and byte #3 */ +#define IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS 0x80 -#define IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_MAX_NSTS 0x01 +#define IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_TX_MAX_NSTS 0x01 #define IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US 0x02 #define IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ 0x04 #define IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ 0x08 @@ -1883,7 +1923,19 @@ struct ieee80211_mu_edca_param_set { #define IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU 0x04 #define IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU 0x08 #define IEEE80211_HE_PHY_CAP8_HE_ER_SU_1XLTF_AND_08_US_GI 0x10 -#define IEEE80211_HE_PHY_CAP8_MIDAMBLE_RX_2X_AND_1XLTF 0x20 +#define IEEE80211_HE_PHY_CAP8_MIDAMBLE_RX_TX_2X_AND_1XLTF 0x20 +#define IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_20MHZ 0x00 +#define IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_40MHZ 0x40 +#define IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_80MHZ 0x80 +#define IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_160_OR_80P80_MHZ 0xc0 +#define IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_MASK 0xc0 + +#define IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM 0x01 +#define IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK 0x02 +#define IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU 0x04 
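/*
 * Sketch of testing one of the capability bits defined in this block
 * against the widened ieee80211_he_cap_elem arrays (6 MAC / 11 PHY bytes
 * per the D3.0 update earlier in this patch); the helper name is
 * illustrative.
 */
static bool example_he_tx_1024qam_small_ru(const struct ieee80211_he_cap_elem *he)
{
	return he->phy_cap_info[9] &
	       IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU;
}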
+#define IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU 0x08 +#define IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB 0x10 +#define IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB 0x20 /* 802.11ax HE TX/RX MCS NSS Support */ #define IEEE80211_TX_RX_MCS_NSS_SUPP_HIGHEST_MCS_POS (3) @@ -1963,8 +2015,8 @@ ieee80211_he_ppe_size(u8 ppe_thres_hdr, const u8 *phy_cap_info) #define IEEE80211_HE_OPERATION_TWT_REQUIRED 0x00000200 #define IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK 0x000ffc00 #define IEEE80211_HE_OPERATION_RTS_THRESHOLD_OFFSET 10 -#define IEEE80211_HE_OPERATION_PARTIAL_BSS_COLOR 0x000100000 -#define IEEE80211_HE_OPERATION_VHT_OPER_INFO 0x000200000 +#define IEEE80211_HE_OPERATION_PARTIAL_BSS_COLOR 0x00100000 +#define IEEE80211_HE_OPERATION_VHT_OPER_INFO 0x00200000 #define IEEE80211_HE_OPERATION_MULTI_BSSID_AP 0x10000000 #define IEEE80211_HE_OPERATION_TX_BSSID_INDICATOR 0x20000000 #define IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED 0x40000000 diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h index 3d2996dc7d85..12e3eebf0ce6 100644 --- a/include/linux/if_tun.h +++ b/include/linux/if_tun.h @@ -16,9 +16,23 @@ #define __IF_TUN_H #include <uapi/linux/if_tun.h> +#include <uapi/linux/virtio_net.h> #define TUN_XDP_FLAG 0x1UL +#define TUN_MSG_UBUF 1 +#define TUN_MSG_PTR 2 +struct tun_msg_ctl { + unsigned short type; + unsigned short num; + void *ptr; +}; + +struct tun_xdp_hdr { + int buflen; + struct virtio_net_hdr gso; +}; + #if defined(CONFIG_TUN) || defined(CONFIG_TUN_MODULE) struct socket *tun_get_socket(struct file *); struct ptr_ring *tun_get_tx_ring(struct file *file); diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 8415bf1a9776..495e834c1367 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -274,7 +274,8 @@ struct ipv6_pinfo { */ dontfrag:1, autoflowlabel:1, - autoflowlabel_set:1; + autoflowlabel_set:1, + mc_all:1; __u8 min_hopcount; __u8 tclass; __be32 rcv_flowinfo; diff --git a/include/linux/linkmode.h b/include/linux/linkmode.h new file mode 100644 index 000000000000..22443d7fb5cd --- /dev/null +++ b/include/linux/linkmode.h @@ -0,0 +1,76 @@ +#ifndef __LINKMODE_H +#define __LINKMODE_H + +#include <linux/bitmap.h> +#include <linux/ethtool.h> +#include <uapi/linux/ethtool.h> + +static inline void linkmode_zero(unsigned long *dst) +{ + bitmap_zero(dst, __ETHTOOL_LINK_MODE_MASK_NBITS); +} + +static inline void linkmode_copy(unsigned long *dst, const unsigned long *src) +{ + bitmap_copy(dst, src, __ETHTOOL_LINK_MODE_MASK_NBITS); +} + +static inline void linkmode_and(unsigned long *dst, const unsigned long *a, + const unsigned long *b) +{ + bitmap_and(dst, a, b, __ETHTOOL_LINK_MODE_MASK_NBITS); +} + +static inline void linkmode_or(unsigned long *dst, const unsigned long *a, + const unsigned long *b) +{ + bitmap_or(dst, a, b, __ETHTOOL_LINK_MODE_MASK_NBITS); +} + +static inline bool linkmode_empty(const unsigned long *src) +{ + return bitmap_empty(src, __ETHTOOL_LINK_MODE_MASK_NBITS); +} + +static inline int linkmode_andnot(unsigned long *dst, const unsigned long *src1, + const unsigned long *src2) +{ + return bitmap_andnot(dst, src1, src2, __ETHTOOL_LINK_MODE_MASK_NBITS); +} + +static inline void linkmode_set_bit(int nr, volatile unsigned long *addr) +{ + __set_bit(nr, addr); +} + +static inline void linkmode_set_bit_array(const int *array, int array_size, + unsigned long *addr) +{ + int i; + + for (i = 0; i < array_size; i++) + linkmode_set_bit(array[i], addr); +} + +static inline void linkmode_clear_bit(int nr, 
volatile unsigned long *addr) +{ + __clear_bit(nr, addr); +} + +static inline void linkmode_change_bit(int nr, volatile unsigned long *addr) +{ + __change_bit(nr, addr); +} + +static inline int linkmode_test_bit(int nr, volatile unsigned long *addr) +{ + return test_bit(nr, addr); +} + +static inline int linkmode_equal(const unsigned long *src1, + const unsigned long *src2) +{ + return bitmap_equal(src1, src2, __ETHTOOL_LINK_MODE_MASK_NBITS); +} + +#endif /* __LINKMODE_H */ diff --git a/include/linux/mii.h b/include/linux/mii.h index 55000ee5c6ad..2da85b02e1c0 100644 --- a/include/linux/mii.h +++ b/include/linux/mii.h @@ -10,6 +10,7 @@ #include <linux/if.h> +#include <linux/linkmode.h> #include <uapi/linux/mii.h> struct ethtool_cmd; @@ -132,6 +133,34 @@ static inline u32 ethtool_adv_to_mii_adv_t(u32 ethadv) } /** + * linkmode_adv_to_mii_adv_t + * @advertising: the linkmode advertisement settings + * + * A small helper function that translates linkmode advertisement + * settings to phy autonegotiation advertisements for the + * MII_ADVERTISE register. + */ +static inline u32 linkmode_adv_to_mii_adv_t(unsigned long *advertising) +{ + u32 result = 0; + + if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, advertising)) + result |= ADVERTISE_10HALF; + if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, advertising)) + result |= ADVERTISE_10FULL; + if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, advertising)) + result |= ADVERTISE_100HALF; + if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, advertising)) + result |= ADVERTISE_100FULL; + if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertising)) + result |= ADVERTISE_PAUSE_CAP; + if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertising)) + result |= ADVERTISE_PAUSE_ASYM; + + return result; +} + +/** * mii_adv_to_ethtool_adv_t * @adv: value of the MII_ADVERTISE register * @@ -179,6 +208,28 @@ static inline u32 ethtool_adv_to_mii_ctrl1000_t(u32 ethadv) } /** + * linkmode_adv_to_mii_ctrl1000_t + * @advertising: the linkmode advertisement settings + * + * A small helper function that translates linkmode advertisement + * settings to phy autonegotiation advertisements for the + * MII_CTRL1000 register when in 1000T mode. + */ +static inline u32 linkmode_adv_to_mii_ctrl1000_t(unsigned long *advertising) +{ + u32 result = 0; + + if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, + advertising)) + result |= ADVERTISE_1000HALF; + if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + advertising)) + result |= ADVERTISE_1000FULL; + + return result; +} + +/** * mii_ctrl1000_to_ethtool_adv_t * @adv: value of the MII_CTRL1000 register * @@ -303,6 +354,56 @@ static inline u32 mii_lpa_to_ethtool_lpa_x(u32 lpa) } /** + * mii_adv_to_linkmode_adv_t + * @advertising: pointer to destination link mode. + * @adv: value of the MII_ADVERTISE register + * + * A small helper function that translates MII_ADVERTISE bits + * to linkmode advertisement settings.
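+ * + * A minimal usage sketch (editorial illustration, not part of the original patch; "phydev" stands in for any struct phy_device pointer): + * + * __ETHTOOL_DECLARE_LINK_MODE_MASK(adv); + * int val = phy_read(phydev, MII_ADVERTISE); + * + * if (val >= 0) + * mii_adv_to_linkmode_adv_t(adv, val);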
+ */ +static inline void mii_adv_to_linkmode_adv_t(unsigned long *advertising, + u32 adv) +{ + linkmode_zero(advertising); + + if (adv & ADVERTISE_10HALF) + linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, + advertising); + if (adv & ADVERTISE_10FULL) + linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, + advertising); + if (adv & ADVERTISE_100HALF) + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, + advertising); + if (adv & ADVERTISE_100FULL) + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, + advertising); + if (adv & ADVERTISE_PAUSE_CAP) + linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertising); + if (adv & ADVERTISE_PAUSE_ASYM) + linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertising); +} + +/** + * ethtool_adv_to_lcl_adv_t + * @advertising: pointer to ethtool advertising + * + * A small helper function that translates ethtool advertising to LCL + * (local) pause capabilities. + */ +static inline u32 ethtool_adv_to_lcl_adv_t(u32 advertising) +{ + u32 lcl_adv = 0; + + if (advertising & ADVERTISED_Pause) + lcl_adv |= ADVERTISE_PAUSE_CAP; + if (advertising & ADVERTISED_Asym_Pause) + lcl_adv |= ADVERTISE_PAUSE_ASYM; + + return lcl_adv; +} + +/** * mii_advertise_flowctrl - get flow control advertisement flags * @cap: Flow control capabilities (FLOW_CTRL_RX, FLOW_CTRL_TX or both) */ diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 11fa4e66afc5..e9b502d5bcc1 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -504,6 +504,10 @@ struct health_buffer { __be16 ext_synd; }; +enum mlx5_cmd_addr_l_sz_offset { + MLX5_NIC_IFC_OFFSET = 8, +}; + struct mlx5_init_seg { __be32 fw_rev; __be32 cmdif_rev_fw_sub; diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 66d94b4557cf..26a92462f4ce 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -583,10 +583,11 @@ struct mlx5_irq_info { }; struct mlx5_fc_stats { - struct rb_root counters; - struct list_head addlist; - /* protect addlist add/splice operations */ - spinlock_t addlist_lock; + spinlock_t counters_idr_lock; /* protects counters_idr */ + struct idr counters_idr; + struct list_head counters; + struct llist_head addlist; + struct llist_head dellist; struct workqueue_struct *wq; struct delayed_work work; @@ -804,7 +805,7 @@ struct mlx5_pps { }; struct mlx5_clock { - rwlock_t lock; + seqlock_t lock; struct cyclecounter cycles; struct timecounter tc; struct hwtstamp_config hwtstamp_config; @@ -837,6 +838,7 @@ struct mlx5_core_dev { u32 fpga[MLX5_ST_SZ_DW(fpga_cap)]; u32 qcam[MLX5_ST_SZ_DW(qcam_reg)]; } caps; + u64 sys_image_guid; phys_addr_t iseg_base; struct mlx5_init_seg __iomem *iseg; enum mlx5_device_state state; diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index f043d65b9bac..6e8a882052b1 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -896,7 +896,8 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 log_max_mkey[0x6]; u8 reserved_at_f0[0x8]; u8 dump_fill_mkey[0x1]; - u8 reserved_at_f9[0x3]; + u8 reserved_at_f9[0x2]; + u8 fast_teardown[0x1]; u8 log_max_eq[0x4]; u8 max_indirection[0x8]; @@ -3352,12 +3353,13 @@ struct mlx5_ifc_teardown_hca_out_bits { u8 reserved_at_40[0x3f]; - u8 force_state[0x1]; + u8 state[0x1]; }; enum { MLX5_TEARDOWN_HCA_IN_PROFILE_GRACEFUL_CLOSE = 0x0, MLX5_TEARDOWN_HCA_IN_PROFILE_FORCE_CLOSE = 0x1, + MLX5_TEARDOWN_HCA_IN_PROFILE_PREPARE_FAST_TEARDOWN = 0x2, }; struct mlx5_ifc_teardown_hca_in_bits { diff --git a/include/linux/mlx5/vport.h
b/include/linux/mlx5/vport.h index 7e7c6dfcfb09..9c694808c212 100644 --- a/include/linux/mlx5/vport.h +++ b/include/linux/mlx5/vport.h @@ -121,4 +121,6 @@ int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev, bool *status); int mlx5_nic_vport_affiliate_multiport(struct mlx5_core_dev *master_mdev, struct mlx5_core_dev *port_mdev); int mlx5_nic_vport_unaffiliate_multiport(struct mlx5_core_dev *port_mdev); + +u64 mlx5_query_nic_system_image_guid(struct mlx5_core_dev *mdev); #endif /* __MLX5_VPORT_H__ */ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index d837dad24b4c..22e4ef7bb701 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -535,6 +535,32 @@ static inline void napi_synchronize(const struct napi_struct *n) barrier(); } +/** + * napi_if_scheduled_mark_missed - if napi is running, set the + * NAPIF_STATE_MISSED + * @n: NAPI context + * + * If napi is running, set the NAPIF_STATE_MISSED, and return true if + * NAPI is scheduled. + **/ +static inline bool napi_if_scheduled_mark_missed(struct napi_struct *n) +{ + unsigned long val, new; + + do { + val = READ_ONCE(n->state); + if (val & NAPIF_STATE_DISABLE) + return true; + + if (!(val & NAPIF_STATE_SCHED)) + return false; + + new = val | NAPIF_STATE_MISSED; + } while (cmpxchg(&n->state, val, new) != val); + + return true; +} + enum netdev_queue_state_t { __QUEUE_STATE_DRV_XOFF, __QUEUE_STATE_STACK_XOFF, @@ -583,6 +609,9 @@ struct netdev_queue { /* Subordinate device that the queue has been assigned to */ struct net_device *sb_dev; +#ifdef CONFIG_XDP_SOCKETS + struct xdp_umem *umem; +#endif /* * write-mostly part */ @@ -712,6 +741,9 @@ struct netdev_rx_queue { struct kobject kobj; struct net_device *dev; struct xdp_rxq_info xdp_rxq; +#ifdef CONFIG_XDP_SOCKETS + struct xdp_umem *umem; +#endif } ____cacheline_aligned_in_smp; /* @@ -1976,7 +2008,6 @@ struct net_device { struct pcpu_lstats __percpu *lstats; struct pcpu_sw_netstats __percpu *tstats; struct pcpu_dstats __percpu *dstats; - struct pcpu_vstats __percpu *vstats; }; #if IS_ENABLED(CONFIG_GARP) @@ -2320,6 +2351,7 @@ static inline struct sk_buff *call_gro_receive_sk(gro_receive_sk_t cb, struct packet_type { __be16 type; /* This is really htons(ether_type). */ + bool ignore_outgoing; struct net_device *dev; /* NULL is wildcarded here */ int (*func) (struct sk_buff *, struct net_device *, @@ -2358,6 +2390,12 @@ struct pcpu_sw_netstats { struct u64_stats_sync syncp; }; +struct pcpu_lstats { + u64 packets; + u64 bytes; + struct u64_stats_sync syncp; +}; + #define __netdev_alloc_pcpu_stats(type, gfp) \ ({ \ typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\ diff --git a/include/linux/netfilter/nf_conntrack_common.h b/include/linux/netfilter/nf_conntrack_common.h index 03097fa70975..e142b2b5f1ea 100644 --- a/include/linux/netfilter/nf_conntrack_common.h +++ b/include/linux/netfilter/nf_conntrack_common.h @@ -19,7 +19,4 @@ struct ip_conntrack_stat { unsigned int search_restart; }; -/* call to create an explicit dependency on nf_conntrack. 
*/ -void need_conntrack(void); - #endif /* _NF_CONNTRACK_COMMON_H */ diff --git a/include/linux/netlink.h b/include/linux/netlink.h index 71f121b66ca8..72580f1a72a2 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -176,8 +176,10 @@ struct netlink_callback { void *data; /* the module that dump function belong to */ struct module *module; + struct netlink_ext_ack *extack; u16 family; u16 min_dump_alloc; + bool strict_check; unsigned int prev_seq, seq; long args[6]; }; diff --git a/include/linux/phy.h b/include/linux/phy.h index cd6f637cbbfb..3ea87f774a76 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -19,6 +19,7 @@ #include <linux/compiler.h> #include <linux/spinlock.h> #include <linux/ethtool.h> +#include <linux/linkmode.h> #include <linux/mdio.h> #include <linux/mii.h> #include <linux/module.h> @@ -41,13 +42,21 @@ #define PHY_1000BT_FEATURES (SUPPORTED_1000baseT_Half | \ SUPPORTED_1000baseT_Full) -#define PHY_BASIC_FEATURES (PHY_10BT_FEATURES | \ - PHY_100BT_FEATURES | \ - PHY_DEFAULT_FEATURES) - -#define PHY_GBIT_FEATURES (PHY_BASIC_FEATURES | \ - PHY_1000BT_FEATURES) - +extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_basic_features) __ro_after_init; +extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_basic_t1_features) __ro_after_init; +extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_features) __ro_after_init; +extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_fibre_features) __ro_after_init; +extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_all_ports_features) __ro_after_init; +extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_features) __ro_after_init; +extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init; + +#define PHY_BASIC_FEATURES ((unsigned long *)&phy_basic_features) +#define PHY_BASIC_T1_FEATURES ((unsigned long *)&phy_basic_t1_features) +#define PHY_GBIT_FEATURES ((unsigned long *)&phy_gbit_features) +#define PHY_GBIT_FIBRE_FEATURES ((unsigned long *)&phy_gbit_fibre_features) +#define PHY_GBIT_ALL_PORTS_FEATURES ((unsigned long *)&phy_gbit_all_ports_features) +#define PHY_10GBIT_FEATURES ((unsigned long *)&phy_10gbit_features) +#define PHY_10GBIT_FULL_FEATURES ((unsigned long *)&phy_10gbit_full_features) /* * Set phydev->irq to PHY_POLL if interrupts are not supported, @@ -509,7 +518,7 @@ struct phy_driver { u32 phy_id; char *name; u32 phy_id_mask; - u32 features; + const unsigned long * const features; u32 flags; const void *driver_data; @@ -967,6 +976,12 @@ static inline void phy_device_reset(struct phy_device *phydev, int value) #define phydev_err(_phydev, format, args...) \ dev_err(&_phydev->mdio.dev, format, ##args) +#define phydev_info(_phydev, format, args...) \ + dev_info(&_phydev->mdio.dev, format, ##args) + +#define phydev_warn(_phydev, format, args...) \ + dev_warn(&_phydev->mdio.dev, format, ##args) + #define phydev_dbg(_phydev, format, args...) 
\ dev_dbg(&_phydev->mdio.dev, format, ##args) @@ -1039,7 +1054,7 @@ void phy_change_work(struct work_struct *work); void phy_mac_interrupt(struct phy_device *phydev); void phy_start_machine(struct phy_device *phydev); void phy_stop_machine(struct phy_device *phydev); -void phy_trigger_machine(struct phy_device *phydev, bool sync); +void phy_trigger_machine(struct phy_device *phydev); int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd); void phy_ethtool_ksettings_get(struct phy_device *phydev, struct ethtool_link_ksettings *cmd); @@ -1049,6 +1064,14 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd); int phy_start_interrupts(struct phy_device *phydev); void phy_print_status(struct phy_device *phydev); int phy_set_max_speed(struct phy_device *phydev, u32 max_speed); +void phy_remove_link_mode(struct phy_device *phydev, u32 link_mode); +void phy_support_sym_pause(struct phy_device *phydev); +void phy_support_asym_pause(struct phy_device *phydev); +void phy_set_sym_pause(struct phy_device *phydev, bool rx, bool tx, + bool autoneg); +void phy_set_asym_pause(struct phy_device *phydev, bool rx, bool tx); +bool phy_validate_pause(struct phy_device *phydev, + struct ethtool_pauseparam *pp); int phy_register_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask, int (*run)(struct phy_device *)); diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h index 9713aebdd348..03b319f89a34 100644 --- a/include/linux/phy/phy.h +++ b/include/linux/phy/phy.h @@ -37,9 +37,11 @@ enum phy_mode { PHY_MODE_USB_OTG, PHY_MODE_SGMII, PHY_MODE_2500SGMII, + PHY_MODE_QSGMII, PHY_MODE_10GKR, PHY_MODE_UFS_HS_A, PHY_MODE_UFS_HS_B, + PHY_MODE_PCIE, }; /** diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h index 0081fa6d1268..03f59a28fefd 100644 --- a/include/linux/qed/common_hsi.h +++ b/include/linux/qed/common_hsi.h @@ -110,7 +110,7 @@ #define FW_MAJOR_VERSION 8 #define FW_MINOR_VERSION 37 -#define FW_REVISION_VERSION 2 +#define FW_REVISION_VERSION 7 #define FW_ENGINEERING_VERSION 0 /***********************/ @@ -931,12 +931,12 @@ struct db_rdma_dpm_params { #define DB_RDMA_DPM_PARAMS_WQE_SIZE_SHIFT 16 #define DB_RDMA_DPM_PARAMS_RESERVED0_MASK 0x1 #define DB_RDMA_DPM_PARAMS_RESERVED0_SHIFT 27 -#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_MASK 0x1 -#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_SHIFT 28 +#define DB_RDMA_DPM_PARAMS_ACK_REQUEST_MASK 0x1 +#define DB_RDMA_DPM_PARAMS_ACK_REQUEST_SHIFT 28 #define DB_RDMA_DPM_PARAMS_S_FLG_MASK 0x1 #define DB_RDMA_DPM_PARAMS_S_FLG_SHIFT 29 -#define DB_RDMA_DPM_PARAMS_RESERVED1_MASK 0x1 -#define DB_RDMA_DPM_PARAMS_RESERVED1_SHIFT 30 +#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_MASK 0x1 +#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_SHIFT 30 #define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_MASK 0x1 #define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_SHIFT 31 }; diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h index b34c573f2b30..66aba505ec56 100644 --- a/include/linux/qed/iscsi_common.h +++ b/include/linux/qed/iscsi_common.h @@ -896,7 +896,7 @@ struct e4_ustorm_iscsi_task_ag_ctx { __le32 exp_cont_len; __le32 total_data_acked; __le32 exp_data_acked; - u8 next_tid_valid; + u8 byte2; u8 byte3; __le16 word1; __le16 next_tid; diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 8cd34645e892..dee3c9c744f7 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -670,10 +670,11 @@ enum qed_link_mode_bits { QED_LM_1000baseT_Half_BIT = BIT(4), 
QED_LM_1000baseT_Full_BIT = BIT(5), QED_LM_10000baseKR_Full_BIT = BIT(6), - QED_LM_25000baseKR_Full_BIT = BIT(7), - QED_LM_40000baseLR4_Full_BIT = BIT(8), - QED_LM_50000baseKR2_Full_BIT = BIT(9), - QED_LM_100000baseKR4_Full_BIT = BIT(10), + QED_LM_20000baseKR2_Full_BIT = BIT(7), + QED_LM_25000baseKR_Full_BIT = BIT(8), + QED_LM_40000baseLR4_Full_BIT = BIT(9), + QED_LM_50000baseKR2_Full_BIT = BIT(10), + QED_LM_100000baseKR4_Full_BIT = BIT(11), QED_LM_COUNT = 11 }; diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index 5225832bd6ff..bb9cb84114c1 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -6,6 +6,7 @@ #include <linux/mutex.h> #include <linux/netdevice.h> #include <linux/wait.h> +#include <linux/refcount.h> #include <uapi/linux/rtnetlink.h> extern int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, u32 group, int echo); @@ -34,6 +35,7 @@ extern void rtnl_unlock(void); extern int rtnl_trylock(void); extern int rtnl_is_locked(void); extern int rtnl_lock_killable(void); +extern bool refcount_dec_and_rtnl_lock(refcount_t *r); extern wait_queue_head_t netdev_unregistering_wq; extern struct rw_semaphore pernet_ops_rwsem; @@ -83,6 +85,11 @@ static inline struct netdev_queue *dev_ingress_queue(struct net_device *dev) return rtnl_dereference(dev->ingress_queue); } +static inline struct netdev_queue *dev_ingress_queue_rcu(struct net_device *dev) +{ + return rcu_dereference(dev->ingress_queue); +} + struct netdev_queue *dev_ingress_queue_create(struct net_device *dev); #ifdef CONFIG_NET_INGRESS diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 17a13e4785fc..119d092c6b13 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -243,6 +243,8 @@ struct scatterlist; struct pipe_inode_info; struct iov_iter; struct napi_struct; +struct bpf_prog; +union bpf_attr; #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) struct nf_conntrack { @@ -689,7 +691,7 @@ struct sk_buff { union { ktime_t tstamp; - u64 skb_mstamp; + u64 skb_mstamp_ns; /* earliest departure time */ }; /* * This is the control buffer. 
It is free to use for every @@ -1080,11 +1082,6 @@ static inline int skb_pad(struct sk_buff *skb, int pad) } #define dev_kfree_skb(a) consume_skb(a) -int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb, - int getfrag(void *from, char *to, int offset, - int len, int odd, struct sk_buff *skb), - void *from, int length); - int skb_append_pagefrags(struct sk_buff *skb, struct page *page, int offset, size_t size); @@ -1192,6 +1189,24 @@ void skb_flow_dissector_init(struct flow_dissector *flow_dissector, const struct flow_dissector_key *key, unsigned int key_count); +#ifdef CONFIG_NET +int skb_flow_dissector_bpf_prog_attach(const union bpf_attr *attr, + struct bpf_prog *prog); + +int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr); +#else +static inline int skb_flow_dissector_bpf_prog_attach(const union bpf_attr *attr, + struct bpf_prog *prog) +{ + return -EOPNOTSUPP; +} + +static inline int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr) +{ + return -EOPNOTSUPP; +} +#endif + bool __skb_flow_dissect(const struct sk_buff *skb, struct flow_dissector *flow_dissector, void *target_container, @@ -1339,6 +1354,17 @@ static inline void skb_zcopy_abort(struct sk_buff *skb) } } +static inline void skb_mark_not_on_list(struct sk_buff *skb) +{ + skb->next = NULL; +} + +static inline void skb_list_del_init(struct sk_buff *skb) +{ + __list_del_entry(&skb->list); + skb_mark_not_on_list(skb); +} + /** * skb_queue_empty - check if a queue is empty * @list: queue head @@ -1593,6 +1619,17 @@ static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_) } /** + * __skb_peek - peek at the head of a non-empty &sk_buff_head + * @list_: list to peek at + * + * Like skb_peek(), but the caller knows that the list is not empty. + */ +static inline struct sk_buff *__skb_peek(const struct sk_buff_head *list_) +{ + return list_->next; +} + +/** * skb_peek_next - peek skb following the given one from a queue * @skb: skb to start from * @list_: list to peek at diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 263e37271afd..848f5b25e178 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -248,6 +248,8 @@ struct tcp_sock { syn_smc:1; /* SYN includes SMC */ u32 tlp_high_seq; /* snd_nxt at the time of TLP retransmit. 
*/ + u64 tcp_wstamp_ns; /* departure time for next sent data packet */ + /* RTT measurement */ u64 tcp_mstamp; /* most recent packet received/sent */ u32 srtt_us; /* smoothed round trip time << 3 in usecs */ diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h index e2ec3582e549..d8860f2d0976 100644 --- a/include/linux/usb/usbnet.h +++ b/include/linux/usb/usbnet.h @@ -28,7 +28,7 @@ struct usbnet { /* housekeeping */ struct usb_device *udev; struct usb_interface *intf; - struct driver_info *driver_info; + const struct driver_info *driver_info; const char *driver_name; void *driver_priv; wait_queue_head_t wait; diff --git a/include/net/act_api.h b/include/net/act_api.h index 970303448c90..05c7df41d737 100644 --- a/include/net/act_api.h +++ b/include/net/act_api.h @@ -13,7 +13,7 @@ #include <net/netns/generic.h> struct tcf_idrinfo { - spinlock_t lock; + struct mutex lock; struct idr action_idr; }; @@ -31,10 +31,12 @@ struct tc_action { int tcfa_action; struct tcf_t tcfa_tm; struct gnet_stats_basic_packed tcfa_bstats; + struct gnet_stats_basic_packed tcfa_bstats_hw; struct gnet_stats_queue tcfa_qstats; struct net_rate_estimator __rcu *tcfa_rate_est; spinlock_t tcfa_lock; struct gnet_stats_basic_cpu __percpu *cpu_bstats; + struct gnet_stats_basic_cpu __percpu *cpu_bstats_hw; struct gnet_stats_queue __percpu *cpu_qstats; struct tc_cookie __rcu *act_cookie; struct tcf_chain *goto_chain; @@ -85,8 +87,7 @@ struct tc_action_ops { struct tcf_result *); /* called under RCU BH lock*/ int (*dump)(struct sk_buff *, struct tc_action *, int, int); void (*cleanup)(struct tc_action *); - int (*lookup)(struct net *net, struct tc_action **a, u32 index, - struct netlink_ext_ack *extack); + int (*lookup)(struct net *net, struct tc_action **a, u32 index); int (*init)(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action **act, int ovr, int bind, bool rtnl_held, @@ -95,7 +96,7 @@ struct tc_action_ops { struct netlink_callback *, int, const struct tc_action_ops *, struct netlink_ext_ack *); - void (*stats_update)(struct tc_action *, u64, u32, u64); + void (*stats_update)(struct tc_action *, u64, u32, u64, bool); size_t (*get_fill_size)(const struct tc_action *act); struct net_device *(*get_dev)(const struct tc_action *a); void (*put_dev)(struct net_device *dev); @@ -116,7 +117,7 @@ int tc_action_net_init(struct tc_action_net *tn, if (!tn->idrinfo) return -ENOMEM; tn->ops = ops; - spin_lock_init(&tn->idrinfo->lock); + mutex_init(&tn->idrinfo->lock); idr_init(&tn->idrinfo->action_idr); return err; } @@ -183,13 +184,13 @@ int tcf_action_copy_stats(struct sk_buff *, struct tc_action *, int); #endif /* CONFIG_NET_CLS_ACT */ static inline void tcf_action_stats_update(struct tc_action *a, u64 bytes, - u64 packets, u64 lastuse) + u64 packets, u64 lastuse, bool hw) { #ifdef CONFIG_NET_CLS_ACT if (!a->ops->stats_update) return; - a->ops->stats_update(a, bytes, packets, lastuse); + a->ops->stats_update(a, bytes, packets, lastuse, hw); #endif } diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h index f53edb3754bc..de587948042a 100644 --- a/include/net/af_rxrpc.h +++ b/include/net/af_rxrpc.h @@ -13,6 +13,7 @@ #define _NET_RXRPC_H #include <linux/rxrpc.h> +#include <linux/ktime.h> struct key; struct sock; @@ -77,5 +78,8 @@ int rxrpc_kernel_retry_call(struct socket *, struct rxrpc_call *, int rxrpc_kernel_check_call(struct socket *, struct rxrpc_call *, enum rxrpc_call_completion *, u32 *); u32 rxrpc_kernel_check_life(struct socket *, struct rxrpc_call *); +u32 
rxrpc_kernel_get_epoch(struct socket *, struct rxrpc_call *); +bool rxrpc_kernel_get_reply_time(struct socket *, struct rxrpc_call *, + ktime_t *); #endif /* _NET_RXRPC_H */ diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index cdd9f1fe7cfa..c36dc1e20556 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -1517,6 +1517,20 @@ struct hci_cp_le_write_def_data_len { __le16 tx_time; } __packed; +#define HCI_OP_LE_ADD_TO_RESOLV_LIST 0x2027 +struct hci_cp_le_add_to_resolv_list { + __u8 bdaddr_type; + bdaddr_t bdaddr; + __u8 peer_irk[16]; + __u8 local_irk[16]; +} __packed; + +#define HCI_OP_LE_DEL_FROM_RESOLV_LIST 0x2028 +struct hci_cp_le_del_from_resolv_list { + __u8 bdaddr_type; + bdaddr_t bdaddr; +} __packed; + #define HCI_OP_LE_CLEAR_RESOLV_LIST 0x2029 #define HCI_OP_LE_READ_RESOLV_LIST_SIZE 0x202a diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 0db1b9b428b7..e5ea633ea368 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -103,6 +103,14 @@ struct bdaddr_list { u8 bdaddr_type; }; +struct bdaddr_list_with_irk { + struct list_head list; + bdaddr_t bdaddr; + u8 bdaddr_type; + u8 peer_irk[16]; + u8 local_irk[16]; +}; + struct bt_uuid { struct list_head list; u8 uuid[16]; @@ -259,6 +267,8 @@ struct hci_dev { __u16 le_max_tx_time; __u16 le_max_rx_len; __u16 le_max_rx_time; + __u8 le_max_key_size; + __u8 le_min_key_size; __u16 discov_interleaved_timeout; __u16 conn_info_min_age; __u16 conn_info_max_age; @@ -1058,8 +1068,15 @@ int hci_inquiry(void __user *arg); struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *list, bdaddr_t *bdaddr, u8 type); +struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk( + struct list_head *list, bdaddr_t *bdaddr, + u8 type); int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type); +int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr, + u8 type, u8 *peer_irk, u8 *local_irk); int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type); +int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr, + u8 type); void hci_bdaddr_list_clear(struct list_head *list); struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev, diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h index 0697fd413087..3555440e14fc 100644 --- a/include/net/bluetooth/l2cap.h +++ b/include/net/bluetooth/l2cap.h @@ -455,9 +455,6 @@ struct l2cap_conn_param_update_rsp { #define L2CAP_CONN_PARAM_ACCEPTED 0x0000 #define L2CAP_CONN_PARAM_REJECTED 0x0001 -#define L2CAP_LE_MAX_CREDITS 10 -#define L2CAP_LE_DEFAULT_MPS 230 - struct l2cap_le_conn_req { __le16 psm; __le16 scid; diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 4de121e24ce5..1fa41b7a1be3 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -149,7 +149,7 @@ enum ieee80211_channel_flags { */ struct ieee80211_channel { enum nl80211_band band; - u16 center_freq; + u32 center_freq; u16 hw_value; u32 flags; int max_antenna_gain; @@ -775,6 +775,12 @@ struct cfg80211_crypto_settings { * @assocresp_ies_len: length of assocresp_ies in octets * @probe_resp_len: length of probe response template (@probe_resp) * @probe_resp: probe response template (AP mode only) + * @ftm_responder: enable FTM responder functionality; -1 for no change + * (which also implies no change in LCI/civic location data) + * @lci: LCI subelement content + * @civicloc: Civic location subelement content + * @lci_len: LCI 
data length + * @civicloc_len: Civic location data length */ struct cfg80211_beacon_data { const u8 *head, *tail; @@ -782,12 +788,17 @@ struct cfg80211_beacon_data { const u8 *proberesp_ies; const u8 *assocresp_ies; const u8 *probe_resp; + const u8 *lci; + const u8 *civicloc; + s8 ftm_responder; size_t head_len, tail_len; size_t beacon_ies_len; size_t proberesp_ies_len; size_t assocresp_ies_len; size_t probe_resp_len; + size_t lci_len; + size_t civicloc_len; }; struct mac_address { @@ -849,6 +860,7 @@ struct cfg80211_bitrate_mask { * @beacon_rate: bitrate to be used for beacons * @ht_cap: HT capabilities (or %NULL if HT isn't enabled) * @vht_cap: VHT capabilities (or %NULL if VHT isn't enabled) + * @he_cap: HE capabilities (or %NULL if HE isn't enabled) * @ht_required: stations must support HT * @vht_required: stations must support VHT */ @@ -874,6 +886,7 @@ struct cfg80211_ap_settings { const struct ieee80211_ht_cap *ht_cap; const struct ieee80211_vht_cap *vht_cap; + const struct ieee80211_he_cap_elem *he_cap; bool ht_required, vht_required; }; @@ -1290,6 +1303,10 @@ struct cfg80211_tid_stats { * @ack_signal: signal strength (in dBm) of the last ACK frame. * @avg_ack_signal: average rssi value of ack packet for the no of msdu's has * been sent. + * @rx_mpdu_count: number of MPDUs received from this station + * @fcs_err_count: number of packets (MPDUs) received from this station with + * an FCS error. This counter should be incremented only when TA of the + * received packet with an FCS error matches the peer MAC address. */ struct station_info { u64 filled; @@ -1336,6 +1353,9 @@ struct station_info { struct cfg80211_tid_stats *pertid; s8 ack_signal; s8 avg_ack_signal; + + u32 rx_mpdu_count; + u32 fcs_err_count; }; #if IS_ENABLED(CONFIG_CFG80211) @@ -2795,6 +2815,40 @@ struct cfg80211_external_auth_params { }; /** + * struct cfg80211_ftm_responder_stats - FTM responder statistics + * + * @filled: bitflag of flags using the bits of &enum nl80211_ftm_stats to + * indicate the relevant values in this struct for them + * @success_num: number of FTM sessions in which all frames were successfully + * answered + * @partial_num: number of FTM sessions in which only part of the frames were + * successfully answered + * @failed_num: number of failed FTM sessions + * @asap_num: number of ASAP FTM sessions + * @non_asap_num: number of non-ASAP FTM sessions + * @total_duration_ms: total session durations - gives an indication + * of how much time the responder was busy + * @unknown_triggers_num: number of unknown FTM triggers - triggers from + * initiators that didn't successfully finish the negotiation phase with + * the responder + * @reschedule_requests_num: number of FTM reschedule requests - initiator asks + * for a new scheduling although it already has a scheduled FTM slot + * @out_of_window_triggers_num: total FTM triggers out of scheduled window + */ +struct cfg80211_ftm_responder_stats { + u32 filled; + u32 success_num; + u32 partial_num; + u32 failed_num; + u32 asap_num; + u32 non_asap_num; + u64 total_duration_ms; + u32 unknown_triggers_num; + u32 reschedule_requests_num; + u32 out_of_window_triggers_num; +}; + +/** * struct cfg80211_ops - backend description for wireless configuration * * This struct is registered by fullmac card drivers and/or wireless stacks @@ -3126,6 +3180,9 @@ struct cfg80211_external_auth_params { * * @tx_control_port: TX a control port frame (EAPoL). The noencrypt parameter * tells the driver that the frame should not be encrypted.
+ * + * @get_ftm_responder_stats: Retrieve FTM responder statistics, if available. + * Statistics should be cumulative, currently no way to reset is provided. */ struct cfg80211_ops { int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow); @@ -3431,6 +3488,10 @@ struct cfg80211_ops { const u8 *buf, size_t len, const u8 *dest, const __be16 proto, const bool noencrypt); + + int (*get_ftm_responder_stats)(struct wiphy *wiphy, + struct net_device *dev, + struct cfg80211_ftm_responder_stats *ftm_stats); }; /* @@ -3958,7 +4019,6 @@ struct wiphy_iftype_ext_capab { * by the driver in the .connect() callback. The bit position maps to the * attribute indices defined in &enum nl80211_bss_select_attr. * - * @cookie_counter: unique generic cookie counter, used to identify objects. * @nan_supported_bands: bands supported by the device in NAN mode, a * bitmap of &enum nl80211_band values. For instance, for * NL80211_BAND_2GHZ, bit 0 would be set @@ -4097,8 +4157,6 @@ struct wiphy { u32 bss_select_support; - u64 cookie_counter; - u8 nan_supported_bands; u32 txq_limit; @@ -4733,6 +4791,17 @@ const u8 *cfg80211_find_vendor_ie(unsigned int oui, int oui_type, const u8 *ies, int len); /** + * cfg80211_send_layer2_update - send layer 2 update frame + * + * @dev: network device + * @addr: STA MAC address + * + * Wireless drivers can use this function to update forwarding tables in bridge + * devices upon STA association. + */ +void cfg80211_send_layer2_update(struct net_device *dev, const u8 *addr); + +/** * DOC: Regulatory enforcement infrastructure * * TODO diff --git a/include/net/devlink.h b/include/net/devlink.h index 99efc156a309..45db0c79462d 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -362,6 +362,9 @@ enum devlink_param_generic_id { DEVLINK_PARAM_GENERIC_ID_MAX_MACS, DEVLINK_PARAM_GENERIC_ID_ENABLE_SRIOV, DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT, + DEVLINK_PARAM_GENERIC_ID_IGNORE_ARI, + DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX, + DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN, /* add new param generic ids above here*/ __DEVLINK_PARAM_GENERIC_ID_MAX, @@ -380,6 +383,15 @@ enum devlink_param_generic_id { #define DEVLINK_PARAM_GENERIC_REGION_SNAPSHOT_NAME "region_snapshot_enable" #define DEVLINK_PARAM_GENERIC_REGION_SNAPSHOT_TYPE DEVLINK_PARAM_TYPE_BOOL +#define DEVLINK_PARAM_GENERIC_IGNORE_ARI_NAME "ignore_ari" +#define DEVLINK_PARAM_GENERIC_IGNORE_ARI_TYPE DEVLINK_PARAM_TYPE_BOOL + +#define DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MAX_NAME "msix_vec_per_pf_max" +#define DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MAX_TYPE DEVLINK_PARAM_TYPE_U32 + +#define DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MIN_NAME "msix_vec_per_pf_min" +#define DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MIN_TYPE DEVLINK_PARAM_TYPE_U32 + #define DEVLINK_PARAM_GENERIC(_id, _cmodes, _get, _set, _validate) \ { \ .id = DEVLINK_PARAM_GENERIC_ID_##_id, \ @@ -451,11 +463,14 @@ struct devlink_ops { u32 *p_cur, u32 *p_max); int (*eswitch_mode_get)(struct devlink *devlink, u16 *p_mode); - int (*eswitch_mode_set)(struct devlink *devlink, u16 mode); + int (*eswitch_mode_set)(struct devlink *devlink, u16 mode, + struct netlink_ext_ack *extack); int (*eswitch_inline_mode_get)(struct devlink *devlink, u8 *p_inline_mode); - int (*eswitch_inline_mode_set)(struct devlink *devlink, u8 inline_mode); + int (*eswitch_inline_mode_set)(struct devlink *devlink, u8 inline_mode, + struct netlink_ext_ack *extack); int (*eswitch_encap_mode_get)(struct devlink *devlink, u8 *p_encap_mode); - int (*eswitch_encap_mode_set)(struct devlink *devlink, 
u8 encap_mode); + int (*eswitch_encap_mode_set)(struct devlink *devlink, u8 encap_mode, + struct netlink_ext_ack *extack); }; static inline void *devlink_priv(struct devlink *devlink) diff --git a/include/net/dsa.h b/include/net/dsa.h index 461e8a7661b7..23690c44e167 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -35,6 +35,7 @@ enum dsa_tag_protocol { DSA_TAG_PROTO_BRCM_PREPEND, DSA_TAG_PROTO_DSA, DSA_TAG_PROTO_EDSA, + DSA_TAG_PROTO_GSWIP, DSA_TAG_PROTO_KSZ, DSA_TAG_PROTO_LAN9303, DSA_TAG_PROTO_MTK, diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h index 883bb9085f15..946bd53a9f81 100644 --- a/include/net/gen_stats.h +++ b/include/net/gen_stats.h @@ -44,6 +44,10 @@ void __gnet_stats_copy_basic(const seqcount_t *running, struct gnet_stats_basic_packed *bstats, struct gnet_stats_basic_cpu __percpu *cpu, struct gnet_stats_basic_packed *b); +int gnet_stats_copy_basic_hw(const seqcount_t *running, + struct gnet_dump *d, + struct gnet_stats_basic_cpu __percpu *cpu, + struct gnet_stats_basic_packed *b); int gnet_stats_copy_rate_est(struct gnet_dump *d, struct net_rate_estimator __rcu **ptr); int gnet_stats_copy_queue(struct gnet_dump *d, diff --git a/include/net/genetlink.h b/include/net/genetlink.h index decf6012a401..aa2e5888f18d 100644 --- a/include/net/genetlink.h +++ b/include/net/genetlink.h @@ -112,7 +112,7 @@ static inline void genl_info_net_set(struct genl_info *info, struct net *net) #define GENL_SET_ERR_MSG(info, msg) NL_SET_ERR_MSG((info)->extack, msg) static inline int genl_err_attr(struct genl_info *info, int err, - struct nlattr *attr) + const struct nlattr *attr) { info->extack->bad_attr = attr; diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h index feef706e1158..8014153bdd49 100644 --- a/include/net/ieee80211_radiotap.h +++ b/include/net/ieee80211_radiotap.h @@ -75,6 +75,8 @@ enum ieee80211_radiotap_presence { IEEE80211_RADIOTAP_TIMESTAMP = 22, IEEE80211_RADIOTAP_HE = 23, IEEE80211_RADIOTAP_HE_MU = 24, + IEEE80211_RADIOTAP_ZERO_LEN_PSDU = 26, + IEEE80211_RADIOTAP_LSIG = 27, /* valid in every it_present bitmap, even vendor namespaces */ IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE = 29, @@ -325,6 +327,25 @@ enum ieee80211_radiotap_he_mu_bits { IEEE80211_RADIOTAP_HE_MU_FLAGS2_CH2_CTR_26T_RU = 0x0800, }; +enum ieee80211_radiotap_lsig_data1 { + IEEE80211_RADIOTAP_LSIG_DATA1_RATE_KNOWN = 0x0001, + IEEE80211_RADIOTAP_LSIG_DATA1_LENGTH_KNOWN = 0x0002, +}; + +enum ieee80211_radiotap_lsig_data2 { + IEEE80211_RADIOTAP_LSIG_DATA2_RATE = 0x000f, + IEEE80211_RADIOTAP_LSIG_DATA2_LENGTH = 0xfff0, +}; + +struct ieee80211_radiotap_lsig { + __le16 data1, data2; +}; + +enum ieee80211_radiotap_zero_len_psdu_type { + IEEE80211_RADIOTAP_ZERO_LEN_PSDU_SOUNDING = 0, + IEEE80211_RADIOTAP_ZERO_LEN_PSDU_VENDOR = 0xff, +}; + /** * ieee80211_get_radiotap_len - get radiotap header length */ diff --git a/include/net/ip.h b/include/net/ip.h index e44b1a44f67a..72593e171d14 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -420,8 +420,35 @@ static inline unsigned int ip_skb_dst_mtu(struct sock *sk, return min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU); } -int ip_metrics_convert(struct net *net, struct nlattr *fc_mx, int fc_mx_len, - u32 *metrics); +struct dst_metrics *ip_fib_metrics_init(struct net *net, struct nlattr *fc_mx, + int fc_mx_len); +static inline void ip_fib_metrics_put(struct dst_metrics *fib_metrics) +{ + if (fib_metrics != &dst_default_metrics && + refcount_dec_and_test(&fib_metrics->refcnt)) + kfree(fib_metrics); +} + +/* ipv4 and ipv6 
both use refcounted metrics if it is not the default */ +static inline +void ip_dst_init_metrics(struct dst_entry *dst, struct dst_metrics *fib_metrics) +{ + dst_init_metrics(dst, fib_metrics->metrics, true); + + if (fib_metrics != &dst_default_metrics) { + dst->_metrics |= DST_METRICS_REFCOUNTED; + refcount_inc(&fib_metrics->refcnt); + } +} + +static inline +void ip_dst_metrics_put(struct dst_entry *dst) +{ + struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst); + + if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt)) + kfree(p); +} u32 ip_idents_reserve(u32 hash, int segs); void __ip_select_ident(struct net *net, struct iphdr *iph, int segs); diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index 3d4930528db0..caabfd84a098 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -182,7 +182,6 @@ struct rt6_info { struct in6_addr rt6i_gateway; struct inet6_dev *rt6i_idev; u32 rt6i_flags; - struct rt6key rt6i_prefsrc; struct list_head rt6i_uncached; struct uncached_list *rt6i_uncached_list; @@ -408,11 +407,33 @@ struct fib6_node *fib6_locate(struct fib6_node *root, void fib6_clean_all(struct net *net, int (*func)(struct fib6_info *, void *arg), void *arg); +void fib6_clean_all_skip_notify(struct net *net, + int (*func)(struct fib6_info *, void *arg), + void *arg); int fib6_add(struct fib6_node *root, struct fib6_info *rt, struct nl_info *info, struct netlink_ext_ack *extack); int fib6_del(struct fib6_info *rt, struct nl_info *info); +static inline +void rt6_get_prefsrc(const struct rt6_info *rt, struct in6_addr *addr) +{ + const struct fib6_info *from; + + rcu_read_lock(); + + from = rcu_dereference(rt->from); + if (from) { + *addr = from->fib6_prefsrc.addr; + } else { + struct in6_addr in6_zero = {}; + + *addr = in6_zero; + } + + rcu_read_unlock(); +} + static inline struct net_device *fib6_info_nh_dev(const struct fib6_info *f6i) { return f6i->fib6_nh.nh_dev; diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h index 7b9c82de11cc..cef186dbd2ce 100644 --- a/include/net/ip6_route.h +++ b/include/net/ip6_route.h @@ -165,8 +165,7 @@ void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu, int oif, void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu); void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark, kuid_t uid); -void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif, - u32 mark); +void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif); void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk); struct netlink_callback; diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index c9b7b136939d..852e4ebf2209 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -373,6 +373,7 @@ static inline bool fib4_rules_early_flow_dissect(struct net *net, extern const struct nla_policy rtm_ipv4_policy[]; void ip_fib_init(void); __be32 fib_compute_spec_dst(struct sk_buff *skb); +bool fib_info_nh_uses_dev(struct fib_info *fi, const struct net_device *dev); int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, u8 tos, int oif, struct net_device *dev, struct in_device *idev, u32 *itag); @@ -452,4 +453,6 @@ static inline void fib_proc_exit(struct net *net) u32 ip_mtu_from_fib_result(struct fib_result *res, __be32 daddr); +int ip_valid_fib_dump_req(const struct nlmsghdr *nlh, + struct netlink_ext_ack *extack); #endif /* _NET_FIB_H */ diff --git a/include/net/ipv6.h b/include/net/ipv6.h index ff33f498c137..829650540780 100644 --- 
a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -1089,8 +1089,6 @@ static inline int snmp6_unregister_dev(struct inet6_dev *idev) { return 0; } #endif #ifdef CONFIG_SYSCTL -extern struct ctl_table ipv6_route_table_template[]; - struct ctl_table *ipv6_icmp_sysctl_init(struct net *net); struct ctl_table *ipv6_route_sysctl_init(struct net *net); int ipv6_sysctl_register(void); diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h index f4c21b5a1242..14a490246be9 100644 --- a/include/net/iucv/af_iucv.h +++ b/include/net/iucv/af_iucv.h @@ -80,6 +80,11 @@ struct af_iucv_trans_hdr { u8 pad; /* total 104 bytes */ } __packed; +static inline struct af_iucv_trans_hdr *iucv_trans_hdr(struct sk_buff *skb) +{ + return (struct af_iucv_trans_hdr *)skb_network_header(skb); +} + enum iucv_tx_notify { /* transmission of skb is completed and was successful */ TX_NOTIFY_OK = 0, diff --git a/include/net/llc.h b/include/net/llc.h index 890a87318014..df282d9b4017 100644 --- a/include/net/llc.h +++ b/include/net/llc.h @@ -66,6 +66,7 @@ struct llc_sap { int sk_count; struct hlist_nulls_head sk_laddr_hash[LLC_SK_LADDR_HASH_ENTRIES]; struct hlist_head sk_dev_hash[LLC_SK_DEV_HASH_ENTRIES]; + struct rcu_head rcu; }; static inline diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 5790f55c241d..71985e95d2d9 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -101,8 +101,9 @@ * Drivers indicate that they use this model by implementing the .wake_tx_queue * driver operation. * - * Intermediate queues (struct ieee80211_txq) are kept per-sta per-tid, with a - * single per-vif queue for multicast data frames. + * Intermediate queues (struct ieee80211_txq) are kept per-sta per-tid, with + * another per-sta for non-data/non-mgmt and bufferable management frames, and + * a single per-vif queue for multicast data frames. * * The driver is expected to initialize its private per-queue data for stations * and interfaces in the .add_interface and .sta_add ops. @@ -308,6 +309,8 @@ struct ieee80211_vif_chanctx_switch { * @BSS_CHANGED_KEEP_ALIVE: keep alive options (idle period or protected * keep alive) changed. * @BSS_CHANGED_MCAST_RATE: Multicast Rate setting changed for this interface + * @BSS_CHANGED_FTM_RESPONDER: fine timing measurement request responder + * functionality changed for this BSS (AP mode). + * */ enum ieee80211_bss_change { @@ -337,6 +340,7 @@ enum ieee80211_bss_change { BSS_CHANGED_MU_GROUPS = 1<<23, BSS_CHANGED_KEEP_ALIVE = 1<<24, BSS_CHANGED_MCAST_RATE = 1<<25, + BSS_CHANGED_FTM_RESPONDER = 1<<26, /* when adding here, make sure to change ieee80211_reconfig */ }; @@ -463,6 +467,21 @@ struct ieee80211_mu_group_data { }; /** + * struct ieee80211_ftm_responder_params - FTM responder parameters + * + * @lci: LCI subelement content + * @civicloc: CIVIC location subelement content + * @lci_len: LCI data length + * @civicloc_len: Civic data length + */ +struct ieee80211_ftm_responder_params { + const u8 *lci; + const u8 *civicloc; + size_t lci_len; + size_t civicloc_len; +}; + +/** * struct ieee80211_bss_conf - holds the BSS's changing parameters * * This structure keeps information about a BSS (and an association @@ -561,6 +580,9 @@ struct ieee80211_mu_group_data { * @protected_keep_alive: if set, indicates that the station should send an RSN * protected frame to the AP to reset the idle timer at the AP for the * station. + * @ftm_responder: whether to enable or disable fine timing measurement (FTM) + * responder functionality.
+ * @ftmr_params: configurable lci/civic parameter when enabling FTM responder. */ struct ieee80211_bss_conf { const u8 *bssid; @@ -611,6 +633,8 @@ struct ieee80211_bss_conf { bool allow_p2p_go_ps; u16 max_idle_period; bool protected_keep_alive; + bool ftm_responder; + struct ieee80211_ftm_responder_params *ftmr_params; }; /** @@ -1140,6 +1164,11 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info) * from the RX info data, so leave those zeroed when building this data) * @RX_FLAG_RADIOTAP_HE_MU: HE MU radiotap data is present * (&struct ieee80211_radiotap_he_mu) + * @RX_FLAG_RADIOTAP_LSIG: L-SIG radiotap data is present + * @RX_FLAG_NO_PSDU: use the frame only for radiotap reporting, with + * the "0-length PSDU" field included there. The value for it is + * in &struct ieee80211_rx_status. Note that if this value isn't + * known the frame shouldn't be reported. */ enum mac80211_rx_flags { RX_FLAG_MMIC_ERROR = BIT(0), @@ -1170,6 +1199,8 @@ enum mac80211_rx_flags { RX_FLAG_AMPDU_EOF_BIT_KNOWN = BIT(25), RX_FLAG_RADIOTAP_HE = BIT(26), RX_FLAG_RADIOTAP_HE_MU = BIT(27), + RX_FLAG_RADIOTAP_LSIG = BIT(28), + RX_FLAG_NO_PSDU = BIT(29), }; /** @@ -1242,6 +1273,7 @@ enum mac80211_rx_encoding { * @ampdu_reference: A-MPDU reference number, must be a different value for * each A-MPDU but the same for each subframe within one A-MPDU * @ampdu_delimiter_crc: A-MPDU delimiter CRC + * @zero_length_psdu_type: radiotap type of the 0-length PSDU */ struct ieee80211_rx_status { u64 mactime; @@ -1262,6 +1294,7 @@ struct ieee80211_rx_status { u8 chains; s8 chain_signal[IEEE80211_MAX_CHAINS]; u8 ampdu_delimiter_crc; + u8 zero_length_psdu_type; }; /** @@ -1504,6 +1537,8 @@ enum ieee80211_vif_flags { * @drv_priv: data area for driver use, will always be aligned to * sizeof(void \*). * @txq: the multicast data TX queue (if driver uses the TXQ abstraction) + * @txqs_stopped: per AC flag to indicate that intermediate TXQs are stopped, + * protected by fq->lock. */ struct ieee80211_vif { enum nl80211_iftype type; @@ -1528,6 +1563,8 @@ struct ieee80211_vif { unsigned int probe_req_reg; + bool txqs_stopped[IEEE80211_NUM_ACS]; + /* must be last */ u8 drv_priv[0] __aligned(sizeof(void *)); }; @@ -1839,7 +1876,9 @@ struct ieee80211_sta_rates { * unlimited. * @support_p2p_ps: indicates whether the STA supports P2P PS mechanism or not. * @max_rc_amsdu_len: Maximum A-MSDU size in bytes recommended by rate control. - * @txq: per-TID data TX queues (if driver uses the TXQ abstraction) + * @max_tid_amsdu_len: Maximum A-MSDU size in bytes for this TID + * @txq: per-TID data TX queues (if driver uses the TXQ abstraction); note that + * the last entry (%IEEE80211_NUM_TIDS) is used for non-data frames */ struct ieee80211_sta { u32 supp_rates[NUM_NL80211_BANDS]; @@ -1879,8 +1918,9 @@ struct ieee80211_sta { u16 max_amsdu_len; bool support_p2p_ps; u16 max_rc_amsdu_len; + u16 max_tid_amsdu_len[IEEE80211_NUM_TIDS]; - struct ieee80211_txq *txq[IEEE80211_NUM_TIDS]; + struct ieee80211_txq *txq[IEEE80211_NUM_TIDS + 1]; /* must be last */ u8 drv_priv[0] __aligned(sizeof(void *)); @@ -1914,7 +1954,8 @@ struct ieee80211_tx_control { * * @vif: &struct ieee80211_vif pointer from the add_interface callback. 
* @sta: station table entry, %NULL for per-vif queue - * @tid: the TID for this queue (unused for per-vif queue) + * @tid: the TID for this queue (unused for per-vif queue), + * %IEEE80211_NUM_TIDS for non-data (if enabled) * @ac: the AC for this queue * @drv_priv: driver private area, sized by hw->txq_data_size * @@ -2127,6 +2168,19 @@ struct ieee80211_txq { * @IEEE80211_HW_DOESNT_SUPPORT_QOS_NDP: The driver (or firmware) doesn't * support QoS NDP for AP probing - that's most likely a driver bug. * + * @IEEE80211_HW_BUFF_MMPDU_TXQ: use the TXQ for bufferable MMPDUs; this of + * course requires the driver to use TXQs to start with. + * + * @IEEE80211_HW_SUPPORTS_VHT_EXT_NSS_BW: (Hardware) rate control supports VHT + * extended NSS BW (dot11VHTExtendedNSSBWCapable). This flag will be set if + * the selected rate control algorithm sets %RATE_CTRL_CAPA_VHT_EXT_NSS_BW + * but if the rate control is built-in then it must be set by the driver. + * See also the documentation for that flag. + * + * @IEEE80211_HW_STA_MMPDU_TXQ: use the extra non-TID per-station TXQ for all + * MMPDUs on station interfaces. This of course requires the driver to use + * TXQs to start with. + * * @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays */ enum ieee80211_hw_flags { @@ -2172,6 +2226,9 @@ enum ieee80211_hw_flags { IEEE80211_HW_SUPPORTS_TDLS_BUFFER_STA, IEEE80211_HW_DEAUTH_NEED_MGD_TX_PREP, IEEE80211_HW_DOESNT_SUPPORT_QOS_NDP, + IEEE80211_HW_BUFF_MMPDU_TXQ, + IEEE80211_HW_SUPPORTS_VHT_EXT_NSS_BW, + IEEE80211_HW_STA_MMPDU_TXQ, /* keep last, obviously */ NUM_IEEE80211_HW_FLAGS @@ -2290,6 +2347,10 @@ enum ieee80211_hw_flags { * supported by HW. * @max_nan_de_entries: maximum number of NAN DE functions supported by the * device. + * + * @tx_sk_pacing_shift: Pacing shift to set on TCP sockets when frames from + * them are encountered. The default should typically not be changed, + * unless the driver has good reasons for needing more buffers. + */ struct ieee80211_hw { struct ieee80211_conf conf; @@ -2325,6 +2386,7 @@ struct ieee80211_hw { u8 n_cipher_schemes; const struct ieee80211_cipher_scheme *cipher_schemes; u8 max_nan_de_entries; + u8 tx_sk_pacing_shift; }; static inline bool _ieee80211_hw_check(struct ieee80211_hw *hw, @@ -2506,6 +2568,19 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb); * The set_default_unicast_key() call updates the default WEP key index * configured to the hardware for WEP encryption type. This is required * for devices that support offload of data packets (e.g. ARP responses). + * + * Mac80211 drivers should set the @NL80211_EXT_FEATURE_CAN_REPLACE_PTK0 flag + * when they are able to replace in-use PTK keys according to the following + * requirements: + * 1) They do not hand over frames decrypted with the old key to + mac80211 once the call to set_key() with command %DISABLE_KEY has been + completed when also setting @IEEE80211_KEY_FLAG_GENERATE_IV for any key, + 2) either drop or continue to use the old key for any outgoing frames queued + at the time of the key deletion (including re-transmits), + 3) never send out a frame queued prior to the set_key() %SET_KEY command + encrypted with the new key and + 4) never send out a frame unencrypted when it should be encrypted. + Mac80211 will not queue any new frames for a deleted key to the driver. */ /** @@ -3542,6 +3617,12 @@ enum ieee80211_reconfig_type { * @del_nan_func: Remove a NAN function.
The driver must call * ieee80211_nan_func_terminated() with * NL80211_NAN_FUNC_TERM_REASON_USER_REQUEST reason code upon removal. + * @can_aggregate_in_amsdu: Called in order to determine if HW supports + * aggregating two specific frames in the same A-MSDU. The relation + * between the skbs should be symmetric and transitive. Note that while + * skb is always a real frame, head may or may not be an A-MSDU. + * @get_ftm_responder_stats: Retrieve FTM responder statistics, if available. + * Statistics should be cumulative, currently no way to reset is provided. */ struct ieee80211_ops { void (*tx)(struct ieee80211_hw *hw, @@ -3824,6 +3905,12 @@ struct ieee80211_ops { void (*del_nan_func)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u8 instance_id); + bool (*can_aggregate_in_amsdu)(struct ieee80211_hw *hw, + struct sk_buff *head, + struct sk_buff *skb); + int (*get_ftm_responder_stats)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_ftm_responder_stats *ftm_stats); }; /** @@ -4293,6 +4380,21 @@ void ieee80211_sta_set_expected_throughput(struct ieee80211_sta *pubsta, u32 thr); /** + * ieee80211_tx_rate_update - transmit rate update callback + * + * Drivers should call this functions with a non-NULL pub sta + * This function can be used in drivers that does not have provision + * in updating the tx rate in data path. + * + * @hw: the hardware the frame was transmitted by + * @pubsta: the station to update the tx rate for. + * @info: tx status information + */ +void ieee80211_tx_rate_update(struct ieee80211_hw *hw, + struct ieee80211_sta *pubsta, + struct ieee80211_tx_info *info); + +/** * ieee80211_tx_status - transmit status callback * * Call this function for all transmitted frames after they have been @@ -5644,7 +5746,22 @@ struct ieee80211_tx_rate_control { bool bss; }; +/** + * enum rate_control_capabilities - rate control capabilities + */ +enum rate_control_capabilities { + /** + * @RATE_CTRL_CAPA_VHT_EXT_NSS_BW: + * Support for extended NSS BW support (dot11VHTExtendedNSSCapable) + * Note that this is only looked at if the minimum number of chains + * that the AP uses is < the number of TX chains the hardware has, + * otherwise the NSS difference doesn't bother us. + */ + RATE_CTRL_CAPA_VHT_EXT_NSS_BW = BIT(0), +}; + struct rate_control_ops { + unsigned long capa; const char *name; void *(*alloc)(struct ieee80211_hw *hw, struct dentry *debugfsdir); void (*free)(void *priv); diff --git a/include/net/neighbour.h b/include/net/neighbour.h index 6c1eecd56a4d..f58b384aa6c9 100644 --- a/include/net/neighbour.h +++ b/include/net/neighbour.h @@ -323,6 +323,7 @@ void __neigh_set_probe_once(struct neighbour *neigh); bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl); void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev); int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev); +int neigh_carrier_down(struct neigh_table *tbl, struct net_device *dev); int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb); int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb); int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb); @@ -544,4 +545,19 @@ static inline void neigh_update_ext_learned(struct neighbour *neigh, u32 flags, *notify = 1; } } + +static inline void neigh_update_is_router(struct neighbour *neigh, u32 flags, + int *notify) +{ + u8 ndm_flags = 0; + + ndm_flags |= (flags & NEIGH_UPDATE_F_ISROUTER) ? 
NTF_ROUTER : 0; + if ((neigh->flags ^ ndm_flags) & NTF_ROUTER) { + if (ndm_flags & NTF_ROUTER) + neigh->flags |= NTF_ROUTER; + else + neigh->flags &= ~NTF_ROUTER; + *notify = 1; + } +} #endif diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index 9b5fdc50519a..99d4148e0f90 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h @@ -43,6 +43,7 @@ struct ctl_table_header; struct net_generic; struct uevent_sock; struct netns_ipvs; +struct bpf_prog; #define NETDEV_HASHBITS 8 @@ -145,6 +146,8 @@ struct net { #endif struct net_generic __rcu *gen; + struct bpf_prog __rcu *flow_dissector_prog; + /* Note : following structs are cache line aligned */ #ifdef CONFIG_XFRM struct netns_xfrm xfrm; diff --git a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h index c84b51682f08..135ee702c7b0 100644 --- a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h +++ b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h @@ -10,20 +10,17 @@ #ifndef _NF_CONNTRACK_IPV4_H #define _NF_CONNTRACK_IPV4_H -extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4; -extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4; +extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp; +extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp; extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp; #ifdef CONFIG_NF_CT_PROTO_DCCP -extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4; +extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp; #endif #ifdef CONFIG_NF_CT_PROTO_SCTP -extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4; +extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp; #endif #ifdef CONFIG_NF_CT_PROTO_UDPLITE -extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4; +extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite; #endif -int nf_conntrack_ipv4_compat_init(void); -void nf_conntrack_ipv4_compat_fini(void); - #endif /*_NF_CONNTRACK_IPV4_H*/ diff --git a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h index effa8dfba68c..7b3c873f8839 100644 --- a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h +++ b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h @@ -2,20 +2,7 @@ #ifndef _NF_CONNTRACK_IPV6_H #define _NF_CONNTRACK_IPV6_H -extern const struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv6; - -extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6; -extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6; extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6; -#ifdef CONFIG_NF_CT_PROTO_DCCP -extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6; -#endif -#ifdef CONFIG_NF_CT_PROTO_SCTP -extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6; -#endif -#ifdef CONFIG_NF_CT_PROTO_UDPLITE -extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6; -#endif #include <linux/sysctl.h> extern struct ctl_table nf_ct_ipv6_sysctl_table[]; diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h index 2a3e0974a6af..afc9b3620473 100644 --- a/include/net/netfilter/nf_conntrack_core.h +++ b/include/net/netfilter/nf_conntrack_core.h @@ -20,8 +20,7 @@ /* This header is used to share core functionality between the standalone connection tracking module, and the compatibility layer's use of connection tracking. 
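Editorial aside, not part of the original patch: with the signature change below, nf_conntrack_in() derives net, protocol family and hook number from the passed nf_hook_state, so a netfilter hook function (hedged sketch; my_hook and the unused priv argument are illustrative) reduces to: static unsigned int my_hook(void *priv, struct sk_buff *skb, const struct nf_hook_state *state) { return nf_conntrack_in(skb, state); }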
*/ -unsigned int nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum, - struct sk_buff *skb); +unsigned int nf_conntrack_in(struct sk_buff *skb, const struct nf_hook_state *state); int nf_conntrack_init_net(struct net *net); void nf_conntrack_cleanup_net(struct net *net); diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h index 8465263b297d..eed04af9b75e 100644 --- a/include/net/netfilter/nf_conntrack_l4proto.h +++ b/include/net/netfilter/nf_conntrack_l4proto.h @@ -18,9 +18,6 @@ struct seq_file; struct nf_conntrack_l4proto { - /* L3 Protocol number. */ - u_int16_t l3proto; - /* L4 Protocol number. */ u_int8_t l4proto; @@ -43,22 +40,14 @@ struct nf_conntrack_l4proto { /* Returns verdict for packet, or -1 for invalid. */ int (*packet)(struct nf_conn *ct, - const struct sk_buff *skb, + struct sk_buff *skb, unsigned int dataoff, - enum ip_conntrack_info ctinfo); - - /* Called when a new connection for this protocol found; - * returns TRUE if it's OK. If so, packet() called next. */ - bool (*new)(struct nf_conn *ct, const struct sk_buff *skb, - unsigned int dataoff); + enum ip_conntrack_info ctinfo, + const struct nf_hook_state *state); /* Called when a conntrack entry is destroyed */ void (*destroy)(struct nf_conn *ct); - int (*error)(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb, - unsigned int dataoff, - u_int8_t pf, unsigned int hooknum); - /* called by gc worker if table is full */ bool (*can_early_drop)(const struct nf_conn *ct); @@ -92,7 +81,7 @@ struct nf_conntrack_l4proto { #endif unsigned int *net_id; /* Init l4proto pernet data */ - int (*init_net)(struct net *net, u_int16_t proto); + int (*init_net)(struct net *net); /* Return the per-net protocol part. */ struct nf_proto_net *(*get_net_proto)(struct net *net); @@ -101,16 +90,23 @@ struct nf_conntrack_l4proto { struct module *me; }; +int nf_conntrack_icmpv4_error(struct nf_conn *tmpl, + struct sk_buff *skb, + unsigned int dataoff, + const struct nf_hook_state *state); + +int nf_conntrack_icmpv6_error(struct nf_conn *tmpl, + struct sk_buff *skb, + unsigned int dataoff, + const struct nf_hook_state *state); /* Existing built-in generic protocol */ extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_generic; -#define MAX_NF_CT_PROTO 256 +#define MAX_NF_CT_PROTO IPPROTO_UDPLITE -const struct nf_conntrack_l4proto *__nf_ct_l4proto_find(u_int16_t l3proto, - u_int8_t l4proto); +const struct nf_conntrack_l4proto *__nf_ct_l4proto_find(u8 l4proto); -const struct nf_conntrack_l4proto *nf_ct_l4proto_find_get(u_int16_t l3proto, - u_int8_t l4proto); +const struct nf_conntrack_l4proto *nf_ct_l4proto_find_get(u8 l4proto); void nf_ct_l4proto_put(const struct nf_conntrack_l4proto *p); /* Protocol pernet registration. 
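With the l3proto dimension gone and the ->new()/->error() callbacks removed, a tracker now implements everything in ->packet(), receiving the hook state directly. A skeletal handler under the reworked API might look like the sketch below; the function and its body are illustrative, not copied from an in-tree tracker:

	static int example_l4_packet(struct nf_conn *ct, struct sk_buff *skb,
				     unsigned int dataoff,
				     enum ip_conntrack_info ctinfo,
				     const struct nf_hook_state *state)
	{
		/* on the first packet of a flow, initialise per-conntrack
		 * state here; this replaces the removed ->new() callback.
		 * ICMP errors are handled up front through
		 * nf_conntrack_icmpv4_error()/nf_conntrack_icmpv6_error()
		 * rather than a per-proto ->error() hook. */
		nf_ct_refresh_acct(ct, ctinfo, skb, 30 * HZ);
		return NF_ACCEPT;
	}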
*/ diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 0f39ac487012..841835a387e1 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -470,6 +470,9 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set, struct nft_set_binding *binding); void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set, struct nft_set_binding *binding); +void nf_tables_rebind_set(const struct nft_ctx *ctx, struct nft_set *set, + struct nft_set_binding *binding); +void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set); /** * enum nft_set_extensions - set extension type IDs @@ -724,7 +727,9 @@ struct nft_expr_type { * @eval: Expression evaluation function * @size: full expression size, including private data size * @init: initialization function - * @destroy: destruction function + * @activate: activate expression in the next generation + * @deactivate: deactivate expression in next generation + * @destroy: destruction function, called after synchronize_rcu * @dump: function to dump parameters * @type: expression type * @validate: validate expression, called during loop detection @@ -1293,12 +1298,14 @@ static inline void nft_set_elem_clear_busy(struct nft_set_ext *ext) * * @list: used internally * @msg_type: message type + * @put_net: ctx->net needs to be put * @ctx: transaction context * @data: internal information related to the transaction */ struct nft_trans { struct list_head list; int msg_type; + bool put_net; struct nft_ctx ctx; char data[0]; }; diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h index 8da837d2aaf9..2046d104f323 100644 --- a/include/net/netfilter/nf_tables_core.h +++ b/include/net/netfilter/nf_tables_core.h @@ -16,6 +16,10 @@ extern struct nft_expr_type nft_meta_type; extern struct nft_expr_type nft_rt_type; extern struct nft_expr_type nft_exthdr_type; +#ifdef CONFIG_NETWORK_SECMARK +extern struct nft_object_type nft_secmark_obj_type; +#endif + int nf_tables_core_module_init(void); void nf_tables_core_module_exit(void); diff --git a/include/net/netlink.h b/include/net/netlink.h index 39e1d875d507..4c1e99303b5a 100644 --- a/include/net/netlink.h +++ b/include/net/netlink.h @@ -172,7 +172,7 @@ enum { NLA_FLAG, NLA_MSECS, NLA_NESTED, - NLA_NESTED_COMPAT, + NLA_NESTED_ARRAY, NLA_NUL_STRING, NLA_BINARY, NLA_S8, @@ -180,14 +180,28 @@ enum { NLA_S32, NLA_S64, NLA_BITFIELD32, + NLA_REJECT, + NLA_EXACT_LEN, + NLA_EXACT_LEN_WARN, __NLA_TYPE_MAX, }; #define NLA_TYPE_MAX (__NLA_TYPE_MAX - 1) +enum nla_policy_validation { + NLA_VALIDATE_NONE, + NLA_VALIDATE_RANGE, + NLA_VALIDATE_MIN, + NLA_VALIDATE_MAX, + NLA_VALIDATE_FUNCTION, +}; + /** * struct nla_policy - attribute validation policy * @type: Type of attribute or NLA_UNSPEC + * @validation_type: type of attribute validation done in addition to + * type-specific validation (e.g. 
range, function call), see + * &enum nla_policy_validation * @len: Type specific length of payload * * Policies are defined as arrays of this struct, the array must be @@ -198,9 +212,11 @@ enum { * NLA_NUL_STRING Maximum length of string (excluding NUL) * NLA_FLAG Unused * NLA_BINARY Maximum length of attribute payload - * NLA_NESTED Don't use `len' field -- length verification is - * done by checking len of nested header (or empty) - * NLA_NESTED_COMPAT Minimum length of structure payload + * NLA_NESTED, + * NLA_NESTED_ARRAY Length verification is done by checking len of + * nested header (or empty); len field is used if + * validation_data is also used, for the max attr + * number in the nested policy. * NLA_U8, NLA_U16, * NLA_U32, NLA_U64, * NLA_S8, NLA_S16, @@ -208,9 +224,59 @@ enum { * NLA_MSECS Leaving the length field zero will verify the * given type fits, using it verifies minimum length * just like "All other" - * NLA_BITFIELD32 A 32-bit bitmap/bitselector attribute + * NLA_BITFIELD32 Unused + * NLA_REJECT Unused + * NLA_EXACT_LEN Attribute must have exactly this length, otherwise + * it is rejected. + * NLA_EXACT_LEN_WARN Attribute should have exactly this length; a warning + * is logged if it is longer, shorter is rejected. * All other Minimum length of attribute payload * + * Meaning of `validation_data' field: + * NLA_BITFIELD32 This is a 32-bit bitmap/bitselector attribute and + * validation data must point to a u32 value of valid + * flags + * NLA_REJECT This attribute is always rejected and validation data + * may point to a string to report as the error instead + * of the generic one in extended ACK. + * NLA_NESTED Points to a nested policy to validate, must also set + * `len' to the max attribute number. + * Note that nla_parse() will validate, but of course not + * parse, the nested sub-policies. + * NLA_NESTED_ARRAY Points to a nested policy to validate, must also set + * `len' to the max attribute number. The difference from + * NLA_NESTED is the structure - NLA_NESTED has the + * nested attributes directly inside, while an array has + * the nested attributes at another level down and the + * attributes directly in the nesting don't matter. + * All other Unused - but note that it's a union + * + * Meaning of `min' and `max' fields, use via NLA_POLICY_MIN, NLA_POLICY_MAX + * and NLA_POLICY_RANGE: + * NLA_U8, + * NLA_U16, + * NLA_U32, + * NLA_U64, + * NLA_S8, + * NLA_S16, + * NLA_S32, + * NLA_S64 These are used depending on the validation_type + * field; if that is min/max/range then the minimum, + * maximum and both are used (respectively) to check + * the value of the integer attribute. + * Note that in the interest of code simplicity and + * struct size, both limits are s16, so you cannot + * enforce a range that doesn't fall within the range + * of s16 - do that as usual in the code instead. + * All other Unused - but note that it's a union + * + * Meaning of `validate' field, use via NLA_POLICY_VALIDATE_FN: + * NLA_BINARY Validation function called for the attribute, + * not compatible with use of the validation_data + * as in NLA_BITFIELD32, NLA_REJECT, NLA_NESTED and + * NLA_NESTED_ARRAY.
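To make the new machinery concrete, here is a hypothetical policy table using the NLA_POLICY_* helpers defined further down; the attribute names are invented for the example:

	enum {
		EX_ATTR_UNSPEC,
		EX_ATTR_LEVEL,		/* NLA_U8, valid range 1..10 */
		EX_ATTR_ADDR,		/* six-byte MAC address */
		EX_ATTR_LEGACY,		/* no longer accepted */
		__EX_ATTR_MAX
	};

	static const struct nla_policy ex_policy[__EX_ATTR_MAX] = {
		[EX_ATTR_LEVEL]  = NLA_POLICY_RANGE(NLA_U8, 1, 10),
		[EX_ATTR_ADDR]   = NLA_POLICY_ETH_ADDR,
		[EX_ATTR_LEGACY] = { .type = NLA_REJECT,
				     .validation_data = "deprecated, use EX_ATTR_LEVEL" },
	};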
+ * All other Unused - but note that it's a union + * * Example: * static const struct nla_policy my_policy[ATTR_MAX+1] = { * [ATTR_FOO] = { .type = NLA_U16 }, @@ -220,11 +286,69 @@ enum { * }; */ struct nla_policy { - u16 type; + u8 type; + u8 validation_type; u16 len; - void *validation_data; + union { + const void *validation_data; + struct { + s16 min, max; + }; + int (*validate)(const struct nlattr *attr, + struct netlink_ext_ack *extack); + }; }; +#define NLA_POLICY_EXACT_LEN(_len) { .type = NLA_EXACT_LEN, .len = _len } +#define NLA_POLICY_EXACT_LEN_WARN(_len) { .type = NLA_EXACT_LEN_WARN, \ + .len = _len } + +#define NLA_POLICY_ETH_ADDR NLA_POLICY_EXACT_LEN(ETH_ALEN) +#define NLA_POLICY_ETH_ADDR_COMPAT NLA_POLICY_EXACT_LEN_WARN(ETH_ALEN) + +#define NLA_POLICY_NESTED(maxattr, policy) \ + { .type = NLA_NESTED, .validation_data = policy, .len = maxattr } +#define NLA_POLICY_NESTED_ARRAY(maxattr, policy) \ + { .type = NLA_NESTED_ARRAY, .validation_data = policy, .len = maxattr } + +#define __NLA_ENSURE(condition) BUILD_BUG_ON_ZERO(!(condition)) +#define NLA_ENSURE_INT_TYPE(tp) \ + (__NLA_ENSURE(tp == NLA_S8 || tp == NLA_U8 || \ + tp == NLA_S16 || tp == NLA_U16 || \ + tp == NLA_S32 || tp == NLA_U32 || \ + tp == NLA_S64 || tp == NLA_U64) + tp) +#define NLA_ENSURE_NO_VALIDATION_PTR(tp) \ + (__NLA_ENSURE(tp != NLA_BITFIELD32 && \ + tp != NLA_REJECT && \ + tp != NLA_NESTED && \ + tp != NLA_NESTED_ARRAY) + tp) + +#define NLA_POLICY_RANGE(tp, _min, _max) { \ + .type = NLA_ENSURE_INT_TYPE(tp), \ + .validation_type = NLA_VALIDATE_RANGE, \ + .min = _min, \ + .max = _max \ +} + +#define NLA_POLICY_MIN(tp, _min) { \ + .type = NLA_ENSURE_INT_TYPE(tp), \ + .validation_type = NLA_VALIDATE_MIN, \ + .min = _min, \ +} + +#define NLA_POLICY_MAX(tp, _max) { \ + .type = NLA_ENSURE_INT_TYPE(tp), \ + .validation_type = NLA_VALIDATE_MAX, \ + .max = _max, \ +} + +#define NLA_POLICY_VALIDATE_FN(tp, fn, ...) 
{ \ + .type = NLA_ENSURE_NO_VALIDATION_PTR(tp), \ + .validation_type = NLA_VALIDATE_FUNCTION, \ + .validate = fn, \ + .len = __VA_ARGS__ + 0, \ +} + /** * struct nl_info - netlink source information * @nlh: Netlink message header of original request @@ -249,6 +373,9 @@ int nla_validate(const struct nlattr *head, int len, int maxtype, int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head, int len, const struct nla_policy *policy, struct netlink_ext_ack *extack); +int nla_parse_strict(struct nlattr **tb, int maxtype, const struct nlattr *head, + int len, const struct nla_policy *policy, + struct netlink_ext_ack *extack); int nla_policy_len(const struct nla_policy *, int); struct nlattr *nla_find(const struct nlattr *head, int len, int attrtype); size_t nla_strlcpy(char *dst, const struct nlattr *nla, size_t dstsize); @@ -392,13 +519,29 @@ static inline int nlmsg_parse(const struct nlmsghdr *nlh, int hdrlen, const struct nla_policy *policy, struct netlink_ext_ack *extack) { - if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen)) + if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen)) { + NL_SET_ERR_MSG(extack, "Invalid header length"); return -EINVAL; + } return nla_parse(tb, maxtype, nlmsg_attrdata(nlh, hdrlen), nlmsg_attrlen(nlh, hdrlen), policy, extack); } +static inline int nlmsg_parse_strict(const struct nlmsghdr *nlh, int hdrlen, + struct nlattr *tb[], int maxtype, + const struct nla_policy *policy, + struct netlink_ext_ack *extack) +{ + if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen)) { + NL_SET_ERR_MSG(extack, "Invalid header length"); + return -EINVAL; + } + + return nla_parse_strict(tb, maxtype, nlmsg_attrdata(nlh, hdrlen), + nlmsg_attrlen(nlh, hdrlen), policy, extack); +} + /** * nlmsg_find_attr - find a specific attribute in a netlink message * @nlh: netlink message header diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h index f0e396ab9bec..ef1ed529f33c 100644 --- a/include/net/netns/ipv6.h +++ b/include/net/netns/ipv6.h @@ -45,6 +45,7 @@ struct netns_sysctl_ipv6 { int max_dst_opts_len; int max_hbh_opts_len; int seg6_flowlabel; + bool skip_notify_on_dev_down; }; struct netns_ipv6 { diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index 75a3f3fdb359..72ffb3120ced 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -65,11 +65,6 @@ static inline struct Qdisc *tcf_block_q(struct tcf_block *block) return block->q; } -static inline struct net_device *tcf_block_dev(struct tcf_block *block) -{ - return tcf_block_q(block)->dev_queue->dev; -} - void *tcf_block_cb_priv(struct tcf_block_cb *block_cb); struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block, tc_setup_cb_t *cb, void *cb_ident); @@ -122,11 +117,6 @@ static inline struct Qdisc *tcf_block_q(struct tcf_block *block) return NULL; } -static inline struct net_device *tcf_block_dev(struct tcf_block *block) -{ - return NULL; -} - static inline int tc_setup_cb_block_register(struct tcf_block *block, tc_setup_cb_t *cb, void *cb_priv) @@ -318,7 +308,7 @@ tcf_exts_stats_update(const struct tcf_exts *exts, for (i = 0; i < exts->nr_actions; i++) { struct tc_action *a = exts->actions[i]; - tcf_action_stats_update(a, bytes, packets, lastuse); + tcf_action_stats_update(a, bytes, packets, lastuse, true); } preempt_enable(); diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index 7dc769e5452b..a16fbe9a2a67 100644 --- a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h @@ -102,6 +102,7 @@ int qdisc_set_default(const char *id); void qdisc_hash_add(struct Qdisc *q, bool invisible); void 
qdisc_hash_del(struct Qdisc *q); struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle); +struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle); struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *tab, struct netlink_ext_ack *extack); diff --git a/include/net/route.h b/include/net/route.h index bb53cdba38dc..9883dc82f723 100644 --- a/include/net/route.h +++ b/include/net/route.h @@ -201,10 +201,9 @@ static inline int ip_route_input(struct sk_buff *skb, __be32 dst, __be32 src, } void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu, int oif, - u32 mark, u8 protocol, int flow_flags); + u8 protocol); void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu); -void ipv4_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark, - u8 protocol, int flow_flags); +void ipv4_redirect(struct sk_buff *skb, struct net *net, int oif, u8 protocol); void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk); void ip_rt_send_redirect(struct sk_buff *skb); diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h index 0bbaa5488423..cf26e5aacac4 100644 --- a/include/net/rtnetlink.h +++ b/include/net/rtnetlink.h @@ -165,6 +165,7 @@ int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm); int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len, struct netlink_ext_ack *exterr); +struct net *rtnl_get_net_ns_capable(struct sock *sk, int netnsid); #define MODULE_ALIAS_RTNL_LINK(kind) MODULE_ALIAS("rtnl-link-" kind) diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index a6d00093f35e..4d736427a4cb 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -19,6 +19,7 @@ struct Qdisc_ops; struct qdisc_walker; struct tcf_walker; struct module; +struct bpf_flow_keys; typedef int tc_setup_cb_t(enum tc_setup_type type, void *type_data, void *cb_priv); @@ -105,6 +106,7 @@ struct Qdisc { spinlock_t busylock ____cacheline_aligned_in_smp; spinlock_t seqlock; + struct rcu_head rcu; }; static inline void qdisc_refcount_inc(struct Qdisc *qdisc) @@ -114,6 +116,19 @@ static inline void qdisc_refcount_inc(struct Qdisc *qdisc) refcount_inc(&qdisc->refcnt); } +/* Intended to be used by unlocked users, when concurrent qdisc release is + * possible. 
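The helper that follows this comment, qdisc_refcount_inc_nz(), is the RCU-side counterpart of qdisc_refcount_inc(): a reader that found a qdisc via qdisc_lookup_rcu() may keep it only if the refcount was still non-zero. A hedged sketch of the intended pattern, with a hypothetical caller:

	static struct Qdisc *example_qdisc_get(struct net_device *dev, u32 handle)
	{
		struct Qdisc *q;

		rcu_read_lock();
		q = qdisc_lookup_rcu(dev, handle);
		if (q)
			q = qdisc_refcount_inc_nz(q); /* NULL if already dying */
		rcu_read_unlock();

		return q; /* a non-builtin result is released with qdisc_put() */
	}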
+ */ + +static inline struct Qdisc *qdisc_refcount_inc_nz(struct Qdisc *qdisc) +{ + if (qdisc->flags & TCQ_F_BUILTIN) + return qdisc; + if (refcount_inc_not_zero(&qdisc->refcnt)) + return qdisc; + return NULL; +} + static inline bool qdisc_is_running(struct Qdisc *qdisc) { if (qdisc->flags & TCQ_F_NOLOCK) @@ -307,9 +322,14 @@ struct tcf_proto { }; struct qdisc_skb_cb { - unsigned int pkt_len; - u16 slave_dev_queue_mapping; - u16 tc_classid; + union { + struct { + unsigned int pkt_len; + u16 slave_dev_queue_mapping; + u16 tc_classid; + }; + struct bpf_flow_keys *flow_keys; + }; #define QDISC_CB_PRIV_LEN 20 unsigned char data[QDISC_CB_PRIV_LEN]; }; @@ -331,7 +351,7 @@ struct tcf_chain { struct tcf_block { struct list_head chain_list; u32 index; /* block index for shared blocks */ - unsigned int refcnt; + refcount_t refcnt; struct net *net; struct Qdisc *q; struct list_head cb_list; @@ -343,6 +363,7 @@ struct tcf_block { struct tcf_chain *chain; struct list_head filter_chain_list; } chain0; + struct rcu_head rcu; }; static inline void tcf_block_offload_inc(struct tcf_block *block, u32 *flags) @@ -362,7 +383,7 @@ static inline void tcf_block_offload_dec(struct tcf_block *block, u32 *flags) } static inline void -tc_cls_offload_cnt_update(struct tcf_block *block, unsigned int *cnt, +tc_cls_offload_cnt_update(struct tcf_block *block, u32 *cnt, u32 *flags, bool add) { if (add) { @@ -554,7 +575,8 @@ void dev_deactivate_many(struct list_head *head); struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue, struct Qdisc *qdisc); void qdisc_reset(struct Qdisc *qdisc); -void qdisc_destroy(struct Qdisc *qdisc); +void qdisc_put(struct Qdisc *qdisc); +void qdisc_put_unlocked(struct Qdisc *qdisc); void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n, unsigned int len); struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue, @@ -828,8 +850,8 @@ static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh) qh->qlen = 0; } -static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch, - struct qdisc_skb_head *qh) +static inline void __qdisc_enqueue_tail(struct sk_buff *skb, + struct qdisc_skb_head *qh) { struct sk_buff *last = qh->tail; @@ -842,14 +864,24 @@ static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch, qh->head = skb; } qh->qlen++; - qdisc_qstats_backlog_inc(sch, skb); +} +static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch) +{ + __qdisc_enqueue_tail(skb, &sch->q); + qdisc_qstats_backlog_inc(sch, skb); return NET_XMIT_SUCCESS; } -static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch) +static inline void __qdisc_enqueue_head(struct sk_buff *skb, + struct qdisc_skb_head *qh) { - return __qdisc_enqueue_tail(skb, sch, &sch->q); + skb->next = qh->head; + + if (!qh->head) + qh->tail = skb; + qh->head = skb; + qh->qlen++; } static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh) diff --git a/include/net/sock.h b/include/net/sock.h index 433f45fc2d68..751549ac0a84 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -800,6 +800,7 @@ enum sock_flags { SOCK_SELECT_ERR_QUEUE, /* Wake select on error queue */ SOCK_RCU_FREE, /* wait rcu grace period in sk_destruct() */ SOCK_TXTIME, + SOCK_XDP, /* XDP is attached */ }; #define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE)) @@ -1491,6 +1492,7 @@ static inline void lock_sock(struct sock *sk) lock_sock_nested(sk, 0); } +void __release_sock(struct sock *sk); void release_sock(struct sock 
*sk); /* BH context may only use the following locking interface. */ diff --git a/include/net/tcp.h b/include/net/tcp.h index 770917d0caa7..0d2929223c70 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -732,7 +732,7 @@ void tcp_send_window_probe(struct sock *sk); static inline u64 tcp_clock_ns(void) { - return local_clock(); + return ktime_get_ns(); } static inline u64 tcp_clock_us(void) @@ -752,17 +752,7 @@ static inline u32 tcp_time_stamp_raw(void) return div_u64(tcp_clock_ns(), NSEC_PER_SEC / TCP_TS_HZ); } - -/* Refresh 1us clock of a TCP socket, - * ensuring monotically increasing values. - */ -static inline void tcp_mstamp_refresh(struct tcp_sock *tp) -{ - u64 val = tcp_clock_us(); - - if (val > tp->tcp_mstamp) - tp->tcp_mstamp = val; -} +void tcp_mstamp_refresh(struct tcp_sock *tp); static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0) { @@ -771,7 +761,13 @@ static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0) static inline u32 tcp_skb_timestamp(const struct sk_buff *skb) { - return div_u64(skb->skb_mstamp, USEC_PER_SEC / TCP_TS_HZ); + return div_u64(skb->skb_mstamp_ns, NSEC_PER_SEC / TCP_TS_HZ); +} + +/* provide the departure time in us unit */ +static inline u64 tcp_skb_timestamp_us(const struct sk_buff *skb) +{ + return div_u64(skb->skb_mstamp_ns, NSEC_PER_USEC); } @@ -817,7 +813,7 @@ struct tcp_skb_cb { #define TCPCB_SACKED_RETRANS 0x02 /* SKB retransmitted */ #define TCPCB_LOST 0x04 /* SKB is lost */ #define TCPCB_TAGBITS 0x07 /* All tag bits */ -#define TCPCB_REPAIRED 0x10 /* SKB repaired (no skb_mstamp) */ +#define TCPCB_REPAIRED 0x10 /* SKB repaired (no skb_mstamp_ns) */ #define TCPCB_EVER_RETRANS 0x80 /* Ever retransmitted frame */ #define TCPCB_RETRANS (TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS| \ TCPCB_REPAIRED) @@ -1940,7 +1936,7 @@ static inline s64 tcp_rto_delta_us(const struct sock *sk) { const struct sk_buff *skb = tcp_rtx_queue_head(sk); u32 rto = inet_csk(sk)->icsk_rto; - u64 rto_time_stamp_us = skb->skb_mstamp + jiffies_to_usecs(rto); + u64 rto_time_stamp_us = tcp_skb_timestamp_us(skb) + jiffies_to_usecs(rto); return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp; } diff --git a/include/net/tls.h b/include/net/tls.h index 0a769cf2f5f3..5e853835597e 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -41,7 +41,7 @@ #include <linux/tcp.h> #include <net/tcp.h> #include <net/strparser.h> - +#include <crypto/aead.h> #include <uapi/linux/tls.h> @@ -93,24 +93,47 @@ enum { TLS_NUM_CONFIG, }; -struct tls_sw_context_tx { - struct crypto_aead *aead_send; - struct crypto_wait async_wait; +/* TLS records are maintained in 'struct tls_rec'. It stores the memory pages + * allocated or mapped for each TLS record. After encryption, the records are + * stored in a linked list.
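struct tls_rec, defined next, is the unit this list is built from; records that completed (possibly asynchronous) encryption are flagged tx_ready and must be transmitted strictly in order. A simplified sketch of the in-order flush that tls_tx_records() performs; illustrative only, not the in-tree implementation:

	static void example_flush_tx_list(struct list_head *tx_list)
	{
		struct tls_rec *rec, *tmp;

		list_for_each_entry_safe(rec, tmp, tx_list, list) {
			if (!READ_ONCE(rec->tx_ready))
				break;	/* keep transmission in order */
			/* hand rec->sg_encrypted_data to the transport here */
			list_del(&rec->list);
		}
	}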
+ */ +struct tls_rec { + struct list_head list; + int tx_ready; + int tx_flags; + int inplace_crypto; - char aad_space[TLS_AAD_SPACE_SIZE]; + /* AAD | sg_plaintext_data | sg_tag */ + struct scatterlist sg_plaintext_data[MAX_SKB_FRAGS + 1]; + /* AAD | sg_encrypted_data (data contain overhead for hdr&iv&tag) */ + struct scatterlist sg_encrypted_data[MAX_SKB_FRAGS + 1]; unsigned int sg_plaintext_size; - int sg_plaintext_num_elem; - struct scatterlist sg_plaintext_data[MAX_SKB_FRAGS]; - unsigned int sg_encrypted_size; + int sg_plaintext_num_elem; int sg_encrypted_num_elem; - struct scatterlist sg_encrypted_data[MAX_SKB_FRAGS]; - /* AAD | sg_plaintext_data | sg_tag */ - struct scatterlist sg_aead_in[2]; - /* AAD | sg_encrypted_data (data contain overhead for hdr&iv&tag) */ - struct scatterlist sg_aead_out[2]; + char aad_space[TLS_AAD_SPACE_SIZE]; + struct aead_request aead_req; + u8 aead_req_ctx[]; +}; + +struct tx_work { + struct delayed_work work; + struct sock *sk; +}; + +struct tls_sw_context_tx { + struct crypto_aead *aead_send; + struct crypto_wait async_wait; + struct tx_work tx_work; + struct tls_rec *open_rec; + struct list_head tx_list; + atomic_t encrypt_pending; + int async_notify; + +#define BIT_TX_SCHEDULED 0 + unsigned long tx_bitmask; }; struct tls_sw_context_rx { @@ -124,6 +147,8 @@ struct tls_sw_context_rx { struct sk_buff *recv_pkt; u8 control; bool decrypted; + atomic_t decrypt_pending; + bool async_notify; }; struct tls_record_info { @@ -195,6 +220,7 @@ struct tls_context { struct scatterlist *partially_sent_record; u16 partially_sent_offset; + unsigned long flags; bool in_tcp_sendpages; @@ -259,6 +285,7 @@ int tls_device_sendpage(struct sock *sk, struct page *page, void tls_device_sk_destruct(struct sock *sk); void tls_device_init(void); void tls_device_cleanup(void); +int tls_tx_records(struct sock *sk, int flags); struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context, u32 seq, u64 *p_record_sn); @@ -277,6 +304,9 @@ void tls_sk_destruct(struct sock *sk, struct tls_context *ctx); int tls_push_sg(struct sock *sk, struct tls_context *ctx, struct scatterlist *sg, u16 first_offset, int flags); +int tls_push_partial_record(struct sock *sk, struct tls_context *ctx, + int flags); + int tls_push_pending_closed_record(struct sock *sk, struct tls_context *ctx, int flags, long *timeo); @@ -310,6 +340,17 @@ static inline bool tls_is_pending_open_record(struct tls_context *tls_ctx) return tls_ctx->pending_open_record_frags; } +static inline bool is_tx_ready(struct tls_sw_context_tx *ctx) +{ + struct tls_rec *rec; + + rec = list_first_entry(&ctx->tx_list, struct tls_rec, list); + if (!rec) + return false; + + return READ_ONCE(rec->tx_ready); +} + struct sk_buff * tls_validate_xmit_skb(struct sock *sk, struct net_device *dev, struct sk_buff *skb); diff --git a/include/net/udp.h b/include/net/udp.h index 8482a990b0bb..9e82cb391dea 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -443,8 +443,10 @@ int udpv4_offload_init(void); void udp_init(void); +DECLARE_STATIC_KEY_FALSE(udp_encap_needed_key); void udp_encap_enable(void); #if IS_ENABLED(CONFIG_IPV6) +DECLARE_STATIC_KEY_FALSE(udpv6_encap_needed_key); void udpv6_encap_enable(void); #endif diff --git a/include/net/vxlan.h b/include/net/vxlan.h index b99a02ae3934..7ef15179f263 100644 --- a/include/net/vxlan.h +++ b/include/net/vxlan.h @@ -5,7 +5,6 @@ #include <linux/if_vlan.h> #include <net/udp_tunnel.h> #include <net/dst_metadata.h> -#include <net/udp_tunnel.h> /* VXLAN protocol (RFC 7348) header: * 
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ diff --git a/include/net/xdp.h b/include/net/xdp.h index 76b95256c266..0f25b3675c5c 100644 --- a/include/net/xdp.h +++ b/include/net/xdp.h @@ -91,6 +91,8 @@ static inline void xdp_scrub_frame(struct xdp_frame *frame) frame->dev_rx = NULL; } +struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp); + /* Convert xdp_buff to xdp_frame */ static inline struct xdp_frame *convert_to_xdp_frame(struct xdp_buff *xdp) @@ -99,9 +101,8 @@ struct xdp_frame *convert_to_xdp_frame(struct xdp_buff *xdp) int metasize; int headroom; - /* TODO: implement clone, copy, use "native" MEM_TYPE */ if (xdp->rxq->mem.type == MEM_TYPE_ZERO_COPY) - return NULL; + return xdp_convert_zc_to_xdp_frame(xdp); /* Assure headroom is available for storing info */ headroom = xdp->data - xdp->data_hard_start; @@ -135,6 +136,7 @@ void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq); bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq); int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq, enum xdp_mem_type type, void *allocator); +void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq); /* Drivers not supporting XDP metadata can use this helper, which * rejects any room expansion for metadata as a result. diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h index 7161856bcf9c..13acb9803a6d 100644 --- a/include/net/xdp_sock.h +++ b/include/net/xdp_sock.h @@ -16,21 +16,23 @@ struct net_device; struct xsk_queue; -struct xdp_umem_props { - u64 chunk_mask; - u64 size; -}; - struct xdp_umem_page { void *addr; dma_addr_t dma; }; +struct xdp_umem_fq_reuse { + u32 nentries; + u32 length; + u64 handles[]; +}; + struct xdp_umem { struct xsk_queue *fq; struct xsk_queue *cq; struct xdp_umem_page *pages; - struct xdp_umem_props props; + u64 chunk_mask; + u64 size; u32 headroom; u32 chunk_size_nohr; struct user_struct *user; @@ -41,6 +43,7 @@ struct xdp_umem { struct page **pgs; u32 npgs; struct net_device *dev; + struct xdp_umem_fq_reuse *fq_reuse; u16 queue_id; bool zc; spinlock_t xsk_list_lock; @@ -79,6 +82,50 @@ void xsk_umem_discard_addr(struct xdp_umem *umem); void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries); bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len); void xsk_umem_consume_tx_done(struct xdp_umem *umem); +struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries); +struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem, + struct xdp_umem_fq_reuse *newq); +void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq); +struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev, u16 queue_id); + +static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr) +{ + return umem->pages[addr >> PAGE_SHIFT].addr + (addr & (PAGE_SIZE - 1)); +} + +static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr) +{ + return umem->pages[addr >> PAGE_SHIFT].dma + (addr & (PAGE_SIZE - 1)); +} + +/* Reuse-queue aware version of FILL queue helpers */ +static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr) +{ + struct xdp_umem_fq_reuse *rq = umem->fq_reuse; + + if (!rq->length) + return xsk_umem_peek_addr(umem, addr); + + *addr = rq->handles[rq->length - 1]; + return addr; +} + +static inline void xsk_umem_discard_addr_rq(struct xdp_umem *umem) +{ + struct xdp_umem_fq_reuse *rq = umem->fq_reuse; + + if (!rq->length) + xsk_umem_discard_addr(umem); + else + rq->length--; +} + +static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr) +{ + struct 
xdp_umem_fq_reuse *rq = umem->fq_reuse; + + rq->handles[rq->length++] = addr; +} #else static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp) { @@ -98,6 +145,74 @@ static inline bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs) { return false; } + +static inline u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr) +{ + return NULL; +} + +static inline void xsk_umem_discard_addr(struct xdp_umem *umem) +{ +} + +static inline void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries) +{ +} + +static inline bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, + u32 *len) +{ + return false; +} + +static inline void xsk_umem_consume_tx_done(struct xdp_umem *umem) +{ +} + +static inline struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries) +{ + return NULL; +} + +static inline struct xdp_umem_fq_reuse *xsk_reuseq_swap( + struct xdp_umem *umem, + struct xdp_umem_fq_reuse *newq) +{ + return NULL; +} +static inline void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq) +{ +} + +static inline struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev, + u16 queue_id) +{ + return NULL; +} + +static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr) +{ + return NULL; +} + +static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr) +{ + return 0; +} + +static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr) +{ + return NULL; +} + +static inline void xsk_umem_discard_addr_rq(struct xdp_umem *umem) +{ +} + +static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr) +{ +} + #endif /* CONFIG_XDP_SOCKETS */ #endif /* _LINUX_XDP_SOCK_H */ diff --git a/drivers/net/ethernet/mscc/ocelot_hsio.h b/include/soc/mscc/ocelot_hsio.h index d93ddec3931b..43112dd7313a 100644 --- a/drivers/net/ethernet/mscc/ocelot_hsio.h +++ b/include/soc/mscc/ocelot_hsio.h @@ -8,6 +8,80 @@ #ifndef _MSCC_OCELOT_HSIO_H_ #define _MSCC_OCELOT_HSIO_H_ +#define HSIO_PLL5G_CFG0 0x0000 +#define HSIO_PLL5G_CFG1 0x0004 +#define HSIO_PLL5G_CFG2 0x0008 +#define HSIO_PLL5G_CFG3 0x000c +#define HSIO_PLL5G_CFG4 0x0010 +#define HSIO_PLL5G_CFG5 0x0014 +#define HSIO_PLL5G_CFG6 0x0018 +#define HSIO_PLL5G_STATUS0 0x001c +#define HSIO_PLL5G_STATUS1 0x0020 +#define HSIO_PLL5G_BIST_CFG0 0x0024 +#define HSIO_PLL5G_BIST_CFG1 0x0028 +#define HSIO_PLL5G_BIST_CFG2 0x002c +#define HSIO_PLL5G_BIST_STAT0 0x0030 +#define HSIO_PLL5G_BIST_STAT1 0x0034 +#define HSIO_RCOMP_CFG0 0x0038 +#define HSIO_RCOMP_STATUS 0x003c +#define HSIO_SYNC_ETH_CFG 0x0040 +#define HSIO_SYNC_ETH_PLL_CFG 0x0048 +#define HSIO_S1G_DES_CFG 0x004c +#define HSIO_S1G_IB_CFG 0x0050 +#define HSIO_S1G_OB_CFG 0x0054 +#define HSIO_S1G_SER_CFG 0x0058 +#define HSIO_S1G_COMMON_CFG 0x005c +#define HSIO_S1G_PLL_CFG 0x0060 +#define HSIO_S1G_PLL_STATUS 0x0064 +#define HSIO_S1G_DFT_CFG0 0x0068 +#define HSIO_S1G_DFT_CFG1 0x006c +#define HSIO_S1G_DFT_CFG2 0x0070 +#define HSIO_S1G_TP_CFG 0x0074 +#define HSIO_S1G_RC_PLL_BIST_CFG 0x0078 +#define HSIO_S1G_MISC_CFG 0x007c +#define HSIO_S1G_DFT_STATUS 0x0080 +#define HSIO_S1G_MISC_STATUS 0x0084 +#define HSIO_MCB_S1G_ADDR_CFG 0x0088 +#define HSIO_S6G_DIG_CFG 0x008c +#define HSIO_S6G_DFT_CFG0 0x0090 +#define HSIO_S6G_DFT_CFG1 0x0094 +#define HSIO_S6G_DFT_CFG2 0x0098 +#define HSIO_S6G_TP_CFG0 0x009c +#define HSIO_S6G_TP_CFG1 0x00a0 +#define HSIO_S6G_RC_PLL_BIST_CFG 0x00a4 +#define HSIO_S6G_MISC_CFG 0x00a8 +#define HSIO_S6G_OB_ANEG_CFG 0x00ac +#define HSIO_S6G_DFT_STATUS 0x00b0 +#define HSIO_S6G_ERR_CNT 0x00b4 +#define HSIO_S6G_MISC_STATUS 0x00b8 +#define 
HSIO_S6G_DES_CFG 0x00bc +#define HSIO_S6G_IB_CFG 0x00c0 +#define HSIO_S6G_IB_CFG1 0x00c4 +#define HSIO_S6G_IB_CFG2 0x00c8 +#define HSIO_S6G_IB_CFG3 0x00cc +#define HSIO_S6G_IB_CFG4 0x00d0 +#define HSIO_S6G_IB_CFG5 0x00d4 +#define HSIO_S6G_OB_CFG 0x00d8 +#define HSIO_S6G_OB_CFG1 0x00dc +#define HSIO_S6G_SER_CFG 0x00e0 +#define HSIO_S6G_COMMON_CFG 0x00e4 +#define HSIO_S6G_PLL_CFG 0x00e8 +#define HSIO_S6G_ACJTAG_CFG 0x00ec +#define HSIO_S6G_GP_CFG 0x00f0 +#define HSIO_S6G_IB_STATUS0 0x00f4 +#define HSIO_S6G_IB_STATUS1 0x00f8 +#define HSIO_S6G_ACJTAG_STATUS 0x00fc +#define HSIO_S6G_PLL_STATUS 0x0100 +#define HSIO_S6G_REVID 0x0104 +#define HSIO_MCB_S6G_ADDR_CFG 0x0108 +#define HSIO_HW_CFG 0x010c +#define HSIO_HW_QSGMII_CFG 0x0110 +#define HSIO_HW_QSGMII_STAT 0x0114 +#define HSIO_CLK_CFG 0x0118 +#define HSIO_TEMP_SENSOR_CTRL 0x011c +#define HSIO_TEMP_SENSOR_CFG 0x0120 +#define HSIO_TEMP_SENSOR_STAT 0x0124 + #define HSIO_PLL5G_CFG0_ENA_ROT BIT(31) #define HSIO_PLL5G_CFG0_ENA_LANE BIT(30) #define HSIO_PLL5G_CFG0_ENA_CLKTREE BIT(29) diff --git a/include/trace/events/tcp.h b/include/trace/events/tcp.h index ac55b328d61b..2bc9960a31aa 100644 --- a/include/trace/events/tcp.h +++ b/include/trace/events/tcp.h @@ -56,6 +56,7 @@ DECLARE_EVENT_CLASS(tcp_event_sk_skb, TP_STRUCT__entry( __field(const void *, skbaddr) __field(const void *, skaddr) + __field(int, state) __field(__u16, sport) __field(__u16, dport) __array(__u8, saddr, 4) @@ -70,6 +71,7 @@ DECLARE_EVENT_CLASS(tcp_event_sk_skb, __entry->skbaddr = skb; __entry->skaddr = sk; + __entry->state = sk->sk_state; __entry->sport = ntohs(inet->inet_sport); __entry->dport = ntohs(inet->inet_dport); @@ -84,9 +86,10 @@ DECLARE_EVENT_CLASS(tcp_event_sk_skb, sk->sk_v6_rcv_saddr, sk->sk_v6_daddr); ), - TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c", + TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c state=%s\n", __entry->sport, __entry->dport, __entry->saddr, __entry->daddr, - __entry->saddr_v6, __entry->daddr_v6) + __entry->saddr_v6, __entry->daddr_v6, + show_tcp_state_name(__entry->state)) ); DEFINE_EVENT(tcp_event_sk_skb, tcp_retransmit_skb, diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 66917a4eba27..f9187b41dff6 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -127,6 +127,7 @@ enum bpf_map_type { BPF_MAP_TYPE_SOCKHASH, BPF_MAP_TYPE_CGROUP_STORAGE, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE, }; enum bpf_prog_type { @@ -152,6 +153,7 @@ enum bpf_prog_type { BPF_PROG_TYPE_LWT_SEG6LOCAL, BPF_PROG_TYPE_LIRC_MODE2, BPF_PROG_TYPE_SK_REUSEPORT, + BPF_PROG_TYPE_FLOW_DISSECTOR, }; enum bpf_attach_type { @@ -172,6 +174,7 @@ enum bpf_attach_type { BPF_CGROUP_UDP4_SENDMSG, BPF_CGROUP_UDP6_SENDMSG, BPF_LIRC_MODE2, + BPF_FLOW_DISSECTOR, __MAX_BPF_ATTACH_TYPE }; @@ -2141,6 +2144,77 @@ union bpf_attr { * request in the skb. * Return * 0 on success, or a negative error in case of failure. + * + * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u32 netns, u64 flags) + * Description + * Look for TCP socket matching *tuple*, optionally in a child + * network namespace *netns*. The return value must be checked, + * and if non-NULL, released via **bpf_sk_release**\ (). + * + * The *ctx* should point to the context of the program, such as + * the skb or socket (depending on the hook in use). This is used + * to determine the base network namespace for the lookup. 
+ * + * *tuple_size* must be one of: + * + * **sizeof**\ (*tuple*\ **->ipv4**) + * Look for an IPv4 socket. + * **sizeof**\ (*tuple*\ **->ipv6**) + * Look for an IPv6 socket. + * + * If the *netns* is zero, then the socket lookup table in the + * netns associated with the *ctx* will be used. For the TC hooks, + * this is in the netns of the device in the skb. For socket hooks, + * this is in the netns of the socket. If *netns* is non-zero, then + * it specifies the ID of the netns relative to the netns + * associated with the *ctx*. + * + * All values for *flags* are reserved for future usage, and must + * be left at zero. + * + * This helper is available only if the kernel was compiled with + * **CONFIG_NET** configuration option. + * Return + * Pointer to *struct bpf_sock*, or NULL in case of failure. + * + * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u32 netns, u64 flags) + * Description + * Look for UDP socket matching *tuple*, optionally in a child + * network namespace *netns*. The return value must be checked, + * and if non-NULL, released via **bpf_sk_release**\ (). + * + * The *ctx* should point to the context of the program, such as + * the skb or socket (depending on the hook in use). This is used + * to determine the base network namespace for the lookup. + * + * *tuple_size* must be one of: + * + * **sizeof**\ (*tuple*\ **->ipv4**) + * Look for an IPv4 socket. + * **sizeof**\ (*tuple*\ **->ipv6**) + * Look for an IPv6 socket. + * + * If the *netns* is zero, then the socket lookup table in the + * netns associated with the *ctx* will be used. For the TC hooks, + * this is in the netns of the device in the skb. For socket hooks, + * this is in the netns of the socket. If *netns* is non-zero, then + * it specifies the ID of the netns relative to the netns + * associated with the *ctx*. + * + * All values for *flags* are reserved for future usage, and must + * be left at zero. + * + * This helper is available only if the kernel was compiled with + * **CONFIG_NET** configuration option. + * Return + * Pointer to *struct bpf_sock*, or NULL in case of failure. + * + * int bpf_sk_release(struct bpf_sock *sk) + * Description + * Release the reference held by *sk*. *sk* must be a non-NULL + * pointer that was returned from bpf_sk_lookup_xxx\ (). + * Return + * 0 on success, or a negative error in case of failure. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -2226,7 +2300,10 @@ union bpf_attr { FN(get_current_cgroup_id), \ FN(get_local_storage), \ FN(sk_select_reuseport), \ - FN(skb_ancestor_cgroup_id), + FN(skb_ancestor_cgroup_id), \ + FN(sk_lookup_tcp), \ + FN(sk_lookup_udp), \ + FN(sk_release), /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call @@ -2333,6 +2410,7 @@ struct __sk_buff { /* ... here. */ __u32 data_meta; + struct bpf_flow_keys *flow_keys; }; struct bpf_tunnel_key { @@ -2395,6 +2473,23 @@ struct bpf_sock { */ }; +struct bpf_sock_tuple { + union { + struct { + __be32 saddr; + __be32 daddr; + __be16 sport; + __be16 dport; + } ipv4; + struct { + __be32 saddr[4]; + __be32 daddr[4]; + __be16 sport; + __be16 dport; + } ipv6; + }; +}; + #define XDP_PACKET_HEADROOM 256 /* User return codes for XDP prog type.
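A minimal sketch of a TC classifier using the new helpers; it assumes the usual libbpf/bpf_endian helper declarations, and the section name and addresses are arbitrary. Whatever else the program does with the result, every non-NULL socket must be released:

	SEC("classifier")
	int example_sk_lookup(struct __sk_buff *skb)
	{
		struct bpf_sock_tuple tuple = {};
		struct bpf_sock *sk;

		tuple.ipv4.daddr = bpf_htonl(0x7f000001);	/* 127.0.0.1 */
		tuple.ipv4.dport = bpf_htons(80);

		sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4), 0, 0);
		if (sk)
			bpf_sk_release(sk);
		return TC_ACT_OK;
	}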
@@ -2778,4 +2873,27 @@ enum bpf_task_fd_type { BPF_FD_TYPE_URETPROBE, /* filename + offset */ }; +struct bpf_flow_keys { + __u16 nhoff; + __u16 thoff; + __u16 addr_proto; /* ETH_P_* of valid addrs */ + __u8 is_frag; + __u8 is_first_frag; + __u8 is_encap; + __u8 ip_proto; + __be16 n_proto; + __be16 sport; + __be16 dport; + union { + struct { + __be32 ipv4_src; + __be32 ipv4_dst; + }; + struct { + __u32 ipv6_src[4]; /* in6_addr; network order */ + __u32 ipv6_dst[4]; /* in6_addr; network order */ + }; + }; +}; + #endif /* _UAPI__LINUX_BPF_H__ */ diff --git a/include/uapi/linux/dns_resolver.h b/include/uapi/linux/dns_resolver.h new file mode 100644 index 000000000000..129745f9c794 --- /dev/null +++ b/include/uapi/linux/dns_resolver.h @@ -0,0 +1,116 @@ +/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ +/* DNS resolver interface definitions. + * + * Copyright (C) 2018 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + +#ifndef _UAPI_LINUX_DNS_RESOLVER_H +#define _UAPI_LINUX_DNS_RESOLVER_H + +#include <linux/types.h> + +/* + * Type of payload. + */ +enum dns_payload_content_type { + DNS_PAYLOAD_IS_SERVER_LIST = 0, /* List of servers, requested by srv=1 */ +}; + +/* + * Type of address that might be found in an address record. + */ +enum dns_payload_address_type { + DNS_ADDRESS_IS_IPV4 = 0, /* 4-byte AF_INET address */ + DNS_ADDRESS_IS_IPV6 = 1, /* 16-byte AF_INET6 address */ +}; + +/* + * Type of protocol used to access a server. + */ +enum dns_payload_protocol_type { + DNS_SERVER_PROTOCOL_UNSPECIFIED = 0, + DNS_SERVER_PROTOCOL_UDP = 1, /* Use UDP to talk to the server */ + DNS_SERVER_PROTOCOL_TCP = 2, /* Use TCP to talk to the server */ +}; + +/* + * Source of record included in DNS resolver payload. + */ +enum dns_record_source { + DNS_RECORD_UNAVAILABLE = 0, /* No source available (empty record) */ + DNS_RECORD_FROM_CONFIG = 1, /* From local configuration data */ + DNS_RECORD_FROM_DNS_A = 2, /* From DNS A or AAAA record */ + DNS_RECORD_FROM_DNS_AFSDB = 3, /* From DNS AFSDB record */ + DNS_RECORD_FROM_DNS_SRV = 4, /* From DNS SRV record */ + DNS_RECORD_FROM_NSS = 5, /* From NSS */ + NR__dns_record_source +}; + +/* + * Status of record included in DNS resolver payload. + */ +enum dns_lookup_status { + DNS_LOOKUP_NOT_DONE = 0, /* No lookup has been made */ + DNS_LOOKUP_GOOD = 1, /* Good records obtained */ + DNS_LOOKUP_GOOD_WITH_BAD = 2, /* Good records, some decoding errors */ + DNS_LOOKUP_BAD = 3, /* Couldn't decode results */ + DNS_LOOKUP_GOT_NOT_FOUND = 4, /* Got a "Not Found" result */ + DNS_LOOKUP_GOT_LOCAL_FAILURE = 5, /* Local failure during lookup */ + DNS_LOOKUP_GOT_TEMP_FAILURE = 6, /* Temporary failure during lookup */ + DNS_LOOKUP_GOT_NS_FAILURE = 7, /* Name server failure */ + NR__dns_lookup_status +}; + +/* + * Header at the beginning of binary format payload. + */ +struct dns_payload_header { + __u8 zero; /* Zero byte: marks this as not being text */ + __u8 content; /* enum dns_payload_content_type */ + __u8 version; /* Encoding version */ +} __packed; + +/* + * Header at the beginning of a V1 server list. This is followed directly by + * the server records. Each server record begins with a struct of type + * dns_server_list_v1_server.
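Returning to BPF_PROG_TYPE_FLOW_DISSECTOR for a moment: struct bpf_flow_keys above is the out-parameter such a program fills through skb->flow_keys. A heavily simplified dissector sketch (assumes the usual BPF helper and uapi header includes; a real program must parse the packet from skb->data and bounds-check every access):

	SEC("flow_dissector")
	int example_dissect(struct __sk_buff *skb)
	{
		struct bpf_flow_keys *keys = skb->flow_keys;

		/* pretend we saw a plain IPv4/TCP packet */
		keys->nhoff = 0;
		keys->n_proto = bpf_htons(ETH_P_IP);
		keys->ip_proto = IPPROTO_TCP;
		keys->thoff = sizeof(struct iphdr);	/* assume no IP options */
		return BPF_OK;
	}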
+ */ +struct dns_server_list_v1_header { + struct dns_payload_header hdr; + __u8 source; /* enum dns_record_source */ + __u8 status; /* enum dns_lookup_status */ + __u8 nr_servers; /* Number of server records following this */ +} __packed; + +/* + * Header at the beginning of each V1 server record. This is followed by the + * characters of the name with no NUL-terminator, followed by the address + * records for that server. Each address record begins with a struct of type + * struct dns_server_list_v1_address. + */ +struct dns_server_list_v1_server { + __u16 name_len; /* Length of name (LE) */ + __u16 priority; /* Priority (as SRV record) (LE) */ + __u16 weight; /* Weight (as SRV record) (LE) */ + __u16 port; /* UDP/TCP port number (LE) */ + __u8 source; /* enum dns_record_source */ + __u8 status; /* enum dns_lookup_status */ + __u8 protocol; /* enum dns_payload_protocol_type */ + __u8 nr_addrs; +} __packed; + +/* + * Header at the beginning of each V1 address record. This is followed by the + * bytes of the address, 4 for IPV4 and 16 for IPV6. + */ +struct dns_server_list_v1_address { + __u8 address_type; /* enum dns_payload_address_type */ +} __packed; + +#endif /* _UAPI_LINUX_DNS_RESOLVER_H */ diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index dc69391d2bba..c8f8e2455bf3 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h @@ -91,10 +91,6 @@ * %ETHTOOL_GSET to get the current values before making specific * changes and then applying them with %ETHTOOL_SSET. * - * Drivers that implement set_settings() should validate all fields - * other than @cmd that are not described as read-only or deprecated, - * and must ignore all fields described as read-only. - * * Deprecated fields should be ignored by both users and drivers. */ struct ethtool_cmd { @@ -1800,14 +1796,9 @@ enum ethtool_reset_flags { * rejected. * * Deprecated %ethtool_cmd fields transceiver, maxtxpkt and maxrxpkt - * are not available in %ethtool_link_settings. Until all drivers are - * converted to ignore them or to the new %ethtool_link_settings API, - * for both queries and changes, users should always try - * %ETHTOOL_GLINKSETTINGS first, and if it fails with -ENOTSUPP stick - * only to %ETHTOOL_GSET and %ETHTOOL_SSET consistently. If it - * succeeds, then users should stick to %ETHTOOL_GLINKSETTINGS and - * %ETHTOOL_SLINKSETTINGS (which would support drivers implementing - * either %ethtool_cmd or %ethtool_link_settings). + * are not available in %ethtool_link_settings. These fields will be + * always set to zero in %ETHTOOL_GSET reply and %ETHTOOL_SSET will + * fail if any of them is set to non-zero value. * * Users should assume that all fields not marked read-only are * writable and subject to validation by the driver. 
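Tying the DNS payload structures together: after the list header come nr_servers server records, each followed by its name bytes and then its address records. A hypothetical, bounds-unchecked walk of a single server record; a real parser must validate every length against the payload size:

	static const __u8 *example_skip_server(const struct dns_server_list_v1_server *s)
	{
		const __u8 *p = (const __u8 *)(s + 1);
		unsigned int i;

		p += s->name_len;	/* name bytes, no NUL terminator */
		for (i = 0; i < s->nr_addrs; i++) {
			const struct dns_server_list_v1_address *a =
				(const struct dns_server_list_v1_address *)p;

			p += sizeof(*a);
			p += (a->address_type == DNS_ADDRESS_IS_IPV4) ? 4 : 16;
		}
		return p;	/* start of the next server record */
	}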
They should use diff --git a/include/uapi/linux/gen_stats.h b/include/uapi/linux/gen_stats.h index 24a861c0d29d..065408e16a80 100644 --- a/include/uapi/linux/gen_stats.h +++ b/include/uapi/linux/gen_stats.h @@ -12,6 +12,7 @@ enum { TCA_STATS_APP, TCA_STATS_RATE_EST64, TCA_STATS_PAD, + TCA_STATS_BASIC_HW, __TCA_STATS_MAX, }; #define TCA_STATS_MAX (__TCA_STATS_MAX - 1) diff --git a/include/uapi/linux/if_addr.h b/include/uapi/linux/if_addr.h index ebaf5701c9db..dfcf3ce0097f 100644 --- a/include/uapi/linux/if_addr.h +++ b/include/uapi/linux/if_addr.h @@ -34,6 +34,7 @@ enum { IFA_MULTICAST, IFA_FLAGS, IFA_RT_PRIORITY, /* u32, priority/metric for prefix route */ + IFA_TARGET_NETNSID, __IFA_MAX, }; diff --git a/include/uapi/linux/if_arp.h b/include/uapi/linux/if_arp.h index 4605527ca41b..c3cc5a9e5eaf 100644 --- a/include/uapi/linux/if_arp.h +++ b/include/uapi/linux/if_arp.h @@ -114,18 +114,18 @@ /* ARP ioctl request. */ struct arpreq { - struct sockaddr arp_pa; /* protocol address */ - struct sockaddr arp_ha; /* hardware address */ - int arp_flags; /* flags */ - struct sockaddr arp_netmask; /* netmask (only for proxy arps) */ - char arp_dev[16]; + struct sockaddr arp_pa; /* protocol address */ + struct sockaddr arp_ha; /* hardware address */ + int arp_flags; /* flags */ + struct sockaddr arp_netmask; /* netmask (only for proxy arps) */ + char arp_dev[IFNAMSIZ]; }; struct arpreq_old { - struct sockaddr arp_pa; /* protocol address */ - struct sockaddr arp_ha; /* hardware address */ - int arp_flags; /* flags */ - struct sockaddr arp_netmask; /* netmask (only for proxy arps) */ + struct sockaddr arp_pa; /* protocol address */ + struct sockaddr arp_ha; /* hardware address */ + int arp_flags; /* flags */ + struct sockaddr arp_netmask; /* netmask (only for proxy arps) */ }; /* ARP Flag values. 
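The arpreq layout itself is unchanged (IFNAMSIZ is 16); the device name length is now just spelled symbolically. For context, a userspace SIOCGARP sketch, illustrative only with error handling omitted:

	#include <arpa/inet.h>
	#include <net/if_arp.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <unistd.h>

	int example_get_arp_entry(void)
	{
		struct arpreq req;
		struct sockaddr_in *sin = (struct sockaddr_in *)&req.arp_pa;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);
		int ret;

		memset(&req, 0, sizeof(req));
		sin->sin_family = AF_INET;
		inet_pton(AF_INET, "192.0.2.1", &sin->sin_addr);
		strncpy(req.arp_dev, "eth0", sizeof(req.arp_dev) - 1);

		ret = ioctl(fd, SIOCGARP, &req);	/* fills req.arp_ha on success */
		close(fd);
		return ret;
	}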
*/ diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 43391e2d1153..1debfa42cba1 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -161,6 +161,7 @@ enum { IFLA_EVENT, IFLA_NEW_NETNSID, IFLA_IF_NETNSID, + IFLA_TARGET_NETNSID = IFLA_IF_NETNSID, /* new alias */ IFLA_CARRIER_UP_COUNT, IFLA_CARRIER_DOWN_COUNT, IFLA_NEW_IFINDEX, @@ -286,6 +287,7 @@ enum { IFLA_BR_MCAST_STATS_ENABLED, IFLA_BR_MCAST_IGMP_VERSION, IFLA_BR_MCAST_MLD_VERSION, + IFLA_BR_VLAN_STATS_PER_PORT, __IFLA_BR_MAX, }; @@ -554,6 +556,7 @@ enum { IFLA_GENEVE_UDP_ZERO_CSUM6_TX, IFLA_GENEVE_UDP_ZERO_CSUM6_RX, IFLA_GENEVE_LABEL, + IFLA_GENEVE_TTL_INHERIT, __IFLA_GENEVE_MAX }; #define IFLA_GENEVE_MAX (__IFLA_GENEVE_MAX - 1) diff --git a/include/uapi/linux/if_packet.h b/include/uapi/linux/if_packet.h index 67b61d91d89b..467b654bd4c7 100644 --- a/include/uapi/linux/if_packet.h +++ b/include/uapi/linux/if_packet.h @@ -57,6 +57,7 @@ struct sockaddr_ll { #define PACKET_QDISC_BYPASS 20 #define PACKET_ROLLOVER_STATS 21 #define PACKET_FANOUT_DATA 22 +#define PACKET_IGNORE_OUTGOING 23 #define PACKET_FANOUT_HASH 0 #define PACKET_FANOUT_LB 1 diff --git a/include/uapi/linux/in6.h b/include/uapi/linux/in6.h index ed291e55f024..71d82fe15b03 100644 --- a/include/uapi/linux/in6.h +++ b/include/uapi/linux/in6.h @@ -177,6 +177,7 @@ struct in6_flowlabel_req { #define IPV6_V6ONLY 26 #define IPV6_JOIN_ANYCAST 27 #define IPV6_LEAVE_ANYCAST 28 +#define IPV6_MULTICAST_ALL 29 /* IPV6_MTU_DISCOVER values */ #define IPV6_PMTUDISC_DONT 0 diff --git a/include/uapi/linux/neighbour.h b/include/uapi/linux/neighbour.h index 904db6148476..998155444e0d 100644 --- a/include/uapi/linux/neighbour.h +++ b/include/uapi/linux/neighbour.h @@ -43,6 +43,7 @@ enum { #define NTF_PROXY 0x08 /* == ATF_PUBL */ #define NTF_EXT_LEARNED 0x10 #define NTF_OFFLOADED 0x20 +#define NTF_STICKY 0x40 #define NTF_ROUTER 0x80 /* diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index e23290ffdc77..5444e76870bb 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -826,12 +826,14 @@ enum nft_meta_keys { * @NFT_RT_NEXTHOP4: routing nexthop for IPv4 * @NFT_RT_NEXTHOP6: routing nexthop for IPv6 * @NFT_RT_TCPMSS: fetch current path tcp mss + * @NFT_RT_XFRM: boolean, skb->dst->xfrm != NULL */ enum nft_rt_keys { NFT_RT_CLASSID, NFT_RT_NEXTHOP4, NFT_RT_NEXTHOP6, NFT_RT_TCPMSS, + NFT_RT_XFRM, __NFT_RT_MAX }; #define NFT_RT_MAX (__NFT_RT_MAX - 1) @@ -1175,6 +1177,21 @@ enum nft_quota_attributes { #define NFTA_QUOTA_MAX (__NFTA_QUOTA_MAX - 1) /** + * enum nft_secmark_attributes - nf_tables secmark object netlink attributes + * + * @NFTA_SECMARK_CTX: security context (NLA_STRING) + */ +enum nft_secmark_attributes { + NFTA_SECMARK_UNSPEC, + NFTA_SECMARK_CTX, + __NFTA_SECMARK_MAX, +}; +#define NFTA_SECMARK_MAX (__NFTA_SECMARK_MAX - 1) + +/* Max security context length */ +#define NFT_SECMARK_CTX_MAXLEN 256 + +/** * enum nft_reject_types - nf_tables reject expression reject types * * @NFT_REJECT_ICMP_UNREACH: reject using ICMP unreachable @@ -1430,7 +1447,8 @@ enum nft_ct_timeout_timeout_attributes { #define NFT_OBJECT_CONNLIMIT 5 #define NFT_OBJECT_TUNNEL 6 #define NFT_OBJECT_CT_TIMEOUT 7 -#define __NFT_OBJECT_MAX 8 +#define NFT_OBJECT_SECMARK 8 +#define __NFT_OBJECT_MAX 9 #define NFT_OBJECT_MAX (__NFT_OBJECT_MAX - 1) /** @@ -1512,6 +1530,35 @@ enum nft_devices_attributes { }; #define NFTA_DEVICE_MAX (__NFTA_DEVICE_MAX - 1) +/* + * enum nft_xfrm_attributes 
- nf_tables xfrm expr netlink attributes + * + * @NFTA_XFRM_DREG: destination register (NLA_U32) + * @NFTA_XFRM_KEY: enum nft_xfrm_keys (NLA_U32) + * @NFTA_XFRM_DIR: direction (NLA_U8) + * @NFTA_XFRM_SPNUM: index in secpath array (NLA_U32) + */ +enum nft_xfrm_attributes { + NFTA_XFRM_UNSPEC, + NFTA_XFRM_DREG, + NFTA_XFRM_KEY, + NFTA_XFRM_DIR, + NFTA_XFRM_SPNUM, + __NFTA_XFRM_MAX +}; +#define NFTA_XFRM_MAX (__NFTA_XFRM_MAX - 1) + +enum nft_xfrm_keys { + NFT_XFRM_KEY_UNSPEC, + NFT_XFRM_KEY_DADDR_IP4, + NFT_XFRM_KEY_DADDR_IP6, + NFT_XFRM_KEY_SADDR_IP4, + NFT_XFRM_KEY_SADDR_IP6, + NFT_XFRM_KEY_REQID, + NFT_XFRM_KEY_SPI, + __NFT_XFRM_KEY_MAX, +}; +#define NFT_XFRM_KEY_MAX (__NFT_XFRM_KEY_MAX - 1) /** * enum nft_trace_attributes - nf_tables trace netlink attributes diff --git a/include/uapi/linux/netfilter/xt_cgroup.h b/include/uapi/linux/netfilter/xt_cgroup.h index e96dfa1b34f7..b74e370d6133 100644 --- a/include/uapi/linux/netfilter/xt_cgroup.h +++ b/include/uapi/linux/netfilter/xt_cgroup.h @@ -22,4 +22,20 @@ struct xt_cgroup_info_v1 { void *priv __attribute__((aligned(8))); }; +#define XT_CGROUP_PATH_MAX 512 + +struct xt_cgroup_info_v2 { + __u8 has_path; + __u8 has_classid; + __u8 invert_path; + __u8 invert_classid; + union { + char path[XT_CGROUP_PATH_MAX]; + __u32 classid; + }; + + /* kernel internal data */ + void *priv __attribute__((aligned(8))); +}; + #endif /* _UAPI_XT_CGROUP_H */ diff --git a/include/uapi/linux/netfilter/xt_quota.h b/include/uapi/linux/netfilter/xt_quota.h index f3ba5d9e58b6..d72fd52adbba 100644 --- a/include/uapi/linux/netfilter/xt_quota.h +++ b/include/uapi/linux/netfilter/xt_quota.h @@ -15,9 +15,11 @@ struct xt_quota_info { __u32 flags; __u32 pad; __aligned_u64 quota; - - /* Used internally by the kernel */ - struct xt_quota_priv *master; +#ifdef __KERNEL__ + atomic64_t counter; +#else + __aligned_u64 remain; +#endif }; #endif /* _XT_QUOTA_H */ diff --git a/include/uapi/linux/netlink.h b/include/uapi/linux/netlink.h index 776bc92e9118..486ed1f0c0bc 100644 --- a/include/uapi/linux/netlink.h +++ b/include/uapi/linux/netlink.h @@ -155,6 +155,7 @@ enum nlmsgerr_attrs { #define NETLINK_LIST_MEMBERSHIPS 9 #define NETLINK_CAP_ACK 10 #define NETLINK_EXT_ACK 11 +#define NETLINK_DUMP_STRICT_CHK 12 struct nl_pktinfo { __u32 group; diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 7acc16f34942..6d610bae30a9 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -1033,6 +1033,9 @@ * %NL80211_ATTR_CHANNEL_WIDTH,%NL80211_ATTR_NSS attributes with its * address(specified in %NL80211_ATTR_MAC). * + * @NL80211_CMD_GET_FTM_RESPONDER_STATS: Retrieve FTM responder statistics, in + * the %NL80211_ATTR_FTM_RESPONDER_STATS attribute. + * * @NL80211_CMD_MAX: highest used command number * @__NL80211_CMD_AFTER_LAST: internal use */ @@ -1245,6 +1248,8 @@ enum nl80211_commands { NL80211_CMD_CONTROL_PORT_FRAME, + NL80211_CMD_GET_FTM_RESPONDER_STATS, + /* add new commands above here */ /* used to define NL80211_CMD_MAX below */ @@ -2241,6 +2246,14 @@ enum nl80211_commands { * association request when used with NL80211_CMD_NEW_STATION). Can be set * only if %NL80211_STA_FLAG_WME is set. 
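NETLINK_DUMP_STRICT_CHK, added to netlink.h above, is a per-socket opt-in: once set, the kernel rejects dump requests with malformed headers or stray attributes instead of silently ignoring them. Userspace enables it with a plain setsockopt; the fallback define below covers libc headers that predate the constant:

	#include <sys/socket.h>
	#include <linux/netlink.h>

	#ifndef SOL_NETLINK
	#define SOL_NETLINK 270	/* older libc headers may lack this */
	#endif

	static int example_enable_strict_dumps(int nlfd)
	{
		int one = 1;

		return setsockopt(nlfd, SOL_NETLINK, NETLINK_DUMP_STRICT_CHK,
				  &one, sizeof(one));
	}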
* + * @NL80211_ATTR_FTM_RESPONDER: nested attribute which user-space can include + * in %NL80211_CMD_START_AP or %NL80211_CMD_SET_BEACON for fine timing + * measurement (FTM) responder functionality and containing the FTM responder + * parameters, see &enum nl80211_ftm_responder_attr + * + * @NL80211_ATTR_FTM_RESPONDER_STATS: Nested attribute with FTM responder + * statistics, see &enum nl80211_ftm_responder_stats. + * + * @NUM_NL80211_ATTR: total number of nl80211_attrs available + * @NL80211_ATTR_MAX: highest attribute number currently defined + * @__NL80211_ATTR_AFTER_LAST: internal use @@ -2682,6 +2695,10 @@ enum nl80211_attrs { NL80211_ATTR_HE_CAPABILITY, + NL80211_ATTR_FTM_RESPONDER, + + NL80211_ATTR_FTM_RESPONDER_STATS, + /* add attributes here, update the policy in nl80211.c */ __NL80211_ATTR_AFTER_LAST, @@ -3050,8 +3067,13 @@ enum nl80211_sta_bss_param { * received from the station (u64, usec) * @NL80211_STA_INFO_PAD: attribute used for padding for 64-bit alignment * @NL80211_STA_INFO_ACK_SIGNAL: signal strength of the last ACK frame(u8, dBm) - * @NL80211_STA_INFO_DATA_ACK_SIGNAL_AVG: avg signal strength of (data) - * ACK frame (s8, dBm) + * @NL80211_STA_INFO_ACK_SIGNAL_AVG: avg signal strength of ACK frames (s8, dBm) + * @NL80211_STA_INFO_RX_MPDUS: total number of received packets (MPDUs) + * (u32, from this station) + * @NL80211_STA_INFO_FCS_ERROR_COUNT: total number of packets (MPDUs) received + * with an FCS error (u32, from this station). This count may not include + * some packets with an FCS error due to TA corruption. Hence this counter + * might not be fully accurate. * @__NL80211_STA_INFO_AFTER_LAST: internal * @NL80211_STA_INFO_MAX: highest possible station info attribute */ @@ -3091,13 +3113,19 @@ enum nl80211_sta_info { NL80211_STA_INFO_RX_DURATION, NL80211_STA_INFO_PAD, NL80211_STA_INFO_ACK_SIGNAL, - NL80211_STA_INFO_DATA_ACK_SIGNAL_AVG, + NL80211_STA_INFO_ACK_SIGNAL_AVG, + NL80211_STA_INFO_RX_MPDUS, + NL80211_STA_INFO_FCS_ERROR_COUNT, /* keep last */ __NL80211_STA_INFO_AFTER_LAST, NL80211_STA_INFO_MAX = __NL80211_STA_INFO_AFTER_LAST - 1 }; +/* we renamed this - stay compatible */ +#define NL80211_STA_INFO_DATA_ACK_SIGNAL_AVG NL80211_STA_INFO_ACK_SIGNAL_AVG + + /** * enum nl80211_tid_stats - per TID statistics attributes * @__NL80211_TID_STATS_INVALID: attribute number 0 is reserved @@ -4338,7 +4366,7 @@ enum nl80211_txrate_gi { * enum nl80211_band - Frequency band * @NL80211_BAND_2GHZ: 2.4 GHz ISM band * @NL80211_BAND_5GHZ: around 5 GHz band (4.9 - 5.7 GHz) - * @NL80211_BAND_60GHZ: around 60 GHz band (58.32 - 64.80 GHz) + * @NL80211_BAND_60GHZ: around 60 GHz band (58.32 - 69.12 GHz) * @NUM_NL80211_BANDS: number of bands, avoid using this in userspace * since newer kernel versions may support more bands */ @@ -5213,9 +5241,8 @@ enum nl80211_feature_flags { * "radar detected" event. * @NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211: Driver supports sending and * receiving control port frames over nl80211 instead of the netdevice. - * @NL80211_EXT_FEATURE_DATA_ACK_SIGNAL_SUPPORT: This Driver support data ack - * rssi if firmware support, this flag is to intimate about ack rssi - * support to nl80211. + * @NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT: This driver/device supports + * (average) ACK signal strength reporting. * @NL80211_EXT_FEATURE_TXQS: Driver supports FQ-CoDel-enabled intermediate * TXQs.
* @NL80211_EXT_FEATURE_SCAN_RANDOM_SN: Driver/device supports randomizing the @@ -5223,6 +5250,13 @@ enum nl80211_feature_flags { * @NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT: Driver/device can omit all data * except for supported rates from the probe request content if requested * by the %NL80211_SCAN_FLAG_MIN_PREQ_CONTENT flag. + * @NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER: Driver supports enabling fine + * timing measurement responder role. + * + * @NL80211_EXT_FEATURE_CAN_REPLACE_PTK0: Driver/device confirm that they are + * able to rekey an in-use key correctly. Userspace must not rekey PTK keys + * if this flag is not set. Ignoring this can leak clear text packets and/or + * freeze the connection. * * @NUM_NL80211_EXT_FEATURES: number of extended features. * @MAX_NL80211_EXT_FEATURES: highest extended feature index. @@ -5255,10 +5289,14 @@ enum nl80211_ext_feature_index { NL80211_EXT_FEATURE_HIGH_ACCURACY_SCAN, NL80211_EXT_FEATURE_DFS_OFFLOAD, NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211, - NL80211_EXT_FEATURE_DATA_ACK_SIGNAL_SUPPORT, + NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT, + /* we renamed this - stay compatible */ + NL80211_EXT_FEATURE_DATA_ACK_SIGNAL_SUPPORT = NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT, NL80211_EXT_FEATURE_TXQS, NL80211_EXT_FEATURE_SCAN_RANDOM_SN, NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT, + NL80211_EXT_FEATURE_CAN_REPLACE_PTK0, + NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER, /* add new features before the definition below */ NUM_NL80211_EXT_FEATURES, @@ -5798,4 +5836,74 @@ enum nl80211_external_auth_action { NL80211_EXTERNAL_AUTH_ABORT, }; +/** + * enum nl80211_ftm_responder_attributes - fine timing measurement + * responder attributes + * @__NL80211_FTM_RESP_ATTR_INVALID: Invalid + * @NL80211_FTM_RESP_ATTR_ENABLED: FTM responder is enabled + * @NL80211_FTM_RESP_ATTR_LCI: The content of Measurement Report Element + * (9.4.2.22 in 802.11-2016) with type 8 - LCI (9.4.2.22.10) + * @NL80211_FTM_RESP_ATTR_CIVICLOC: The content of Measurement Report Element + * (9.4.2.22 in 802.11-2016) with type 11 - Civic (9.4.2.22.13) + * @__NL80211_FTM_RESP_ATTR_LAST: Internal + * @NL80211_FTM_RESP_ATTR_MAX: highest FTM responder attribute. + */ +enum nl80211_ftm_responder_attributes { + __NL80211_FTM_RESP_ATTR_INVALID, + + NL80211_FTM_RESP_ATTR_ENABLED, + NL80211_FTM_RESP_ATTR_LCI, + NL80211_FTM_RESP_ATTR_CIVICLOC, + + /* keep last */ + __NL80211_FTM_RESP_ATTR_LAST, + NL80211_FTM_RESP_ATTR_MAX = __NL80211_FTM_RESP_ATTR_LAST - 1, +}; + +/** + * enum nl80211_ftm_responder_stats - FTM responder statistics + * + * These attribute types are used with %NL80211_ATTR_FTM_RESPONDER_STATS + * when getting FTM responder statistics.
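User space discovers flags such as NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER and NL80211_EXT_FEATURE_CAN_REPLACE_PTK0 from the wiphy dump, where extended features are reported as a variable-length byte bitmap, one bit per nl80211_ext_feature_index value. A sketch of the bit test, assuming that byte/bit layout (the helper is ours; wpa_supplicant carries an equivalent one):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Test one extended-feature index against the bitmap carried in
 * NL80211_ATTR_EXT_FEATURES: bit N lives in byte N / 8, position N % 8. */
static bool ext_feature_isset(const uint8_t *ext_features, size_t len,
			      unsigned int ftidx)
{
	if (ftidx / 8 >= len)
		return false;
	return ext_features[ftidx / 8] & (1U << (ftidx % 8));
}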
+ * + * @__NL80211_FTM_STATS_INVALID: attribute number 0 is reserved + * @NL80211_FTM_STATS_SUCCESS_NUM: number of FTM sessions in which all frames + * were successfully answered (u32) + * @NL80211_FTM_STATS_PARTIAL_NUM: number of FTM sessions in which part of the + * frames were successfully answered (u32) + * @NL80211_FTM_STATS_FAILED_NUM: number of failed FTM sessions (u32) + * @NL80211_FTM_STATS_ASAP_NUM: number of ASAP sessions (u32) + * @NL80211_FTM_STATS_NON_ASAP_NUM: number of non-ASAP sessions (u32) + * @NL80211_FTM_STATS_TOTAL_DURATION_MSEC: total sessions durations - gives an + * indication of how much time the responder was busy (u64, msec) + * @NL80211_FTM_STATS_UNKNOWN_TRIGGERS_NUM: number of unknown FTM triggers - + * triggers from initiators that didn't finish successfully the negotiation + * phase with the responder (u32) + * @NL80211_FTM_STATS_RESCHEDULE_REQUESTS_NUM: number of FTM reschedule requests + * - initiator asks for a new scheduling although it already has a scheduled + * FTM slot (u32) + * @NL80211_FTM_STATS_OUT_OF_WINDOW_TRIGGERS_NUM: number of FTM triggers out of + * scheduled window (u32) + * @NL80211_FTM_STATS_PAD: used for padding, ignore + * @__NL80211_FTM_STATS_AFTER_LAST: Internal + * @NL80211_FTM_STATS_MAX: highest possible FTM responder stats attribute */ +enum nl80211_ftm_responder_stats { + __NL80211_FTM_STATS_INVALID, + NL80211_FTM_STATS_SUCCESS_NUM, + NL80211_FTM_STATS_PARTIAL_NUM, + NL80211_FTM_STATS_FAILED_NUM, + NL80211_FTM_STATS_ASAP_NUM, + NL80211_FTM_STATS_NON_ASAP_NUM, + NL80211_FTM_STATS_TOTAL_DURATION_MSEC, + NL80211_FTM_STATS_UNKNOWN_TRIGGERS_NUM, + NL80211_FTM_STATS_RESCHEDULE_REQUESTS_NUM, + NL80211_FTM_STATS_OUT_OF_WINDOW_TRIGGERS_NUM, + NL80211_FTM_STATS_PAD, + + /* keep last */ + __NL80211_FTM_STATS_AFTER_LAST, + NL80211_FTM_STATS_MAX = __NL80211_FTM_STATS_AFTER_LAST - 1 +}; + #endif /* __LINUX_NL80211_H */ diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index be382fb0592d..401d0c1e612d 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h @@ -483,6 +483,8 @@ enum { TCA_FLOWER_KEY_ENC_OPTS, TCA_FLOWER_KEY_ENC_OPTS_MASK, + TCA_FLOWER_IN_HW_COUNT, + __TCA_FLOWER_MAX, }; diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h index 8975fd1a1421..89ee47c2f17d 100644 --- a/include/uapi/linux/pkt_sched.h +++ b/include/uapi/linux/pkt_sched.h @@ -395,9 +395,9 @@ enum { struct tc_htb_xstats { __u32 lends; __u32 borrows; - __u32 giants; /* too big packets (rate will not be accurate) */ - __u32 tokens; - __u32 ctokens; + __u32 giants; /* unused since 'Make HTB scheduler work with TSO.'
*/ + __s32 tokens; + __s32 ctokens; }; /* HFSC section */ @@ -1084,4 +1084,50 @@ enum { CAKE_ATM_MAX }; + +/* TAPRIO */ +enum { + TC_TAPRIO_CMD_SET_GATES = 0x00, + TC_TAPRIO_CMD_SET_AND_HOLD = 0x01, + TC_TAPRIO_CMD_SET_AND_RELEASE = 0x02, +}; + +enum { + TCA_TAPRIO_SCHED_ENTRY_UNSPEC, + TCA_TAPRIO_SCHED_ENTRY_INDEX, /* u32 */ + TCA_TAPRIO_SCHED_ENTRY_CMD, /* u8 */ + TCA_TAPRIO_SCHED_ENTRY_GATE_MASK, /* u32 */ + TCA_TAPRIO_SCHED_ENTRY_INTERVAL, /* u32 */ + __TCA_TAPRIO_SCHED_ENTRY_MAX, +}; +#define TCA_TAPRIO_SCHED_ENTRY_MAX (__TCA_TAPRIO_SCHED_ENTRY_MAX - 1) + +/* The format for schedule entry list is: + * [TCA_TAPRIO_SCHED_ENTRY_LIST] + * [TCA_TAPRIO_SCHED_ENTRY] + * [TCA_TAPRIO_SCHED_ENTRY_CMD] + * [TCA_TAPRIO_SCHED_ENTRY_GATE_MASK] + * [TCA_TAPRIO_SCHED_ENTRY_INTERVAL] + */ +enum { + TCA_TAPRIO_SCHED_UNSPEC, + TCA_TAPRIO_SCHED_ENTRY, + __TCA_TAPRIO_SCHED_MAX, +}; + +#define TCA_TAPRIO_SCHED_MAX (__TCA_TAPRIO_SCHED_MAX - 1) + +enum { + TCA_TAPRIO_ATTR_UNSPEC, + TCA_TAPRIO_ATTR_PRIOMAP, /* struct tc_mqprio_qopt */ + TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST, /* nested of entry */ + TCA_TAPRIO_ATTR_SCHED_BASE_TIME, /* s64 */ + TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY, /* single entry */ + TCA_TAPRIO_ATTR_SCHED_CLOCKID, /* s32 */ + TCA_TAPRIO_PAD, + __TCA_TAPRIO_ATTR_MAX, +}; + +#define TCA_TAPRIO_ATTR_MAX (__TCA_TAPRIO_ATTR_MAX - 1) + #endif diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c index 0c17aab3ce5f..dded84cbe814 100644 --- a/kernel/bpf/arraymap.c +++ b/kernel/bpf/arraymap.c @@ -358,6 +358,29 @@ static void array_map_seq_show_elem(struct bpf_map *map, void *key, rcu_read_unlock(); } +static void percpu_array_map_seq_show_elem(struct bpf_map *map, void *key, + struct seq_file *m) +{ + struct bpf_array *array = container_of(map, struct bpf_array, map); + u32 index = *(u32 *)key; + void __percpu *pptr; + int cpu; + + rcu_read_lock(); + + seq_printf(m, "%u: {\n", *(u32 *)key); + pptr = array->pptrs[index & array->index_mask]; + for_each_possible_cpu(cpu) { + seq_printf(m, "\tcpu%d: ", cpu); + btf_type_seq_show(map->btf, map->btf_value_type_id, + per_cpu_ptr(pptr, cpu), m); + seq_puts(m, "\n"); + } + seq_puts(m, "}\n"); + + rcu_read_unlock(); +} + static int array_map_check_btf(const struct bpf_map *map, const struct btf_type *key_type, const struct btf_type *value_type) @@ -398,6 +421,7 @@ const struct bpf_map_ops percpu_array_map_ops = { .map_lookup_elem = percpu_array_map_lookup_elem, .map_update_elem = array_map_update_elem, .map_delete_elem = array_map_delete_elem, + .map_seq_show_elem = percpu_array_map_seq_show_elem, .map_check_btf = array_map_check_btf, }; @@ -529,6 +553,29 @@ static void bpf_fd_array_map_clear(struct bpf_map *map) fd_array_map_delete_elem(map, &i); } +static void prog_array_map_seq_show_elem(struct bpf_map *map, void *key, + struct seq_file *m) +{ + void **elem, *ptr; + u32 prog_id; + + rcu_read_lock(); + + elem = array_map_lookup_elem(map, key); + if (elem) { + ptr = READ_ONCE(*elem); + if (ptr) { + seq_printf(m, "%u: ", *(u32 *)key); + prog_id = prog_fd_array_sys_lookup_elem(ptr); + btf_type_seq_show(map->btf, map->btf_value_type_id, + &prog_id, m); + seq_puts(m, "\n"); + } + } + + rcu_read_unlock(); +} + const struct bpf_map_ops prog_array_map_ops = { .map_alloc_check = fd_array_map_alloc_check, .map_alloc = array_map_alloc, @@ -540,7 +587,7 @@ const struct bpf_map_ops prog_array_map_ops = { .map_fd_put_ptr = prog_fd_array_put_ptr, .map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem, .map_release_uref = bpf_fd_array_map_clear, - .map_check_btf = 
map_check_no_btf, + .map_seq_show_elem = prog_array_map_seq_show_elem, }; static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file, diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c index 6a7d931bbc55..00f6ed2e4f9a 100644 --- a/kernel/bpf/cgroup.c +++ b/kernel/bpf/cgroup.c @@ -25,6 +25,7 @@ EXPORT_SYMBOL(cgroup_bpf_enabled_key); */ void cgroup_bpf_put(struct cgroup *cgrp) { + enum bpf_cgroup_storage_type stype; unsigned int type; for (type = 0; type < ARRAY_SIZE(cgrp->bpf.progs); type++) { @@ -34,8 +35,10 @@ void cgroup_bpf_put(struct cgroup *cgrp) list_for_each_entry_safe(pl, tmp, progs, node) { list_del(&pl->node); bpf_prog_put(pl->prog); - bpf_cgroup_storage_unlink(pl->storage); - bpf_cgroup_storage_free(pl->storage); + for_each_cgroup_storage_type(stype) { + bpf_cgroup_storage_unlink(pl->storage[stype]); + bpf_cgroup_storage_free(pl->storage[stype]); + } kfree(pl); static_branch_dec(&cgroup_bpf_enabled_key); } @@ -97,6 +100,7 @@ static int compute_effective_progs(struct cgroup *cgrp, enum bpf_attach_type type, struct bpf_prog_array __rcu **array) { + enum bpf_cgroup_storage_type stype; struct bpf_prog_array *progs; struct bpf_prog_list *pl; struct cgroup *p = cgrp; @@ -125,7 +129,9 @@ static int compute_effective_progs(struct cgroup *cgrp, continue; progs->items[cnt].prog = pl->prog; - progs->items[cnt].cgroup_storage = pl->storage; + for_each_cgroup_storage_type(stype) + progs->items[cnt].cgroup_storage[stype] = + pl->storage[stype]; cnt++; } } while ((p = cgroup_parent(p))); @@ -232,7 +238,9 @@ int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog, { struct list_head *progs = &cgrp->bpf.progs[type]; struct bpf_prog *old_prog = NULL; - struct bpf_cgroup_storage *storage, *old_storage = NULL; + struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE], + *old_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {NULL}; + enum bpf_cgroup_storage_type stype; struct bpf_prog_list *pl; bool pl_was_allocated; int err; @@ -254,34 +262,44 @@ int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog, if (prog_list_length(progs) >= BPF_CGROUP_MAX_PROGS) return -E2BIG; - storage = bpf_cgroup_storage_alloc(prog); - if (IS_ERR(storage)) - return -ENOMEM; + for_each_cgroup_storage_type(stype) { + storage[stype] = bpf_cgroup_storage_alloc(prog, stype); + if (IS_ERR(storage[stype])) { + storage[stype] = NULL; + for_each_cgroup_storage_type(stype) + bpf_cgroup_storage_free(storage[stype]); + return -ENOMEM; + } + } if (flags & BPF_F_ALLOW_MULTI) { list_for_each_entry(pl, progs, node) { if (pl->prog == prog) { /* disallow attaching the same prog twice */ - bpf_cgroup_storage_free(storage); + for_each_cgroup_storage_type(stype) + bpf_cgroup_storage_free(storage[stype]); return -EINVAL; } } pl = kmalloc(sizeof(*pl), GFP_KERNEL); if (!pl) { - bpf_cgroup_storage_free(storage); + for_each_cgroup_storage_type(stype) + bpf_cgroup_storage_free(storage[stype]); return -ENOMEM; } pl_was_allocated = true; pl->prog = prog; - pl->storage = storage; + for_each_cgroup_storage_type(stype) + pl->storage[stype] = storage[stype]; list_add_tail(&pl->node, progs); } else { if (list_empty(progs)) { pl = kmalloc(sizeof(*pl), GFP_KERNEL); if (!pl) { - bpf_cgroup_storage_free(storage); + for_each_cgroup_storage_type(stype) + bpf_cgroup_storage_free(storage[stype]); return -ENOMEM; } pl_was_allocated = true; @@ -289,12 +307,15 @@ int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog, } else { pl = list_first_entry(progs, typeof(*pl), node); old_prog = pl->prog; - old_storage = 
pl->storage; - bpf_cgroup_storage_unlink(old_storage); + for_each_cgroup_storage_type(stype) { + old_storage[stype] = pl->storage[stype]; + bpf_cgroup_storage_unlink(old_storage[stype]); + } pl_was_allocated = false; } pl->prog = prog; - pl->storage = storage; + for_each_cgroup_storage_type(stype) + pl->storage[stype] = storage[stype]; } cgrp->bpf.flags[type] = flags; @@ -304,21 +325,27 @@ int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog, goto cleanup; static_branch_inc(&cgroup_bpf_enabled_key); - if (old_storage) - bpf_cgroup_storage_free(old_storage); + for_each_cgroup_storage_type(stype) { + if (!old_storage[stype]) + continue; + bpf_cgroup_storage_free(old_storage[stype]); + } if (old_prog) { bpf_prog_put(old_prog); static_branch_dec(&cgroup_bpf_enabled_key); } - bpf_cgroup_storage_link(storage, cgrp, type); + for_each_cgroup_storage_type(stype) + bpf_cgroup_storage_link(storage[stype], cgrp, type); return 0; cleanup: /* and cleanup the prog list */ pl->prog = old_prog; - bpf_cgroup_storage_free(pl->storage); - pl->storage = old_storage; - bpf_cgroup_storage_link(old_storage, cgrp, type); + for_each_cgroup_storage_type(stype) { + bpf_cgroup_storage_free(pl->storage[stype]); + pl->storage[stype] = old_storage[stype]; + bpf_cgroup_storage_link(old_storage[stype], cgrp, type); + } if (pl_was_allocated) { list_del(&pl->node); kfree(pl); @@ -339,6 +366,7 @@ int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog, enum bpf_attach_type type, u32 unused_flags) { struct list_head *progs = &cgrp->bpf.progs[type]; + enum bpf_cgroup_storage_type stype; u32 flags = cgrp->bpf.flags[type]; struct bpf_prog *old_prog = NULL; struct bpf_prog_list *pl; @@ -385,8 +413,10 @@ int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog, /* now can actually delete it from this cgroup list */ list_del(&pl->node); - bpf_cgroup_storage_unlink(pl->storage); - bpf_cgroup_storage_free(pl->storage); + for_each_cgroup_storage_type(stype) { + bpf_cgroup_storage_unlink(pl->storage[stype]); + bpf_cgroup_storage_free(pl->storage[stype]); + } kfree(pl); if (list_empty(progs)) /* last program was detached, reset flags to zero */ @@ -677,6 +707,8 @@ cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_get_current_uid_gid_proto; case BPF_FUNC_get_local_storage: return &bpf_get_local_storage_proto; + case BPF_FUNC_get_current_cgroup_id: + return &bpf_get_current_cgroup_id_proto; case BPF_FUNC_trace_printk: if (capable(CAP_SYS_ADMIN)) return bpf_get_trace_printk_proto(); diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index 03cc59ee9c95..2c1790288138 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c @@ -1285,6 +1285,35 @@ int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value, return ret; } +static void htab_percpu_map_seq_show_elem(struct bpf_map *map, void *key, + struct seq_file *m) +{ + struct htab_elem *l; + void __percpu *pptr; + int cpu; + + rcu_read_lock(); + + l = __htab_map_lookup_elem(map, key); + if (!l) { + rcu_read_unlock(); + return; + } + + btf_type_seq_show(map->btf, map->btf_key_type_id, key, m); + seq_puts(m, ": {\n"); + pptr = htab_elem_get_ptr(l, map->key_size); + for_each_possible_cpu(cpu) { + seq_printf(m, "\tcpu%d: ", cpu); + btf_type_seq_show(map->btf, map->btf_value_type_id, + per_cpu_ptr(pptr, cpu), m); + seq_puts(m, "\n"); + } + seq_puts(m, "}\n"); + + rcu_read_unlock(); +} + const struct bpf_map_ops htab_percpu_map_ops = { .map_alloc_check = htab_map_alloc_check, .map_alloc = htab_map_alloc, 
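The map_seq_show_elem callbacks above make pinned per-CPU maps readable through bpffs, printing one "cpuN:" line per possible CPU. The same layout governs the plain lookup syscall: as the syscall.c hunk further down spells out, user space must supply round_up(value_size, 8) bytes for every possible CPU. A sketch of such a lookup, assuming libbpf provides bpf_map_lookup_elem() and libbpf_num_possible_cpus() (map fd, key, and value size come from the caller):

#include <errno.h>
#include <stdlib.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

/* Read one element of a per-CPU map; the kernel fills
 * round_up(value_size, 8) bytes for each possible CPU, so the buffer
 * must cover all possible CPUs, not just the online ones. */
static int read_percpu_elem(int map_fd, unsigned int key,
			    unsigned int value_size)
{
	int ncpus = libbpf_num_possible_cpus();
	size_t stride = (value_size + 7) & ~7U;	/* round_up(value_size, 8) */
	char *values;
	int err;

	if (ncpus < 0)
		return ncpus;
	values = calloc(ncpus, stride);
	if (!values)
		return -ENOMEM;
	err = bpf_map_lookup_elem(map_fd, &key, values);
	/* on success, values + cpu * stride holds each CPU's copy */
	free(values);
	return err;
}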
@@ -1293,6 +1322,7 @@ const struct bpf_map_ops htab_percpu_map_ops = { .map_lookup_elem = htab_percpu_map_lookup_elem, .map_update_elem = htab_percpu_map_update_elem, .map_delete_elem = htab_map_delete_elem, + .map_seq_show_elem = htab_percpu_map_seq_show_elem, }; const struct bpf_map_ops htab_lru_percpu_map_ops = { @@ -1303,6 +1333,7 @@ const struct bpf_map_ops htab_lru_percpu_map_ops = { .map_lookup_elem = htab_lru_percpu_map_lookup_elem, .map_update_elem = htab_lru_percpu_map_update_elem, .map_delete_elem = htab_lru_map_delete_elem, + .map_seq_show_elem = htab_percpu_map_seq_show_elem, }; static int fd_htab_map_alloc_check(union bpf_attr *attr) diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index 1991466b8327..6502115e8f55 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -194,16 +194,28 @@ const struct bpf_func_proto bpf_get_current_cgroup_id_proto = { .ret_type = RET_INTEGER, }; -DECLARE_PER_CPU(void*, bpf_cgroup_storage); +#ifdef CONFIG_CGROUP_BPF +DECLARE_PER_CPU(struct bpf_cgroup_storage*, + bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]); BPF_CALL_2(bpf_get_local_storage, struct bpf_map *, map, u64, flags) { - /* map and flags arguments are not used now, - * but provide an ability to extend the API - * for other types of local storages. - * verifier checks that their values are correct. + /* flags argument is not used now, + * but provides an ability to extend the API. + * verifier checks that its value is correct. */ - return (unsigned long) this_cpu_read(bpf_cgroup_storage); + enum bpf_cgroup_storage_type stype = cgroup_storage_type(map); + struct bpf_cgroup_storage *storage; + void *ptr; + + storage = this_cpu_read(bpf_cgroup_storage[stype]); + + if (stype == BPF_CGROUP_STORAGE_SHARED) + ptr = &READ_ONCE(storage->buf)->data[0]; + else + ptr = this_cpu_ptr(storage->percpu_buf); + + return (unsigned long)ptr; } const struct bpf_func_proto bpf_get_local_storage_proto = { @@ -214,3 +226,4 @@ const struct bpf_func_proto bpf_get_local_storage_proto = { .arg2_type = ARG_ANYTHING, }; #endif +#endif diff --git a/kernel/bpf/local_storage.c b/kernel/bpf/local_storage.c index 830d7f095748..c97a8f968638 100644 --- a/kernel/bpf/local_storage.c +++ b/kernel/bpf/local_storage.c @@ -7,7 +7,8 @@ #include <linux/rbtree.h> #include <linux/slab.h> -DEFINE_PER_CPU(void*, bpf_cgroup_storage); +DEFINE_PER_CPU(struct bpf_cgroup_storage*, + bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]); #ifdef CONFIG_CGROUP_BPF @@ -151,6 +152,71 @@ static int cgroup_storage_update_elem(struct bpf_map *map, void *_key, return 0; } +int bpf_percpu_cgroup_storage_copy(struct bpf_map *_map, void *_key, + void *value) +{ + struct bpf_cgroup_storage_map *map = map_to_storage(_map); + struct bpf_cgroup_storage_key *key = _key; + struct bpf_cgroup_storage *storage; + int cpu, off = 0; + u32 size; + + rcu_read_lock(); + storage = cgroup_storage_lookup(map, key, false); + if (!storage) { + rcu_read_unlock(); + return -ENOENT; + } + + /* per_cpu areas are zero-filled and bpf programs can only + * access 'value_size' of them, so copying rounded areas + * will not leak any kernel data + */ + size = round_up(_map->value_size, 8); + for_each_possible_cpu(cpu) { + bpf_long_memcpy(value + off, + per_cpu_ptr(storage->percpu_buf, cpu), size); + off += size; + } + rcu_read_unlock(); + return 0; +} + +int bpf_percpu_cgroup_storage_update(struct bpf_map *_map, void *_key, + void *value, u64 map_flags) +{ + struct bpf_cgroup_storage_map *map = map_to_storage(_map); + struct bpf_cgroup_storage_key *key = _key; + 
struct bpf_cgroup_storage *storage; + int cpu, off = 0; + u32 size; + + if (map_flags != BPF_ANY && map_flags != BPF_EXIST) + return -EINVAL; + + rcu_read_lock(); + storage = cgroup_storage_lookup(map, key, false); + if (!storage) { + rcu_read_unlock(); + return -ENOENT; + } + + /* the user space will provide round_up(value_size, 8) bytes that + * will be copied into per-cpu area. bpf programs can only access + * value_size of it. During lookup the same extra bytes will be + * returned or zeros which were zero-filled by percpu_alloc, + * so no kernel data leaks possible + */ + size = round_up(_map->value_size, 8); + for_each_possible_cpu(cpu) { + bpf_long_memcpy(per_cpu_ptr(storage->percpu_buf, cpu), + value + off, size); + off += size; + } + rcu_read_unlock(); + return 0; +} + static int cgroup_storage_get_next_key(struct bpf_map *_map, void *_key, void *_next_key) { @@ -254,6 +320,7 @@ const struct bpf_map_ops cgroup_storage_map_ops = { int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *_map) { + enum bpf_cgroup_storage_type stype = cgroup_storage_type(_map); struct bpf_cgroup_storage_map *map = map_to_storage(_map); int ret = -EBUSY; @@ -261,11 +328,12 @@ int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *_map) if (map->prog && map->prog != prog) goto unlock; - if (prog->aux->cgroup_storage && prog->aux->cgroup_storage != _map) + if (prog->aux->cgroup_storage[stype] && + prog->aux->cgroup_storage[stype] != _map) goto unlock; map->prog = prog; - prog->aux->cgroup_storage = _map; + prog->aux->cgroup_storage[stype] = _map; ret = 0; unlock: spin_unlock_bh(&map->lock); @@ -275,70 +343,117 @@ unlock: void bpf_cgroup_storage_release(struct bpf_prog *prog, struct bpf_map *_map) { + enum bpf_cgroup_storage_type stype = cgroup_storage_type(_map); struct bpf_cgroup_storage_map *map = map_to_storage(_map); spin_lock_bh(&map->lock); if (map->prog == prog) { - WARN_ON(prog->aux->cgroup_storage != _map); + WARN_ON(prog->aux->cgroup_storage[stype] != _map); map->prog = NULL; - prog->aux->cgroup_storage = NULL; + prog->aux->cgroup_storage[stype] = NULL; } spin_unlock_bh(&map->lock); } -struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog) +static size_t bpf_cgroup_storage_calculate_size(struct bpf_map *map, u32 *pages) +{ + size_t size; + + if (cgroup_storage_type(map) == BPF_CGROUP_STORAGE_SHARED) { + size = sizeof(struct bpf_storage_buffer) + map->value_size; + *pages = round_up(sizeof(struct bpf_cgroup_storage) + size, + PAGE_SIZE) >> PAGE_SHIFT; + } else { + size = map->value_size; + *pages = round_up(round_up(size, 8) * num_possible_cpus(), + PAGE_SIZE) >> PAGE_SHIFT; + } + + return size; +} + +struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog, + enum bpf_cgroup_storage_type stype) { struct bpf_cgroup_storage *storage; struct bpf_map *map; + gfp_t flags; + size_t size; u32 pages; - map = prog->aux->cgroup_storage; + map = prog->aux->cgroup_storage[stype]; if (!map) return NULL; - pages = round_up(sizeof(struct bpf_cgroup_storage) + - sizeof(struct bpf_storage_buffer) + - map->value_size, PAGE_SIZE) >> PAGE_SHIFT; + size = bpf_cgroup_storage_calculate_size(map, &pages); + if (bpf_map_charge_memlock(map, pages)) return ERR_PTR(-EPERM); storage = kmalloc_node(sizeof(struct bpf_cgroup_storage), __GFP_ZERO | GFP_USER, map->numa_node); - if (!storage) { - bpf_map_uncharge_memlock(map, pages); - return ERR_PTR(-ENOMEM); - } + if (!storage) + goto enomem; - storage->buf = kmalloc_node(sizeof(struct bpf_storage_buffer) + - 
map->value_size, __GFP_ZERO | GFP_USER, - map->numa_node); - if (!storage->buf) { - bpf_map_uncharge_memlock(map, pages); - kfree(storage); - return ERR_PTR(-ENOMEM); + flags = __GFP_ZERO | GFP_USER; + + if (stype == BPF_CGROUP_STORAGE_SHARED) { + storage->buf = kmalloc_node(size, flags, map->numa_node); + if (!storage->buf) + goto enomem; + } else { + storage->percpu_buf = __alloc_percpu_gfp(size, 8, flags); + if (!storage->percpu_buf) + goto enomem; } storage->map = (struct bpf_cgroup_storage_map *)map; return storage; + +enomem: + bpf_map_uncharge_memlock(map, pages); + kfree(storage); + return ERR_PTR(-ENOMEM); +} + +static void free_shared_cgroup_storage_rcu(struct rcu_head *rcu) +{ + struct bpf_cgroup_storage *storage = + container_of(rcu, struct bpf_cgroup_storage, rcu); + + kfree(storage->buf); + kfree(storage); +} + +static void free_percpu_cgroup_storage_rcu(struct rcu_head *rcu) +{ + struct bpf_cgroup_storage *storage = + container_of(rcu, struct bpf_cgroup_storage, rcu); + + free_percpu(storage->percpu_buf); + kfree(storage); } void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage) { - u32 pages; + enum bpf_cgroup_storage_type stype; struct bpf_map *map; + u32 pages; if (!storage) return; map = &storage->map->map; - pages = round_up(sizeof(struct bpf_cgroup_storage) + - sizeof(struct bpf_storage_buffer) + - map->value_size, PAGE_SIZE) >> PAGE_SHIFT; + + bpf_cgroup_storage_calculate_size(map, &pages); bpf_map_uncharge_memlock(map, pages); - kfree_rcu(storage->buf, rcu); - kfree_rcu(storage, rcu); + stype = cgroup_storage_type(map); + if (stype == BPF_CGROUP_STORAGE_SHARED) + call_rcu(&storage->rcu, free_shared_cgroup_storage_rcu); + else + call_rcu(&storage->rcu, free_percpu_cgroup_storage_rcu); } void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage, diff --git a/kernel/bpf/map_in_map.c b/kernel/bpf/map_in_map.c index 3bfbf4464416..99d243e1ad6e 100644 --- a/kernel/bpf/map_in_map.c +++ b/kernel/bpf/map_in_map.c @@ -24,7 +24,8 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd) * in the verifier is not enough. 
*/ if (inner_map->map_type == BPF_MAP_TYPE_PROG_ARRAY || - inner_map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE) { + inner_map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE || + inner_map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) { fdput(f); return ERR_PTR(-ENOTSUPP); } diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c index 177a52436394..8e93c47f0779 100644 --- a/kernel/bpf/offload.c +++ b/kernel/bpf/offload.c @@ -172,6 +172,24 @@ int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env, return ret; } +int bpf_prog_offload_finalize(struct bpf_verifier_env *env) +{ + struct bpf_prog_offload *offload; + int ret = -ENODEV; + + down_read(&bpf_devs_lock); + offload = env->prog->aux->offload; + if (offload) { + if (offload->dev_ops->finalize) + ret = offload->dev_ops->finalize(env); + else + ret = 0; + } + up_read(&bpf_devs_lock); + + return ret; +} + static void __bpf_prog_offload_destroy(struct bpf_prog *prog) { struct bpf_prog_offload *offload = prog->aux->offload; diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c index 0a0f2ec75370..d37a1a0a6e1e 100644 --- a/kernel/bpf/sockmap.c +++ b/kernel/bpf/sockmap.c @@ -612,8 +612,7 @@ static int free_sg(struct sock *sk, int start, if (i == MAX_SKB_FRAGS) i = 0; } - if (md->skb) - consume_skb(md->skb); + consume_skb(md->skb); return free; } @@ -995,8 +994,7 @@ bytes_ready: if (!sg->length && md->sg_start == md->sg_end) { list_del(&md->list); - if (md->skb) - consume_skb(md->skb); + consume_skb(md->skb); kfree(md); } } diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 8339d81cba1d..5742df21598c 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -30,7 +30,6 @@ #include <linux/cred.h> #include <linux/timekeeping.h> #include <linux/ctype.h> -#include <linux/btf.h> #include <linux/nospec.h> #define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY || \ @@ -687,7 +686,8 @@ static int map_lookup_elem(union bpf_attr *attr) if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH || - map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) + map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY || + map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) value_size = round_up(map->value_size, 8) * num_possible_cpus(); else if (IS_FD_MAP(map)) value_size = sizeof(u32); @@ -706,6 +706,8 @@ static int map_lookup_elem(union bpf_attr *attr) err = bpf_percpu_hash_copy(map, key, value); } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { err = bpf_percpu_array_copy(map, key, value); + } else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) { + err = bpf_percpu_cgroup_storage_copy(map, key, value); } else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) { err = bpf_stackmap_copy(map, key, value); } else if (IS_FD_ARRAY(map)) { @@ -775,7 +777,8 @@ static int map_update_elem(union bpf_attr *attr) if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH || - map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) + map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY || + map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) value_size = round_up(map->value_size, 8) * num_possible_cpus(); else value_size = map->value_size; @@ -810,6 +813,9 @@ static int map_update_elem(union bpf_attr *attr) err = bpf_percpu_hash_update(map, key, value, attr->flags); } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { err = bpf_percpu_array_update(map, key, value, attr->flags); + } else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) { + err = bpf_percpu_cgroup_storage_update(map, 
key, value, + attr->flags); } else if (IS_FD_ARRAY(map)) { rcu_read_lock(); err = bpf_fd_array_map_update_elem(map, f.file, key, value, @@ -989,10 +995,15 @@ static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog) /* drop refcnt on maps used by eBPF program and free auxilary data */ static void free_used_maps(struct bpf_prog_aux *aux) { + enum bpf_cgroup_storage_type stype; int i; - if (aux->cgroup_storage) - bpf_cgroup_storage_release(aux->prog, aux->cgroup_storage); + for_each_cgroup_storage_type(stype) { + if (!aux->cgroup_storage[stype]) + continue; + bpf_cgroup_storage_release(aux->prog, + aux->cgroup_storage[stype]); + } for (i = 0; i < aux->used_map_cnt; i++) bpf_map_put(aux->used_maps[i]); @@ -1616,6 +1627,9 @@ static int bpf_prog_attach(const union bpf_attr *attr) case BPF_LIRC_MODE2: ptype = BPF_PROG_TYPE_LIRC_MODE2; break; + case BPF_FLOW_DISSECTOR: + ptype = BPF_PROG_TYPE_FLOW_DISSECTOR; + break; default: return -EINVAL; } @@ -1637,6 +1651,9 @@ static int bpf_prog_attach(const union bpf_attr *attr) case BPF_PROG_TYPE_LIRC_MODE2: ret = lirc_prog_attach(attr, prog); break; + case BPF_PROG_TYPE_FLOW_DISSECTOR: + ret = skb_flow_dissector_bpf_prog_attach(attr, prog); + break; default: ret = cgroup_bpf_prog_attach(attr, ptype, prog); } @@ -1689,6 +1706,8 @@ static int bpf_prog_detach(const union bpf_attr *attr) return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_SKB, NULL); case BPF_LIRC_MODE2: return lirc_prog_detach(attr); + case BPF_FLOW_DISSECTOR: + return skb_flow_dissector_bpf_prog_detach(attr); default: return -EINVAL; } diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 465952a8e465..3f93a548a642 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -1,5 +1,6 @@ /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com * Copyright (c) 2016 Facebook + * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public @@ -80,8 +81,8 @@ static const struct bpf_verifier_ops * const bpf_verifier_ops[] = { * (like pointer plus pointer becomes SCALAR_VALUE type) * * When verifier sees load or store instructions the type of base register - * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK. These are three pointer - * types recognized by check_mem_access() function. + * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK, PTR_TO_SOCKET. These are + * four pointer types recognized by check_mem_access() function. * * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value' * and the range of [ptr, ptr + map's value_size) is accessible. @@ -140,6 +141,24 @@ static const struct bpf_verifier_ops * const bpf_verifier_ops[] = { * * After the call R0 is set to return type of the function and registers R1-R5 * are set to NOT_INIT to indicate that they are no longer readable. + * + * The following reference types represent a potential reference to a kernel + * resource which, after first being allocated, must be checked and freed by + * the BPF program: + * - PTR_TO_SOCKET_OR_NULL, PTR_TO_SOCKET + * + * When the verifier sees a helper call return a reference type, it allocates a + * pointer id for the reference and stores it in the current function state. + * Similar to the way that PTR_TO_MAP_VALUE_OR_NULL is converted into + * PTR_TO_MAP_VALUE, PTR_TO_SOCKET_OR_NULL becomes PTR_TO_SOCKET when the type + * passes through a NULL-check conditional. 
For the branch wherein the state is + * changed to CONST_IMM, the verifier releases the reference. + * + * For each helper function that allocates a reference, such as + * bpf_sk_lookup_tcp(), there is a corresponding release function, such as + * bpf_sk_release(). When a reference type passes into the release function, + * the verifier also releases the reference. If any unchecked or unreleased + * reference remains at the end of the program, the verifier rejects it. */ /* verifier_state + insn_idx are pushed to stack when branch is encountered */ @@ -189,6 +208,7 @@ struct bpf_call_arg_meta { int access_size; s64 msize_smax_value; u64 msize_umax_value; + int ptr_id; }; static DEFINE_MUTEX(bpf_verifier_lock); @@ -249,6 +269,46 @@ static bool type_is_pkt_pointer(enum bpf_reg_type type) type == PTR_TO_PACKET_META; } +static bool reg_type_may_be_null(enum bpf_reg_type type) +{ + return type == PTR_TO_MAP_VALUE_OR_NULL || + type == PTR_TO_SOCKET_OR_NULL; +} + +static bool type_is_refcounted(enum bpf_reg_type type) +{ + return type == PTR_TO_SOCKET; +} + +static bool type_is_refcounted_or_null(enum bpf_reg_type type) +{ + return type == PTR_TO_SOCKET || type == PTR_TO_SOCKET_OR_NULL; +} + +static bool reg_is_refcounted(const struct bpf_reg_state *reg) +{ + return type_is_refcounted(reg->type); +} + +static bool reg_is_refcounted_or_null(const struct bpf_reg_state *reg) +{ + return type_is_refcounted_or_null(reg->type); +} + +static bool arg_type_is_refcounted(enum bpf_arg_type type) +{ + return type == ARG_PTR_TO_SOCKET; +} + +/* Determine whether the function releases some resources allocated by another + * function call. The first reference type argument will be assumed to be + * released by release_reference(). + */ +static bool is_release_function(enum bpf_func_id func_id) +{ + return func_id == BPF_FUNC_sk_release; +} + /* string representation of 'enum bpf_reg_type' */ static const char * const reg_type_str[] = { [NOT_INIT] = "?", @@ -261,6 +321,16 @@ static const char * const reg_type_str[] = { [PTR_TO_PACKET] = "pkt", [PTR_TO_PACKET_META] = "pkt_meta", [PTR_TO_PACKET_END] = "pkt_end", + [PTR_TO_FLOW_KEYS] = "flow_keys", + [PTR_TO_SOCKET] = "sock", + [PTR_TO_SOCKET_OR_NULL] = "sock_or_null", +}; + +static char slot_type_char[] = { + [STACK_INVALID] = '?', + [STACK_SPILL] = 'r', + [STACK_MISC] = 'm', + [STACK_ZERO] = '0', }; static void print_liveness(struct bpf_verifier_env *env, @@ -349,72 +419,179 @@ static void print_verifier_state(struct bpf_verifier_env *env, } } for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { - if (state->stack[i].slot_type[0] == STACK_SPILL) { - verbose(env, " fp%d", - (-i - 1) * BPF_REG_SIZE); - print_liveness(env, state->stack[i].spilled_ptr.live); + char types_buf[BPF_REG_SIZE + 1]; + bool valid = false; + int j; + + for (j = 0; j < BPF_REG_SIZE; j++) { + if (state->stack[i].slot_type[j] != STACK_INVALID) + valid = true; + types_buf[j] = slot_type_char[ + state->stack[i].slot_type[j]]; + } + types_buf[BPF_REG_SIZE] = 0; + if (!valid) + continue; + verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE); + print_liveness(env, state->stack[i].spilled_ptr.live); + if (state->stack[i].slot_type[0] == STACK_SPILL) verbose(env, "=%s", reg_type_str[state->stack[i].spilled_ptr.type]); - } - if (state->stack[i].slot_type[0] == STACK_ZERO) - verbose(env, " fp%d=0", (-i - 1) * BPF_REG_SIZE); + else + verbose(env, "=%s", types_buf); + } + if (state->acquired_refs && state->refs[0].id) { + verbose(env, " refs=%d", state->refs[0].id); + for (i = 1; i < 
state->acquired_refs; i++) + if (state->refs[i].id) + verbose(env, ",%d", state->refs[i].id); } verbose(env, "\n"); } -static int copy_stack_state(struct bpf_func_state *dst, - const struct bpf_func_state *src) -{ - if (!src->stack) - return 0; - if (WARN_ON_ONCE(dst->allocated_stack < src->allocated_stack)) { - /* internal bug, make state invalid to reject the program */ - memset(dst, 0, sizeof(*dst)); - return -EFAULT; - } - memcpy(dst->stack, src->stack, - sizeof(*src->stack) * (src->allocated_stack / BPF_REG_SIZE)); - return 0; -} +#define COPY_STATE_FN(NAME, COUNT, FIELD, SIZE) \ +static int copy_##NAME##_state(struct bpf_func_state *dst, \ + const struct bpf_func_state *src) \ +{ \ + if (!src->FIELD) \ + return 0; \ + if (WARN_ON_ONCE(dst->COUNT < src->COUNT)) { \ + /* internal bug, make state invalid to reject the program */ \ + memset(dst, 0, sizeof(*dst)); \ + return -EFAULT; \ + } \ + memcpy(dst->FIELD, src->FIELD, \ + sizeof(*src->FIELD) * (src->COUNT / SIZE)); \ + return 0; \ +} +/* copy_reference_state() */ +COPY_STATE_FN(reference, acquired_refs, refs, 1) +/* copy_stack_state() */ +COPY_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE) +#undef COPY_STATE_FN + +#define REALLOC_STATE_FN(NAME, COUNT, FIELD, SIZE) \ +static int realloc_##NAME##_state(struct bpf_func_state *state, int size, \ + bool copy_old) \ +{ \ + u32 old_size = state->COUNT; \ + struct bpf_##NAME##_state *new_##FIELD; \ + int slot = size / SIZE; \ + \ + if (size <= old_size || !size) { \ + if (copy_old) \ + return 0; \ + state->COUNT = slot * SIZE; \ + if (!size && old_size) { \ + kfree(state->FIELD); \ + state->FIELD = NULL; \ + } \ + return 0; \ + } \ + new_##FIELD = kmalloc_array(slot, sizeof(struct bpf_##NAME##_state), \ + GFP_KERNEL); \ + if (!new_##FIELD) \ + return -ENOMEM; \ + if (copy_old) { \ + if (state->FIELD) \ + memcpy(new_##FIELD, state->FIELD, \ + sizeof(*new_##FIELD) * (old_size / SIZE)); \ + memset(new_##FIELD + old_size / SIZE, 0, \ + sizeof(*new_##FIELD) * (size - old_size) / SIZE); \ + } \ + state->COUNT = slot * SIZE; \ + kfree(state->FIELD); \ + state->FIELD = new_##FIELD; \ + return 0; \ +} +/* realloc_reference_state() */ +REALLOC_STATE_FN(reference, acquired_refs, refs, 1) +/* realloc_stack_state() */ +REALLOC_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE) +#undef REALLOC_STATE_FN /* do_check() starts with zero-sized stack in struct bpf_verifier_state to * make it consume minimal amount of memory. check_stack_write() access from * the program calls into realloc_func_state() to grow the stack size. * Note there is a non-zero 'parent' pointer inside bpf_verifier_state - * which this function copies over. It points to previous bpf_verifier_state - * which is never reallocated + * which realloc_stack_state() copies over. It points to previous + * bpf_verifier_state which is never reallocated. + */ +static int realloc_func_state(struct bpf_func_state *state, int stack_size, + int refs_size, bool copy_old) +{ + int err = realloc_reference_state(state, refs_size, copy_old); + if (err) + return err; + return realloc_stack_state(state, stack_size, copy_old); +} + +/* Acquire a pointer id from the env and update the state->refs to include + * this new pointer reference. + * On success, returns a valid pointer id to associate with the register + * On failure, returns a negative errno. 
*/ -static int realloc_func_state(struct bpf_func_state *state, int size, - bool copy_old) +static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx) { - u32 old_size = state->allocated_stack; - struct bpf_stack_state *new_stack; - int slot = size / BPF_REG_SIZE; + struct bpf_func_state *state = cur_func(env); + int new_ofs = state->acquired_refs; + int id, err; + + err = realloc_reference_state(state, state->acquired_refs + 1, true); + if (err) + return err; + id = ++env->id_gen; + state->refs[new_ofs].id = id; + state->refs[new_ofs].insn_idx = insn_idx; + + return id; +} + +/* release function corresponding to acquire_reference_state(). Idempotent. */ +static int __release_reference_state(struct bpf_func_state *state, int ptr_id) +{ + int i, last_idx; + + if (!ptr_id) + return -EFAULT; - if (size <= old_size || !size) { - if (copy_old) + last_idx = state->acquired_refs - 1; + for (i = 0; i < state->acquired_refs; i++) { + if (state->refs[i].id == ptr_id) { + if (last_idx && i != last_idx) + memcpy(&state->refs[i], &state->refs[last_idx], + sizeof(*state->refs)); + memset(&state->refs[last_idx], 0, sizeof(*state->refs)); + state->acquired_refs--; return 0; - state->allocated_stack = slot * BPF_REG_SIZE; - if (!size && old_size) { - kfree(state->stack); - state->stack = NULL; } - return 0; } - new_stack = kmalloc_array(slot, sizeof(struct bpf_stack_state), - GFP_KERNEL); - if (!new_stack) - return -ENOMEM; - if (copy_old) { - if (state->stack) - memcpy(new_stack, state->stack, - sizeof(*new_stack) * (old_size / BPF_REG_SIZE)); - memset(new_stack + old_size / BPF_REG_SIZE, 0, - sizeof(*new_stack) * (size - old_size) / BPF_REG_SIZE); - } - state->allocated_stack = slot * BPF_REG_SIZE; - kfree(state->stack); - state->stack = new_stack; + return -EFAULT; +} + +/* variation on the above for cases where we expect that there must be an + * outstanding reference for the specified ptr_id. 
+ */ +static int release_reference_state(struct bpf_verifier_env *env, int ptr_id) +{ + struct bpf_func_state *state = cur_func(env); + int err; + + err = __release_reference_state(state, ptr_id); + if (WARN_ON_ONCE(err != 0)) + verbose(env, "verifier internal error: can't release reference\n"); + return err; +} + +static int transfer_reference_state(struct bpf_func_state *dst, + struct bpf_func_state *src) +{ + int err = realloc_reference_state(dst, src->acquired_refs, false); + if (err) + return err; + err = copy_reference_state(dst, src); + if (err) + return err; return 0; } @@ -422,6 +599,7 @@ static void free_func_state(struct bpf_func_state *state) { if (!state) return; + kfree(state->refs); kfree(state->stack); kfree(state); } @@ -447,10 +625,14 @@ static int copy_func_state(struct bpf_func_state *dst, { int err; - err = realloc_func_state(dst, src->allocated_stack, false); + err = realloc_func_state(dst, src->allocated_stack, src->acquired_refs, + false); + if (err) + return err; + memcpy(dst, src, offsetof(struct bpf_func_state, acquired_refs)); + err = copy_reference_state(dst, src); if (err) return err; - memcpy(dst, src, offsetof(struct bpf_func_state, allocated_stack)); return copy_stack_state(dst, src); } @@ -466,7 +648,6 @@ static int copy_verifier_state(struct bpf_verifier_state *dst_state, dst_state->frame[i] = NULL; } dst_state->curframe = src->curframe; - dst_state->parent = src->parent; for (i = 0; i <= src->curframe; i++) { dst = dst_state->frame[i]; if (!dst) { @@ -553,7 +734,9 @@ static void __mark_reg_not_init(struct bpf_reg_state *reg); */ static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm) { - reg->id = 0; + /* Clear id, off, and union(map_ptr, range) */ + memset(((u8 *)reg) + sizeof(reg->type), 0, + offsetof(struct bpf_reg_state, var_off) - sizeof(reg->type)); reg->var_off = tnum_const(imm); reg->smin_value = (s64)imm; reg->smax_value = (s64)imm; @@ -572,7 +755,6 @@ static void __mark_reg_known_zero(struct bpf_reg_state *reg) static void __mark_reg_const_zero(struct bpf_reg_state *reg) { __mark_reg_known(reg, 0); - reg->off = 0; reg->type = SCALAR_VALUE; } @@ -683,9 +865,12 @@ static void __mark_reg_unbounded(struct bpf_reg_state *reg) /* Mark a register as having a completely unknown (scalar) value. 
*/ static void __mark_reg_unknown(struct bpf_reg_state *reg) { + /* + * Clear type, id, off, and union(map_ptr, range) and + * padding between 'type' and union + */ + memset(reg, 0, offsetof(struct bpf_reg_state, var_off)); reg->type = SCALAR_VALUE; - reg->id = 0; - reg->off = 0; reg->var_off = tnum_unknown; reg->frameno = 0; __mark_reg_unbounded(reg); @@ -732,6 +917,7 @@ static void init_reg_state(struct bpf_verifier_env *env, for (i = 0; i < MAX_BPF_REG; i++) { mark_reg_not_init(env, regs, i); regs[i].live = REG_LIVE_NONE; + regs[i].parent = NULL; } /* frame pointer */ @@ -823,10 +1009,6 @@ static int check_subprogs(struct bpf_verifier_env *env) verbose(env, "function calls to other bpf functions are allowed for root only\n"); return -EPERM; } - if (bpf_prog_is_dev_bound(env->prog->aux)) { - verbose(env, "function calls in offloaded programs are not supported yet\n"); - return -EINVAL; - } ret = add_subprog(env, i + insn[i].imm + 1); if (ret < 0) return ret; @@ -876,74 +1058,21 @@ next: return 0; } -static -struct bpf_verifier_state *skip_callee(struct bpf_verifier_env *env, - const struct bpf_verifier_state *state, - struct bpf_verifier_state *parent, - u32 regno) -{ - struct bpf_verifier_state *tmp = NULL; - - /* 'parent' could be a state of caller and - * 'state' could be a state of callee. In such case - * parent->curframe < state->curframe - * and it's ok for r1 - r5 registers - * - * 'parent' could be a callee's state after it bpf_exit-ed. - * In such case parent->curframe > state->curframe - * and it's ok for r0 only - */ - if (parent->curframe == state->curframe || - (parent->curframe < state->curframe && - regno >= BPF_REG_1 && regno <= BPF_REG_5) || - (parent->curframe > state->curframe && - regno == BPF_REG_0)) - return parent; - - if (parent->curframe > state->curframe && - regno >= BPF_REG_6) { - /* for callee saved regs we have to skip the whole chain - * of states that belong to callee and mark as LIVE_READ - * the registers before the call - */ - tmp = parent; - while (tmp && tmp->curframe != state->curframe) { - tmp = tmp->parent; - } - if (!tmp) - goto bug; - parent = tmp; - } else { - goto bug; - } - return parent; -bug: - verbose(env, "verifier bug regno %d tmp %p\n", regno, tmp); - verbose(env, "regno %d parent frame %d current frame %d\n", - regno, parent->curframe, state->curframe); - return NULL; -} - +/* Parentage chain of this register (or stack slot) should take care of all + * issues like callee-saved registers, stack slot allocation time, etc. + */ static int mark_reg_read(struct bpf_verifier_env *env, - const struct bpf_verifier_state *state, - struct bpf_verifier_state *parent, - u32 regno) + const struct bpf_reg_state *state, + struct bpf_reg_state *parent) { bool writes = parent == state->parent; /* Observe write marks */ - if (regno == BPF_REG_FP) - /* We don't need to worry about FP liveness because it's read-only */ - return 0; - while (parent) { /* if read wasn't screened by an earlier write ... */ - if (writes && state->frame[state->curframe]->regs[regno].live & REG_LIVE_WRITTEN) + if (writes && state->live & REG_LIVE_WRITTEN) break; - parent = skip_callee(env, state, parent, regno); - if (!parent) - return -EFAULT; /* ... 
then we depend on parent's value */ - parent->frame[parent->curframe]->regs[regno].live |= REG_LIVE_READ; + parent->live |= REG_LIVE_READ; state = parent; parent = state->parent; writes = true; @@ -969,7 +1098,10 @@ static int check_reg_arg(struct bpf_verifier_env *env, u32 regno, verbose(env, "R%d !read_ok\n", regno); return -EACCES; } - return mark_reg_read(env, vstate, vstate->parent, regno); + /* We don't need to worry about FP liveness because it's read-only */ + if (regno != BPF_REG_FP) + return mark_reg_read(env, ®s[regno], + regs[regno].parent); } else { /* check whether register used as dest operand can be written to */ if (regno == BPF_REG_FP) { @@ -993,7 +1125,10 @@ static bool is_spillable_regtype(enum bpf_reg_type type) case PTR_TO_PACKET: case PTR_TO_PACKET_META: case PTR_TO_PACKET_END: + case PTR_TO_FLOW_KEYS: case CONST_PTR_TO_MAP: + case PTR_TO_SOCKET: + case PTR_TO_SOCKET_OR_NULL: return true; default: return false; @@ -1018,7 +1153,7 @@ static int check_stack_write(struct bpf_verifier_env *env, enum bpf_reg_type type; err = realloc_func_state(state, round_up(slot + 1, BPF_REG_SIZE), - true); + state->acquired_refs, true); if (err) return err; /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0, @@ -1080,8 +1215,8 @@ static int check_stack_write(struct bpf_verifier_env *env, } else { u8 type = STACK_MISC; - /* regular write of data into stack */ - state->stack[spi].spilled_ptr = (struct bpf_reg_state) {}; + /* regular write of data into stack destroys any spilled ptr */ + state->stack[spi].spilled_ptr.type = NOT_INIT; /* only mark the slot as written if all 8 bytes were written * otherwise read propagation may incorrectly stop too soon @@ -1106,61 +1241,6 @@ static int check_stack_write(struct bpf_verifier_env *env, return 0; } -/* registers of every function are unique and mark_reg_read() propagates - * the liveness in the following cases: - * - from callee into caller for R1 - R5 that were used as arguments - * - from caller into callee for R0 that used as result of the call - * - from caller to the same caller skipping states of the callee for R6 - R9, - * since R6 - R9 are callee saved by implicit function prologue and - * caller's R6 != callee's R6, so when we propagate liveness up to - * parent states we need to skip callee states for R6 - R9. - * - * stack slot marking is different, since stacks of caller and callee are - * accessible in both (since caller can pass a pointer to caller's stack to - * callee which can pass it to another function), hence mark_stack_slot_read() - * has to propagate the stack liveness to all parent states at given frame number. - * Consider code: - * f1() { - * ptr = fp - 8; - * *ptr = ctx; - * call f2 { - * .. = *ptr; - * } - * .. = *ptr; - * } - * First *ptr is reading from f1's stack and mark_stack_slot_read() has - * to mark liveness at the f1's frame and not f2's frame. 
- * Second *ptr is also reading from f1's stack and mark_stack_slot_read() has - * to propagate liveness to f2 states at f1's frame level and further into - * f1 states at f1's frame level until write into that stack slot - */ -static void mark_stack_slot_read(struct bpf_verifier_env *env, - const struct bpf_verifier_state *state, - struct bpf_verifier_state *parent, - int slot, int frameno) -{ - bool writes = parent == state->parent; /* Observe write marks */ - - while (parent) { - if (parent->frame[frameno]->allocated_stack <= slot * BPF_REG_SIZE) - /* since LIVE_WRITTEN mark is only done for full 8-byte - * write the read marks are conservative and parent - * state may not even have the stack allocated. In such case - * end the propagation, since the loop reached beginning - * of the function - */ - break; - /* if read wasn't screened by an earlier write ... */ - if (writes && state->frame[frameno]->stack[slot].spilled_ptr.live & REG_LIVE_WRITTEN) - break; - /* ... then we depend on parent's value */ - parent->frame[frameno]->stack[slot].spilled_ptr.live |= REG_LIVE_READ; - state = parent; - parent = state->parent; - writes = true; - } -} - static int check_stack_read(struct bpf_verifier_env *env, struct bpf_func_state *reg_state /* func where register points to */, int off, int size, int value_regno) @@ -1198,8 +1278,8 @@ static int check_stack_read(struct bpf_verifier_env *env, */ state->regs[value_regno].live |= REG_LIVE_WRITTEN; } - mark_stack_slot_read(env, vstate, vstate->parent, spi, - reg_state->frameno); + mark_reg_read(env, ®_state->stack[spi].spilled_ptr, + reg_state->stack[spi].spilled_ptr.parent); return 0; } else { int zeros = 0; @@ -1215,8 +1295,8 @@ static int check_stack_read(struct bpf_verifier_env *env, off, i, size); return -EACCES; } - mark_stack_slot_read(env, vstate, vstate->parent, spi, - reg_state->frameno); + mark_reg_read(env, ®_state->stack[spi].spilled_ptr, + reg_state->stack[spi].spilled_ptr.parent); if (value_regno >= 0) { if (zeros == size) { /* any size read into register is zero extended, @@ -1321,6 +1401,7 @@ static bool may_access_direct_pkt_data(struct bpf_verifier_env *env, case BPF_PROG_TYPE_LWT_XMIT: case BPF_PROG_TYPE_SK_SKB: case BPF_PROG_TYPE_SK_MSG: + case BPF_PROG_TYPE_FLOW_DISSECTOR: if (meta) return meta->pkt_access; @@ -1404,6 +1485,40 @@ static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, return -EACCES; } +static int check_flow_keys_access(struct bpf_verifier_env *env, int off, + int size) +{ + if (size < 0 || off < 0 || + (u64)off + size > sizeof(struct bpf_flow_keys)) { + verbose(env, "invalid access to flow keys off=%d size=%d\n", + off, size); + return -EACCES; + } + return 0; +} + +static int check_sock_access(struct bpf_verifier_env *env, u32 regno, int off, + int size, enum bpf_access_type t) +{ + struct bpf_reg_state *regs = cur_regs(env); + struct bpf_reg_state *reg = ®s[regno]; + struct bpf_insn_access_aux info; + + if (reg->smin_value < 0) { + verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", + regno); + return -EACCES; + } + + if (!bpf_sock_is_valid_access(off, size, t, &info)) { + verbose(env, "invalid bpf_sock access off=%d size=%d\n", + off, size); + return -EACCES; + } + + return 0; +} + static bool __is_pointer_value(bool allow_ptr_leaks, const struct bpf_reg_state *reg) { @@ -1422,7 +1537,8 @@ static bool is_ctx_reg(struct bpf_verifier_env *env, int regno) { const struct bpf_reg_state *reg = cur_regs(env) + regno; - return reg->type == 
PTR_TO_CTX; + return reg->type == PTR_TO_CTX || + reg->type == PTR_TO_SOCKET; } static bool is_pkt_reg(struct bpf_verifier_env *env, int regno) @@ -1505,6 +1621,9 @@ static int check_ptr_alignment(struct bpf_verifier_env *env, * right in front, treat it the very same way. */ return check_pkt_ptr_alignment(env, reg, off, size, strict); + case PTR_TO_FLOW_KEYS: + pointer_desc = "flow keys "; + break; case PTR_TO_MAP_VALUE: pointer_desc = "value "; break; @@ -1519,6 +1638,9 @@ static int check_ptr_alignment(struct bpf_verifier_env *env, */ strict = true; break; + case PTR_TO_SOCKET: + pointer_desc = "sock "; + break; default: break; } @@ -1727,9 +1849,6 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn else mark_reg_known_zero(env, regs, value_regno); - regs[value_regno].id = 0; - regs[value_regno].off = 0; - regs[value_regno].range = 0; regs[value_regno].type = reg_type; } @@ -1778,6 +1897,25 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn err = check_packet_access(env, regno, off, size, false); if (!err && t == BPF_READ && value_regno >= 0) mark_reg_unknown(env, regs, value_regno); + } else if (reg->type == PTR_TO_FLOW_KEYS) { + if (t == BPF_WRITE && value_regno >= 0 && + is_pointer_value(env, value_regno)) { + verbose(env, "R%d leaks addr into flow keys\n", + value_regno); + return -EACCES; + } + + err = check_flow_keys_access(env, off, size); + if (!err && t == BPF_READ && value_regno >= 0) + mark_reg_unknown(env, regs, value_regno); + } else if (reg->type == PTR_TO_SOCKET) { + if (t == BPF_WRITE) { + verbose(env, "cannot write into socket\n"); + return -EACCES; + } + err = check_sock_access(env, regno, off, size, t); + if (!err && value_regno >= 0) + mark_reg_unknown(env, regs, value_regno); } else { verbose(env, "R%d invalid mem access '%s'\n", regno, reg_type_str[reg->type]); @@ -1820,8 +1958,7 @@ static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_ins if (is_ctx_reg(env, insn->dst_reg) || is_pkt_reg(env, insn->dst_reg)) { verbose(env, "BPF_XADD stores into R%d %s is not allowed\n", - insn->dst_reg, is_ctx_reg(env, insn->dst_reg) ? 
- "context" : "packet"); + insn->dst_reg, reg_type_str[insn->dst_reg]); return -EACCES; } @@ -1908,8 +2045,8 @@ mark: /* reading any byte out of 8-byte 'spill_slot' will cause * the whole slot to be marked as 'read' */ - mark_stack_slot_read(env, env->cur_state, env->cur_state->parent, - spi, state->frameno); + mark_reg_read(env, &state->stack[spi].spilled_ptr, + state->stack[spi].spilled_ptr.parent); } return update_stack_depth(env, state, off); } @@ -1925,6 +2062,8 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno, case PTR_TO_PACKET_META: return check_packet_access(env, regno, reg->off, access_size, zero_size_allowed); + case PTR_TO_FLOW_KEYS: + return check_flow_keys_access(env, reg->off, access_size); case PTR_TO_MAP_VALUE: return check_map_access(env, regno, reg->off, access_size, zero_size_allowed); @@ -1999,6 +2138,16 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno, err = check_ctx_reg(env, reg, regno); if (err < 0) return err; + } else if (arg_type == ARG_PTR_TO_SOCKET) { + expected_type = PTR_TO_SOCKET; + if (type != expected_type) + goto err_type; + if (meta->ptr_id || !reg->id) { + verbose(env, "verifier internal error: mismatched references meta=%d, reg=%d\n", + meta->ptr_id, reg->id); + return -EFAULT; + } + meta->ptr_id = reg->id; } else if (arg_type_is_mem_ptr(arg_type)) { expected_type = PTR_TO_STACK; /* One exception here. In case function allows for NULL to be @@ -2129,6 +2278,7 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env, goto error; break; case BPF_MAP_TYPE_CGROUP_STORAGE: + case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE: if (func_id != BPF_FUNC_get_local_storage) goto error; break; @@ -2219,7 +2369,8 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env, goto error; break; case BPF_FUNC_get_local_storage: - if (map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE) + if (map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE && + map->map_type != BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) goto error; break; case BPF_FUNC_sk_select_reuseport: @@ -2286,10 +2437,32 @@ static bool check_arg_pair_ok(const struct bpf_func_proto *fn) return true; } +static bool check_refcount_ok(const struct bpf_func_proto *fn) +{ + int count = 0; + + if (arg_type_is_refcounted(fn->arg1_type)) + count++; + if (arg_type_is_refcounted(fn->arg2_type)) + count++; + if (arg_type_is_refcounted(fn->arg3_type)) + count++; + if (arg_type_is_refcounted(fn->arg4_type)) + count++; + if (arg_type_is_refcounted(fn->arg5_type)) + count++; + + /* We only support one arg being unreferenced at the moment, + * which is sufficient for the helper functions we have right now. + */ + return count <= 1; +} + static int check_func_proto(const struct bpf_func_proto *fn) { return check_raw_mode_ok(fn) && - check_arg_pair_ok(fn) ? 0 : -EINVAL; + check_arg_pair_ok(fn) && + check_refcount_ok(fn) ? 
0 : -EINVAL; } /* Packet data might have moved, any old PTR_TO_PACKET[_META,_END] @@ -2305,10 +2478,9 @@ static void __clear_all_pkt_pointers(struct bpf_verifier_env *env, if (reg_is_pkt_pointer_any(®s[i])) mark_reg_unknown(env, regs, i); - for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { - if (state->stack[i].slot_type[0] != STACK_SPILL) + bpf_for_each_spilled_reg(i, state, reg) { + if (!reg) continue; - reg = &state->stack[i].spilled_ptr; if (reg_is_pkt_pointer_any(reg)) __mark_reg_unknown(reg); } @@ -2323,12 +2495,45 @@ static void clear_all_pkt_pointers(struct bpf_verifier_env *env) __clear_all_pkt_pointers(env, vstate->frame[i]); } +static void release_reg_references(struct bpf_verifier_env *env, + struct bpf_func_state *state, int id) +{ + struct bpf_reg_state *regs = state->regs, *reg; + int i; + + for (i = 0; i < MAX_BPF_REG; i++) + if (regs[i].id == id) + mark_reg_unknown(env, regs, i); + + bpf_for_each_spilled_reg(i, state, reg) { + if (!reg) + continue; + if (reg_is_refcounted(reg) && reg->id == id) + __mark_reg_unknown(reg); + } +} + +/* The pointer with the specified id has released its reference to kernel + * resources. Identify all copies of the same pointer and clear the reference. + */ +static int release_reference(struct bpf_verifier_env *env, + struct bpf_call_arg_meta *meta) +{ + struct bpf_verifier_state *vstate = env->cur_state; + int i; + + for (i = 0; i <= vstate->curframe; i++) + release_reg_references(env, vstate->frame[i], meta->ptr_id); + + return release_reference_state(env, meta->ptr_id); +} + static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn, int *insn_idx) { struct bpf_verifier_state *state = env->cur_state; struct bpf_func_state *caller, *callee; - int i, subprog, target_insn; + int i, err, subprog, target_insn; if (state->curframe + 1 >= MAX_CALL_FRAMES) { verbose(env, "the call stack of %d frames is too deep\n", @@ -2366,11 +2571,18 @@ static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn, state->curframe + 1 /* frameno within this callchain */, subprog /* subprog number within this prog */); - /* copy r1 - r5 args that callee can access */ + /* Transfer references to the callee */ + err = transfer_reference_state(callee, caller); + if (err) + return err; + + /* copy r1 - r5 args that callee can access. 
The copy includes parent + * pointers, which connects us up to the liveness chain + */ for (i = BPF_REG_1; i <= BPF_REG_5; i++) callee->regs[i] = caller->regs[i]; - /* after the call regsiters r0 - r5 were scratched */ + /* after the call registers r0 - r5 were scratched */ for (i = 0; i < CALLER_SAVED_REGS; i++) { mark_reg_not_init(env, caller->regs, caller_saved[i]); check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); @@ -2396,6 +2608,7 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx) struct bpf_verifier_state *state = env->cur_state; struct bpf_func_state *caller, *callee; struct bpf_reg_state *r0; + int err; callee = state->frame[state->curframe]; r0 = &callee->regs[BPF_REG_0]; @@ -2415,6 +2628,11 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx) /* return to the caller whatever r0 had in the callee */ caller->regs[BPF_REG_0] = *r0; + /* Transfer references to the caller */ + err = transfer_reference_state(caller, callee); + if (err) + return err; + *insn_idx = callee->callsite + 1; if (env->log.level) { verbose(env, "returning from callee:\n"); @@ -2471,6 +2689,18 @@ record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta, return 0; } +static int check_reference_leak(struct bpf_verifier_env *env) +{ + struct bpf_func_state *state = cur_func(env); + int i; + + for (i = 0; i < state->acquired_refs; i++) { + verbose(env, "Unreleased reference id=%d alloc_insn=%d\n", + state->refs[i].id, state->refs[i].insn_idx); + } + return state->acquired_refs ? -EINVAL : 0; +} + static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn_idx) { const struct bpf_func_proto *fn = NULL; @@ -2549,6 +2779,18 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn return err; } + if (func_id == BPF_FUNC_tail_call) { + err = check_reference_leak(env); + if (err) { + verbose(env, "tail_call would lead to reference leak\n"); + return err; + } + } else if (is_release_function(func_id)) { + err = release_reference(env, &meta); + if (err) + return err; + } + regs = cur_regs(env); /* check that flags argument in get_local_storage(map, flags) is 0, @@ -2580,7 +2822,6 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL; /* There is no offset yet applied, variable or fixed */ mark_reg_known_zero(env, regs, BPF_REG_0); - regs[BPF_REG_0].off = 0; /* remember map_ptr, so that check_map_access() * can check 'value_size' boundary of memory access * to map element returned from bpf_map_lookup_elem() @@ -2592,6 +2833,13 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn } regs[BPF_REG_0].map_ptr = meta.map_ptr; regs[BPF_REG_0].id = ++env->id_gen; + } else if (fn->ret_type == RET_PTR_TO_SOCKET_OR_NULL) { + int id = acquire_reference_state(env, insn_idx); + if (id < 0) + return id; + mark_reg_known_zero(env, regs, BPF_REG_0); + regs[BPF_REG_0].type = PTR_TO_SOCKET_OR_NULL; + regs[BPF_REG_0].id = id; } else { verbose(env, "unknown return type %d of func %s#%d\n", fn->ret_type, func_id_name(func_id), func_id); @@ -2722,20 +2970,20 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, return -EACCES; } - if (ptr_reg->type == PTR_TO_MAP_VALUE_OR_NULL) { - verbose(env, "R%d pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL prohibited, null-check it first\n", - dst); - return -EACCES; - } - if (ptr_reg->type == CONST_PTR_TO_MAP) { - verbose(env, "R%d pointer arithmetic on 
CONST_PTR_TO_MAP prohibited\n", - dst); + switch (ptr_reg->type) { + case PTR_TO_MAP_VALUE_OR_NULL: + verbose(env, "R%d pointer arithmetic on %s prohibited, null-check it first\n", + dst, reg_type_str[ptr_reg->type]); return -EACCES; - } - if (ptr_reg->type == PTR_TO_PACKET_END) { - verbose(env, "R%d pointer arithmetic on PTR_TO_PACKET_END prohibited\n", - dst); + case CONST_PTR_TO_MAP: + case PTR_TO_PACKET_END: + case PTR_TO_SOCKET: + case PTR_TO_SOCKET_OR_NULL: + verbose(env, "R%d pointer arithmetic on %s prohibited\n", + dst, reg_type_str[ptr_reg->type]); return -EACCES; + default: + break; } /* In case of 'scalar += pointer', dst_reg inherits pointer type and id. @@ -3455,10 +3703,9 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *vstate, for (j = 0; j <= vstate->curframe; j++) { state = vstate->frame[j]; - for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { - if (state->stack[i].slot_type[0] != STACK_SPILL) + bpf_for_each_spilled_reg(i, state, reg) { + if (!reg) continue; - reg = &state->stack[i].spilled_ptr; if (reg->type == type && reg->id == dst_reg->id) reg->range = max(reg->range, new_range); } @@ -3664,12 +3911,11 @@ static void reg_combine_min_max(struct bpf_reg_state *true_src, } } -static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id, - bool is_null) +static void mark_ptr_or_null_reg(struct bpf_func_state *state, + struct bpf_reg_state *reg, u32 id, + bool is_null) { - struct bpf_reg_state *reg = ®s[regno]; - - if (reg->type == PTR_TO_MAP_VALUE_OR_NULL && reg->id == id) { + if (reg_type_may_be_null(reg->type) && reg->id == id) { /* Old offset (both fixed and variable parts) should * have been known-zero, because we don't allow pointer * arithmetic on pointers that might be NULL. @@ -3682,40 +3928,49 @@ static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id, } if (is_null) { reg->type = SCALAR_VALUE; - } else if (reg->map_ptr->inner_map_meta) { - reg->type = CONST_PTR_TO_MAP; - reg->map_ptr = reg->map_ptr->inner_map_meta; - } else { - reg->type = PTR_TO_MAP_VALUE; + } else if (reg->type == PTR_TO_MAP_VALUE_OR_NULL) { + if (reg->map_ptr->inner_map_meta) { + reg->type = CONST_PTR_TO_MAP; + reg->map_ptr = reg->map_ptr->inner_map_meta; + } else { + reg->type = PTR_TO_MAP_VALUE; + } + } else if (reg->type == PTR_TO_SOCKET_OR_NULL) { + reg->type = PTR_TO_SOCKET; + } + if (is_null || !reg_is_refcounted(reg)) { + /* We don't need id from this point onwards anymore, + * thus we should better reset it, so that state + * pruning has chances to take effect. + */ + reg->id = 0; } - /* We don't need id from this point onwards anymore, thus we - * should better reset it, so that state pruning has chances - * to take effect. - */ - reg->id = 0; } } /* The logic is similar to find_good_pkt_pointers(), both could eventually * be folded together at some point. 
*/ -static void mark_map_regs(struct bpf_verifier_state *vstate, u32 regno, - bool is_null) +static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno, + bool is_null) { struct bpf_func_state *state = vstate->frame[vstate->curframe]; - struct bpf_reg_state *regs = state->regs; + struct bpf_reg_state *reg, *regs = state->regs; u32 id = regs[regno].id; int i, j; + if (reg_is_refcounted_or_null(®s[regno]) && is_null) + __release_reference_state(state, id); + for (i = 0; i < MAX_BPF_REG; i++) - mark_map_reg(regs, i, id, is_null); + mark_ptr_or_null_reg(state, ®s[i], id, is_null); for (j = 0; j <= vstate->curframe; j++) { state = vstate->frame[j]; - for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { - if (state->stack[i].slot_type[0] != STACK_SPILL) + bpf_for_each_spilled_reg(i, state, reg) { + if (!reg) continue; - mark_map_reg(&state->stack[i].spilled_ptr, 0, id, is_null); + mark_ptr_or_null_reg(state, reg, id, is_null); } } } @@ -3917,12 +4172,14 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env, /* detect if R == 0 where R is returned from bpf_map_lookup_elem() */ if (BPF_SRC(insn->code) == BPF_K && insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) && - dst_reg->type == PTR_TO_MAP_VALUE_OR_NULL) { - /* Mark all identical map registers in each branch as either + reg_type_may_be_null(dst_reg->type)) { + /* Mark all identical registers in each branch as either * safe or unknown depending R == 0 or R != 0 conditional. */ - mark_map_regs(this_branch, insn->dst_reg, opcode == BPF_JNE); - mark_map_regs(other_branch, insn->dst_reg, opcode == BPF_JEQ); + mark_ptr_or_null_regs(this_branch, insn->dst_reg, + opcode == BPF_JNE); + mark_ptr_or_null_regs(other_branch, insn->dst_reg, + opcode == BPF_JEQ); } else if (!try_match_pkt_pointers(insn, dst_reg, ®s[insn->src_reg], this_branch, other_branch) && is_pointer_value(env, insn->dst_reg)) { @@ -4045,6 +4302,16 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn) if (err) return err; + /* Disallow usage of BPF_LD_[ABS|IND] with reference tracking, as + * gen_ld_abs() may terminate the program at runtime, leading to + * reference leak. 
+ */ + err = check_reference_leak(env); + if (err) { + verbose(env, "BPF_LD_[ABS|IND] cannot be mixed with socket references\n"); + return err; + } + if (regs[BPF_REG_6].type != PTR_TO_CTX) { verbose(env, "at the time of BPF_LD_ABS|IND R6 != pointer to skb\n"); @@ -4378,7 +4645,7 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur, /* explored state didn't use this */ return true; - equal = memcmp(rold, rcur, offsetof(struct bpf_reg_state, frameno)) == 0; + equal = memcmp(rold, rcur, offsetof(struct bpf_reg_state, parent)) == 0; if (rold->type == PTR_TO_STACK) /* two stack pointers are equal only if they're pointing to @@ -4459,6 +4726,9 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur, case PTR_TO_CTX: case CONST_PTR_TO_MAP: case PTR_TO_PACKET_END: + case PTR_TO_FLOW_KEYS: + case PTR_TO_SOCKET: + case PTR_TO_SOCKET_OR_NULL: /* Only valid matches are exact, which memcmp() above * would have accepted */ @@ -4534,6 +4804,14 @@ static bool stacksafe(struct bpf_func_state *old, return true; } +static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur) +{ + if (old->acquired_refs != cur->acquired_refs) + return false; + return !memcmp(old->refs, cur->refs, + sizeof(*old->refs) * old->acquired_refs); +} + /* compare two verifier states * * all states stored in state_list are known to be valid, since @@ -4579,6 +4857,9 @@ static bool func_states_equal(struct bpf_func_state *old, if (!stacksafe(old, cur, idmap)) goto out_free; + + if (!refsafe(old, cur)) + goto out_free; ret = true; out_free: kfree(idmap); @@ -4611,7 +4892,7 @@ static bool states_equal(struct bpf_verifier_env *env, * equivalent state (jump target or such) we didn't arrive by the straight-line * code, so read marks in the state must propagate to the parent regardless * of the state's write marks. That's what 'parent == state->parent' comparison - * in mark_reg_read() and mark_stack_slot_read() is for. + * in mark_reg_read() is for. 
*/ static int propagate_liveness(struct bpf_verifier_env *env, const struct bpf_verifier_state *vstate, @@ -4632,7 +4913,8 @@ static int propagate_liveness(struct bpf_verifier_env *env, if (vparent->frame[vparent->curframe]->regs[i].live & REG_LIVE_READ) continue; if (vstate->frame[vstate->curframe]->regs[i].live & REG_LIVE_READ) { - err = mark_reg_read(env, vstate, vparent, i); + err = mark_reg_read(env, &vstate->frame[vstate->curframe]->regs[i], + &vparent->frame[vstate->curframe]->regs[i]); if (err) return err; } @@ -4647,7 +4929,8 @@ static int propagate_liveness(struct bpf_verifier_env *env, if (parent->stack[i].spilled_ptr.live & REG_LIVE_READ) continue; if (state->stack[i].spilled_ptr.live & REG_LIVE_READ) - mark_stack_slot_read(env, vstate, vparent, i, frame); + mark_reg_read(env, &state->stack[i].spilled_ptr, + &parent->stack[i].spilled_ptr); } } return err; @@ -4657,7 +4940,7 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) { struct bpf_verifier_state_list *new_sl; struct bpf_verifier_state_list *sl; - struct bpf_verifier_state *cur = env->cur_state; + struct bpf_verifier_state *cur = env->cur_state, *new; int i, j, err; sl = env->explored_states[insn_idx]; @@ -4699,16 +4982,18 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) return -ENOMEM; /* add new state to the head of linked list */ - err = copy_verifier_state(&new_sl->state, cur); + new = &new_sl->state; + err = copy_verifier_state(new, cur); if (err) { - free_verifier_state(&new_sl->state, false); + free_verifier_state(new, false); kfree(new_sl); return err; } new_sl->next = env->explored_states[insn_idx]; env->explored_states[insn_idx] = new_sl; /* connect new state to parentage chain */ - cur->parent = &new_sl->state; + for (i = 0; i < BPF_REG_FP; i++) + cur_regs(env)[i].parent = &new->frame[new->curframe]->regs[i]; /* clear write marks in current state: the writes we did are not writes * our child did, so they don't screen off its reads from us. * (There are no read marks in current state, because reads always mark @@ -4721,13 +5006,48 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) /* all stack frames are accessible from callee, clear them all */ for (j = 0; j <= cur->curframe; j++) { struct bpf_func_state *frame = cur->frame[j]; + struct bpf_func_state *newframe = new->frame[j]; - for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) + for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) { frame->stack[i].spilled_ptr.live = REG_LIVE_NONE; + frame->stack[i].spilled_ptr.parent = + &newframe->stack[i].spilled_ptr; + } } return 0; } +/* Return true if it's OK to have the same insn return a different type. */ +static bool reg_type_mismatch_ok(enum bpf_reg_type type) +{ + switch (type) { + case PTR_TO_CTX: + case PTR_TO_SOCKET: + case PTR_TO_SOCKET_OR_NULL: + return false; + default: + return true; + } +} + +/* If an instruction was previously used with particular pointer types, then we + * need to be careful to avoid cases such as the below, where it may be ok + * for one branch accessing the pointer, but not ok for the other branch: + * + * R1 = sock_ptr + * goto X; + * ... + * R1 = some_other_valid_ptr; + * goto X; + * ... 
+ R2 = *(u32 *)(R1 + 0); + */ +static bool reg_type_mismatch(enum bpf_reg_type src, enum bpf_reg_type prev) +{ + return src != prev && (!reg_type_mismatch_ok(src) || + !reg_type_mismatch_ok(prev)); +} + static int do_check(struct bpf_verifier_env *env) { struct bpf_verifier_state *state; @@ -4742,7 +5062,6 @@ static int do_check(struct bpf_verifier_env *env) if (!state) return -ENOMEM; state->curframe = 0; - state->parent = NULL; state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL); if (!state->frame[0]) { kfree(state); @@ -4822,6 +5141,7 @@ static int do_check(struct bpf_verifier_env *env) regs = cur_regs(env); env->insn_aux_data[insn_idx].seen = true; + if (class == BPF_ALU || class == BPF_ALU64) { err = check_alu_op(env, insn); if (err) @@ -4861,9 +5181,7 @@ static int do_check(struct bpf_verifier_env *env) */ *prev_src_type = src_reg_type; - } else if (src_reg_type != *prev_src_type && - (src_reg_type == PTR_TO_CTX || - *prev_src_type == PTR_TO_CTX)) { + } else if (reg_type_mismatch(src_reg_type, *prev_src_type)) { /* Abuser program is trying to use the same insn * dst_reg = *(u32*) (src_reg + off) * with different pointer types: @@ -4908,9 +5226,7 @@ static int do_check(struct bpf_verifier_env *env) if (*prev_dst_type == NOT_INIT) { *prev_dst_type = dst_reg_type; - } else if (dst_reg_type != *prev_dst_type && - (dst_reg_type == PTR_TO_CTX || - *prev_dst_type == PTR_TO_CTX)) { + } else if (reg_type_mismatch(dst_reg_type, *prev_dst_type)) { verbose(env, "same insn cannot be used with different pointers\n"); return -EINVAL; } @@ -4927,8 +5243,8 @@ static int do_check(struct bpf_verifier_env *env) return err; if (is_ctx_reg(env, insn->dst_reg)) { - verbose(env, "BPF_ST stores into R%d context is not allowed\n", - insn->dst_reg); + verbose(env, "BPF_ST stores into R%d %s is not allowed\n", + insn->dst_reg, reg_type_str[reg_state(env, insn->dst_reg)->type]); return -EACCES; } @@ -4990,6 +5306,10 @@ static int do_check(struct bpf_verifier_env *env) continue; } + err = check_reference_leak(env); + if (err) + return err; + /* eBPF calling convention is such that R0 is used
* Make sure that it's readable at this time @@ -5103,6 +5423,12 @@ static int check_map_prog_compatibility(struct bpf_verifier_env *env, return 0; } +static bool bpf_map_is_cgroup_storage(struct bpf_map *map) +{ + return (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE || + map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE); +} + /* look for pseudo eBPF instructions that access map FDs and * replace them with actual map pointers */ @@ -5193,10 +5519,9 @@ static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env) } env->used_maps[env->used_map_cnt++] = map; - if (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE && + if (bpf_map_is_cgroup_storage(map) && bpf_cgroup_storage_assign(env->prog, map)) { - verbose(env, - "only one cgroup storage is allowed\n"); + verbose(env, "only one cgroup storage of each type is allowed\n"); fdput(f); return -EBUSY; } @@ -5225,11 +5550,15 @@ next_insn: /* drop refcnt of maps used by the rejected program */ static void release_maps(struct bpf_verifier_env *env) { + enum bpf_cgroup_storage_type stype; int i; - if (env->prog->aux->cgroup_storage) + for_each_cgroup_storage_type(stype) { + if (!env->prog->aux->cgroup_storage[stype]) + continue; bpf_cgroup_storage_release(env->prog, - env->prog->aux->cgroup_storage); + env->prog->aux->cgroup_storage[stype]); + } for (i = 0; i < env->used_map_cnt; i++) bpf_map_put(env->used_maps[i]); @@ -5327,8 +5656,10 @@ static void sanitize_dead_code(struct bpf_verifier_env *env) } } -/* convert load instructions that access fields of 'struct __sk_buff' - * into sequence of instructions that access fields of 'struct sk_buff' +/* convert load instructions that access fields of a context type into a + * sequence of instructions that access fields of the underlying structure: + * struct __sk_buff -> struct sk_buff + * struct bpf_sock_ops -> struct sock */ static int convert_ctx_accesses(struct bpf_verifier_env *env) { @@ -5357,12 +5688,14 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env) } } - if (!ops->convert_ctx_access || bpf_prog_is_dev_bound(env->prog->aux)) + if (bpf_prog_is_dev_bound(env->prog->aux)) return 0; insn = env->prog->insnsi + delta; for (i = 0; i < insn_cnt; i++, insn++) { + bpf_convert_ctx_access_t convert_ctx_access; + if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) || insn->code == (BPF_LDX | BPF_MEM | BPF_H) || insn->code == (BPF_LDX | BPF_MEM | BPF_W) || @@ -5404,8 +5737,18 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env) continue; } - if (env->insn_aux_data[i + delta].ptr_type != PTR_TO_CTX) + switch (env->insn_aux_data[i + delta].ptr_type) { + case PTR_TO_CTX: + if (!ops->convert_ctx_access) + continue; + convert_ctx_access = ops->convert_ctx_access; + break; + case PTR_TO_SOCKET: + convert_ctx_access = bpf_sock_convert_ctx_access; + break; + default: continue; + } ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size; size = BPF_LDST_BYTES(insn); @@ -5437,8 +5780,8 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env) } target_size = 0; - cnt = ops->convert_ctx_access(type, insn, insn_buf, env->prog, - &target_size); + cnt = convert_ctx_access(type, insn, insn_buf, env->prog, + &target_size); if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) || (ctx_field_size && !target_size)) { verbose(env, "bpf verifier is misconfigured\n"); @@ -5629,10 +5972,10 @@ static int fixup_call_args(struct bpf_verifier_env *env) struct bpf_insn *insn = prog->insnsi; int i, depth; #endif - int err; + int err = 0; - err = 0; - if (env->prog->jit_requested) { + if 
(env->prog->jit_requested && + !bpf_prog_is_dev_bound(env->prog->aux)) { err = jit_subprogs(env); if (err == 0) return 0; @@ -5970,6 +6313,9 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr) env->cur_state = NULL; } + if (ret == 0 && bpf_prog_is_dev_bound(env->prog->aux)) + ret = bpf_prog_offload_finalize(env); + skip_full_check: while (!pop_stack(env, NULL, NULL)); free_states(env); diff --git a/lib/nlattr.c b/lib/nlattr.c index e335bcafa9e4..d26de6156b97 100644 --- a/lib/nlattr.c +++ b/lib/nlattr.c @@ -45,12 +45,11 @@ static const u8 nla_attr_minlen[NLA_TYPE_MAX+1] = { }; static int validate_nla_bitfield32(const struct nlattr *nla, - u32 *valid_flags_allowed) + const u32 *valid_flags_mask) { const struct nla_bitfield32 *bf = nla_data(nla); - u32 *valid_flags_mask = valid_flags_allowed; - if (!valid_flags_allowed) + if (!valid_flags_mask) return -EINVAL; /*disallow invalid bit selector */ @@ -68,11 +67,99 @@ static int validate_nla_bitfield32(const struct nlattr *nla, return 0; } +static int nla_validate_array(const struct nlattr *head, int len, int maxtype, + const struct nla_policy *policy, + struct netlink_ext_ack *extack) +{ + const struct nlattr *entry; + int rem; + + nla_for_each_attr(entry, head, len, rem) { + int ret; + + if (nla_len(entry) == 0) + continue; + + if (nla_len(entry) < NLA_HDRLEN) { + NL_SET_ERR_MSG_ATTR(extack, entry, + "Array element too short"); + return -ERANGE; + } + + ret = nla_validate(nla_data(entry), nla_len(entry), + maxtype, policy, extack); + if (ret < 0) + return ret; + } + + return 0; +} + +static int nla_validate_int_range(const struct nla_policy *pt, + const struct nlattr *nla, + struct netlink_ext_ack *extack) +{ + bool validate_min, validate_max; + s64 value; + + validate_min = pt->validation_type == NLA_VALIDATE_RANGE || + pt->validation_type == NLA_VALIDATE_MIN; + validate_max = pt->validation_type == NLA_VALIDATE_RANGE || + pt->validation_type == NLA_VALIDATE_MAX; + + switch (pt->type) { + case NLA_U8: + value = nla_get_u8(nla); + break; + case NLA_U16: + value = nla_get_u16(nla); + break; + case NLA_U32: + value = nla_get_u32(nla); + break; + case NLA_S8: + value = nla_get_s8(nla); + break; + case NLA_S16: + value = nla_get_s16(nla); + break; + case NLA_S32: + value = nla_get_s32(nla); + break; + case NLA_S64: + value = nla_get_s64(nla); + break; + case NLA_U64: + /* treat this one specially, since it may not fit into s64 */ + if ((validate_min && nla_get_u64(nla) < pt->min) || + (validate_max && nla_get_u64(nla) > pt->max)) { + NL_SET_ERR_MSG_ATTR(extack, nla, + "integer out of range"); + return -ERANGE; + } + return 0; + default: + WARN_ON(1); + return -EINVAL; + } + + if ((validate_min && value < pt->min) || + (validate_max && value > pt->max)) { + NL_SET_ERR_MSG_ATTR(extack, nla, + "integer out of range"); + return -ERANGE; + } + + return 0; +} + static int validate_nla(const struct nlattr *nla, int maxtype, - const struct nla_policy *policy) + const struct nla_policy *policy, + struct netlink_ext_ack *extack) { const struct nla_policy *pt; int minlen = 0, attrlen = nla_len(nla), type = nla_type(nla); + int err = -ERANGE; if (type <= 0 || type > maxtype) return 0; @@ -81,22 +168,40 @@ static int validate_nla(const struct nlattr *nla, int maxtype, BUG_ON(pt->type > NLA_TYPE_MAX); - if (nla_attr_len[pt->type] && attrlen != nla_attr_len[pt->type]) { + if ((nla_attr_len[pt->type] && attrlen != nla_attr_len[pt->type]) || + (pt->type == NLA_EXACT_LEN_WARN && attrlen != pt->len)) { pr_warn_ratelimited("netlink: '%s': attribute type %d 
has an invalid length.\n", current->comm, type); } switch (pt->type) { + case NLA_EXACT_LEN: + if (attrlen != pt->len) + goto out_err; + break; + + case NLA_REJECT: + if (extack && pt->validation_data) { + NL_SET_BAD_ATTR(extack, nla); + extack->_msg = pt->validation_data; + return -EINVAL; + } + err = -EINVAL; + goto out_err; + case NLA_FLAG: if (attrlen > 0) - return -ERANGE; + goto out_err; break; case NLA_BITFIELD32: if (attrlen != sizeof(struct nla_bitfield32)) - return -ERANGE; + goto out_err; - return validate_nla_bitfield32(nla, pt->validation_data); + err = validate_nla_bitfield32(nla, pt->validation_data); + if (err) + goto out_err; + break; case NLA_NUL_STRING: if (pt->len) @@ -104,13 +209,15 @@ static int validate_nla(const struct nlattr *nla, int maxtype, else minlen = attrlen; - if (!minlen || memchr(nla_data(nla), '\0', minlen) == NULL) - return -EINVAL; + if (!minlen || memchr(nla_data(nla), '\0', minlen) == NULL) { + err = -EINVAL; + goto out_err; + } /* fall through */ case NLA_STRING: if (attrlen < 1) - return -ERANGE; + goto out_err; if (pt->len) { char *buf = nla_data(nla); @@ -119,32 +226,58 @@ attrlen--; if (attrlen > pt->len) - return -ERANGE; + goto out_err; } break; case NLA_BINARY: if (pt->len && attrlen > pt->len) - return -ERANGE; + goto out_err; break; - case NLA_NESTED_COMPAT: - if (attrlen < pt->len) - return -ERANGE; - if (attrlen < NLA_ALIGN(pt->len)) - break; - if (attrlen < NLA_ALIGN(pt->len) + NLA_HDRLEN) - return -ERANGE; - nla = nla_data(nla) + NLA_ALIGN(pt->len); - if (attrlen < NLA_ALIGN(pt->len) + NLA_HDRLEN + nla_len(nla)) - return -ERANGE; - break; case NLA_NESTED: /* a nested attribute is allowed to be empty; if it's not, * it must have a size of at least NLA_HDRLEN. */ if (attrlen == 0) break; + if (attrlen < NLA_HDRLEN) + goto out_err; + if (pt->validation_data) { + err = nla_validate(nla_data(nla), nla_len(nla), pt->len, + pt->validation_data, extack); + if (err < 0) { + /* + * return directly to preserve the inner + * error message/attribute pointer + */ + return err; + } + } + break; + case NLA_NESTED_ARRAY: + /* a nested array attribute is allowed to be empty; if it's not,
+ */ + if (attrlen == 0) + break; + if (attrlen < NLA_HDRLEN) + goto out_err; + if (pt->validation_data) { + int err; + + err = nla_validate_array(nla_data(nla), nla_len(nla), + pt->len, pt->validation_data, + extack); + if (err < 0) { + /* + * return directly to preserve the inner + * error message/attribute pointer + */ + return err; + } + } + break; default: if (pt->len) minlen = pt->len; @@ -152,10 +285,34 @@ static int validate_nla(const struct nlattr *nla, int maxtype, minlen = nla_attr_minlen[pt->type]; if (attrlen < minlen) - return -ERANGE; + goto out_err; + } + + /* further validation */ + switch (pt->validation_type) { + case NLA_VALIDATE_NONE: + /* nothing to do */ + break; + case NLA_VALIDATE_RANGE: + case NLA_VALIDATE_MIN: + case NLA_VALIDATE_MAX: + err = nla_validate_int_range(pt, nla, extack); + if (err) + return err; + break; + case NLA_VALIDATE_FUNCTION: + if (pt->validate) { + err = pt->validate(nla, extack); + if (err) + return err; + } + break; } return 0; +out_err: + NL_SET_ERR_MSG_ATTR(extack, nla, "Attribute failed policy validation"); + return err; } /** @@ -180,13 +337,10 @@ int nla_validate(const struct nlattr *head, int len, int maxtype, int rem; nla_for_each_attr(nla, head, len, rem) { - int err = validate_nla(nla, maxtype, policy); + int err = validate_nla(nla, maxtype, policy, extack); - if (err < 0) { - if (extack) - extack->bad_attr = nla; + if (err < 0) return err; - } } return 0; @@ -237,42 +391,63 @@ EXPORT_SYMBOL(nla_policy_len); * * Returns 0 on success or a negative error code. */ -int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head, - int len, const struct nla_policy *policy, - struct netlink_ext_ack *extack) +static int __nla_parse(struct nlattr **tb, int maxtype, + const struct nlattr *head, int len, + bool strict, const struct nla_policy *policy, + struct netlink_ext_ack *extack) { const struct nlattr *nla; - int rem, err; + int rem; memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1)); nla_for_each_attr(nla, head, len, rem) { u16 type = nla_type(nla); - if (type > 0 && type <= maxtype) { - if (policy) { - err = validate_nla(nla, maxtype, policy); - if (err < 0) { - NL_SET_ERR_MSG_ATTR(extack, nla, - "Attribute failed policy validation"); - goto errout; - } + if (type == 0 || type > maxtype) { + if (strict) { + NL_SET_ERR_MSG(extack, "Unknown attribute type"); + return -EINVAL; } + continue; + } + if (policy) { + int err = validate_nla(nla, maxtype, policy, extack); - tb[type] = (struct nlattr *)nla; + if (err < 0) + return err; } + + tb[type] = (struct nlattr *)nla; } - if (unlikely(rem > 0)) + if (unlikely(rem > 0)) { pr_warn_ratelimited("netlink: %d bytes leftover after parsing attributes in process `%s'.\n", rem, current->comm); + NL_SET_ERR_MSG(extack, "bytes leftover after parsing attributes"); + if (strict) + return -EINVAL; + } - err = 0; -errout: - return err; + return 0; +} + +int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head, + int len, const struct nla_policy *policy, + struct netlink_ext_ack *extack) +{ + return __nla_parse(tb, maxtype, head, len, false, policy, extack); } EXPORT_SYMBOL(nla_parse); +int nla_parse_strict(struct nlattr **tb, int maxtype, const struct nlattr *head, + int len, const struct nla_policy *policy, + struct netlink_ext_ack *extack) +{ + return __nla_parse(tb, maxtype, head, len, true, policy, extack); +} +EXPORT_SYMBOL(nla_parse_strict); + /** * nla_find - Find a specific attribute in a stream of attributes * @head: head of attribute stream diff --git 
a/lib/test_bpf.c b/lib/test_bpf.c index 08d3d59dca17..aa22bcaec1dc 100644 --- a/lib/test_bpf.c +++ b/lib/test_bpf.c @@ -6494,6 +6494,7 @@ static struct sk_buff *populate_skb(char *buf, int size) skb->queue_mapping = SKB_QUEUE_MAP; skb->vlan_tci = SKB_VLAN_TCI; skb->vlan_proto = htons(ETH_P_IP); + dev_net_set(&dev, &init_net); skb->dev = &dev; skb->dev->ifindex = SKB_DEV_IFINDEX; skb->dev->type = SKB_DEV_TYPE; diff --git a/net/batman-adv/Kconfig b/net/batman-adv/Kconfig index 361116f77cb9..f75816f58107 100644 --- a/net/batman-adv/Kconfig +++ b/net/batman-adv/Kconfig @@ -106,3 +106,14 @@ config BATMAN_ADV_DEBUG say N here. This enables compilation of support for outputting debugging information to the kernel log. The output is controlled via the module parameter debug. + +config BATMAN_ADV_TRACING + bool "B.A.T.M.A.N. tracing support" + depends on BATMAN_ADV + depends on EVENT_TRACING + help + This is an option for use by developers; most people should + say N here. Select this option to gather traces like the debug + messages using the generic tracing infrastructure of the kernel. + BATMAN_ADV_DEBUG must also be selected to get trace events for + batadv_dbg. diff --git a/net/batman-adv/Makefile b/net/batman-adv/Makefile index b97ba6fb8353..9b58160fe485 100644 --- a/net/batman-adv/Makefile +++ b/net/batman-adv/Makefile @@ -42,6 +42,9 @@ batman-adv-y += routing.o batman-adv-y += send.o batman-adv-y += soft-interface.o batman-adv-y += sysfs.o +batman-adv-$(CONFIG_BATMAN_ADV_TRACING) += trace.o batman-adv-y += tp_meter.o batman-adv-y += translation-table.o batman-adv-y += tvlv.o + +CFLAGS_trace.o := -I$(src) diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index 73bf6a93a3cf..d2227091029f 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -138,169 +138,6 @@ static u8 batadv_ring_buffer_avg(const u8 lq_recv[]) } /** - * batadv_iv_ogm_orig_free() - free the private resources allocated for this - * orig_node - * @orig_node: the orig_node for which the resources have to be free'd - */ -static void batadv_iv_ogm_orig_free(struct batadv_orig_node *orig_node) -{ - kfree(orig_node->bat_iv.bcast_own); - kfree(orig_node->bat_iv.bcast_own_sum); -} - -/** - * batadv_iv_ogm_orig_add_if() - change the private structures of the orig_node - * to include the new hard-interface - * @orig_node: the orig_node that has to be changed - * @max_if_num: the current amount of interfaces - * - * Return: 0 on success, a negative error code otherwise. 
- */ -static int batadv_iv_ogm_orig_add_if(struct batadv_orig_node *orig_node, - unsigned int max_if_num) -{ - void *data_ptr; - size_t old_size; - int ret = -ENOMEM; - - spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock); - - old_size = (max_if_num - 1) * sizeof(unsigned long) * BATADV_NUM_WORDS; - data_ptr = kmalloc_array(max_if_num, - BATADV_NUM_WORDS * sizeof(unsigned long), - GFP_ATOMIC); - if (!data_ptr) - goto unlock; - - memcpy(data_ptr, orig_node->bat_iv.bcast_own, old_size); - kfree(orig_node->bat_iv.bcast_own); - orig_node->bat_iv.bcast_own = data_ptr; - - data_ptr = kmalloc_array(max_if_num, sizeof(u8), GFP_ATOMIC); - if (!data_ptr) - goto unlock; - - memcpy(data_ptr, orig_node->bat_iv.bcast_own_sum, - (max_if_num - 1) * sizeof(u8)); - kfree(orig_node->bat_iv.bcast_own_sum); - orig_node->bat_iv.bcast_own_sum = data_ptr; - - ret = 0; - -unlock: - spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock); - - return ret; -} - -/** - * batadv_iv_ogm_drop_bcast_own_entry() - drop section of bcast_own - * @orig_node: the orig_node that has to be changed - * @max_if_num: the current amount of interfaces - * @del_if_num: the index of the interface being removed - */ -static void -batadv_iv_ogm_drop_bcast_own_entry(struct batadv_orig_node *orig_node, - unsigned int max_if_num, - unsigned int del_if_num) -{ - size_t chunk_size; - size_t if_offset; - void *data_ptr; - - lockdep_assert_held(&orig_node->bat_iv.ogm_cnt_lock); - - chunk_size = sizeof(unsigned long) * BATADV_NUM_WORDS; - data_ptr = kmalloc_array(max_if_num, chunk_size, GFP_ATOMIC); - if (!data_ptr) - /* use old buffer when new one could not be allocated */ - data_ptr = orig_node->bat_iv.bcast_own; - - /* copy first part */ - memmove(data_ptr, orig_node->bat_iv.bcast_own, del_if_num * chunk_size); - - /* copy second part */ - if_offset = (del_if_num + 1) * chunk_size; - memmove((char *)data_ptr + del_if_num * chunk_size, - (uint8_t *)orig_node->bat_iv.bcast_own + if_offset, - (max_if_num - del_if_num) * chunk_size); - - /* bcast_own was shrunk down in new buffer; free old one */ - if (orig_node->bat_iv.bcast_own != data_ptr) { - kfree(orig_node->bat_iv.bcast_own); - orig_node->bat_iv.bcast_own = data_ptr; - } -} - -/** - * batadv_iv_ogm_drop_bcast_own_sum_entry() - drop section of bcast_own_sum - * @orig_node: the orig_node that has to be changed - * @max_if_num: the current amount of interfaces - * @del_if_num: the index of the interface being removed - */ -static void -batadv_iv_ogm_drop_bcast_own_sum_entry(struct batadv_orig_node *orig_node, - unsigned int max_if_num, - unsigned int del_if_num) -{ - size_t if_offset; - void *data_ptr; - - lockdep_assert_held(&orig_node->bat_iv.ogm_cnt_lock); - - data_ptr = kmalloc_array(max_if_num, sizeof(u8), GFP_ATOMIC); - if (!data_ptr) - /* use old buffer when new one could not be allocated */ - data_ptr = orig_node->bat_iv.bcast_own_sum; - - memmove(data_ptr, orig_node->bat_iv.bcast_own_sum, - del_if_num * sizeof(u8)); - - if_offset = (del_if_num + 1) * sizeof(u8); - memmove((char *)data_ptr + del_if_num * sizeof(u8), - orig_node->bat_iv.bcast_own_sum + if_offset, - (max_if_num - del_if_num) * sizeof(u8)); - - /* bcast_own_sum was shrunk down in new buffer; free old one */ - if (orig_node->bat_iv.bcast_own_sum != data_ptr) { - kfree(orig_node->bat_iv.bcast_own_sum); - orig_node->bat_iv.bcast_own_sum = data_ptr; - } -} - -/** - * batadv_iv_ogm_orig_del_if() - change the private structures of the orig_node - * to exclude the removed interface - * @orig_node: the orig_node that has to be changed - * 
@max_if_num: the current amount of interfaces - * @del_if_num: the index of the interface being removed - * - * Return: 0 on success, a negative error code otherwise. - */ -static int batadv_iv_ogm_orig_del_if(struct batadv_orig_node *orig_node, - unsigned int max_if_num, - unsigned int del_if_num) -{ - spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock); - - if (max_if_num == 0) { - kfree(orig_node->bat_iv.bcast_own); - kfree(orig_node->bat_iv.bcast_own_sum); - orig_node->bat_iv.bcast_own = NULL; - orig_node->bat_iv.bcast_own_sum = NULL; - } else { - batadv_iv_ogm_drop_bcast_own_entry(orig_node, max_if_num, - del_if_num); - batadv_iv_ogm_drop_bcast_own_sum_entry(orig_node, max_if_num, - del_if_num); - } - - spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock); - - return 0; -} - -/** * batadv_iv_ogm_orig_get() - retrieve or create (if does not exist) an * originator * @bat_priv: the bat priv with all the soft interface information @@ -315,7 +152,6 @@ batadv_iv_ogm_orig_get(struct batadv_priv *bat_priv, const u8 *addr) { struct batadv_orig_node *orig_node; int hash_added; - size_t size; orig_node = batadv_orig_hash_find(bat_priv, addr); if (orig_node) @@ -327,16 +163,6 @@ batadv_iv_ogm_orig_get(struct batadv_priv *bat_priv, const u8 *addr) spin_lock_init(&orig_node->bat_iv.ogm_cnt_lock); - size = bat_priv->num_ifaces * sizeof(unsigned long) * BATADV_NUM_WORDS; - orig_node->bat_iv.bcast_own = kzalloc(size, GFP_ATOMIC); - if (!orig_node->bat_iv.bcast_own) - goto free_orig_node; - - size = bat_priv->num_ifaces * sizeof(u8); - orig_node->bat_iv.bcast_own_sum = kzalloc(size, GFP_ATOMIC); - if (!orig_node->bat_iv.bcast_own_sum) - goto free_orig_node; - kref_get(&orig_node->refcount); hash_added = batadv_hash_add(bat_priv->orig_hash, batadv_compare_orig, batadv_choose_orig, orig_node, @@ -347,8 +173,9 @@ batadv_iv_ogm_orig_get(struct batadv_priv *bat_priv, const u8 *addr) return orig_node; free_orig_node_hash: + /* reference for batadv_hash_add */ batadv_orig_node_put(orig_node); -free_orig_node: + /* reference from batadv_orig_node_new */ batadv_orig_node_put(orig_node); return NULL; @@ -893,26 +720,30 @@ batadv_iv_ogm_slide_own_bcast_window(struct batadv_hard_iface *hard_iface) struct batadv_hashtable *hash = bat_priv->orig_hash; struct hlist_head *head; struct batadv_orig_node *orig_node; + struct batadv_orig_ifinfo *orig_ifinfo; unsigned long *word; u32 i; - size_t word_index; u8 *w; - unsigned int if_num; for (i = 0; i < hash->size; i++) { head = &hash->table[i]; rcu_read_lock(); hlist_for_each_entry_rcu(orig_node, head, hash_entry) { - spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock); - word_index = hard_iface->if_num * BATADV_NUM_WORDS; - word = &orig_node->bat_iv.bcast_own[word_index]; - - batadv_bit_get_packet(bat_priv, word, 1, 0); - if_num = hard_iface->if_num; - w = &orig_node->bat_iv.bcast_own_sum[if_num]; - *w = bitmap_weight(word, BATADV_TQ_LOCAL_WINDOW_SIZE); - spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock); + hlist_for_each_entry_rcu(orig_ifinfo, + &orig_node->ifinfo_list, + list) { + if (orig_ifinfo->if_outgoing != hard_iface) + continue; + + spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock); + word = orig_ifinfo->bat_iv.bcast_own; + batadv_bit_get_packet(bat_priv, word, 1, 0); + w = &orig_ifinfo->bat_iv.bcast_own_sum; + *w = bitmap_weight(word, + BATADV_TQ_LOCAL_WINDOW_SIZE); + spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock); + } } rcu_read_unlock(); } @@ -1000,6 +831,35 @@ out: } /** + * batadv_iv_orig_ifinfo_sum() - Get bcast_own sum for originator over interface + * @orig_node: originator which rebroadcasted the OGMs directly + * @if_outgoing: interface which transmitted the original OGM and received the + * direct rebroadcast + * + * Return: Number of replied (rebroadcasted) OGMs which were transmitted by + * an originator and directly (without intermediate hop) received by a specific + * interface + */ +static u8 batadv_iv_orig_ifinfo_sum(struct batadv_orig_node *orig_node, + struct batadv_hard_iface *if_outgoing) +{ + struct batadv_orig_ifinfo *orig_ifinfo; + u8 sum; + + orig_ifinfo = batadv_orig_ifinfo_get(orig_node, if_outgoing); + if (!orig_ifinfo) + return 0; + + spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock); + sum = orig_ifinfo->bat_iv.bcast_own_sum; + spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock); + + batadv_orig_ifinfo_put(orig_ifinfo); + + return sum; +} + +/** * batadv_iv_ogm_orig_update() - use OGM to update corresponding data in an * originator * @bat_priv: the bat priv with all the soft interface information @@ -1026,8 +886,6 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv, struct batadv_neigh_node *neigh_node = NULL; struct batadv_neigh_node *tmp_neigh_node = NULL; struct batadv_neigh_node *router = NULL; - struct batadv_orig_node *orig_node_tmp; - unsigned int if_num; u8 sum_orig, sum_neigh; u8 *neigh_addr; u8 tq_avg; @@ -1132,18 +990,10 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv, */ if (router_ifinfo && neigh_ifinfo->bat_iv.tq_avg == router_ifinfo->bat_iv.tq_avg) { - orig_node_tmp = router->orig_node; - spin_lock_bh(&orig_node_tmp->bat_iv.ogm_cnt_lock); - if_num = router->if_incoming->if_num; - sum_orig = orig_node_tmp->bat_iv.bcast_own_sum[if_num]; - spin_unlock_bh(&orig_node_tmp->bat_iv.ogm_cnt_lock); - - orig_node_tmp = neigh_node->orig_node; - spin_lock_bh(&orig_node_tmp->bat_iv.ogm_cnt_lock); - if_num = neigh_node->if_incoming->if_num; - sum_neigh = orig_node_tmp->bat_iv.bcast_own_sum[if_num]; - spin_unlock_bh(&orig_node_tmp->bat_iv.ogm_cnt_lock); - + sum_orig = batadv_iv_orig_ifinfo_sum(router->orig_node, + router->if_incoming); + sum_neigh = batadv_iv_orig_ifinfo_sum(neigh_node->orig_node, + neigh_node->if_incoming); if (sum_orig >= sum_neigh) goto out; } @@ -1186,7 +1036,6 @@ static bool batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node, u8 total_count; u8 orig_eq_count, neigh_rq_count, neigh_rq_inv, tq_own; unsigned int neigh_rq_inv_cube, neigh_rq_max_cube; - unsigned int if_num; unsigned int tq_asym_penalty, inv_asym_penalty; unsigned int combined_tq; unsigned int tq_iface_penalty; @@ -1227,9 +1076,7 @@ static bool batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node, orig_node->last_seen = jiffies; /* find packet count of corresponding one hop neighbor */ - spin_lock_bh(&orig_neigh_node->bat_iv.ogm_cnt_lock); - if_num = if_incoming->if_num; - orig_eq_count = orig_neigh_node->bat_iv.bcast_own_sum[if_num]; + orig_eq_count = batadv_iv_orig_ifinfo_sum(orig_neigh_node, if_incoming); neigh_ifinfo = batadv_neigh_ifinfo_new(neigh_node, if_outgoing); if (neigh_ifinfo) { neigh_rq_count = neigh_ifinfo->bat_iv.real_packet_count; @@ -1237,7 +1084,6 @@ static bool batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node, } else { neigh_rq_count = 0; } - spin_unlock_bh(&orig_neigh_node->bat_iv.ogm_cnt_lock); /* pay attention to not get a value bigger than 100 % */ if (orig_eq_count > neigh_rq_count) @@ -1622,6 +1468,49 @@ out: } /** + * batadv_iv_ogm_process_reply() - Check OGM for direct reply and process it + * @ogm_packet: rebroadcast OGM packet to process + * @if_incoming: the interface where this packet was received + * @orig_node: originator which rebroadcasted the OGMs + * @if_incoming_seqno: OGM sequence number when rebroadcast was received + */ +static void batadv_iv_ogm_process_reply(struct batadv_ogm_packet *ogm_packet, + struct batadv_hard_iface *if_incoming, + struct batadv_orig_node *orig_node, + u32 if_incoming_seqno) +{ + struct batadv_orig_ifinfo *orig_ifinfo; + s32 bit_pos; + u8 *weight; + + /* neighbor has to indicate direct link and it has to + * come via the corresponding interface + */ + if (!(ogm_packet->flags & BATADV_DIRECTLINK)) + return; + + if (!batadv_compare_eth(if_incoming->net_dev->dev_addr, + ogm_packet->orig)) + return; + + orig_ifinfo = batadv_orig_ifinfo_get(orig_node, if_incoming); + if (!orig_ifinfo) + return; + + /* save packet seqno for bidirectional check */ + spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock); + bit_pos = if_incoming_seqno - 2; + bit_pos -= ntohl(ogm_packet->seqno); + batadv_set_bit(orig_ifinfo->bat_iv.bcast_own, bit_pos); + weight = &orig_ifinfo->bat_iv.bcast_own_sum; + *weight = bitmap_weight(orig_ifinfo->bat_iv.bcast_own, + BATADV_TQ_LOCAL_WINDOW_SIZE); + spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock); + + batadv_orig_ifinfo_put(orig_ifinfo); +} + +/** * batadv_iv_ogm_process() - process an incoming batman iv OGM * @skb: the skb containing the OGM * @ogm_offset: offset to the OGM which should be processed (for aggregates) @@ -1705,37 +1594,13 @@ static void batadv_iv_ogm_process(const struct sk_buff *skb, int ogm_offset, } if (is_my_orig) { - unsigned long *word; - size_t offset; - s32 bit_pos; - unsigned int if_num; - u8 *weight; - orig_neigh_node = batadv_iv_ogm_orig_get(bat_priv, ethhdr->h_source); if (!orig_neigh_node) return; - /* neighbor has to indicate direct link and it has to - * come via the corresponding interface - * save packet seqno for bidirectional check - */ - if (has_directlink_flag && - batadv_compare_eth(if_incoming->net_dev->dev_addr, - ogm_packet->orig)) { - if_num = if_incoming->if_num; - offset = if_num * BATADV_NUM_WORDS; - - spin_lock_bh(&orig_neigh_node->bat_iv.ogm_cnt_lock); - word = &orig_neigh_node->bat_iv.bcast_own[offset]; - bit_pos = if_incoming_seqno - 2; - bit_pos -= ntohl(ogm_packet->seqno); - batadv_set_bit(word, bit_pos); - weight = &orig_neigh_node->bat_iv.bcast_own_sum[if_num]; - *weight = bitmap_weight(word, - BATADV_TQ_LOCAL_WINDOW_SIZE); - spin_unlock_bh(&orig_neigh_node->bat_iv.ogm_cnt_lock); - } + batadv_iv_ogm_process_reply(ogm_packet, if_incoming, + orig_neigh_node, if_incoming_seqno); batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Drop packet: originator packet from myself (via neighbor)\n"); @@ -2844,9 +2709,6 @@ static struct batadv_algo_ops batadv_batman_iv __read_mostly = { .print = batadv_iv_ogm_orig_print, #endif .dump = batadv_iv_ogm_orig_dump, - .free = batadv_iv_ogm_orig_free, - .add_if = batadv_iv_ogm_orig_add_if, - .del_if = batadv_iv_ogm_orig_del_if, }, .gw = { .init_sel_class = batadv_iv_init_sel_class, diff --git a/net/batman-adv/debugfs.c b/net/batman-adv/debugfs.c index 3cb82378300b..8b608a2e2653 100644 --- a/net/batman-adv/debugfs.c +++ b/net/batman-adv/debugfs.c @@ -47,8 +47,24 @@ static struct dentry *batadv_debugfs; +/** + * batadv_debugfs_deprecated() - Log use of deprecated batadv debugfs access + * @file: file which was accessed + * @alt: explanation what can be used as alternative + */ +void batadv_debugfs_deprecated(struct file *file, const char *alt) +{ + struct dentry *dentry = file_dentry(file); + const char *name = dentry->d_name.name; + + pr_warn_ratelimited(DEPRECATED "%s (pid
%d) Use of debugfs file \"%s\".\n%s", + current->comm, task_pid_nr(current), name, alt); +} + static int batadv_algorithms_open(struct inode *inode, struct file *file) { + batadv_debugfs_deprecated(file, + "Use genl command BATADV_CMD_GET_ROUTING_ALGOS instead\n"); return single_open(file, batadv_algo_seq_print_text, NULL); } @@ -56,6 +72,8 @@ static int neighbors_open(struct inode *inode, struct file *file) { struct net_device *net_dev = (struct net_device *)inode->i_private; + batadv_debugfs_deprecated(file, + "Use genl command BATADV_CMD_GET_NEIGHBORS instead\n"); return single_open(file, batadv_hardif_neigh_seq_print_text, net_dev); } @@ -63,6 +81,8 @@ static int batadv_originators_open(struct inode *inode, struct file *file) { struct net_device *net_dev = (struct net_device *)inode->i_private; + batadv_debugfs_deprecated(file, + "Use genl command BATADV_CMD_GET_ORIGINATORS instead\n"); return single_open(file, batadv_orig_seq_print_text, net_dev); } @@ -79,6 +99,8 @@ static int batadv_originators_hardif_open(struct inode *inode, { struct net_device *net_dev = (struct net_device *)inode->i_private; + batadv_debugfs_deprecated(file, + "Use genl command BATADV_CMD_GET_HARDIFS instead\n"); return single_open(file, batadv_orig_hardif_seq_print_text, net_dev); } @@ -86,6 +108,8 @@ static int batadv_gateways_open(struct inode *inode, struct file *file) { struct net_device *net_dev = (struct net_device *)inode->i_private; + batadv_debugfs_deprecated(file, + "Use genl command BATADV_CMD_GET_GATEWAYS instead\n"); return single_open(file, batadv_gw_client_seq_print_text, net_dev); } @@ -93,6 +117,8 @@ static int batadv_transtable_global_open(struct inode *inode, struct file *file) { struct net_device *net_dev = (struct net_device *)inode->i_private; + batadv_debugfs_deprecated(file, + "Use genl command BATADV_CMD_GET_TRANSTABLE_GLOBAL instead\n"); return single_open(file, batadv_tt_global_seq_print_text, net_dev); } @@ -101,6 +127,8 @@ static int batadv_bla_claim_table_open(struct inode *inode, struct file *file) { struct net_device *net_dev = (struct net_device *)inode->i_private; + batadv_debugfs_deprecated(file, + "Use genl command BATADV_CMD_GET_BLA_CLAIM instead\n"); return single_open(file, batadv_bla_claim_table_seq_print_text, net_dev); } @@ -110,6 +138,8 @@ static int batadv_bla_backbone_table_open(struct inode *inode, { struct net_device *net_dev = (struct net_device *)inode->i_private; + batadv_debugfs_deprecated(file, + "Use genl command BATADV_CMD_GET_BLA_BACKBONE instead\n"); return single_open(file, batadv_bla_backbone_table_seq_print_text, net_dev); } @@ -128,6 +158,8 @@ static int batadv_dat_cache_open(struct inode *inode, struct file *file) { struct net_device *net_dev = (struct net_device *)inode->i_private; + batadv_debugfs_deprecated(file, + "Use genl command BATADV_CMD_GET_DAT_CACHE instead\n"); return single_open(file, batadv_dat_cache_seq_print_text, net_dev); } #endif @@ -136,6 +168,8 @@ static int batadv_transtable_local_open(struct inode *inode, struct file *file) { struct net_device *net_dev = (struct net_device *)inode->i_private; + batadv_debugfs_deprecated(file, + "Use genl command BATADV_CMD_GET_TRANSTABLE_LOCAL instead\n"); return single_open(file, batadv_tt_local_seq_print_text, net_dev); } @@ -149,6 +183,7 @@ static int batadv_nc_nodes_open(struct inode *inode, struct file *file) { struct net_device *net_dev = (struct net_device *)inode->i_private; + batadv_debugfs_deprecated(file, ""); return single_open(file, batadv_nc_nodes_seq_print_text, net_dev); } #endif 
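Every debugfs open handler in this file gains the same two-step shape: emit a rate-limited deprecation warning naming the accessed file and its generic netlink replacement, then fall through to the existing seq_file machinery. A minimal sketch of that pattern for illustration only — my_table_open(), my_table_seq_print_text() and BATADV_CMD_GET_SOMETHING are placeholder names, not part of this series, while batadv_debugfs_deprecated() and single_open() are used exactly as in the real handlers:

static int my_table_open(struct inode *inode, struct file *file)
{
	struct net_device *net_dev = (struct net_device *)inode->i_private;

	/* placeholder handler: log the rate-limited deprecation notice,
	 * then defer to the unchanged seq_file printer
	 */
	batadv_debugfs_deprecated(file,
		"Use genl command BATADV_CMD_GET_SOMETHING instead\n");
	return single_open(file, my_table_seq_print_text, net_dev);
}

Handlers with no generic netlink equivalent yet, such as batadv_nc_nodes_open() above, pass an empty alternative string: the access is still logged, but no replacement is suggested.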
@@ -165,6 +200,8 @@ static int batadv_mcast_flags_open(struct inode *inode, struct file *file) { struct net_device *net_dev = (struct net_device *)inode->i_private; + batadv_debugfs_deprecated(file, + "Use genl command BATADV_CMD_GET_MCAST_FLAGS instead\n"); return single_open(file, batadv_mcast_flags_seq_print_text, net_dev); } #endif diff --git a/net/batman-adv/debugfs.h b/net/batman-adv/debugfs.h index 08a592ffbee5..8de018e5c577 100644 --- a/net/batman-adv/debugfs.h +++ b/net/batman-adv/debugfs.h @@ -21,12 +21,14 @@ #include "main.h" +struct file; struct net_device; #define BATADV_DEBUGFS_SUBDIR "batman_adv" #if IS_ENABLED(CONFIG_BATMAN_ADV_DEBUGFS) +void batadv_debugfs_deprecated(struct file *file, const char *alt); void batadv_debugfs_init(void); void batadv_debugfs_destroy(void); int batadv_debugfs_add_meshif(struct net_device *dev); @@ -38,6 +40,10 @@ void batadv_debugfs_del_hardif(struct batadv_hard_iface *hard_iface); #else +static inline void batadv_debugfs_deprecated(struct file *file, const char *alt) +{ +} + static inline void batadv_debugfs_init(void) { } diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c index 2f0d42f2f913..781c5b6e6e8e 100644 --- a/net/batman-adv/hard-interface.c +++ b/net/batman-adv/hard-interface.c @@ -763,11 +763,6 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface, hard_iface->soft_iface = soft_iface; bat_priv = netdev_priv(hard_iface->soft_iface); - if (bat_priv->num_ifaces >= UINT_MAX) { - ret = -ENOSPC; - goto err_dev; - } - ret = netdev_master_upper_dev_link(hard_iface->net_dev, soft_iface, NULL, NULL, NULL); if (ret) @@ -777,16 +772,7 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface, if (ret < 0) goto err_upper; - hard_iface->if_num = bat_priv->num_ifaces; - bat_priv->num_ifaces++; hard_iface->if_status = BATADV_IF_INACTIVE; - ret = batadv_orig_hash_add_if(hard_iface, bat_priv->num_ifaces); - if (ret < 0) { - bat_priv->algo_ops->iface.disable(hard_iface); - bat_priv->num_ifaces--; - hard_iface->if_status = BATADV_IF_NOT_IN_USE; - goto err_upper; - } kref_get(&hard_iface->refcount); hard_iface->batman_adv_ptype.type = ethertype; @@ -834,6 +820,33 @@ err: } /** + * batadv_hardif_cnt() - get number of interfaces enslaved to soft interface + * @soft_iface: soft interface to check + * + * This function is only using RCU for locking - the result can therefore be + * off when another function is modifying the list at the same time. The + * caller can use the rtnl_lock to make sure that the count is accurate.
+ * + * Return: number of connected/enslaved hard interfaces + */ +static size_t batadv_hardif_cnt(const struct net_device *soft_iface) +{ + struct batadv_hard_iface *hard_iface; + size_t count = 0; + + rcu_read_lock(); + list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) { + if (hard_iface->soft_iface != soft_iface) + continue; + + count++; + } + rcu_read_unlock(); + + return count; +} + +/** * batadv_hardif_disable_interface() - Remove hard interface from soft interface * @hard_iface: hard interface to be removed * @autodel: whether to delete soft interface when it doesn't contain any other @@ -855,9 +868,6 @@ void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface, dev_remove_pack(&hard_iface->batman_adv_ptype); batadv_hardif_put(hard_iface); - bat_priv->num_ifaces--; - batadv_orig_hash_del_if(hard_iface, bat_priv->num_ifaces); - primary_if = batadv_primary_if_get_selected(bat_priv); if (hard_iface == primary_if) { struct batadv_hard_iface *new_if; @@ -881,7 +891,7 @@ void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface, batadv_hardif_recalc_extra_skbroom(hard_iface->soft_iface); /* nobody uses this interface anymore */ - if (bat_priv->num_ifaces == 0) { + if (batadv_hardif_cnt(hard_iface->soft_iface) <= 1) { batadv_gw_check_client_stop(bat_priv); if (autodel == BATADV_IF_CLEANUP_AUTO) @@ -917,7 +927,6 @@ batadv_hardif_add_interface(struct net_device *net_dev) if (ret) goto free_if; - hard_iface->if_num = 0; hard_iface->net_dev = net_dev; hard_iface->soft_iface = NULL; hard_iface->if_status = BATADV_IF_NOT_IN_USE; diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c index 55c358ad3331..d70f363c52ae 100644 --- a/net/batman-adv/icmp_socket.c +++ b/net/batman-adv/icmp_socket.c @@ -47,6 +47,7 @@ #include <linux/wait.h> #include <uapi/linux/batadv_packet.h> +#include "debugfs.h" #include "hard-interface.h" #include "log.h" #include "originator.h" @@ -74,6 +75,8 @@ static int batadv_socket_open(struct inode *inode, struct file *file) if (!try_module_get(THIS_MODULE)) return -EBUSY; + batadv_debugfs_deprecated(file, ""); + nonseekable_open(inode, file); socket_client = kmalloc(sizeof(*socket_client), GFP_KERNEL); diff --git a/net/batman-adv/log.c b/net/batman-adv/log.c index 853773e45f79..6beb5f067810 100644 --- a/net/batman-adv/log.c +++ b/net/batman-adv/log.c @@ -40,6 +40,9 @@ #include <linux/wait.h> #include <stdarg.h> +#include "debugfs.h" +#include "trace.h" + #define BATADV_LOG_BUFF_MASK (batadv_log_buff_len - 1) static const int batadv_log_buff_len = BATADV_LOG_BUF_LEN; @@ -98,13 +101,19 @@ static int batadv_fdebug_log(struct batadv_priv_debug_log *debug_log, */ int batadv_debug_log(struct batadv_priv *bat_priv, const char *fmt, ...) 
{ + struct va_format vaf; va_list args; - char tmp_log_buf[256]; va_start(args, fmt); - vscnprintf(tmp_log_buf, sizeof(tmp_log_buf), fmt, args); - batadv_fdebug_log(bat_priv->debug_log, "[%10u] %s", - jiffies_to_msecs(jiffies), tmp_log_buf); + + vaf.fmt = fmt; + vaf.va = &args; + + batadv_fdebug_log(bat_priv->debug_log, "[%10u] %pV", + jiffies_to_msecs(jiffies), &vaf); + + trace_batadv_dbg(bat_priv, &vaf); + va_end(args); return 0; @@ -115,6 +124,9 @@ static int batadv_log_open(struct inode *inode, struct file *file) if (!try_module_get(THIS_MODULE)) return -EBUSY; + batadv_debugfs_deprecated(file, + "Use tracepoint batadv:batadv_dbg instead\n"); + nonseekable_open(inode, file); file->private_data = inode->i_private; return 0; diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h index 3ccc75ee719c..2002b70e18db 100644 --- a/net/batman-adv/main.h +++ b/net/batman-adv/main.h @@ -25,7 +25,7 @@ #define BATADV_DRIVER_DEVICE "batman-adv" #ifndef BATADV_SOURCE_VERSION -#define BATADV_SOURCE_VERSION "2018.3" +#define BATADV_SOURCE_VERSION "2018.4" #endif /* B.A.T.M.A.N. parameters */ diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c index 1d295da3e342..56a981af5c92 100644 --- a/net/batman-adv/originator.c +++ b/net/batman-adv/originator.c @@ -904,9 +904,6 @@ static void batadv_orig_node_free_rcu(struct rcu_head *rcu) batadv_frag_purge_orig(orig_node, NULL); - if (orig_node->bat_priv->algo_ops->orig.free) - orig_node->bat_priv->algo_ops->orig.free(orig_node); - kfree(orig_node->tt_buff); kfree(orig_node); } @@ -1555,107 +1552,3 @@ int batadv_orig_dump(struct sk_buff *msg, struct netlink_callback *cb) return ret; } - -/** - * batadv_orig_hash_add_if() - Add interface to originators in orig_hash - * @hard_iface: hard interface to add (already slave of the soft interface) - * @max_if_num: new number of interfaces - * - * Return: 0 on success or negative error number in case of failure - */ -int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface, - unsigned int max_if_num) -{ - struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); - struct batadv_algo_ops *bao = bat_priv->algo_ops; - struct batadv_hashtable *hash = bat_priv->orig_hash; - struct hlist_head *head; - struct batadv_orig_node *orig_node; - u32 i; - int ret; - - /* resize all orig nodes because orig_node->bcast_own(_sum) depend on - * if_num - */ - for (i = 0; i < hash->size; i++) { - head = &hash->table[i]; - - rcu_read_lock(); - hlist_for_each_entry_rcu(orig_node, head, hash_entry) { - ret = 0; - if (bao->orig.add_if) - ret = bao->orig.add_if(orig_node, max_if_num); - if (ret == -ENOMEM) - goto err; - } - rcu_read_unlock(); - } - - return 0; - -err: - rcu_read_unlock(); - return -ENOMEM; -} - -/** - * batadv_orig_hash_del_if() - Remove interface from originators in orig_hash - * @hard_iface: hard interface to remove (still slave of the soft interface) - * @max_if_num: new number of interfaces - * - * Return: 0 on success or negative error number in case of failure - */ -int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface, - unsigned int max_if_num) -{ - struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); - struct batadv_hashtable *hash = bat_priv->orig_hash; - struct hlist_head *head; - struct batadv_hard_iface *hard_iface_tmp; - struct batadv_orig_node *orig_node; - struct batadv_algo_ops *bao = bat_priv->algo_ops; - u32 i; - int ret; - - /* resize all orig nodes because orig_node->bcast_own(_sum) depend on - * if_num - */ - for (i = 0; i < hash->size; i++) { 
- head = &hash->table[i]; - - rcu_read_lock(); - hlist_for_each_entry_rcu(orig_node, head, hash_entry) { - ret = 0; - if (bao->orig.del_if) - ret = bao->orig.del_if(orig_node, max_if_num, - hard_iface->if_num); - if (ret == -ENOMEM) - goto err; - } - rcu_read_unlock(); - } - - /* renumber remaining batman interfaces _inside_ of orig_hash_lock */ - rcu_read_lock(); - list_for_each_entry_rcu(hard_iface_tmp, &batadv_hardif_list, list) { - if (hard_iface_tmp->if_status == BATADV_IF_NOT_IN_USE) - continue; - - if (hard_iface == hard_iface_tmp) - continue; - - if (hard_iface->soft_iface != hard_iface_tmp->soft_iface) - continue; - - if (hard_iface_tmp->if_num > hard_iface->if_num) - hard_iface_tmp->if_num--; - } - rcu_read_unlock(); - - hard_iface->if_num = -1; - return 0; - -err: - rcu_read_unlock(); - return -ENOMEM; -} diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h index 3b3f59b881e1..a8b4c7b667ec 100644 --- a/net/batman-adv/originator.h +++ b/net/batman-adv/originator.h @@ -72,10 +72,6 @@ void batadv_orig_ifinfo_put(struct batadv_orig_ifinfo *orig_ifinfo); int batadv_orig_seq_print_text(struct seq_file *seq, void *offset); int batadv_orig_dump(struct sk_buff *msg, struct netlink_callback *cb); int batadv_orig_hardif_seq_print_text(struct seq_file *seq, void *offset); -int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface, - unsigned int max_if_num); -int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface, - unsigned int max_if_num); struct batadv_orig_node_vlan * batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node, unsigned short vid); diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index 626ddca332db..5db5a0a4c959 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@ -844,7 +844,6 @@ static int batadv_softif_init_late(struct net_device *dev) atomic_set(&bat_priv->frag_seqno, random_seqno); bat_priv->primary_if = NULL; - bat_priv->num_ifaces = 0; batadv_nc_init_bat_priv(bat_priv); @@ -1062,6 +1061,7 @@ static void batadv_softif_init_early(struct net_device *dev) dev->needs_free_netdev = true; dev->priv_destructor = batadv_softif_free; dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_NETNS_LOCAL; + dev->features |= NETIF_F_LLTX; dev->priv_flags |= IFF_NO_QUEUE; /* can't call min_mtu, because the needed variables diff --git a/net/batman-adv/trace.c b/net/batman-adv/trace.c new file mode 100644 index 000000000000..3d57f9981f25 --- /dev/null +++ b/net/batman-adv/trace.c @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2010-2018 B.A.T.M.A.N. contributors: + * + * Sven Eckelmann + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see <http://www.gnu.org/licenses/>. 
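An aside on the batadv_debug_log() rework above: handing a struct va_format to both batadv_fdebug_log() (via the kernel's %pV format extension) and the new batadv_dbg tracepoint lets one printf-style call feed two sinks without bouncing through the old 256-byte tmp_log_buf. The same fan-out can be sketched in portable user-space C with va_copy(); the sink names below are made up for illustration, not kernel APIs:

#include <stdarg.h>
#include <stdio.h>

/* Two hypothetical sinks standing in for batadv_fdebug_log() and the
 * trace_batadv_dbg() tracepoint. Each consumer gets its own va_list
 * copy, which is what %pV/struct va_format makes possible in-kernel
 * without an intermediate buffer. */
static void sink_a(const char *fmt, va_list args)
{
        vfprintf(stdout, fmt, args);
}

static void sink_b(const char *fmt, va_list args)
{
        vfprintf(stderr, fmt, args);
}

static void debug_log(const char *fmt, ...)
{
        va_list args, copy;

        va_start(args, fmt);
        va_copy(copy, args);    /* a va_list may only be consumed once */
        sink_a(fmt, args);
        sink_b(fmt, copy);
        va_end(copy);
        va_end(args);
}

int main(void)
{
        debug_log("[%10u] %s\n", 42u, "hello");
        return 0;
}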
+ */ + +#include <linux/module.h> + +#define CREATE_TRACE_POINTS +#include "trace.h" diff --git a/net/batman-adv/trace.h b/net/batman-adv/trace.h new file mode 100644 index 000000000000..3acda26a30ca --- /dev/null +++ b/net/batman-adv/trace.h @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2010-2018 B.A.T.M.A.N. contributors: + * + * Sven Eckelmann + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see <http://www.gnu.org/licenses/>. + */ + +#if !defined(_NET_BATMAN_ADV_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) +#define _NET_BATMAN_ADV_TRACE_H_ + +#include "main.h" + +#include <linux/tracepoint.h> + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM batadv + +/* provide dummy function when tracing is disabled */ +#if !defined(CONFIG_BATMAN_ADV_TRACING) + +#undef TRACE_EVENT +#define TRACE_EVENT(name, proto, ...) \ + static inline void trace_ ## name(proto) {} + +#endif /* CONFIG_BATMAN_ADV_TRACING */ + +#define BATADV_MAX_MSG_LEN 256 + +TRACE_EVENT(batadv_dbg, + + TP_PROTO(struct batadv_priv *bat_priv, + struct va_format *vaf), + + TP_ARGS(bat_priv, vaf), + + TP_STRUCT__entry( + __string(device, bat_priv->soft_iface->name) + __string(driver, KBUILD_MODNAME) + __dynamic_array(char, msg, BATADV_MAX_MSG_LEN) + ), + + TP_fast_assign( + __assign_str(device, bat_priv->soft_iface->name); + __assign_str(driver, KBUILD_MODNAME); + WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg), + BATADV_MAX_MSG_LEN, + vaf->fmt, + *vaf->va) >= BATADV_MAX_MSG_LEN); + ), + + TP_printk( + "%s %s %s", + __get_str(driver), + __get_str(device), + __get_str(msg) + ) +); + +#endif /* _NET_BATMAN_ADV_TRACE_H_ || TRACE_HEADER_MULTI_READ */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE trace + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h index 343d304851a5..45b5592de816 100644 --- a/net/batman-adv/types.h +++ b/net/batman-adv/types.h @@ -167,9 +167,6 @@ struct batadv_hard_iface { /** @list: list node for batadv_hardif_list */ struct list_head list; - /** @if_num: identificator of the interface */ - unsigned int if_num; - /** @if_status: status of the interface for batman-adv */ char if_status; @@ -233,6 +230,20 @@ struct batadv_hard_iface { }; /** + * struct batadv_orig_ifinfo_bat_iv - B.A.T.M.A.N. IV private orig_ifinfo members + */ +struct batadv_orig_ifinfo_bat_iv { + /** + * @bcast_own: bitfield which counts the number of our OGMs this + * orig_node rebroadcasted "back" to us (relative to last_real_seqno) + */ + DECLARE_BITMAP(bcast_own, BATADV_TQ_LOCAL_WINDOW_SIZE); + + /** @bcast_own_sum: sum of bcast_own */ + u8 bcast_own_sum; +}; + +/** + * struct batadv_orig_ifinfo - originator info per outgoing interface + */ struct batadv_orig_ifinfo { @@ -257,6 +268,9 @@ struct batadv_orig_ifinfo { /** @batman_seqno_reset: time when the batman seqno window was reset */ unsigned long batman_seqno_reset; + /** @bat_iv: B.A.T.M.A.N.
IV private structure */ + struct batadv_orig_ifinfo_bat_iv bat_iv; + /** @refcount: number of contexts the object is used */ struct kref refcount; @@ -339,19 +353,10 @@ struct batadv_orig_node_vlan { */ struct batadv_orig_bat_iv { /** - * @bcast_own: set of bitfields (one per hard-interface) where each one - * counts the number of our OGMs this orig_node rebroadcasted "back" to - * us (relative to last_real_seqno). Every bitfield is - * BATADV_TQ_LOCAL_WINDOW_SIZE bits long. - */ - unsigned long *bcast_own; - - /** @bcast_own_sum: sum of bcast_own */ - u8 *bcast_own_sum; - - /** - * @ogm_cnt_lock: lock protecting bcast_own, bcast_own_sum, - * neigh_node->bat_iv.real_bits & neigh_node->bat_iv.real_packet_count + * @ogm_cnt_lock: lock protecting &batadv_orig_ifinfo_bat_iv.bcast_own, + * &batadv_orig_ifinfo_bat_iv.bcast_own_sum, + * &batadv_neigh_ifinfo_bat_iv.bat_iv.real_bits and + * &batadv_neigh_ifinfo_bat_iv.real_packet_count */ spinlock_t ogm_cnt_lock; }; @@ -1597,9 +1602,6 @@ struct batadv_priv { /** @batman_queue_left: number of remaining OGM packet slots */ atomic_t batman_queue_left; - /** @num_ifaces: number of interfaces assigned to this mesh interface */ - unsigned int num_ifaces; - /** @mesh_obj: kobject for sysfs mesh subdirectory */ struct kobject *mesh_obj; @@ -2179,28 +2181,6 @@ struct batadv_algo_neigh_ops { * struct batadv_algo_orig_ops - mesh algorithm callbacks (originator specific) */ struct batadv_algo_orig_ops { - /** - * @free: free the resources allocated by the routing algorithm for an - * orig_node object (optional) - */ - void (*free)(struct batadv_orig_node *orig_node); - - /** - * @add_if: ask the routing algorithm to apply the needed changes to the - * orig_node due to a new hard-interface being added into the mesh - * (optional) - */ - int (*add_if)(struct batadv_orig_node *orig_node, - unsigned int max_if_num); - - /** - * @del_if: ask the routing algorithm to apply the needed changes to the - * orig_node due to an hard-interface being removed from the mesh - * (optional) - */ - int (*del_if)(struct batadv_orig_node *orig_node, - unsigned int max_if_num, unsigned int del_if_num); - #ifdef CONFIG_BATMAN_ADV_DEBUGFS /** @print: print the originator table (optional) */ void (*print)(struct batadv_priv *priv, struct seq_file *seq, diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c index 7b3965861013..43c284158f63 100644 --- a/net/bluetooth/bnep/core.c +++ b/net/bluetooth/bnep/core.c @@ -489,9 +489,6 @@ static int bnep_session(void *arg) add_wait_queue(sk_sleep(sk), &wait); while (1) { - /* Ensure session->terminate is updated */ - smp_mb__before_atomic(); - if (atomic_read(&s->terminate)) break; /* RX */ @@ -512,6 +509,10 @@ static int bnep_session(void *arg) break; netif_wake_queue(dev); + /* + * wait_woken() performs the necessary memory barriers + * for us; see the header comment for this primitive. 
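The bnep_session() change above, and the matching cmtp and hidp conversions below, drop their hand-rolled smp_mb__before_atomic()/smp_mb__after_atomic() pairs because wait_woken() and the waker's wake_up already order the terminate store against the sleeper's read. The contract being relied on is essentially release/acquire; a minimal user-space analogue in C11 atomics (with thrd_yield() standing in for the real sleep, so this illustrates the ordering, not wait_woken() itself):

#include <stdatomic.h>
#include <stdio.h>
#include <threads.h>

static atomic_int terminate;

static int session(void *arg)
{
        (void)arg;
        /* The acquire load pairs with the release store in main(), so
         * everything written before that store is visible once the loop
         * exits -- the guarantee wait_woken()/wake_up provide in-kernel. */
        while (!atomic_load_explicit(&terminate, memory_order_acquire))
                thrd_yield();   /* stands in for wait_woken() */
        return 0;
}

int main(void)
{
        thrd_t t;

        thrd_create(&t, session, NULL);
        atomic_store_explicit(&terminate, 1, memory_order_release);
        thrd_join(t, NULL);
        puts("session terminated");
        return 0;
}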
+ */ wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); } remove_wait_queue(sk_sleep(sk), &wait); diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c index 7f26a5a19ff6..07cfa3249f83 100644 --- a/net/bluetooth/cmtp/core.c +++ b/net/bluetooth/cmtp/core.c @@ -288,9 +288,6 @@ static int cmtp_session(void *arg) add_wait_queue(sk_sleep(sk), &wait); while (1) { - /* Ensure session->terminate is updated */ - smp_mb__before_atomic(); - if (atomic_read(&session->terminate)) break; if (sk->sk_state != BT_CONNECTED) @@ -306,6 +303,10 @@ static int cmtp_session(void *arg) cmtp_process_transmit(session); + /* + * wait_woken() performs the necessary memory barriers + * for us; see the header comment for this primitive. + */ wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); } remove_wait_queue(sk_sleep(sk), &wait); @@ -431,9 +432,10 @@ int cmtp_del_connection(struct cmtp_conndel_req *req) /* Stop session thread */ atomic_inc(&session->terminate); - /* Ensure session->terminate is updated */ - smp_mb__after_atomic(); - + /* + * See the comment preceding the call to wait_woken() + * in cmtp_session(). + */ wake_up_interruptible(sk_sleep(session->sock->sk)); } else err = -ENOENT; diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 74b29c7d841c..7352fe85674b 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -2839,6 +2839,20 @@ struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list, return NULL; } +struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk( + struct list_head *bdaddr_list, bdaddr_t *bdaddr, + u8 type) +{ + struct bdaddr_list_with_irk *b; + + list_for_each_entry(b, bdaddr_list, list) { + if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type) + return b; + } + + return NULL; +} + void hci_bdaddr_list_clear(struct list_head *bdaddr_list) { struct bdaddr_list *b, *n; @@ -2871,6 +2885,35 @@ int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type) return 0; } +int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr, + u8 type, u8 *peer_irk, u8 *local_irk) +{ + struct bdaddr_list_with_irk *entry; + + if (!bacmp(bdaddr, BDADDR_ANY)) + return -EBADF; + + if (hci_bdaddr_list_lookup(list, bdaddr, type)) + return -EEXIST; + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return -ENOMEM; + + bacpy(&entry->bdaddr, bdaddr); + entry->bdaddr_type = type; + + if (peer_irk) + memcpy(entry->peer_irk, peer_irk, 16); + + if (local_irk) + memcpy(entry->local_irk, local_irk, 16); + + list_add(&entry->list, list); + + return 0; +} + int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type) { struct bdaddr_list *entry; @@ -2890,6 +2933,26 @@ int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type) return 0; } +int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr, + u8 type) +{ + struct bdaddr_list_with_irk *entry; + + if (!bacmp(bdaddr, BDADDR_ANY)) { + hci_bdaddr_list_clear(list); + return 0; + } + + entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type); + if (!entry) + return -ENOENT; + + list_del(&entry->list); + kfree(entry); + + return 0; +} + /* This function requires the caller holds hdev->lock */ struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type) @@ -3084,6 +3147,8 @@ struct hci_dev *hci_alloc_dev(void) hdev->le_max_tx_time = 0x0148; hdev->le_max_rx_len = 0x001b; hdev->le_max_rx_time = 0x0148; + hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE; + 
hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE; hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M; hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M; diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index f12555f23a49..f47f8fad757a 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -1454,6 +1454,45 @@ static void hci_cc_le_write_def_data_len(struct hci_dev *hdev, hdev->le_def_tx_time = le16_to_cpu(sent->tx_time); } +static void hci_cc_le_add_to_resolv_list(struct hci_dev *hdev, + struct sk_buff *skb) +{ + struct hci_cp_le_add_to_resolv_list *sent; + __u8 status = *((__u8 *) skb->data); + + BT_DBG("%s status 0x%2.2x", hdev->name, status); + + if (status) + return; + + sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST); + if (!sent) + return; + + hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr, + sent->bdaddr_type, sent->peer_irk, + sent->local_irk); +} + +static void hci_cc_le_del_from_resolv_list(struct hci_dev *hdev, + struct sk_buff *skb) +{ + struct hci_cp_le_del_from_resolv_list *sent; + __u8 status = *((__u8 *) skb->data); + + BT_DBG("%s status 0x%2.2x", hdev->name, status); + + if (status) + return; + + sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST); + if (!sent) + return; + + hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr, + sent->bdaddr_type); +} + static void hci_cc_le_clear_resolv_list(struct hci_dev *hdev, struct sk_buff *skb) { @@ -3279,6 +3318,14 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb, hci_cc_le_write_def_data_len(hdev, skb); break; + case HCI_OP_LE_ADD_TO_RESOLV_LIST: + hci_cc_le_add_to_resolv_list(hdev, skb); + break; + + case HCI_OP_LE_DEL_FROM_RESOLV_LIST: + hci_cc_le_del_from_resolv_list(hdev, skb); + break; + case HCI_OP_LE_CLEAR_RESOLV_LIST: hci_cc_le_clear_resolv_list(hdev, skb); break; diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c index 253975cce943..3734dc1788b4 100644 --- a/net/bluetooth/hidp/core.c +++ b/net/bluetooth/hidp/core.c @@ -1074,6 +1074,10 @@ static int hidp_session_start_sync(struct hidp_session *session) static void hidp_session_terminate(struct hidp_session *session) { atomic_inc(&session->terminate); + /* + * See the comment preceding the call to wait_woken() + * in hidp_session_run(). + */ wake_up_interruptible(&hidp_session_wq); } @@ -1193,8 +1197,6 @@ static void hidp_session_run(struct hidp_session *session) * thread is woken up by ->sk_state_changed(). */ - /* Ensure session->terminate is updated */ - smp_mb__before_atomic(); if (atomic_read(&session->terminate)) break; @@ -1228,14 +1230,15 @@ static void hidp_session_run(struct hidp_session *session) hidp_process_transmit(session, &session->ctrl_transmit, session->ctrl_sock); + /* + * wait_woken() performs the necessary memory barriers + * for us; see the header comment for this primitive. 
+ */ wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); } remove_wait_queue(&hidp_session_wq, &wait); atomic_inc(&session->terminate); - - /* Ensure session->terminate is updated */ - smp_mb__after_atomic(); } static int hidp_session_wake_function(wait_queue_entry_t *wait, diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index d17a4736e47c..514899f7f0d4 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -51,9 +51,6 @@ static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD; static LIST_HEAD(chan_list); static DEFINE_RWLOCK(chan_list_lock); -static u16 le_max_credits = L2CAP_LE_MAX_CREDITS; -static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS; - static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code, u8 ident, u16 dlen, void *data); static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, @@ -519,8 +516,10 @@ static void l2cap_le_flowctl_init(struct l2cap_chan *chan) chan->sdu_last_frag = NULL; chan->sdu_len = 0; chan->tx_credits = 0; - chan->rx_credits = le_max_credits; - chan->mps = min_t(u16, chan->imtu, le_default_mps); + /* Derive MPS from connection MTU to stop HCI fragmentation */ + chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE); + /* Give enough credits for a full packet */ + chan->rx_credits = (chan->imtu / chan->mps) + 1; skb_queue_head_init(&chan->tx_q); } @@ -1282,6 +1281,8 @@ static void l2cap_le_connect(struct l2cap_chan *chan) if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags)) return; + l2cap_le_flowctl_init(chan); + req.psm = chan->psm; req.scid = cpu_to_le16(chan->scid); req.mtu = cpu_to_le16(chan->imtu); @@ -5493,8 +5494,6 @@ static int l2cap_le_connect_req(struct l2cap_conn *conn, goto response_unlock; } - l2cap_le_flowctl_init(chan); - bacpy(&chan->src, &conn->hcon->src); bacpy(&chan->dst, &conn->hcon->dst); chan->src_type = bdaddr_src_type(conn->hcon); @@ -5506,6 +5505,9 @@ static int l2cap_le_connect_req(struct l2cap_conn *conn, chan->tx_credits = __le16_to_cpu(req->credits); __l2cap_chan_add(conn, chan); + + l2cap_le_flowctl_init(chan); + dcid = chan->scid; credits = chan->rx_credits; @@ -6699,13 +6701,10 @@ static void l2cap_chan_le_send_credits(struct l2cap_chan *chan) struct l2cap_le_credits pkt; u16 return_credits; - /* We return more credits to the sender only after the amount of - * credits falls below half of the initial amount. 
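To make the reworked L2CAP LE flow control above concrete: l2cap_le_flowctl_init() now derives the MPS from the connection MTU so a single PDU never needs HCI fragmentation, then sizes the credit pool so one full SDU can always be received. A stand-alone sketch of that arithmetic (the two input values are examples, not defaults from the spec):

#include <stdint.h>
#include <stdio.h>

#define L2CAP_HDR_SIZE 4        /* basic L2CAP header length, as in the kernel */

static uint16_t min_u16(uint16_t a, uint16_t b)
{
        return a < b ? a : b;
}

int main(void)
{
        uint16_t imtu = 672;            /* channel MTU, example value */
        uint16_t conn_mtu = 251;        /* ACL payload the link accepts, example */

        /* Derive MPS from connection MTU to stop HCI fragmentation */
        uint16_t mps = min_u16(imtu, conn_mtu - L2CAP_HDR_SIZE);
        /* Give enough credits for a full SDU */
        uint16_t rx_credits = imtu / mps + 1;

        printf("mps=%u rx_credits=%u\n", (unsigned)mps, (unsigned)rx_credits);
        return 0;
}

With these numbers the channel advertises an MPS of 247 and grants 3 credits, and l2cap_chan_le_send_credits() below tops the pool back up to the same imtu/mps + 1 watermark.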
- */ - if (chan->rx_credits >= (le_max_credits + 1) / 2) - return; + return_credits = ((chan->imtu / chan->mps) + 1) - chan->rx_credits; - return_credits = le_max_credits - chan->rx_credits; + if (!return_credits) + return; BT_DBG("chan %p returning %u credits to sender", chan, return_credits); @@ -6719,6 +6718,21 @@ static void l2cap_chan_le_send_credits(struct l2cap_chan *chan) l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt); } +static int l2cap_le_recv(struct l2cap_chan *chan, struct sk_buff *skb) +{ + int err; + + BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len); + + /* Wait for recv to confirm reception before updating the credits */ + err = chan->ops->recv(chan, skb); + + /* Update credits whenever an SDU is received */ + l2cap_chan_le_send_credits(chan); + + return err; +} + static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb) { int err; @@ -6737,7 +6751,11 @@ static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb) chan->rx_credits--; BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits); - l2cap_chan_le_send_credits(chan); + /* Update if remote had run out of credits, this should only happen + * if the remote is not using the entire MPS. + */ + if (!chan->rx_credits) + l2cap_chan_le_send_credits(chan); err = 0; @@ -6763,12 +6781,22 @@ static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb) } if (skb->len == sdu_len) - return chan->ops->recv(chan, skb); + return l2cap_le_recv(chan, skb); chan->sdu = skb; chan->sdu_len = sdu_len; chan->sdu_last_frag = skb; + /* Detect if remote is not able to use the selected MPS */ + if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) { + u16 mps_len = skb->len + L2CAP_SDULEN_SIZE; + + /* Adjust the number of credits */ + BT_DBG("chan->mps %u -> %u", chan->mps, mps_len); + chan->mps = mps_len; + l2cap_chan_le_send_credits(chan); + } + return 0; } @@ -6785,7 +6813,7 @@ static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb) skb = NULL; if (chan->sdu->len == chan->sdu_len) { - err = chan->ops->recv(chan, chan->sdu); + err = l2cap_le_recv(chan, chan->sdu); if (!err) { chan->sdu = NULL; chan->sdu_last_frag = NULL; @@ -7102,7 +7130,6 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, case L2CAP_MODE_BASIC: break; case L2CAP_MODE_LE_FLOWCTL: - l2cap_le_flowctl_init(chan); break; case L2CAP_MODE_ERTM: case L2CAP_MODE_STREAMING: @@ -7645,11 +7672,6 @@ int __init l2cap_init(void) l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs, NULL, &l2cap_debugfs_fops); - debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs, - &le_max_credits); - debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs, - &le_default_mps); - return 0; } diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c index 73f7211d0431..a1c1b7e8a45c 100644 --- a/net/bluetooth/smp.c +++ b/net/bluetooth/smp.c @@ -88,9 +88,6 @@ struct smp_dev { u8 local_rand[16]; bool debug_key; - u8 min_key_size; - u8 max_key_size; - struct crypto_cipher *tfm_aes; struct crypto_shash *tfm_cmac; struct crypto_kpp *tfm_ecdh; @@ -720,7 +717,7 @@ static void build_pairing_cmd(struct l2cap_conn *conn, if (rsp == NULL) { req->io_capability = conn->hcon->io_capability; req->oob_flag = oob_flag; - req->max_key_size = SMP_DEV(hdev)->max_key_size; + req->max_key_size = hdev->le_max_key_size; req->init_key_dist = local_dist; req->resp_key_dist = remote_dist; req->auth_req = (authreq & AUTH_REQ_MASK(hdev)); @@ -731,7 +728,7 @@ static void
build_pairing_cmd(struct l2cap_conn *conn, rsp->io_capability = conn->hcon->io_capability; rsp->oob_flag = oob_flag; - rsp->max_key_size = SMP_DEV(hdev)->max_key_size; + rsp->max_key_size = hdev->le_max_key_size; rsp->init_key_dist = req->init_key_dist & remote_dist; rsp->resp_key_dist = req->resp_key_dist & local_dist; rsp->auth_req = (authreq & AUTH_REQ_MASK(hdev)); @@ -745,7 +742,7 @@ static u8 check_enc_key_size(struct l2cap_conn *conn, __u8 max_key_size) struct hci_dev *hdev = conn->hcon->hdev; struct smp_chan *smp = chan->data; - if (max_key_size > SMP_DEV(hdev)->max_key_size || + if (max_key_size > hdev->le_max_key_size || max_key_size < SMP_MIN_ENC_KEY_SIZE) return SMP_ENC_KEY_SIZE; @@ -3264,8 +3261,6 @@ static struct l2cap_chan *smp_add_cid(struct hci_dev *hdev, u16 cid) smp->tfm_aes = tfm_aes; smp->tfm_cmac = tfm_cmac; smp->tfm_ecdh = tfm_ecdh; - smp->min_key_size = SMP_MIN_ENC_KEY_SIZE; - smp->max_key_size = SMP_MAX_ENC_KEY_SIZE; create_chan: chan = l2cap_chan_create(); @@ -3391,7 +3386,7 @@ static ssize_t le_min_key_size_read(struct file *file, struct hci_dev *hdev = file->private_data; char buf[4]; - snprintf(buf, sizeof(buf), "%2u\n", SMP_DEV(hdev)->min_key_size); + snprintf(buf, sizeof(buf), "%2u\n", hdev->le_min_key_size); return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf)); } @@ -3412,11 +3407,11 @@ static ssize_t le_min_key_size_write(struct file *file, sscanf(buf, "%hhu", &key_size); - if (key_size > SMP_DEV(hdev)->max_key_size || + if (key_size > hdev->le_max_key_size || key_size < SMP_MIN_ENC_KEY_SIZE) return -EINVAL; - SMP_DEV(hdev)->min_key_size = key_size; + hdev->le_min_key_size = key_size; return count; } @@ -3435,7 +3430,7 @@ static ssize_t le_max_key_size_read(struct file *file, struct hci_dev *hdev = file->private_data; char buf[4]; - snprintf(buf, sizeof(buf), "%2u\n", SMP_DEV(hdev)->max_key_size); + snprintf(buf, sizeof(buf), "%2u\n", hdev->le_max_key_size); return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf)); } @@ -3457,10 +3452,10 @@ static ssize_t le_max_key_size_write(struct file *file, sscanf(buf, "%hhu", &key_size); if (key_size > SMP_MAX_ENC_KEY_SIZE || - key_size < SMP_DEV(hdev)->min_key_size) + key_size < hdev->le_min_key_size) return -EINVAL; - SMP_DEV(hdev)->max_key_size = key_size; + hdev->le_max_key_size = key_size; return count; } diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index f4078830ea50..0c423b8cd75c 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -12,7 +12,7 @@ #include <linux/sched/signal.h> static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx, - struct bpf_cgroup_storage *storage) + struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) { u32 ret; @@ -28,13 +28,20 @@ static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx, static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time) { - struct bpf_cgroup_storage *storage = NULL; + struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { 0 }; + enum bpf_cgroup_storage_type stype; u64 time_start, time_spent = 0; u32 ret = 0, i; - storage = bpf_cgroup_storage_alloc(prog); - if (IS_ERR(storage)) - return PTR_ERR(storage); + for_each_cgroup_storage_type(stype) { + storage[stype] = bpf_cgroup_storage_alloc(prog, stype); + if (IS_ERR(storage[stype])) { + storage[stype] = NULL; + for_each_cgroup_storage_type(stype) + bpf_cgroup_storage_free(storage[stype]); + return -ENOMEM; + } + } if (!repeat) repeat = 1; @@ -53,7 +60,8 @@ static u32 
bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time) do_div(time_spent, repeat); *time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent; - bpf_cgroup_storage_free(storage); + for_each_cgroup_storage_type(stype) + bpf_cgroup_storage_free(storage[stype]); return ret; } diff --git a/net/bridge/br.c b/net/bridge/br.c index b0a0b82e2d91..e411e40333e2 100644 --- a/net/bridge/br.c +++ b/net/bridge/br.c @@ -175,6 +175,22 @@ static struct notifier_block br_switchdev_notifier = { .notifier_call = br_switchdev_event, }; +void br_opt_toggle(struct net_bridge *br, enum net_bridge_opts opt, bool on) +{ + bool cur = !!br_opt_get(br, opt); + + br_debug(br, "toggle option: %d state: %d -> %d\n", + opt, cur, on); + + if (cur == on) + return; + + if (on) + set_bit(opt, &br->options); + else + clear_bit(opt, &br->options); +} + static void __net_exit br_net_exit(struct net *net) { struct net_device *dev; diff --git a/net/bridge/br_arp_nd_proxy.c b/net/bridge/br_arp_nd_proxy.c index 2cf7716254be..6b78e6351719 100644 --- a/net/bridge/br_arp_nd_proxy.c +++ b/net/bridge/br_arp_nd_proxy.c @@ -39,7 +39,7 @@ void br_recalculate_neigh_suppress_enabled(struct net_bridge *br) } } - br->neigh_suppress_enabled = neigh_suppress; + br_opt_toggle(br, BROPT_NEIGH_SUPPRESS_ENABLED, neigh_suppress); } #if IS_ENABLED(CONFIG_INET) @@ -155,7 +155,7 @@ void br_do_proxy_suppress_arp(struct sk_buff *skb, struct net_bridge *br, ipv4_is_multicast(tip)) return; - if (br->neigh_suppress_enabled) { + if (br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED)) { if (p && (p->flags & BR_NEIGH_SUPPRESS)) return; if (ipv4_is_zeronet(sip) || sip == tip) { @@ -175,7 +175,8 @@ void br_do_proxy_suppress_arp(struct sk_buff *skb, struct net_bridge *br, return; } - if (br->neigh_suppress_enabled && br_is_local_ip(vlandev, tip)) { + if (br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED) && + br_is_local_ip(vlandev, tip)) { /* its our local ip, so don't proxy reply * and don't forward to neigh suppress ports */ @@ -213,7 +214,8 @@ void br_do_proxy_suppress_arp(struct sk_buff *skb, struct net_bridge *br, /* If we have replied or as long as we know the * mac, indicate to arp replied */ - if (replied || br->neigh_suppress_enabled) + if (replied || + br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED)) BR_INPUT_SKB_CB(skb)->proxyarp_replied = true; } @@ -311,7 +313,7 @@ static void br_nd_send(struct net_bridge *br, struct net_bridge_port *p, /* Neighbor Advertisement */ memset(na, 0, sizeof(*na) + na_olen); na->icmph.icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT; - na->icmph.icmp6_router = 0; /* XXX: should be 1 ? */ + na->icmph.icmp6_router = (n->flags & NTF_ROUTER) ? 
1 : 0; na->icmph.icmp6_override = 1; na->icmph.icmp6_solicited = 1; na->target = ns->target; @@ -460,7 +462,8 @@ void br_do_suppress_nd(struct sk_buff *skb, struct net_bridge *br, * mac, indicate to NEIGH_SUPPRESS ports that we * have replied */ - if (replied || br->neigh_suppress_enabled) + if (replied || + br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED)) BR_INPUT_SKB_CB(skb)->proxyarp_replied = true; } neigh_release(n); diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index e682a668ce57..e053a4e43758 100644 --- a/net/bridge/br_device.c +++ b/net/bridge/br_device.c @@ -67,11 +67,11 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev) if (IS_ENABLED(CONFIG_INET) && (eth->h_proto == htons(ETH_P_ARP) || eth->h_proto == htons(ETH_P_RARP)) && - br->neigh_suppress_enabled) { + br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED)) { br_do_proxy_suppress_arp(skb, br, vid, NULL); } else if (IS_ENABLED(CONFIG_IPV6) && skb->protocol == htons(ETH_P_IPV6) && - br->neigh_suppress_enabled && + br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED) && pskb_may_pull(skb, sizeof(struct ipv6hdr) + sizeof(struct nd_msg)) && ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) { @@ -228,7 +228,7 @@ static int br_change_mtu(struct net_device *dev, int new_mtu) dev->mtu = new_mtu; /* this flag will be cleared if the MTU was automatically adjusted */ - br->mtu_set_by_user = true; + br_opt_toggle(br, BROPT_MTU_SET_BY_USER, true); #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) /* remember the MTU in the rtable for PMTU */ dst_metric_set(&br->fake_rtable.dst, RTAX_MTU, new_mtu); diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index 502f66349530..74331690a390 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c @@ -504,6 +504,7 @@ static struct net_bridge_fdb_entry *fdb_create(struct net_bridge *br, fdb->added_by_user = 0; fdb->added_by_external_learn = 0; fdb->offloaded = 0; + fdb->is_sticky = 0; fdb->updated = fdb->used = jiffies; if (rhashtable_lookup_insert_fast(&br->fdb_hash_tbl, &fdb->rhnode, @@ -584,7 +585,7 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source, unsigned long now = jiffies; /* fastpath: update of existing entry */ - if (unlikely(source != fdb->dst)) { + if (unlikely(source != fdb->dst && !fdb->is_sticky)) { fdb->dst = source; fdb_modified = true; /* Take over HW learned entry */ @@ -656,6 +657,8 @@ static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br, ndm->ndm_flags |= NTF_OFFLOADED; if (fdb->added_by_external_learn) ndm->ndm_flags |= NTF_EXT_LEARNED; + if (fdb->is_sticky) + ndm->ndm_flags |= NTF_STICKY; if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->key.addr)) goto nla_put_failure; @@ -772,8 +775,10 @@ skip: /* Update (create or replace) forwarding database entry */ static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source, - const __u8 *addr, __u16 state, __u16 flags, __u16 vid) + const u8 *addr, u16 state, u16 flags, u16 vid, + u8 ndm_flags) { + u8 is_sticky = !!(ndm_flags & NTF_STICKY); struct net_bridge_fdb_entry *fdb; bool modified = false; @@ -789,6 +794,9 @@ static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source, return -EINVAL; } + if (is_sticky && (state & NUD_PERMANENT)) + return -EINVAL; + fdb = br_fdb_find(br, addr, vid); if (fdb == NULL) { if (!(flags & NLM_F_CREATE)) @@ -832,6 +840,12 @@ static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source, modified = true; } + + if (is_sticky != fdb->is_sticky) { + fdb->is_sticky = is_sticky; + modified = true; + } + 
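The sticky bit threaded through fdb_create() and fdb_add_entry() above enforces two rules: NTF_STICKY cannot be combined with NUD_PERMANENT (a permanent entry never roams anyway), and the br_fdb_update() fast path no longer migrates a sticky entry to whichever port last saw the address. A compact user-space model of the two rules (the struct is a simplified stand-in; the flag values are assumptions taken from include/uapi/linux/neighbour.h):

#include <stdbool.h>
#include <stdio.h>

/* Flag values assumed from include/uapi/linux/neighbour.h */
#define NUD_PERMANENT   0x80
#define NTF_STICKY      0x40

struct fdb_entry {
        int port;               /* stands in for fdb->dst */
        bool is_sticky;
};

/* Rule 1: sticky and permanent are mutually exclusive on add/update */
static int validate_add(unsigned int state, unsigned int ndm_flags)
{
        bool is_sticky = ndm_flags & NTF_STICKY;

        if (is_sticky && (state & NUD_PERMANENT))
                return -1;      /* the kernel returns -EINVAL here */
        return 0;
}

/* Rule 2: learning never moves a sticky entry to a different port */
static void learn(struct fdb_entry *fdb, int source_port)
{
        if (source_port != fdb->port && !fdb->is_sticky)
                fdb->port = source_port;
}

int main(void)
{
        struct fdb_entry e = { .port = 1, .is_sticky = true };

        printf("sticky+permanent add: %d\n",
               validate_add(NUD_PERMANENT, NTF_STICKY));
        learn(&e, 2);
        printf("port after learn: %d (unchanged)\n", e.port);
        return 0;
}

With userspace support, such an entry would presumably be created along the lines of "bridge fdb add <mac> dev <port> master sticky"; the iproute2 keyword is an assumption, since the tooling side is not part of this diff.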
fdb->added_by_user = 1; fdb->used = jiffies; @@ -865,7 +879,7 @@ static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br, } else { spin_lock_bh(&br->hash_lock); err = fdb_add_entry(br, p, addr, ndm->ndm_state, - nlh_flags, vid); + nlh_flags, vid, ndm->ndm_flags); spin_unlock_bh(&br->hash_lock); } diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index 0363f1bdc401..9b46d2dc4c22 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c @@ -394,8 +394,7 @@ static int find_portno(struct net_bridge *br) struct net_bridge_port *p; unsigned long *inuse; - inuse = kcalloc(BITS_TO_LONGS(BR_MAX_PORTS), sizeof(unsigned long), - GFP_KERNEL); + inuse = bitmap_zalloc(BR_MAX_PORTS, GFP_KERNEL); if (!inuse) return -ENOMEM; @@ -404,7 +403,7 @@ static int find_portno(struct net_bridge *br) set_bit(p->port_no, inuse); } index = find_first_zero_bit(inuse, BR_MAX_PORTS); - kfree(inuse); + bitmap_free(inuse); return (index >= BR_MAX_PORTS) ? -EXFULL : index; } @@ -509,14 +508,14 @@ void br_mtu_auto_adjust(struct net_bridge *br) ASSERT_RTNL(); /* if the bridge MTU was manually configured don't mess with it */ - if (br->mtu_set_by_user) + if (br_opt_get(br, BROPT_MTU_SET_BY_USER)) return; /* change to the minimum MTU and clear the flag which was set by * the bridge ndo_change_mtu callback */ dev_set_mtu(br->dev, br_mtu_min(br)); - br->mtu_set_by_user = false; + br_opt_toggle(br, BROPT_MTU_SET_BY_USER, false); } static void br_set_gso_limits(struct net_bridge *br) diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c index 72074276c088..3ddca11f44c2 100644 --- a/net/bridge/br_input.c +++ b/net/bridge/br_input.c @@ -122,7 +122,7 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb br_do_proxy_suppress_arp(skb, br, vid, p); } else if (IS_ENABLED(CONFIG_IPV6) && skb->protocol == htons(ETH_P_IPV6) && - br->neigh_suppress_enabled && + br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED) && pskb_may_pull(skb, sizeof(struct ipv6hdr) + sizeof(struct nd_msg)) && ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) { diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c index 6d9f48bd374a..a7ea2d431714 100644 --- a/net/bridge/br_mdb.c +++ b/net/bridge/br_mdb.c @@ -84,7 +84,7 @@ static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb, int i, err = 0; int idx = 0, s_idx = cb->args[1]; - if (br->multicast_disabled) + if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) return 0; mdb = rcu_dereference(br->mdb); @@ -162,6 +162,29 @@ out: return err; } +static int br_mdb_valid_dump_req(const struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) +{ + struct br_port_msg *bpm; + + if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*bpm))) { + NL_SET_ERR_MSG_MOD(extack, "Invalid header for mdb dump request"); + return -EINVAL; + } + + bpm = nlmsg_data(nlh); + if (bpm->ifindex) { + NL_SET_ERR_MSG_MOD(extack, "Filtering by device index is not supported for mdb dump request"); + return -EINVAL; + } + if (nlmsg_attrlen(nlh, sizeof(*bpm))) { + NL_SET_ERR_MSG(extack, "Invalid data after header in mdb dump request"); + return -EINVAL; + } + + return 0; +} + static int br_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb) { struct net_device *dev; @@ -169,6 +192,13 @@ static int br_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb) struct nlmsghdr *nlh = NULL; int idx = 0, s_idx; + if (cb->strict_check) { + int err = br_mdb_valid_dump_req(cb->nlh, cb->extack); + + if (err < 0) + return err; + } + s_idx = cb->args[0]; rcu_read_lock(); @@ -598,7 +628,7 @@ static int __br_mdb_add(struct net 
*net, struct net_bridge *br, struct net_bridge_port *p; int ret; - if (!netif_running(br->dev) || br->multicast_disabled) + if (!netif_running(br->dev) || !br_opt_get(br, BROPT_MULTICAST_ENABLED)) return -EINVAL; dev = __dev_get_by_index(net, entry->ifindex); @@ -673,7 +703,7 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry) struct br_ip ip; int err = -EINVAL; - if (!netif_running(br->dev) || br->multicast_disabled) + if (!netif_running(br->dev) || !br_opt_get(br, BROPT_MULTICAST_ENABLED)) return -EINVAL; __mdb_entry_to_br_ip(entry, &ip); diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 20ed7adcf1cc..024139b51d3a 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -158,7 +158,7 @@ struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br, struct net_bridge_mdb_htable *mdb = rcu_dereference(br->mdb); struct br_ip ip; - if (br->multicast_disabled) + if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) return NULL; if (BR_INPUT_SKB_CB(skb)->igmp) @@ -411,7 +411,7 @@ static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br, iph->frag_off = htons(IP_DF); iph->ttl = 1; iph->protocol = IPPROTO_IGMP; - iph->saddr = br->multicast_query_use_ifaddr ? + iph->saddr = br_opt_get(br, BROPT_MULTICAST_QUERY_USE_IFADDR) ? inet_select_addr(br->dev, 0, RT_SCOPE_LINK) : 0; iph->daddr = htonl(INADDR_ALLHOSTS_GROUP); ((u8 *)&iph[1])[0] = IPOPT_RA; @@ -503,11 +503,11 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br, if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0, &ip6h->saddr)) { kfree_skb(skb); - br->has_ipv6_addr = 0; + br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, false); return NULL; } - br->has_ipv6_addr = 1; + br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, true); ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest); hopopt = (u8 *)(ip6h + 1); @@ -628,7 +628,7 @@ static struct net_bridge_mdb_entry *br_multicast_get_group( port ? 
port->dev->name : br->dev->name); err = -E2BIG; disable: - br->multicast_disabled = 1; + br_opt_toggle(br, BROPT_MULTICAST_ENABLED, false); goto err; } } @@ -894,7 +894,7 @@ static void br_multicast_querier_expired(struct net_bridge *br, struct bridge_mcast_own_query *query) { spin_lock(&br->multicast_lock); - if (!netif_running(br->dev) || br->multicast_disabled) + if (!netif_running(br->dev) || !br_opt_get(br, BROPT_MULTICAST_ENABLED)) goto out; br_multicast_start_querier(br, query); @@ -965,8 +965,9 @@ static void br_multicast_send_query(struct net_bridge *br, struct br_ip br_group; unsigned long time; - if (!netif_running(br->dev) || br->multicast_disabled || - !br->multicast_querier) + if (!netif_running(br->dev) || + !br_opt_get(br, BROPT_MULTICAST_ENABLED) || + !br_opt_get(br, BROPT_MULTICAST_QUERIER)) return; memset(&br_group.u, 0, sizeof(br_group.u)); @@ -1036,7 +1037,7 @@ static void br_mc_disabled_update(struct net_device *dev, bool value) .orig_dev = dev, .id = SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED, .flags = SWITCHDEV_F_DEFER, - .u.mc_disabled = value, + .u.mc_disabled = !value, }; switchdev_port_attr_set(dev, &attr); @@ -1054,7 +1055,8 @@ int br_multicast_add_port(struct net_bridge_port *port) timer_setup(&port->ip6_own_query.timer, br_ip6_multicast_port_query_expired, 0); #endif - br_mc_disabled_update(port->dev, port->br->multicast_disabled); + br_mc_disabled_update(port->dev, + br_opt_get(port->br, BROPT_MULTICAST_ENABLED)); port->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats); if (!port->mcast_stats) @@ -1091,7 +1093,7 @@ static void __br_multicast_enable_port(struct net_bridge_port *port) { struct net_bridge *br = port->br; - if (br->multicast_disabled || !netif_running(br->dev)) + if (!br_opt_get(br, BROPT_MULTICAST_ENABLED) || !netif_running(br->dev)) return; br_multicast_enable(&port->ip4_own_query); @@ -1634,7 +1636,7 @@ br_multicast_leave_group(struct net_bridge *br, if (timer_pending(&other_query->timer)) goto out; - if (br->multicast_querier) { + if (br_opt_get(br, BROPT_MULTICAST_QUERIER)) { __br_multicast_send_query(br, port, &mp->addr); time = jiffies + br->multicast_last_member_count * @@ -1746,7 +1748,7 @@ static void br_multicast_err_count(const struct net_bridge *br, struct bridge_mcast_stats __percpu *stats; struct bridge_mcast_stats *pstats; - if (!br->multicast_stats_enabled) + if (!br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED)) return; if (p) @@ -1904,7 +1906,7 @@ int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port, BR_INPUT_SKB_CB(skb)->igmp = 0; BR_INPUT_SKB_CB(skb)->mrouters_only = 0; - if (br->multicast_disabled) + if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) return 0; switch (skb->protocol) { @@ -1956,8 +1958,6 @@ void br_multicast_init(struct net_bridge *br) br->hash_max = 512; br->multicast_router = MDB_RTR_TYPE_TEMP_QUERY; - br->multicast_querier = 0; - br->multicast_query_use_ifaddr = 0; br->multicast_last_member_count = 2; br->multicast_startup_query_count = 2; @@ -1976,7 +1976,8 @@ void br_multicast_init(struct net_bridge *br) br->ip6_other_query.delay_time = 0; br->ip6_querier.port = NULL; #endif - br->has_ipv6_addr = 1; + br_opt_toggle(br, BROPT_MULTICAST_ENABLED, true); + br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, true); spin_lock_init(&br->multicast_lock); timer_setup(&br->multicast_router_timer, @@ -1998,7 +1999,7 @@ static void __br_multicast_open(struct net_bridge *br, { query->startup_sent = 0; - if (br->multicast_disabled) + if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) return; 
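All of the br_opt_get()/br_opt_toggle() conversions above funnel the bridge's former one-bit bools and bitfields into a single unsigned long options word, read lock-free with test_bit() and flipped under RTNL by br_opt_toggle(). The pattern is easy to isolate; a user-space sketch using plain bit operations where the kernel uses test_bit()/set_bit()/clear_bit():

#include <stdbool.h>
#include <stdio.h>

enum opts {
        OPT_MULTICAST_ENABLED,
        OPT_MULTICAST_QUERIER,
        OPT_NEIGH_SUPPRESS_ENABLED,
};

struct bridge {
        unsigned long options;  /* one bit per boolean option */
};

static bool opt_get(const struct bridge *br, enum opts opt)
{
        return br->options & (1UL << opt);
}

static void opt_toggle(struct bridge *br, enum opts opt, bool on)
{
        if (opt_get(br, opt) == on)
                return;         /* no change; mirrors the kernel helper */
        if (on)
                br->options |= 1UL << opt;
        else
                br->options &= ~(1UL << opt);
}

int main(void)
{
        struct bridge br = { 0 };

        opt_toggle(&br, OPT_MULTICAST_ENABLED, true);
        printf("mcast enabled: %d\n", opt_get(&br, OPT_MULTICAST_ENABLED));
        return 0;
}

Packing the flags into one word also keeps them in a single cache line, which is part of the motivation given that several of these options are tested for every forwarded packet.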
mod_timer(&query->timer, jiffies); @@ -2173,12 +2174,12 @@ int br_multicast_toggle(struct net_bridge *br, unsigned long val) int err = 0; spin_lock_bh(&br->multicast_lock); - if (br->multicast_disabled == !val) + if (!!br_opt_get(br, BROPT_MULTICAST_ENABLED) == !!val) goto unlock; - br_mc_disabled_update(br->dev, !val); - br->multicast_disabled = !val; - if (br->multicast_disabled) + br_mc_disabled_update(br->dev, val); + br_opt_toggle(br, BROPT_MULTICAST_ENABLED, !!val); + if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) goto unlock; if (!netif_running(br->dev)) @@ -2189,7 +2190,7 @@ int br_multicast_toggle(struct net_bridge *br, unsigned long val) if (mdb->old) { err = -EEXIST; rollback: - br->multicast_disabled = !!val; + br_opt_toggle(br, BROPT_MULTICAST_ENABLED, false); goto unlock; } @@ -2213,7 +2214,7 @@ bool br_multicast_enabled(const struct net_device *dev) { struct net_bridge *br = netdev_priv(dev); - return !br->multicast_disabled; + return !!br_opt_get(br, BROPT_MULTICAST_ENABLED); } EXPORT_SYMBOL_GPL(br_multicast_enabled); @@ -2236,10 +2237,10 @@ int br_multicast_set_querier(struct net_bridge *br, unsigned long val) val = !!val; spin_lock_bh(&br->multicast_lock); - if (br->multicast_querier == val) + if (br_opt_get(br, BROPT_MULTICAST_QUERIER) == val) goto unlock; - br->multicast_querier = val; + br_opt_toggle(br, BROPT_MULTICAST_QUERIER, !!val); if (!val) goto unlock; @@ -2560,7 +2561,7 @@ void br_multicast_count(struct net_bridge *br, const struct net_bridge_port *p, struct bridge_mcast_stats __percpu *stats; /* if multicast_disabled is true then igmp type can't be set */ - if (!type || !br->multicast_stats_enabled) + if (!type || !br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED)) return; if (p) diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c index 37278dc280eb..b1b5e8516724 100644 --- a/net/bridge/br_netfilter_hooks.c +++ b/net/bridge/br_netfilter_hooks.c @@ -487,14 +487,15 @@ static unsigned int br_nf_pre_routing(void *priv, br = p->br; if (IS_IPV6(skb) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb)) { - if (!brnf_call_ip6tables && !br->nf_call_ip6tables) + if (!brnf_call_ip6tables && + !br_opt_get(br, BROPT_NF_CALL_IP6TABLES)) return NF_ACCEPT; nf_bridge_pull_encap_header_rcsum(skb); return br_nf_pre_routing_ipv6(priv, skb, state); } - if (!brnf_call_iptables && !br->nf_call_iptables) + if (!brnf_call_iptables && !br_opt_get(br, BROPT_NF_CALL_IPTABLES)) return NF_ACCEPT; if (!IS_IP(skb) && !IS_VLAN_IP(skb) && !IS_PPPOE_IP(skb)) @@ -636,7 +637,7 @@ static unsigned int br_nf_forward_arp(void *priv, return NF_ACCEPT; br = p->br; - if (!brnf_call_arptables && !br->nf_call_arptables) + if (!brnf_call_arptables && !br_opt_get(br, BROPT_NF_CALL_ARPTABLES)) return NF_ACCEPT; if (!IS_ARP(skb)) { diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index ec2b58a09f76..3345f1984542 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -1034,6 +1034,7 @@ static const struct nla_policy br_policy[IFLA_BR_MAX + 1] = { [IFLA_BR_MCAST_STATS_ENABLED] = { .type = NLA_U8 }, [IFLA_BR_MCAST_IGMP_VERSION] = { .type = NLA_U8 }, [IFLA_BR_MCAST_MLD_VERSION] = { .type = NLA_U8 }, + [IFLA_BR_VLAN_STATS_PER_PORT] = { .type = NLA_U8 }, }; static int br_changelink(struct net_device *brdev, struct nlattr *tb[], @@ -1114,6 +1115,14 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[], if (err) return err; } + + if (data[IFLA_BR_VLAN_STATS_PER_PORT]) { + __u8 per_port = nla_get_u8(data[IFLA_BR_VLAN_STATS_PER_PORT]); + + err = 
br_vlan_set_stats_per_port(br, per_port); + if (err) + return err; + } #endif if (data[IFLA_BR_GROUP_FWD_MASK]) { @@ -1139,7 +1148,7 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[], spin_lock_bh(&br->lock); memcpy(br->group_addr, new_addr, sizeof(br->group_addr)); spin_unlock_bh(&br->lock); - br->group_addr_set = true; + br_opt_toggle(br, BROPT_GROUP_ADDR_SET, true); br_recalculate_fwd_mask(br); } @@ -1167,7 +1176,7 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[], u8 val; val = nla_get_u8(data[IFLA_BR_MCAST_QUERY_USE_IFADDR]); - br->multicast_query_use_ifaddr = !!val; + br_opt_toggle(br, BROPT_MULTICAST_QUERY_USE_IFADDR, !!val); } if (data[IFLA_BR_MCAST_QUERIER]) { @@ -1244,7 +1253,7 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[], __u8 mcast_stats; mcast_stats = nla_get_u8(data[IFLA_BR_MCAST_STATS_ENABLED]); - br->multicast_stats_enabled = !!mcast_stats; + br_opt_toggle(br, BROPT_MULTICAST_STATS_ENABLED, !!mcast_stats); } if (data[IFLA_BR_MCAST_IGMP_VERSION]) { @@ -1271,19 +1280,19 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[], if (data[IFLA_BR_NF_CALL_IPTABLES]) { u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_IPTABLES]); - br->nf_call_iptables = val ? true : false; + br_opt_toggle(br, BROPT_NF_CALL_IPTABLES, !!val); } if (data[IFLA_BR_NF_CALL_IP6TABLES]) { u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_IP6TABLES]); - br->nf_call_ip6tables = val ? true : false; + br_opt_toggle(br, BROPT_NF_CALL_IP6TABLES, !!val); } if (data[IFLA_BR_NF_CALL_ARPTABLES]) { u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_ARPTABLES]); - br->nf_call_arptables = val ? true : false; + br_opt_toggle(br, BROPT_NF_CALL_ARPTABLES, !!val); } #endif @@ -1327,6 +1336,7 @@ static size_t br_get_size(const struct net_device *brdev) nla_total_size(sizeof(__be16)) + /* IFLA_BR_VLAN_PROTOCOL */ nla_total_size(sizeof(u16)) + /* IFLA_BR_VLAN_DEFAULT_PVID */ nla_total_size(sizeof(u8)) + /* IFLA_BR_VLAN_STATS_ENABLED */ + nla_total_size(sizeof(u8)) + /* IFLA_BR_VLAN_STATS_PER_PORT */ #endif nla_total_size(sizeof(u16)) + /* IFLA_BR_GROUP_FWD_MASK */ nla_total_size(sizeof(struct ifla_bridge_id)) + /* IFLA_BR_ROOT_ID */ @@ -1416,17 +1426,22 @@ static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev) #ifdef CONFIG_BRIDGE_VLAN_FILTERING if (nla_put_be16(skb, IFLA_BR_VLAN_PROTOCOL, br->vlan_proto) || nla_put_u16(skb, IFLA_BR_VLAN_DEFAULT_PVID, br->default_pvid) || - nla_put_u8(skb, IFLA_BR_VLAN_STATS_ENABLED, br->vlan_stats_enabled)) + nla_put_u8(skb, IFLA_BR_VLAN_STATS_ENABLED, + br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) || + nla_put_u8(skb, IFLA_BR_VLAN_STATS_PER_PORT, + br_opt_get(br, BROPT_VLAN_STATS_PER_PORT))) return -EMSGSIZE; #endif #ifdef CONFIG_BRIDGE_IGMP_SNOOPING if (nla_put_u8(skb, IFLA_BR_MCAST_ROUTER, br->multicast_router) || - nla_put_u8(skb, IFLA_BR_MCAST_SNOOPING, !br->multicast_disabled) || + nla_put_u8(skb, IFLA_BR_MCAST_SNOOPING, + br_opt_get(br, BROPT_MULTICAST_ENABLED)) || nla_put_u8(skb, IFLA_BR_MCAST_QUERY_USE_IFADDR, - br->multicast_query_use_ifaddr) || - nla_put_u8(skb, IFLA_BR_MCAST_QUERIER, br->multicast_querier) || + br_opt_get(br, BROPT_MULTICAST_QUERY_USE_IFADDR)) || + nla_put_u8(skb, IFLA_BR_MCAST_QUERIER, + br_opt_get(br, BROPT_MULTICAST_QUERIER)) || + nla_put_u8(skb, IFLA_BR_MCAST_STATS_ENABLED, - br->multicast_stats_enabled) || + br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED)) || nla_put_u32(skb, IFLA_BR_MCAST_HASH_ELASTICITY, br->hash_elasticity) || nla_put_u32(skb, IFLA_BR_MCAST_HASH_MAX,
br->hash_max) || @@ -1469,11 +1484,11 @@ static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev) #endif #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) if (nla_put_u8(skb, IFLA_BR_NF_CALL_IPTABLES, - br->nf_call_iptables ? 1 : 0) || + br_opt_get(br, BROPT_NF_CALL_IPTABLES) ? 1 : 0) || nla_put_u8(skb, IFLA_BR_NF_CALL_IP6TABLES, - br->nf_call_ip6tables ? 1 : 0) || + br_opt_get(br, BROPT_NF_CALL_IP6TABLES) ? 1 : 0) || nla_put_u8(skb, IFLA_BR_NF_CALL_ARPTABLES, - br->nf_call_arptables ? 1 : 0)) + br_opt_get(br, BROPT_NF_CALL_ARPTABLES) ? 1 : 0)) return -EMSGSIZE; #endif diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 11ed2029985f..10ee39fdca5c 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -54,14 +54,12 @@ typedef struct bridge_id bridge_id; typedef struct mac_addr mac_addr; typedef __u16 port_id; -struct bridge_id -{ +struct bridge_id { unsigned char prio[2]; unsigned char addr[ETH_ALEN]; }; -struct mac_addr -{ +struct mac_addr { unsigned char addr[ETH_ALEN]; }; @@ -181,6 +179,7 @@ struct net_bridge_fdb_entry { struct hlist_node fdb_node; unsigned char is_local:1, is_static:1, + is_sticky:1, added_by_user:1, added_by_external_learn:1, offloaded:1; @@ -206,8 +205,7 @@ struct net_bridge_port_group { unsigned char eth_addr[ETH_ALEN]; }; -struct net_bridge_mdb_entry -{ +struct net_bridge_mdb_entry { struct hlist_node hlist[2]; struct net_bridge *br; struct net_bridge_port_group __rcu *ports; @@ -217,8 +215,7 @@ struct net_bridge_mdb_entry bool host_joined; }; -struct net_bridge_mdb_htable -{ +struct net_bridge_mdb_htable { struct hlist_head *mhash; struct rcu_head rcu; struct net_bridge_mdb_htable *old; @@ -309,16 +306,32 @@ static inline struct net_bridge_port *br_port_get_rtnl_rcu(const struct net_devi rcu_dereference_rtnl(dev->rx_handler_data) : NULL; } +enum net_bridge_opts { + BROPT_VLAN_ENABLED, + BROPT_VLAN_STATS_ENABLED, + BROPT_NF_CALL_IPTABLES, + BROPT_NF_CALL_IP6TABLES, + BROPT_NF_CALL_ARPTABLES, + BROPT_GROUP_ADDR_SET, + BROPT_MULTICAST_ENABLED, + BROPT_MULTICAST_QUERIER, + BROPT_MULTICAST_QUERY_USE_IFADDR, + BROPT_MULTICAST_STATS_ENABLED, + BROPT_HAS_IPV6_ADDR, + BROPT_NEIGH_SUPPRESS_ENABLED, + BROPT_MTU_SET_BY_USER, + BROPT_VLAN_STATS_PER_PORT, +}; + struct net_bridge { spinlock_t lock; spinlock_t hash_lock; struct list_head port_list; struct net_device *dev; struct pcpu_sw_netstats __percpu *stats; + unsigned long options; /* These fields are accessed on each packet */ #ifdef CONFIG_BRIDGE_VLAN_FILTERING - u8 vlan_enabled; - u8 vlan_stats_enabled; __be16 vlan_proto; u16 default_pvid; struct net_bridge_vlan_group __rcu *vlgrp; @@ -330,9 +343,6 @@ struct net_bridge { struct rtable fake_rtable; struct rt6_info fake_rt6_info; }; - bool nf_call_iptables; - bool nf_call_ip6tables; - bool nf_call_arptables; #endif u16 group_fwd_mask; u16 group_fwd_mask_required; @@ -340,7 +350,6 @@ struct net_bridge { /* STP */ bridge_id designated_root; bridge_id bridge_id; - u32 root_path_cost; unsigned char topology_change; unsigned char topology_change_detected; u16 root_port; @@ -352,9 +361,9 @@ struct net_bridge { unsigned long bridge_hello_time; unsigned long bridge_forward_delay; unsigned long bridge_ageing_time; + u32 root_path_cost; u8 group_addr[ETH_ALEN]; - bool group_addr_set; enum { BR_NO_STP, /* no spanning tree */ @@ -363,13 +372,6 @@ struct net_bridge { } stp_enabled; #ifdef CONFIG_BRIDGE_IGMP_SNOOPING - unsigned char multicast_router; - - u8 multicast_disabled:1; - u8 multicast_querier:1; - u8 multicast_query_use_ifaddr:1; - u8 
has_ipv6_addr:1; - u8 multicast_stats_enabled:1; u32 hash_elasticity; u32 hash_max; @@ -378,7 +380,11 @@ struct net_bridge { u32 multicast_startup_query_count; u8 multicast_igmp_version; - + u8 multicast_router; +#if IS_ENABLED(CONFIG_IPV6) + u8 multicast_mld_version; +#endif + spinlock_t multicast_lock; unsigned long multicast_last_member_interval; unsigned long multicast_membership_interval; unsigned long multicast_querier_interval; @@ -386,7 +392,6 @@ struct net_bridge { unsigned long multicast_query_response_interval; unsigned long multicast_startup_query_interval; - spinlock_t multicast_lock; struct net_bridge_mdb_htable __rcu *mdb; struct hlist_head router_list; @@ -399,7 +404,6 @@ struct net_bridge { struct bridge_mcast_other_query ip6_other_query; struct bridge_mcast_own_query ip6_own_query; struct bridge_mcast_querier ip6_querier; - u8 multicast_mld_version; #endif /* IS_ENABLED(CONFIG_IPV6) */ #endif @@ -413,8 +417,6 @@ struct net_bridge { #ifdef CONFIG_NET_SWITCHDEV int offload_fwd_mark; #endif - bool neigh_suppress_enabled; - bool mtu_set_by_user; struct hlist_head fdb_list; }; @@ -492,6 +494,14 @@ static inline bool br_vlan_should_use(const struct net_bridge_vlan *v) return true; } +static inline int br_opt_get(const struct net_bridge *br, + enum net_bridge_opts opt) +{ + return test_bit(opt, &br->options); +} + +void br_opt_toggle(struct net_bridge *br, enum net_bridge_opts opt, bool on); + /* br_device.c */ void br_dev_setup(struct net_device *dev); void br_dev_delete(struct net_device *dev, struct list_head *list); @@ -698,8 +708,8 @@ __br_multicast_querier_exists(struct net_bridge *br, { bool own_querier_enabled; - if (br->multicast_querier) { - if (is_ipv6 && !br->has_ipv6_addr) + if (br_opt_get(br, BROPT_MULTICAST_QUERIER)) { + if (is_ipv6 && !br_opt_get(br, BROPT_HAS_IPV6_ADDR)) own_querier_enabled = false; else own_querier_enabled = true; @@ -850,6 +860,7 @@ int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val); int __br_vlan_set_proto(struct net_bridge *br, __be16 proto); int br_vlan_set_proto(struct net_bridge *br, unsigned long val); int br_vlan_set_stats(struct net_bridge *br, unsigned long val); +int br_vlan_set_stats_per_port(struct net_bridge *br, unsigned long val); int br_vlan_init(struct net_bridge *br); int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val); int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid); diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c index 0318a69888d4..60182bef6341 100644 --- a/net/bridge/br_sysfs_br.c +++ b/net/bridge/br_sysfs_br.c @@ -303,7 +303,7 @@ static ssize_t group_addr_store(struct device *d, ether_addr_copy(br->group_addr, new_addr); spin_unlock_bh(&br->lock); - br->group_addr_set = true; + br_opt_toggle(br, BROPT_GROUP_ADDR_SET, true); br_recalculate_fwd_mask(br); netdev_state_change(br->dev); @@ -349,7 +349,7 @@ static ssize_t multicast_snooping_show(struct device *d, char *buf) { struct net_bridge *br = to_bridge(d); - return sprintf(buf, "%d\n", !br->multicast_disabled); + return sprintf(buf, "%d\n", br_opt_get(br, BROPT_MULTICAST_ENABLED)); } static ssize_t multicast_snooping_store(struct device *d, @@ -365,12 +365,13 @@ static ssize_t multicast_query_use_ifaddr_show(struct device *d, char *buf) { struct net_bridge *br = to_bridge(d); - return sprintf(buf, "%d\n", br->multicast_query_use_ifaddr); + return sprintf(buf, "%d\n", + br_opt_get(br, BROPT_MULTICAST_QUERY_USE_IFADDR)); } static int set_query_use_ifaddr(struct net_bridge *br, unsigned long val) { - 
br->multicast_query_use_ifaddr = !!val; + br_opt_toggle(br, BROPT_MULTICAST_QUERY_USE_IFADDR, !!val); return 0; } @@ -388,7 +389,7 @@ static ssize_t multicast_querier_show(struct device *d, char *buf) { struct net_bridge *br = to_bridge(d); - return sprintf(buf, "%d\n", br->multicast_querier); + return sprintf(buf, "%d\n", br_opt_get(br, BROPT_MULTICAST_QUERIER)); } static ssize_t multicast_querier_store(struct device *d, @@ -636,12 +637,13 @@ static ssize_t multicast_stats_enabled_show(struct device *d, { struct net_bridge *br = to_bridge(d); - return sprintf(buf, "%u\n", br->multicast_stats_enabled); + return sprintf(buf, "%d\n", + br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED)); } static int set_stats_enabled(struct net_bridge *br, unsigned long val) { - br->multicast_stats_enabled = !!val; + br_opt_toggle(br, BROPT_MULTICAST_STATS_ENABLED, !!val); return 0; } @@ -678,12 +680,12 @@ static ssize_t nf_call_iptables_show( struct device *d, struct device_attribute *attr, char *buf) { struct net_bridge *br = to_bridge(d); - return sprintf(buf, "%u\n", br->nf_call_iptables); + return sprintf(buf, "%u\n", br_opt_get(br, BROPT_NF_CALL_IPTABLES)); } static int set_nf_call_iptables(struct net_bridge *br, unsigned long val) { - br->nf_call_iptables = val ? true : false; + br_opt_toggle(br, BROPT_NF_CALL_IPTABLES, !!val); return 0; } @@ -699,12 +701,12 @@ static ssize_t nf_call_ip6tables_show( struct device *d, struct device_attribute *attr, char *buf) { struct net_bridge *br = to_bridge(d); - return sprintf(buf, "%u\n", br->nf_call_ip6tables); + return sprintf(buf, "%u\n", br_opt_get(br, BROPT_NF_CALL_IP6TABLES)); } static int set_nf_call_ip6tables(struct net_bridge *br, unsigned long val) { - br->nf_call_ip6tables = val ? true : false; + br_opt_toggle(br, BROPT_NF_CALL_IP6TABLES, !!val); return 0; } @@ -720,12 +722,12 @@ static ssize_t nf_call_arptables_show( struct device *d, struct device_attribute *attr, char *buf) { struct net_bridge *br = to_bridge(d); - return sprintf(buf, "%u\n", br->nf_call_arptables); + return sprintf(buf, "%u\n", br_opt_get(br, BROPT_NF_CALL_ARPTABLES)); } static int set_nf_call_arptables(struct net_bridge *br, unsigned long val) { - br->nf_call_arptables = val ? 
true : false; + br_opt_toggle(br, BROPT_NF_CALL_ARPTABLES, !!val); return 0; } @@ -743,7 +745,7 @@ static ssize_t vlan_filtering_show(struct device *d, char *buf) { struct net_bridge *br = to_bridge(d); - return sprintf(buf, "%d\n", br->vlan_enabled); + return sprintf(buf, "%d\n", br_opt_get(br, BROPT_VLAN_ENABLED)); } static ssize_t vlan_filtering_store(struct device *d, @@ -791,7 +793,7 @@ static ssize_t vlan_stats_enabled_show(struct device *d, char *buf) { struct net_bridge *br = to_bridge(d); - return sprintf(buf, "%u\n", br->vlan_stats_enabled); + return sprintf(buf, "%u\n", br_opt_get(br, BROPT_VLAN_STATS_ENABLED)); } static ssize_t vlan_stats_enabled_store(struct device *d, @@ -801,6 +803,22 @@ static ssize_t vlan_stats_enabled_store(struct device *d, return store_bridge_parm(d, buf, len, br_vlan_set_stats); } static DEVICE_ATTR_RW(vlan_stats_enabled); + +static ssize_t vlan_stats_per_port_show(struct device *d, + struct device_attribute *attr, + char *buf) +{ + struct net_bridge *br = to_bridge(d); + return sprintf(buf, "%u\n", br_opt_get(br, BROPT_VLAN_STATS_PER_PORT)); +} + +static ssize_t vlan_stats_per_port_store(struct device *d, + struct device_attribute *attr, + const char *buf, size_t len) +{ + return store_bridge_parm(d, buf, len, br_vlan_set_stats_per_port); +} +static DEVICE_ATTR_RW(vlan_stats_per_port); #endif static struct attribute *bridge_attrs[] = { @@ -854,6 +872,7 @@ static struct attribute *bridge_attrs[] = { &dev_attr_vlan_protocol.attr, &dev_attr_default_pvid.attr, &dev_attr_vlan_stats_enabled.attr, + &dev_attr_vlan_stats_per_port.attr, #endif NULL }; diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c index 7df269092103..9b707234e4ae 100644 --- a/net/bridge/br_vlan.c +++ b/net/bridge/br_vlan.c @@ -190,6 +190,19 @@ static void br_vlan_put_master(struct net_bridge_vlan *masterv) } } +static void nbp_vlan_rcu_free(struct rcu_head *rcu) +{ + struct net_bridge_vlan *v; + + v = container_of(rcu, struct net_bridge_vlan, rcu); + WARN_ON(br_vlan_is_master(v)); + /* if we had per-port stats configured then free them here */ + if (v->brvlan->stats != v->stats) + free_percpu(v->stats); + v->stats = NULL; + kfree(v); +} + /* This is the shared VLAN add function which works for both ports and bridge * devices. 
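nbp_vlan_rcu_free() above exists because kfree_rcu() can only free the object itself, while a per-port stats allocation now needs a second free_percpu() once readers are gone. The general call_rcu()/container_of() shape it instantiates:

struct example_entry {
	struct rcu_head rcu;
	void __percpu *stats;		/* extra allocation to release */
};

static void example_rcu_free(struct rcu_head *rcu)
{
	struct example_entry *e = container_of(rcu, struct example_entry, rcu);

	free_percpu(e->stats);		/* work kfree_rcu() cannot express */
	kfree(e);
}

static void example_del(struct example_entry *e)
{
	/* only after e is unlinked from every RCU-visible structure */
	call_rcu(&e->rcu, example_rcu_free);
}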
There are four possible calls to this function in terms of the * vlan entry type: @@ -245,7 +258,15 @@ static int __vlan_add(struct net_bridge_vlan *v, u16 flags) if (!masterv) goto out_filt; v->brvlan = masterv; - v->stats = masterv->stats; + if (br_opt_get(br, BROPT_VLAN_STATS_PER_PORT)) { + v->stats = netdev_alloc_pcpu_stats(struct br_vlan_stats); + if (!v->stats) { + err = -ENOMEM; + goto out_filt; + } + } else { + v->stats = masterv->stats; + } } else { err = br_switchdev_port_vlan_add(dev, v->vid, flags); if (err && err != -EOPNOTSUPP) @@ -329,7 +350,7 @@ static int __vlan_del(struct net_bridge_vlan *v) rhashtable_remove_fast(&vg->vlan_hash, &v->vnode, br_vlan_rht_params); __vlan_del_list(v); - kfree_rcu(v, rcu); + call_rcu(&v->rcu, nbp_vlan_rcu_free); } br_vlan_put_master(masterv); @@ -386,7 +407,7 @@ struct sk_buff *br_handle_vlan(struct net_bridge *br, return NULL; } } - if (br->vlan_stats_enabled) { + if (br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) { stats = this_cpu_ptr(v->stats); u64_stats_update_begin(&stats->syncp); stats->tx_bytes += skb->len; @@ -475,14 +496,14 @@ static bool __allowed_ingress(const struct net_bridge *br, skb->vlan_tci |= pvid; /* if stats are disabled we can avoid the lookup */ - if (!br->vlan_stats_enabled) + if (!br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) return true; } v = br_vlan_find(vg, *vid); if (!v || !br_vlan_should_use(v)) goto drop; - if (br->vlan_stats_enabled) { + if (br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) { stats = this_cpu_ptr(v->stats); u64_stats_update_begin(&stats->syncp); stats->rx_bytes += skb->len; @@ -504,7 +525,7 @@ bool br_allowed_ingress(const struct net_bridge *br, /* If VLAN filtering is disabled on the bridge, all packets are * permitted. */ - if (!br->vlan_enabled) { + if (!br_opt_get(br, BROPT_VLAN_ENABLED)) { BR_INPUT_SKB_CB(skb)->vlan_filtered = false; return true; } @@ -538,7 +559,7 @@ bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid) struct net_bridge *br = p->br; /* If filtering was disabled at input, let it pass. */ - if (!br->vlan_enabled) + if (!br_opt_get(br, BROPT_VLAN_ENABLED)) return true; vg = nbp_vlan_group_rcu(p); @@ -695,11 +716,12 @@ struct net_bridge_vlan *br_vlan_find(struct net_bridge_vlan_group *vg, u16 vid) /* Must be protected by RTNL. */ static void recalculate_group_addr(struct net_bridge *br) { - if (br->group_addr_set) + if (br_opt_get(br, BROPT_GROUP_ADDR_SET)) return; spin_lock_bh(&br->lock); - if (!br->vlan_enabled || br->vlan_proto == htons(ETH_P_8021Q)) { + if (!br_opt_get(br, BROPT_VLAN_ENABLED) || + br->vlan_proto == htons(ETH_P_8021Q)) { /* Bridge Group Address */ br->group_addr[5] = 0x00; } else { /* vlan_enabled && ETH_P_8021AD */ @@ -712,7 +734,8 @@ static void recalculate_group_addr(struct net_bridge *br) /* Must be protected by RTNL. 
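With BROPT_VLAN_STATS_PER_PORT set, __vlan_add() gives each port vlan its own per-CPU counters instead of aliasing the master's. A sketch of that allocation plus the u64_stats discipline the ingress/egress paths above rely on (assumes softirq context, so this_cpu_ptr() is safe):

struct example_vlan_stats {
	u64 rx_bytes;
	u64 rx_packets;
	struct u64_stats_sync syncp;
};

static struct example_vlan_stats __percpu *example_alloc(void)
{
	/* zeroes every CPU's copy and runs u64_stats_init() on each syncp */
	return netdev_alloc_pcpu_stats(struct example_vlan_stats);
}

static void example_count_rx(struct example_vlan_stats __percpu *pcpu,
			     unsigned int len)
{
	struct example_vlan_stats *stats = this_cpu_ptr(pcpu);

	u64_stats_update_begin(&stats->syncp);	/* 32-bit readers still see
						 * a consistent 64-bit pair */
	stats->rx_bytes += len;
	stats->rx_packets++;
	u64_stats_update_end(&stats->syncp);
}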
*/ void br_recalculate_fwd_mask(struct net_bridge *br) { - if (!br->vlan_enabled || br->vlan_proto == htons(ETH_P_8021Q)) + if (!br_opt_get(br, BROPT_VLAN_ENABLED) || + br->vlan_proto == htons(ETH_P_8021Q)) br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT; else /* vlan_enabled && ETH_P_8021AD */ br->group_fwd_mask_required = BR_GROUPFWD_8021AD & @@ -729,14 +752,14 @@ int __br_vlan_filter_toggle(struct net_bridge *br, unsigned long val) }; int err; - if (br->vlan_enabled == val) + if (br_opt_get(br, BROPT_VLAN_ENABLED) == !!val) return 0; err = switchdev_port_attr_set(br->dev, &attr); if (err && err != -EOPNOTSUPP) return err; - br->vlan_enabled = val; + br_opt_toggle(br, BROPT_VLAN_ENABLED, !!val); br_manage_promisc(br); recalculate_group_addr(br); br_recalculate_fwd_mask(br); @@ -753,7 +776,7 @@ bool br_vlan_enabled(const struct net_device *dev) { struct net_bridge *br = netdev_priv(dev); - return !!br->vlan_enabled; + return br_opt_get(br, BROPT_VLAN_ENABLED); } EXPORT_SYMBOL_GPL(br_vlan_enabled); @@ -819,7 +842,31 @@ int br_vlan_set_stats(struct net_bridge *br, unsigned long val) switch (val) { case 0: case 1: - br->vlan_stats_enabled = val; + br_opt_toggle(br, BROPT_VLAN_STATS_ENABLED, !!val); + break; + default: + return -EINVAL; + } + + return 0; +} + +int br_vlan_set_stats_per_port(struct net_bridge *br, unsigned long val) +{ + struct net_bridge_port *p; + + /* allow to change the option if there are no port vlans configured */ + list_for_each_entry(p, &br->port_list, list) { + struct net_bridge_vlan_group *vg = nbp_vlan_group(p); + + if (vg->num_vlans) + return -EBUSY; + } + + switch (val) { + case 0: + case 1: + br_opt_toggle(br, BROPT_VLAN_STATS_PER_PORT, !!val); break; default: return -EINVAL; @@ -877,8 +924,7 @@ int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid) return 0; } - changed = kcalloc(BITS_TO_LONGS(BR_MAX_PORTS), sizeof(unsigned long), - GFP_KERNEL); + changed = bitmap_zalloc(BR_MAX_PORTS, GFP_KERNEL); if (!changed) return -ENOMEM; @@ -925,7 +971,7 @@ int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid) br->default_pvid = pvid; out: - kfree(changed); + bitmap_free(changed); return err; err_port: @@ -965,7 +1011,7 @@ int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val) goto out; /* Only allow default pvid change when filtering is disabled */ - if (br->vlan_enabled) { + if (br_opt_get(br, BROPT_VLAN_ENABLED)) { pr_info_once("Please disable vlan filtering to change default_pvid\n"); err = -EPERM; goto out; @@ -1019,7 +1065,7 @@ int nbp_vlan_init(struct net_bridge_port *p) .orig_dev = p->br->dev, .id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING, .flags = SWITCHDEV_F_SKIP_EOPNOTSUPP, - .u.vlan_filtering = p->br->vlan_enabled, + .u.vlan_filtering = br_opt_get(p->br, BROPT_VLAN_ENABLED), }; struct net_bridge_vlan_group *vg; int ret = -ENOMEM; diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c index b82440e1fcb4..a931a71ef6df 100644 --- a/net/caif/cfrfml.c +++ b/net/caif/cfrfml.c @@ -264,9 +264,6 @@ static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt) frontpkt = rearpkt; rearpkt = NULL; - err = -ENOMEM; - if (frontpkt == NULL) - goto out; err = -EPROTO; if (cfpkt_add_head(frontpkt, head, 6) < 0) goto out; diff --git a/net/core/dev.c b/net/core/dev.c index 93243479085f..a4d39b87b4e5 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1991,6 +1991,9 @@ void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev) rcu_read_lock(); again: list_for_each_entry_rcu(ptype, ptype_list, list) { + if (ptype->ignore_outgoing) 
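The default_pvid hunk is a pure readability change: bitmap_zalloc()/bitmap_free() replace the open-coded kcalloc(BITS_TO_LONGS(...), sizeof(unsigned long), ...) pair while allocating the identical BR_MAX_PORTS-bit map. Equivalent shape:

static int example_update_pvid(void)
{
	unsigned long *changed;

	changed = bitmap_zalloc(BR_MAX_PORTS, GFP_KERNEL); /* n bits, zeroed */
	if (!changed)
		return -ENOMEM;
	/* ... set_bit(p->port_no, changed) for each port whose pvid moved ... */
	bitmap_free(changed);
	return 0;
}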
+ continue; + /* Never send packets back to the socket * they originated from - MvS (miquels@drinkel.ow.org) */ @@ -3250,7 +3253,7 @@ struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *de while (skb) { struct sk_buff *next = skb->next; - skb->next = NULL; + skb_mark_not_on_list(skb); rc = xmit_one(skb, dev, txq, next != NULL); if (unlikely(!dev_xmit_complete(rc))) { skb->next = next; @@ -3350,7 +3353,7 @@ struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *d for (; skb != NULL; skb = next) { next = skb->next; - skb->next = NULL; + skb_mark_not_on_list(skb); /* in case skb wont be segmented, point to itself */ skb->prev = skb; @@ -5314,8 +5317,7 @@ static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index, list_for_each_entry_safe_reverse(skb, p, head, list) { if (flush_old && NAPI_GRO_CB(skb)->age == jiffies) return; - list_del(&skb->list); - skb->next = NULL; + skb_list_del_init(skb); napi_gro_complete(skb); napi->gro_hash[index].count--; } @@ -5500,8 +5502,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED; if (pp) { - list_del(&pp->list); - pp->next = NULL; + skb_list_del_init(pp); napi_gro_complete(pp); napi->gro_hash[hash].count--; } diff --git a/net/core/devlink.c b/net/core/devlink.c index 6bc42933be4a..3a4b29a13d31 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -1626,7 +1626,7 @@ static int devlink_nl_cmd_eswitch_set_doit(struct sk_buff *skb, if (!ops->eswitch_mode_set) return -EOPNOTSUPP; mode = nla_get_u16(info->attrs[DEVLINK_ATTR_ESWITCH_MODE]); - err = ops->eswitch_mode_set(devlink, mode); + err = ops->eswitch_mode_set(devlink, mode, info->extack); if (err) return err; } @@ -1636,7 +1636,8 @@ static int devlink_nl_cmd_eswitch_set_doit(struct sk_buff *skb, return -EOPNOTSUPP; inline_mode = nla_get_u8( info->attrs[DEVLINK_ATTR_ESWITCH_INLINE_MODE]); - err = ops->eswitch_inline_mode_set(devlink, inline_mode); + err = ops->eswitch_inline_mode_set(devlink, inline_mode, + info->extack); if (err) return err; } @@ -1645,7 +1646,8 @@ static int devlink_nl_cmd_eswitch_set_doit(struct sk_buff *skb, if (!ops->eswitch_encap_mode_set) return -EOPNOTSUPP; encap_mode = nla_get_u8(info->attrs[DEVLINK_ATTR_ESWITCH_ENCAP_MODE]); - err = ops->eswitch_encap_mode_set(devlink, encap_mode); + err = ops->eswitch_encap_mode_set(devlink, encap_mode, + info->extack); if (err) return err; } @@ -2675,6 +2677,21 @@ static const struct devlink_param devlink_param_generic[] = { .name = DEVLINK_PARAM_GENERIC_REGION_SNAPSHOT_NAME, .type = DEVLINK_PARAM_GENERIC_REGION_SNAPSHOT_TYPE, }, + { + .id = DEVLINK_PARAM_GENERIC_ID_IGNORE_ARI, + .name = DEVLINK_PARAM_GENERIC_IGNORE_ARI_NAME, + .type = DEVLINK_PARAM_GENERIC_IGNORE_ARI_TYPE, + }, + { + .id = DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX, + .name = DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MAX_NAME, + .type = DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MAX_TYPE, + }, + { + .id = DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN, + .name = DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MIN_NAME, + .type = DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MIN_TYPE, + }, }; static int devlink_param_generic_verify(const struct devlink_param *param) @@ -3495,7 +3512,7 @@ static int devlink_nl_cmd_region_read_dumpit(struct sk_buff *skb, start_offset = *((u64 *)&cb->args[0]); err = nlmsg_parse(cb->nlh, GENL_HDRLEN + devlink_nl_family.hdrsize, - attrs, DEVLINK_ATTR_MAX, ops->policy, NULL); + attrs, DEVLINK_ATTR_MAX, ops->policy, 
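skb_mark_not_on_list() and skb_list_del_init(), used repeatedly above, name two unlink operations that used to be open-coded at every call site. Their likely shape, consistent with how they are used here (the authoritative definitions live in include/linux/skbuff.h):

static inline void skb_mark_not_on_list_sketch(struct sk_buff *skb)
{
	skb->next = NULL;		/* detach from a singly linked chain */
}

static inline void skb_list_del_init_sketch(struct sk_buff *skb)
{
	__list_del_entry(&skb->list);	/* unlink from the GRO list_head... */
	skb->next = NULL;		/* ...and clear the chain pointer */
}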
cb->extack); if (err) goto out; diff --git a/net/core/ethtool.c b/net/core/ethtool.c index 0762aaf8e964..4cc603dfc9ef 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -27,6 +27,7 @@ #include <linux/rtnetlink.h> #include <linux/sched/signal.h> #include <linux/net.h> +#include <net/xdp_sock.h> /* * Some useful ethtool_ops methods that're device independent. @@ -539,47 +540,17 @@ struct ethtool_link_usettings { } link_modes; }; -/* Internal kernel helper to query a device ethtool_link_settings. - * - * Backward compatibility note: for compatibility with legacy drivers - * that implement only the ethtool_cmd API, this has to work with both - * drivers implementing get_link_ksettings API and drivers - * implementing get_settings API. When drivers implement get_settings - * and report ethtool_cmd deprecated fields - * (transceiver/maxrxpkt/maxtxpkt), these fields are silently ignored - * because the resulting struct ethtool_link_settings does not report them. - */ +/* Internal kernel helper to query a device ethtool_link_settings. */ int __ethtool_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *link_ksettings) { - int err; - struct ethtool_cmd cmd; - ASSERT_RTNL(); - if (dev->ethtool_ops->get_link_ksettings) { - memset(link_ksettings, 0, sizeof(*link_ksettings)); - return dev->ethtool_ops->get_link_ksettings(dev, - link_ksettings); - } - - /* driver doesn't support %ethtool_link_ksettings API. revert to - * legacy %ethtool_cmd API, unless it's not supported either. - * TODO: remove when ethtool_ops::get_settings disappears internally - */ - if (!dev->ethtool_ops->get_settings) + if (!dev->ethtool_ops->get_link_ksettings) return -EOPNOTSUPP; - memset(&cmd, 0, sizeof(cmd)); - cmd.cmd = ETHTOOL_GSET; - err = dev->ethtool_ops->get_settings(dev, &cmd); - if (err < 0) - return err; - - /* we ignore deprecated fields transceiver/maxrxpkt/maxtxpkt - */ - convert_legacy_settings_to_link_ksettings(link_ksettings, &cmd); - return err; + memset(link_ksettings, 0, sizeof(*link_ksettings)); + return dev->ethtool_ops->get_link_ksettings(dev, link_ksettings); } EXPORT_SYMBOL(__ethtool_get_link_ksettings); @@ -635,16 +606,7 @@ store_link_ksettings_for_user(void __user *to, return 0; } -/* Query device for its ethtool_link_settings. - * - * Backward compatibility note: this function must fail when driver - * does not implement ethtool::get_link_ksettings, even if legacy - * ethtool_ops::get_settings is implemented. This tells new versions - * of ethtool that they should use the legacy API %ETHTOOL_GSET for - * this driver, so that they can correctly access the ethtool_cmd - * deprecated fields (transceiver/maxrxpkt/maxtxpkt), until no driver - * implements ethtool_ops::get_settings anymore. - */ +/* Query device for its ethtool_link_settings. */ static int ethtool_get_link_ksettings(struct net_device *dev, void __user *useraddr) { @@ -652,7 +614,6 @@ static int ethtool_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings link_ksettings; ASSERT_RTNL(); - if (!dev->ethtool_ops->get_link_ksettings) return -EOPNOTSUPP; @@ -699,16 +660,7 @@ static int ethtool_get_link_ksettings(struct net_device *dev, return store_link_ksettings_for_user(useraddr, &link_ksettings); } -/* Update device ethtool_link_settings. - * - * Backward compatibility note: this function must fail when driver - * does not implement ethtool::set_link_ksettings, even if legacy - * ethtool_ops::set_settings is implemented. 
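After this removal, a driver that still implements only the legacy get_settings op simply reports -EOPNOTSUPP here. A hedged sketch of the minimal replacement a fixed-link driver would carry (hypothetical driver; PHY-attached drivers typically just call phy_ethtool_get_link_ksettings() instead):

static int example_get_link_ksettings(struct net_device *dev,
				      struct ethtool_link_ksettings *cmd)
{
	/* the core zeroes *cmd before calling in */
	ethtool_link_ksettings_zero_link_mode(cmd, supported);
	ethtool_link_ksettings_add_link_mode(cmd, supported, 1000baseT_Full);

	cmd->base.speed = SPEED_1000;
	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.port = PORT_TP;
	cmd->base.autoneg = AUTONEG_DISABLE;
	return 0;
}

static const struct ethtool_ops example_ethtool_ops = {
	.get_link_ksettings = example_get_link_ksettings,
};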
This tells new versions - * of ethtool that they should use the legacy API %ETHTOOL_SSET for - * this driver, so that they can correctly update the ethtool_cmd - * deprecated fields (transceiver/maxrxpkt/maxtxpkt), until no driver - * implements ethtool_ops::get_settings anymore. - */ +/* Update device ethtool_link_settings. */ static int ethtool_set_link_ksettings(struct net_device *dev, void __user *useraddr) { @@ -746,51 +698,31 @@ static int ethtool_set_link_ksettings(struct net_device *dev, /* Query device for its ethtool_cmd settings. * - * Backward compatibility note: for compatibility with legacy ethtool, - * this has to work with both drivers implementing get_link_ksettings - * API and drivers implementing get_settings API. When drivers - * implement get_link_ksettings and report higher link mode bits, a - * kernel warning is logged once (with name of 1st driver/device) to - * recommend user to upgrade ethtool, but the command is successful - * (only the lower link mode bits reported back to user). + * Backward compatibility note: for compatibility with legacy ethtool, this is + * now implemented via get_link_ksettings. When driver reports higher link mode + * bits, a kernel warning is logged once (with name of 1st driver/device) to + * recommend user to upgrade ethtool, but the command is successful (only the + * lower link mode bits reported back to user). Deprecated fields from + * ethtool_cmd (transceiver/maxrxpkt/maxtxpkt) are always set to zero. */ static int ethtool_get_settings(struct net_device *dev, void __user *useraddr) { + struct ethtool_link_ksettings link_ksettings; struct ethtool_cmd cmd; + int err; ASSERT_RTNL(); + if (!dev->ethtool_ops->get_link_ksettings) + return -EOPNOTSUPP; - if (dev->ethtool_ops->get_link_ksettings) { - /* First, use link_ksettings API if it is supported */ - int err; - struct ethtool_link_ksettings link_ksettings; - - memset(&link_ksettings, 0, sizeof(link_ksettings)); - err = dev->ethtool_ops->get_link_ksettings(dev, - &link_ksettings); - if (err < 0) - return err; - convert_link_ksettings_to_legacy_settings(&cmd, - &link_ksettings); - - /* send a sensible cmd tag back to user */ - cmd.cmd = ETHTOOL_GSET; - } else { - /* driver doesn't support %ethtool_link_ksettings - * API. revert to legacy %ethtool_cmd API, unless it's - * not supported either. - */ - int err; - - if (!dev->ethtool_ops->get_settings) - return -EOPNOTSUPP; + memset(&link_ksettings, 0, sizeof(link_ksettings)); + err = dev->ethtool_ops->get_link_ksettings(dev, &link_ksettings); + if (err < 0) + return err; + convert_link_ksettings_to_legacy_settings(&cmd, &link_ksettings); - memset(&cmd, 0, sizeof(cmd)); - cmd.cmd = ETHTOOL_GSET; - err = dev->ethtool_ops->get_settings(dev, &cmd); - if (err < 0) - return err; - } + /* send a sensible cmd tag back to user */ + cmd.cmd = ETHTOOL_GSET; if (copy_to_user(useraddr, &cmd, sizeof(cmd))) return -EFAULT; @@ -800,48 +732,29 @@ static int ethtool_get_settings(struct net_device *dev, void __user *useraddr) /* Update device link settings with given ethtool_cmd. * - * Backward compatibility note: for compatibility with legacy ethtool, - * this has to work with both drivers implementing set_link_ksettings - * API and drivers implementing set_settings API. When drivers - * implement set_link_ksettings and user's request updates deprecated - * ethtool_cmd fields (transceiver/maxrxpkt/maxtxpkt), a kernel - * warning is logged once (with name of 1st driver/device) to - * recommend user to upgrade ethtool, and the request is rejected. 
+ * Backward compatibility note: for compatibility with legacy ethtool, this is + * now always implemented via set_link_settings. When user's request updates + * deprecated ethtool_cmd fields (transceiver/maxrxpkt/maxtxpkt), a kernel + * warning is logged once (with name of 1st driver/device) to recommend user to + * upgrade ethtool, and the request is rejected. */ static int ethtool_set_settings(struct net_device *dev, void __user *useraddr) { + struct ethtool_link_ksettings link_ksettings; struct ethtool_cmd cmd; ASSERT_RTNL(); if (copy_from_user(&cmd, useraddr, sizeof(cmd))) return -EFAULT; - - /* first, try new %ethtool_link_ksettings API. */ - if (dev->ethtool_ops->set_link_ksettings) { - struct ethtool_link_ksettings link_ksettings; - - if (!convert_legacy_settings_to_link_ksettings(&link_ksettings, - &cmd)) - return -EINVAL; - - link_ksettings.base.cmd = ETHTOOL_SLINKSETTINGS; - link_ksettings.base.link_mode_masks_nwords - = __ETHTOOL_LINK_MODE_MASK_NU32; - return dev->ethtool_ops->set_link_ksettings(dev, - &link_ksettings); - } - - /* legacy %ethtool_cmd API */ - - /* TODO: return -EOPNOTSUPP when ethtool_ops::get_settings - * disappears internally - */ - - if (!dev->ethtool_ops->set_settings) + if (!dev->ethtool_ops->set_link_ksettings) return -EOPNOTSUPP; - return dev->ethtool_ops->set_settings(dev, &cmd); + if (!convert_legacy_settings_to_link_ksettings(&link_ksettings, &cmd)) + return -EINVAL; + link_ksettings.base.link_mode_masks_nwords = + __ETHTOOL_LINK_MODE_MASK_NU32; + return dev->ethtool_ops->set_link_ksettings(dev, &link_ksettings); } static noinline_for_stack int ethtool_get_drvinfo(struct net_device *dev, @@ -1750,8 +1663,10 @@ static noinline_for_stack int ethtool_get_channels(struct net_device *dev, static noinline_for_stack int ethtool_set_channels(struct net_device *dev, void __user *useraddr) { - struct ethtool_channels channels, max = { .cmd = ETHTOOL_GCHANNELS }; + struct ethtool_channels channels, curr = { .cmd = ETHTOOL_GCHANNELS }; + u16 from_channel, to_channel; u32 max_rx_in_use = 0; + unsigned int i; if (!dev->ethtool_ops->set_channels || !dev->ethtool_ops->get_channels) return -EOPNOTSUPP; @@ -1759,13 +1674,13 @@ static noinline_for_stack int ethtool_set_channels(struct net_device *dev, if (copy_from_user(&channels, useraddr, sizeof(channels))) return -EFAULT; - dev->ethtool_ops->get_channels(dev, &max); + dev->ethtool_ops->get_channels(dev, &curr); /* ensure new counts are within the maximums */ - if ((channels.rx_count > max.max_rx) || - (channels.tx_count > max.max_tx) || - (channels.combined_count > max.max_combined) || - (channels.other_count > max.max_other)) + if (channels.rx_count > curr.max_rx || + channels.tx_count > curr.max_tx || + channels.combined_count > curr.max_combined || + channels.other_count > curr.max_other) return -EINVAL; /* ensure the new Rx count fits within the configured Rx flow @@ -1775,6 +1690,14 @@ static noinline_for_stack int ethtool_set_channels(struct net_device *dev, (channels.combined_count + channels.rx_count) <= max_rx_in_use) return -EINVAL; + /* Disabling channels, query zero-copy AF_XDP sockets */ + from_channel = channels.combined_count + + min(channels.rx_count, channels.tx_count); + to_channel = curr.combined_count + max(curr.rx_count, curr.tx_count); + for (i = from_channel; i < to_channel; i++) + if (xdp_get_umem_from_qid(dev, i)) + return -EINVAL; + return dev->ethtool_ops->set_channels(dev, &channels); } diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c index 0ff3953f64aa..ffbb827723a2 100644 --- 
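The AF_XDP guard above computes the half-open range of queue ids that a channel change would remove and refuses if any of them has a zero-copy umem bound. Worked through with illustrative numbers:

/* current config: combined = 4, rx = 2, tx = 2  (queues 0..5 in use)
 *   to_channel   = curr.combined_count + max(curr.rx_count, curr.tx_count)
 *                = 4 + max(2, 2) = 6
 * requested:      combined = 2, rx = 1, tx = 1
 *   from_channel = channels.combined_count
 *                + min(channels.rx_count, channels.tx_count)
 *                = 2 + min(1, 1) = 3
 * queues 3, 4 and 5 would disappear, so each is probed with
 * xdp_get_umem_from_qid(dev, i); any hit fails the request with
 * -EINVAL before the driver's set_channels() ever runs.
 */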
a/net/core/fib_rules.c +++ b/net/core/fib_rules.c @@ -1063,13 +1063,47 @@ skip: return err; } +static int fib_valid_dumprule_req(const struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) +{ + struct fib_rule_hdr *frh; + + if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh))) { + NL_SET_ERR_MSG(extack, "Invalid header for fib rule dump request"); + return -EINVAL; + } + + frh = nlmsg_data(nlh); + if (frh->dst_len || frh->src_len || frh->tos || frh->table || + frh->res1 || frh->res2 || frh->action || frh->flags) { + NL_SET_ERR_MSG(extack, + "Invalid values in header for fib rule dump request"); + return -EINVAL; + } + + if (nlmsg_attrlen(nlh, sizeof(*frh))) { + NL_SET_ERR_MSG(extack, "Invalid data after header in fib rule dump request"); + return -EINVAL; + } + + return 0; +} + static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb) { + const struct nlmsghdr *nlh = cb->nlh; struct net *net = sock_net(skb->sk); struct fib_rules_ops *ops; int idx = 0, family; - family = rtnl_msg_family(cb->nlh); + if (cb->strict_check) { + int err = fib_valid_dumprule_req(nlh, cb->extack); + + if (err < 0) + return err; + } + + family = rtnl_msg_family(nlh); if (family != AF_UNSPEC) { /* Protocol specific dump request */ ops = lookup_rules_ops(net, family); diff --git a/net/core/filter.c b/net/core/filter.c index 5e00f2b85a56..4bbc6567fcb8 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -58,13 +58,17 @@ #include <net/busy_poll.h> #include <net/tcp.h> #include <net/xfrm.h> +#include <net/udp.h> #include <linux/bpf_trace.h> #include <net/xdp_sock.h> #include <linux/inetdevice.h> +#include <net/inet_hashtables.h> +#include <net/inet6_hashtables.h> #include <net/ip_fib.h> #include <net/flow.h> #include <net/arp.h> #include <net/ipv6.h> +#include <net/net_namespace.h> #include <linux/seg6_local.h> #include <net/seg6.h> #include <net/seg6_local.h> @@ -3176,6 +3180,32 @@ static int __bpf_tx_xdp(struct net_device *dev, return 0; } +static noinline int +xdp_do_redirect_slow(struct net_device *dev, struct xdp_buff *xdp, + struct bpf_prog *xdp_prog, struct bpf_redirect_info *ri) +{ + struct net_device *fwd; + u32 index = ri->ifindex; + int err; + + fwd = dev_get_by_index_rcu(dev_net(dev), index); + ri->ifindex = 0; + if (unlikely(!fwd)) { + err = -EINVAL; + goto err; + } + + err = __bpf_tx_xdp(fwd, NULL, xdp, 0); + if (unlikely(err)) + goto err; + + _trace_xdp_redirect(dev, xdp_prog, index); + return 0; +err: + _trace_xdp_redirect_err(dev, xdp_prog, index, err); + return err; +} + static int __bpf_tx_xdp_map(struct net_device *dev_rx, void *fwd, struct bpf_map *map, struct xdp_buff *xdp, @@ -3188,7 +3218,7 @@ static int __bpf_tx_xdp_map(struct net_device *dev_rx, void *fwd, struct bpf_dtab_netdev *dst = fwd; err = dev_map_enqueue(dst, xdp, dev_rx); - if (err) + if (unlikely(err)) return err; __dev_map_insert_ctx(map, index); break; @@ -3197,7 +3227,7 @@ static int __bpf_tx_xdp_map(struct net_device *dev_rx, void *fwd, struct bpf_cpu_map_entry *rcpu = fwd; err = cpu_map_enqueue(rcpu, xdp, dev_rx); - if (err) + if (unlikely(err)) return err; __cpu_map_insert_ctx(map, index); break; @@ -3238,7 +3268,7 @@ void xdp_do_flush_map(void) } EXPORT_SYMBOL_GPL(xdp_do_flush_map); -static void *__xdp_map_lookup_elem(struct bpf_map *map, u32 index) +static inline void *__xdp_map_lookup_elem(struct bpf_map *map, u32 index) { switch (map->map_type) { case BPF_MAP_TYPE_DEVMAP: @@ -3270,9 +3300,9 @@ void bpf_clear_redirect_map(struct bpf_map *map) } static int xdp_do_redirect_map(struct net_device *dev, 
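fib_valid_dumprule_req() is one instance of a pattern this series applies to many dump handlers once cb->strict_check is set: the fixed header must be exactly the advertised struct, every unused field zero, and no trailing attributes. The reusable skeleton, with a hypothetical header type:

struct example_dump_hdr {		/* hypothetical fixed header */
	__u8 family;
	__u8 pad;
	__u16 flags;
};

static int example_valid_dump_req(const struct nlmsghdr *nlh,
				  struct netlink_ext_ack *extack)
{
	const struct example_dump_hdr *hdr;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*hdr))) {
		NL_SET_ERR_MSG(extack, "Invalid header for dump request");
		return -EINVAL;
	}

	hdr = nlmsg_data(nlh);
	if (hdr->pad || hdr->flags) {		/* reserved must be zero */
		NL_SET_ERR_MSG(extack, "Invalid values in dump request");
		return -EINVAL;
	}

	if (nlmsg_attrlen(nlh, sizeof(*hdr))) {	/* no attributes allowed */
		NL_SET_ERR_MSG(extack, "Invalid data after header");
		return -EINVAL;
	}

	return 0;
}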
struct xdp_buff *xdp, - struct bpf_prog *xdp_prog, struct bpf_map *map) + struct bpf_prog *xdp_prog, struct bpf_map *map, + struct bpf_redirect_info *ri) { - struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); u32 index = ri->ifindex; void *fwd = NULL; int err; @@ -3281,11 +3311,11 @@ static int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp, WRITE_ONCE(ri->map, NULL); fwd = __xdp_map_lookup_elem(map, index); - if (!fwd) { + if (unlikely(!fwd)) { err = -EINVAL; goto err; } - if (ri->map_to_flush && ri->map_to_flush != map) + if (ri->map_to_flush && unlikely(ri->map_to_flush != map)) xdp_do_flush_map(); err = __bpf_tx_xdp_map(dev, fwd, map, xdp, index); @@ -3305,29 +3335,11 @@ int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp, { struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); struct bpf_map *map = READ_ONCE(ri->map); - struct net_device *fwd; - u32 index = ri->ifindex; - int err; - if (map) - return xdp_do_redirect_map(dev, xdp, xdp_prog, map); + if (likely(map)) + return xdp_do_redirect_map(dev, xdp, xdp_prog, map, ri); - fwd = dev_get_by_index_rcu(dev_net(dev), index); - ri->ifindex = 0; - if (unlikely(!fwd)) { - err = -EINVAL; - goto err; - } - - err = __bpf_tx_xdp(fwd, NULL, xdp, 0); - if (unlikely(err)) - goto err; - - _trace_xdp_redirect(dev, xdp_prog, index); - return 0; -err: - _trace_xdp_redirect_err(dev, xdp_prog, index, err); - return err; + return xdp_do_redirect_slow(dev, xdp, xdp_prog, ri); } EXPORT_SYMBOL_GPL(xdp_do_redirect); @@ -4013,6 +4025,12 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock, tp->snd_ssthresh = val; } break; + case TCP_SAVE_SYN: + if (val < 0 || val > 1) + ret = -EINVAL; + else + tp->save_syn = val; + break; default: ret = -EINVAL; } @@ -4042,17 +4060,29 @@ BPF_CALL_5(bpf_getsockopt, struct bpf_sock_ops_kern *, bpf_sock, if (!sk_fullsock(sk)) goto err_clear; - #ifdef CONFIG_INET if (level == SOL_TCP && sk->sk_prot->getsockopt == tcp_getsockopt) { - if (optname == TCP_CONGESTION) { - struct inet_connection_sock *icsk = inet_csk(sk); + struct inet_connection_sock *icsk; + struct tcp_sock *tp; + + switch (optname) { + case TCP_CONGESTION: + icsk = inet_csk(sk); if (!icsk->icsk_ca_ops || optlen <= 1) goto err_clear; strncpy(optval, icsk->icsk_ca_ops->name, optlen); optval[optlen - 1] = 0; - } else { + break; + case TCP_SAVED_SYN: + tp = tcp_sk(sk); + + if (optlen <= 0 || !tp->saved_syn || + optlen > tp->saved_syn[0]) + goto err_clear; + memcpy(optval, tp->saved_syn + 1, optlen); + break; + default: goto err_clear; } } else if (level == SOL_IP) { @@ -4787,6 +4817,143 @@ static const struct bpf_func_proto bpf_lwt_seg6_adjust_srh_proto = { }; #endif /* CONFIG_IPV6_SEG6_BPF */ +#ifdef CONFIG_INET +static struct sock *sk_lookup(struct net *net, struct bpf_sock_tuple *tuple, + struct sk_buff *skb, u8 family, u8 proto) +{ + int dif = skb->dev->ifindex; + bool refcounted = false; + struct sock *sk = NULL; + + if (family == AF_INET) { + __be32 src4 = tuple->ipv4.saddr; + __be32 dst4 = tuple->ipv4.daddr; + int sdif = inet_sdif(skb); + + if (proto == IPPROTO_TCP) + sk = __inet_lookup(net, &tcp_hashinfo, skb, 0, + src4, tuple->ipv4.sport, + dst4, tuple->ipv4.dport, + dif, sdif, &refcounted); + else + sk = __udp4_lib_lookup(net, src4, tuple->ipv4.sport, + dst4, tuple->ipv4.dport, + dif, sdif, &udp_table, skb); +#if IS_REACHABLE(CONFIG_IPV6) + } else { + struct in6_addr *src6 = (struct in6_addr *)&tuple->ipv6.saddr; + struct in6_addr *dst6 = (struct in6_addr *)&tuple->ipv6.daddr; + int sdif = 
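The TCP_SAVE_SYN/TCP_SAVED_SYN additions let a sockops program arm SYN saving on a listener and read the saved headers back once a child connection is established. A hedged sketch (includes and cgroup attachment elided; callback constants as in this era's UAPI):

SEC("sockops")
int example_sockops(struct bpf_sock_ops *skops)
{
	int one = 1;
	char syn[40];	/* must not exceed the saved length, or the
			 * bpf_getsockopt() call fails outright */

	switch (skops->op) {
	case BPF_SOCK_OPS_TCP_LISTEN_CB:
		/* children inherit save_syn from the listener */
		bpf_setsockopt(skops, SOL_TCP, TCP_SAVE_SYN,
			       &one, sizeof(one));
		break;
	case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
		/* copies out the saved IP + TCP headers of the SYN */
		bpf_getsockopt(skops, SOL_TCP, TCP_SAVED_SYN,
			       syn, sizeof(syn));
		break;
	}
	return 1;
}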
inet6_sdif(skb); + + if (proto == IPPROTO_TCP) + sk = __inet6_lookup(net, &tcp_hashinfo, skb, 0, + src6, tuple->ipv6.sport, + dst6, tuple->ipv6.dport, + dif, sdif, &refcounted); + else + sk = __udp6_lib_lookup(net, src6, tuple->ipv6.sport, + dst6, tuple->ipv6.dport, + dif, sdif, &udp_table, skb); +#endif + } + + if (unlikely(sk && !refcounted && !sock_flag(sk, SOCK_RCU_FREE))) { + WARN_ONCE(1, "Found non-RCU, unreferenced socket!"); + sk = NULL; + } + return sk; +} + +/* bpf_sk_lookup performs the core lookup for different types of sockets, + * taking a reference on the socket if it doesn't have the flag SOCK_RCU_FREE. + * Returns the socket as an 'unsigned long' to simplify the casting in the + * callers to satisfy BPF_CALL declarations. + */ +static unsigned long +bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len, + u8 proto, u64 netns_id, u64 flags) +{ + struct net *caller_net; + struct sock *sk = NULL; + u8 family = AF_UNSPEC; + struct net *net; + + family = len == sizeof(tuple->ipv4) ? AF_INET : AF_INET6; + if (unlikely(family == AF_UNSPEC || netns_id > U32_MAX || flags)) + goto out; + + if (skb->dev) + caller_net = dev_net(skb->dev); + else + caller_net = sock_net(skb->sk); + if (netns_id) { + net = get_net_ns_by_id(caller_net, netns_id); + if (unlikely(!net)) + goto out; + sk = sk_lookup(net, tuple, skb, family, proto); + put_net(net); + } else { + net = caller_net; + sk = sk_lookup(net, tuple, skb, family, proto); + } + + if (sk) + sk = sk_to_full_sk(sk); +out: + return (unsigned long) sk; +} + +BPF_CALL_5(bpf_sk_lookup_tcp, struct sk_buff *, skb, + struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags) +{ + return bpf_sk_lookup(skb, tuple, len, IPPROTO_TCP, netns_id, flags); +} + +static const struct bpf_func_proto bpf_sk_lookup_tcp_proto = { + .func = bpf_sk_lookup_tcp, + .gpl_only = false, + .pkt_access = true, + .ret_type = RET_PTR_TO_SOCKET_OR_NULL, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_PTR_TO_MEM, + .arg3_type = ARG_CONST_SIZE, + .arg4_type = ARG_ANYTHING, + .arg5_type = ARG_ANYTHING, +}; + +BPF_CALL_5(bpf_sk_lookup_udp, struct sk_buff *, skb, + struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags) +{ + return bpf_sk_lookup(skb, tuple, len, IPPROTO_UDP, netns_id, flags); +} + +static const struct bpf_func_proto bpf_sk_lookup_udp_proto = { + .func = bpf_sk_lookup_udp, + .gpl_only = false, + .pkt_access = true, + .ret_type = RET_PTR_TO_SOCKET_OR_NULL, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_PTR_TO_MEM, + .arg3_type = ARG_CONST_SIZE, + .arg4_type = ARG_ANYTHING, + .arg5_type = ARG_ANYTHING, +}; + +BPF_CALL_1(bpf_sk_release, struct sock *, sk) +{ + if (!sock_flag(sk, SOCK_RCU_FREE)) + sock_gen_put(sk); + return 0; +} + +static const struct bpf_func_proto bpf_sk_release_proto = { + .func = bpf_sk_release, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_SOCKET, +}; +#endif /* CONFIG_INET */ + bool bpf_helper_changes_pkt_data(void *func) { if (func == bpf_skb_vlan_push || @@ -4993,6 +5160,14 @@ tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) case BPF_FUNC_skb_ancestor_cgroup_id: return &bpf_skb_ancestor_cgroup_id_proto; #endif +#ifdef CONFIG_INET + case BPF_FUNC_sk_lookup_tcp: + return &bpf_sk_lookup_tcp_proto; + case BPF_FUNC_sk_lookup_udp: + return &bpf_sk_lookup_udp_proto; + case BPF_FUNC_sk_release: + return &bpf_sk_release_proto; +#endif default: return bpf_base_func_proto(func_id); } @@ -5093,6 +5268,25 @@ sk_skb_func_proto(enum bpf_func_id func_id, 
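The new socket-lookup helpers come as an acquire/release pair: the RET_PTR_TO_SOCKET_OR_NULL / ARG_PTR_TO_SOCKET plumbing makes the verifier insist that every non-NULL result reaches bpf_sk_release(). A hedged tc classifier sketch (header parsing elided; in this version netns_id 0 means the caller's own namespace):

SEC("classifier")
int example_cls(struct __sk_buff *skb)
{
	struct bpf_sock_tuple tuple = {};
	struct bpf_sock *sk;

	/* ... fill tuple.ipv4.{saddr,daddr,sport,dport} from the packet ... */

	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4),
			       0 /* netns */, 0 /* flags, must be 0 */);
	if (sk)
		bpf_sk_release(sk);	/* mandatory, or the verifier rejects
					 * the program for a leaked reference */
	return TC_ACT_OK;
}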
const struct bpf_prog *prog) return &bpf_sk_redirect_hash_proto; case BPF_FUNC_get_local_storage: return &bpf_get_local_storage_proto; +#ifdef CONFIG_INET + case BPF_FUNC_sk_lookup_tcp: + return &bpf_sk_lookup_tcp_proto; + case BPF_FUNC_sk_lookup_udp: + return &bpf_sk_lookup_udp_proto; + case BPF_FUNC_sk_release: + return &bpf_sk_release_proto; +#endif + default: + return bpf_base_func_proto(func_id); + } +} + +static const struct bpf_func_proto * +flow_dissector_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) +{ + switch (func_id) { + case BPF_FUNC_skb_load_bytes: + return &bpf_skb_load_bytes_proto; default: return bpf_base_func_proto(func_id); } @@ -5216,6 +5410,10 @@ static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type if (size != size_default) return false; break; + case bpf_ctx_range(struct __sk_buff, flow_keys): + if (size != sizeof(struct bpf_flow_keys *)) + return false; + break; default: /* Only narrow read access allowed for now. */ if (type == BPF_WRITE) { @@ -5241,6 +5439,7 @@ static bool sk_filter_is_valid_access(int off, int size, case bpf_ctx_range(struct __sk_buff, data): case bpf_ctx_range(struct __sk_buff, data_meta): case bpf_ctx_range(struct __sk_buff, data_end): + case bpf_ctx_range(struct __sk_buff, flow_keys): case bpf_ctx_range_till(struct __sk_buff, family, local_port): return false; } @@ -5266,6 +5465,7 @@ static bool lwt_is_valid_access(int off, int size, case bpf_ctx_range(struct __sk_buff, tc_classid): case bpf_ctx_range_till(struct __sk_buff, family, local_port): case bpf_ctx_range(struct __sk_buff, data_meta): + case bpf_ctx_range(struct __sk_buff, flow_keys): return false; } @@ -5351,23 +5551,29 @@ static bool __sock_filter_check_size(int off, int size, return size == size_default; } -static bool sock_filter_is_valid_access(int off, int size, - enum bpf_access_type type, - const struct bpf_prog *prog, - struct bpf_insn_access_aux *info) +bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type, + struct bpf_insn_access_aux *info) { if (off < 0 || off >= sizeof(struct bpf_sock)) return false; if (off % size != 0) return false; - if (!__sock_filter_check_attach_type(off, type, - prog->expected_attach_type)) - return false; if (!__sock_filter_check_size(off, size, info)) return false; return true; } +static bool sock_filter_is_valid_access(int off, int size, + enum bpf_access_type type, + const struct bpf_prog *prog, + struct bpf_insn_access_aux *info) +{ + if (!bpf_sock_is_valid_access(off, size, type, info)) + return false; + return __sock_filter_check_attach_type(off, type, + prog->expected_attach_type); +} + static int bpf_unclone_prologue(struct bpf_insn *insn_buf, bool direct_write, const struct bpf_prog *prog, int drop_verdict) { @@ -5476,6 +5682,7 @@ static bool tc_cls_act_is_valid_access(int off, int size, case bpf_ctx_range(struct __sk_buff, data_end): info->reg_type = PTR_TO_PACKET_END; break; + case bpf_ctx_range(struct __sk_buff, flow_keys): case bpf_ctx_range_till(struct __sk_buff, family, local_port): return false; } @@ -5677,6 +5884,7 @@ static bool sk_skb_is_valid_access(int off, int size, switch (off) { case bpf_ctx_range(struct __sk_buff, tc_classid): case bpf_ctx_range(struct __sk_buff, data_meta): + case bpf_ctx_range(struct __sk_buff, flow_keys): return false; } @@ -5736,6 +5944,39 @@ static bool sk_msg_is_valid_access(int off, int size, return true; } +static bool flow_dissector_is_valid_access(int off, int size, + enum bpf_access_type type, + const struct bpf_prog *prog, + 
struct bpf_insn_access_aux *info) +{ + if (type == BPF_WRITE) { + switch (off) { + case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]): + break; + default: + return false; + } + } + + switch (off) { + case bpf_ctx_range(struct __sk_buff, data): + info->reg_type = PTR_TO_PACKET; + break; + case bpf_ctx_range(struct __sk_buff, data_end): + info->reg_type = PTR_TO_PACKET_END; + break; + case bpf_ctx_range(struct __sk_buff, flow_keys): + info->reg_type = PTR_TO_FLOW_KEYS; + break; + case bpf_ctx_range(struct __sk_buff, tc_classid): + case bpf_ctx_range(struct __sk_buff, data_meta): + case bpf_ctx_range_till(struct __sk_buff, family, local_port): + return false; + } + + return bpf_skb_is_valid_access(off, size, type, prog, info); +} + static u32 bpf_convert_ctx_access(enum bpf_access_type type, const struct bpf_insn *si, struct bpf_insn *insn_buf, @@ -6030,15 +6271,24 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type, bpf_target_off(struct sock_common, skc_num, 2, target_size)); break; + + case offsetof(struct __sk_buff, flow_keys): + off = si->off; + off -= offsetof(struct __sk_buff, flow_keys); + off += offsetof(struct sk_buff, cb); + off += offsetof(struct qdisc_skb_cb, flow_keys); + *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg, + si->src_reg, off); + break; } return insn - insn_buf; } -static u32 sock_filter_convert_ctx_access(enum bpf_access_type type, - const struct bpf_insn *si, - struct bpf_insn *insn_buf, - struct bpf_prog *prog, u32 *target_size) +u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, + const struct bpf_insn *si, + struct bpf_insn *insn_buf, + struct bpf_prog *prog, u32 *target_size) { struct bpf_insn *insn = insn_buf; int off; @@ -6950,7 +7200,7 @@ const struct bpf_prog_ops lwt_seg6local_prog_ops = { const struct bpf_verifier_ops cg_sock_verifier_ops = { .get_func_proto = sock_filter_func_proto, .is_valid_access = sock_filter_is_valid_access, - .convert_ctx_access = sock_filter_convert_ctx_access, + .convert_ctx_access = bpf_sock_convert_ctx_access, }; const struct bpf_prog_ops cg_sock_prog_ops = { @@ -6993,6 +7243,15 @@ const struct bpf_verifier_ops sk_msg_verifier_ops = { const struct bpf_prog_ops sk_msg_prog_ops = { }; +const struct bpf_verifier_ops flow_dissector_verifier_ops = { + .get_func_proto = flow_dissector_func_proto, + .is_valid_access = flow_dissector_is_valid_access, + .convert_ctx_access = bpf_convert_ctx_access, +}; + +const struct bpf_prog_ops flow_dissector_prog_ops = { +}; + int sk_detach_filter(struct sock *sk) { int ret = -ENOENT; diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index ce9eeeb7c024..676f3ad629f9 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -25,6 +25,9 @@ #include <net/flow_dissector.h> #include <scsi/fc/fc_fcoe.h> #include <uapi/linux/batadv_packet.h> +#include <linux/bpf.h> + +static DEFINE_MUTEX(flow_dissector_mutex); static void dissector_set_key(struct flow_dissector *flow_dissector, enum flow_dissector_key_id key_id) @@ -62,6 +65,44 @@ void skb_flow_dissector_init(struct flow_dissector *flow_dissector, } EXPORT_SYMBOL(skb_flow_dissector_init); +int skb_flow_dissector_bpf_prog_attach(const union bpf_attr *attr, + struct bpf_prog *prog) +{ + struct bpf_prog *attached; + struct net *net; + + net = current->nsproxy->net_ns; + mutex_lock(&flow_dissector_mutex); + attached = rcu_dereference_protected(net->flow_dissector_prog, + lockdep_is_held(&flow_dissector_mutex)); + if (attached) { + /* Only one BPF program can be attached at a time */ + 
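The flow_keys case added to bpf_convert_ctx_access() turns a program's read of __sk_buff->flow_keys into a pointer-sized load from the skb control block. The offset arithmetic, spelled out:

/* off  = si->off - offsetof(struct __sk_buff, flow_keys)
 *      + offsetof(struct sk_buff, cb)
 *      + offsetof(struct qdisc_skb_cb, flow_keys);
 *
 * so the emitted BPF_LDX_MEM(BPF_SIZEOF(void *), ...) reads back the
 * pointer that __skb_flow_dissect() stores via
 * cb->qdisc_cb.flow_keys = &flow_keys just before BPF_PROG_RUN().
 */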
mutex_unlock(&flow_dissector_mutex); + return -EEXIST; + } + rcu_assign_pointer(net->flow_dissector_prog, prog); + mutex_unlock(&flow_dissector_mutex); + return 0; +} + +int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr) +{ + struct bpf_prog *attached; + struct net *net; + + net = current->nsproxy->net_ns; + mutex_lock(&flow_dissector_mutex); + attached = rcu_dereference_protected(net->flow_dissector_prog, + lockdep_is_held(&flow_dissector_mutex)); + if (!attached) { + mutex_unlock(&flow_dissector_mutex); + return -ENOENT; + } + bpf_prog_put(attached); + RCU_INIT_POINTER(net->flow_dissector_prog, NULL); + mutex_unlock(&flow_dissector_mutex); + return 0; +} /** * skb_flow_get_be16 - extract be16 entity * @skb: sk_buff to extract from @@ -382,8 +423,8 @@ __skb_flow_dissect_gre(const struct sk_buff *skb, offset += sizeof(struct gre_base_hdr); if (hdr->flags & GRE_CSUM) - offset += sizeof(((struct gre_full_hdr *) 0)->csum) + - sizeof(((struct gre_full_hdr *) 0)->reserved1); + offset += FIELD_SIZEOF(struct gre_full_hdr, csum) + + FIELD_SIZEOF(struct gre_full_hdr, reserved1); if (hdr->flags & GRE_KEY) { const __be32 *keyid; @@ -405,11 +446,11 @@ __skb_flow_dissect_gre(const struct sk_buff *skb, else key_keyid->keyid = *keyid & GRE_PPTP_KEY_MASK; } - offset += sizeof(((struct gre_full_hdr *) 0)->key); + offset += FIELD_SIZEOF(struct gre_full_hdr, key); } if (hdr->flags & GRE_SEQ) - offset += sizeof(((struct pptp_gre_header *) 0)->seq); + offset += FIELD_SIZEOF(struct pptp_gre_header, seq); if (gre_ver == 0) { if (*p_proto == htons(ETH_P_TEB)) { @@ -436,7 +477,7 @@ __skb_flow_dissect_gre(const struct sk_buff *skb, u8 *ppp_hdr; if (hdr->flags & GRE_ACK) - offset += sizeof(((struct pptp_gre_header *) 0)->ack); + offset += FIELD_SIZEOF(struct pptp_gre_header, ack); ppp_hdr = __skb_header_pointer(skb, *p_nhoff + offset, sizeof(_ppp_hdr), @@ -588,6 +629,60 @@ static bool skb_flow_dissect_allowed(int *num_hdrs) return (*num_hdrs <= MAX_FLOW_DISSECT_HDRS); } +static void __skb_flow_bpf_to_target(const struct bpf_flow_keys *flow_keys, + struct flow_dissector *flow_dissector, + void *target_container) +{ + struct flow_dissector_key_control *key_control; + struct flow_dissector_key_basic *key_basic; + struct flow_dissector_key_addrs *key_addrs; + struct flow_dissector_key_ports *key_ports; + + key_control = skb_flow_dissector_target(flow_dissector, + FLOW_DISSECTOR_KEY_CONTROL, + target_container); + key_control->thoff = flow_keys->thoff; + if (flow_keys->is_frag) + key_control->flags |= FLOW_DIS_IS_FRAGMENT; + if (flow_keys->is_first_frag) + key_control->flags |= FLOW_DIS_FIRST_FRAG; + if (flow_keys->is_encap) + key_control->flags |= FLOW_DIS_ENCAPSULATION; + + key_basic = skb_flow_dissector_target(flow_dissector, + FLOW_DISSECTOR_KEY_BASIC, + target_container); + key_basic->n_proto = flow_keys->n_proto; + key_basic->ip_proto = flow_keys->ip_proto; + + if (flow_keys->addr_proto == ETH_P_IP && + dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) { + key_addrs = skb_flow_dissector_target(flow_dissector, + FLOW_DISSECTOR_KEY_IPV4_ADDRS, + target_container); + key_addrs->v4addrs.src = flow_keys->ipv4_src; + key_addrs->v4addrs.dst = flow_keys->ipv4_dst; + key_control->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; + } else if (flow_keys->addr_proto == ETH_P_IPV6 && + dissector_uses_key(flow_dissector, + FLOW_DISSECTOR_KEY_IPV6_ADDRS)) { + key_addrs = skb_flow_dissector_target(flow_dissector, + FLOW_DISSECTOR_KEY_IPV6_ADDRS, + target_container); + memcpy(&key_addrs->v6addrs, 
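The FIELD_SIZEOF() conversions in the GRE dissection code are behaviour-neutral; the macro is essentially the same null-pointer trick, just named (include/linux/kernel.h in this era):

#define FIELD_SIZEOF(t, f) (sizeof(((t *)0)->f))

/* e.g. FIELD_SIZEOF(struct gre_full_hdr, csum)
 *   == sizeof(((struct gre_full_hdr *)0)->csum)
 * no object is dereferenced; sizeof only inspects the type.
 */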
&flow_keys->ipv6_src, + sizeof(key_addrs->v6addrs)); + key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; + } + + if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS)) { + key_ports = skb_flow_dissector_target(flow_dissector, + FLOW_DISSECTOR_KEY_PORTS, + target_container); + key_ports->src = flow_keys->sport; + key_ports->dst = flow_keys->dport; + } +} + /** * __skb_flow_dissect - extract the flow_keys struct and return it * @skb: sk_buff to extract the flow from, can be NULL if the rest are specified @@ -619,6 +714,7 @@ bool __skb_flow_dissect(const struct sk_buff *skb, struct flow_dissector_key_vlan *key_vlan; enum flow_dissect_ret fdret; enum flow_dissector_key_id dissector_vlan = FLOW_DISSECTOR_KEY_MAX; + struct bpf_prog *attached = NULL; int num_hdrs = 0; u8 ip_proto = 0; bool ret; @@ -658,6 +754,50 @@ bool __skb_flow_dissect(const struct sk_buff *skb, FLOW_DISSECTOR_KEY_BASIC, target_container); + rcu_read_lock(); + if (skb) { + if (skb->dev) + attached = rcu_dereference(dev_net(skb->dev)->flow_dissector_prog); + else if (skb->sk) + attached = rcu_dereference(sock_net(skb->sk)->flow_dissector_prog); + else + WARN_ON_ONCE(1); + } + if (attached) { + /* Note that even though the const qualifier is discarded + * throughout the execution of the BPF program, all changes(the + * control block) are reverted after the BPF program returns. + * Therefore, __skb_flow_dissect does not alter the skb. + */ + struct bpf_flow_keys flow_keys = {}; + struct bpf_skb_data_end cb_saved; + struct bpf_skb_data_end *cb; + u32 result; + + cb = (struct bpf_skb_data_end *)skb->cb; + + /* Save Control Block */ + memcpy(&cb_saved, cb, sizeof(cb_saved)); + memset(cb, 0, sizeof(cb_saved)); + + /* Pass parameters to the BPF program */ + cb->qdisc_cb.flow_keys = &flow_keys; + flow_keys.nhoff = nhoff; + + bpf_compute_data_pointers((struct sk_buff *)skb); + result = BPF_PROG_RUN(attached, skb); + + /* Restore state */ + memcpy(cb, &cb_saved, sizeof(cb_saved)); + + __skb_flow_bpf_to_target(&flow_keys, flow_dissector, + target_container); + key_control->thoff = min_t(u16, key_control->thoff, skb->len); + rcu_read_unlock(); + return result == BPF_OK; + } + rcu_read_unlock(); + if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { struct ethhdr *eth = eth_hdr(skb); diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c index 188d693cb251..9bf1b9ad1780 100644 --- a/net/core/gen_stats.c +++ b/net/core/gen_stats.c @@ -162,6 +162,34 @@ __gnet_stats_copy_basic(const seqcount_t *running, } EXPORT_SYMBOL(__gnet_stats_copy_basic); +static int +___gnet_stats_copy_basic(const seqcount_t *running, + struct gnet_dump *d, + struct gnet_stats_basic_cpu __percpu *cpu, + struct gnet_stats_basic_packed *b, + int type) +{ + struct gnet_stats_basic_packed bstats = {0}; + + __gnet_stats_copy_basic(running, &bstats, cpu, b); + + if (d->compat_tc_stats && type == TCA_STATS_BASIC) { + d->tc_stats.bytes = bstats.bytes; + d->tc_stats.packets = bstats.packets; + } + + if (d->tail) { + struct gnet_stats_basic sb; + + memset(&sb, 0, sizeof(sb)); + sb.bytes = bstats.bytes; + sb.packets = bstats.packets; + return gnet_stats_copy(d, type, &sb, sizeof(sb), + TCA_STATS_PAD); + } + return 0; +} + /** * gnet_stats_copy_basic - copy basic statistics into statistic TLV * @running: seqcount_t pointer @@ -181,29 +209,36 @@ gnet_stats_copy_basic(const seqcount_t *running, struct gnet_stats_basic_cpu __percpu *cpu, struct gnet_stats_basic_packed *b) { - struct gnet_stats_basic_packed bstats = {0}; - - 
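Putting the pieces together: a program of the new flow-dissector type sees flow_keys pre-zeroed with nhoff already set, fills in the fields that __skb_flow_bpf_to_target() copies out above, and must return BPF_OK for the dissection to count as a success. A minimal hedged sketch for plain IPv4 (no fragment handling; includes elided):

SEC("flow_dissector")
int example_dissect(struct __sk_buff *skb)
{
	struct bpf_flow_keys *keys = skb->flow_keys;
	struct iphdr iph;

	/* nhoff was preset by the kernel to the network header */
	if (bpf_skb_load_bytes(skb, keys->nhoff, &iph, sizeof(iph)) < 0)
		return BPF_DROP;

	keys->n_proto = bpf_htons(ETH_P_IP);
	keys->addr_proto = ETH_P_IP;
	keys->ip_proto = iph.protocol;
	keys->ipv4_src = iph.saddr;
	keys->ipv4_dst = iph.daddr;
	keys->nhoff += iph.ihl * 4;
	keys->thoff = keys->nhoff;	/* transport header starts here */
	return BPF_OK;			/* anything else counts as failure */
}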
__gnet_stats_copy_basic(running, &bstats, cpu, b); - - if (d->compat_tc_stats) { - d->tc_stats.bytes = bstats.bytes; - d->tc_stats.packets = bstats.packets; - } - - if (d->tail) { - struct gnet_stats_basic sb; - - memset(&sb, 0, sizeof(sb)); - sb.bytes = bstats.bytes; - sb.packets = bstats.packets; - return gnet_stats_copy(d, TCA_STATS_BASIC, &sb, sizeof(sb), - TCA_STATS_PAD); - } - return 0; + return ___gnet_stats_copy_basic(running, d, cpu, b, + TCA_STATS_BASIC); } EXPORT_SYMBOL(gnet_stats_copy_basic); /** + * gnet_stats_copy_basic_hw - copy basic hw statistics into statistic TLV + * @running: seqcount_t pointer + * @d: dumping handle + * @cpu: copy statistic per cpu + * @b: basic statistics + * + * Appends the basic statistics to the top level TLV created by + * gnet_stats_start_copy(). + * + * Returns 0 on success or -1 with the statistic lock released + * if the room in the socket buffer was not sufficient. + */ +int +gnet_stats_copy_basic_hw(const seqcount_t *running, + struct gnet_dump *d, + struct gnet_stats_basic_cpu __percpu *cpu, + struct gnet_stats_basic_packed *b) +{ + return ___gnet_stats_copy_basic(running, d, cpu, b, + TCA_STATS_BASIC_HW); +} +EXPORT_SYMBOL(gnet_stats_copy_basic_hw); + +/** * gnet_stats_copy_rate_est - copy rate estimator statistics into statistics TLV * @d: dumping handle * @rate_est: rate estimator diff --git a/net/core/link_watch.c b/net/core/link_watch.c index e38e641e98d5..7f51efb2b3ab 100644 --- a/net/core/link_watch.c +++ b/net/core/link_watch.c @@ -155,7 +155,7 @@ static void linkwatch_do_dev(struct net_device *dev) clear_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state); rfc2863_policy(dev); - if (dev->flags & IFF_UP) { + if (dev->flags & IFF_UP && netif_device_present(dev)) { if (netif_carrier_ok(dev)) dev_activate(dev); else diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 91592fceeaad..69c41cb3966d 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -232,7 +232,8 @@ static void pneigh_queue_purge(struct sk_buff_head *list) } } -static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev) +static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev, + bool skip_perm) { int i; struct neigh_hash_table *nht; @@ -250,6 +251,10 @@ static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev) np = &n->next; continue; } + if (skip_perm && n->nud_state & NUD_PERMANENT) { + np = &n->next; + continue; + } rcu_assign_pointer(*np, rcu_dereference_protected(n->next, lockdep_is_held(&tbl->lock))); @@ -285,21 +290,35 @@ static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev) void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev) { write_lock_bh(&tbl->lock); - neigh_flush_dev(tbl, dev); + neigh_flush_dev(tbl, dev, false); write_unlock_bh(&tbl->lock); } EXPORT_SYMBOL(neigh_changeaddr); -int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev) +static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev, + bool skip_perm) { write_lock_bh(&tbl->lock); - neigh_flush_dev(tbl, dev); + neigh_flush_dev(tbl, dev, skip_perm); pneigh_ifdown_and_unlock(tbl, dev); del_timer_sync(&tbl->proxy_timer); pneigh_queue_purge(&tbl->proxy_queue); return 0; } + +int neigh_carrier_down(struct neigh_table *tbl, struct net_device *dev) +{ + __neigh_ifdown(tbl, dev, true); + return 0; +} +EXPORT_SYMBOL(neigh_carrier_down); + +int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev) +{ + __neigh_ifdown(tbl, dev, false); + return 0; +} 
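gnet_stats_copy_basic_hw() reuses the exact code path of gnet_stats_copy_basic(); the only difference is the TLV type, so a dump path that also has offloaded counters can emit both:

/* gnet_stats_copy_basic(running, d, cpu_bstats, &bstats);
 *	-> TCA_STATS_BASIC      (software counters)
 * gnet_stats_copy_basic_hw(running, d, cpu_bstats_hw, &bstats_hw);
 *	-> TCA_STATS_BASIC_HW   (hardware-offloaded counters)
 * only TCA_STATS_BASIC feeds the legacy compat_tc_stats copy.
 */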
EXPORT_SYMBOL(neigh_ifdown); static struct neighbour *neigh_alloc(struct neigh_table *tbl, struct net_device *dev) @@ -1280,11 +1299,8 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, neigh->arp_queue_len_bytes = 0; } out: - if (update_isrouter) { - neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ? - (neigh->flags | NTF_ROUTER) : - (neigh->flags & ~NTF_ROUTER); - } + if (update_isrouter) + neigh_update_is_router(neigh, flags, ¬ify); write_unlock_bh(&neigh->lock); if (notify) @@ -1712,7 +1728,8 @@ out: static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { - int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE; + int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE | + NEIGH_UPDATE_F_OVERRIDE_ISROUTER; struct net *net = sock_net(skb->sk); struct ndmsg *ndm; struct nlattr *tb[NDA_MAX+1]; @@ -1787,12 +1804,16 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, } if (!(nlh->nlmsg_flags & NLM_F_REPLACE)) - flags &= ~NEIGH_UPDATE_F_OVERRIDE; + flags &= ~(NEIGH_UPDATE_F_OVERRIDE | + NEIGH_UPDATE_F_OVERRIDE_ISROUTER); } if (ndm->ndm_flags & NTF_EXT_LEARNED) flags |= NEIGH_UPDATE_F_EXT_LEARNED; + if (ndm->ndm_flags & NTF_ROUTER) + flags |= NEIGH_UPDATE_F_ISROUTER; + if (ndm->ndm_flags & NTF_USE) { neigh_event_send(neigh, NULL); err = 0; @@ -2162,15 +2183,47 @@ errout: return err; } +static int neightbl_valid_dump_info(const struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) +{ + struct ndtmsg *ndtm; + + if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndtm))) { + NL_SET_ERR_MSG(extack, "Invalid header for neighbor table dump request"); + return -EINVAL; + } + + ndtm = nlmsg_data(nlh); + if (ndtm->ndtm_pad1 || ndtm->ndtm_pad2) { + NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor table dump request"); + return -EINVAL; + } + + if (nlmsg_attrlen(nlh, sizeof(*ndtm))) { + NL_SET_ERR_MSG(extack, "Invalid data after header in neighbor table dump request"); + return -EINVAL; + } + + return 0; +} + static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb) { + const struct nlmsghdr *nlh = cb->nlh; struct net *net = sock_net(skb->sk); int family, tidx, nidx = 0; int tbl_skip = cb->args[0]; int neigh_skip = cb->args[1]; struct neigh_table *tbl; - family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family; + if (cb->strict_check) { + int err = neightbl_valid_dump_info(nlh, cb->extack); + + if (err < 0) + return err; + } + + family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family; for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) { struct neigh_parms *p; @@ -2183,7 +2236,7 @@ static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb) continue; if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, RTM_NEWNEIGHTBL, + nlh->nlmsg_seq, RTM_NEWNEIGHTBL, NLM_F_MULTI) < 0) break; @@ -2198,7 +2251,7 @@ static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb) if (neightbl_fill_param_info(skb, tbl, p, NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, + nlh->nlmsg_seq, RTM_NEWNEIGHTBL, NLM_F_MULTI) < 0) goto out; @@ -2327,35 +2380,24 @@ static bool neigh_ifindex_filtered(struct net_device *dev, int filter_idx) return false; } +struct neigh_dump_filter { + int master_idx; + int dev_idx; +}; + static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb, - struct netlink_callback *cb) + struct netlink_callback *cb, + struct neigh_dump_filter *filter) { struct net *net = sock_net(skb->sk); - const struct 
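The skip_perm split gives the neighbour table two teardown strengths; the exported names suggest the intended call sites, though those are outside this hunk:

/* neigh_carrier_down(tbl, dev): skip_perm = true
 *	entries with NUD_PERMANENT in nud_state stay linked, so e.g.
 *	ip neigh add 192.0.2.1 lladdr 52:54:00:12:34:56 dev eth0 nud permanent
 *	survives a carrier flap (iproute2 command shown for illustration)
 * neigh_ifdown(tbl, dev):       skip_perm = false
 *	everything for dev is flushed, permanent entries included
 */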
nlmsghdr *nlh = cb->nlh; - struct nlattr *tb[NDA_MAX + 1]; struct neighbour *n; int rc, h, s_h = cb->args[1]; int idx, s_idx = idx = cb->args[2]; struct neigh_hash_table *nht; - int filter_master_idx = 0, filter_idx = 0; unsigned int flags = NLM_F_MULTI; - int err; - err = nlmsg_parse(nlh, sizeof(struct ndmsg), tb, NDA_MAX, NULL, NULL); - if (!err) { - if (tb[NDA_IFINDEX]) { - if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32)) - return -EINVAL; - filter_idx = nla_get_u32(tb[NDA_IFINDEX]); - } - if (tb[NDA_MASTER]) { - if (nla_len(tb[NDA_MASTER]) != sizeof(u32)) - return -EINVAL; - filter_master_idx = nla_get_u32(tb[NDA_MASTER]); - } - if (filter_idx || filter_master_idx) - flags |= NLM_F_DUMP_FILTERED; - } + if (filter->dev_idx || filter->master_idx) + flags |= NLM_F_DUMP_FILTERED; rcu_read_lock_bh(); nht = rcu_dereference_bh(tbl->nht); @@ -2368,8 +2410,8 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb, n = rcu_dereference_bh(n->next)) { if (idx < s_idx || !net_eq(dev_net(n->dev), net)) goto next; - if (neigh_ifindex_filtered(n->dev, filter_idx) || - neigh_master_filtered(n->dev, filter_master_idx)) + if (neigh_ifindex_filtered(n->dev, filter->dev_idx) || + neigh_master_filtered(n->dev, filter->master_idx)) goto next; if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, @@ -2391,12 +2433,17 @@ out: } static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb, - struct netlink_callback *cb) + struct netlink_callback *cb, + struct neigh_dump_filter *filter) { struct pneigh_entry *n; struct net *net = sock_net(skb->sk); int rc, h, s_h = cb->args[3]; int idx, s_idx = idx = cb->args[4]; + unsigned int flags = NLM_F_MULTI; + + if (filter->dev_idx || filter->master_idx) + flags |= NLM_F_DUMP_FILTERED; read_lock_bh(&tbl->lock); @@ -2406,10 +2453,12 @@ static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb, for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) { if (idx < s_idx || pneigh_net(n) != net) goto next; + if (neigh_ifindex_filtered(n->dev, filter->dev_idx) || + neigh_master_filtered(n->dev, filter->master_idx)) + goto next; if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, - RTM_NEWNEIGH, - NLM_F_MULTI, tbl) < 0) { + RTM_NEWNEIGH, flags, tbl) < 0) { read_unlock_bh(&tbl->lock); rc = -1; goto out; @@ -2428,22 +2477,91 @@ out: } +static int neigh_valid_dump_req(const struct nlmsghdr *nlh, + bool strict_check, + struct neigh_dump_filter *filter, + struct netlink_ext_ack *extack) +{ + struct nlattr *tb[NDA_MAX + 1]; + int err, i; + + if (strict_check) { + struct ndmsg *ndm; + + if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) { + NL_SET_ERR_MSG(extack, "Invalid header for neighbor dump request"); + return -EINVAL; + } + + ndm = nlmsg_data(nlh); + if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_ifindex || + ndm->ndm_state || ndm->ndm_flags || ndm->ndm_type) { + NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor dump request"); + return -EINVAL; + } + + err = nlmsg_parse_strict(nlh, sizeof(struct ndmsg), tb, NDA_MAX, + NULL, extack); + } else { + err = nlmsg_parse(nlh, sizeof(struct ndmsg), tb, NDA_MAX, + NULL, extack); + } + if (err < 0) + return err; + + for (i = 0; i <= NDA_MAX; ++i) { + if (!tb[i]) + continue; + + /* all new attributes should require strict_check */ + switch (i) { + case NDA_IFINDEX: + if (nla_len(tb[i]) != sizeof(u32)) { + NL_SET_ERR_MSG(extack, "Invalid IFINDEX attribute in neighbor dump request"); + return -EINVAL; + } + filter->dev_idx = 
nla_get_u32(tb[i]); + break; + case NDA_MASTER: + if (nla_len(tb[i]) != sizeof(u32)) { + NL_SET_ERR_MSG(extack, "Invalid MASTER attribute in neighbor dump request"); + return -EINVAL; + } + filter->master_idx = nla_get_u32(tb[i]); + break; + default: + if (strict_check) { + NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor dump request"); + return -EINVAL; + } + } + } + + return 0; +} + static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb) { + const struct nlmsghdr *nlh = cb->nlh; + struct neigh_dump_filter filter = {}; struct neigh_table *tbl; int t, family, s_t; int proxy = 0; int err; - family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family; + family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family; /* check for full ndmsg structure presence, family member is * the same for both structures */ - if (nlmsg_len(cb->nlh) >= sizeof(struct ndmsg) && - ((struct ndmsg *) nlmsg_data(cb->nlh))->ndm_flags == NTF_PROXY) + if (nlmsg_len(nlh) >= sizeof(struct ndmsg) && + ((struct ndmsg *)nlmsg_data(nlh))->ndm_flags == NTF_PROXY) proxy = 1; + err = neigh_valid_dump_req(nlh, cb->strict_check, &filter, cb->extack); + if (err < 0 && cb->strict_check) + return err; + s_t = cb->args[0]; for (t = 0; t < NEIGH_NR_TABLES; t++) { @@ -2457,9 +2575,9 @@ static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb) memset(&cb->args[1], 0, sizeof(cb->args) - sizeof(cb->args[0])); if (proxy) - err = pneigh_dump_table(tbl, skb, cb); + err = pneigh_dump_table(tbl, skb, cb, &filter); else - err = neigh_dump_table(tbl, skb, cb); + err = neigh_dump_table(tbl, skb, cb, &filter); if (err < 0) break; } diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index 670c84b1bfc2..fefe72774aeb 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c @@ -853,6 +853,12 @@ static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb) .s_idx = cb->args[0], }; + if (cb->strict_check && + nlmsg_attrlen(cb->nlh, sizeof(struct rtgenmsg))) { + NL_SET_ERR_MSG(cb->extack, "Unknown data in network namespace id dump request"); + return -EINVAL; + } + spin_lock_bh(&net->nsid_lock); idr_for_each(&net->netns_ids, rtnl_net_dumpid_one, &net_cb); spin_unlock_bh(&net->nsid_lock); diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 7f6938405fa1..6ac919847ce6 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -3426,7 +3426,7 @@ xmit_more: net_info_ratelimited("%s xmit error: %d\n", pkt_dev->odevname, ret); pkt_dev->errors++; - /* fallthru */ + /* fall through */ case NETDEV_TX_BUSY: /* Retry it next time */ refcount_dec(&(pkt_dev->skb->users)); diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 37c7936124e6..0958c7be2c22 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -59,7 +59,7 @@ #include <net/rtnetlink.h> #include <net/net_namespace.h> -#define RTNL_MAX_TYPE 48 +#define RTNL_MAX_TYPE 49 #define RTNL_SLAVE_MAX_TYPE 36 struct rtnl_link { @@ -130,6 +130,12 @@ int rtnl_is_locked(void) } EXPORT_SYMBOL(rtnl_is_locked); +bool refcount_dec_and_rtnl_lock(refcount_t *r) +{ + return refcount_dec_and_mutex_lock(r, &rtnl_mutex); +} +EXPORT_SYMBOL(refcount_dec_and_rtnl_lock); + #ifdef CONFIG_PROVE_LOCKING bool lockdep_rtnl_is_held(void) { @@ -1016,7 +1022,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev, + nla_total_size(4) /* IFLA_NEW_NETNSID */ + nla_total_size(4) /* IFLA_NEW_IFINDEX */ + nla_total_size(1) /* IFLA_PROTO_DOWN */ - + nla_total_size(4) /* IFLA_IF_NETNSID */ + + nla_total_size(4) /* 
IFLA_TARGET_NETNSID */ + nla_total_size(4) /* IFLA_CARRIER_UP_COUNT */ + nla_total_size(4) /* IFLA_CARRIER_DOWN_COUNT */ + nla_total_size(4) /* IFLA_MIN_MTU */ @@ -1598,7 +1604,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, ifm->ifi_flags = dev_get_flags(dev); ifm->ifi_change = change; - if (tgt_netnsid >= 0 && nla_put_s32(skb, IFLA_IF_NETNSID, tgt_netnsid)) + if (tgt_netnsid >= 0 && nla_put_s32(skb, IFLA_TARGET_NETNSID, tgt_netnsid)) goto nla_put_failure; if (nla_put_string(skb, IFLA_IFNAME, dev->name) || @@ -1737,7 +1743,7 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = { [IFLA_XDP] = { .type = NLA_NESTED }, [IFLA_EVENT] = { .type = NLA_U32 }, [IFLA_GROUP] = { .type = NLA_U32 }, - [IFLA_IF_NETNSID] = { .type = NLA_S32 }, + [IFLA_TARGET_NETNSID] = { .type = NLA_S32 }, [IFLA_CARRIER_UP_COUNT] = { .type = NLA_U32 }, [IFLA_CARRIER_DOWN_COUNT] = { .type = NLA_U32 }, [IFLA_MIN_MTU] = { .type = NLA_U32 }, @@ -1845,7 +1851,15 @@ static bool link_dump_filtered(struct net_device *dev, return false; } -static struct net *get_target_net(struct sock *sk, int netnsid) +/** + * rtnl_get_net_ns_capable - Get netns if sufficiently privileged. + * @sk: netlink socket + * @netnsid: network namespace identifier + * + * Returns the network namespace identified by netnsid on success or an error + * pointer on failure. + */ +struct net *rtnl_get_net_ns_capable(struct sock *sk, int netnsid) { struct net *net; @@ -1862,9 +1876,54 @@ static struct net *get_target_net(struct sock *sk, int netnsid) } return net; } +EXPORT_SYMBOL_GPL(rtnl_get_net_ns_capable); + +static int rtnl_valid_dump_ifinfo_req(const struct nlmsghdr *nlh, + bool strict_check, struct nlattr **tb, + struct netlink_ext_ack *extack) +{ + int hdrlen; + + if (strict_check) { + struct ifinfomsg *ifm; + + if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) { + NL_SET_ERR_MSG(extack, "Invalid header for link dump"); + return -EINVAL; + } + + ifm = nlmsg_data(nlh); + if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags || + ifm->ifi_change) { + NL_SET_ERR_MSG(extack, "Invalid values in header for link dump request"); + return -EINVAL; + } + if (ifm->ifi_index) { + NL_SET_ERR_MSG(extack, "Filter by device index not supported for link dumps"); + return -EINVAL; + } + + return nlmsg_parse_strict(nlh, sizeof(*ifm), tb, IFLA_MAX, + ifla_policy, extack); + } + + /* A hack to preserve kernel<->userspace interface. + * The correct header is ifinfomsg. It is consistent with rtnl_getlink. + * However, before Linux v3.9 the code here assumed rtgenmsg and that's + * what iproute2 < v3.9.0 used. + * We can detect the old iproute2. Even including the IFLA_EXT_MASK + * attribute, its netlink message is shorter than struct ifinfomsg. + */ + hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ? + sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg); + + return nlmsg_parse(nlh, hdrlen, tb, IFLA_MAX, ifla_policy, extack); +} static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) { + struct netlink_ext_ack *extack = cb->extack; + const struct nlmsghdr *nlh = cb->nlh; struct net *net = sock_net(skb->sk); struct net *tgt_net = net; int h, s_h; @@ -1877,44 +1936,54 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) unsigned int flags = NLM_F_MULTI; int master_idx = 0; int netnsid = -1; - int err; - int hdrlen; + int err, i; s_h = cb->args[0]; s_idx = cb->args[1]; - /* A hack to preserve kernel<->userspace interface. - * The correct header is ifinfomsg. It is consistent with rtnl_getlink. 
- * However, before Linux v3.9 the code here assumed rtgenmsg and that's - * what iproute2 < v3.9.0 used. - * We can detect the old iproute2. Even including the IFLA_EXT_MASK - * attribute, its netlink message is shorter than struct ifinfomsg. - */ - hdrlen = nlmsg_len(cb->nlh) < sizeof(struct ifinfomsg) ? - sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg); - - if (nlmsg_parse(cb->nlh, hdrlen, tb, IFLA_MAX, - ifla_policy, NULL) >= 0) { - if (tb[IFLA_IF_NETNSID]) { - netnsid = nla_get_s32(tb[IFLA_IF_NETNSID]); - tgt_net = get_target_net(skb->sk, netnsid); - if (IS_ERR(tgt_net)) - return PTR_ERR(tgt_net); - } - - if (tb[IFLA_EXT_MASK]) - ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]); + err = rtnl_valid_dump_ifinfo_req(nlh, cb->strict_check, tb, extack); + if (err < 0) { + if (cb->strict_check) + return err; - if (tb[IFLA_MASTER]) - master_idx = nla_get_u32(tb[IFLA_MASTER]); + goto walk_entries; + } - if (tb[IFLA_LINKINFO]) - kind_ops = linkinfo_to_kind_ops(tb[IFLA_LINKINFO]); + for (i = 0; i <= IFLA_MAX; ++i) { + if (!tb[i]) + continue; - if (master_idx || kind_ops) - flags |= NLM_F_DUMP_FILTERED; + /* new attributes should only be added with strict checking */ + switch (i) { + case IFLA_TARGET_NETNSID: + netnsid = nla_get_s32(tb[i]); + tgt_net = rtnl_get_net_ns_capable(skb->sk, netnsid); + if (IS_ERR(tgt_net)) { + NL_SET_ERR_MSG(extack, "Invalid target network namespace id"); + return PTR_ERR(tgt_net); + } + break; + case IFLA_EXT_MASK: + ext_filter_mask = nla_get_u32(tb[i]); + break; + case IFLA_MASTER: + master_idx = nla_get_u32(tb[i]); + break; + case IFLA_LINKINFO: + kind_ops = linkinfo_to_kind_ops(tb[i]); + break; + default: + if (cb->strict_check) { + NL_SET_ERR_MSG(extack, "Unsupported attribute in link dump request"); + return -EINVAL; + } + } } + if (master_idx || kind_ops) + flags |= NLM_F_DUMP_FILTERED; + +walk_entries: for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { idx = 0; head = &tgt_net->dev_index_head[h]; @@ -1926,8 +1995,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) err = rtnl_fill_ifinfo(skb, dev, net, RTM_NEWLINK, NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, 0, - flags, + nlh->nlmsg_seq, 0, flags, ext_filter_mask, 0, NULL, 0, netnsid); @@ -1982,7 +2050,7 @@ EXPORT_SYMBOL(rtnl_link_get_net); * * 1. IFLA_NET_NS_PID * 2. IFLA_NET_NS_FD - * 3. IFLA_IF_NETNSID + * 3. 
IFLA_TARGET_NETNSID */ static struct net *rtnl_link_get_net_by_nlattr(struct net *src_net, struct nlattr *tb[]) @@ -1992,10 +2060,10 @@ static struct net *rtnl_link_get_net_by_nlattr(struct net *src_net, if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]) return rtnl_link_get_net(src_net, tb); - if (!tb[IFLA_IF_NETNSID]) + if (!tb[IFLA_TARGET_NETNSID]) return get_net(src_net); - net = get_net_ns_by_id(src_net, nla_get_u32(tb[IFLA_IF_NETNSID])); + net = get_net_ns_by_id(src_net, nla_get_u32(tb[IFLA_TARGET_NETNSID])); if (!net) return ERR_PTR(-EINVAL); @@ -2036,13 +2104,13 @@ static int rtnl_ensure_unique_netns(struct nlattr *tb[], return -EOPNOTSUPP; } - if (tb[IFLA_IF_NETNSID] && (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD])) + if (tb[IFLA_TARGET_NETNSID] && (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD])) goto invalid_attr; - if (tb[IFLA_NET_NS_PID] && (tb[IFLA_IF_NETNSID] || tb[IFLA_NET_NS_FD])) + if (tb[IFLA_NET_NS_PID] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_FD])) goto invalid_attr; - if (tb[IFLA_NET_NS_FD] && (tb[IFLA_IF_NETNSID] || tb[IFLA_NET_NS_PID])) + if (tb[IFLA_NET_NS_FD] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_PID])) goto invalid_attr; return 0; @@ -2318,7 +2386,7 @@ static int do_setlink(const struct sk_buff *skb, if (err < 0) return err; - if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD] || tb[IFLA_IF_NETNSID]) { + if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD] || tb[IFLA_TARGET_NETNSID]) { struct net *net = rtnl_link_get_net_capable(skb, dev_net(dev), tb, CAP_NET_ADMIN); if (IS_ERR(net)) { @@ -2761,9 +2829,9 @@ static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, if (tb[IFLA_IFNAME]) nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ); - if (tb[IFLA_IF_NETNSID]) { - netnsid = nla_get_s32(tb[IFLA_IF_NETNSID]); - tgt_net = get_target_net(NETLINK_CB(skb).sk, netnsid); + if (tb[IFLA_TARGET_NETNSID]) { + netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]); + tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid); if (IS_ERR(tgt_net)) return PTR_ERR(tgt_net); } @@ -3177,9 +3245,9 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh, if (err < 0) return err; - if (tb[IFLA_IF_NETNSID]) { - netnsid = nla_get_s32(tb[IFLA_IF_NETNSID]); - tgt_net = get_target_net(NETLINK_CB(skb).sk, netnsid); + if (tb[IFLA_TARGET_NETNSID]) { + netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]); + tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid); if (IS_ERR(tgt_net)) return PTR_ERR(tgt_net); } @@ -3264,13 +3332,13 @@ static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb) { int idx; int s_idx = cb->family; + int type = cb->nlh->nlmsg_type - RTM_BASE; if (s_idx == 0) s_idx = 1; for (idx = 1; idx <= RTNL_FAMILY_MAX; idx++) { struct rtnl_link **tab; - int type = cb->nlh->nlmsg_type-RTM_BASE; struct rtnl_link *link; rtnl_dumpit_func dumpit; @@ -3731,22 +3799,66 @@ out: } EXPORT_SYMBOL(ndo_dflt_fdb_dump); -static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb) +static int valid_fdb_dump_strict(const struct nlmsghdr *nlh, + int *br_idx, int *brport_idx, + struct netlink_ext_ack *extack) +{ + struct nlattr *tb[NDA_MAX + 1]; + struct ndmsg *ndm; + int err, i; + + if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) { + NL_SET_ERR_MSG(extack, "Invalid header for fdb dump request"); + return -EINVAL; + } + + ndm = nlmsg_data(nlh); + if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state || + ndm->ndm_flags || ndm->ndm_type) { + NL_SET_ERR_MSG(extack, "Invalid values in header for fdb dump request"); + return -EINVAL; + } + + err = 
nlmsg_parse_strict(nlh, sizeof(struct ndmsg), tb, NDA_MAX, + NULL, extack); + if (err < 0) + return err; + + *brport_idx = ndm->ndm_ifindex; + for (i = 0; i <= NDA_MAX; ++i) { + if (!tb[i]) + continue; + + switch (i) { + case NDA_IFINDEX: + if (nla_len(tb[i]) != sizeof(u32)) { + NL_SET_ERR_MSG(extack, "Invalid IFINDEX attribute in fdb dump request"); + return -EINVAL; + } + *brport_idx = nla_get_u32(tb[NDA_IFINDEX]); + break; + case NDA_MASTER: + if (nla_len(tb[i]) != sizeof(u32)) { + NL_SET_ERR_MSG(extack, "Invalid MASTER attribute in fdb dump request"); + return -EINVAL; + } + *br_idx = nla_get_u32(tb[NDA_MASTER]); + break; + default: + NL_SET_ERR_MSG(extack, "Unsupported attribute in fdb dump request"); + return -EINVAL; + } + } + + return 0; +} + +static int valid_fdb_dump_legacy(const struct nlmsghdr *nlh, + int *br_idx, int *brport_idx, + struct netlink_ext_ack *extack) { - struct net_device *dev; struct nlattr *tb[IFLA_MAX+1]; - struct net_device *br_dev = NULL; - const struct net_device_ops *ops = NULL; - const struct net_device_ops *cops = NULL; - struct ifinfomsg *ifm = nlmsg_data(cb->nlh); - struct net *net = sock_net(skb->sk); - struct hlist_head *head; - int brport_idx = 0; - int br_idx = 0; - int h, s_h; - int idx = 0, s_idx; - int err = 0; - int fidx = 0; + int err; /* A hack to preserve kernel<->userspace interface. * Before Linux v4.12 this code accepted ndmsg since iproute2 v3.3.0. @@ -3755,20 +3867,49 @@ static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb) * Fortunately these sizes don't conflict with the size of ifinfomsg * with an optional attribute. */ - if (nlmsg_len(cb->nlh) != sizeof(struct ndmsg) && - (nlmsg_len(cb->nlh) != sizeof(struct ndmsg) + + if (nlmsg_len(nlh) != sizeof(struct ndmsg) && + (nlmsg_len(nlh) != sizeof(struct ndmsg) + nla_attr_size(sizeof(u32)))) { - err = nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb, - IFLA_MAX, ifla_policy, NULL); + struct ifinfomsg *ifm; + + err = nlmsg_parse(nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX, + ifla_policy, extack); if (err < 0) { return -EINVAL; } else if (err == 0) { if (tb[IFLA_MASTER]) - br_idx = nla_get_u32(tb[IFLA_MASTER]); + *br_idx = nla_get_u32(tb[IFLA_MASTER]); } - brport_idx = ifm->ifi_index; + ifm = nlmsg_data(nlh); + *brport_idx = ifm->ifi_index; } + return 0; +} + +static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb) +{ + struct net_device *dev; + struct net_device *br_dev = NULL; + const struct net_device_ops *ops = NULL; + const struct net_device_ops *cops = NULL; + struct net *net = sock_net(skb->sk); + struct hlist_head *head; + int brport_idx = 0; + int br_idx = 0; + int h, s_h; + int idx = 0, s_idx; + int err = 0; + int fidx = 0; + + if (cb->strict_check) + err = valid_fdb_dump_strict(cb->nlh, &br_idx, &brport_idx, + cb->extack); + else + err = valid_fdb_dump_legacy(cb->nlh, &br_idx, &brport_idx, + cb->extack); + if (err < 0) + return err; if (br_idx) { br_dev = __dev_get_by_index(net, br_idx); @@ -3953,28 +4094,72 @@ nla_put_failure: } EXPORT_SYMBOL_GPL(ndo_dflt_bridge_getlink); +static int valid_bridge_getlink_req(const struct nlmsghdr *nlh, + bool strict_check, u32 *filter_mask, + struct netlink_ext_ack *extack) +{ + struct nlattr *tb[IFLA_MAX+1]; + int err, i; + + if (strict_check) { + struct ifinfomsg *ifm; + + if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) { + NL_SET_ERR_MSG(extack, "Invalid header for bridge link dump"); + return -EINVAL; + } + + ifm = nlmsg_data(nlh); + if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags || + 
ifm->ifi_change || ifm->ifi_index) { + NL_SET_ERR_MSG(extack, "Invalid values in header for bridge link dump request"); + return -EINVAL; + } + + err = nlmsg_parse_strict(nlh, sizeof(struct ifinfomsg), tb, + IFLA_MAX, ifla_policy, extack); + } else { + err = nlmsg_parse(nlh, sizeof(struct ifinfomsg), tb, + IFLA_MAX, ifla_policy, extack); + } + if (err < 0) + return err; + + /* new attributes should only be added with strict checking */ + for (i = 0; i <= IFLA_MAX; ++i) { + if (!tb[i]) + continue; + + switch (i) { + case IFLA_EXT_MASK: + *filter_mask = nla_get_u32(tb[i]); + break; + default: + if (strict_check) { + NL_SET_ERR_MSG(extack, "Unsupported attribute in bridge link dump request"); + return -EINVAL; + } + } + } + + return 0; +} + static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb) { + const struct nlmsghdr *nlh = cb->nlh; struct net *net = sock_net(skb->sk); struct net_device *dev; int idx = 0; u32 portid = NETLINK_CB(cb->skb).portid; - u32 seq = cb->nlh->nlmsg_seq; + u32 seq = nlh->nlmsg_seq; u32 filter_mask = 0; int err; - if (nlmsg_len(cb->nlh) > sizeof(struct ifinfomsg)) { - struct nlattr *extfilt; - - extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct ifinfomsg), - IFLA_EXT_MASK); - if (extfilt) { - if (nla_len(extfilt) < sizeof(filter_mask)) - return -EINVAL; - - filter_mask = nla_get_u32(extfilt); - } - } + err = valid_bridge_getlink_req(nlh, cb->strict_check, &filter_mask, + cb->extack); + if (err < 0 && cb->strict_check) + return err; rcu_read_lock(); for_each_netdev_rcu(net, dev) { @@ -4568,6 +4753,7 @@ static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh, static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb) { + struct netlink_ext_ack *extack = cb->extack; int h, s_h, err, s_idx, s_idxattr, s_prividx; struct net *net = sock_net(skb->sk); unsigned int flags = NLM_F_MULTI; @@ -4584,13 +4770,32 @@ static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb) cb->seq = net->dev_base_seq; - if (nlmsg_len(cb->nlh) < sizeof(*ifsm)) + if (nlmsg_len(cb->nlh) < sizeof(*ifsm)) { + NL_SET_ERR_MSG(extack, "Invalid header for stats dump"); return -EINVAL; + } ifsm = nlmsg_data(cb->nlh); + + /* only requests using strict checks can pass data to influence + * the dump. The legacy exception is filter_mask. + */ + if (cb->strict_check) { + if (ifsm->pad1 || ifsm->pad2 || ifsm->ifindex) { + NL_SET_ERR_MSG(extack, "Invalid values in header for stats dump request"); + return -EINVAL; + } + if (nlmsg_attrlen(cb->nlh, sizeof(*ifsm))) { + NL_SET_ERR_MSG(extack, "Invalid attributes after stats header"); + return -EINVAL; + } + } + filter_mask = ifsm->filter_mask; - if (!filter_mask) + if (!filter_mask) { + NL_SET_ERR_MSG(extack, "Filter mask must be set for stats dump"); return -EINVAL; + } for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { idx = 0; diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 428094b577fc..54b961de9538 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -3381,64 +3381,6 @@ unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, } EXPORT_SYMBOL(skb_find_text); -/** - * skb_append_datato_frags - append the user data to a skb - * @sk: sock structure - * @skb: skb structure to be appended with user data. 
- * @getfrag: call back function to be used for getting the user data - * @from: pointer to user message iov - * @length: length of the iov message - * - * Description: This procedure append the user data in the fragment part - * of the skb if any page alloc fails user this procedure returns -ENOMEM - */ -int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb, - int (*getfrag)(void *from, char *to, int offset, - int len, int odd, struct sk_buff *skb), - void *from, int length) -{ - int frg_cnt = skb_shinfo(skb)->nr_frags; - int copy; - int offset = 0; - int ret; - struct page_frag *pfrag = &current->task_frag; - - do { - /* Return error if we don't have space for new frag */ - if (frg_cnt >= MAX_SKB_FRAGS) - return -EMSGSIZE; - - if (!sk_page_frag_refill(sk, pfrag)) - return -ENOMEM; - - /* copy the user data to page */ - copy = min_t(int, length, pfrag->size - pfrag->offset); - - ret = getfrag(from, page_address(pfrag->page) + pfrag->offset, - offset, copy, 0, skb); - if (ret < 0) - return -EFAULT; - - /* copy was successful so update the size parameters */ - skb_fill_page_desc(skb, frg_cnt, pfrag->page, pfrag->offset, - copy); - frg_cnt++; - pfrag->offset += copy; - get_page(pfrag->page); - - skb->truesize += copy; - refcount_add(copy, &sk->sk_wmem_alloc); - skb->len += copy; - skb->data_len += copy; - offset += copy; - length -= copy; - - } while (length > 0); - - return 0; -} -EXPORT_SYMBOL(skb_append_datato_frags); - int skb_append_pagefrags(struct sk_buff *skb, struct page *page, int offset, size_t size) { diff --git a/net/core/sock.c b/net/core/sock.c index 3730eb855095..7e8796a6a089 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -2317,7 +2317,7 @@ static void __lock_sock(struct sock *sk) finish_wait(&sk->sk_lock.wq, &wait); } -static void __release_sock(struct sock *sk) +void __release_sock(struct sock *sk) __releases(&sk->sk_lock.slock) __acquires(&sk->sk_lock.slock) { @@ -2332,7 +2332,7 @@ static void __release_sock(struct sock *sk) next = skb->next; prefetch(next); WARN_ON_ONCE(skb_dst_is_noref(skb)); - skb->next = NULL; + skb_mark_not_on_list(skb); sk_backlog_rcv(sk, skb); cond_resched(); diff --git a/net/core/xdp.c b/net/core/xdp.c index 89b6785cef2a..4b2b194f4f1f 100644 --- a/net/core/xdp.c +++ b/net/core/xdp.c @@ -94,11 +94,21 @@ static void __xdp_mem_allocator_rcu_free(struct rcu_head *rcu) kfree(xa); } -static void __xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq) +void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq) { struct xdp_mem_allocator *xa; int id = xdp_rxq->mem.id; + if (xdp_rxq->reg_state != REG_STATE_REGISTERED) { + WARN(1, "Missing register, driver bug"); + return; + } + + if (xdp_rxq->mem.type != MEM_TYPE_PAGE_POOL && + xdp_rxq->mem.type != MEM_TYPE_ZERO_COPY) { + return; + } + if (id == 0) return; @@ -110,6 +120,7 @@ static void __xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq) mutex_unlock(&mem_id_lock); } +EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg_mem_model); void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq) { @@ -119,7 +130,7 @@ void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq) WARN(!(xdp_rxq->reg_state == REG_STATE_REGISTERED), "Driver BUG"); - __xdp_rxq_info_unreg_mem_model(xdp_rxq); + xdp_rxq_info_unreg_mem_model(xdp_rxq); xdp_rxq->reg_state = REG_STATE_UNREGISTERED; xdp_rxq->dev = NULL; @@ -398,3 +409,41 @@ void xdp_attachment_setup(struct xdp_attachment_info *info, info->flags = bpf->flags; } EXPORT_SYMBOL_GPL(xdp_attachment_setup); + +struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp) +{ 
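+	/* A zero-copy (umem-backed) buffer cannot be queued like an
+	 * ordinary frame, so clone it instead: allocate one order-0 page
+	 * laid out as [struct xdp_frame][metadata][packet data], copy the
+	 * span from data_meta (or data, when metadata is unsupported) up
+	 * to data_end, then hand the original buff back to its pool via
+	 * xdp_return_buff().
+	 */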
+ unsigned int metasize, totsize; + void *addr, *data_to_copy; + struct xdp_frame *xdpf; + struct page *page; + + /* Clone into a MEM_TYPE_PAGE_ORDER0 xdp_frame. */ + metasize = xdp_data_meta_unsupported(xdp) ? 0 : + xdp->data - xdp->data_meta; + totsize = xdp->data_end - xdp->data + metasize; + + if (sizeof(*xdpf) + totsize > PAGE_SIZE) + return NULL; + + page = dev_alloc_page(); + if (!page) + return NULL; + + addr = page_to_virt(page); + xdpf = addr; + memset(xdpf, 0, sizeof(*xdpf)); + + addr += sizeof(*xdpf); + data_to_copy = metasize ? xdp->data_meta : xdp->data; + memcpy(addr, data_to_copy, totsize); + + xdpf->data = addr + metasize; + xdpf->len = totsize - metasize; + xdpf->headroom = 0; + xdpf->metasize = metasize; + xdpf->mem.type = MEM_TYPE_PAGE_ORDER0; + + xdp_return_buff(xdp); + return xdpf; +} +EXPORT_SYMBOL_GPL(xdp_convert_zc_to_xdp_frame); diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c index bfd43e8f2c06..d0b3e69c6b39 100644 --- a/net/decnet/dn_dev.c +++ b/net/decnet/dn_dev.c @@ -1363,7 +1363,7 @@ static int dn_dev_seq_show(struct seq_file *seq, void *v) seq_printf(seq, "%-8s %1s %04u %04u %04lu %04lu" " %04hu %03d %02x %-10s %-7s %-7s\n", - dev->name ? dev->name : "???", + dev->name, dn_type2asc(dn_db->parms.mode), 0, 0, dn_db->t3, dn_db->parms.t3, diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c index 7f4534828f6c..a65d553e730d 100644 --- a/net/dns_resolver/dns_key.c +++ b/net/dns_resolver/dns_key.c @@ -29,6 +29,7 @@ #include <linux/keyctl.h> #include <linux/err.h> #include <linux/seq_file.h> +#include <linux/dns_resolver.h> #include <keys/dns_resolver-type.h> #include <keys/user-type.h> #include "internal.h" @@ -48,27 +49,86 @@ const struct cred *dns_resolver_cache; /* * Preparse instantiation data for a dns_resolver key. * - * The data must be a NUL-terminated string, with the NUL char accounted in - * datalen. + * For normal hostname lookups, the data must be a NUL-terminated string, with + * the NUL char accounted in datalen. * * If the data contains a '#' characters, then we take the clause after each * one to be an option of the form 'key=value'. The actual data of interest is * the string leading up to the first '#'. For instance: * * "ip1,ip2,...#foo=bar" + * + * For server list requests, the data must begin with a NUL char and be + * followed by a byte indicating the version of the data format. Version 1 + * looks something like (note this is packed): + * + * u8 Non-string marker (ie. 0) + * u8 Content (DNS_PAYLOAD_IS_*) + * u8 Version (e.g. 1) + * u8 Source of server list + * u8 Lookup status of server list + * u8 Number of servers + * foreach-server { + * __le16 Name length + * __le16 Priority (as per SRV record, low first) + * __le16 Weight (as per SRV record, higher first) + * __le16 Port + * u8 Source of address list + * u8 Lookup status of address list + * u8 Protocol (DNS_SERVER_PROTOCOL_*) + * u8 Number of addresses + * char[] Name (not NUL-terminated) + * foreach-address { + * u8 Family (DNS_ADDRESS_IS_*) + * union { + * u8[4] ipv4_addr + * u8[16] ipv6_addr + * } + * } + * } + * */ static int dns_resolver_preparse(struct key_preparsed_payload *prep) { + const struct dns_payload_header *bin; struct user_key_payload *upayload; unsigned long derrno; int ret; int datalen = prep->datalen, result_len = 0; const char *data = prep->data, *end, *opt; + if (datalen <= 1 || !data) + return -EINVAL; + + if (data[0] == 0) { + /* It may be a server list. 
*/ + if (datalen <= sizeof(*bin)) + return -EINVAL; + + bin = (const struct dns_payload_header *)data; + kenter("[%u,%u],%u", bin->content, bin->version, datalen); + if (bin->content != DNS_PAYLOAD_IS_SERVER_LIST) { + pr_warn_ratelimited( + "dns_resolver: Unsupported content type (%u)\n", + bin->content); + return -EINVAL; + } + + if (bin->version != 1) { + pr_warn_ratelimited( + "dns_resolver: Unsupported server list version (%u)\n", + bin->version); + return -EINVAL; + } + + result_len = datalen; + goto store_result; + } + kenter("'%*.*s',%u", datalen, datalen, data, datalen); - if (datalen <= 1 || !data || data[datalen - 1] != '\0') + if (!data || data[datalen - 1] != '\0') return -EINVAL; datalen--; @@ -144,6 +204,7 @@ dns_resolver_preparse(struct key_preparsed_payload *prep) return 0; } +store_result: kdebug("store result"); prep->quotalen = result_len; diff --git a/net/dns_resolver/dns_query.c b/net/dns_resolver/dns_query.c index 49da67034f29..76338c38738a 100644 --- a/net/dns_resolver/dns_query.c +++ b/net/dns_resolver/dns_query.c @@ -148,12 +148,9 @@ int dns_query(const char *type, const char *name, size_t namelen, if (_result) { ret = -ENOMEM; - *_result = kmalloc(len + 1, GFP_KERNEL); + *_result = kmemdup_nul(upayload->data, len, GFP_KERNEL); if (!*_result) goto put; - - memcpy(*_result, upayload->data, len); - (*_result)[len] = '\0'; } if (_expiry) diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig index 4183e4ba27a5..48c41918fb35 100644 --- a/net/dsa/Kconfig +++ b/net/dsa/Kconfig @@ -38,6 +38,9 @@ config NET_DSA_TAG_DSA config NET_DSA_TAG_EDSA bool +config NET_DSA_TAG_GSWIP + bool + config NET_DSA_TAG_KSZ bool diff --git a/net/dsa/Makefile b/net/dsa/Makefile index 9e4d3536f977..6e721f7a2947 100644 --- a/net/dsa/Makefile +++ b/net/dsa/Makefile @@ -9,6 +9,7 @@ dsa_core-$(CONFIG_NET_DSA_TAG_BRCM) += tag_brcm.o dsa_core-$(CONFIG_NET_DSA_TAG_BRCM_PREPEND) += tag_brcm.o dsa_core-$(CONFIG_NET_DSA_TAG_DSA) += tag_dsa.o dsa_core-$(CONFIG_NET_DSA_TAG_EDSA) += tag_edsa.o +dsa_core-$(CONFIG_NET_DSA_TAG_GSWIP) += tag_gswip.o dsa_core-$(CONFIG_NET_DSA_TAG_KSZ) += tag_ksz.o dsa_core-$(CONFIG_NET_DSA_TAG_LAN9303) += tag_lan9303.o dsa_core-$(CONFIG_NET_DSA_TAG_MTK) += tag_mtk.o diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index 9f3209ff7ffd..a69c1790bbfc 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -52,6 +52,9 @@ const struct dsa_device_ops *dsa_device_ops[DSA_TAG_LAST] = { #ifdef CONFIG_NET_DSA_TAG_EDSA [DSA_TAG_PROTO_EDSA] = &edsa_netdev_ops, #endif +#ifdef CONFIG_NET_DSA_TAG_GSWIP + [DSA_TAG_PROTO_GSWIP] = &gswip_netdev_ops, +#endif #ifdef CONFIG_NET_DSA_TAG_KSZ [DSA_TAG_PROTO_KSZ] = &ksz_netdev_ops, #endif @@ -70,6 +73,52 @@ const struct dsa_device_ops *dsa_device_ops[DSA_TAG_LAST] = { [DSA_TAG_PROTO_NONE] = &none_ops, }; +const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops) +{ + const char *protocol_name[DSA_TAG_LAST] = { +#ifdef CONFIG_NET_DSA_TAG_BRCM + [DSA_TAG_PROTO_BRCM] = "brcm", +#endif +#ifdef CONFIG_NET_DSA_TAG_BRCM_PREPEND + [DSA_TAG_PROTO_BRCM_PREPEND] = "brcm-prepend", +#endif +#ifdef CONFIG_NET_DSA_TAG_DSA + [DSA_TAG_PROTO_DSA] = "dsa", +#endif +#ifdef CONFIG_NET_DSA_TAG_EDSA + [DSA_TAG_PROTO_EDSA] = "edsa", +#endif +#ifdef CONFIG_NET_DSA_TAG_GSWIP + [DSA_TAG_PROTO_GSWIP] = "gswip", +#endif +#ifdef CONFIG_NET_DSA_TAG_KSZ + [DSA_TAG_PROTO_KSZ] = "ksz", +#endif +#ifdef CONFIG_NET_DSA_TAG_LAN9303 + [DSA_TAG_PROTO_LAN9303] = "lan9303", +#endif +#ifdef CONFIG_NET_DSA_TAG_MTK + [DSA_TAG_PROTO_MTK] = "mtk", +#endif +#ifdef CONFIG_NET_DSA_TAG_QCA + [DSA_TAG_PROTO_QCA] 
= "qca", +#endif +#ifdef CONFIG_NET_DSA_TAG_TRAILER + [DSA_TAG_PROTO_TRAILER] = "trailer", +#endif + [DSA_TAG_PROTO_NONE] = "none", + }; + unsigned int i; + + BUILD_BUG_ON(ARRAY_SIZE(protocol_name) != DSA_TAG_LAST); + + for (i = 0; i < ARRAY_SIZE(dsa_device_ops); i++) + if (ops == dsa_device_ops[i]) + return protocol_name[i]; + + return protocol_name[DSA_TAG_PROTO_NONE]; +}; + const struct dsa_device_ops *dsa_resolve_tag_protocol(int tag_protocol) { const struct dsa_device_ops *ops; diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h index 3964c6f7a7c0..9e4fd04ab53c 100644 --- a/net/dsa/dsa_priv.h +++ b/net/dsa/dsa_priv.h @@ -86,6 +86,7 @@ struct dsa_slave_priv { /* dsa.c */ const struct dsa_device_ops *dsa_resolve_tag_protocol(int tag_protocol); bool dsa_schedule_work(struct work_struct *work); +const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops); /* legacy.c */ #if IS_ENABLED(CONFIG_NET_DSA_LEGACY) @@ -205,6 +206,9 @@ extern const struct dsa_device_ops dsa_netdev_ops; /* tag_edsa.c */ extern const struct dsa_device_ops edsa_netdev_ops; +/* tag_gswip.c */ +extern const struct dsa_device_ops gswip_netdev_ops; + /* tag_ksz.c */ extern const struct dsa_device_ops ksz_netdev_ops; diff --git a/net/dsa/legacy.c b/net/dsa/legacy.c index 42a7b85b84e1..8aa92b09db76 100644 --- a/net/dsa/legacy.c +++ b/net/dsa/legacy.c @@ -392,8 +392,7 @@ static void dsa_of_free_platform_data(struct dsa_platform_data *pd) } /* Drop our reference to the MDIO bus device */ - if (pd->chip[i].host_dev) - put_device(pd->chip[i].host_dev); + put_device(pd->chip[i].host_dev); } kfree(pd->chip); } diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 1c45c1d6d241..3f840b6eea69 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -1058,6 +1058,27 @@ static struct device_type dsa_type = { .name = "dsa", }; +static ssize_t tagging_show(struct device *d, struct device_attribute *attr, + char *buf) +{ + struct net_device *dev = to_net_dev(d); + struct dsa_port *dp = dsa_slave_to_port(dev); + + return sprintf(buf, "%s\n", + dsa_tag_protocol_to_str(dp->cpu_dp->tag_ops)); +} +static DEVICE_ATTR_RO(tagging); + +static struct attribute *dsa_slave_attrs[] = { + &dev_attr_tagging.attr, + NULL +}; + +static const struct attribute_group dsa_group = { + .name = "dsa", + .attrs = dsa_slave_attrs, +}; + static void dsa_slave_phylink_validate(struct net_device *dev, unsigned long *supported, struct phylink_link_state *state) @@ -1353,8 +1374,14 @@ int dsa_slave_create(struct dsa_port *port) goto out_phy; } + ret = sysfs_create_group(&slave_dev->dev.kobj, &dsa_group); + if (ret) + goto out_unreg; + return 0; +out_unreg: + unregister_netdev(slave_dev); out_phy: rtnl_lock(); phylink_disconnect_phy(p->dp->pl); @@ -1378,6 +1405,7 @@ void dsa_slave_destroy(struct net_device *slave_dev) rtnl_unlock(); dsa_slave_notify(slave_dev, DSA_PORT_UNREGISTER); + sysfs_remove_group(&slave_dev->dev.kobj, &dsa_group); unregister_netdev(slave_dev); phylink_destroy(dp->pl); free_percpu(p->stats64); diff --git a/net/dsa/tag_gswip.c b/net/dsa/tag_gswip.c new file mode 100644 index 000000000000..49e9b73f1be3 --- /dev/null +++ b/net/dsa/tag_gswip.c @@ -0,0 +1,109 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Intel / Lantiq GSWIP V2.0 PMAC tag support + * + * Copyright (C) 2017 - 2018 Hauke Mehrtens <hauke@hauke-m.de> + */ + +#include <linux/bitops.h> +#include <linux/etherdevice.h> +#include <linux/skbuff.h> +#include <net/dsa.h> + +#include "dsa_priv.h" + +#define GSWIP_TX_HEADER_LEN 4 + +/* special tag in TX path header */ +/* Byte 0 */ 
+#define GSWIP_TX_SLPID_SHIFT 0 /* source port ID */ +#define GSWIP_TX_SLPID_CPU 2 +#define GSWIP_TX_SLPID_APP1 3 +#define GSWIP_TX_SLPID_APP2 4 +#define GSWIP_TX_SLPID_APP3 5 +#define GSWIP_TX_SLPID_APP4 6 +#define GSWIP_TX_SLPID_APP5 7 + +/* Byte 1 */ +#define GSWIP_TX_CRCGEN_DIS BIT(7) +#define GSWIP_TX_DPID_SHIFT 0 /* destination group ID */ +#define GSWIP_TX_DPID_ELAN 0 +#define GSWIP_TX_DPID_EWAN 1 +#define GSWIP_TX_DPID_CPU 2 +#define GSWIP_TX_DPID_APP1 3 +#define GSWIP_TX_DPID_APP2 4 +#define GSWIP_TX_DPID_APP3 5 +#define GSWIP_TX_DPID_APP4 6 +#define GSWIP_TX_DPID_APP5 7 + +/* Byte 2 */ +#define GSWIP_TX_PORT_MAP_EN BIT(7) +#define GSWIP_TX_PORT_MAP_SEL BIT(6) +#define GSWIP_TX_LRN_DIS BIT(5) +#define GSWIP_TX_CLASS_EN BIT(4) +#define GSWIP_TX_CLASS_SHIFT 0 +#define GSWIP_TX_CLASS_MASK GENMASK(3, 0) + +/* Byte 3 */ +#define GSWIP_TX_DPID_EN BIT(0) +#define GSWIP_TX_PORT_MAP_SHIFT 1 +#define GSWIP_TX_PORT_MAP_MASK GENMASK(6, 1) + +#define GSWIP_RX_HEADER_LEN 8 + +/* special tag in RX path header */ +/* Byte 7 */ +#define GSWIP_RX_SPPID_SHIFT 4 +#define GSWIP_RX_SPPID_MASK GENMASK(6, 4) + +static struct sk_buff *gswip_tag_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct dsa_port *dp = dsa_slave_to_port(dev); + int err; + u8 *gswip_tag; + + err = skb_cow_head(skb, GSWIP_TX_HEADER_LEN); + if (err) + return NULL; + + skb_push(skb, GSWIP_TX_HEADER_LEN); + + gswip_tag = skb->data; + gswip_tag[0] = GSWIP_TX_SLPID_CPU; + gswip_tag[1] = GSWIP_TX_DPID_ELAN; + gswip_tag[2] = GSWIP_TX_PORT_MAP_EN | GSWIP_TX_PORT_MAP_SEL; + gswip_tag[3] = BIT(dp->index + GSWIP_TX_PORT_MAP_SHIFT) & GSWIP_TX_PORT_MAP_MASK; + gswip_tag[3] |= GSWIP_TX_DPID_EN; + + return skb; +} + +static struct sk_buff *gswip_tag_rcv(struct sk_buff *skb, + struct net_device *dev, + struct packet_type *pt) +{ + int port; + u8 *gswip_tag; + + if (unlikely(!pskb_may_pull(skb, GSWIP_RX_HEADER_LEN))) + return NULL; + + gswip_tag = skb->data - ETH_HLEN; + + /* Get source port information */ + port = (gswip_tag[7] & GSWIP_RX_SPPID_MASK) >> GSWIP_RX_SPPID_SHIFT; + skb->dev = dsa_master_find_slave(dev, 0, port); + if (!skb->dev) + return NULL; + + /* remove GSWIP tag */ + skb_pull_rcsum(skb, GSWIP_RX_HEADER_LEN); + + return skb; +} + +const struct dsa_device_ops gswip_netdev_ops = { + .xmit = gswip_tag_xmit, + .rcv = gswip_tag_rcv, +}; diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c index e7857a8ac86d..d14226ecfde4 100644 --- a/net/ieee802154/6lowpan/reassembly.c +++ b/net/ieee802154/6lowpan/reassembly.c @@ -260,7 +260,7 @@ static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev, } sub_frag_mem_limit(fq->q.net, sum_truesize); - head->next = NULL; + skb_mark_not_on_list(head); head->dev = ldev; head->tstamp = fq->q.stamp; @@ -463,7 +463,6 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net) table[0].data = &ieee802154_lowpan->frags.high_thresh; table[0].extra1 = &ieee802154_lowpan->frags.low_thresh; - table[0].extra2 = &init_net.ieee802154_lowpan.frags.high_thresh; table[1].data = &ieee802154_lowpan->frags.low_thresh; table[1].extra2 = &ieee802154_lowpan->frags.high_thresh; table[2].data = &ieee802154_lowpan->frags.timeout; diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c index 4dd95cdd8070..c01fa791260d 100644 --- a/net/ipv4/ah4.c +++ b/net/ipv4/ah4.c @@ -461,9 +461,9 @@ static int ah4_err(struct sk_buff *skb, u32 info) return 0; if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) - ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_AH, 0); + 
ipv4_update_pmtu(skb, net, info, 0, IPPROTO_AH); else - ipv4_redirect(skb, net, 0, 0, IPPROTO_AH, 0); + ipv4_redirect(skb, net, 0, IPPROTO_AH); xfrm_state_put(x); return 0; diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index e90c89ef8c08..850a6f13a082 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c @@ -1255,6 +1255,8 @@ static int arp_netdev_event(struct notifier_block *this, unsigned long event, change_info = ptr; if (change_info->flags_changed & IFF_NOARP) neigh_changeaddr(&arp_tbl, dev); + if (!netif_carrier_ok(dev)) + neigh_carrier_down(&arp_tbl, dev); break; default: break; diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c index 82178cc69c96..777fa3b7fb13 100644 --- a/net/ipv4/cipso_ipv4.c +++ b/net/ipv4/cipso_ipv4.c @@ -1512,7 +1512,7 @@ static int cipso_v4_parsetag_loc(const struct cipso_v4_doi *doi_def, * * Description: * Parse the packet's IP header looking for a CIPSO option. Returns a pointer - * to the start of the CIPSO option on success, NULL if one if not found. + * to the start of the CIPSO option on success, NULL if one is not found. * */ unsigned char *cipso_v4_optptr(const struct sk_buff *skb) @@ -1522,10 +1522,8 @@ unsigned char *cipso_v4_optptr(const struct sk_buff *skb) int optlen; int taglen; - for (optlen = iph->ihl*4 - sizeof(struct iphdr); optlen > 0; ) { + for (optlen = iph->ihl*4 - sizeof(struct iphdr); optlen > 1; ) { switch (optptr[0]) { - case IPOPT_CIPSO: - return optptr; case IPOPT_END: return NULL; case IPOPT_NOOP: @@ -1534,6 +1532,11 @@ unsigned char *cipso_v4_optptr(const struct sk_buff *skb) default: taglen = optptr[1]; } + if (!taglen || taglen > optlen) + return NULL; + if (optptr[0] == IPOPT_CIPSO) + return optptr; + optlen -= taglen; optptr += taglen; } diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c index f915abff1350..300921417f89 100644 --- a/net/ipv4/datagram.c +++ b/net/ipv4/datagram.c @@ -42,7 +42,7 @@ int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len oif = sk->sk_bound_dev_if; saddr = inet->inet_saddr; if (ipv4_is_multicast(usin->sin_addr.s_addr)) { - if (!oif) + if (!oif || netif_index_is_l3_master(sock_net(sk), oif)) oif = inet->mc_index; if (!saddr) saddr = inet->mc_addr; diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index ea4bd8a52422..d122ebbe5980 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -100,6 +100,15 @@ static const struct nla_policy ifa_ipv4_policy[IFA_MAX+1] = { [IFA_CACHEINFO] = { .len = sizeof(struct ifa_cacheinfo) }, [IFA_FLAGS] = { .type = NLA_U32 }, [IFA_RT_PRIORITY] = { .type = NLA_U32 }, + [IFA_TARGET_NETNSID] = { .type = NLA_S32 }, +}; + +struct inet_fill_args { + u32 portid; + u32 seq; + int event; + unsigned int flags; + int netnsid; }; #define IN4_ADDR_HSIZE_SHIFT 8 @@ -773,7 +782,8 @@ static void set_ifa_lifetime(struct in_ifaddr *ifa, __u32 valid_lft, } static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh, - __u32 *pvalid_lft, __u32 *pprefered_lft) + __u32 *pvalid_lft, __u32 *pprefered_lft, + struct netlink_ext_ack *extack) { struct nlattr *tb[IFA_MAX+1]; struct in_ifaddr *ifa; @@ -783,7 +793,7 @@ static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh, int err; err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv4_policy, - NULL); + extack); if (err < 0) goto errout; @@ -888,7 +898,7 @@ static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, ASSERT_RTNL(); - ifa = rtm_to_ifaddr(net, nlh, &valid_lft, &prefered_lft); + ifa = rtm_to_ifaddr(net, nlh, &valid_lft, &prefered_lft, extack); if 
(IS_ERR(ifa)) return PTR_ERR(ifa); @@ -1584,13 +1594,14 @@ static int put_cacheinfo(struct sk_buff *skb, unsigned long cstamp, } static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa, - u32 portid, u32 seq, int event, unsigned int flags) + struct inet_fill_args *args) { struct ifaddrmsg *ifm; struct nlmsghdr *nlh; u32 preferred, valid; - nlh = nlmsg_put(skb, portid, seq, event, sizeof(*ifm), flags); + nlh = nlmsg_put(skb, args->portid, args->seq, args->event, sizeof(*ifm), + args->flags); if (!nlh) return -EMSGSIZE; @@ -1601,6 +1612,10 @@ static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa, ifm->ifa_scope = ifa->ifa_scope; ifm->ifa_index = ifa->ifa_dev->dev->ifindex; + if (args->netnsid >= 0 && + nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid)) + goto nla_put_failure; + if (!(ifm->ifa_flags & IFA_F_PERMANENT)) { preferred = ifa->ifa_preferred_lft; valid = ifa->ifa_valid_lft; @@ -1645,9 +1660,71 @@ nla_put_failure: return -EMSGSIZE; } +static int inet_valid_dump_ifaddr_req(const struct nlmsghdr *nlh, + struct inet_fill_args *fillargs, + struct net **tgt_net, struct sock *sk, + struct netlink_ext_ack *extack) +{ + struct nlattr *tb[IFA_MAX+1]; + struct ifaddrmsg *ifm; + int err, i; + + if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) { + NL_SET_ERR_MSG(extack, "ipv4: Invalid header for address dump request"); + return -EINVAL; + } + + ifm = nlmsg_data(nlh); + if (ifm->ifa_prefixlen || ifm->ifa_flags || ifm->ifa_scope) { + NL_SET_ERR_MSG(extack, "ipv4: Invalid values in header for address dump request"); + return -EINVAL; + } + if (ifm->ifa_index) { + NL_SET_ERR_MSG(extack, "ipv4: Filter by device index not supported for address dump"); + return -EINVAL; + } + + err = nlmsg_parse_strict(nlh, sizeof(*ifm), tb, IFA_MAX, + ifa_ipv4_policy, extack); + if (err < 0) + return err; + + for (i = 0; i <= IFA_MAX; ++i) { + if (!tb[i]) + continue; + + if (i == IFA_TARGET_NETNSID) { + struct net *net; + + fillargs->netnsid = nla_get_s32(tb[i]); + + net = rtnl_get_net_ns_capable(sk, fillargs->netnsid); + if (IS_ERR(net)) { + NL_SET_ERR_MSG(extack, "ipv4: Invalid target network namespace id"); + return PTR_ERR(net); + } + *tgt_net = net; + } else { + NL_SET_ERR_MSG(extack, "ipv4: Unsupported attribute in dump request"); + return -EINVAL; + } + } + + return 0; +} + static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb) { + const struct nlmsghdr *nlh = cb->nlh; + struct inet_fill_args fillargs = { + .portid = NETLINK_CB(cb->skb).portid, + .seq = nlh->nlmsg_seq, + .event = RTM_NEWADDR, + .flags = NLM_F_MULTI, + .netnsid = -1, + }; struct net *net = sock_net(skb->sk); + struct net *tgt_net = net; int h, s_h; int idx, s_idx; int ip_idx, s_ip_idx; @@ -1660,12 +1737,21 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb) s_idx = idx = cb->args[1]; s_ip_idx = ip_idx = cb->args[2]; + if (cb->strict_check) { + int err; + + err = inet_valid_dump_ifaddr_req(nlh, &fillargs, &tgt_net, + skb->sk, cb->extack); + if (err < 0) + return err; + } + for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { idx = 0; - head = &net->dev_index_head[h]; + head = &tgt_net->dev_index_head[h]; rcu_read_lock(); - cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^ - net->dev_base_seq; + cb->seq = atomic_read(&tgt_net->ipv4.dev_addr_genid) ^ + tgt_net->dev_base_seq; hlist_for_each_entry_rcu(dev, head, index_hlist) { if (idx < s_idx) goto cont; @@ -1679,10 +1765,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb) ifa = 
ifa->ifa_next, ip_idx++) { if (ip_idx < s_ip_idx) continue; - if (inet_fill_ifaddr(skb, ifa, - NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, - RTM_NEWADDR, NLM_F_MULTI) < 0) { + if (inet_fill_ifaddr(skb, ifa, &fillargs) < 0) { rcu_read_unlock(); goto done; } @@ -1698,6 +1781,8 @@ done: cb->args[0] = h; cb->args[1] = idx; cb->args[2] = ip_idx; + if (fillargs.netnsid >= 0) + put_net(tgt_net); return skb->len; } @@ -1705,8 +1790,14 @@ done: static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh, u32 portid) { + struct inet_fill_args fillargs = { + .portid = portid, + .seq = nlh ? nlh->nlmsg_seq : 0, + .event = event, + .flags = 0, + .netnsid = -1, + }; struct sk_buff *skb; - u32 seq = nlh ? nlh->nlmsg_seq : 0; int err = -ENOBUFS; struct net *net; @@ -1715,7 +1806,7 @@ static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh, if (!skb) goto errout; - err = inet_fill_ifaddr(skb, ifa, portid, seq, event, 0); + err = inet_fill_ifaddr(skb, ifa, &fillargs); if (err < 0) { /* -EMSGSIZE implies BUG in inet_nlmsg_size() */ WARN_ON(err == -EMSGSIZE); @@ -1995,6 +2086,7 @@ errout: static int inet_netconf_dump_devconf(struct sk_buff *skb, struct netlink_callback *cb) { + const struct nlmsghdr *nlh = cb->nlh; struct net *net = sock_net(skb->sk); int h, s_h; int idx, s_idx; @@ -2002,6 +2094,21 @@ static int inet_netconf_dump_devconf(struct sk_buff *skb, struct in_device *in_dev; struct hlist_head *head; + if (cb->strict_check) { + struct netlink_ext_ack *extack = cb->extack; + struct netconfmsg *ncm; + + if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ncm))) { + NL_SET_ERR_MSG(extack, "ipv4: Invalid header for netconf dump request"); + return -EINVAL; + } + + if (nlmsg_attrlen(nlh, sizeof(*ncm))) { + NL_SET_ERR_MSG(extack, "ipv4: Invalid data after header in netconf dump request"); + return -EINVAL; + } + } + s_h = cb->args[0]; s_idx = idx = cb->args[1]; @@ -2021,7 +2128,7 @@ static int inet_netconf_dump_devconf(struct sk_buff *skb, if (inet_netconf_fill_devconf(skb, dev->ifindex, &in_dev->cnf, NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, + nlh->nlmsg_seq, RTM_NEWNETCONF, NLM_F_MULTI, NETCONFA_ALL) < 0) { @@ -2038,7 +2145,7 @@ cont: if (inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL, net->ipv4.devconf_all, NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, + nlh->nlmsg_seq, RTM_NEWNETCONF, NLM_F_MULTI, NETCONFA_ALL) < 0) goto done; @@ -2049,7 +2156,7 @@ cont: if (inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT, net->ipv4.devconf_dflt, NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, + nlh->nlmsg_seq, RTM_NEWNETCONF, NLM_F_MULTI, NETCONFA_ALL) < 0) goto done; diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index 97689012b357..9e1c840596c5 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c @@ -683,12 +683,11 @@ static void esp_input_done_esn(struct crypto_async_request *base, int err) */ static int esp_input(struct xfrm_state *x, struct sk_buff *skb) { - struct ip_esp_hdr *esph; struct crypto_aead *aead = x->data; struct aead_request *req; struct sk_buff *trailer; int ivlen = crypto_aead_ivsize(aead); - int elen = skb->len - sizeof(*esph) - ivlen; + int elen = skb->len - sizeof(struct ip_esp_hdr) - ivlen; int nfrags; int assoclen; int seqhilen; @@ -698,13 +697,13 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb) struct scatterlist *sg; int err = -EINVAL; - if (!pskb_may_pull(skb, sizeof(*esph) + ivlen)) + if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + ivlen)) goto out; if (elen <= 0) goto out; - assoclen = sizeof(*esph); + 
assoclen = sizeof(struct ip_esp_hdr); seqhilen = 0; if (x->props.flags & XFRM_STATE_ESN) { @@ -820,9 +819,9 @@ static int esp4_err(struct sk_buff *skb, u32 info) return 0; if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) - ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_ESP, 0); + ipv4_update_pmtu(skb, net, info, 0, IPPROTO_ESP); else - ipv4_redirect(skb, net, 0, 0, IPPROTO_ESP, 0); + ipv4_redirect(skb, net, 0, IPPROTO_ESP); xfrm_state_put(x); return 0; diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 0113993e9b2c..0f1beceb47d5 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -315,6 +315,32 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb) return inet_select_addr(dev, ip_hdr(skb)->saddr, scope); } +bool fib_info_nh_uses_dev(struct fib_info *fi, const struct net_device *dev) +{ + bool dev_match = false; +#ifdef CONFIG_IP_ROUTE_MULTIPATH + int ret; + + for (ret = 0; ret < fi->fib_nhs; ret++) { + struct fib_nh *nh = &fi->fib_nh[ret]; + + if (nh->nh_dev == dev) { + dev_match = true; + break; + } else if (l3mdev_master_ifindex_rcu(nh->nh_dev) == dev->ifindex) { + dev_match = true; + break; + } + } +#else + if (fi->fib_nh[0].nh_dev == dev) + dev_match = true; +#endif + + return dev_match; +} +EXPORT_SYMBOL_GPL(fib_info_nh_uses_dev); + /* Given (packet source, input interface) and optional (dst, oif, tos): * - (main) check, that source is valid i.e. not broadcast or our local * address. @@ -361,24 +387,8 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, (res.type != RTN_LOCAL || !IN_DEV_ACCEPT_LOCAL(idev))) goto e_inval; fib_combine_itag(itag, &res); - dev_match = false; - -#ifdef CONFIG_IP_ROUTE_MULTIPATH - for (ret = 0; ret < res.fi->fib_nhs; ret++) { - struct fib_nh *nh = &res.fi->fib_nh[ret]; - if (nh->nh_dev == dev) { - dev_match = true; - break; - } else if (l3mdev_master_ifindex_rcu(nh->nh_dev) == dev->ifindex) { - dev_match = true; - break; - } - } -#else - if (FIB_RES_DEV(res) == dev) - dev_match = true; -#endif + dev_match = fib_info_nh_uses_dev(res.fi, dev); if (dev_match) { ret = FIB_RES_NH(res).nh_scope >= RT_SCOPE_HOST; return ret; @@ -792,8 +802,40 @@ errout: return err; } +int ip_valid_fib_dump_req(const struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) +{ + struct rtmsg *rtm; + + if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) { + NL_SET_ERR_MSG(extack, "Invalid header for FIB dump request"); + return -EINVAL; + } + + rtm = nlmsg_data(nlh); + if (rtm->rtm_dst_len || rtm->rtm_src_len || rtm->rtm_tos || + rtm->rtm_table || rtm->rtm_protocol || rtm->rtm_scope || + rtm->rtm_type) { + NL_SET_ERR_MSG(extack, "Invalid values in header for FIB dump request"); + return -EINVAL; + } + if (rtm->rtm_flags & ~(RTM_F_CLONED | RTM_F_PREFIX)) { + NL_SET_ERR_MSG(extack, "Invalid flags for FIB dump request"); + return -EINVAL; + } + + if (nlmsg_attrlen(nlh, sizeof(*rtm))) { + NL_SET_ERR_MSG(extack, "Invalid data after header in FIB dump request"); + return -EINVAL; + } + + return 0; +} +EXPORT_SYMBOL_GPL(ip_valid_fib_dump_req); + static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb) { + const struct nlmsghdr *nlh = cb->nlh; struct net *net = sock_net(skb->sk); unsigned int h, s_h; unsigned int e = 0, s_e; @@ -801,8 +843,14 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb) struct hlist_head *head; int dumped = 0, err; - if (nlmsg_len(cb->nlh) >= sizeof(struct rtmsg) && - ((struct rtmsg *) nlmsg_data(cb->nlh))->rtm_flags & RTM_F_CLONED) + if (cb->strict_check) { + err = 
ip_valid_fib_dump_req(nlh, cb->extack); + if (err < 0) + return err; + } + + if (nlmsg_len(nlh) >= sizeof(struct rtmsg) && + ((struct rtmsg *)nlmsg_data(nlh))->rtm_flags & RTM_F_CLONED) return skb->len; s_h = cb->args[0]; diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index 446204ca7406..b5c3937ca6ec 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -208,7 +208,6 @@ static void rt_fibinfo_free_cpus(struct rtable __rcu * __percpu *rtp) static void free_fib_info_rcu(struct rcu_head *head) { struct fib_info *fi = container_of(head, struct fib_info, rcu); - struct dst_metrics *m; change_nexthops(fi) { if (nexthop_nh->nh_dev) @@ -219,9 +218,8 @@ static void free_fib_info_rcu(struct rcu_head *head) rt_fibinfo_free(&nexthop_nh->nh_rth_input); } endfor_nexthops(fi); - m = fi->fib_metrics; - if (m != &dst_default_metrics && refcount_dec_and_test(&m->refcnt)) - kfree(m); + ip_fib_metrics_put(fi->fib_metrics); + kfree(fi); } @@ -797,8 +795,10 @@ static int fib_check_nh(struct fib_config *cfg, struct fib_nh *nh, return -EINVAL; } dev = __dev_get_by_index(net, nh->nh_oif); - if (!dev) + if (!dev) { + NL_SET_ERR_MSG(extack, "Nexthop device required for onlink"); return -ENODEV; + } if (!(dev->flags & IFF_UP)) { NL_SET_ERR_MSG(extack, "Nexthop device is not up"); @@ -1018,13 +1018,6 @@ static bool fib_valid_prefsrc(struct fib_config *cfg, __be32 fib_prefsrc) return true; } -static int -fib_convert_metrics(struct fib_info *fi, const struct fib_config *cfg) -{ - return ip_metrics_convert(fi->fib_net, cfg->fc_mx, cfg->fc_mx_len, - fi->fib_metrics->metrics); -} - struct fib_info *fib_create_info(struct fib_config *cfg, struct netlink_ext_ack *extack) { @@ -1082,16 +1075,14 @@ struct fib_info *fib_create_info(struct fib_config *cfg, fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL); if (!fi) goto failure; - if (cfg->fc_mx) { - fi->fib_metrics = kzalloc(sizeof(*fi->fib_metrics), GFP_KERNEL); - if (unlikely(!fi->fib_metrics)) { - kfree(fi); - return ERR_PTR(err); - } - refcount_set(&fi->fib_metrics->refcnt, 1); - } else { - fi->fib_metrics = (struct dst_metrics *)&dst_default_metrics; + fi->fib_metrics = ip_fib_metrics_init(fi->fib_net, cfg->fc_mx, + cfg->fc_mx_len); + if (unlikely(IS_ERR(fi->fib_metrics))) { + err = PTR_ERR(fi->fib_metrics); + kfree(fi); + return ERR_PTR(err); } + fib_info_cnt++; fi->fib_net = net; fi->fib_protocol = cfg->fc_protocol; @@ -1110,10 +1101,6 @@ struct fib_info *fib_create_info(struct fib_config *cfg, goto failure; } endfor_nexthops(fi) - err = fib_convert_metrics(fi, cfg); - if (err) - goto failure; - if (cfg->fc_mp) { #ifdef CONFIG_IP_ROUTE_MULTIPATH err = fib_get_nhs(fi, cfg->fc_mp, cfg->fc_mp_len, cfg, extack); diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c index b798862b6be5..7efe740c06eb 100644 --- a/net/ipv4/gre_demux.c +++ b/net/ipv4/gre_demux.c @@ -86,13 +86,14 @@ int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi, options = (__be32 *)(greh + 1); if (greh->flags & GRE_CSUM) { - if (skb_checksum_simple_validate(skb)) { + if (!skb_checksum_simple_validate(skb)) { + skb_checksum_try_convert(skb, IPPROTO_GRE, 0, + null_compute_pseudo); + } else if (csum_err) { *csum_err = true; return -EINVAL; } - skb_checksum_try_convert(skb, IPPROTO_GRE, 0, - null_compute_pseudo); options++; } diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 695979b7ef6d..d832beed6e3a 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -1098,9 +1098,9 @@ void icmp_err(struct sk_buff *skb, u32 info) } if (type == 
ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) - ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_ICMP, 0); + ipv4_update_pmtu(skb, net, info, 0, IPPROTO_ICMP); else if (type == ICMP_REDIRECT) - ipv4_redirect(skb, net, 0, 0, IPPROTO_ICMP, 0); + ipv4_redirect(skb, net, 0, IPPROTO_ICMP); } /* diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index e7227128df2c..9b0158fa431f 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -260,8 +260,7 @@ out: spin_unlock(&qp->q.lock); out_rcu_unlock: rcu_read_unlock(); - if (head) - kfree_skb(head); + kfree_skb(head); ipq_put(qp); } @@ -382,7 +381,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb) */ if (end < qp->q.len || ((qp->q.flags & INET_FRAG_LAST_IN) && end != qp->q.len)) - goto err; + goto discard_qp; qp->q.flags |= INET_FRAG_LAST_IN; qp->q.len = end; } else { @@ -394,20 +393,20 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb) if (end > qp->q.len) { /* Some bits beyond end -> corruption. */ if (qp->q.flags & INET_FRAG_LAST_IN) - goto err; + goto discard_qp; qp->q.len = end; } } if (end == offset) - goto err; + goto discard_qp; err = -ENOMEM; if (!pskb_pull(skb, skb_network_offset(skb) + ihl)) - goto err; + goto discard_qp; err = pskb_trim_rcsum(skb, end - offset); if (err) - goto err; + goto discard_qp; /* Note : skb->rbnode and skb->dev share the same location. */ dev = skb->dev; @@ -423,6 +422,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb) * We do the same here for IPv4 (and increment an snmp counter). */ + err = -EINVAL; /* Find out where to put this fragment. */ prev_tail = qp->q.fragments_tail; if (!prev_tail) @@ -431,7 +431,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb) /* This is the common case: skb goes to the end. */ /* Detect and discard overlaps. */ if (offset < prev_tail->ip_defrag_offset + prev_tail->len) - goto discard_qp; + goto overlap; if (offset == prev_tail->ip_defrag_offset + prev_tail->len) ip4_frag_append_to_last_run(&qp->q, skb); else @@ -450,7 +450,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb) FRAG_CB(skb1)->frag_run_len) rbn = &parent->rb_right; else /* Found an overlap with skb1. */ - goto discard_qp; + goto overlap; } while (*rbn); /* Here we have parent properly set, and rbn pointing to * one of its NULL left/right children. Insert skb. 
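The skb_mark_not_on_list() conversions that recur through this series (net/core/sock.c and the 6lowpan reassembly hunk above, plus the ip_fragment.c, ip_input.c and ip_output.c hunks that follow) all replace an open-coded skb->next = NULL. The helper's definition is not part of this excerpt; a minimal sketch inferred from the call sites it replaces:

	/* Inferred shape of the helper; the in-tree definition (in
	 * include/linux/skbuff.h) may differ in detail.
	 */
	static inline void skb_mark_not_on_list(struct sk_buff *skb)
	{
		skb->next = NULL;	/* detach from any singly linked skb list */
	}

The related skb_list_del_init() used in ip_sublist_rcv_finish() additionally unlinks skb->list before clearing the pointer, covering the list_head-based queues.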
@@ -487,16 +487,18 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb) skb->_skb_refdst = 0UL; err = ip_frag_reasm(qp, skb, prev_tail, dev); skb->_skb_refdst = orefdst; + if (err) + inet_frag_kill(&qp->q); return err; } skb_dst_drop(skb); return -EINPROGRESS; +overlap: + __IP_INC_STATS(net, IPSTATS_MIB_REASM_OVERLAPS); discard_qp: inet_frag_kill(&qp->q); - err = -EINVAL; - __IP_INC_STATS(net, IPSTATS_MIB_REASM_OVERLAPS); err: kfree_skb(skb); return err; @@ -621,7 +623,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb, sub_frag_mem_limit(qp->q.net, head->truesize); *nextp = NULL; - head->next = NULL; + skb_mark_not_on_list(head); head->prev = NULL; head->dev = dev; head->tstamp = qp->q.stamp; @@ -820,7 +822,6 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net) table[0].data = &net->ipv4.frags.high_thresh; table[0].extra1 = &net->ipv4.frags.low_thresh; - table[0].extra2 = &init_net.ipv4.frags.high_thresh; table[1].data = &net->ipv4.frags.low_thresh; table[1].extra2 = &net->ipv4.frags.high_thresh; table[2].data = &net->ipv4.frags.timeout; diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 8cce0e9ea08c..38befe829caf 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -232,22 +232,19 @@ static void gre_err(struct sk_buff *skb, u32 info) const int type = icmp_hdr(skb)->type; const int code = icmp_hdr(skb)->code; struct tnl_ptk_info tpi; - bool csum_err = false; - if (gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP), - iph->ihl * 4) < 0) { - if (!csum_err) /* ignore csum errors. */ - return; - } + if (gre_parse_header(skb, &tpi, NULL, htons(ETH_P_IP), + iph->ihl * 4) < 0) + return; if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) { ipv4_update_pmtu(skb, dev_net(skb->dev), info, - skb->dev->ifindex, 0, IPPROTO_GRE, 0); + skb->dev->ifindex, IPPROTO_GRE); return; } if (type == ICMP_REDIRECT) { - ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex, 0, - IPPROTO_GRE, 0); + ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex, + IPPROTO_GRE); return; } diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index 3196cf58f418..35a786c0aaa0 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c @@ -531,11 +531,7 @@ static void ip_sublist_rcv_finish(struct list_head *head) struct sk_buff *skb, *next; list_for_each_entry_safe(skb, next, head, list) { - list_del(&skb->list); - /* Handle ip{6}_forward case, as sch_direct_xmit have - * another kind of SKB-list usage (see validate_xmit_skb_list) - */ - skb->next = NULL; + skb_list_del_init(skb); dst_input(skb); } } diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 9c4e72e9c60a..c09219e7f230 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -278,7 +278,7 @@ static int ip_finish_output_gso(struct net *net, struct sock *sk, struct sk_buff *nskb = segs->next; int err; - segs->next = NULL; + skb_mark_not_on_list(segs); err = ip_fragment(net, sk, segs, mtu, ip_finish_output2); if (err && ret == 0) @@ -684,7 +684,7 @@ int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, skb = frag; frag = skb->next; - skb->next = NULL; + skb_mark_not_on_list(skb); } if (err == 0) { diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c index f38cb21d773d..de31b302d69c 100644 --- a/net/ipv4/ip_vti.c +++ b/net/ipv4/ip_vti.c @@ -318,9 +318,9 @@ static int vti4_err(struct sk_buff *skb, u32 info) return 0; if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) - ipv4_update_pmtu(skb, net, info, 0, 0, protocol, 0); + ipv4_update_pmtu(skb, net, info, 0, protocol); else - 
ipv4_redirect(skb, net, 0, 0, protocol, 0); + ipv4_redirect(skb, net, 0, protocol); xfrm_state_put(x); return 0; diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c index d97f4f2787f5..9119d012ba46 100644 --- a/net/ipv4/ipcomp.c +++ b/net/ipv4/ipcomp.c @@ -48,9 +48,9 @@ static int ipcomp4_err(struct sk_buff *skb, u32 info) return 0; if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) - ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_COMP, 0); + ipv4_update_pmtu(skb, net, info, 0, IPPROTO_COMP); else - ipv4_redirect(skb, net, 0, 0, IPPROTO_COMP, 0); + ipv4_redirect(skb, net, 0, IPPROTO_COMP); xfrm_state_put(x); return 0; diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index c891235b4966..e65287c27e3d 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c @@ -175,13 +175,12 @@ static int ipip_err(struct sk_buff *skb, u32 info) } if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) { - ipv4_update_pmtu(skb, net, info, t->parms.link, 0, - iph->protocol, 0); + ipv4_update_pmtu(skb, net, info, t->parms.link, iph->protocol); goto out; } if (type == ICMP_REDIRECT) { - ipv4_redirect(skb, net, t->parms.link, 0, iph->protocol, 0); + ipv4_redirect(skb, net, t->parms.link, iph->protocol); goto out; } diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 5660adcf7a04..91b0d5671649 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -2527,6 +2527,13 @@ errout_free: static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb) { + if (cb->strict_check) { + int err = ip_valid_fib_dump_req(cb->nlh, cb->extack); + + if (err < 0) + return err; + } + return mr_rtm_dumproute(skb, cb, ipmr_mr_table_iter, _ipmr_fill_mroute, &mfc_unres_lock); } @@ -2710,6 +2717,31 @@ static bool ipmr_fill_vif(struct mr_table *mrt, u32 vifid, struct sk_buff *skb) return true; } +static int ipmr_valid_dumplink(const struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) +{ + struct ifinfomsg *ifm; + + if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) { + NL_SET_ERR_MSG(extack, "ipv4: Invalid header for ipmr link dump"); + return -EINVAL; + } + + if (nlmsg_attrlen(nlh, sizeof(*ifm))) { + NL_SET_ERR_MSG(extack, "Invalid data after header in ipmr link dump"); + return -EINVAL; + } + + ifm = nlmsg_data(nlh); + if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags || + ifm->ifi_change || ifm->ifi_index) { + NL_SET_ERR_MSG(extack, "Invalid values in header for ipmr link dump request"); + return -EINVAL; + } + + return 0; +} + static int ipmr_rtm_dumplink(struct sk_buff *skb, struct netlink_callback *cb) { struct net *net = sock_net(skb->sk); @@ -2718,6 +2750,13 @@ static int ipmr_rtm_dumplink(struct sk_buff *skb, struct netlink_callback *cb) unsigned int e = 0, s_e; struct mr_table *mrt; + if (cb->strict_check) { + int err = ipmr_valid_dumplink(cb->nlh, cb->extack); + + if (err < 0) + return err; + } + s_t = cb->args[0]; s_e = cb->args[1]; diff --git a/net/ipv4/metrics.c b/net/ipv4/metrics.c index 04311f7067e2..6d218f5a2e71 100644 --- a/net/ipv4/metrics.c +++ b/net/ipv4/metrics.c @@ -5,8 +5,8 @@ #include <net/net_namespace.h> #include <net/tcp.h> -int ip_metrics_convert(struct net *net, struct nlattr *fc_mx, int fc_mx_len, - u32 *metrics) +static int ip_metrics_convert(struct net *net, struct nlattr *fc_mx, + int fc_mx_len, u32 *metrics) { bool ecn_ca = false; struct nlattr *nla; @@ -52,4 +52,28 @@ int ip_metrics_convert(struct net *net, struct nlattr *fc_mx, int fc_mx_len, return 0; } -EXPORT_SYMBOL_GPL(ip_metrics_convert); + +struct dst_metrics *ip_fib_metrics_init(struct net *net, struct nlattr *fc_mx, + int fc_mx_len) +{ 
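+	/* Returns &dst_default_metrics when no metrics attribute is supplied,
+	 * an ERR_PTR() on parse or allocation failure, and otherwise a
+	 * refcounted dst_metrics block that the caller eventually drops via
+	 * ip_fib_metrics_put().
+	 */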
+ struct dst_metrics *fib_metrics; + int err; + + if (!fc_mx) + return (struct dst_metrics *)&dst_default_metrics; + + fib_metrics = kzalloc(sizeof(*fib_metrics), GFP_KERNEL); + if (unlikely(!fib_metrics)) + return ERR_PTR(-ENOMEM); + + err = ip_metrics_convert(net, fc_mx, fc_mx_len, fib_metrics->metrics); + if (!err) { + refcount_set(&fib_metrics->refcnt, 1); + } else { + kfree(fib_metrics); + fib_metrics = ERR_PTR(err); + } + + return fib_metrics; +} +EXPORT_SYMBOL_GPL(ip_fib_metrics_init); diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c index 12843c9ef142..0b10d8812828 100644 --- a/net/ipv4/netfilter/ipt_rpfilter.c +++ b/net/ipv4/netfilter/ipt_rpfilter.c @@ -36,7 +36,6 @@ static bool rpfilter_lookup_reverse(struct net *net, struct flowi4 *fl4, const struct net_device *dev, u8 flags) { struct fib_result res; - bool dev_match; int ret __maybe_unused; if (fib_lookup(net, fl4, &res, FIB_LOOKUP_IGNORE_LINKSTATE)) @@ -46,21 +45,7 @@ static bool rpfilter_lookup_reverse(struct net *net, struct flowi4 *fl4, if (res.type != RTN_LOCAL || !(flags & XT_RPFILTER_ACCEPT_LOCAL)) return false; } - dev_match = false; -#ifdef CONFIG_IP_ROUTE_MULTIPATH - for (ret = 0; ret < res.fi->fib_nhs; ret++) { - struct fib_nh *nh = &res.fi->fib_nh[ret]; - - if (nh->nh_dev == dev) { - dev_match = true; - break; - } - } -#else - if (FIB_RES_DEV(res) == dev) - dev_match = true; -#endif - return dev_match || flags & XT_RPFILTER_LOOSE; + return fib_info_nh_uses_dev(res.fi, dev) || flags & XT_RPFILTER_LOOSE; } static bool diff --git a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c index 6115bf1ff6f0..78a67f961d86 100644 --- a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c +++ b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c @@ -264,7 +264,6 @@ nf_nat_ipv4_fn(void *priv, struct sk_buff *skb, return nf_nat_inet_fn(priv, skb, state); } -EXPORT_SYMBOL_GPL(nf_nat_ipv4_fn); static unsigned int nf_nat_ipv4_in(void *priv, struct sk_buff *skb, diff --git a/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c b/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c index ad3aeff152ed..a9d5e013e555 100644 --- a/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c +++ b/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c @@ -104,12 +104,26 @@ static int masq_device_event(struct notifier_block *this, return NOTIFY_DONE; } +static int inet_cmp(struct nf_conn *ct, void *ptr) +{ + struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; + struct net_device *dev = ifa->ifa_dev->dev; + struct nf_conntrack_tuple *tuple; + + if (!device_cmp(ct, (void *)(long)dev->ifindex)) + return 0; + + tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple; + + return ifa->ifa_address == tuple->dst.u3.ip; +} + static int masq_inet_event(struct notifier_block *this, unsigned long event, void *ptr) { struct in_device *idev = ((struct in_ifaddr *)ptr)->ifa_dev; - struct netdev_notifier_info info; + struct net *net = dev_net(idev->dev); /* The masq_dev_notifier will catch the case of the device going * down. 
So if the inetdev is dead and being destroyed we have @@ -119,8 +133,10 @@ static int masq_inet_event(struct notifier_block *this, if (idev->dead) return NOTIFY_DONE; - netdev_notifier_info_init(&info, idev->dev); - return masq_device_event(this, event, &info); + if (event == NETDEV_DOWN) + nf_ct_iterate_cleanup_net(net, inet_cmp, ptr, 0, 0); + + return NOTIFY_DONE; } static struct notifier_block masq_dev_notifier = { diff --git a/net/ipv4/netfilter/nft_fib_ipv4.c b/net/ipv4/netfilter/nft_fib_ipv4.c index e50976e3c213..94eb25bc8d7e 100644 --- a/net/ipv4/netfilter/nft_fib_ipv4.c +++ b/net/ipv4/netfilter/nft_fib_ipv4.c @@ -76,10 +76,7 @@ void nft_fib4_eval(const struct nft_expr *expr, struct nft_regs *regs, .flowi4_iif = LOOPBACK_IFINDEX, }; const struct net_device *oif; - struct net_device *found; -#ifdef CONFIG_IP_ROUTE_MULTIPATH - int i; -#endif + const struct net_device *found; /* * Do not set flowi4_oif, it restricts results (for example, asking @@ -146,25 +143,13 @@ void nft_fib4_eval(const struct nft_expr *expr, struct nft_regs *regs, if (!oif) { found = FIB_RES_DEV(res); - goto ok; - } - -#ifdef CONFIG_IP_ROUTE_MULTIPATH - for (i = 0; i < res.fi->fib_nhs; i++) { - struct fib_nh *nh = &res.fi->fib_nh[i]; + } else { + if (!fib_info_nh_uses_dev(res.fi, oif)) + return; - if (nh->nh_dev == oif) { - found = nh->nh_dev; - goto ok; - } + found = oif; } - return; -#else - found = FIB_RES_DEV(res); - if (found != oif) - return; -#endif -ok: + switch (priv->result) { case NFT_FIB_RESULT_OIF: *dest = found->ifindex; diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index 8d7aaf118a30..7ccb5f87f70b 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c @@ -779,7 +779,7 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) } if (ipv4_is_multicast(daddr)) { - if (!ipc.oif) + if (!ipc.oif || netif_index_is_l3_master(sock_net(sk), ipc.oif)) ipc.oif = inet->mc_index; if (!saddr) saddr = inet->mc_addr; diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index 33df4d76db2d..8ca3eb06ba04 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -608,7 +608,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) tos |= RTO_ONLINK; if (ipv4_is_multicast(daddr)) { - if (!ipc.oif) + if (!ipc.oif || netif_index_is_l3_master(sock_net(sk), ipc.oif)) ipc.oif = inet->mc_index; if (!saddr) saddr = inet->mc_addr; diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 8501554e96a4..c0a9d26c06ce 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -1041,17 +1041,15 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk, } void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu, - int oif, u32 mark, u8 protocol, int flow_flags) + int oif, u8 protocol) { const struct iphdr *iph = (const struct iphdr *) skb->data; struct flowi4 fl4; struct rtable *rt; - - if (!mark) - mark = IP4_REPLY_MARK(net, skb->mark); + u32 mark = IP4_REPLY_MARK(net, skb->mark); __build_flow_key(net, &fl4, NULL, iph, oif, - RT_TOS(iph->tos), protocol, mark, flow_flags); + RT_TOS(iph->tos), protocol, mark, 0); rt = __ip_route_output_key(net, &fl4); if (!IS_ERR(rt)) { __ip_rt_update_pmtu(rt, &fl4, mtu); @@ -1133,14 +1131,14 @@ out: EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu); void ipv4_redirect(struct sk_buff *skb, struct net *net, - int oif, u32 mark, u8 protocol, int flow_flags) + int oif, u8 protocol) { const struct iphdr *iph = (const struct iphdr *) skb->data; struct flowi4 fl4; struct rtable *rt; __build_flow_key(net, &fl4, NULL, iph, oif, - RT_TOS(iph->tos), protocol, mark, flow_flags); + 
RT_TOS(iph->tos), protocol, 0, 0); rt = __ip_route_output_key(net, &fl4); if (!IS_ERR(rt)) { __ip_do_redirect(rt, skb, &fl4, false); @@ -1220,18 +1218,15 @@ void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt) src = ip_hdr(skb)->saddr; else { struct fib_result res; - struct flowi4 fl4; - struct iphdr *iph; - - iph = ip_hdr(skb); - - memset(&fl4, 0, sizeof(fl4)); - fl4.daddr = iph->daddr; - fl4.saddr = iph->saddr; - fl4.flowi4_tos = RT_TOS(iph->tos); - fl4.flowi4_oif = rt->dst.dev->ifindex; - fl4.flowi4_iif = skb->dev->ifindex; - fl4.flowi4_mark = skb->mark; + struct iphdr *iph = ip_hdr(skb); + struct flowi4 fl4 = { + .daddr = iph->daddr, + .saddr = iph->saddr, + .flowi4_tos = RT_TOS(iph->tos), + .flowi4_oif = rt->dst.dev->ifindex, + .flowi4_iif = skb->dev->ifindex, + .flowi4_mark = skb->mark, + }; rcu_read_lock(); if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res, 0) == 0) @@ -1482,12 +1477,9 @@ void rt_del_uncached_list(struct rtable *rt) static void ipv4_dst_destroy(struct dst_entry *dst) { - struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst); struct rtable *rt = (struct rtable *)dst; - if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt)) - kfree(p); - + ip_dst_metrics_put(dst); rt_del_uncached_list(rt); } @@ -1534,11 +1526,8 @@ static void rt_set_nexthop(struct rtable *rt, __be32 daddr, rt->rt_gateway = nh->nh_gw; rt->rt_uses_gateway = 1; } - dst_init_metrics(&rt->dst, fi->fib_metrics->metrics, true); - if (fi->fib_metrics != &dst_default_metrics) { - rt->dst._metrics |= DST_METRICS_REFCOUNTED; - refcount_inc(&fi->fib_metrics->refcnt); - } + ip_dst_init_metrics(&rt->dst, fi->fib_metrics); + #ifdef CONFIG_IP_ROUTE_CLASSID rt->dst.tclassid = nh->nh_tclassid; #endif @@ -2786,7 +2775,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, struct rtable *rt = NULL; struct sk_buff *skb; struct rtmsg *rtm; - struct flowi4 fl4; + struct flowi4 fl4 = {}; __be32 dst = 0; __be32 src = 0; kuid_t uid; @@ -2826,7 +2815,6 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, if (!skb) return -ENOBUFS; - memset(&fl4, 0, sizeof(fl4)); fl4.daddr = dst; fl4.saddr = src; fl4.flowi4_tos = rtm->rtm_tos; diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index c3387dfd725b..606f868d9f3f 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c @@ -88,7 +88,7 @@ u64 cookie_init_timestamp(struct request_sock *req) ts <<= TSBITS; ts |= options; } - return (u64)ts * (USEC_PER_SEC / TCP_TS_HZ); + return (u64)ts * (NSEC_PER_SEC / TCP_TS_HZ); } diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 10c6246396cc..43ef83b2330e 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1295,7 +1295,7 @@ new_segment: copy = size_goal; /* All packets are restored as if they have - * already been sent. skb_mstamp isn't set to + * already been sent. skb_mstamp_ns isn't set to * avoid wrong rtt estimation. 
*/ if (tp->repair) @@ -1753,6 +1753,7 @@ static int tcp_zerocopy_receive(struct sock *sk, struct vm_area_struct *vma; struct sk_buff *skb = NULL; struct tcp_sock *tp; + int inq; int ret; if (address & (PAGE_SIZE - 1) || address != zc->address) @@ -1773,12 +1774,15 @@ static int tcp_zerocopy_receive(struct sock *sk, tp = tcp_sk(sk); seq = tp->copied_seq; - zc->length = min_t(u32, zc->length, tcp_inq(sk)); + inq = tcp_inq(sk); + zc->length = min_t(u32, zc->length, inq); zc->length &= ~(PAGE_SIZE - 1); - - zap_page_range(vma, address, zc->length); - - zc->recv_skip_hint = 0; + if (zc->length) { + zap_page_range(vma, address, zc->length); + zc->recv_skip_hint = 0; + } else { + zc->recv_skip_hint = inq; + } ret = 0; while (length + PAGE_SIZE <= zc->length) { if (zc->recv_skip_hint < PAGE_SIZE) { @@ -1801,8 +1805,17 @@ static int tcp_zerocopy_receive(struct sock *sk, frags++; } } - if (frags->size != PAGE_SIZE || frags->page_offset) + if (frags->size != PAGE_SIZE || frags->page_offset) { + int remaining = zc->recv_skip_hint; + + while (remaining && (frags->size != PAGE_SIZE || + frags->page_offset)) { + remaining -= frags->size; + frags++; + } + zc->recv_skip_hint -= remaining; break; + } ret = vm_insert_page(vma, address + length, skb_frag_page(frags)); if (ret) @@ -2403,16 +2416,10 @@ adjudge_to_death: sock_hold(sk); sock_orphan(sk); - /* It is the last release_sock in its life. It will remove backlog. */ - release_sock(sk); - - - /* Now socket is owned by kernel and we acquire BH lock - * to finish close. No need to check for user refs. - */ local_bh_disable(); bh_lock_sock(sk); - WARN_ON(sock_owned_by_user(sk)); + /* remove backlog if any, without releasing ownership. */ + __release_sock(sk); percpu_counter_inc(sk->sk_prot->orphan_count); @@ -2481,6 +2488,7 @@ adjudge_to_death: out: bh_unlock_sock(sk); local_bh_enable(); + release_sock(sk); sock_put(sk); } EXPORT_SYMBOL(tcp_close); @@ -2595,6 +2603,8 @@ int tcp_disconnect(struct sock *sk, int flags) tp->compressed_ack = 0; tp->bytes_sent = 0; tp->bytes_retrans = 0; + tp->duplicate_sack[0].start_seq = 0; + tp->duplicate_sack[0].end_seq = 0; tp->dsack_dups = 0; tp->reord_seen = 0; @@ -3894,8 +3904,8 @@ void __init tcp_init(void) init_net.ipv4.sysctl_tcp_wmem[2] = max(64*1024, max_wshare); init_net.ipv4.sysctl_tcp_rmem[0] = SK_MEM_QUANTUM; - init_net.ipv4.sysctl_tcp_rmem[1] = 87380; - init_net.ipv4.sysctl_tcp_rmem[2] = max(87380, max_rshare); + init_net.ipv4.sysctl_tcp_rmem[1] = 131072; + init_net.ipv4.sysctl_tcp_rmem[2] = max(131072, max_rshare); pr_info("Hash tables configured (established %u bind %u)\n", tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size); diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c index 02ff2dde9609..a5786e3e2c16 100644 --- a/net/ipv4/tcp_bbr.c +++ b/net/ipv4/tcp_bbr.c @@ -128,6 +128,9 @@ static const u32 bbr_probe_rtt_mode_ms = 200; /* Skip TSO below the following bandwidth (bits/sec): */ static const int bbr_min_tso_rate = 1200000; +/* Pace at ~1% below estimated bw, on average, to reduce queue at bottleneck. 
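+ * E.g. with the default margin of 1, bbr_rate_bytes_per_sec() below
+ * scales the raw bandwidth-derived rate by (100 - 1)/100 = 99%.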
*/ +static const int bbr_pacing_margin_percent = 1; + /* We use a high_gain value of 2/ln(2) because it's the smallest pacing gain * that will allow a smoothly increasing pacing rate that will double each RTT * and send the same number of packets per RTT that an un-paced, slow-starting * connection can achieve @@ -208,12 +211,10 @@ static u64 bbr_rate_bytes_per_sec(struct sock *sk, u64 rate, int gain) { unsigned int mss = tcp_sk(sk)->mss_cache; - if (!tcp_needs_internal_pacing(sk)) - mss = tcp_mss_to_mtu(sk, mss); rate *= mss; rate *= gain; rate >>= BBR_SCALE; - rate *= USEC_PER_SEC; + rate *= USEC_PER_SEC / 100 * (100 - bbr_pacing_margin_percent); return rate >> BW_SCALE; } diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c index ca61e2a659e7..cd4814f7e962 100644 --- a/net/ipv4/tcp_dctcp.c +++ b/net/ipv4/tcp_dctcp.c @@ -44,6 +44,7 @@ #include <linux/mm.h> #include <net/tcp.h> #include <linux/inet_diag.h> +#include "tcp_dctcp.h" #define DCTCP_MAX_ALPHA 1024U @@ -118,54 +119,6 @@ static u32 dctcp_ssthresh(struct sock *sk) return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->dctcp_alpha) >> 11U), 2U); } -/* Minimal DCTP CE state machine: - * - * S: 0 <- last pkt was non-CE - * 1 <- last pkt was CE - */ - -static void dctcp_ce_state_0_to_1(struct sock *sk) -{ - struct dctcp *ca = inet_csk_ca(sk); - struct tcp_sock *tp = tcp_sk(sk); - - if (!ca->ce_state) { - /* State has changed from CE=0 to CE=1, force an immediate - * ACK to reflect the new CE state. If an ACK was delayed, - * send that first to reflect the prior CE state. - */ - if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) - __tcp_send_ack(sk, ca->prior_rcv_nxt); - inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW; - } - - ca->prior_rcv_nxt = tp->rcv_nxt; - ca->ce_state = 1; - - tp->ecn_flags |= TCP_ECN_DEMAND_CWR; -} - -static void dctcp_ce_state_1_to_0(struct sock *sk) -{ - struct dctcp *ca = inet_csk_ca(sk); - struct tcp_sock *tp = tcp_sk(sk); - - if (ca->ce_state) { - /* State has changed from CE=1 to CE=0, force an immediate - * ACK to reflect the new CE state. If an ACK was delayed, - * send that first to reflect the prior CE state. - */ - if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) - __tcp_send_ack(sk, ca->prior_rcv_nxt); - inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW; - } - - ca->prior_rcv_nxt = tp->rcv_nxt; - ca->ce_state = 0; - - tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR; -} - static void dctcp_update_alpha(struct sock *sk, u32 flags) { const struct tcp_sock *tp = tcp_sk(sk); @@ -230,12 +183,12 @@ static void dctcp_state(struct sock *sk, u8 new_state) static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev) { + struct dctcp *ca = inet_csk_ca(sk); + switch (ev) { case CA_EVENT_ECN_IS_CE: - dctcp_ce_state_0_to_1(sk); - break; case CA_EVENT_ECN_NO_CE: - dctcp_ce_state_1_to_0(sk); + dctcp_ece_ack_update(sk, ev, &ca->prior_rcv_nxt, &ca->ce_state); break; default: /* Don't care for the rest. 
*/ diff --git a/net/ipv4/tcp_dctcp.h b/net/ipv4/tcp_dctcp.h new file mode 100644 index 000000000000..d69a77cbd0c7 --- /dev/null +++ b/net/ipv4/tcp_dctcp.h @@ -0,0 +1,40 @@ +#ifndef _TCP_DCTCP_H +#define _TCP_DCTCP_H + +static inline void dctcp_ece_ack_cwr(struct sock *sk, u32 ce_state) +{ + struct tcp_sock *tp = tcp_sk(sk); + + if (ce_state == 1) + tp->ecn_flags |= TCP_ECN_DEMAND_CWR; + else + tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR; +} + +/* Minimal DCTCP CE state machine: + * + * S: 0 <- last pkt was non-CE + * 1 <- last pkt was CE + */ +static inline void dctcp_ece_ack_update(struct sock *sk, enum tcp_ca_event evt, + u32 *prior_rcv_nxt, u32 *ce_state) +{ + u32 new_ce_state = (evt == CA_EVENT_ECN_IS_CE) ? 1 : 0; + + if (*ce_state != new_ce_state) { + /* CE state has changed, force an immediate ACK to + * reflect the new CE state. If an ACK was delayed, + * send that first to reflect the prior CE state. + */ + if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) { + dctcp_ece_ack_cwr(sk, *ce_state); + __tcp_send_ack(sk, *prior_rcv_nxt); + } + inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW; + } + *prior_rcv_nxt = tcp_sk(sk)->rcv_nxt; + *ce_state = new_ce_state; + dctcp_ece_ack_cwr(sk, new_ce_state); +} + +#endif diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 47e08c1b5bc3..188980c58f87 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -426,26 +426,7 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb) } } -/* 3. Tuning rcvbuf, when connection enters established state. */ -static void tcp_fixup_rcvbuf(struct sock *sk) -{ - u32 mss = tcp_sk(sk)->advmss; - int rcvmem; - - rcvmem = 2 * SKB_TRUESIZE(mss + MAX_TCP_HEADER) * - tcp_default_init_rwnd(mss); - - /* Dynamic Right Sizing (DRS) has 2 to 3 RTT latency - * Allow enough cushion so that sender is not limited by our window - */ - if (sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf) - rcvmem <<= 2; - - if (sk->sk_rcvbuf < rcvmem) - sk->sk_rcvbuf = min(rcvmem, sock_net(sk)->ipv4.sysctl_tcp_rmem[2]); -} - -/* 4. Try to fixup all. It is made immediately after connection enters +/* 3. Try to fixup all. It is made immediately after connection enters * established state. */ void tcp_init_buffer_space(struct sock *sk) @@ -454,12 +435,10 @@ void tcp_init_buffer_space(struct sock *sk) struct tcp_sock *tp = tcp_sk(sk); int maxwin; - if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) - tcp_fixup_rcvbuf(sk); if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) tcp_sndbuf_expand(sk); - tp->rcvq_space.space = tp->rcv_wnd; + tp->rcvq_space.space = min_t(u32, tp->rcv_wnd, TCP_INIT_CWND * tp->advmss); tcp_mstamp_refresh(tp); tp->rcvq_space.time = tp->tcp_mstamp; tp->rcvq_space.seq = tp->copied_seq; @@ -485,7 +464,7 @@ void tcp_init_buffer_space(struct sock *sk) tp->snd_cwnd_stamp = tcp_jiffies32; } -/* 5. Recalculate window clamp after socket hit its memory bounds. */ +/* 4. Recalculate window clamp after socket hit its memory bounds. 
*/ static void tcp_clamp_window(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); @@ -1305,7 +1284,7 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *prev, */ tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked, start_seq, end_seq, dup_sack, pcount, - skb->skb_mstamp); + tcp_skb_timestamp_us(skb)); tcp_rate_skb_delivered(sk, skb, state->rate); if (skb == tp->lost_skb_hint) @@ -1580,7 +1559,7 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk, TCP_SKB_CB(skb)->end_seq, dup_sack, tcp_skb_pcount(skb), - skb->skb_mstamp); + tcp_skb_timestamp_us(skb)); tcp_rate_skb_delivered(sk, skb, state->rate); if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) list_del_init(&skb->tcp_tsorted_anchor); @@ -3103,7 +3082,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack, tp->retrans_out -= acked_pcount; flag |= FLAG_RETRANS_DATA_ACKED; } else if (!(sacked & TCPCB_SACKED_ACKED)) { - last_ackt = skb->skb_mstamp; + last_ackt = tcp_skb_timestamp_us(skb); WARN_ON_ONCE(last_ackt == 0); if (!first_ackt) first_ackt = last_ackt; @@ -3121,7 +3100,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack, tp->delivered += acked_pcount; if (!tcp_skb_spurious_retrans(tp, skb)) tcp_rack_advance(tp, sacked, scb->end_seq, - skb->skb_mstamp); + tcp_skb_timestamp_us(skb)); } if (sacked & TCPCB_LOST) tp->lost_out -= acked_pcount; @@ -3215,7 +3194,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack, tp->lost_cnt_hint -= min(tp->lost_cnt_hint, delta); } } else if (skb && rtt_update && sack_rtt_us >= 0 && - sack_rtt_us > tcp_stamp_us_delta(tp->tcp_mstamp, skb->skb_mstamp)) { + sack_rtt_us > tcp_stamp_us_delta(tp->tcp_mstamp, + tcp_skb_timestamp_us(skb))) { /* Do not re-arm RTO if the sack RTT is measured from data sent * after when the head was last (re)transmitted. Otherwise the * timeout may continue to extend in loss recovery. @@ -4199,6 +4179,17 @@ static void tcp_dsack_extend(struct sock *sk, u32 seq, u32 end_seq) tcp_sack_extend(tp->duplicate_sack, seq, end_seq); } +static void tcp_rcv_spurious_retrans(struct sock *sk, const struct sk_buff *skb) +{ + /* When the ACK path fails or drops most ACKs, the sender would + * timeout and spuriously retransmit the same segment repeatedly. + * The receiver remembers and reflects via DSACKs. Leverage the + * DSACK state and change the txhash to re-route speculatively. + */ + if (TCP_SKB_CB(skb)->seq == tcp_sk(sk)->duplicate_sack[0].start_seq) + sk_rethink_txhash(sk); +} + static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); @@ -4211,6 +4202,7 @@ static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb) if (tcp_is_sack(tp) && sock_net(sk)->ipv4.sysctl_tcp_dsack) { u32 end_seq = TCP_SKB_CB(skb)->end_seq; + tcp_rcv_spurious_retrans(sk, skb); if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) end_seq = tp->rcv_nxt; tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, end_seq); @@ -4755,6 +4747,7 @@ queue_and_out: } if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) { + tcp_rcv_spurious_retrans(sk, skb); /* A retransmit, 2nd most common case. Force an immediate ack. 
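 * (tcp_rcv_spurious_retrans() above may additionally rehash the flow's
 * txhash when the retransmit matches the recorded DSACK state.)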
*/ NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST); tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index cd426313a298..de47038afdf0 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -544,7 +544,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info) BUG_ON(!skb); tcp_mstamp_refresh(tp); - delta_us = (u32)(tp->tcp_mstamp - skb->skb_mstamp); + delta_us = (u32)(tp->tcp_mstamp - tcp_skb_timestamp_us(skb)); remaining = icsk->icsk_rto - usecs_to_jiffies(delta_us); @@ -2551,7 +2551,7 @@ static int __net_init tcp_sk_init(struct net *net) net->ipv4.sysctl_tcp_tw_reuse = 2; cnt = tcp_hashinfo.ehash_mask + 1; - net->ipv4.tcp_death_row.sysctl_max_tw_buckets = (cnt + 1) / 2; + net->ipv4.tcp_death_row.sysctl_max_tw_buckets = cnt / 2; net->ipv4.tcp_death_row.hashinfo = &tcp_hashinfo; net->ipv4.sysctl_max_syn_backlog = max(128, cnt / 256); diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 597dbd749f05..059b67af28b1 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -45,6 +45,22 @@ #include <trace/events/tcp.h> +/* Refresh clocks of a TCP socket, + * ensuring monotonically increasing values. + */ +void tcp_mstamp_refresh(struct tcp_sock *tp) +{ + u64 val = tcp_clock_ns(); + + /* departure time for next data packet */ + if (val > tp->tcp_wstamp_ns) + tp->tcp_wstamp_ns = val; + + val = div_u64(val, NSEC_PER_USEC); + if (val > tp->tcp_mstamp) + tp->tcp_mstamp = val; +} + static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, int push_one, gfp_t gfp); @@ -179,21 +195,6 @@ static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts, inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK); } - -u32 tcp_default_init_rwnd(u32 mss) -{ - /* Initial receive window should be twice of TCP_INIT_CWND to - * enable proper sending of new unsent data during fast recovery - * (RFC 3517, Section 4, NextSeg() rule (2)). Further place a - * limit when mss is larger than 1460. - */ - u32 init_rwnd = TCP_INIT_CWND * 2; - - if (mss > 1460) - init_rwnd = max((1460 * init_rwnd) / mss, 2U); - return init_rwnd; -} - /* Determine a window scaling and initial window to offer. * Based on the assumption that the given amount of space * will be offered. Store the results in the tp structure. 
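The hunk below reworks how the initial advertised window is chosen: the unscaled window is first clamped to what a 16-bit window field can carry, and any caller-supplied initial window is then applied directly in MSS units. A worked example, assuming mss = 1460 and init_rcv_wnd = 10 (TCP_INIT_CWND):

	(*rcv_wnd) = min_t(u32, space, U16_MAX); /* pre-scaling clamp to the 16-bit field */
	*rcv_wnd = min(*rcv_wnd, 10 * 1460);     /* = min(space, 14600) bytes */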
@@ -228,7 +229,10 @@ void tcp_select_initial_window(const struct sock *sk, int __space, __u32 mss, if (sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows) (*rcv_wnd) = min(space, MAX_TCP_WINDOW); else - (*rcv_wnd) = space; + (*rcv_wnd) = min_t(u32, space, U16_MAX); + + if (init_rcv_wnd) + *rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss); (*rcv_wscale) = 0; if (wscale_ok) { @@ -241,11 +245,6 @@ void tcp_select_initial_window(const struct sock *sk, int __space, __u32 mss, (*rcv_wscale)++; } } - - if (!init_rcv_wnd) /* Use default unless specified otherwise */ - init_rcv_wnd = tcp_default_init_rwnd(mss); - *rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss); - /* Set the clamp no higher than max representable value */ (*window_clamp) = min_t(__u32, U16_MAX << (*rcv_wscale), *window_clamp); } @@ -977,28 +976,34 @@ enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer) return HRTIMER_NORESTART; } -static void tcp_internal_pacing(struct sock *sk, const struct sk_buff *skb) +static void tcp_internal_pacing(struct sock *sk) { - u64 len_ns; - u32 rate; - if (!tcp_needs_internal_pacing(sk)) return; - rate = sk->sk_pacing_rate; - if (!rate || rate == ~0U) - return; - - len_ns = (u64)skb->len * NSEC_PER_SEC; - do_div(len_ns, rate); hrtimer_start(&tcp_sk(sk)->pacing_timer, - ktime_add_ns(ktime_get(), len_ns), + ns_to_ktime(tcp_sk(sk)->tcp_wstamp_ns), HRTIMER_MODE_ABS_PINNED_SOFT); sock_hold(sk); } -static void tcp_update_skb_after_send(struct tcp_sock *tp, struct sk_buff *skb) +static void tcp_update_skb_after_send(struct sock *sk, struct sk_buff *skb) { - skb->skb_mstamp = tp->tcp_mstamp; + struct tcp_sock *tp = tcp_sk(sk); + + skb->skb_mstamp_ns = tp->tcp_wstamp_ns; + if (sk->sk_pacing_status != SK_PACING_NONE) { + u32 rate = sk->sk_pacing_rate; + + /* Original sch_fq does not pace first 10 MSS + * Note that tp->data_segs_out overflows after 2^32 packets, + * this is a minor annoyance. 
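+	 * Each paced skb advances tcp_wstamp_ns by its serialization delay
+	 * at the current rate, i.e. skb->len * NSEC_PER_SEC / rate.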
+ */ + if (rate != ~0U && rate && tp->data_segs_out >= 10) { + tp->tcp_wstamp_ns += div_u64((u64)skb->len * NSEC_PER_SEC, rate); + + tcp_internal_pacing(sk); + } + } list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue); } @@ -1045,7 +1050,7 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, if (unlikely(!skb)) return -ENOBUFS; } - skb->skb_mstamp = tp->tcp_mstamp; + skb->skb_mstamp_ns = tp->tcp_wstamp_ns; inet = inet_sk(sk); tcb = TCP_SKB_CB(skb); @@ -1137,7 +1142,6 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, tcp_event_data_sent(tp, sk); tp->data_segs_out += tcp_skb_pcount(skb); tp->bytes_sent += skb->len - tcp_header_size; - tcp_internal_pacing(sk, skb); } if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq) @@ -1149,8 +1153,7 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, skb_shinfo(skb)->gso_segs = tcp_skb_pcount(skb); skb_shinfo(skb)->gso_size = tcp_skb_mss(skb); - /* Our usage of tstamp should remain private */ - skb->tstamp = 0; + /* Leave earliest departure time in skb->tstamp (skb->skb_mstamp_ns) */ /* Cleanup our debris for IP stacks */ memset(skb->cb, 0, max(sizeof(struct inet_skb_parm), @@ -1163,7 +1166,7 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, err = net_xmit_eval(err); } if (!err && oskb) { - tcp_update_skb_after_send(tp, oskb); + tcp_update_skb_after_send(sk, oskb); tcp_rate_skb_sent(sk, oskb); } return err; @@ -1966,7 +1969,7 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb, head = tcp_rtx_queue_head(sk); if (!head) goto send_now; - age = tcp_stamp_us_delta(tp->tcp_mstamp, head->skb_mstamp); + age = tcp_stamp_us_delta(tp->tcp_mstamp, tcp_skb_timestamp_us(head)); /* If next ACK is likely to come too late (half srtt), do not defer */ if (age < (tp->srtt_us >> 4)) goto send_now; @@ -2312,7 +2315,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) { /* "skb_mstamp" is used as a start point for the retransmit timer */ - tcp_update_skb_after_send(tp, skb); + tcp_update_skb_after_send(sk, skb); goto repair; /* Skip network transmission */ } @@ -2887,7 +2890,7 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs) } tcp_skb_tsorted_restore(skb); if (!err) { - tcp_update_skb_after_send(tp, skb); + tcp_update_skb_after_send(sk, skb); tcp_rate_skb_sent(sk, skb); } } else { @@ -3205,10 +3208,10 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst, memset(&opts, 0, sizeof(opts)); #ifdef CONFIG_SYN_COOKIES if (unlikely(req->cookie_ts)) - skb->skb_mstamp = cookie_init_timestamp(req); + skb->skb_mstamp_ns = cookie_init_timestamp(req); else #endif - skb->skb_mstamp = tcp_clock_us(); + skb->skb_mstamp_ns = tcp_clock_ns(); #ifdef CONFIG_TCP_MD5SIG rcu_read_lock(); @@ -3424,7 +3427,7 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn) err = tcp_transmit_skb(sk, syn_data, 1, sk->sk_allocation); - syn->skb_mstamp = syn_data->skb_mstamp; + syn->skb_mstamp_ns = syn_data->skb_mstamp_ns; /* Now full SYN+DATA was cloned and sent (or not), * remove the SYN from the original skb (syn_data) diff --git a/net/ipv4/tcp_rate.c b/net/ipv4/tcp_rate.c index 4dff40dad4dc..baed2186c7c6 100644 --- a/net/ipv4/tcp_rate.c +++ b/net/ipv4/tcp_rate.c @@ -55,8 +55,10 @@ void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb) * bandwidth estimate. 
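 * (tcp_skb_timestamp_us(), used below, reads the skb's nanosecond
 * skb_mstamp_ns stamp scaled down to microseconds.)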
*/ if (!tp->packets_out) { - tp->first_tx_mstamp = skb->skb_mstamp; - tp->delivered_mstamp = skb->skb_mstamp; + u64 tstamp_us = tcp_skb_timestamp_us(skb); + + tp->first_tx_mstamp = tstamp_us; + tp->delivered_mstamp = tstamp_us; } TCP_SKB_CB(skb)->tx.first_tx_mstamp = tp->first_tx_mstamp; @@ -88,13 +90,12 @@ void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb, rs->is_app_limited = scb->tx.is_app_limited; rs->is_retrans = scb->sacked & TCPCB_RETRANS; + /* Record send time of most recently ACKed packet: */ + tp->first_tx_mstamp = tcp_skb_timestamp_us(skb); /* Find the duration of the "send phase" of this window: */ - rs->interval_us = tcp_stamp_us_delta( - skb->skb_mstamp, - scb->tx.first_tx_mstamp); + rs->interval_us = tcp_stamp_us_delta(tp->first_tx_mstamp, + scb->tx.first_tx_mstamp); - /* Record send time of most recently ACKed packet: */ - tp->first_tx_mstamp = skb->skb_mstamp; } /* Mark off the skb delivered once it's sacked to avoid being * used again when it's cumulatively acked. For acked packets diff --git a/net/ipv4/tcp_recovery.c b/net/ipv4/tcp_recovery.c index c81aadff769b..fdb715bdd2d1 100644 --- a/net/ipv4/tcp_recovery.c +++ b/net/ipv4/tcp_recovery.c @@ -50,7 +50,7 @@ static u32 tcp_rack_reo_wnd(const struct sock *sk) s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb, u32 reo_wnd) { return tp->rack.rtt_us + reo_wnd - - tcp_stamp_us_delta(tp->tcp_mstamp, skb->skb_mstamp); + tcp_stamp_us_delta(tp->tcp_mstamp, tcp_skb_timestamp_us(skb)); } /* RACK loss detection (IETF draft draft-ietf-tcpm-rack-01): @@ -91,7 +91,8 @@ static void tcp_rack_detect_loss(struct sock *sk, u32 *reo_timeout) !(scb->sacked & TCPCB_SACKED_RETRANS)) continue; - if (!tcp_rack_sent_after(tp->rack.mstamp, skb->skb_mstamp, + if (!tcp_rack_sent_after(tp->rack.mstamp, + tcp_skb_timestamp_us(skb), tp->rack.end_seq, scb->end_seq)) break; diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index 7fdf222a0bdf..61023d50cd60 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -360,7 +360,7 @@ static void tcp_probe_timer(struct sock *sk) */ start_ts = tcp_skb_timestamp(skb); if (!start_ts) - skb->skb_mstamp = tp->tcp_mstamp; + skb->skb_mstamp_ns = tp->tcp_wstamp_ns; else if (icsk->icsk_user_timeout && (s32)(tcp_time_stamp(tp) - start_ts) > icsk->icsk_user_timeout) goto abort; diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index c32a4c16b7ff..cf8252d05a01 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -1042,7 +1042,7 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) } if (ipv4_is_multicast(daddr)) { - if (!ipc.oif) + if (!ipc.oif || netif_index_is_l3_master(sock_net(sk), ipc.oif)) ipc.oif = inet->mc_index; if (!saddr) saddr = inet->mc_addr; @@ -1889,7 +1889,7 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) return 0; } -static DEFINE_STATIC_KEY_FALSE(udp_encap_needed_key); +DEFINE_STATIC_KEY_FALSE(udp_encap_needed_key); void udp_encap_enable(void) { static_branch_enable(&udp_encap_needed_key); diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c index 0c0522b79b43..802f2bc00d69 100644 --- a/net/ipv4/udp_offload.c +++ b/net/ipv4/udp_offload.c @@ -405,7 +405,7 @@ static struct sk_buff *udp4_gro_receive(struct list_head *head, { struct udphdr *uh = udp_gro_udphdr(skb); - if (unlikely(!uh)) + if (unlikely(!uh) || !static_branch_unlikely(&udp_encap_needed_key)) goto flush; /* Don't bother verifying checksum if we're going to flush anyway. 
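 * (With the udp_encap_needed_key static key disabled there are no
 * encapsulated UDP sockets, so udp4_gro_receive() now flushes straight
 * away instead of attempting GRO.)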
*/ diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index c63ccce6425f..2496b12bf721 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -666,6 +666,7 @@ errout: static int inet6_netconf_dump_devconf(struct sk_buff *skb, struct netlink_callback *cb) { + const struct nlmsghdr *nlh = cb->nlh; struct net *net = sock_net(skb->sk); int h, s_h; int idx, s_idx; @@ -673,6 +674,21 @@ static int inet6_netconf_dump_devconf(struct sk_buff *skb, struct inet6_dev *idev; struct hlist_head *head; + if (cb->strict_check) { + struct netlink_ext_ack *extack = cb->extack; + struct netconfmsg *ncm; + + if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ncm))) { + NL_SET_ERR_MSG_MOD(extack, "Invalid header for netconf dump request"); + return -EINVAL; + } + + if (nlmsg_attrlen(nlh, sizeof(*ncm))) { + NL_SET_ERR_MSG_MOD(extack, "Invalid data after header in netconf dump request"); + return -EINVAL; + } + } + s_h = cb->args[0]; s_idx = idx = cb->args[1]; @@ -692,7 +708,7 @@ static int inet6_netconf_dump_devconf(struct sk_buff *skb, if (inet6_netconf_fill_devconf(skb, dev->ifindex, &idev->cnf, NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, + nlh->nlmsg_seq, RTM_NEWNETCONF, NLM_F_MULTI, NETCONFA_ALL) < 0) { @@ -709,7 +725,7 @@ cont: if (inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL, net->ipv6.devconf_all, NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, + nlh->nlmsg_seq, RTM_NEWNETCONF, NLM_F_MULTI, NETCONFA_ALL) < 0) goto done; @@ -720,7 +736,7 @@ cont: if (inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT, net->ipv6.devconf_dflt, NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, + nlh->nlmsg_seq, RTM_NEWNETCONF, NLM_F_MULTI, NETCONFA_ALL) < 0) goto done; @@ -997,6 +1013,7 @@ ipv6_add_addr(struct inet6_dev *idev, struct ifa6_config *cfg, if (addr_type == IPV6_ADDR_ANY || addr_type & IPV6_ADDR_MULTICAST || (!(idev->dev->flags & IFF_LOOPBACK) && + !netif_is_l3_master(idev->dev) && addr_type & IPV6_ADDR_LOOPBACK)) return ERR_PTR(-EADDRNOTAVAIL); @@ -4489,6 +4506,7 @@ static const struct nla_policy ifa_ipv6_policy[IFA_MAX+1] = { [IFA_CACHEINFO] = { .len = sizeof(struct ifa_cacheinfo) }, [IFA_FLAGS] = { .len = sizeof(u32) }, [IFA_RT_PRIORITY] = { .len = sizeof(u32) }, + [IFA_TARGET_NETNSID] = { .type = NLA_S32 }, }; static int @@ -4791,19 +4809,39 @@ static inline int inet6_ifaddr_msgsize(void) + nla_total_size(4) /* IFA_RT_PRIORITY */; } +enum addr_type_t { + UNICAST_ADDR, + MULTICAST_ADDR, + ANYCAST_ADDR, +}; + +struct inet6_fill_args { + u32 portid; + u32 seq; + int event; + unsigned int flags; + int netnsid; + enum addr_type_t type; +}; + static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa, - u32 portid, u32 seq, int event, unsigned int flags) + struct inet6_fill_args *args) { struct nlmsghdr *nlh; u32 preferred, valid; - nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct ifaddrmsg), flags); + nlh = nlmsg_put(skb, args->portid, args->seq, args->event, + sizeof(struct ifaddrmsg), args->flags); if (!nlh) return -EMSGSIZE; put_ifaddrmsg(nlh, ifa->prefix_len, ifa->flags, rt_scope(ifa->scope), ifa->idev->dev->ifindex); + if (args->netnsid >= 0 && + nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid)) + goto error; + if (!((ifa->flags&IFA_F_PERMANENT) && (ifa->prefered_lft == INFINITY_LIFE_TIME))) { preferred = ifa->prefered_lft; @@ -4853,7 +4891,7 @@ error: } static int inet6_fill_ifmcaddr(struct sk_buff *skb, struct ifmcaddr6 *ifmca, - u32 portid, u32 seq, int event, u16 flags) + struct inet6_fill_args *args) { struct nlmsghdr *nlh; u8 scope = RT_SCOPE_UNIVERSE; @@ 
-4862,10 +4900,15 @@ static int inet6_fill_ifmcaddr(struct sk_buff *skb, struct ifmcaddr6 *ifmca, if (ipv6_addr_scope(&ifmca->mca_addr) & IFA_SITE) scope = RT_SCOPE_SITE; - nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct ifaddrmsg), flags); + nlh = nlmsg_put(skb, args->portid, args->seq, args->event, + sizeof(struct ifaddrmsg), args->flags); if (!nlh) return -EMSGSIZE; + if (args->netnsid >= 0 && + nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid)) + return -EMSGSIZE; + put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex); if (nla_put_in6_addr(skb, IFA_MULTICAST, &ifmca->mca_addr) < 0 || put_cacheinfo(skb, ifmca->mca_cstamp, ifmca->mca_tstamp, @@ -4879,7 +4922,7 @@ static int inet6_fill_ifmcaddr(struct sk_buff *skb, struct ifmcaddr6 *ifmca, } static int inet6_fill_ifacaddr(struct sk_buff *skb, struct ifacaddr6 *ifaca, - u32 portid, u32 seq, int event, unsigned int flags) + struct inet6_fill_args *args) { struct net_device *dev = fib6_info_nh_dev(ifaca->aca_rt); int ifindex = dev ? dev->ifindex : 1; @@ -4889,10 +4932,15 @@ static int inet6_fill_ifacaddr(struct sk_buff *skb, struct ifacaddr6 *ifaca, if (ipv6_addr_scope(&ifaca->aca_addr) & IFA_SITE) scope = RT_SCOPE_SITE; - nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct ifaddrmsg), flags); + nlh = nlmsg_put(skb, args->portid, args->seq, args->event, + sizeof(struct ifaddrmsg), args->flags); if (!nlh) return -EMSGSIZE; + if (args->netnsid >= 0 && + nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid)) + return -EMSGSIZE; + put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex); if (nla_put_in6_addr(skb, IFA_ANYCAST, &ifaca->aca_addr) < 0 || put_cacheinfo(skb, ifaca->aca_cstamp, ifaca->aca_tstamp, @@ -4905,16 +4953,11 @@ static int inet6_fill_ifacaddr(struct sk_buff *skb, struct ifacaddr6 *ifaca, return 0; } -enum addr_type_t { - UNICAST_ADDR, - MULTICAST_ADDR, - ANYCAST_ADDR, -}; - /* called with rcu_read_lock() */ static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb, - struct netlink_callback *cb, enum addr_type_t type, - int s_ip_idx, int *p_ip_idx) + struct netlink_callback *cb, + int s_ip_idx, int *p_ip_idx, + struct inet6_fill_args *fillargs) { struct ifmcaddr6 *ifmca; struct ifacaddr6 *ifaca; @@ -4922,19 +4965,16 @@ static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb, int ip_idx = *p_ip_idx; read_lock_bh(&idev->lock); - switch (type) { + switch (fillargs->type) { case UNICAST_ADDR: { struct inet6_ifaddr *ifa; + fillargs->event = RTM_NEWADDR; /* unicast address incl. 
temp addr */ list_for_each_entry(ifa, &idev->addr_list, if_list) { if (++ip_idx < s_ip_idx) continue; - err = inet6_fill_ifaddr(skb, ifa, - NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, - RTM_NEWADDR, - NLM_F_MULTI); + err = inet6_fill_ifaddr(skb, ifa, fillargs); if (err < 0) break; nl_dump_check_consistent(cb, nlmsg_hdr(skb)); @@ -4942,31 +4982,26 @@ static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb, break; } case MULTICAST_ADDR: + fillargs->event = RTM_GETMULTICAST; + /* multicast address */ for (ifmca = idev->mc_list; ifmca; ifmca = ifmca->next, ip_idx++) { if (ip_idx < s_ip_idx) continue; - err = inet6_fill_ifmcaddr(skb, ifmca, - NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, - RTM_GETMULTICAST, - NLM_F_MULTI); + err = inet6_fill_ifmcaddr(skb, ifmca, fillargs); if (err < 0) break; } break; case ANYCAST_ADDR: + fillargs->event = RTM_GETANYCAST; /* anycast address */ for (ifaca = idev->ac_list; ifaca; ifaca = ifaca->aca_next, ip_idx++) { if (ip_idx < s_ip_idx) continue; - err = inet6_fill_ifacaddr(skb, ifaca, - NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, - RTM_GETANYCAST, - NLM_F_MULTI); + err = inet6_fill_ifacaddr(skb, ifaca, fillargs); if (err < 0) break; } @@ -4979,10 +5014,71 @@ static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb, return err; } +static int inet6_valid_dump_ifaddr_req(const struct nlmsghdr *nlh, + struct inet6_fill_args *fillargs, + struct net **tgt_net, struct sock *sk, + struct netlink_ext_ack *extack) +{ + struct nlattr *tb[IFA_MAX+1]; + struct ifaddrmsg *ifm; + int err, i; + + if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) { + NL_SET_ERR_MSG_MOD(extack, "Invalid header for address dump request"); + return -EINVAL; + } + + ifm = nlmsg_data(nlh); + if (ifm->ifa_prefixlen || ifm->ifa_flags || ifm->ifa_scope) { + NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for address dump request"); + return -EINVAL; + } + if (ifm->ifa_index) { + NL_SET_ERR_MSG_MOD(extack, "Filter by device index not supported for address dump"); + return -EINVAL; + } + + err = nlmsg_parse_strict(nlh, sizeof(*ifm), tb, IFA_MAX, + ifa_ipv6_policy, extack); + if (err < 0) + return err; + + for (i = 0; i <= IFA_MAX; ++i) { + if (!tb[i]) + continue; + + if (i == IFA_TARGET_NETNSID) { + struct net *net; + + fillargs->netnsid = nla_get_s32(tb[i]); + net = rtnl_get_net_ns_capable(sk, fillargs->netnsid); + if (IS_ERR(net)) { + NL_SET_ERR_MSG_MOD(extack, "Invalid target network namespace id"); + return PTR_ERR(net); + } + *tgt_net = net; + } else { + NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in dump request"); + return -EINVAL; + } + } + + return 0; +} + static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb, enum addr_type_t type) { + const struct nlmsghdr *nlh = cb->nlh; + struct inet6_fill_args fillargs = { + .portid = NETLINK_CB(cb->skb).portid, + .seq = cb->nlh->nlmsg_seq, + .flags = NLM_F_MULTI, + .netnsid = -1, + .type = type, + }; struct net *net = sock_net(skb->sk); + struct net *tgt_net = net; int h, s_h; int idx, ip_idx; int s_idx, s_ip_idx; @@ -4994,11 +5090,20 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb, s_idx = idx = cb->args[1]; s_ip_idx = ip_idx = cb->args[2]; + if (cb->strict_check) { + int err; + + err = inet6_valid_dump_ifaddr_req(nlh, &fillargs, &tgt_net, + skb->sk, cb->extack); + if (err < 0) + return err; + } + rcu_read_lock(); - cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq; + cb->seq = atomic_read(&tgt_net->ipv6.dev_addr_genid) ^ 
tgt_net->dev_base_seq; for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { idx = 0; - head = &net->dev_index_head[h]; + head = &tgt_net->dev_index_head[h]; hlist_for_each_entry_rcu(dev, head, index_hlist) { if (idx < s_idx) goto cont; @@ -5009,8 +5114,8 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb, if (!idev) goto cont; - if (in6_dump_addrs(idev, skb, cb, type, - s_ip_idx, &ip_idx) < 0) + if (in6_dump_addrs(idev, skb, cb, s_ip_idx, &ip_idx, + &fillargs) < 0) goto done; cont: idx++; @@ -5021,6 +5126,8 @@ done: cb->args[0] = h; cb->args[1] = idx; cb->args[2] = ip_idx; + if (fillargs.netnsid >= 0) + put_net(tgt_net); return skb->len; } @@ -5051,6 +5158,14 @@ static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct net *net = sock_net(in_skb->sk); + struct inet6_fill_args fillargs = { + .portid = NETLINK_CB(in_skb).portid, + .seq = nlh->nlmsg_seq, + .event = RTM_NEWADDR, + .flags = 0, + .netnsid = -1, + }; + struct net *tgt_net = net; struct ifaddrmsg *ifm; struct nlattr *tb[IFA_MAX+1]; struct in6_addr *addr = NULL, *peer; @@ -5064,15 +5179,24 @@ static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh, if (err < 0) return err; + if (tb[IFA_TARGET_NETNSID]) { + fillargs.netnsid = nla_get_s32(tb[IFA_TARGET_NETNSID]); + + tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(in_skb).sk, + fillargs.netnsid); + if (IS_ERR(tgt_net)) + return PTR_ERR(tgt_net); + } + addr = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer); if (!addr) return -EINVAL; ifm = nlmsg_data(nlh); if (ifm->ifa_index) - dev = dev_get_by_index(net, ifm->ifa_index); + dev = dev_get_by_index(tgt_net, ifm->ifa_index); - ifa = ipv6_get_ifaddr(net, addr, dev, 1); + ifa = ipv6_get_ifaddr(tgt_net, addr, dev, 1); if (!ifa) { err = -EADDRNOTAVAIL; goto errout; @@ -5084,20 +5208,22 @@ static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh, goto errout_ifa; } - err = inet6_fill_ifaddr(skb, ifa, NETLINK_CB(in_skb).portid, - nlh->nlmsg_seq, RTM_NEWADDR, 0); + err = inet6_fill_ifaddr(skb, ifa, &fillargs); if (err < 0) { /* -EMSGSIZE implies BUG in inet6_ifaddr_msgsize() */ WARN_ON(err == -EMSGSIZE); kfree_skb(skb); goto errout_ifa; } - err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid); + err = rtnl_unicast(skb, tgt_net, NETLINK_CB(in_skb).portid); errout_ifa: in6_ifa_put(ifa); errout: if (dev) dev_put(dev); + if (fillargs.netnsid >= 0) + put_net(tgt_net); + return err; } @@ -5105,13 +5231,20 @@ static void inet6_ifa_notify(int event, struct inet6_ifaddr *ifa) { struct sk_buff *skb; struct net *net = dev_net(ifa->idev->dev); + struct inet6_fill_args fillargs = { + .portid = 0, + .seq = 0, + .event = event, + .flags = 0, + .netnsid = -1, + }; int err = -ENOBUFS; skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_ATOMIC); if (!skb) goto errout; - err = inet6_fill_ifaddr(skb, ifa, 0, 0, event, 0); + err = inet6_fill_ifaddr(skb, ifa, &fillargs); if (err < 0) { /* -EMSGSIZE implies BUG in inet6_ifaddr_msgsize() */ WARN_ON(err == -EMSGSIZE); @@ -5527,6 +5660,31 @@ nla_put_failure: return -EMSGSIZE; } +static int inet6_valid_dump_ifinfo(const struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) +{ + struct ifinfomsg *ifm; + + if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) { + NL_SET_ERR_MSG_MOD(extack, "Invalid header for link dump request"); + return -EINVAL; + } + + if (nlmsg_attrlen(nlh, sizeof(*ifm))) { + NL_SET_ERR_MSG_MOD(extack, "Invalid data after header"); + return -EINVAL; + } + + ifm = nlmsg_data(nlh); + if 
(ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags || + ifm->ifi_change || ifm->ifi_index) { + NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for dump request"); + return -EINVAL; + } + + return 0; +} + static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) { struct net *net = sock_net(skb->sk); @@ -5536,6 +5694,16 @@ static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) struct inet6_dev *idev; struct hlist_head *head; + /* only requests using strict checking can pass data to + * influence the dump + */ + if (cb->strict_check) { + int err = inet6_valid_dump_ifinfo(cb->nlh, cb->extack); + + if (err < 0) + return err; + } + s_h = cb->args[0]; s_idx = cb->args[1]; diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c index 1d6ced37ad71..0d1ee82ee55b 100644 --- a/net/ipv6/addrlabel.c +++ b/net/ipv6/addrlabel.c @@ -458,20 +458,52 @@ static int ip6addrlbl_fill(struct sk_buff *skb, return 0; } +static int ip6addrlbl_valid_dump_req(const struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) +{ + struct ifaddrlblmsg *ifal; + + if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifal))) { + NL_SET_ERR_MSG_MOD(extack, "Invalid header for address label dump request"); + return -EINVAL; + } + + ifal = nlmsg_data(nlh); + if (ifal->__ifal_reserved || ifal->ifal_prefixlen || + ifal->ifal_flags || ifal->ifal_index || ifal->ifal_seq) { + NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for address label dump request"); + return -EINVAL; + } + + if (nlmsg_attrlen(nlh, sizeof(*ifal))) { + NL_SET_ERR_MSG_MOD(extack, "Invalid data after header for address label dump request"); + return -EINVAL; + } + + return 0; +} + static int ip6addrlbl_dump(struct sk_buff *skb, struct netlink_callback *cb) { + const struct nlmsghdr *nlh = cb->nlh; struct net *net = sock_net(skb->sk); struct ip6addrlbl_entry *p; int idx = 0, s_idx = cb->args[0]; int err; + if (cb->strict_check) { + err = ip6addrlbl_valid_dump_req(nlh, cb->extack); + if (err < 0) + return err; + } + rcu_read_lock(); hlist_for_each_entry_rcu(p, &net->ipv6.ip6addrlbl_table.head, list) { if (idx >= s_idx) { err = ip6addrlbl_fill(skb, p, net->ipv6.ip6addrlbl_table.seq, NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, + nlh->nlmsg_seq, RTM_NEWADDRLABEL, NLM_F_MULTI); if (err < 0) diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 9a4261e50272..e9c8cfdf4b4c 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -209,6 +209,7 @@ lookup_protocol: np->hop_limit = -1; np->mcast_hops = IPV6_DEFAULT_MCASTHOPS; np->mc_loop = 1; + np->mc_all = 1; np->pmtudisc = IPV6_PMTUDISC_WANT; np->repflow = net->ipv6.sysctl.flowlabel_reflect; sk->sk_ipv6only = net->ipv6.sysctl.bindv6only; @@ -467,12 +468,10 @@ void inet6_destroy_sock(struct sock *sk) /* Release rx options */ skb = xchg(&np->pktoptions, NULL); - if (skb) - kfree_skb(skb); + kfree_skb(skb); skb = xchg(&np->rxpmtu, NULL); - if (skb) - kfree_skb(skb); + kfree_skb(skb); /* Free flowlabels */ fl6_free_socklist(sk); diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index 88a7579c23bd..63b2b66f9dfa 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ -601,12 +601,11 @@ static void esp_input_done_esn(struct crypto_async_request *base, int err) static int esp6_input(struct xfrm_state *x, struct sk_buff *skb) { - struct ip_esp_hdr *esph; struct crypto_aead *aead = x->data; struct aead_request *req; struct sk_buff *trailer; int ivlen = crypto_aead_ivsize(aead); - int elen = skb->len - sizeof(*esph) - ivlen; + int elen = skb->len - sizeof(struct ip_esp_hdr) - 
ivlen; int nfrags; int assoclen; int seqhilen; @@ -616,7 +615,7 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb) u8 *iv; struct scatterlist *sg; - if (!pskb_may_pull(skb, sizeof(*esph) + ivlen)) { + if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + ivlen)) { ret = -EINVAL; goto out; } @@ -626,7 +625,7 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb) goto out; } - assoclen = sizeof(*esph); + assoclen = sizeof(struct ip_esp_hdr); seqhilen = 0; if (x->props.flags & XFRM_STATE_ESN) { diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index cbe46175bb59..0783af11b0b7 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -29,6 +29,7 @@ #include <linux/list.h> #include <linux/slab.h> +#include <net/ip.h> #include <net/ipv6.h> #include <net/ndisc.h> #include <net/addrconf.h> @@ -46,6 +47,7 @@ struct fib6_cleaner { int (*func)(struct fib6_info *, void *arg); int sernum; void *arg; + bool skip_notify; }; #ifdef CONFIG_IPV6_SUBTREES @@ -160,8 +162,6 @@ struct fib6_info *fib6_info_alloc(gfp_t gfp_flags) } INIT_LIST_HEAD(&f6i->fib6_siblings); - f6i->fib6_metrics = (struct dst_metrics *)&dst_default_metrics; - atomic_inc(&f6i->fib6_ref); return f6i; @@ -171,7 +171,6 @@ void fib6_info_destroy_rcu(struct rcu_head *head) { struct fib6_info *f6i = container_of(head, struct fib6_info, rcu); struct rt6_exception_bucket *bucket; - struct dst_metrics *m; WARN_ON(f6i->fib6_node); @@ -205,9 +204,7 @@ void fib6_info_destroy_rcu(struct rcu_head *head) if (f6i->fib6_nh.nh_dev) dev_put(f6i->fib6_nh.nh_dev); - m = f6i->fib6_metrics; - if (m != &dst_default_metrics && refcount_dec_and_test(&m->refcnt)) - kfree(m); + ip_fib_metrics_put(f6i->fib6_metrics); kfree(f6i); } @@ -570,6 +567,7 @@ static int fib6_dump_table(struct fib6_table *table, struct sk_buff *skb, static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb) { + const struct nlmsghdr *nlh = cb->nlh; struct net *net = sock_net(skb->sk); unsigned int h, s_h; unsigned int e = 0, s_e; @@ -579,6 +577,13 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb) struct hlist_head *head; int res = 0; + if (cb->strict_check) { + int err = ip_valid_fib_dump_req(nlh, cb->extack); + + if (err < 0) + return err; + } + s_h = cb->args[0]; s_e = cb->args[1]; @@ -1954,6 +1959,7 @@ static int fib6_clean_node(struct fib6_walker *w) struct fib6_cleaner *c = container_of(w, struct fib6_cleaner, w); struct nl_info info = { .nl_net = c->net, + .skip_notify = c->skip_notify, }; if (c->sernum != FIB6_NO_SERNUM_CHANGE && @@ -2005,7 +2011,7 @@ static int fib6_clean_node(struct fib6_walker *w) static void fib6_clean_tree(struct net *net, struct fib6_node *root, int (*func)(struct fib6_info *, void *arg), - int sernum, void *arg) + int sernum, void *arg, bool skip_notify) { struct fib6_cleaner c; @@ -2017,13 +2023,14 @@ static void fib6_clean_tree(struct net *net, struct fib6_node *root, c.sernum = sernum; c.arg = arg; c.net = net; + c.skip_notify = skip_notify; fib6_walk(net, &c.w); } static void __fib6_clean_all(struct net *net, int (*func)(struct fib6_info *, void *), - int sernum, void *arg) + int sernum, void *arg, bool skip_notify) { struct fib6_table *table; struct hlist_head *head; @@ -2035,7 +2042,7 @@ static void __fib6_clean_all(struct net *net, hlist_for_each_entry_rcu(table, head, tb6_hlist) { spin_lock_bh(&table->tb6_lock); fib6_clean_tree(net, &table->tb6_root, - func, sernum, arg); + func, sernum, arg, skip_notify); spin_unlock_bh(&table->tb6_lock); } } @@ -2045,14 +2052,21 @@ static 
void __fib6_clean_all(struct net *net, void fib6_clean_all(struct net *net, int (*func)(struct fib6_info *, void *), void *arg) { - __fib6_clean_all(net, func, FIB6_NO_SERNUM_CHANGE, arg); + __fib6_clean_all(net, func, FIB6_NO_SERNUM_CHANGE, arg, false); +} + +void fib6_clean_all_skip_notify(struct net *net, + int (*func)(struct fib6_info *, void *), + void *arg) +{ + __fib6_clean_all(net, func, FIB6_NO_SERNUM_CHANGE, arg, true); } static void fib6_flush_trees(struct net *net) { int new_sernum = fib6_new_sernum(net); - __fib6_clean_all(net, NULL, new_sernum, NULL); + __fib6_clean_all(net, NULL, new_sernum, NULL, false); } /* diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index e493b041d4ac..515adbdba1d2 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -427,35 +427,17 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt, u8 type, u8 code, int offset, __be32 info) { struct net *net = dev_net(skb->dev); - const struct gre_base_hdr *greh; const struct ipv6hdr *ipv6h; - int grehlen = sizeof(*greh); + struct tnl_ptk_info tpi; struct ip6_tnl *t; - int key_off = 0; - __be16 flags; - __be32 key; - if (!pskb_may_pull(skb, offset + grehlen)) - return; - greh = (const struct gre_base_hdr *)(skb->data + offset); - flags = greh->flags; - if (flags & (GRE_VERSION | GRE_ROUTING)) + if (gre_parse_header(skb, &tpi, NULL, htons(ETH_P_IPV6), + offset) < 0) return; - if (flags & GRE_CSUM) - grehlen += 4; - if (flags & GRE_KEY) { - key_off = grehlen + offset; - grehlen += 4; - } - if (!pskb_may_pull(skb, offset + grehlen)) - return; ipv6h = (const struct ipv6hdr *)skb->data; - greh = (const struct gre_base_hdr *)(skb->data + offset); - key = key_off ? *(__be32 *)(skb->data + key_off) : 0; - t = ip6gre_tunnel_lookup(skb->dev, &ipv6h->daddr, &ipv6h->saddr, - key, greh->protocol); + tpi.key, tpi.proto); if (!t) return; diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c index 6242682be876..96577e742afd 100644 --- a/net/ipv6/ip6_input.c +++ b/net/ipv6/ip6_input.c @@ -178,7 +178,8 @@ static struct sk_buff *ip6_rcv_core(struct sk_buff *skb, struct net_device *dev, */ if ((ipv6_addr_loopback(&hdr->saddr) || ipv6_addr_loopback(&hdr->daddr)) && - !(dev->flags & IFF_LOOPBACK)) + !(dev->flags & IFF_LOOPBACK) && + !netif_is_l3_master(dev)) goto err; /* RFC4291 Errata ID: 3480 diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index f9f8f554d141..89e0d5118afe 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -725,7 +725,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, skb = frag; frag = skb->next; - skb->next = NULL; + skb_mark_not_on_list(skb); } kfree(tmp_hdr); diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index d0b7e0249c13..d7563ef76518 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -85,7 +85,8 @@ static struct mr_table *ip6mr_new_table(struct net *net, u32 id); static void ip6mr_free_table(struct mr_table *mrt); static void ip6_mr_forward(struct net *net, struct mr_table *mrt, - struct sk_buff *skb, struct mfc6_cache *cache); + struct net_device *dev, struct sk_buff *skb, + struct mfc6_cache *cache); static int ip6mr_cache_report(struct mr_table *mrt, struct sk_buff *pkt, mifi_t mifi, int assert); static void mr6_netlink_event(struct mr_table *mrt, struct mfc6_cache *mfc, @@ -138,6 +139,9 @@ static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6, .flags = FIB_LOOKUP_NOREF, }; + /* update flow if oif or iif point to device enslaved to l3mdev */ + l3mdev_update_flow(net, flowi6_to_flowi(flp6)); + err = 
fib_rules_lookup(net->ipv6.mr6_rules_ops, flowi6_to_flowi(flp6), 0, &arg); if (err < 0) @@ -164,7 +168,9 @@ static int ip6mr_rule_action(struct fib_rule *rule, struct flowi *flp, return -EINVAL; } - mrt = ip6mr_get_table(rule->fr_net, rule->table); + arg->table = fib_rule_get_table(rule, arg); + + mrt = ip6mr_get_table(rule->fr_net, arg->table); if (!mrt) return -EAGAIN; res->mrt = mrt; @@ -1014,7 +1020,7 @@ static void ip6mr_cache_resolve(struct net *net, struct mr_table *mrt, } rtnl_unicast(skb, net, NETLINK_CB(skb).portid); } else - ip6_mr_forward(net, mrt, skb, c); + ip6_mr_forward(net, mrt, skb->dev, skb, c); } } @@ -1120,7 +1126,7 @@ static int ip6mr_cache_report(struct mr_table *mrt, struct sk_buff *pkt, /* Queue a packet for resolution. It gets locked cache entry! */ static int ip6mr_cache_unresolved(struct mr_table *mrt, mifi_t mifi, - struct sk_buff *skb) + struct sk_buff *skb, struct net_device *dev) { struct mfc6_cache *c; bool found = false; @@ -1180,6 +1186,10 @@ static int ip6mr_cache_unresolved(struct mr_table *mrt, mifi_t mifi, kfree_skb(skb); err = -ENOBUFS; } else { + if (dev) { + skb->dev = dev; + skb->skb_iif = dev->ifindex; + } skb_queue_tail(&c->_c.mfc_un.unres.unresolved, skb); err = 0; } @@ -2043,11 +2053,12 @@ static int ip6mr_find_vif(struct mr_table *mrt, struct net_device *dev) } static void ip6_mr_forward(struct net *net, struct mr_table *mrt, - struct sk_buff *skb, struct mfc6_cache *c) + struct net_device *dev, struct sk_buff *skb, + struct mfc6_cache *c) { int psend = -1; int vif, ct; - int true_vifi = ip6mr_find_vif(mrt, skb->dev); + int true_vifi = ip6mr_find_vif(mrt, dev); vif = c->_c.mfc_parent; c->_c.mfc_un.res.pkt++; @@ -2073,7 +2084,7 @@ static void ip6_mr_forward(struct net *net, struct mr_table *mrt, /* * Wrong interface: drop packet and (maybe) send PIM assert. */ - if (mrt->vif_table[vif].dev != skb->dev) { + if (mrt->vif_table[vif].dev != dev) { c->_c.mfc_un.res.wrong_if++; if (true_vifi >= 0 && mrt->mroute_do_assert && @@ -2154,6 +2165,19 @@ int ip6_mr_input(struct sk_buff *skb) .flowi6_mark = skb->mark, }; int err; + struct net_device *dev; + + /* skb->dev passed in is the master dev for vrfs. + * Get the proper interface that does have a vif associated with it. 
+ */ + dev = skb->dev; + if (netif_is_l3_master(skb->dev)) { + dev = dev_get_by_index_rcu(net, IPCB(skb)->iif); + if (!dev) { + kfree_skb(skb); + return -ENODEV; + } + } err = ip6mr_fib_lookup(net, &fl6, &mrt); if (err < 0) { @@ -2165,7 +2189,7 @@ int ip6_mr_input(struct sk_buff *skb) cache = ip6mr_cache_find(mrt, &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr); if (!cache) { - int vif = ip6mr_find_vif(mrt, skb->dev); + int vif = ip6mr_find_vif(mrt, dev); if (vif >= 0) cache = ip6mr_cache_find_any(mrt, @@ -2179,9 +2203,9 @@ int ip6_mr_input(struct sk_buff *skb) if (!cache) { int vif; - vif = ip6mr_find_vif(mrt, skb->dev); + vif = ip6mr_find_vif(mrt, dev); if (vif >= 0) { - int err = ip6mr_cache_unresolved(mrt, vif, skb); + int err = ip6mr_cache_unresolved(mrt, vif, skb, dev); read_unlock(&mrt_lock); return err; @@ -2191,7 +2215,7 @@ int ip6_mr_input(struct sk_buff *skb) return -ENODEV; } - ip6_mr_forward(net, mrt, skb, cache); + ip6_mr_forward(net, mrt, dev, skb, cache); read_unlock(&mrt_lock); @@ -2257,7 +2281,7 @@ int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm, iph->saddr = rt->rt6i_src.addr; iph->daddr = rt->rt6i_dst.addr; - err = ip6mr_cache_unresolved(mrt, vif, skb2); + err = ip6mr_cache_unresolved(mrt, vif, skb2, dev); read_unlock(&mrt_lock); return err; @@ -2433,6 +2457,15 @@ errout: static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb) { + const struct nlmsghdr *nlh = cb->nlh; + + if (cb->strict_check) { + int err = ip_valid_fib_dump_req(nlh, cb->extack); + + if (err < 0) + return err; + } + return mr_rtm_dumproute(skb, cb, ip6mr_mr_table_iter, _ip6mr_fill_mroute, &mfc_unres_lock); } diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index c0cac9cc3a28..381ce38940ae 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -674,6 +674,13 @@ done: retv = ipv6_sock_ac_drop(sk, mreq.ipv6mr_ifindex, &mreq.ipv6mr_acaddr); break; } + case IPV6_MULTICAST_ALL: + if (optlen < sizeof(int)) + goto e_inval; + np->mc_all = valbool; + retv = 0; + break; + case MCAST_JOIN_GROUP: case MCAST_LEAVE_GROUP: { @@ -1266,6 +1273,10 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, val = np->mcast_oif; break; + case IPV6_MULTICAST_ALL: + val = np->mc_all; + break; + case IPV6_UNICAST_IF: val = (__force int)htonl((__u32) np->ucast_oif); break; diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 4ae54aaca373..6895e1dc0b03 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -636,7 +636,7 @@ bool inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr, } if (!mc) { rcu_read_unlock(); - return true; + return np->mc_all; } read_lock(&mc->sflock); psl = mc->sflist; diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 0ec273997d1d..a25cfdd47c89 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -1533,7 +1533,7 @@ static void ndisc_redirect_rcv(struct sk_buff *skb) if (!ndopts.nd_opts_rh) { ip6_redirect_no_header(skb, dev_net(skb->dev), - skb->dev->ifindex, 0); + skb->dev->ifindex); return; } @@ -1784,6 +1784,8 @@ static int ndisc_netdev_event(struct notifier_block *this, unsigned long event, change_info = ptr; if (change_info->flags_changed & IFF_NOARP) neigh_changeaddr(&nd_tbl, dev); + if (!netif_carrier_ok(dev)) + neigh_carrier_down(&nd_tbl, dev); break; case NETDEV_DOWN: neigh_ifdown(&nd_tbl, dev); diff --git a/net/ipv6/netfilter/ip6t_ipv6header.c b/net/ipv6/netfilter/ip6t_ipv6header.c index 8b147440fbdc..af737b47b9b5 100644 --- a/net/ipv6/netfilter/ip6t_ipv6header.c +++ 
b/net/ipv6/netfilter/ip6t_ipv6header.c @@ -65,7 +65,10 @@ ipv6header_mt6(const struct sk_buff *skb, struct xt_action_param *par) } hp = skb_header_pointer(skb, ptr, sizeof(_hdr), &_hdr); - BUG_ON(hp == NULL); + if (!hp) { + par->hotdrop = true; + return false; + } /* Calculate the header length */ if (nexthdr == NEXTHDR_FRAGMENT) diff --git a/net/ipv6/netfilter/ip6t_rt.c b/net/ipv6/netfilter/ip6t_rt.c index 2c99b94eeca3..21bf6bf04323 100644 --- a/net/ipv6/netfilter/ip6t_rt.c +++ b/net/ipv6/netfilter/ip6t_rt.c @@ -137,7 +137,10 @@ static bool rt_mt6(const struct sk_buff *skb, struct xt_action_param *par) sizeof(_addr), &_addr); - BUG_ON(ap == NULL); + if (ap == NULL) { + par->hotdrop = true; + return false; + } if (ipv6_addr_equal(ap, &rtinfo->addrs[i])) { pr_debug("i=%d temp=%d;\n", i, temp); @@ -166,7 +169,10 @@ static bool rt_mt6(const struct sk_buff *skb, struct xt_action_param *par) + temp * sizeof(_addr), sizeof(_addr), &_addr); - BUG_ON(ap == NULL); + if (ap == NULL) { + par->hotdrop = true; + return false; + } if (!ipv6_addr_equal(ap, &rtinfo->addrs[temp])) break; diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index 8f68a518d9db..b8ac369f98ad 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c @@ -450,7 +450,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_devic sub_frag_mem_limit(fq->q.net, head->truesize); head->ignore_df = 1; - head->next = NULL; + skb_mark_not_on_list(head); head->dev = dev; head->tstamp = fq->q.stamp; ipv6_hdr(head)->payload_len = htons(payload_len); diff --git a/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c b/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c index e6eb7cf9b54f..3e4bf2286abe 100644 --- a/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c +++ b/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c @@ -87,18 +87,30 @@ static struct notifier_block masq_dev_notifier = { struct masq_dev_work { struct work_struct work; struct net *net; + struct in6_addr addr; int ifindex; }; +static int inet_cmp(struct nf_conn *ct, void *work) +{ + struct masq_dev_work *w = (struct masq_dev_work *)work; + struct nf_conntrack_tuple *tuple; + + if (!device_cmp(ct, (void *)(long)w->ifindex)) + return 0; + + tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple; + + return ipv6_addr_equal(&w->addr, &tuple->dst.u3.in6); +} + static void iterate_cleanup_work(struct work_struct *work) { struct masq_dev_work *w; - long index; w = container_of(work, struct masq_dev_work, work); - index = w->ifindex; - nf_ct_iterate_cleanup_net(w->net, device_cmp, (void *)index, 0, 0); + nf_ct_iterate_cleanup_net(w->net, inet_cmp, (void *)w, 0, 0); put_net(w->net); kfree(w); @@ -147,6 +159,7 @@ static int masq_inet_event(struct notifier_block *this, INIT_WORK(&w->work, iterate_cleanup_work); w->ifindex = dev->ifindex; w->net = net; + w->addr = ifa->addr; schedule_work(&w->work); return NOTIFY_DONE; diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index 5c5b4f79296e..5c3c92713096 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c @@ -145,7 +145,7 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb, */ if (end < fq->q.len || ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len)) - goto err; + goto discard_fq; fq->q.flags |= INET_FRAG_LAST_IN; fq->q.len = end; } else { @@ -162,20 +162,20 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb, if (end > fq->q.len) { /* Some bits beyond end -> corruption. 
*/ if (fq->q.flags & INET_FRAG_LAST_IN) - goto err; + goto discard_fq; fq->q.len = end; } } if (end == offset) - goto err; + goto discard_fq; /* Point into the IP datagram 'data' part. */ if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data)) - goto err; + goto discard_fq; if (pskb_trim_rcsum(skb, end - offset)) - goto err; + goto discard_fq; /* Find out which fragments are in front and at the back of us * in the chain of fragments so far. We must know where to put @@ -388,7 +388,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev, } sub_frag_mem_limit(fq->q.net, sum_truesize); - head->next = NULL; + skb_mark_not_on_list(head); head->dev = dev; head->tstamp = fq->q.stamp; ipv6_hdr(head)->payload_len = htons(payload_len); @@ -418,6 +418,7 @@ out_fail: rcu_read_lock(); __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS); rcu_read_unlock(); + inet_frag_kill(&fq->q); return -1; } @@ -553,7 +554,6 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net) table[0].data = &net->ipv6.frags.high_thresh; table[0].extra1 = &net->ipv6.frags.low_thresh; - table[0].extra2 = &init_net.ipv6.frags.high_thresh; table[1].data = &net->ipv6.frags.low_thresh; table[1].extra2 = &net->ipv6.frags.high_thresh; table[2].data = &net->ipv6.frags.timeout; diff --git a/net/ipv6/route.c b/net/ipv6/route.c index a366c05a239d..f4e08b0689a8 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -364,14 +364,11 @@ EXPORT_SYMBOL(ip6_dst_alloc); static void ip6_dst_destroy(struct dst_entry *dst) { - struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst); struct rt6_info *rt = (struct rt6_info *)dst; struct fib6_info *from; struct inet6_dev *idev; - if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt)) - kfree(p); - + ip_dst_metrics_put(dst); rt6_uncached_list_del(rt); idev = rt->rt6i_idev; @@ -978,11 +975,7 @@ static void rt6_set_from(struct rt6_info *rt, struct fib6_info *from) { rt->rt6i_flags &= ~RTF_EXPIRES; rcu_assign_pointer(rt->from, from); - dst_init_metrics(&rt->dst, from->fib6_metrics->metrics, true); - if (from->fib6_metrics != &dst_default_metrics) { - rt->dst._metrics |= DST_METRICS_REFCOUNTED; - refcount_inc(&from->fib6_metrics->refcnt); - } + ip_dst_init_metrics(&rt->dst, from->fib6_metrics); } /* Caller must already hold reference to @ort */ @@ -1000,7 +993,6 @@ static void ip6_rt_copy_init(struct rt6_info *rt, struct fib6_info *ort) #ifdef CONFIG_IPV6_SUBTREES rt->rt6i_src = ort->fib6_src; #endif - rt->rt6i_prefsrc = ort->fib6_prefsrc; } static struct fib6_node* fib6_backtrack(struct fib6_node *fn, @@ -1454,11 +1446,6 @@ static int rt6_insert_exception(struct rt6_info *nrt, if (ort->fib6_src.plen) src_key = &nrt->rt6i_src.addr; #endif - - /* Update rt6i_prefsrc as it could be changed - * in rt6_remove_prefsrc() - */ - nrt->rt6i_prefsrc = ort->fib6_prefsrc; /* rt6_mtu_change() might lower mtu on ort. * Only insert this exception route if its mtu * is less than ort's mtu value. 
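The strict-check validators added above (inet6_valid_dump_ifinfo(), ip6addrlbl_valid_dump_req(), and the shared ip_valid_fib_dump_req() used by inet6_dump_fib() and ip6mr_rtm_dumproute()) only run when cb->strict_check is set, which happens when the requesting socket has opted in. A minimal user-space sketch of that opt-in, assuming the NETLINK_GET_STRICT_CHK socket option added alongside these validators; error handling omitted:

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

#ifndef SOL_NETLINK			/* provided by recent libc headers */
#define SOL_NETLINK 270
#endif
#ifndef NETLINK_GET_STRICT_CHK		/* uapi value assumed from this series */
#define NETLINK_GET_STRICT_CHK 12
#endif

int main(void)
{
	struct {
		struct nlmsghdr nlh;
		struct rtmsg rtm;
	} req;
	int one = 1;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	/* Opt in to strict checking; without this, cb->strict_check stays
	 * false in the kernel and the new validators are skipped entirely.
	 */
	setsockopt(fd, SOL_NETLINK, NETLINK_GET_STRICT_CHK, &one, sizeof(one));

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = sizeof(req);
	req.nlh.nlmsg_type = RTM_GETROUTE;
	req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
	req.rtm.rtm_family = AF_INET6;	/* all other rtmsg fields must stay 0 */

	send(fd, &req, sizeof(req), 0);
	/* ... recv() the NLM_F_MULTI dump replies here ... */
	close(fd);
	return 0;
}

With strict checking enabled, a dump request whose header carries stray non-zero fields or trailing attribute data now fails with -EINVAL and an extack message instead of the extra data being silently ignored.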
@@ -1640,25 +1627,6 @@ static void rt6_update_exception_stamp_rt(struct rt6_info *rt) rcu_read_unlock(); } -static void rt6_exceptions_remove_prefsrc(struct fib6_info *rt) -{ - struct rt6_exception_bucket *bucket; - struct rt6_exception *rt6_ex; - int i; - - bucket = rcu_dereference_protected(rt->rt6i_exception_bucket, - lockdep_is_held(&rt6_exception_lock)); - - if (bucket) { - for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) { - hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) { - rt6_ex->rt6i->rt6i_prefsrc.plen = 0; - } - bucket++; - } - } -} - static bool rt6_mtu_change_route_allowed(struct inet6_dev *idev, struct rt6_info *rt, int mtu) { @@ -2103,7 +2071,8 @@ struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk, { bool any_src; - if (rt6_need_strict(&fl6->daddr)) { + if (ipv6_addr_type(&fl6->daddr) & + (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL)) { struct dst_entry *dst; dst = l3mdev_link_scope_lookup(net, fl6); @@ -2373,15 +2342,14 @@ void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu, { const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data; struct dst_entry *dst; - struct flowi6 fl6; - - memset(&fl6, 0, sizeof(fl6)); - fl6.flowi6_oif = oif; - fl6.flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark); - fl6.daddr = iph->daddr; - fl6.saddr = iph->saddr; - fl6.flowlabel = ip6_flowinfo(iph); - fl6.flowi6_uid = uid; + struct flowi6 fl6 = { + .flowi6_oif = oif, + .flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark), + .daddr = iph->daddr, + .saddr = iph->saddr, + .flowlabel = ip6_flowinfo(iph), + .flowi6_uid = uid, + }; dst = ip6_route_output(net, NULL, &fl6); if (!dst->error) @@ -2532,16 +2500,15 @@ void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark, { const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data; struct dst_entry *dst; - struct flowi6 fl6; - - memset(&fl6, 0, sizeof(fl6)); - fl6.flowi6_iif = LOOPBACK_IFINDEX; - fl6.flowi6_oif = oif; - fl6.flowi6_mark = mark; - fl6.daddr = iph->daddr; - fl6.saddr = iph->saddr; - fl6.flowlabel = ip6_flowinfo(iph); - fl6.flowi6_uid = uid; + struct flowi6 fl6 = { + .flowi6_iif = LOOPBACK_IFINDEX, + .flowi6_oif = oif, + .flowi6_mark = mark, + .daddr = iph->daddr, + .saddr = iph->saddr, + .flowlabel = ip6_flowinfo(iph), + .flowi6_uid = uid, + }; dst = ip6_route_redirect(net, &fl6, skb, &ipv6_hdr(skb)->saddr); rt6_do_redirect(dst, NULL, skb); @@ -2549,21 +2516,18 @@ void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark, } EXPORT_SYMBOL_GPL(ip6_redirect); -void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif, - u32 mark) +void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif) { const struct ipv6hdr *iph = ipv6_hdr(skb); const struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb); struct dst_entry *dst; - struct flowi6 fl6; - - memset(&fl6, 0, sizeof(fl6)); - fl6.flowi6_iif = LOOPBACK_IFINDEX; - fl6.flowi6_oif = oif; - fl6.flowi6_mark = mark; - fl6.daddr = msg->dest; - fl6.saddr = iph->daddr; - fl6.flowi6_uid = sock_net_uid(net, NULL); + struct flowi6 fl6 = { + .flowi6_iif = LOOPBACK_IFINDEX, + .flowi6_oif = oif, + .daddr = msg->dest, + .saddr = iph->daddr, + .flowi6_uid = sock_net_uid(net, NULL), + }; dst = ip6_route_redirect(net, &fl6, skb, &iph->saddr); rt6_do_redirect(dst, NULL, skb); @@ -2734,24 +2698,6 @@ out: return entries > rt_max_size; } -static int ip6_convert_metrics(struct net *net, struct fib6_info *rt, - struct fib6_config *cfg) -{ - struct dst_metrics *p; - - if (!cfg->fc_mx) - return 0; - 
- p = kzalloc(sizeof(*rt->fib6_metrics), GFP_KERNEL); - if (unlikely(!p)) - return -ENOMEM; - - refcount_set(&p->refcnt, 1); - rt->fib6_metrics = p; - - return ip_metrics_convert(net, cfg->fc_mx, cfg->fc_mx_len, p->metrics); -} - static struct rt6_info *ip6_nh_lookup_table(struct net *net, struct fib6_config *cfg, const struct in6_addr *gw_addr, @@ -3027,13 +2973,17 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg, if (!rt) goto out; + rt->fib6_metrics = ip_fib_metrics_init(net, cfg->fc_mx, cfg->fc_mx_len); + if (IS_ERR(rt->fib6_metrics)) { + err = PTR_ERR(rt->fib6_metrics); + /* Do not leave garbage there. */ + rt->fib6_metrics = (struct dst_metrics *)&dst_default_metrics; + goto out; + } + if (cfg->fc_flags & RTF_ADDRCONF) rt->dst_nocount = true; - err = ip6_convert_metrics(net, rt, cfg); - if (err < 0) - goto out; - if (cfg->fc_flags & RTF_EXPIRES) fib6_set_expires(rt, jiffies + clock_t_to_jiffies(cfg->fc_expires)); @@ -3142,8 +3092,6 @@ install_route: rt->fib6_nh.nh_dev = dev; rt->fib6_table = table; - cfg->fc_nlinfo.nl_net = dev_net(dev); - if (idev) in6_dev_put(idev); @@ -3635,23 +3583,23 @@ static void rtmsg_to_fib6_config(struct net *net, struct in6_rtmsg *rtmsg, struct fib6_config *cfg) { - memset(cfg, 0, sizeof(*cfg)); + *cfg = (struct fib6_config){ + .fc_table = l3mdev_fib_table_by_index(net, rtmsg->rtmsg_ifindex) ? + : RT6_TABLE_MAIN, + .fc_ifindex = rtmsg->rtmsg_ifindex, + .fc_metric = rtmsg->rtmsg_metric, + .fc_expires = rtmsg->rtmsg_info, + .fc_dst_len = rtmsg->rtmsg_dst_len, + .fc_src_len = rtmsg->rtmsg_src_len, + .fc_flags = rtmsg->rtmsg_flags, + .fc_type = rtmsg->rtmsg_type, - cfg->fc_table = l3mdev_fib_table_by_index(net, rtmsg->rtmsg_ifindex) ? - : RT6_TABLE_MAIN; - cfg->fc_ifindex = rtmsg->rtmsg_ifindex; - cfg->fc_metric = rtmsg->rtmsg_metric; - cfg->fc_expires = rtmsg->rtmsg_info; - cfg->fc_dst_len = rtmsg->rtmsg_dst_len; - cfg->fc_src_len = rtmsg->rtmsg_src_len; - cfg->fc_flags = rtmsg->rtmsg_flags; - cfg->fc_type = rtmsg->rtmsg_type; - - cfg->fc_nlinfo.nl_net = net; + .fc_nlinfo.nl_net = net, - cfg->fc_dst = rtmsg->rtmsg_dst; - cfg->fc_src = rtmsg->rtmsg_src; - cfg->fc_gateway = rtmsg->rtmsg_gateway; + .fc_dst = rtmsg->rtmsg_dst, + .fc_src = rtmsg->rtmsg_src, + .fc_gateway = rtmsg->rtmsg_gateway, + }; } int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg) @@ -3758,6 +3706,7 @@ struct fib6_info *addrconf_f6i_alloc(struct net *net, if (!f6i) return ERR_PTR(-ENOMEM); + f6i->fib6_metrics = ip_fib_metrics_init(net, NULL, 0); f6i->dst_nocount = true; f6i->dst_host = true; f6i->fib6_protocol = RTPROT_KERNEL; @@ -3800,8 +3749,6 @@ static int fib6_remove_prefsrc(struct fib6_info *rt, void *arg) spin_lock_bh(&rt6_exception_lock); /* remove prefsrc entry */ rt->fib6_prefsrc.plen = 0; - /* need to update cache as well */ - rt6_exceptions_remove_prefsrc(rt); spin_unlock_bh(&rt6_exception_lock); } return 0; @@ -4079,8 +4026,12 @@ void rt6_sync_down_dev(struct net_device *dev, unsigned long event) .event = event, }, }; + struct net *net = dev_net(dev); - fib6_clean_all(dev_net(dev), fib6_ifdown, &arg); + if (net->ipv6.sysctl.skip_notify_on_dev_down) + fib6_clean_all_skip_notify(net, fib6_ifdown, &arg); + else + fib6_clean_all(net, fib6_ifdown, &arg); } void rt6_disable_ip(struct net_device *dev, unsigned long event) @@ -4170,20 +4121,25 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh, int err; err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy, - NULL); + extack); if (err < 0) goto errout; err 
= -EINVAL; rtm = nlmsg_data(nlh); - memset(cfg, 0, sizeof(*cfg)); - cfg->fc_table = rtm->rtm_table; - cfg->fc_dst_len = rtm->rtm_dst_len; - cfg->fc_src_len = rtm->rtm_src_len; - cfg->fc_flags = RTF_UP; - cfg->fc_protocol = rtm->rtm_protocol; - cfg->fc_type = rtm->rtm_type; + *cfg = (struct fib6_config){ + .fc_table = rtm->rtm_table, + .fc_dst_len = rtm->rtm_dst_len, + .fc_src_len = rtm->rtm_src_len, + .fc_flags = RTF_UP, + .fc_protocol = rtm->rtm_protocol, + .fc_type = rtm->rtm_type, + + .fc_nlinfo.portid = NETLINK_CB(skb).portid, + .fc_nlinfo.nlh = nlh, + .fc_nlinfo.nl_net = sock_net(skb->sk), + }; if (rtm->rtm_type == RTN_UNREACHABLE || rtm->rtm_type == RTN_BLACKHOLE || @@ -4199,10 +4155,6 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh, cfg->fc_flags |= (rtm->rtm_flags & RTNH_F_ONLINK); - cfg->fc_nlinfo.portid = NETLINK_CB(skb).portid; - cfg->fc_nlinfo.nlh = nlh; - cfg->fc_nlinfo.nl_net = sock_net(skb->sk); - if (tb[RTA_GATEWAY]) { cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]); cfg->fc_flags |= RTF_GATEWAY; @@ -4850,7 +4802,7 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, struct rt6_info *rt; struct sk_buff *skb; struct rtmsg *rtm; - struct flowi6 fl6; + struct flowi6 fl6 = {}; bool fibmatch; err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy, @@ -4859,7 +4811,6 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, goto errout; err = -EINVAL; - memset(&fl6, 0, sizeof(fl6)); rtm = nlmsg_data(nlh); fl6.flowlabel = ip6_make_flowinfo(rtm->rtm_tos, 0); fibmatch = !!(rtm->rtm_flags & RTM_F_FIB_MATCH); @@ -5084,7 +5035,10 @@ int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write, return 0; } -struct ctl_table ipv6_route_table_template[] = { +static int zero; +static int one = 1; + +static struct ctl_table ipv6_route_table_template[] = { { .procname = "flush", .data = &init_net.ipv6.sysctl.flush_delay, @@ -5155,6 +5109,15 @@ struct ctl_table ipv6_route_table_template[] = { .mode = 0644, .proc_handler = proc_dointvec_ms_jiffies, }, + { + .procname = "skip_notify_on_dev_down", + .data = &init_net.ipv6.sysctl.skip_notify_on_dev_down, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + .extra1 = &zero, + .extra2 = &one, + }, { } }; @@ -5178,6 +5141,7 @@ struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net) table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires; table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss; table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval; + table[10].data = &net->ipv6.sysctl.skip_notify_on_dev_down; /* Don't export sysctls to unprivileged users */ if (net->user_ns != &init_user_ns) @@ -5242,6 +5206,7 @@ static int __net_init ip6_route_net_init(struct net *net) net->ipv6.sysctl.ip6_rt_gc_elasticity = 9; net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ; net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40; + net->ipv6.sysctl.skip_notify_on_dev_down = 0; net->ipv6.ip6_rt_gc_expire = 30*HZ; diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index e9400ffa7875..51c9f75f34b9 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -534,13 +534,13 @@ static int ipip6_err(struct sk_buff *skb, u32 info) if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) { ipv4_update_pmtu(skb, dev_net(skb->dev), info, - t->parms.link, 0, iph->protocol, 0); + t->parms.link, iph->protocol); err = 0; goto out; } if (type == ICMP_REDIRECT) { - ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0, - iph->protocol, 0); + ipv4_redirect(skb, 
dev_net(skb->dev), t->parms.link, + iph->protocol); err = 0; goto out; } diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 28c4aa5078fc..374e7d302f26 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -548,7 +548,7 @@ static __inline__ void udpv6_err(struct sk_buff *skb, __udp6_lib_err(skb, opt, type, code, offset, info, &udp_table); } -static DEFINE_STATIC_KEY_FALSE(udpv6_encap_needed_key); +DEFINE_STATIC_KEY_FALSE(udpv6_encap_needed_key); void udpv6_encap_enable(void) { static_branch_enable(&udpv6_encap_needed_key); diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c index 95dee9ca8d22..1b8e161ac527 100644 --- a/net/ipv6/udp_offload.c +++ b/net/ipv6/udp_offload.c @@ -119,7 +119,7 @@ static struct sk_buff *udp6_gro_receive(struct list_head *head, { struct udphdr *uh = udp_gro_udphdr(skb); - if (unlikely(!uh)) + if (unlikely(!uh) || !static_branch_unlikely(&udpv6_encap_needed_key)) goto flush; /* Don't bother verifying checksum if we're going to flush anyway. */ diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index e2f16a0173a9..45115c125569 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c @@ -48,7 +48,7 @@ static struct iucv_interface *pr_iucv; static const u8 iprm_shutdown[8] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}; -#define TRGCLS_SIZE (sizeof(((struct iucv_message *)0)->class)) +#define TRGCLS_SIZE FIELD_SIZEOF(struct iucv_message, class) #define __iucv_sock_wait(sk, condition, timeo, ret) \ do { \ @@ -320,13 +320,9 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock, struct sk_buff *nskb; int err, confirm_recv = 0; - memset(skb->head, 0, ETH_HLEN); - phs_hdr = skb_push(skb, sizeof(struct af_iucv_trans_hdr)); - skb_reset_mac_header(skb); + phs_hdr = skb_push(skb, sizeof(*phs_hdr)); + memset(phs_hdr, 0, sizeof(*phs_hdr)); skb_reset_network_header(skb); - skb_push(skb, ETH_HLEN); - skb_reset_mac_header(skb); - memset(phs_hdr, 0, sizeof(struct af_iucv_trans_hdr)); phs_hdr->magic = ETH_P_AF_IUCV; phs_hdr->version = 1; @@ -350,6 +346,9 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock, if (imsg) memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message)); + skb_push(skb, ETH_HLEN); + memset(skb->data, 0, ETH_HLEN); + skb->dev = iucv->hs_dev; if (!skb->dev) { err = -ENODEV; @@ -1943,8 +1942,7 @@ static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16]) /***************** HiperSockets transport callbacks ********************/ static void afiucv_swap_src_dest(struct sk_buff *skb) { - struct af_iucv_trans_hdr *trans_hdr = - (struct af_iucv_trans_hdr *)skb->data; + struct af_iucv_trans_hdr *trans_hdr = iucv_trans_hdr(skb); char tmpID[8]; char tmpName[8]; @@ -1967,13 +1965,12 @@ static void afiucv_swap_src_dest(struct sk_buff *skb) **/ static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb) { + struct af_iucv_trans_hdr *trans_hdr = iucv_trans_hdr(skb); struct sock *nsk; struct iucv_sock *iucv, *niucv; - struct af_iucv_trans_hdr *trans_hdr; int err; iucv = iucv_sk(sk); - trans_hdr = (struct af_iucv_trans_hdr *)skb->data; if (!iucv) { /* no sock - connection refused */ afiucv_swap_src_dest(skb); @@ -2034,15 +2031,13 @@ out: static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb) { struct iucv_sock *iucv = iucv_sk(sk); - struct af_iucv_trans_hdr *trans_hdr = - (struct af_iucv_trans_hdr *)skb->data; if (!iucv) goto out; if (sk->sk_state != IUCV_BOUND) goto out; bh_lock_sock(sk); - iucv->msglimit_peer = trans_hdr->window; + iucv->msglimit_peer = 
iucv_trans_hdr(skb)->window; sk->sk_state = IUCV_CONNECTED; sk->sk_state_change(sk); bh_unlock_sock(sk); @@ -2098,8 +2093,6 @@ out: static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb) { struct iucv_sock *iucv = iucv_sk(sk); - struct af_iucv_trans_hdr *trans_hdr = - (struct af_iucv_trans_hdr *)skb->data; if (!iucv) return NET_RX_SUCCESS; @@ -2107,7 +2100,7 @@ static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb) if (sk->sk_state != IUCV_CONNECTED) return NET_RX_SUCCESS; - atomic_sub(trans_hdr->window, &iucv->msg_sent); + atomic_sub(iucv_trans_hdr(skb)->window, &iucv->msg_sent); iucv_sock_wake_msglim(sk); return NET_RX_SUCCESS; } @@ -2170,22 +2163,13 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev, int err = NET_RX_SUCCESS; char nullstring[8]; - if (skb->len < (ETH_HLEN + sizeof(struct af_iucv_trans_hdr))) { - WARN_ONCE(1, "AF_IUCV too short skb, len=%d, min=%d", - (int)skb->len, - (int)(ETH_HLEN + sizeof(struct af_iucv_trans_hdr))); + if (!pskb_may_pull(skb, sizeof(*trans_hdr))) { + WARN_ONCE(1, "AF_IUCV failed to receive skb, len=%u", skb->len); kfree_skb(skb); return NET_RX_SUCCESS; } - if (skb_headlen(skb) < (ETH_HLEN + sizeof(struct af_iucv_trans_hdr))) - if (skb_linearize(skb)) { - WARN_ONCE(1, "AF_IUCV skb_linearize failed, len=%d", - (int)skb->len); - kfree_skb(skb); - return NET_RX_SUCCESS; - } - skb_pull(skb, ETH_HLEN); - trans_hdr = (struct af_iucv_trans_hdr *)skb->data; + + trans_hdr = iucv_trans_hdr(skb); EBCASC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName)); EBCASC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID)); EBCASC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName)); diff --git a/net/llc/llc_core.c b/net/llc/llc_core.c index 260b3dc1b4a2..64d4bef04e73 100644 --- a/net/llc/llc_core.c +++ b/net/llc/llc_core.c @@ -127,9 +127,7 @@ void llc_sap_close(struct llc_sap *sap) list_del_rcu(&sap->node); spin_unlock_bh(&llc_sap_list_lock); - synchronize_rcu(); - - kfree(sap); + kfree_rcu(sap, rcu); } static struct packet_type llc_packet_type __read_mostly = { diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig index 76e30f4797fb..f869e35d0974 100644 --- a/net/mac80211/Kconfig +++ b/net/mac80211/Kconfig @@ -27,20 +27,6 @@ config MAC80211_RC_MINSTREL ---help--- This option enables the 'minstrel' TX rate control algorithm -config MAC80211_RC_MINSTREL_HT - bool "Minstrel 802.11n support" if EXPERT - depends on MAC80211_RC_MINSTREL - default y - ---help--- - This option enables the 'minstrel_ht' TX rate control algorithm - -config MAC80211_RC_MINSTREL_VHT - bool "Minstrel 802.11ac support" if EXPERT - depends on MAC80211_RC_MINSTREL_HT - default n - ---help--- - This option enables VHT in the 'minstrel_ht' TX rate control algorithm - choice prompt "Default rate control algorithm" depends on MAC80211_HAS_RC @@ -62,8 +48,7 @@ endchoice config MAC80211_RC_DEFAULT string - default "minstrel_ht" if MAC80211_RC_DEFAULT_MINSTREL && MAC80211_RC_MINSTREL_HT - default "minstrel" if MAC80211_RC_DEFAULT_MINSTREL + default "minstrel_ht" if MAC80211_RC_DEFAULT_MINSTREL default "" endif diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile index bb707789ef2b..4f03ebe732fa 100644 --- a/net/mac80211/Makefile +++ b/net/mac80211/Makefile @@ -53,13 +53,14 @@ mac80211-$(CONFIG_PM) += pm.o CFLAGS_trace.o := -I$(src) -rc80211_minstrel-y := rc80211_minstrel.o -rc80211_minstrel-$(CONFIG_MAC80211_DEBUGFS) += rc80211_minstrel_debugfs.o +rc80211_minstrel-y := \ + rc80211_minstrel.o \ + rc80211_minstrel_ht.o -rc80211_minstrel_ht-y 
:= rc80211_minstrel_ht.o -rc80211_minstrel_ht-$(CONFIG_MAC80211_DEBUGFS) += rc80211_minstrel_ht_debugfs.o +rc80211_minstrel-$(CONFIG_MAC80211_DEBUGFS) += \ + rc80211_minstrel_debugfs.o \ + rc80211_minstrel_ht_debugfs.o mac80211-$(CONFIG_MAC80211_RC_MINSTREL) += $(rc80211_minstrel-y) -mac80211-$(CONFIG_MAC80211_RC_MINSTREL_HT) += $(rc80211_minstrel_ht-y) ccflags-y += -DDEBUG diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 5d22eda8a6b1..51622333d460 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -158,12 +158,10 @@ static int ieee80211_change_iface(struct wiphy *wiphy, if (ret) return ret; - if (type == NL80211_IFTYPE_AP_VLAN && - params && params->use_4addr == 0) { + if (type == NL80211_IFTYPE_AP_VLAN && params->use_4addr == 0) { RCU_INIT_POINTER(sdata->u.vlan.sta, NULL); ieee80211_check_fast_rx_iface(sdata); - } else if (type == NL80211_IFTYPE_STATION && - params && params->use_4addr >= 0) { + } else if (type == NL80211_IFTYPE_STATION && params->use_4addr >= 0) { sdata->u.mgd.use_4addr = params->use_4addr; } @@ -792,6 +790,48 @@ static int ieee80211_set_probe_resp(struct ieee80211_sub_if_data *sdata, return 0; } +static int ieee80211_set_ftm_responder_params( + struct ieee80211_sub_if_data *sdata, + const u8 *lci, size_t lci_len, + const u8 *civicloc, size_t civicloc_len) +{ + struct ieee80211_ftm_responder_params *new, *old; + struct ieee80211_bss_conf *bss_conf; + u8 *pos; + int len; + + if ((!lci || !lci_len) && (!civicloc || !civicloc_len)) + return 1; + + bss_conf = &sdata->vif.bss_conf; + old = bss_conf->ftmr_params; + len = lci_len + civicloc_len; + + new = kzalloc(sizeof(*new) + len, GFP_KERNEL); + if (!new) + return -ENOMEM; + + pos = (u8 *)(new + 1); + if (lci_len) { + new->lci_len = lci_len; + new->lci = pos; + memcpy(pos, lci, lci_len); + pos += lci_len; + } + + if (civicloc_len) { + new->civicloc_len = civicloc_len; + new->civicloc = pos; + memcpy(pos, civicloc, civicloc_len); + pos += civicloc_len; + } + + bss_conf->ftmr_params = new; + kfree(old); + + return 0; +} + static int ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata, struct cfg80211_beacon_data *params, const struct ieee80211_csa_settings *csa) @@ -865,6 +905,20 @@ static int ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata, if (err == 0) changed |= BSS_CHANGED_AP_PROBE_RESP; + if (params->ftm_responder != -1) { + sdata->vif.bss_conf.ftm_responder = params->ftm_responder; + err = ieee80211_set_ftm_responder_params(sdata, + params->lci, + params->lci_len, + params->civicloc, + params->civicloc_len); + + if (err < 0) + return err; + + changed |= BSS_CHANGED_FTM_RESPONDER; + } + rcu_assign_pointer(sdata->u.ap.beacon, new); if (old) @@ -911,6 +965,9 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev, sdata->vif.bss_conf.beacon_int = params->beacon_interval; + if (params->he_cap) + sdata->vif.bss_conf.he_support = true; + mutex_lock(&local->mtx); err = ieee80211_vif_use_channel(sdata, ¶ms->chandef, IEEE80211_CHANCTX_SHARED); @@ -1062,6 +1119,9 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev) kfree_rcu(old_probe_resp, rcu_head); sdata->u.ap.driver_smps_mode = IEEE80211_SMPS_OFF; + kfree(sdata->vif.bss_conf.ftmr_params); + sdata->vif.bss_conf.ftmr_params = NULL; + __sta_info_flush(sdata, true); ieee80211_free_keys(sdata, true); @@ -1092,50 +1152,6 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev) return 0; } -/* Layer 2 Update frame (802.2 Type 1 LLC XID Update response) */ -struct 
iapp_layer2_update { - u8 da[ETH_ALEN]; /* broadcast */ - u8 sa[ETH_ALEN]; /* STA addr */ - __be16 len; /* 6 */ - u8 dsap; /* 0 */ - u8 ssap; /* 0 */ - u8 control; - u8 xid_info[3]; -} __packed; - -static void ieee80211_send_layer2_update(struct sta_info *sta) -{ - struct iapp_layer2_update *msg; - struct sk_buff *skb; - - /* Send Level 2 Update Frame to update forwarding tables in layer 2 - * bridge devices */ - - skb = dev_alloc_skb(sizeof(*msg)); - if (!skb) - return; - msg = skb_put(skb, sizeof(*msg)); - - /* 802.2 Type 1 Logical Link Control (LLC) Exchange Identifier (XID) - * Update response frame; IEEE Std 802.2-1998, 5.4.1.2.1 */ - - eth_broadcast_addr(msg->da); - memcpy(msg->sa, sta->sta.addr, ETH_ALEN); - msg->len = htons(6); - msg->dsap = 0; - msg->ssap = 0x01; /* NULL LSAP, CR Bit: Response */ - msg->control = 0xaf; /* XID response lsb.1111F101. - * F=0 (no poll command; unsolicited frame) */ - msg->xid_info[0] = 0x81; /* XID format identifier */ - msg->xid_info[1] = 1; /* LLC types/classes: Type 1 LLC */ - msg->xid_info[2] = 0; /* XID sender's receive window size (RW) */ - - skb->dev = sta->sdata->dev; - skb->protocol = eth_type_trans(skb, sta->sdata->dev); - memset(skb->cb, 0, sizeof(skb->cb)); - netif_rx_ni(skb); -} - static int sta_apply_auth_flags(struct ieee80211_local *local, struct sta_info *sta, u32 mask, u32 set) @@ -1499,7 +1515,7 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev, } if (layer2_update) - ieee80211_send_layer2_update(sta); + cfg80211_send_layer2_update(sta->sdata->dev, sta->sta.addr); rcu_read_unlock(); @@ -1601,7 +1617,7 @@ static int ieee80211_change_station(struct wiphy *wiphy, if (test_sta_flag(sta, WLAN_STA_AUTHORIZED)) ieee80211_vif_inc_num_mcast(sta->sdata); - ieee80211_send_layer2_update(sta); + cfg80211_send_layer2_update(sta->sdata->dev, sta->sta.addr); } err = sta_apply_parameters(local, sta, params); @@ -2918,6 +2934,20 @@ cfg80211_beacon_dup(struct cfg80211_beacon_data *beacon) memcpy(pos, beacon->probe_resp, beacon->probe_resp_len); pos += beacon->probe_resp_len; } + if (beacon->ftm_responder) + new_beacon->ftm_responder = beacon->ftm_responder; + if (beacon->lci) { + new_beacon->lci_len = beacon->lci_len; + new_beacon->lci = pos; + memcpy(pos, beacon->lci, beacon->lci_len); + pos += beacon->lci_len; + } + if (beacon->civicloc) { + new_beacon->civicloc_len = beacon->civicloc_len; + new_beacon->civicloc = pos; + memcpy(pos, beacon->civicloc, beacon->civicloc_len); + pos += beacon->civicloc_len; + } return new_beacon; } @@ -3808,6 +3838,17 @@ out: return ret; } +static int +ieee80211_get_ftm_responder_stats(struct wiphy *wiphy, + struct net_device *dev, + struct cfg80211_ftm_responder_stats *ftm_stats) +{ + struct ieee80211_local *local = wiphy_priv(wiphy); + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + + return drv_get_ftm_responder_stats(local, sdata, ftm_stats); +} + const struct cfg80211_ops mac80211_config_ops = { .add_virtual_intf = ieee80211_add_iface, .del_virtual_intf = ieee80211_del_iface, @@ -3902,4 +3943,5 @@ const struct cfg80211_ops mac80211_config_ops = { .set_multicast_to_unicast = ieee80211_set_multicast_to_unicast, .tx_control_port = ieee80211_tx_control_port, .get_txq_stats = ieee80211_get_txq_stats, + .get_ftm_responder_stats = ieee80211_get_ftm_responder_stats, }; diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c index b5adf3625d16..3fe541e358f3 100644 --- a/net/mac80211/debugfs.c +++ b/net/mac80211/debugfs.c @@ -3,6 +3,7 @@ * * Copyright 2007 Johannes 
Berg <johannes@sipsolutions.net> * Copyright 2013-2014 Intel Mobile Communications GmbH + * Copyright (C) 2018 Intel Corporation * * GPLv2 * @@ -214,6 +215,9 @@ static const char *hw_flag_names[] = { FLAG(SUPPORTS_TDLS_BUFFER_STA), FLAG(DEAUTH_NEED_MGD_TX_PREP), FLAG(DOESNT_SUPPORT_QOS_NDP), + FLAG(BUFF_MMPDU_TXQ), + FLAG(SUPPORTS_VHT_EXT_NSS_BW), + FLAG(STA_MMPDU_TXQ), #undef FLAG }; diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c index 4105081dc1df..af5185a836e5 100644 --- a/net/mac80211/debugfs_sta.c +++ b/net/mac80211/debugfs_sta.c @@ -4,6 +4,7 @@ * Copyright 2007 Johannes Berg <johannes@sipsolutions.net> * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright (C) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -140,7 +141,7 @@ static ssize_t sta_aqm_read(struct file *file, char __user *userbuf, { struct sta_info *sta = file->private_data; struct ieee80211_local *local = sta->local; - size_t bufsz = AQM_TXQ_ENTRY_LEN*(IEEE80211_NUM_TIDS+1); + size_t bufsz = AQM_TXQ_ENTRY_LEN * (IEEE80211_NUM_TIDS + 2); char *buf = kzalloc(bufsz, GFP_KERNEL), *p = buf; struct txq_info *txqi; ssize_t rv; @@ -162,7 +163,9 @@ static ssize_t sta_aqm_read(struct file *file, char __user *userbuf, bufsz+buf-p, "tid ac backlog-bytes backlog-packets new-flows drops marks overlimit collisions tx-bytes tx-packets flags\n"); - for (i = 0; i < IEEE80211_NUM_TIDS; i++) { + for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) { + if (!sta->sta.txq[i]) + continue; txqi = to_txq_info(sta->sta.txq[i]); p += scnprintf(p, bufsz+buf-p, "%d %d %u %u %u %u %u %u %u %u %u 0x%lx(%s%s%s)\n", @@ -487,12 +490,368 @@ static ssize_t sta_vht_capa_read(struct file *file, char __user *userbuf, p += scnprintf(p, sizeof(buf)+buf-p, "MCS TX highest: %d Mbps\n", le16_to_cpu(vhtc->vht_mcs.tx_highest)); +#undef PFLAG } return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); } STA_OPS(vht_capa); +static ssize_t sta_he_capa_read(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + char *buf, *p; + size_t buf_sz = PAGE_SIZE; + struct sta_info *sta = file->private_data; + struct ieee80211_sta_he_cap *hec = &sta->sta.he_cap; + struct ieee80211_he_mcs_nss_supp *nss = &hec->he_mcs_nss_supp; + u8 ppe_size; + u8 *cap; + int i; + ssize_t ret; + + buf = kmalloc(buf_sz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + p = buf; + + p += scnprintf(p, buf_sz + buf - p, "HE %ssupported\n", + hec->has_he ? "" : "not "); + if (!hec->has_he) + goto out; + + cap = hec->he_cap_elem.mac_cap_info; + p += scnprintf(p, buf_sz + buf - p, + "MAC-CAP: %#.2x %#.2x %#.2x %#.2x %#.2x %#.2x\n", + cap[0], cap[1], cap[2], cap[3], cap[4], cap[5]); + +#define PRINT(fmt, ...) 
\ + p += scnprintf(p, buf_sz + buf - p, "\t\t" fmt "\n", \ + ##__VA_ARGS__) + +#define PFLAG(t, n, a, b) \ + do { \ + if (cap[n] & IEEE80211_HE_##t##_CAP##n##_##a) \ + PRINT("%s", b); \ + } while (0) + +#define PFLAG_RANGE(t, i, n, s, m, off, fmt) \ + do { \ + u8 msk = IEEE80211_HE_##t##_CAP##i##_##n##_MASK; \ + u8 idx = ((cap[i] & msk) >> (ffs(msk) - 1)) + off; \ + PRINT(fmt, (s << idx) + (m * idx)); \ + } while (0) + +#define PFLAG_RANGE_DEFAULT(t, i, n, s, m, off, fmt, a, b) \ + do { \ + if (cap[i] == IEEE80211_HE_##t ##_CAP##i##_##n##_##a) { \ + PRINT("%s", b); \ + break; \ + } \ + PFLAG_RANGE(t, i, n, s, m, off, fmt); \ + } while (0) + + PFLAG(MAC, 0, HTC_HE, "HTC-HE"); + PFLAG(MAC, 0, TWT_REQ, "TWT-REQ"); + PFLAG(MAC, 0, TWT_RES, "TWT-RES"); + PFLAG_RANGE_DEFAULT(MAC, 0, DYNAMIC_FRAG, 0, 1, 0, + "DYNAMIC-FRAG-LEVEL-%d", NOT_SUPP, "NOT-SUPP"); + PFLAG_RANGE_DEFAULT(MAC, 0, MAX_NUM_FRAG_MSDU, 1, 0, 0, + "MAX-NUM-FRAG-MSDU-%d", UNLIMITED, "UNLIMITED"); + + PFLAG_RANGE_DEFAULT(MAC, 1, MIN_FRAG_SIZE, 128, 0, -1, + "MIN-FRAG-SIZE-%d", UNLIMITED, "UNLIMITED"); + PFLAG_RANGE_DEFAULT(MAC, 1, TF_MAC_PAD_DUR, 0, 8, 0, + "TF-MAC-PAD-DUR-%dUS", MASK, "UNKNOWN"); + PFLAG_RANGE(MAC, 1, MULTI_TID_AGG_RX_QOS, 0, 1, 1, + "MULTI-TID-AGG-RX-QOS-%d"); + + if (cap[0] & IEEE80211_HE_MAC_CAP0_HTC_HE) { + switch (((cap[2] << 1) | (cap[1] >> 7)) & 0x3) { + case 0: + PRINT("LINK-ADAPTATION-NO-FEEDBACK"); + break; + case 1: + PRINT("LINK-ADAPTATION-RESERVED"); + break; + case 2: + PRINT("LINK-ADAPTATION-UNSOLICITED-FEEDBACK"); + break; + case 3: + PRINT("LINK-ADAPTATION-BOTH"); + break; + } + } + + PFLAG(MAC, 2, ALL_ACK, "ALL-ACK"); + PFLAG(MAC, 2, TRS, "TRS"); + PFLAG(MAC, 2, BSR, "BSR"); + PFLAG(MAC, 2, BCAST_TWT, "BCAST-TWT"); + PFLAG(MAC, 2, 32BIT_BA_BITMAP, "32BIT-BA-BITMAP"); + PFLAG(MAC, 2, MU_CASCADING, "MU-CASCADING"); + PFLAG(MAC, 2, ACK_EN, "ACK-EN"); + + PFLAG(MAC, 3, OMI_CONTROL, "OMI-CONTROL"); + PFLAG(MAC, 3, OFDMA_RA, "OFDMA-RA"); + + switch (cap[3] & IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK) { + case IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_USE_VHT: + PRINT("MAX-AMPDU-LEN-EXP-USE-VHT"); + break; + case IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_1: + PRINT("MAX-AMPDU-LEN-EXP-VHT-1"); + break; + case IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2: + PRINT("MAX-AMPDU-LEN-EXP-VHT-2"); + break; + case IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_RESERVED: + PRINT("MAX-AMPDU-LEN-EXP-RESERVED"); + break; + } + + PFLAG(MAC, 3, AMSDU_FRAG, "AMSDU-FRAG"); + PFLAG(MAC, 3, FLEX_TWT_SCHED, "FLEX-TWT-SCHED"); + PFLAG(MAC, 3, RX_CTRL_FRAME_TO_MULTIBSS, "RX-CTRL-FRAME-TO-MULTIBSS"); + + PFLAG(MAC, 4, BSRP_BQRP_A_MPDU_AGG, "BSRP-BQRP-A-MPDU-AGG"); + PFLAG(MAC, 4, QTP, "QTP"); + PFLAG(MAC, 4, BQR, "BQR"); + PFLAG(MAC, 4, SRP_RESP, "SRP-RESP"); + PFLAG(MAC, 4, NDP_FB_REP, "NDP-FB-REP"); + PFLAG(MAC, 4, OPS, "OPS"); + PFLAG(MAC, 4, AMDSU_IN_AMPDU, "AMSDU-IN-AMPDU"); + + PRINT("MULTI-TID-AGG-TX-QOS-%d", ((cap[5] << 1) | (cap[4] >> 7)) & 0x7); + + PFLAG(MAC, 5, SUBCHAN_SELECVITE_TRANSMISSION, + "SUBCHAN-SELECVITE-TRANSMISSION"); + PFLAG(MAC, 5, UL_2x996_TONE_RU, "UL-2x996-TONE-RU"); + PFLAG(MAC, 5, OM_CTRL_UL_MU_DATA_DIS_RX, "OM-CTRL-UL-MU-DATA-DIS-RX"); + + cap = hec->he_cap_elem.phy_cap_info; + p += scnprintf(p, buf_sz + buf - p, + "PHY CAP: %#.2x %#.2x %#.2x %#.2x %#.2x %#.2x %#.2x %#.2x %#.2x %#.2x %#.2x\n", + cap[0], cap[1], cap[2], cap[3], cap[4], cap[5], cap[6], + cap[7], cap[8], cap[9], cap[10]); + + PFLAG(PHY, 0, CHANNEL_WIDTH_SET_40MHZ_IN_2G, + "CHANNEL-WIDTH-SET-40MHZ-IN-2G"); + PFLAG(PHY, 0, 
CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G, + "CHANNEL-WIDTH-SET-40MHZ-80MHZ-IN-5G"); + PFLAG(PHY, 0, CHANNEL_WIDTH_SET_160MHZ_IN_5G, + "CHANNEL-WIDTH-SET-160MHZ-IN-5G"); + PFLAG(PHY, 0, CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G, + "CHANNEL-WIDTH-SET-80PLUS80-MHZ-IN-5G"); + PFLAG(PHY, 0, CHANNEL_WIDTH_SET_RU_MAPPING_IN_2G, + "CHANNEL-WIDTH-SET-RU-MAPPING-IN-2G"); + PFLAG(PHY, 0, CHANNEL_WIDTH_SET_RU_MAPPING_IN_5G, + "CHANNEL-WIDTH-SET-RU-MAPPING-IN-5G"); + + switch (cap[1] & IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK) { + case IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_80MHZ_ONLY_SECOND_20MHZ: + PRINT("PREAMBLE-PUNC-RX-80MHZ-ONLY-SECOND-20MHZ"); + break; + case IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_80MHZ_ONLY_SECOND_40MHZ: + PRINT("PREAMBLE-PUNC-RX-80MHZ-ONLY-SECOND-40MHZ"); + break; + case IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_160MHZ_ONLY_SECOND_20MHZ: + PRINT("PREAMBLE-PUNC-RX-160MHZ-ONLY-SECOND-20MHZ"); + break; + case IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_160MHZ_ONLY_SECOND_40MHZ: + PRINT("PREAMBLE-PUNC-RX-160MHZ-ONLY-SECOND-40MHZ"); + break; + } + + PFLAG(PHY, 1, DEVICE_CLASS_A, + "IEEE80211-HE-PHY-CAP1-DEVICE-CLASS-A"); + PFLAG(PHY, 1, LDPC_CODING_IN_PAYLOAD, + "LDPC-CODING-IN-PAYLOAD"); + PFLAG(PHY, 1, HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US, + "HY-CAP1-HE-LTF-AND-GI-FOR-HE-PPDUS-0-8US"); + PRINT("MIDAMBLE-RX-MAX-NSTS-%d", ((cap[2] << 1) | (cap[1] >> 7)) & 0x3); + + PFLAG(PHY, 2, NDP_4x_LTF_AND_3_2US, "NDP-4X-LTF-AND-3-2US"); + PFLAG(PHY, 2, STBC_TX_UNDER_80MHZ, "STBC-TX-UNDER-80MHZ"); + PFLAG(PHY, 2, STBC_RX_UNDER_80MHZ, "STBC-RX-UNDER-80MHZ"); + PFLAG(PHY, 2, DOPPLER_TX, "DOPPLER-TX"); + PFLAG(PHY, 2, DOPPLER_RX, "DOPPLER-RX"); + PFLAG(PHY, 2, UL_MU_FULL_MU_MIMO, "UL-MU-FULL-MU-MIMO"); + PFLAG(PHY, 2, UL_MU_PARTIAL_MU_MIMO, "UL-MU-PARTIAL-MU-MIMO"); + + switch (cap[3] & IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK) { + case IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_NO_DCM: + PRINT("DCM-MAX-CONST-TX-NO-DCM"); + break; + case IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_BPSK: + PRINT("DCM-MAX-CONST-TX-BPSK"); + break; + case IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_QPSK: + PRINT("DCM-MAX-CONST-TX-QPSK"); + break; + case IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_16_QAM: + PRINT("DCM-MAX-CONST-TX-16-QAM"); + break; + } + + PFLAG(PHY, 3, DCM_MAX_TX_NSS_1, "DCM-MAX-TX-NSS-1"); + PFLAG(PHY, 3, DCM_MAX_TX_NSS_2, "DCM-MAX-TX-NSS-2"); + + switch (cap[3] & IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK) { + case IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_NO_DCM: + PRINT("DCM-MAX-CONST-RX-NO-DCM"); + break; + case IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_BPSK: + PRINT("DCM-MAX-CONST-RX-BPSK"); + break; + case IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_QPSK: + PRINT("DCM-MAX-CONST-RX-QPSK"); + break; + case IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM: + PRINT("DCM-MAX-CONST-RX-16-QAM"); + break; + } + + PFLAG(PHY, 3, DCM_MAX_RX_NSS_1, "DCM-MAX-RX-NSS-1"); + PFLAG(PHY, 3, DCM_MAX_RX_NSS_2, "DCM-MAX-RX-NSS-2"); + PFLAG(PHY, 3, RX_HE_MU_PPDU_FROM_NON_AP_STA, + "RX-HE-MU-PPDU-FROM-NON-AP-STA"); + PFLAG(PHY, 3, SU_BEAMFORMER, "SU-BEAMFORMER"); + + PFLAG(PHY, 4, SU_BEAMFORMEE, "SU-BEAMFORMEE"); + PFLAG(PHY, 4, MU_BEAMFORMER, "MU-BEAMFORMER"); + + PFLAG_RANGE(PHY, 4, BEAMFORMEE_MAX_STS_UNDER_80MHZ, 0, 1, 4, + "BEAMFORMEE-MAX-STS-UNDER-%d"); + PFLAG_RANGE(PHY, 4, BEAMFORMEE_MAX_STS_ABOVE_80MHZ, 0, 1, 4, + "BEAMFORMEE-MAX-STS-ABOVE-%d"); + + PFLAG_RANGE(PHY, 5, BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ, 0, 1, 1, + "NUM-SND-DIM-UNDER-80MHZ-%d"); + PFLAG_RANGE(PHY, 5, BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ, 0, 1, 1, + "NUM-SND-DIM-ABOVE-80MHZ-%d"); + 
PFLAG(PHY, 5, NG16_SU_FEEDBACK, "NG16-SU-FEEDBACK"); + PFLAG(PHY, 5, NG16_MU_FEEDBACK, "NG16-MU-FEEDBACK"); + + PFLAG(PHY, 6, CODEBOOK_SIZE_42_SU, "CODEBOOK-SIZE-42-SU"); + PFLAG(PHY, 6, CODEBOOK_SIZE_75_MU, "CODEBOOK-SIZE-75-MU"); + PFLAG(PHY, 6, TRIG_SU_BEAMFORMER_FB, "TRIG-SU-BEAMFORMER-FB"); + PFLAG(PHY, 6, TRIG_MU_BEAMFORMER_FB, "TRIG-MU-BEAMFORMER-FB"); + PFLAG(PHY, 6, TRIG_CQI_FB, "TRIG-CQI-FB"); + PFLAG(PHY, 6, PARTIAL_BW_EXT_RANGE, "PARTIAL-BW-EXT-RANGE"); + PFLAG(PHY, 6, PARTIAL_BANDWIDTH_DL_MUMIMO, + "PARTIAL-BANDWIDTH-DL-MUMIMO"); + PFLAG(PHY, 6, PPE_THRESHOLD_PRESENT, "PPE-THRESHOLD-PRESENT"); + + PFLAG(PHY, 7, SRP_BASED_SR, "SRP-BASED-SR"); + PFLAG(PHY, 7, POWER_BOOST_FACTOR_AR, "POWER-BOOST-FACTOR-AR"); + PFLAG(PHY, 7, HE_SU_MU_PPDU_4XLTF_AND_08_US_GI, + "HE-SU-MU-PPDU-4XLTF-AND-08-US-GI"); + PFLAG_RANGE(PHY, 7, MAX_NC, 0, 1, 1, "MAX-NC-%d"); + PFLAG(PHY, 7, STBC_TX_ABOVE_80MHZ, "STBC-TX-ABOVE-80MHZ"); + PFLAG(PHY, 7, STBC_RX_ABOVE_80MHZ, "STBC-RX-ABOVE-80MHZ"); + + PFLAG(PHY, 8, HE_ER_SU_PPDU_4XLTF_AND_08_US_GI, + "HE-ER-SU-PPDU-4XLTF-AND-08-US-GI"); + PFLAG(PHY, 8, 20MHZ_IN_40MHZ_HE_PPDU_IN_2G, + "20MHZ-IN-40MHZ-HE-PPDU-IN-2G"); + PFLAG(PHY, 8, 20MHZ_IN_160MHZ_HE_PPDU, "20MHZ-IN-160MHZ-HE-PPDU"); + PFLAG(PHY, 8, 80MHZ_IN_160MHZ_HE_PPDU, "80MHZ-IN-160MHZ-HE-PPDU"); + PFLAG(PHY, 8, HE_ER_SU_1XLTF_AND_08_US_GI, + "HE-ER-SU-1XLTF-AND-08-US-GI"); + PFLAG(PHY, 8, MIDAMBLE_RX_TX_2X_AND_1XLTF, + "MIDAMBLE-RX-TX-2X-AND-1XLTF"); + + switch (cap[8] & IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_MASK) { + case IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_20MHZ: + PRINT("DCM-MAX-BW-20MHZ"); + break; + case IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_40MHZ: + PRINT("DCM-MAX-BW-40MHZ"); + break; + case IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_80MHZ: + PRINT("DCM-MAX-BW-80MHZ"); + break; + case IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_160_OR_80P80_MHZ: + PRINT("DCM-MAX-BW-160-OR-80P80-MHZ"); + break; + } + + PFLAG(PHY, 9, LONGER_THAN_16_SIGB_OFDM_SYM, + "LONGER-THAN-16-SIGB-OFDM-SYM"); + PFLAG(PHY, 9, NON_TRIGGERED_CQI_FEEDBACK, + "NON-TRIGGERED-CQI-FEEDBACK"); + PFLAG(PHY, 9, TX_1024_QAM_LESS_THAN_242_TONE_RU, + "TX-1024-QAM-LESS-THAN-242-TONE-RU"); + PFLAG(PHY, 9, RX_1024_QAM_LESS_THAN_242_TONE_RU, + "RX-1024-QAM-LESS-THAN-242-TONE-RU"); + PFLAG(PHY, 9, RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB, + "RX-FULL-BW-SU-USING-MU-WITH-COMP-SIGB"); + PFLAG(PHY, 9, RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB, + "RX-FULL-BW-SU-USING-MU-WITH-NON-COMP-SIGB"); + +#undef PFLAG_RANGE_DEFAULT +#undef PFLAG_RANGE +#undef PFLAG + +#define PRINT_NSS_SUPP(f, n) \ + do { \ + int i; \ + u16 v = le16_to_cpu(nss->f); \ + p += scnprintf(p, buf_sz + buf - p, n ": %#.4x\n", v); \ + for (i = 0; i < 8; i += 2) { \ + switch ((v >> i) & 0x3) { \ + case 0: \ + PRINT(n "-%d-SUPPORT-0-7", i / 2); \ + break; \ + case 1: \ + PRINT(n "-%d-SUPPORT-0-9", i / 2); \ + break; \ + case 2: \ + PRINT(n "-%d-SUPPORT-0-11", i / 2); \ + break; \ + case 3: \ + PRINT(n "-%d-NOT-SUPPORTED", i / 2); \ + break; \ + } \ + } \ + } while (0) + + PRINT_NSS_SUPP(rx_mcs_80, "RX-MCS-80"); + PRINT_NSS_SUPP(tx_mcs_80, "TX-MCS-80"); + + if (cap[0] & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G) { + PRINT_NSS_SUPP(rx_mcs_160, "RX-MCS-160"); + PRINT_NSS_SUPP(tx_mcs_160, "TX-MCS-160"); + } + + if (cap[0] & + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) { + PRINT_NSS_SUPP(rx_mcs_80p80, "RX-MCS-80P80"); + PRINT_NSS_SUPP(tx_mcs_80p80, "TX-MCS-80P80"); + } + +#undef PRINT_NSS_SUPP +#undef PRINT + + if (!(cap[6] & IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT)) + goto out; + + p +=
scnprintf(p, buf_sz + buf - p, "PPE-THRESHOLDS: %#.2x", + hec->ppe_thres[0]); + + ppe_size = ieee80211_he_ppe_size(hec->ppe_thres[0], cap); + for (i = 1; i < ppe_size; i++) { + p += scnprintf(p, buf_sz + buf - p, " %#.2x", + hec->ppe_thres[i]); + } + p += scnprintf(p, buf_sz + buf - p, "\n"); + +out: + ret = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); + kfree(buf); + return ret; +} +STA_OPS(he_capa); #define DEBUGFS_ADD(name) \ debugfs_create_file(#name, 0400, \ @@ -538,6 +897,7 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta) DEBUGFS_ADD(agg_status); DEBUGFS_ADD(ht_capa); DEBUGFS_ADD(vht_capa); + DEBUGFS_ADD(he_capa); DEBUGFS_ADD_COUNTER(rx_duplicates, rx_stats.num_duplicates); DEBUGFS_ADD_COUNTER(rx_fragments, rx_stats.fragments); diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h index 8f6998091d26..0b1747a2313d 100644 --- a/net/mac80211/driver-ops.h +++ b/net/mac80211/driver-ops.h @@ -1173,6 +1173,32 @@ static inline void drv_wake_tx_queue(struct ieee80211_local *local, local->ops->wake_tx_queue(&local->hw, &txq->txq); } +static inline bool drv_can_aggregate_in_amsdu(struct ieee80211_local *local, + struct sk_buff *head, + struct sk_buff *skb) +{ + if (!local->ops->can_aggregate_in_amsdu) + return true; + + return local->ops->can_aggregate_in_amsdu(&local->hw, head, skb); +} + +static inline int +drv_get_ftm_responder_stats(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct cfg80211_ftm_responder_stats *ftm_stats) +{ + int ret = -EOPNOTSUPP; + + if (local->ops->get_ftm_responder_stats) + ret = local->ops->get_ftm_responder_stats(&local->hw, + &sdata->vif, + ftm_stats); + trace_drv_get_ftm_responder_stats(local, sdata, ftm_stats); + + return ret; +} + static inline int drv_start_nan(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct cfg80211_nan_conf *conf) diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index f0f5fedb8caa..0d704e8d7078 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c @@ -1070,7 +1070,9 @@ static void ieee80211_update_sta_info(struct ieee80211_sub_if_data *sdata, struct ieee80211_vht_cap cap_ie; struct ieee80211_sta_vht_cap cap = sta->sta.vht_cap; - ieee80211_chandef_vht_oper(elems->vht_operation, + ieee80211_chandef_vht_oper(&local->hw, + elems->vht_operation, + elems->ht_operation, &chandef); memcpy(&cap_ie, elems->vht_cap_elem, sizeof(cap_ie)); ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband, diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 172aeae21ae9..10a05062e4a0 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -377,6 +377,7 @@ struct ieee80211_mgd_auth_data { u8 key[WLAN_KEY_LEN_WEP104]; u8 key_len, key_idx; bool done; + bool peer_confirmed; bool timeout_started; u16 sae_trans, sae_status; @@ -818,6 +819,7 @@ enum txq_info_flags { IEEE80211_TXQ_STOP, IEEE80211_TXQ_AMPDU, IEEE80211_TXQ_NO_AMSDU, + IEEE80211_TXQ_STOP_NETIF_TX, }; /** @@ -1198,6 +1200,9 @@ struct ieee80211_local { /* number of RX chains the hardware has */ u8 rx_chains; + /* bitmap of which sbands were copied */ + u8 sband_allocated; + int tx_headroom; /* required headroom for hardware/radiotap */ /* Tasklet and skb queue to process calls from IRQ mode.
All frames @@ -1226,6 +1231,7 @@ struct ieee80211_local { struct sk_buff_head pending[IEEE80211_MAX_QUEUES]; struct tasklet_struct tx_pending_tasklet; + struct tasklet_struct wake_txqs_tasklet; atomic_t agg_queue_stop[IEEE80211_MAX_QUEUES]; @@ -2038,6 +2044,7 @@ void ieee80211_txq_remove_vlan(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata); void ieee80211_fill_txq_stats(struct cfg80211_txq_stats *txqstats, struct txq_info *txqi); +void ieee80211_wake_txqs(unsigned long data); void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata, u16 transaction, u16 auth_alg, u16 status, const u8 *extra, size_t extra_len, const u8 *bssid, @@ -2106,7 +2113,9 @@ u8 *ieee80211_add_wmm_info_ie(u8 *buf, u8 qosinfo); /* channel management */ bool ieee80211_chandef_ht_oper(const struct ieee80211_ht_operation *ht_oper, struct cfg80211_chan_def *chandef); -bool ieee80211_chandef_vht_oper(const struct ieee80211_vht_operation *oper, +bool ieee80211_chandef_vht_oper(struct ieee80211_hw *hw, + const struct ieee80211_vht_operation *oper, + const struct ieee80211_ht_operation *htop, struct cfg80211_chan_def *chandef); u32 ieee80211_chandef_downgrade(struct cfg80211_chan_def *c); diff --git a/net/mac80211/key.c b/net/mac80211/key.c index c054ac85793c..4700718e010f 100644 --- a/net/mac80211/key.c +++ b/net/mac80211/key.c @@ -248,6 +248,7 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key) (key->conf.flags & IEEE80211_KEY_FLAG_RESERVE_TAILROOM))) increment_tailroom_need_count(sdata); + key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE; ret = drv_set_key(key->local, DISABLE_KEY, sdata, sta ? &sta->sta : NULL, &key->conf); @@ -256,8 +257,65 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key) "failed to remove key (%d, %pM) from hardware (%d)\n", key->conf.keyidx, sta ? sta->sta.addr : bcast_addr, ret); +} - key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE; +static int ieee80211_hw_key_replace(struct ieee80211_key *old_key, + struct ieee80211_key *new_key, + bool ptk0rekey) +{ + struct ieee80211_sub_if_data *sdata; + struct ieee80211_local *local; + struct sta_info *sta; + int ret; + + /* Aggregation sessions are OK when running on SW crypto. + * A broken remote STA may cause issues not observed with HW + * crypto, though. + */ + if (!(old_key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) + return 0; + + assert_key_lock(old_key->local); + sta = old_key->sta; + + /* PTK only using key ID 0 needs special handling on rekey */ + if (new_key && sta && ptk0rekey) { + local = old_key->local; + sdata = old_key->sdata; + + /* Stop TX till we are on the new key */ + old_key->flags |= KEY_FLAG_TAINTED; + ieee80211_clear_fast_xmit(sta); + + /* Aggregation sessions during rekey are complicated due to the + * reorder buffer and retransmits. Side step that by blocking + * aggregation during rekey and tear down running sessions. + */ + if (ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION)) { + set_sta_flag(sta, WLAN_STA_BLOCK_BA); + ieee80211_sta_tear_down_BA_sessions(sta, + AGG_STOP_LOCAL_REQUEST); + } + + if (!wiphy_ext_feature_isset(local->hw.wiphy, + NL80211_EXT_FEATURE_CAN_REPLACE_PTK0)) { + pr_warn_ratelimited("Rekeying PTK for STA %pM but driver can't safely do that.", + sta->sta.addr); + /* Flushing the driver queues *may* help prevent + * the clear text leaks and freezes. 
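+ * Drivers that advertise NL80211_EXT_FEATURE_CAN_REPLACE_PTK0 + * never reach this fallback; the flush is only a best-effort + * mitigation.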
+ */ + ieee80211_flush_queues(local, sdata, false); + } + } + + ieee80211_key_disable_hw_accel(old_key); + + if (new_key) + ret = ieee80211_key_enable_hw_accel(new_key); + else + ret = 0; + + return ret; } static void __ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata, @@ -316,38 +374,57 @@ void ieee80211_set_default_mgmt_key(struct ieee80211_sub_if_data *sdata, } -static void ieee80211_key_replace(struct ieee80211_sub_if_data *sdata, +static int ieee80211_key_replace(struct ieee80211_sub_if_data *sdata, struct sta_info *sta, bool pairwise, struct ieee80211_key *old, struct ieee80211_key *new) { int idx; + int ret; bool defunikey, defmultikey, defmgmtkey; /* caller must provide at least one old/new */ if (WARN_ON(!new && !old)) - return; + return 0; if (new) list_add_tail_rcu(&new->list, &sdata->key_list); WARN_ON(new && old && new->conf.keyidx != old->conf.keyidx); - if (old) + if (old) { idx = old->conf.keyidx; - else + /* TODO: properly implement and test "Extended Key ID for + * Individually Addressed Frames" from IEEE 802.11-2016. + * Till then always assume only key ID 0 is used for + * pairwise keys. */ + ret = ieee80211_hw_key_replace(old, new, pairwise); + } else { + /* new must be provided in case old is not */ idx = new->conf.keyidx; + if (!new->local->wowlan) + ret = ieee80211_key_enable_hw_accel(new); + else + ret = 0; + } + + if (ret) + return ret; if (sta) { if (pairwise) { rcu_assign_pointer(sta->ptk[idx], new); sta->ptk_idx = idx; - ieee80211_check_fast_xmit(sta); + if (new) { + clear_sta_flag(sta, WLAN_STA_BLOCK_BA); + ieee80211_check_fast_xmit(sta); + } } else { rcu_assign_pointer(sta->gtk[idx], new); } - ieee80211_check_fast_rx(sta); + if (new) + ieee80211_check_fast_rx(sta); } else { defunikey = old && old == key_mtx_dereference(sdata->local, @@ -380,6 +457,8 @@ static void ieee80211_key_replace(struct ieee80211_sub_if_data *sdata, if (old) list_del_rcu(&old->list); + + return 0; } struct ieee80211_key * @@ -575,9 +654,6 @@ static void ieee80211_key_free_common(struct ieee80211_key *key) static void __ieee80211_key_destroy(struct ieee80211_key *key, bool delay_tailroom) { - if (key->local) - ieee80211_key_disable_hw_accel(key); - if (key->local) { struct ieee80211_sub_if_data *sdata = key->sdata; @@ -654,7 +730,6 @@ int ieee80211_key_link(struct ieee80211_key *key, struct ieee80211_sub_if_data *sdata, struct sta_info *sta) { - struct ieee80211_local *local = sdata->local; struct ieee80211_key *old_key; int idx = key->conf.keyidx; bool pairwise = key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE; @@ -691,17 +766,13 @@ int ieee80211_key_link(struct ieee80211_key *key, increment_tailroom_need_count(sdata); - ieee80211_key_replace(sdata, sta, pairwise, old_key, key); - ieee80211_key_destroy(old_key, delay_tailroom); - - ieee80211_debugfs_key_add(key); + ret = ieee80211_key_replace(sdata, sta, pairwise, old_key, key); - if (!local->wowlan) { - ret = ieee80211_key_enable_hw_accel(key); - if (ret) - ieee80211_key_free(key, delay_tailroom); + if (!ret) { + ieee80211_debugfs_key_add(key); + ieee80211_key_destroy(old_key, delay_tailroom); } else { - ret = 0; + ieee80211_key_free(key, delay_tailroom); } out: diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 513627896204..83e71e6b2ebe 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -4,6 +4,7 @@ * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2017 Intel Deutschland GmbH + * Copyright (C) 2018 Intel Corporation * * This program is
free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -610,6 +611,18 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len, local->ops = ops; local->use_chanctx = use_chanctx; + /* + * We need a bit of data queued to build aggregates properly, so + * instruct the TCP stack to allow more than a single ms of data + * to be queued in the stack. The value is a bit-shift of 1 + * second, so 8 is ~4ms of queued data. Only affects local TCP + * sockets. + * This is the default, anyhow - drivers may need to override it + * for local reasons (longer buffers, longer completion time, or + * similar). + */ + local->hw.tx_sk_pacing_shift = 8; + /* set up some defaults */ local->hw.queues = 1; local->hw.max_rates = 1; @@ -684,6 +697,10 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len, tasklet_init(&local->tx_pending_tasklet, ieee80211_tx_pending, (unsigned long)local); + if (ops->wake_tx_queue) + tasklet_init(&local->wake_txqs_tasklet, ieee80211_wake_txqs, + (unsigned long)local); + tasklet_init(&local->tasklet, ieee80211_tasklet_handler, (unsigned long) local); @@ -1154,6 +1171,53 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) goto fail_rate; } + if (local->rate_ctrl) { + clear_bit(IEEE80211_HW_SUPPORTS_VHT_EXT_NSS_BW, hw->flags); + if (local->rate_ctrl->ops->capa & RATE_CTRL_CAPA_VHT_EXT_NSS_BW) + ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW); + } + + /* + * If the VHT capabilities don't have IEEE80211_VHT_EXT_NSS_BW_CAPABLE, + * or have it when we don't, copy the sband structure and set/clear it. + * This is necessary because rate scaling algorithms could be switched + * and have different support values. + * Print a message so that in the common case the reallocation can be + * avoided. 
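+ * (The wiphy_dbg() below serves that purpose: driver authors can then + * make the IE bit match SUPPORTS_VHT_EXT_NSS_BW and skip the copy.)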
+ */ + BUILD_BUG_ON(NUM_NL80211_BANDS > 8 * sizeof(local->sband_allocated)); + for (band = 0; band < NUM_NL80211_BANDS; band++) { + struct ieee80211_supported_band *sband; + bool local_cap, ie_cap; + + local_cap = ieee80211_hw_check(hw, SUPPORTS_VHT_EXT_NSS_BW); + + sband = local->hw.wiphy->bands[band]; + if (!sband || !sband->vht_cap.vht_supported) + continue; + + ie_cap = !!(sband->vht_cap.vht_mcs.tx_highest & + cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE)); + + if (local_cap == ie_cap) + continue; + + sband = kmemdup(sband, sizeof(*sband), GFP_KERNEL); + if (!sband) { + result = -ENOMEM; + goto fail_rate; + } + + wiphy_dbg(hw->wiphy, "copying sband (band %d) due to VHT EXT NSS BW flag\n", + band); + + sband->vht_cap.vht_mcs.tx_highest ^= + cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE); + + local->hw.wiphy->bands[band] = sband; + local->sband_allocated |= BIT(band); + } + /* add one default STA interface if supported */ if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_STATION) && !ieee80211_hw_check(hw, NO_AUTO_VIF)) { @@ -1272,6 +1336,7 @@ static int ieee80211_free_ack_frame(int id, void *p, void *data) void ieee80211_free_hw(struct ieee80211_hw *hw) { struct ieee80211_local *local = hw_to_local(hw); + enum nl80211_band band; mutex_destroy(&local->iflist_mtx); mutex_destroy(&local->mtx); @@ -1287,6 +1352,12 @@ void ieee80211_free_hw(struct ieee80211_hw *hw) ieee80211_free_led_names(local); + for (band = 0; band < NUM_NL80211_BANDS; band++) { + if (!(local->sband_allocated & BIT(band))) + continue; + kfree(local->hw.wiphy->bands[band]); + } + wiphy_free(local->hw.wiphy); } EXPORT_SYMBOL(ieee80211_free_hw); @@ -1304,18 +1375,12 @@ static int __init ieee80211_init(void) if (ret) return ret; - ret = rc80211_minstrel_ht_init(); - if (ret) - goto err_minstrel; - ret = ieee80211_iface_init(); if (ret) goto err_netdev; return 0; err_netdev: - rc80211_minstrel_ht_exit(); - err_minstrel: rc80211_minstrel_exit(); return ret; @@ -1323,7 +1388,6 @@ static int __init ieee80211_init(void) static void __exit ieee80211_exit(void) { - rc80211_minstrel_ht_exit(); rc80211_minstrel_exit(); ieee80211s_stop(); diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index d51da26e9c18..8bad414c52ad 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -1,5 +1,6 @@ /* * Copyright (c) 2008, 2009 open80211s Ltd. 
+ * Copyright (C) 2018 Intel Corporation * Authors: Luis Carlos Cobo <luisca@cozybit.com> * Javier Cardona <javier@cozybit.com> * @@ -98,7 +99,9 @@ bool mesh_matches_local(struct ieee80211_sub_if_data *sdata, cfg80211_chandef_create(&sta_chan_def, sdata->vif.bss_conf.chandef.chan, NL80211_CHAN_NO_HT); ieee80211_chandef_ht_oper(ie->ht_operation, &sta_chan_def); - ieee80211_chandef_vht_oper(ie->vht_operation, &sta_chan_def); + ieee80211_chandef_vht_oper(&sdata->local->hw, + ie->vht_operation, ie->ht_operation, + &sta_chan_def); if (!cfg80211_chandef_compatible(&sdata->vif.bss_conf.chandef, &sta_chan_def)) diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 3dbecae4be73..d2bc8d57c87e 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -220,7 +220,8 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata, memcpy(&he_oper_vht_cap, he_oper->optional, 3); he_oper_vht_cap.basic_mcs_set = cpu_to_le16(0); - if (!ieee80211_chandef_vht_oper(&he_oper_vht_cap, + if (!ieee80211_chandef_vht_oper(&sdata->local->hw, + &he_oper_vht_cap, ht_oper, &vht_chandef)) { if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HE)) sdata_info(sdata, @@ -228,7 +229,8 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata, ret = IEEE80211_STA_DISABLE_HE; goto out; } - } else if (!ieee80211_chandef_vht_oper(vht_oper, &vht_chandef)) { + } else if (!ieee80211_chandef_vht_oper(&sdata->local->hw, vht_oper, + ht_oper, &vht_chandef)) { if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT)) sdata_info(sdata, "AP VHT information is invalid, disable VHT\n"); @@ -2759,13 +2761,40 @@ static void ieee80211_auth_challenge(struct ieee80211_sub_if_data *sdata, auth_data->key_idx, tx_flags); } +static bool ieee80211_mark_sta_auth(struct ieee80211_sub_if_data *sdata, + const u8 *bssid) +{ + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + struct sta_info *sta; + + sdata_info(sdata, "authenticated\n"); + ifmgd->auth_data->done = true; + ifmgd->auth_data->timeout = jiffies + IEEE80211_AUTH_WAIT_ASSOC; + ifmgd->auth_data->timeout_started = true; + run_again(sdata, ifmgd->auth_data->timeout); + + /* move station state to auth */ + mutex_lock(&sdata->local->sta_mtx); + sta = sta_info_get(sdata, bssid); + if (!sta) { + WARN_ONCE(1, "%s: STA %pM not found", sdata->name, bssid); + goto out_err; + } + if (sta_info_move_state(sta, IEEE80211_STA_AUTH)) { + sdata_info(sdata, "failed moving %pM to auth\n", bssid); + goto out_err; + } + mutex_unlock(&sdata->local->sta_mtx); + + return true; + +out_err: + /* don't leak sta_mtx on the error paths */ + mutex_unlock(&sdata->local->sta_mtx); + return false; +} + static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, size_t len) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; u8 bssid[ETH_ALEN]; u16 auth_alg, auth_transaction, status_code; - struct sta_info *sta; struct ieee80211_event event = { .type = MLME_EVENT, .u.mlme.data = AUTH_EVENT, @@ -2789,7 +2818,11 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata, status_code = le16_to_cpu(mgmt->u.auth.status_code); if (auth_alg != ifmgd->auth_data->algorithm || - auth_transaction != ifmgd->auth_data->expected_transaction) { + (auth_alg != WLAN_AUTH_SAE && + auth_transaction != ifmgd->auth_data->expected_transaction) || + (auth_alg == WLAN_AUTH_SAE && + (auth_transaction < ifmgd->auth_data->expected_transaction || + auth_transaction > 2))) { sdata_info(sdata, "%pM unexpected authentication state: alg %d (expected %d) transact %d (expected %d)\n", mgmt->sa, auth_alg, ifmgd->auth_data->algorithm, auth_transaction, @@ -2832,35 +2865,17 @@ static void
ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata, event.u.mlme.status = MLME_SUCCESS; drv_event_callback(sdata->local, sdata, &event); - sdata_info(sdata, "authenticated\n"); - ifmgd->auth_data->done = true; - ifmgd->auth_data->timeout = jiffies + IEEE80211_AUTH_WAIT_ASSOC; - ifmgd->auth_data->timeout_started = true; - run_again(sdata, ifmgd->auth_data->timeout); - - if (ifmgd->auth_data->algorithm == WLAN_AUTH_SAE && - ifmgd->auth_data->expected_transaction != 2) { - /* - * Report auth frame to user space for processing since another - * round of Authentication frames is still needed. - */ - cfg80211_rx_mlme_mgmt(sdata->dev, (u8 *)mgmt, len); - return; + if (ifmgd->auth_data->algorithm != WLAN_AUTH_SAE || + (auth_transaction == 2 && + ifmgd->auth_data->expected_transaction == 2)) { + if (!ieee80211_mark_sta_auth(sdata, bssid)) + goto out_err; + } else if (ifmgd->auth_data->algorithm == WLAN_AUTH_SAE && + auth_transaction == 2) { + sdata_info(sdata, "SAE peer confirmed\n"); + ifmgd->auth_data->peer_confirmed = true; } - /* move station state to auth */ - mutex_lock(&sdata->local->sta_mtx); - sta = sta_info_get(sdata, bssid); - if (!sta) { - WARN_ONCE(1, "%s: STA %pM not found", sdata->name, bssid); - goto out_err; - } - if (sta_info_move_state(sta, IEEE80211_STA_AUTH)) { - sdata_info(sdata, "failed moving %pM to auth\n", bssid); - goto out_err; - } - mutex_unlock(&sdata->local->sta_mtx); - cfg80211_rx_mlme_mgmt(sdata->dev, (u8 *)mgmt, len); return; out_err: @@ -3237,19 +3252,16 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata, } if (bss_conf->he_support) { - u32 he_oper_params = - le32_to_cpu(elems.he_operation->he_oper_params); + bss_conf->bss_color = + le32_get_bits(elems.he_operation->he_oper_params, + IEEE80211_HE_OPERATION_BSS_COLOR_MASK); - bss_conf->bss_color = he_oper_params & - IEEE80211_HE_OPERATION_BSS_COLOR_MASK; bss_conf->htc_trig_based_pkt_ext = - (he_oper_params & - IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK) << - IEEE80211_HE_OPERATION_DFLT_PE_DURATION_OFFSET; + le32_get_bits(elems.he_operation->he_oper_params, + IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK); bss_conf->frame_time_rts_th = - (he_oper_params & - IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK) << - IEEE80211_HE_OPERATION_RTS_THRESHOLD_OFFSET; + le32_get_bits(elems.he_operation->he_oper_params, + IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK); bss_conf->multi_sta_back_32bit = sta->sta.he_cap.he_cap_elem.mac_cap_info[2] & @@ -4879,6 +4891,7 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgd_auth_data *auth_data; u16 auth_alg; int err; + bool cont_auth; /* prepare auth data structure */ @@ -4913,6 +4926,9 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata, return -EOPNOTSUPP; } + if (ifmgd->assoc_data) + return -EBUSY; + auth_data = kzalloc(sizeof(*auth_data) + req->auth_data_len + req->ie_len, GFP_KERNEL); if (!auth_data) @@ -4932,6 +4948,13 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata, auth_data->data_len += req->auth_data_len - 4; } + /* Check if continuing authentication or trying to authenticate with the + * same BSS that we were in the process of authenticating with and avoid + * removal and re-addition of the STA entry in + * ieee80211_prep_connection(). 
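+ * cont_auth stays false whenever auth_data is gone or refers to a + * different BSS, which forces the full prep path below.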
+ */ + cont_auth = ifmgd->auth_data && req->bss == ifmgd->auth_data->bss; + if (req->ie && req->ie_len) { memcpy(&auth_data->data[auth_data->data_len], req->ie, req->ie_len); @@ -4948,18 +4971,26 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata, /* try to authenticate/probe */ - if ((ifmgd->auth_data && !ifmgd->auth_data->done) || - ifmgd->assoc_data) { - err = -EBUSY; - goto err_free; + if (ifmgd->auth_data) { + if (cont_auth && req->auth_type == NL80211_AUTHTYPE_SAE) { + auth_data->peer_confirmed = + ifmgd->auth_data->peer_confirmed; + } + ieee80211_destroy_auth_data(sdata, cont_auth); } - if (ifmgd->auth_data) - ieee80211_destroy_auth_data(sdata, false); - /* prep auth_data so we don't go into idle on disassoc */ ifmgd->auth_data = auth_data; + /* If this is continuation of an ongoing SAE authentication exchange + * (i.e., request to send SAE Confirm) and the peer has already + * confirmed, mark authentication completed since we are about to send + * out SAE Confirm. + */ + if (cont_auth && req->auth_type == NL80211_AUTHTYPE_SAE && + auth_data->peer_confirmed && auth_data->sae_trans == 2) + ieee80211_mark_sta_auth(sdata, req->bss->bssid); + if (ifmgd->associated) { u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; @@ -4977,7 +5008,7 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata, sdata_info(sdata, "authenticate with %pM\n", req->bss->bssid); - err = ieee80211_prep_connection(sdata, req->bss, false, false); + err = ieee80211_prep_connection(sdata, req->bss, cont_auth, false); if (err) goto err_clear; @@ -4998,7 +5029,6 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata, mutex_lock(&sdata->local->mtx); ieee80211_vif_release_channel(sdata); mutex_unlock(&sdata->local->mtx); - err_free: kfree(auth_data); return err; } diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h index 8212bfeb71d6..d59198191a79 100644 --- a/net/mac80211/rate.h +++ b/net/mac80211/rate.h @@ -95,18 +95,5 @@ static inline void rc80211_minstrel_exit(void) } #endif -#ifdef CONFIG_MAC80211_RC_MINSTREL_HT -int rc80211_minstrel_ht_init(void); -void rc80211_minstrel_ht_exit(void); -#else -static inline int rc80211_minstrel_ht_init(void) -{ - return 0; -} -static inline void rc80211_minstrel_ht_exit(void) -{ -} -#endif - #endif /* IEEE80211_RATE_H */ diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c index 07fb219327d6..a34e9c2ca626 100644 --- a/net/mac80211/rc80211_minstrel.c +++ b/net/mac80211/rc80211_minstrel.c @@ -167,12 +167,6 @@ minstrel_calc_rate_stats(struct minstrel_rate_stats *mrs) if (unlikely(!mrs->att_hist)) { mrs->prob_ewma = cur_prob; } else { - /* update exponential weighted moving variance */ - mrs->prob_ewmv = minstrel_ewmv(mrs->prob_ewmv, - cur_prob, - mrs->prob_ewma, - EWMA_LEVEL); - /* update exponential weighted moving average */ mrs->prob_ewma = minstrel_ewma(mrs->prob_ewma, cur_prob, @@ -572,141 +566,6 @@ minstrel_rate_init(void *priv, struct ieee80211_supported_band *sband, minstrel_update_rates(mp, mi); } -static void * -minstrel_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp) -{ - struct ieee80211_supported_band *sband; - struct minstrel_sta_info *mi; - struct minstrel_priv *mp = priv; - struct ieee80211_hw *hw = mp->hw; - int max_rates = 0; - int i; - - mi = kzalloc(sizeof(struct minstrel_sta_info), gfp); - if (!mi) - return NULL; - - for (i = 0; i < NUM_NL80211_BANDS; i++) { - sband = hw->wiphy->bands[i]; - if (sband && sband->n_bitrates > max_rates) - max_rates = sband->n_bitrates; - } - - mi->r = kcalloc(max_rates,
sizeof(struct minstrel_rate), gfp); - if (!mi->r) - goto error; - - mi->sample_table = kmalloc_array(max_rates, SAMPLE_COLUMNS, gfp); - if (!mi->sample_table) - goto error1; - - mi->last_stats_update = jiffies; - return mi; - -error1: - kfree(mi->r); -error: - kfree(mi); - return NULL; -} - -static void -minstrel_free_sta(void *priv, struct ieee80211_sta *sta, void *priv_sta) -{ - struct minstrel_sta_info *mi = priv_sta; - - kfree(mi->sample_table); - kfree(mi->r); - kfree(mi); -} - -static void -minstrel_init_cck_rates(struct minstrel_priv *mp) -{ - static const int bitrates[4] = { 10, 20, 55, 110 }; - struct ieee80211_supported_band *sband; - u32 rate_flags = ieee80211_chandef_rate_flags(&mp->hw->conf.chandef); - int i, j; - - sband = mp->hw->wiphy->bands[NL80211_BAND_2GHZ]; - if (!sband) - return; - - for (i = 0, j = 0; i < sband->n_bitrates; i++) { - struct ieee80211_rate *rate = &sband->bitrates[i]; - - if (rate->flags & IEEE80211_RATE_ERP_G) - continue; - - if ((rate_flags & sband->bitrates[i].flags) != rate_flags) - continue; - - for (j = 0; j < ARRAY_SIZE(bitrates); j++) { - if (rate->bitrate != bitrates[j]) - continue; - - mp->cck_rates[j] = i; - break; - } - } -} - -static void * -minstrel_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) -{ - struct minstrel_priv *mp; - - mp = kzalloc(sizeof(struct minstrel_priv), GFP_ATOMIC); - if (!mp) - return NULL; - - /* contention window settings - * Just an approximation. Using the per-queue values would complicate - * the calculations and is probably unnecessary */ - mp->cw_min = 15; - mp->cw_max = 1023; - - /* number of packets (in %) to use for sampling other rates - * sample less often for non-mrr packets, because the overhead - * is much higher than with mrr */ - mp->lookaround_rate = 5; - mp->lookaround_rate_mrr = 10; - - /* maximum time that the hw is allowed to stay in one MRR segment */ - mp->segment_size = 6000; - - if (hw->max_rate_tries > 0) - mp->max_retry = hw->max_rate_tries; - else - /* safe default, does not necessarily have to match hw properties */ - mp->max_retry = 7; - - if (hw->max_rates >= 4) - mp->has_mrr = true; - - mp->hw = hw; - mp->update_interval = 100; - -#ifdef CONFIG_MAC80211_DEBUGFS - mp->fixed_rate_idx = (u32) -1; - mp->dbg_fixed_rate = debugfs_create_u32("fixed_rate_idx", - 0666, debugfsdir, &mp->fixed_rate_idx); -#endif - - minstrel_init_cck_rates(mp); - - return mp; -} - -static void -minstrel_free(void *priv) -{ -#ifdef CONFIG_MAC80211_DEBUGFS - debugfs_remove(((struct minstrel_priv *)priv)->dbg_fixed_rate); -#endif - kfree(priv); -} - static u32 minstrel_get_expected_throughput(void *priv_sta) { struct minstrel_sta_info *mi = priv_sta; @@ -725,29 +584,8 @@ static u32 minstrel_get_expected_throughput(void *priv_sta) } const struct rate_control_ops mac80211_minstrel = { - .name = "minstrel", .tx_status_ext = minstrel_tx_status, .get_rate = minstrel_get_rate, .rate_init = minstrel_rate_init, - .alloc = minstrel_alloc, - .free = minstrel_free, - .alloc_sta = minstrel_alloc_sta, - .free_sta = minstrel_free_sta, -#ifdef CONFIG_MAC80211_DEBUGFS - .add_sta_debugfs = minstrel_add_sta_debugfs, - .remove_sta_debugfs = minstrel_remove_sta_debugfs, -#endif .get_expected_throughput = minstrel_get_expected_throughput, }; - -int __init -rc80211_minstrel_init(void) -{ - return ieee80211_rate_control_register(&mac80211_minstrel); -} - -void -rc80211_minstrel_exit(void) -{ - ieee80211_rate_control_unregister(&mac80211_minstrel); -} diff --git a/net/mac80211/rc80211_minstrel.h b/net/mac80211/rc80211_minstrel.h 
index be6c3f35f48b..23ec953e3a24 100644 --- a/net/mac80211/rc80211_minstrel.h +++ b/net/mac80211/rc80211_minstrel.h @@ -35,19 +35,6 @@ minstrel_ewma(int old, int new, int weight) return old + incr; } -/* - * Perform EWMV (Exponentially Weighted Moving Variance) calculation - */ -static inline int -minstrel_ewmv(int old_ewmv, int cur_prob, int prob_ewma, int weight) -{ - int diff, incr; - - diff = cur_prob - prob_ewma; - incr = (EWMA_DIV - weight) * diff / EWMA_DIV; - return weight * (old_ewmv + MINSTREL_TRUNC(diff * incr)) / EWMA_DIV; -} - struct minstrel_rate_stats { /* current / last sampling period attempts/success counters */ u16 attempts, last_attempts; @@ -56,11 +43,8 @@ struct minstrel_rate_stats { /* total attempts/success counters */ u32 att_hist, succ_hist; - /* statistis of packet delivery probability - * prob_ewma - exponential weighted moving average of prob - * prob_ewmsd - exp. weighted moving standard deviation of prob */ + /* prob_ewma - exponential weighted moving average of prob */ u16 prob_ewma; - u16 prob_ewmv; /* maximum retry counts */ u8 retry_count; @@ -109,11 +93,6 @@ struct minstrel_sta_info { /* sampling table */ u8 *sample_table; - -#ifdef CONFIG_MAC80211_DEBUGFS - struct dentry *dbg_stats; - struct dentry *dbg_stats_csv; -#endif }; struct minstrel_priv { @@ -137,7 +116,6 @@ struct minstrel_priv { * - setting will be applied on next update */ u32 fixed_rate_idx; - struct dentry *dbg_fixed_rate; #endif }; @@ -146,17 +124,8 @@ struct minstrel_debugfs_info { char buf[]; }; -/* Get EWMSD (Exponentially Weighted Moving Standard Deviation) * 10 */ -static inline int -minstrel_get_ewmsd10(struct minstrel_rate_stats *mrs) -{ - unsigned int ewmv = mrs->prob_ewmv; - return int_sqrt(MINSTREL_TRUNC(ewmv * 1000 * 1000)); -} - extern const struct rate_control_ops mac80211_minstrel; void minstrel_add_sta_debugfs(void *priv, void *priv_sta, struct dentry *dir); -void minstrel_remove_sta_debugfs(void *priv, void *priv_sta); /* Recalculate success probabilities and counters for a given rate using EWMA */ void minstrel_calc_rate_stats(struct minstrel_rate_stats *mrs); @@ -165,7 +134,5 @@ int minstrel_get_tp_avg(struct minstrel_rate *mr, int prob_ewma); /* debugfs */ int minstrel_stats_open(struct inode *inode, struct file *file); int minstrel_stats_csv_open(struct inode *inode, struct file *file); -ssize_t minstrel_stats_read(struct file *file, char __user *buf, size_t len, loff_t *ppos); -int minstrel_stats_release(struct inode *inode, struct file *file); #endif diff --git a/net/mac80211/rc80211_minstrel_debugfs.c b/net/mac80211/rc80211_minstrel_debugfs.c index 9ad7d63d3e5b..c8afd85b51a0 100644 --- a/net/mac80211/rc80211_minstrel_debugfs.c +++ b/net/mac80211/rc80211_minstrel_debugfs.c @@ -54,22 +54,6 @@ #include <net/mac80211.h> #include "rc80211_minstrel.h" -ssize_t -minstrel_stats_read(struct file *file, char __user *buf, size_t len, loff_t *ppos) -{ - struct minstrel_debugfs_info *ms; - - ms = file->private_data; - return simple_read_from_buffer(buf, len, ppos, ms->buf, ms->len); -} - -int -minstrel_stats_release(struct inode *inode, struct file *file) -{ - kfree(file->private_data); - return 0; -} - int minstrel_stats_open(struct inode *inode, struct file *file) { @@ -86,14 +70,13 @@ minstrel_stats_open(struct inode *inode, struct file *file) p = ms->buf; p += sprintf(p, "\n"); p += sprintf(p, - "best __________rate_________ ________statistics________ ____last_____ ______sum-of________\n"); + "best __________rate_________ ____statistics___ ____last_____ 
______sum-of________\n"); p += sprintf(p, - "rate [name idx airtime max_tp] [avg(tp) avg(prob) sd(prob)] [retry|suc|att] [#success | #attempts]\n"); + "rate [name idx airtime max_tp] [avg(tp) avg(prob)] [retry|suc|att] [#success | #attempts]\n"); for (i = 0; i < mi->n_rates; i++) { struct minstrel_rate *mr = &mi->r[i]; struct minstrel_rate_stats *mrs = &mi->r[i].stats; - unsigned int prob_ewmsd; *(p++) = (i == mi->max_tp_rate[0]) ? 'A' : ' '; *(p++) = (i == mi->max_tp_rate[1]) ? 'B' : ' '; @@ -109,15 +92,13 @@ minstrel_stats_open(struct inode *inode, struct file *file) tp_max = minstrel_get_tp_avg(mr, MINSTREL_FRAC(100,100)); tp_avg = minstrel_get_tp_avg(mr, mrs->prob_ewma); eprob = MINSTREL_TRUNC(mrs->prob_ewma * 1000); - prob_ewmsd = minstrel_get_ewmsd10(mrs); - p += sprintf(p, "%4u.%1u %4u.%1u %3u.%1u %3u.%1u" + p += sprintf(p, "%4u.%1u %4u.%1u %3u.%1u" " %3u %3u %-3u " "%9llu %-9llu\n", tp_max / 10, tp_max % 10, tp_avg / 10, tp_avg % 10, eprob / 10, eprob % 10, - prob_ewmsd / 10, prob_ewmsd % 10, mrs->retry_count, mrs->last_success, mrs->last_attempts, @@ -135,14 +116,6 @@ minstrel_stats_open(struct inode *inode, struct file *file) return 0; } -static const struct file_operations minstrel_stat_fops = { - .owner = THIS_MODULE, - .open = minstrel_stats_open, - .read = minstrel_stats_read, - .release = minstrel_stats_release, - .llseek = default_llseek, -}; - int minstrel_stats_csv_open(struct inode *inode, struct file *file) { @@ -161,7 +134,6 @@ minstrel_stats_csv_open(struct inode *inode, struct file *file) for (i = 0; i < mi->n_rates; i++) { struct minstrel_rate *mr = &mi->r[i]; struct minstrel_rate_stats *mrs = &mi->r[i].stats; - unsigned int prob_ewmsd; p += sprintf(p, "%s" ,((i == mi->max_tp_rate[0]) ? "A" : "")); p += sprintf(p, "%s" ,((i == mi->max_tp_rate[1]) ? 
"B" : "")); @@ -177,14 +149,12 @@ minstrel_stats_csv_open(struct inode *inode, struct file *file) tp_max = minstrel_get_tp_avg(mr, MINSTREL_FRAC(100,100)); tp_avg = minstrel_get_tp_avg(mr, mrs->prob_ewma); eprob = MINSTREL_TRUNC(mrs->prob_ewma * 1000); - prob_ewmsd = minstrel_get_ewmsd10(mrs); - p += sprintf(p, "%u.%u,%u.%u,%u.%u,%u.%u,%u,%u,%u," + p += sprintf(p, "%u.%u,%u.%u,%u.%u,%u,%u,%u," "%llu,%llu,%d,%d\n", tp_max / 10, tp_max % 10, tp_avg / 10, tp_avg % 10, eprob / 10, eprob % 10, - prob_ewmsd / 10, prob_ewmsd % 10, mrs->retry_count, mrs->last_success, mrs->last_attempts, @@ -200,33 +170,3 @@ minstrel_stats_csv_open(struct inode *inode, struct file *file) return 0; } - -static const struct file_operations minstrel_stat_csv_fops = { - .owner = THIS_MODULE, - .open = minstrel_stats_csv_open, - .read = minstrel_stats_read, - .release = minstrel_stats_release, - .llseek = default_llseek, -}; - -void -minstrel_add_sta_debugfs(void *priv, void *priv_sta, struct dentry *dir) -{ - struct minstrel_sta_info *mi = priv_sta; - - mi->dbg_stats = debugfs_create_file("rc_stats", 0444, dir, mi, - &minstrel_stat_fops); - - mi->dbg_stats_csv = debugfs_create_file("rc_stats_csv", 0444, dir, mi, - &minstrel_stat_csv_fops); -} - -void -minstrel_remove_sta_debugfs(void *priv, void *priv_sta) -{ - struct minstrel_sta_info *mi = priv_sta; - - debugfs_remove(mi->dbg_stats); - - debugfs_remove(mi->dbg_stats_csv); -} diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c index 67ebdeaffbbc..f466ec37d161 100644 --- a/net/mac80211/rc80211_minstrel_ht.c +++ b/net/mac80211/rc80211_minstrel_ht.c @@ -52,22 +52,23 @@ _streams - 1 /* MCS rate information for an MCS group */ -#define MCS_GROUP(_streams, _sgi, _ht40) \ +#define MCS_GROUP(_streams, _sgi, _ht40, _s) \ [GROUP_IDX(_streams, _sgi, _ht40)] = { \ .streams = _streams, \ + .shift = _s, \ .flags = \ IEEE80211_TX_RC_MCS | \ (_sgi ? IEEE80211_TX_RC_SHORT_GI : 0) | \ (_ht40 ? IEEE80211_TX_RC_40_MHZ_WIDTH : 0), \ .duration = { \ - MCS_DURATION(_streams, _sgi, _ht40 ? 54 : 26), \ - MCS_DURATION(_streams, _sgi, _ht40 ? 108 : 52), \ - MCS_DURATION(_streams, _sgi, _ht40 ? 162 : 78), \ - MCS_DURATION(_streams, _sgi, _ht40 ? 216 : 104), \ - MCS_DURATION(_streams, _sgi, _ht40 ? 324 : 156), \ - MCS_DURATION(_streams, _sgi, _ht40 ? 432 : 208), \ - MCS_DURATION(_streams, _sgi, _ht40 ? 486 : 234), \ - MCS_DURATION(_streams, _sgi, _ht40 ? 540 : 260) \ + MCS_DURATION(_streams, _sgi, _ht40 ? 54 : 26) >> _s, \ + MCS_DURATION(_streams, _sgi, _ht40 ? 108 : 52) >> _s, \ + MCS_DURATION(_streams, _sgi, _ht40 ? 162 : 78) >> _s, \ + MCS_DURATION(_streams, _sgi, _ht40 ? 216 : 104) >> _s, \ + MCS_DURATION(_streams, _sgi, _ht40 ? 324 : 156) >> _s, \ + MCS_DURATION(_streams, _sgi, _ht40 ? 432 : 208) >> _s, \ + MCS_DURATION(_streams, _sgi, _ht40 ? 486 : 234) >> _s, \ + MCS_DURATION(_streams, _sgi, _ht40 ? 540 : 260) >> _s \ } \ } @@ -80,9 +81,10 @@ #define BW2VBPS(_bw, r3, r2, r1) \ (_bw == BW_80 ? r3 : _bw == BW_40 ? r2 : r1) -#define VHT_GROUP(_streams, _sgi, _bw) \ +#define VHT_GROUP(_streams, _sgi, _bw, _s) \ [VHT_GROUP_IDX(_streams, _sgi, _bw)] = { \ .streams = _streams, \ + .shift = _s, \ .flags = \ IEEE80211_TX_RC_VHT_MCS | \ (_sgi ? IEEE80211_TX_RC_SHORT_GI : 0) | \ @@ -90,25 +92,25 @@ _bw == BW_40 ? 
IEEE80211_TX_RC_40_MHZ_WIDTH : 0), \ .duration = { \ MCS_DURATION(_streams, _sgi, \ - BW2VBPS(_bw, 117, 54, 26)), \ + BW2VBPS(_bw, 117, 54, 26)) >> _s, \ MCS_DURATION(_streams, _sgi, \ - BW2VBPS(_bw, 234, 108, 52)), \ + BW2VBPS(_bw, 234, 108, 52)) >> _s, \ MCS_DURATION(_streams, _sgi, \ - BW2VBPS(_bw, 351, 162, 78)), \ + BW2VBPS(_bw, 351, 162, 78)) >> _s, \ MCS_DURATION(_streams, _sgi, \ - BW2VBPS(_bw, 468, 216, 104)), \ + BW2VBPS(_bw, 468, 216, 104)) >> _s, \ MCS_DURATION(_streams, _sgi, \ - BW2VBPS(_bw, 702, 324, 156)), \ + BW2VBPS(_bw, 702, 324, 156)) >> _s, \ MCS_DURATION(_streams, _sgi, \ - BW2VBPS(_bw, 936, 432, 208)), \ + BW2VBPS(_bw, 936, 432, 208)) >> _s, \ MCS_DURATION(_streams, _sgi, \ - BW2VBPS(_bw, 1053, 486, 234)), \ + BW2VBPS(_bw, 1053, 486, 234)) >> _s, \ MCS_DURATION(_streams, _sgi, \ - BW2VBPS(_bw, 1170, 540, 260)), \ + BW2VBPS(_bw, 1170, 540, 260)) >> _s, \ MCS_DURATION(_streams, _sgi, \ - BW2VBPS(_bw, 1404, 648, 312)), \ + BW2VBPS(_bw, 1404, 648, 312)) >> _s, \ MCS_DURATION(_streams, _sgi, \ - BW2VBPS(_bw, 1560, 720, 346)) \ + BW2VBPS(_bw, 1560, 720, 346)) >> _s \ } \ } @@ -121,28 +123,27 @@ (CCK_DURATION((_bitrate > 10 ? 20 : 10), false, 60) + \ CCK_DURATION(_bitrate, _short, AVG_PKT_SIZE)) -#define CCK_DURATION_LIST(_short) \ - CCK_ACK_DURATION(10, _short), \ - CCK_ACK_DURATION(20, _short), \ - CCK_ACK_DURATION(55, _short), \ - CCK_ACK_DURATION(110, _short) +#define CCK_DURATION_LIST(_short, _s) \ + CCK_ACK_DURATION(10, _short) >> _s, \ + CCK_ACK_DURATION(20, _short) >> _s, \ + CCK_ACK_DURATION(55, _short) >> _s, \ + CCK_ACK_DURATION(110, _short) >> _s -#define CCK_GROUP \ +#define CCK_GROUP(_s) \ [MINSTREL_CCK_GROUP] = { \ - .streams = 0, \ + .streams = 1, \ .flags = 0, \ + .shift = _s, \ .duration = { \ - CCK_DURATION_LIST(false), \ - CCK_DURATION_LIST(true) \ + CCK_DURATION_LIST(false, _s), \ + CCK_DURATION_LIST(true, _s) \ } \ } -#ifdef CONFIG_MAC80211_RC_MINSTREL_VHT static bool minstrel_vht_only = true; module_param(minstrel_vht_only, bool, 0644); MODULE_PARM_DESC(minstrel_vht_only, "Use only VHT rates when VHT is supported by sta."); -#endif /* * To enable sufficiently targeted rate sampling, MCS rates are divided into @@ -153,49 +154,47 @@ MODULE_PARM_DESC(minstrel_vht_only, * BW -> SGI -> #streams */ const struct mcs_group minstrel_mcs_groups[] = { - MCS_GROUP(1, 0, BW_20), - MCS_GROUP(2, 0, BW_20), - MCS_GROUP(3, 0, BW_20), + MCS_GROUP(1, 0, BW_20, 5), + MCS_GROUP(2, 0, BW_20, 4), + MCS_GROUP(3, 0, BW_20, 4), - MCS_GROUP(1, 1, BW_20), - MCS_GROUP(2, 1, BW_20), - MCS_GROUP(3, 1, BW_20), + MCS_GROUP(1, 1, BW_20, 5), + MCS_GROUP(2, 1, BW_20, 4), + MCS_GROUP(3, 1, BW_20, 4), - MCS_GROUP(1, 0, BW_40), - MCS_GROUP(2, 0, BW_40), - MCS_GROUP(3, 0, BW_40), + MCS_GROUP(1, 0, BW_40, 4), + MCS_GROUP(2, 0, BW_40, 4), + MCS_GROUP(3, 0, BW_40, 4), - MCS_GROUP(1, 1, BW_40), - MCS_GROUP(2, 1, BW_40), - MCS_GROUP(3, 1, BW_40), + MCS_GROUP(1, 1, BW_40, 4), + MCS_GROUP(2, 1, BW_40, 4), + MCS_GROUP(3, 1, BW_40, 4), - CCK_GROUP, + CCK_GROUP(8), -#ifdef CONFIG_MAC80211_RC_MINSTREL_VHT - VHT_GROUP(1, 0, BW_20), - VHT_GROUP(2, 0, BW_20), - VHT_GROUP(3, 0, BW_20), + VHT_GROUP(1, 0, BW_20, 5), + VHT_GROUP(2, 0, BW_20, 4), + VHT_GROUP(3, 0, BW_20, 4), - VHT_GROUP(1, 1, BW_20), - VHT_GROUP(2, 1, BW_20), - VHT_GROUP(3, 1, BW_20), + VHT_GROUP(1, 1, BW_20, 5), + VHT_GROUP(2, 1, BW_20, 4), + VHT_GROUP(3, 1, BW_20, 4), - VHT_GROUP(1, 0, BW_40), - VHT_GROUP(2, 0, BW_40), - VHT_GROUP(3, 0, BW_40), + VHT_GROUP(1, 0, BW_40, 4), + VHT_GROUP(2, 0, BW_40, 4), + VHT_GROUP(3, 0, BW_40, 4), - 
VHT_GROUP(1, 1, BW_40), - VHT_GROUP(2, 1, BW_40), - VHT_GROUP(3, 1, BW_40), + VHT_GROUP(1, 1, BW_40, 4), + VHT_GROUP(2, 1, BW_40, 4), + VHT_GROUP(3, 1, BW_40, 4), - VHT_GROUP(1, 0, BW_80), - VHT_GROUP(2, 0, BW_80), - VHT_GROUP(3, 0, BW_80), + VHT_GROUP(1, 0, BW_80, 4), + VHT_GROUP(2, 0, BW_80, 4), + VHT_GROUP(3, 0, BW_80, 4), - VHT_GROUP(1, 1, BW_80), - VHT_GROUP(2, 1, BW_80), - VHT_GROUP(3, 1, BW_80), -#endif + VHT_GROUP(1, 1, BW_80, 4), + VHT_GROUP(2, 1, BW_80, 4), + VHT_GROUP(3, 1, BW_80, 4), }; static u8 sample_table[SAMPLE_COLUMNS][MCS_GROUP_RATES] __read_mostly; @@ -282,7 +281,8 @@ minstrel_ht_get_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, break; /* short preamble */ - if (!(mi->supported[group] & BIT(idx))) + if ((mi->supported[group] & BIT(idx + 4)) && + (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)) idx += 4; } return &mi->groups[group].rates[idx]; @@ -311,7 +311,8 @@ minstrel_ht_get_tp_avg(struct minstrel_ht_sta *mi, int group, int rate, if (group != MINSTREL_CCK_GROUP) nsecs = 1000 * mi->overhead / MINSTREL_TRUNC(mi->avg_ampdu_len); - nsecs += minstrel_mcs_groups[group].duration[rate]; + nsecs += minstrel_mcs_groups[group].duration[rate] << + minstrel_mcs_groups[group].shift; /* * For the throughput calculation, limit the probability value to 90% to @@ -759,12 +760,19 @@ minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband, minstrel_ht_update_rates(mp, mi); } +static inline int +minstrel_get_duration(int index) +{ + const struct mcs_group *group = &minstrel_mcs_groups[index / MCS_GROUP_RATES]; + unsigned int duration = group->duration[index % MCS_GROUP_RATES]; + return duration << group->shift; +} + static void minstrel_calc_retransmit(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, int index) { struct minstrel_rate_stats *mrs; - const struct mcs_group *group; unsigned int tx_time, tx_time_rtscts, tx_time_data; unsigned int cw = mp->cw_min; unsigned int ctime = 0; @@ -783,8 +791,7 @@ minstrel_calc_retransmit(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, mrs->retry_count_rtscts = 2; mrs->retry_updated = true; - group = &minstrel_mcs_groups[index / MCS_GROUP_RATES]; - tx_time_data = group->duration[index % MCS_GROUP_RATES] * ampdu_len / 1000; + tx_time_data = minstrel_get_duration(index) * ampdu_len / 1000; /* Contention time for first 2 tries */ ctime = (t_slot * cw) >> 1; @@ -878,20 +885,24 @@ minstrel_ht_get_max_amsdu_len(struct minstrel_ht_sta *mi) int group = mi->max_prob_rate / MCS_GROUP_RATES; const struct mcs_group *g = &minstrel_mcs_groups[group]; int rate = mi->max_prob_rate % MCS_GROUP_RATES; + unsigned int duration; /* Disable A-MSDU if max_prob_rate is bad */ if (mi->groups[group].rates[rate].prob_ewma < MINSTREL_FRAC(50, 100)) return 1; + duration = g->duration[rate]; + duration <<= g->shift; + /* If the rate is slower than single-stream MCS1, make A-MSDU limit small */ - if (g->duration[rate] > MCS_DURATION(1, 0, 52)) + if (duration > MCS_DURATION(1, 0, 52)) return 500; /* * If the rate is slower than single-stream MCS4, limit A-MSDU to usual * data packet size */ - if (g->duration[rate] > MCS_DURATION(1, 0, 104)) + if (duration > MCS_DURATION(1, 0, 104)) return 1600; /* @@ -899,7 +910,7 @@ minstrel_ht_get_max_amsdu_len(struct minstrel_ht_sta *mi) * rate success probability is less than 75%, limit A-MSDU to twice the usual * data packet size */ - if (g->duration[rate] > MCS_DURATION(1, 0, 260) || + if (duration > MCS_DURATION(1, 0, 260) || (minstrel_ht_get_prob_ewma(mi, mi->max_tp_rate[0]) < MINSTREL_FRAC(75, 
100))) return 3200; @@ -946,13 +957,6 @@ minstrel_ht_update_rates(struct minstrel_priv *mp, struct minstrel_ht_sta *mi) rate_control_set_rates(mp->hw, mi->sta, rates); } -static inline int -minstrel_get_duration(int index) -{ - const struct mcs_group *group = &minstrel_mcs_groups[index / MCS_GROUP_RATES]; - return group->duration[index % MCS_GROUP_RATES]; -} - static int minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi) { @@ -1000,10 +1004,13 @@ minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi) return -1; /* - * Do not sample if the probability is already higher than 95% - * to avoid wasting airtime. + * Do not sample if the probability is already higher than 95%, + * or if the rate is 3 times slower than the current max probability + * rate, to avoid wasting airtime. */ - if (mrs->prob_ewma > MINSTREL_FRAC(95, 100)) + sample_dur = minstrel_get_duration(sample_idx); + if (mrs->prob_ewma > MINSTREL_FRAC(95, 100) || + minstrel_get_duration(mi->max_prob_rate) * 3 < sample_dur) return -1; /* @@ -1013,7 +1020,6 @@ minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi) cur_max_tp_streams = minstrel_mcs_groups[tp_rate1 / MCS_GROUP_RATES].streams; - sample_dur = minstrel_get_duration(sample_idx); if (sample_dur >= minstrel_get_duration(tp_rate2) && (cur_max_tp_streams - 1 < minstrel_mcs_groups[sample_group].streams || @@ -1077,18 +1083,23 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta, return; sample_group = &minstrel_mcs_groups[sample_idx / MCS_GROUP_RATES]; + sample_idx %= MCS_GROUP_RATES; + + if (sample_group == &minstrel_mcs_groups[MINSTREL_CCK_GROUP] && + (sample_idx >= 4) != txrc->short_preamble) + return; + info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE; rate->count = 1; - if (sample_idx / MCS_GROUP_RATES == MINSTREL_CCK_GROUP) { + if (sample_group == &minstrel_mcs_groups[MINSTREL_CCK_GROUP]) { int idx = sample_idx % ARRAY_SIZE(mp->cck_rates); rate->idx = mp->cck_rates[idx]; } else if (sample_group->flags & IEEE80211_TX_RC_VHT_MCS) { ieee80211_rate_set_vht(rate, sample_idx % MCS_GROUP_RATES, sample_group->streams); } else { - rate->idx = sample_idx % MCS_GROUP_RATES + - (sample_group->streams - 1) * 8; + rate->idx = sample_idx + (sample_group->streams - 1) * 8; } rate->flags = sample_group->flags; @@ -1130,14 +1141,14 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband, struct minstrel_ht_sta_priv *msp = priv_sta; struct minstrel_ht_sta *mi = &msp->ht; struct ieee80211_mcs_info *mcs = &sta->ht_cap.mcs; - u16 sta_cap = sta->ht_cap.cap; + u16 ht_cap = sta->ht_cap.cap; struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; - struct sta_info *sinfo = container_of(sta, struct sta_info, sta); int use_vht; int n_supported = 0; int ack_dur; int stbc; int i; + bool ldpc; /* fall back to the old minstrel for legacy stations */ if (!sta->ht_cap.ht_supported) @@ -1145,12 +1156,10 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband, BUILD_BUG_ON(ARRAY_SIZE(minstrel_mcs_groups) != MINSTREL_GROUPS_NB); -#ifdef CONFIG_MAC80211_RC_MINSTREL_VHT if (vht_cap->vht_supported) use_vht = vht_cap->vht_mcs.tx_mcs_map != cpu_to_le16(~0); else -#endif - use_vht = 0; + use_vht = 0; msp->is_ht = true; memset(mi, 0, sizeof(*mi)); @@ -1175,16 +1184,22 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband, } mi->sample_tries = 4; - /* TODO tx_flags for vht - ATM the RC API is not fine-grained enough */ if (!use_vht) { - stbc = (sta_cap 
& IEEE80211_HT_CAP_RX_STBC) >> + stbc = (ht_cap & IEEE80211_HT_CAP_RX_STBC) >> IEEE80211_HT_CAP_RX_STBC_SHIFT; - mi->tx_flags |= stbc << IEEE80211_TX_CTL_STBC_SHIFT; - if (sta_cap & IEEE80211_HT_CAP_LDPC_CODING) - mi->tx_flags |= IEEE80211_TX_CTL_LDPC; + ldpc = ht_cap & IEEE80211_HT_CAP_LDPC_CODING; + } else { + stbc = (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK) >> + IEEE80211_VHT_CAP_RXSTBC_SHIFT; + + ldpc = vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC; } + mi->tx_flags |= stbc << IEEE80211_TX_CTL_STBC_SHIFT; + if (ldpc) + mi->tx_flags |= IEEE80211_TX_CTL_LDPC; + for (i = 0; i < ARRAY_SIZE(mi->groups); i++) { u32 gflags = minstrel_mcs_groups[i].flags; int bw, nss; @@ -1197,10 +1212,10 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband, if (gflags & IEEE80211_TX_RC_SHORT_GI) { if (gflags & IEEE80211_TX_RC_40_MHZ_WIDTH) { - if (!(sta_cap & IEEE80211_HT_CAP_SGI_40)) + if (!(ht_cap & IEEE80211_HT_CAP_SGI_40)) continue; } else { - if (!(sta_cap & IEEE80211_HT_CAP_SGI_20)) + if (!(ht_cap & IEEE80211_HT_CAP_SGI_20)) continue; } } @@ -1217,10 +1232,9 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband, /* HT rate */ if (gflags & IEEE80211_TX_RC_MCS) { -#ifdef CONFIG_MAC80211_RC_MINSTREL_VHT if (use_vht && minstrel_vht_only) continue; -#endif + mi->supported[i] = mcs->rx_mask[nss - 1]; if (mi->supported[i]) n_supported++; @@ -1258,8 +1272,7 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband, if (!n_supported) goto use_legacy; - if (test_sta_flag(sinfo, WLAN_STA_SHORT_PREAMBLE)) - mi->cck_supported_short |= mi->cck_supported_short << 4; + mi->supported[MINSTREL_CCK_GROUP] |= mi->cck_supported_short << 4; /* create an initial rate table with the lowest supported rates */ minstrel_ht_update_stats(mp, mi); @@ -1340,16 +1353,88 @@ minstrel_ht_free_sta(void *priv, struct ieee80211_sta *sta, void *priv_sta) kfree(msp); } +static void +minstrel_ht_init_cck_rates(struct minstrel_priv *mp) +{ + static const int bitrates[4] = { 10, 20, 55, 110 }; + struct ieee80211_supported_band *sband; + u32 rate_flags = ieee80211_chandef_rate_flags(&mp->hw->conf.chandef); + int i, j; + + sband = mp->hw->wiphy->bands[NL80211_BAND_2GHZ]; + if (!sband) + return; + + for (i = 0; i < sband->n_bitrates; i++) { + struct ieee80211_rate *rate = &sband->bitrates[i]; + + if (rate->flags & IEEE80211_RATE_ERP_G) + continue; + + if ((rate_flags & sband->bitrates[i].flags) != rate_flags) + continue; + + for (j = 0; j < ARRAY_SIZE(bitrates); j++) { + if (rate->bitrate != bitrates[j]) + continue; + + mp->cck_rates[j] = i; + break; + } + } +} + static void * minstrel_ht_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) { - return mac80211_minstrel.alloc(hw, debugfsdir); + struct minstrel_priv *mp; + + mp = kzalloc(sizeof(struct minstrel_priv), GFP_ATOMIC); + if (!mp) + return NULL; + + /* contention window settings + * Just an approximation. 
Using the per-queue values would complicate + * the calculations and is probably unnecessary */ + mp->cw_min = 15; + mp->cw_max = 1023; + + /* number of packets (in %) to use for sampling other rates + * sample less often for non-mrr packets, because the overhead + * is much higher than with mrr */ + mp->lookaround_rate = 5; + mp->lookaround_rate_mrr = 10; + + /* maximum time that the hw is allowed to stay in one MRR segment */ + mp->segment_size = 6000; + + if (hw->max_rate_tries > 0) + mp->max_retry = hw->max_rate_tries; + else + /* safe default, does not necessarily have to match hw properties */ + mp->max_retry = 7; + + if (hw->max_rates >= 4) + mp->has_mrr = true; + + mp->hw = hw; + mp->update_interval = 100; + +#ifdef CONFIG_MAC80211_DEBUGFS + mp->fixed_rate_idx = (u32) -1; + debugfs_create_u32("fixed_rate_idx", S_IRUGO | S_IWUGO, debugfsdir, + &mp->fixed_rate_idx); +#endif + + minstrel_ht_init_cck_rates(mp); + + return mp; } static void minstrel_ht_free(void *priv) { - mac80211_minstrel.free(priv); + kfree(priv); } static u32 minstrel_ht_get_expected_throughput(void *priv_sta) @@ -1384,7 +1469,6 @@ static const struct rate_control_ops mac80211_minstrel_ht = { .free = minstrel_ht_free, #ifdef CONFIG_MAC80211_DEBUGFS .add_sta_debugfs = minstrel_ht_add_sta_debugfs, - .remove_sta_debugfs = minstrel_ht_remove_sta_debugfs, #endif .get_expected_throughput = minstrel_ht_get_expected_throughput, }; @@ -1409,14 +1493,14 @@ static void __init init_sample_table(void) } int __init -rc80211_minstrel_ht_init(void) +rc80211_minstrel_init(void) { init_sample_table(); return ieee80211_rate_control_register(&mac80211_minstrel_ht); } void -rc80211_minstrel_ht_exit(void) +rc80211_minstrel_exit(void) { ieee80211_rate_control_unregister(&mac80211_minstrel_ht); } diff --git a/net/mac80211/rc80211_minstrel_ht.h b/net/mac80211/rc80211_minstrel_ht.h index de1646c42e82..26b7a3244b47 100644 --- a/net/mac80211/rc80211_minstrel_ht.h +++ b/net/mac80211/rc80211_minstrel_ht.h @@ -15,11 +15,7 @@ */ #define MINSTREL_MAX_STREAMS 3 #define MINSTREL_HT_STREAM_GROUPS 4 /* BW(=2) * SGI(=2) */ -#ifdef CONFIG_MAC80211_RC_MINSTREL_VHT #define MINSTREL_VHT_STREAM_GROUPS 6 /* BW(=3) * SGI(=2) */ -#else -#define MINSTREL_VHT_STREAM_GROUPS 0 -#endif #define MINSTREL_HT_GROUPS_NB (MINSTREL_MAX_STREAMS * \ MINSTREL_HT_STREAM_GROUPS) @@ -34,16 +30,13 @@ #define MINSTREL_CCK_GROUP (MINSTREL_HT_GROUP_0 + MINSTREL_HT_GROUPS_NB) #define MINSTREL_VHT_GROUP_0 (MINSTREL_CCK_GROUP + 1) -#ifdef CONFIG_MAC80211_RC_MINSTREL_VHT #define MCS_GROUP_RATES 10 -#else -#define MCS_GROUP_RATES 8 -#endif struct mcs_group { - u32 flags; - unsigned int streams; - unsigned int duration[MCS_GROUP_RATES]; + u16 flags; + u8 streams; + u8 shift; + u16 duration[MCS_GROUP_RATES]; }; extern const struct mcs_group minstrel_mcs_groups[]; @@ -110,17 +103,12 @@ struct minstrel_ht_sta_priv { struct minstrel_ht_sta ht; struct minstrel_sta_info legacy; }; -#ifdef CONFIG_MAC80211_DEBUGFS - struct dentry *dbg_stats; - struct dentry *dbg_stats_csv; -#endif void *ratelist; void *sample_table; bool is_ht; }; void minstrel_ht_add_sta_debugfs(void *priv, void *priv_sta, struct dentry *dir); -void minstrel_ht_remove_sta_debugfs(void *priv, void *priv_sta); int minstrel_ht_get_tp_avg(struct minstrel_ht_sta *mi, int group, int rate, int prob_ewma); diff --git a/net/mac80211/rc80211_minstrel_ht_debugfs.c b/net/mac80211/rc80211_minstrel_ht_debugfs.c index bfcc03152dc6..57820a5f2c16 100644 --- a/net/mac80211/rc80211_minstrel_ht_debugfs.c +++ 
b/net/mac80211/rc80211_minstrel_ht_debugfs.c @@ -15,6 +15,22 @@ #include "rc80211_minstrel.h" #include "rc80211_minstrel_ht.h" +static ssize_t +minstrel_stats_read(struct file *file, char __user *buf, size_t len, loff_t *ppos) +{ + struct minstrel_debugfs_info *ms; + + ms = file->private_data; + return simple_read_from_buffer(buf, len, ppos, ms->buf, ms->len); +} + +static int +minstrel_stats_release(struct inode *inode, struct file *file) +{ + kfree(file->private_data); + return 0; +} + static char * minstrel_ht_stats_dump(struct minstrel_ht_sta *mi, int i, char *p) { @@ -41,7 +57,7 @@ minstrel_ht_stats_dump(struct minstrel_ht_sta *mi, int i, char *p) struct minstrel_rate_stats *mrs = &mi->groups[i].rates[j]; static const int bitrates[4] = { 10, 20, 55, 110 }; int idx = i * MCS_GROUP_RATES + j; - unsigned int prob_ewmsd; + unsigned int duration; if (!(mi->supported[i] & BIT(j))) continue; @@ -79,21 +95,21 @@ minstrel_ht_stats_dump(struct minstrel_ht_sta *mi, int i, char *p) p += sprintf(p, " %3u ", idx); /* tx_time[rate(i)] in usec */ - tx_time = DIV_ROUND_CLOSEST(mg->duration[j], 1000); + duration = mg->duration[j]; + duration <<= mg->shift; + tx_time = DIV_ROUND_CLOSEST(duration, 1000); p += sprintf(p, "%6u ", tx_time); tp_max = minstrel_ht_get_tp_avg(mi, i, j, MINSTREL_FRAC(100, 100)); tp_avg = minstrel_ht_get_tp_avg(mi, i, j, mrs->prob_ewma); eprob = MINSTREL_TRUNC(mrs->prob_ewma * 1000); - prob_ewmsd = minstrel_get_ewmsd10(mrs); - p += sprintf(p, "%4u.%1u %4u.%1u %3u.%1u %3u.%1u" + p += sprintf(p, "%4u.%1u %4u.%1u %3u.%1u" " %3u %3u %-3u " "%9llu %-9llu\n", tp_max / 10, tp_max % 10, tp_avg / 10, tp_avg % 10, eprob / 10, eprob % 10, - prob_ewmsd / 10, prob_ewmsd % 10, mrs->retry_count, mrs->last_success, mrs->last_attempts, @@ -130,9 +146,9 @@ minstrel_ht_stats_open(struct inode *inode, struct file *file) p += sprintf(p, "\n"); p += sprintf(p, - " best ____________rate__________ ________statistics________ _____last____ ______sum-of________\n"); + " best ____________rate__________ ____statistics___ _____last____ ______sum-of________\n"); p += sprintf(p, - "mode guard # rate [name idx airtime max_tp] [avg(tp) avg(prob) sd(prob)] [retry|suc|att] [#success | #attempts]\n"); + "mode guard # rate [name idx airtime max_tp] [avg(tp) avg(prob)] [retry|suc|att] [#success | #attempts]\n"); p = minstrel_ht_stats_dump(mi, MINSTREL_CCK_GROUP, p); for (i = 0; i < MINSTREL_CCK_GROUP; i++) @@ -187,7 +203,7 @@ minstrel_ht_stats_csv_dump(struct minstrel_ht_sta *mi, int i, char *p) struct minstrel_rate_stats *mrs = &mi->groups[i].rates[j]; static const int bitrates[4] = { 10, 20, 55, 110 }; int idx = i * MCS_GROUP_RATES + j; - unsigned int prob_ewmsd; + unsigned int duration; if (!(mi->supported[i] & BIT(j))) continue; @@ -222,20 +238,21 @@ minstrel_ht_stats_csv_dump(struct minstrel_ht_sta *mi, int i, char *p) } p += sprintf(p, "%u,", idx); - tx_time = DIV_ROUND_CLOSEST(mg->duration[j], 1000); + + duration = mg->duration[j]; + duration <<= mg->shift; + tx_time = DIV_ROUND_CLOSEST(duration, 1000); p += sprintf(p, "%u,", tx_time); tp_max = minstrel_ht_get_tp_avg(mi, i, j, MINSTREL_FRAC(100, 100)); tp_avg = minstrel_ht_get_tp_avg(mi, i, j, mrs->prob_ewma); eprob = MINSTREL_TRUNC(mrs->prob_ewma * 1000); - prob_ewmsd = minstrel_get_ewmsd10(mrs); - p += sprintf(p, "%u.%u,%u.%u,%u.%u,%u.%u,%u,%u," + p += sprintf(p, "%u.%u,%u.%u,%u.%u,%u,%u," "%u,%llu,%llu,", tp_max / 10, tp_max % 10, tp_avg / 10, tp_avg % 10, eprob / 10, eprob % 10, - prob_ewmsd / 10, prob_ewmsd % 10, mrs->retry_count, mrs->last_success, 
mrs->last_attempts, @@ -303,17 +320,8 @@ minstrel_ht_add_sta_debugfs(void *priv, void *priv_sta, struct dentry *dir) { struct minstrel_ht_sta_priv *msp = priv_sta; - msp->dbg_stats = debugfs_create_file("rc_stats", 0444, dir, msp, - &minstrel_ht_stat_fops); - msp->dbg_stats_csv = debugfs_create_file("rc_stats_csv", 0444, dir, msp, - &minstrel_ht_stat_csv_fops); -} - -void -minstrel_ht_remove_sta_debugfs(void *priv, void *priv_sta) -{ - struct minstrel_ht_sta_priv *msp = priv_sta; - - debugfs_remove(msp->dbg_stats); - debugfs_remove(msp->dbg_stats_csv); + debugfs_create_file("rc_stats", 0444, dir, msp, + &minstrel_ht_stat_fops); + debugfs_create_file("rc_stats_csv", 0444, dir, msp, + &minstrel_ht_stat_csv_fops); } diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 96611d5dfadb..3bd3b5769797 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -115,7 +115,8 @@ static inline bool should_drop_frame(struct sk_buff *skb, int present_fcs_len, if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC | - RX_FLAG_ONLY_MONITOR)) + RX_FLAG_ONLY_MONITOR | + RX_FLAG_NO_PSDU)) return true; if (unlikely(skb->len < 16 + present_fcs_len + rtap_space)) @@ -189,6 +190,15 @@ ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local, BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he_mu) != 12); } + if (status->flag & RX_FLAG_NO_PSDU) + len += 1; + + if (status->flag & RX_FLAG_RADIOTAP_LSIG) { + len = ALIGN(len, 2); + len += 4; + BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_lsig) != 4); + } + if (status->chains) { /* antenna and antenna signal fields */ len += 2 * hweight8(status->chains); @@ -279,6 +289,7 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, struct ieee80211_vendor_radiotap rtap = {}; struct ieee80211_radiotap_he he = {}; struct ieee80211_radiotap_he_mu he_mu = {}; + struct ieee80211_radiotap_lsig lsig = {}; if (status->flag & RX_FLAG_RADIOTAP_HE) { he = *(struct ieee80211_radiotap_he *)skb->data; @@ -291,6 +302,11 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, skb_pull(skb, sizeof(he_mu)); } + if (status->flag & RX_FLAG_RADIOTAP_LSIG) { + lsig = *(struct ieee80211_radiotap_lsig *)skb->data; + skb_pull(skb, sizeof(lsig)); + } + if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) { rtap = *(struct ieee80211_vendor_radiotap *)skb->data; /* rtap.len and rtap.pad are undone immediately */ @@ -549,7 +565,7 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, if (status->encoding == RX_ENC_HE && status->flag & RX_FLAG_RADIOTAP_HE) { -#define HE_PREP(f, val) cpu_to_le16(FIELD_PREP(IEEE80211_RADIOTAP_HE_##f, val)) +#define HE_PREP(f, val) le16_encode_bits(val, IEEE80211_RADIOTAP_HE_##f) if (status->enc_flags & RX_ENC_FLAG_STBC_MASK) { he.data6 |= HE_PREP(DATA6_NSTS, @@ -630,6 +646,21 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, pos += sizeof(he_mu); } + if (status->flag & RX_FLAG_NO_PSDU) { + rthdr->it_present |= + cpu_to_le32(1 << IEEE80211_RADIOTAP_ZERO_LEN_PSDU); + *pos++ = status->zero_length_psdu_type; + } + + if (status->flag & RX_FLAG_RADIOTAP_LSIG) { + /* ensure 2 byte alignment */ + while ((pos - (u8 *)rthdr) & 1) + pos++; + rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_LSIG); + memcpy(pos, &lsig, sizeof(lsig)); + pos += sizeof(lsig); + } + for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) { *pos++ = status->chain_signal[chain]; *pos++ = chain; @@ -1505,7 +1536,7 @@ static void sta_ps_start(struct sta_info *sta) if (!sta->sta.txq[0]) return; - for (tid = 0; tid < ARRAY_SIZE(sta->sta.txq); 
tid++) { + for (tid = 0; tid < IEEE80211_NUM_TIDS; tid++) { if (txq_has_queue(sta->sta.txq[tid])) set_bit(tid, &sta->txq_buffered_tids); else @@ -2046,6 +2077,7 @@ ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata, idx = sdata->fragment_next; for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) { struct ieee80211_hdr *f_hdr; + struct sk_buff *f_skb; idx--; if (idx < 0) @@ -2057,7 +2089,8 @@ ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata, entry->last_frag + 1 != frag) continue; - f_hdr = (struct ieee80211_hdr *)entry->skb_list.next->data; + f_skb = __skb_peek(&entry->skb_list); + f_hdr = (struct ieee80211_hdr *) f_skb->data; /* * Check ftype and addresses are equal, else check next fragment @@ -2314,7 +2347,7 @@ __ieee80211_data_to_8023(struct ieee80211_rx_data *rx, bool *port_control) if (!sdata->u.mgd.use_4addr) return -1; - else + else if (!ether_addr_equal(hdr->addr1, sdata->vif.addr)) check_port_control = true; } @@ -2425,8 +2458,9 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx) if (!xmit_skb) net_info_ratelimited("%s: failed to clone multicast frame\n", dev->name); - } else if (!is_multicast_ether_addr(ehdr->h_dest)) { - dsta = sta_info_get(sdata, skb->data); + } else if (!is_multicast_ether_addr(ehdr->h_dest) && + !ether_addr_equal(ehdr->h_dest, ehdr->h_source)) { + dsta = sta_info_get(sdata, ehdr->h_dest); if (dsta) { /* * The destination station is associated to @@ -4207,11 +4241,10 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx, if (fast_rx->internal_forward) { struct sk_buff *xmit_skb = NULL; - bool multicast = is_multicast_ether_addr(skb->data); - - if (multicast) { + if (is_multicast_ether_addr(addrs.da)) { xmit_skb = skb_copy(skb, GFP_ATOMIC); - } else if (sta_info_get(rx->sdata, skb->data)) { + } else if (!ether_addr_equal(addrs.da, addrs.sa) && + sta_info_get(rx->sdata, addrs.da)) { xmit_skb = skb; skb = NULL; } diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c index 029334835747..4e4902bdbef8 100644 --- a/net/mac80211/spectmgmt.c +++ b/net/mac80211/spectmgmt.c @@ -144,6 +144,7 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata, wide_bw_chansw_ie->new_center_freq_seg1, /* .basic_mcs_set doesn't matter */ }; + struct ieee80211_ht_operation ht_oper = {}; /* default, for the case of IEEE80211_VHT_CHANWIDTH_USE_HT, * to the previously parsed chandef @@ -151,7 +152,9 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata, new_vht_chandef = csa_ie->chandef; /* ignore if parsing fails */ - if (!ieee80211_chandef_vht_oper(&vht_oper, &new_vht_chandef)) + if (!ieee80211_chandef_vht_oper(&sdata->local->hw, + &vht_oper, &ht_oper, + &new_vht_chandef)) new_vht_chandef.chan = NULL; if (sta_flags & IEEE80211_STA_DISABLE_80P80MHZ && diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index f34202242d24..fb8c2252ac0e 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -113,7 +113,12 @@ static void __cleanup_single_sta(struct sta_info *sta) if (sta->sta.txq[0]) { for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) { - struct txq_info *txqi = to_txq_info(sta->sta.txq[i]); + struct txq_info *txqi; + + if (!sta->sta.txq[i]) + continue; + + txqi = to_txq_info(sta->sta.txq[i]); spin_lock_bh(&fq->lock); ieee80211_txq_purge(local, txqi); @@ -374,6 +379,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) { struct txq_info *txq = txq_data + i * size; + /* might not do anything for the bufferable MMPDU TXQ */ 
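/*
 * Sketch, not part of this patch: after this series sta->sta.txq[]
 * carries IEEE80211_NUM_TIDS + 1 entries, and the extra slot for
 * bufferable management MPDUs is only populated when the driver
 * opted in (STA_MMPDU_TXQ / BUFF_MMPDU_TXQ), so it may legitimately
 * stay NULL. That is why the sta_info.c hunks here walk the array
 * defensively, along these lines:
 *
 *	for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) {
 *		if (!sta->sta.txq[i])	// MMPDU TXQ not set up
 *			continue;
 *		txqi = to_txq_info(sta->sta.txq[i]);
 *		// ... operate on txqi ...
 *	}
 */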
ieee80211_txq_init(sdata, sta, txq, i); } } @@ -1239,13 +1245,11 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta) if (!ieee80211_hw_check(&local->hw, AP_LINK_PS)) drv_sta_notify(local, sdata, STA_NOTIFY_AWAKE, &sta->sta); - if (sta->sta.txq[0]) { - for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) { - if (!txq_has_queue(sta->sta.txq[i])) - continue; + for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) { + if (!sta->sta.txq[i] || !txq_has_queue(sta->sta.txq[i])) + continue; - drv_wake_tx_queue(local, to_txq_info(sta->sta.txq[i])); - } + drv_wake_tx_queue(local, to_txq_info(sta->sta.txq[i])); } skb_queue_head_init(&pending); @@ -1683,7 +1687,8 @@ ieee80211_sta_ps_deliver_response(struct sta_info *sta, return; for (tid = 0; tid < ARRAY_SIZE(sta->sta.txq); tid++) { - if (!(driver_release_tids & BIT(tid)) || + if (!sta->sta.txq[tid] || + !(driver_release_tids & BIT(tid)) || txq_has_queue(sta->sta.txq[tid])) continue; @@ -2323,13 +2328,13 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo, sinfo->filled |= BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL); } - if (ieee80211_hw_check(&sta->local->hw, REPORTS_TX_ACK_STATUS) && - !(sinfo->filled & BIT_ULL(NL80211_STA_INFO_DATA_ACK_SIGNAL_AVG))) { + if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL_AVG)) && + sta->status_stats.ack_signal_filled) { sinfo->avg_ack_signal = -(s8)ewma_avg_signal_read( &sta->status_stats.avg_ack_signal); sinfo->filled |= - BIT_ULL(NL80211_STA_INFO_DATA_ACK_SIGNAL_AVG); + BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL_AVG); } } diff --git a/net/mac80211/status.c b/net/mac80211/status.c index 91d7c0cd1882..aa4afbf0abaf 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c @@ -987,6 +987,25 @@ void ieee80211_tx_status_ext(struct ieee80211_hw *hw, } EXPORT_SYMBOL(ieee80211_tx_status_ext); +void ieee80211_tx_rate_update(struct ieee80211_hw *hw, + struct ieee80211_sta *pubsta, + struct ieee80211_tx_info *info) +{ + struct ieee80211_local *local = hw_to_local(hw); + struct ieee80211_supported_band *sband = hw->wiphy->bands[info->band]; + struct sta_info *sta = container_of(pubsta, struct sta_info, sta); + struct ieee80211_tx_status status = { + .info = info, + .sta = pubsta, + }; + + rate_control_tx_status(local, sband, &status); + + if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL)) + sta->tx_stats.last_rate = info->status.rates[0]; +} +EXPORT_SYMBOL(ieee80211_tx_rate_update); + void ieee80211_report_low_ack(struct ieee80211_sta *pubsta, u32 num_packets) { struct sta_info *sta = container_of(pubsta, struct sta_info, sta); diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h index 0ab69a1964f8..588c51a67c89 100644 --- a/net/mac80211/trace.h +++ b/net/mac80211/trace.h @@ -2600,6 +2600,29 @@ TRACE_EVENT(drv_wake_tx_queue, ) ); +TRACE_EVENT(drv_get_ftm_responder_stats, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct cfg80211_ftm_responder_stats *ftm_stats), + + TP_ARGS(local, sdata, ftm_stats), + + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + ), + + TP_printk( + LOCAL_PR_FMT VIF_PR_FMT, + LOCAL_PR_ARG, VIF_PR_ARG + ) +); + #endif /* !__MAC80211_DRIVER_TRACE || TRACE_HEADER_MULTI_READ */ #undef TRACE_INCLUDE_PATH diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 25ba24bef8f5..e0ccee23fbcd 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -1253,10 +1253,18 @@ static struct txq_info *ieee80211_get_txq(struct ieee80211_local *local, (info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE)) return 
NULL; - if (!ieee80211_is_data_present(hdr->frame_control)) - return NULL; - - if (sta) { + if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) { + if ((!ieee80211_is_mgmt(hdr->frame_control) || + ieee80211_is_bufferable_mmpdu(hdr->frame_control) || + vif->type == NL80211_IFTYPE_STATION) && + sta && sta->uploaded) { + /* + * This will be NULL if the driver didn't set the + * opt-in hardware flag. + */ + txq = sta->sta.txq[IEEE80211_NUM_TIDS]; + } + } else if (sta) { u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK; if (!sta->uploaded) @@ -1444,16 +1452,33 @@ void ieee80211_txq_init(struct ieee80211_sub_if_data *sdata, txqi->txq.vif = &sdata->vif; - if (sta) { - txqi->txq.sta = &sta->sta; - sta->sta.txq[tid] = &txqi->txq; - txqi->txq.tid = tid; - txqi->txq.ac = ieee80211_ac_from_tid(tid); - } else { + if (!sta) { sdata->vif.txq = &txqi->txq; txqi->txq.tid = 0; txqi->txq.ac = IEEE80211_AC_BE; + + return; + } + + if (tid == IEEE80211_NUM_TIDS) { + if (sdata->vif.type == NL80211_IFTYPE_STATION) { + /* Drivers need to opt in to the management MPDU TXQ */ + if (!ieee80211_hw_check(&sdata->local->hw, + STA_MMPDU_TXQ)) + return; + } else if (!ieee80211_hw_check(&sdata->local->hw, + BUFF_MMPDU_TXQ)) { + /* Drivers need to opt in to the bufferable MMPDU TXQ */ + return; + } + txqi->txq.ac = IEEE80211_AC_VO; + } else { + txqi->txq.ac = ieee80211_ac_from_tid(tid); } + + txqi->txq.sta = &sta->sta; + txqi->txq.tid = tid; + sta->sta.txq[tid] = &txqi->txq; } void ieee80211_txq_purge(struct ieee80211_local *local, @@ -2955,6 +2980,10 @@ void ieee80211_check_fast_xmit(struct sta_info *sta) if (!(build.key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) goto out; + /* Key is being removed */ + if (build.key->flags & KEY_FLAG_TAINTED) + goto out; + switch (build.key->conf.cipher) { case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_CCMP_256: @@ -3200,6 +3229,10 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata, max_amsdu_len = min_t(int, max_amsdu_len, sta->sta.max_rc_amsdu_len); + if (sta->sta.max_tid_amsdu_len[tid]) + max_amsdu_len = min_t(int, max_amsdu_len, + sta->sta.max_tid_amsdu_len[tid]); + spin_lock_bh(&fq->lock); /* TODO: Ideally aggregation should be done on dequeue to remain @@ -3232,6 +3265,9 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata, if (max_frags && nfrags > max_frags) goto out; + if (!drv_can_aggregate_in_amsdu(local, head, skb)) + goto out; + if (!ieee80211_amsdu_prepare_head(sdata, fast_tx, head)) goto out; @@ -3476,13 +3512,19 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw, struct ieee80211_tx_info *info; struct ieee80211_tx_data tx; ieee80211_tx_result r; - struct ieee80211_vif *vif; + struct ieee80211_vif *vif = txq->vif; spin_lock_bh(&fq->lock); - if (test_bit(IEEE80211_TXQ_STOP, &txqi->flags)) + if (test_bit(IEEE80211_TXQ_STOP, &txqi->flags) || + test_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags)) goto out; + if (vif->txqs_stopped[ieee80211_ac_from_tid(txq->tid)]) { + set_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags); + goto out; + } + /* Make sure fragments stay together. 
*/ skb = __skb_dequeue(&txqi->frags); if (skb) @@ -3577,6 +3619,7 @@ begin: } IEEE80211_SKB_CB(skb)->control.vif = vif; + out: spin_unlock_bh(&fq->lock); @@ -3605,13 +3648,7 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb, if (!IS_ERR_OR_NULL(sta)) { struct ieee80211_fast_tx *fast_tx; - /* We need a bit of data queued to build aggregates properly, so - * instruct the TCP stack to allow more than a single ms of data - * to be queued in the stack. The value is a bit-shift of 1 - * second, so 8 is ~4ms of queued data. Only affects local TCP - * sockets. - */ - sk_pacing_shift_update(skb->sk, 8); + sk_pacing_shift_update(skb->sk, sdata->local->hw.tx_sk_pacing_shift); fast_tx = rcu_dereference(sta->fast_tx); diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 716cd6442d86..bec424316ea4 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -240,6 +240,102 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw, } EXPORT_SYMBOL(ieee80211_ctstoself_duration); +static void __ieee80211_wake_txqs(struct ieee80211_sub_if_data *sdata, int ac) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_vif *vif = &sdata->vif; + struct fq *fq = &local->fq; + struct ps_data *ps = NULL; + struct txq_info *txqi; + struct sta_info *sta; + int i; + + spin_lock_bh(&fq->lock); + + if (sdata->vif.type == NL80211_IFTYPE_AP) + ps = &sdata->bss->ps; + + sdata->vif.txqs_stopped[ac] = false; + + list_for_each_entry_rcu(sta, &local->sta_list, list) { + if (sdata != sta->sdata) + continue; + + for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) { + struct ieee80211_txq *txq = sta->sta.txq[i]; + + if (!txq) + continue; + + txqi = to_txq_info(txq); + + if (ac != txq->ac) + continue; + + if (!test_and_clear_bit(IEEE80211_TXQ_STOP_NETIF_TX, + &txqi->flags)) + continue; + + spin_unlock_bh(&fq->lock); + drv_wake_tx_queue(local, txqi); + spin_lock_bh(&fq->lock); + } + } + + if (!vif->txq) + goto out; + + txqi = to_txq_info(vif->txq); + + if (!test_and_clear_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags) || + (ps && atomic_read(&ps->num_sta_ps)) || ac != vif->txq->ac) + goto out; + + spin_unlock_bh(&fq->lock); + + drv_wake_tx_queue(local, txqi); + return; +out: + spin_unlock_bh(&fq->lock); +} + +void ieee80211_wake_txqs(unsigned long data) +{ + struct ieee80211_local *local = (struct ieee80211_local *)data; + struct ieee80211_sub_if_data *sdata; + int n_acs = IEEE80211_NUM_ACS; + unsigned long flags; + int i; + + rcu_read_lock(); + spin_lock_irqsave(&local->queue_stop_reason_lock, flags); + + if (local->hw.queues < IEEE80211_NUM_ACS) + n_acs = 1; + + for (i = 0; i < local->hw.queues; i++) { + if (local->queue_stop_reasons[i]) + continue; + + spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); + list_for_each_entry_rcu(sdata, &local->interfaces, list) { + int ac; + + for (ac = 0; ac < n_acs; ac++) { + int ac_queue = sdata->vif.hw_queue[ac]; + + if (ac_queue == i || + sdata->vif.cab_queue == i) + __ieee80211_wake_txqs(sdata, ac); + } + } + spin_lock_irqsave(&local->queue_stop_reason_lock, flags); + } + + spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); + rcu_read_unlock(); +} + void ieee80211_propagate_queue_wake(struct ieee80211_local *local, int queue) { struct ieee80211_sub_if_data *sdata; @@ -308,6 +404,9 @@ static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue, rcu_read_unlock(); } else tasklet_schedule(&local->tx_pending_tasklet); + + if (local->ops->wake_tx_queue) + tasklet_schedule(&local->wake_txqs_tasklet); } void 
ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue, @@ -351,9 +450,6 @@ static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue, if (__test_and_set_bit(reason, &local->queue_stop_reasons[queue])) return; - if (local->ops->wake_tx_queue) - return; - if (local->hw.queues < IEEE80211_NUM_ACS) n_acs = 1; @@ -366,8 +462,15 @@ static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue, for (ac = 0; ac < n_acs; ac++) { if (sdata->vif.hw_queue[ac] == queue || - sdata->vif.cab_queue == queue) - netif_stop_subqueue(sdata->dev, ac); + sdata->vif.cab_queue == queue) { + if (!local->ops->wake_tx_queue) { + netif_stop_subqueue(sdata->dev, ac); + continue; + } + spin_lock(&local->fq.lock); + sdata->vif.txqs_stopped[ac] = true; + spin_unlock(&local->fq.lock); + } } } rcu_read_unlock(); @@ -2075,6 +2178,11 @@ int ieee80211_reconfig(struct ieee80211_local *local) case NL80211_IFTYPE_AP: changed |= BSS_CHANGED_SSID | BSS_CHANGED_P2P_PS; + if (sdata->vif.bss_conf.ftm_responder == 1 && + wiphy_ext_feature_isset(sdata->local->hw.wiphy, + NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER)) + changed |= BSS_CHANGED_FTM_RESPONDER; + if (sdata->vif.type == NL80211_IFTYPE_AP) { changed |= BSS_CHANGED_AP_PROBE_RESP; @@ -2657,49 +2765,65 @@ bool ieee80211_chandef_ht_oper(const struct ieee80211_ht_operation *ht_oper, return true; } -bool ieee80211_chandef_vht_oper(const struct ieee80211_vht_operation *oper, +bool ieee80211_chandef_vht_oper(struct ieee80211_hw *hw, + const struct ieee80211_vht_operation *oper, + const struct ieee80211_ht_operation *htop, struct cfg80211_chan_def *chandef) { struct cfg80211_chan_def new = *chandef; - int cf1, cf2; + int cf0, cf1; + int ccfs0, ccfs1, ccfs2; + int ccf0, ccf1; - if (!oper) + if (!oper || !htop) return false; - cf1 = ieee80211_channel_to_frequency(oper->center_freq_seg0_idx, - chandef->chan->band); - cf2 = ieee80211_channel_to_frequency(oper->center_freq_seg1_idx, - chandef->chan->band); + ccfs0 = oper->center_freq_seg0_idx; + ccfs1 = oper->center_freq_seg1_idx; + ccfs2 = (le16_to_cpu(htop->operation_mode) & + IEEE80211_HT_OP_MODE_CCFS2_MASK) + >> IEEE80211_HT_OP_MODE_CCFS2_SHIFT; + + /* when parsing (and we know how to) CCFS1 and CCFS2 are equivalent */ + ccf0 = ccfs0; + ccf1 = ccfs1; + if (!ccfs1 && ieee80211_hw_check(hw, SUPPORTS_VHT_EXT_NSS_BW)) + ccf1 = ccfs2; + + cf0 = ieee80211_channel_to_frequency(ccf0, chandef->chan->band); + cf1 = ieee80211_channel_to_frequency(ccf1, chandef->chan->band); switch (oper->chan_width) { case IEEE80211_VHT_CHANWIDTH_USE_HT: + /* just use HT information directly */ break; case IEEE80211_VHT_CHANWIDTH_80MHZ: new.width = NL80211_CHAN_WIDTH_80; - new.center_freq1 = cf1; + new.center_freq1 = cf0; /* If needed, adjust based on the newer interop workaround. 
*/ - if (oper->center_freq_seg1_idx) { + if (ccf1) { unsigned int diff; - diff = abs(oper->center_freq_seg1_idx - - oper->center_freq_seg0_idx); + diff = abs(ccf1 - ccf0); if (diff == 8) { new.width = NL80211_CHAN_WIDTH_160; - new.center_freq1 = cf2; + new.center_freq1 = cf1; } else if (diff > 8) { new.width = NL80211_CHAN_WIDTH_80P80; - new.center_freq2 = cf2; + new.center_freq2 = cf1; } } break; case IEEE80211_VHT_CHANWIDTH_160MHZ: + /* deprecated encoding */ new.width = NL80211_CHAN_WIDTH_160; - new.center_freq1 = cf1; + new.center_freq1 = cf0; break; case IEEE80211_VHT_CHANWIDTH_80P80MHZ: + /* deprecated encoding */ new.width = NL80211_CHAN_WIDTH_80P80; - new.center_freq1 = cf1; - new.center_freq2 = cf2; + new.center_freq1 = cf0; + new.center_freq2 = cf1; break; default: return false; diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c index 259325cbcc31..006d82e4a397 100644 --- a/net/mac80211/vht.c +++ b/net/mac80211/vht.c @@ -3,6 +3,7 @@ * * Portions of this file * Copyright(c) 2015 - 2016 Intel Deutschland GmbH + * Copyright (C) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -231,6 +232,13 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata, memcpy(&vht_cap->vht_mcs, &vht_cap_ie->supp_mcs, sizeof(struct ieee80211_vht_mcs_info)); + /* copy EXT_NSS_BW Support value or remove the capability */ + if (ieee80211_hw_check(&sdata->local->hw, SUPPORTS_VHT_EXT_NSS_BW)) + vht_cap->cap |= (cap_info & IEEE80211_VHT_CAP_EXT_NSS_BW_MASK); + else + vht_cap->vht_mcs.tx_highest &= + ~cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE); + /* but also restrict MCSes */ for (i = 0; i < 8; i++) { u16 own_rx, own_tx, peer_rx, peer_tx; @@ -294,6 +302,18 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata, break; default: sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_80; + + if (!(vht_cap->vht_mcs.tx_highest & + cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE))) + break; + + /* + * If this is non-zero, then it does support 160 MHz after all, + * in one form or the other. We don't distinguish here (or even + * above) between 160 and 80+80 yet. 
+ */ + if (cap_info & IEEE80211_VHT_CAP_EXT_NSS_BW_MASK) + sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_160; } sta->sta.bandwidth = ieee80211_sta_cur_vht_bw(sta); diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c index 8fbe6cdbe255..5fe274c47c41 100644 --- a/net/mpls/af_mpls.c +++ b/net/mpls/af_mpls.c @@ -1223,7 +1223,7 @@ static int mpls_netconf_get_devconf(struct sk_buff *in_skb, int err; err = nlmsg_parse(nlh, sizeof(*ncm), tb, NETCONFA_MAX, - devconf_mpls_policy, NULL); + devconf_mpls_policy, extack); if (err < 0) goto errout; @@ -1263,6 +1263,7 @@ errout: static int mpls_netconf_dump_devconf(struct sk_buff *skb, struct netlink_callback *cb) { + const struct nlmsghdr *nlh = cb->nlh; struct net *net = sock_net(skb->sk); struct hlist_head *head; struct net_device *dev; @@ -1270,6 +1271,21 @@ static int mpls_netconf_dump_devconf(struct sk_buff *skb, int idx, s_idx; int h, s_h; + if (cb->strict_check) { + struct netlink_ext_ack *extack = cb->extack; + struct netconfmsg *ncm; + + if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ncm))) { + NL_SET_ERR_MSG_MOD(extack, "Invalid header for netconf dump request"); + return -EINVAL; + } + + if (nlmsg_attrlen(nlh, sizeof(*ncm))) { + NL_SET_ERR_MSG_MOD(extack, "Invalid data after header in netconf dump request"); + return -EINVAL; + } + } + s_h = cb->args[0]; s_idx = idx = cb->args[1]; @@ -1286,7 +1302,7 @@ static int mpls_netconf_dump_devconf(struct sk_buff *skb, goto cont; if (mpls_netconf_fill_devconf(skb, mdev, NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, + nlh->nlmsg_seq, RTM_NEWNETCONF, NLM_F_MULTI, NETCONFA_ALL) < 0) { @@ -2015,8 +2031,43 @@ nla_put_failure: return -EMSGSIZE; } +#if IS_ENABLED(CONFIG_INET) +static int mpls_valid_fib_dump_req(const struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) +{ + return ip_valid_fib_dump_req(nlh, extack); +} +#else +static int mpls_valid_fib_dump_req(const struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) +{ + struct rtmsg *rtm; + + if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) { + NL_SET_ERR_MSG_MOD(extack, "Invalid header for FIB dump request"); + return -EINVAL; + } + + rtm = nlmsg_data(nlh); + if (rtm->rtm_dst_len || rtm->rtm_src_len || rtm->rtm_tos || + rtm->rtm_table || rtm->rtm_protocol || rtm->rtm_scope || + rtm->rtm_type || rtm->rtm_flags) { + NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for FIB dump request"); + return -EINVAL; + } + + if (nlmsg_attrlen(nlh, sizeof(*rtm))) { + NL_SET_ERR_MSG_MOD(extack, "Invalid data after header in FIB dump request"); + return -EINVAL; + } + + return 0; +} +#endif + static int mpls_dump_routes(struct sk_buff *skb, struct netlink_callback *cb) { + const struct nlmsghdr *nlh = cb->nlh; struct net *net = sock_net(skb->sk); struct mpls_route __rcu **platform_label; size_t platform_labels; @@ -2024,6 +2075,13 @@ static int mpls_dump_routes(struct sk_buff *skb, struct netlink_callback *cb) ASSERT_RTNL(); + if (cb->strict_check) { + int err = mpls_valid_fib_dump_req(nlh, cb->extack); + + if (err < 0) + return err; + } + index = cb->args[0]; if (index < MPLS_LABEL_FIRST_UNRESERVED) index = MPLS_LABEL_FIRST_UNRESERVED; diff --git a/net/ncsi/internal.h b/net/ncsi/internal.h index 8055e3965cef..3d0a33b874f5 100644 --- a/net/ncsi/internal.h +++ b/net/ncsi/internal.h @@ -68,6 +68,10 @@ enum { NCSI_MODE_MAX }; +/* OEM Vendor Manufacturer ID */ +#define NCSI_OEM_MFR_MLX_ID 0x8119 +#define NCSI_OEM_MFR_BCM_ID 0x113d + struct ncsi_channel_version { u32 version; /* Supported BCD encoded NCSI version */ u32 alpha2; /* Supported BCD encoded NCSI version
*/ @@ -305,6 +309,7 @@ struct ncsi_cmd_arg { unsigned short words[8]; unsigned int dwords[4]; }; + unsigned char *data; /* NCSI OEM data */ }; extern struct list_head ncsi_dev_list; diff --git a/net/ncsi/ncsi-cmd.c b/net/ncsi/ncsi-cmd.c index 7567ca63aae2..82b7d9201db8 100644 --- a/net/ncsi/ncsi-cmd.c +++ b/net/ncsi/ncsi-cmd.c @@ -211,6 +211,25 @@ static int ncsi_cmd_handler_snfc(struct sk_buff *skb, return 0; } +static int ncsi_cmd_handler_oem(struct sk_buff *skb, + struct ncsi_cmd_arg *nca) +{ + struct ncsi_cmd_oem_pkt *cmd; + unsigned int len; + + len = sizeof(struct ncsi_cmd_pkt_hdr) + 4; + if (nca->payload < 26) + len += 26; + else + len += nca->payload; + + cmd = skb_put_zero(skb, len); + memcpy(&cmd->mfr_id, nca->data, nca->payload); + ncsi_cmd_build_header(&cmd->cmd.common, nca); + + return 0; +} + static struct ncsi_cmd_handler { unsigned char type; int payload; @@ -244,7 +263,7 @@ static struct ncsi_cmd_handler { { NCSI_PKT_CMD_GNS, 0, ncsi_cmd_handler_default }, { NCSI_PKT_CMD_GNPTS, 0, ncsi_cmd_handler_default }, { NCSI_PKT_CMD_GPS, 0, ncsi_cmd_handler_default }, - { NCSI_PKT_CMD_OEM, 0, NULL }, + { NCSI_PKT_CMD_OEM, -1, ncsi_cmd_handler_oem }, { NCSI_PKT_CMD_PLDM, 0, NULL }, { NCSI_PKT_CMD_GPUUID, 0, ncsi_cmd_handler_default } }; @@ -316,8 +335,13 @@ int ncsi_xmit_cmd(struct ncsi_cmd_arg *nca) return -ENOENT; } - /* Get packet payload length and allocate the request */ - nca->payload = nch->payload; + /* Get packet payload length and allocate the request. + * A negative payload length in the handler structure + * means the caller initializes nca->payload itself + * before calling this xmit function. + */ + if (nch->payload >= 0) + nca->payload = nch->payload; nr = ncsi_alloc_command(nca); if (!nr) return -ENOMEM; diff --git a/net/ncsi/ncsi-netlink.c b/net/ncsi/ncsi-netlink.c index 45f33d6dedf7..32cb7751d216 100644 --- a/net/ncsi/ncsi-netlink.c +++ b/net/ncsi/ncsi-netlink.c @@ -12,7 +12,6 @@ #include <linux/if_arp.h> #include <linux/rtnetlink.h> #include <linux/etherdevice.h> -#include <linux/module.h> #include <net/genetlink.h> #include <net/ncsi.h> #include <linux/skbuff.h> diff --git a/net/ncsi/ncsi-pkt.h b/net/ncsi/ncsi-pkt.h index 91b4b66438df..0f2087c8d42a 100644 --- a/net/ncsi/ncsi-pkt.h +++ b/net/ncsi/ncsi-pkt.h @@ -151,6 +151,20 @@ struct ncsi_cmd_snfc_pkt { unsigned char pad[22]; }; +/* OEM Request Command as per NCSI Specification */ +struct ncsi_cmd_oem_pkt { + struct ncsi_cmd_pkt_hdr cmd; /* Command header */ + __be32 mfr_id; /* Manufacturer ID */ + unsigned char data[]; /* OEM Payload Data */ +}; + +/* OEM Response Packet as per NCSI Specification */ +struct ncsi_rsp_oem_pkt { + struct ncsi_rsp_pkt_hdr rsp; /* Command header */ + __be32 mfr_id; /* Manufacturer ID */ + unsigned char data[]; /* Payload data */ +}; + /* Get Link Status */ struct ncsi_rsp_gls_pkt { struct ncsi_rsp_pkt_hdr rsp; /* Response header */ diff --git a/net/ncsi/ncsi-rsp.c b/net/ncsi/ncsi-rsp.c index 930c1d3796f0..d66b34749027 100644 --- a/net/ncsi/ncsi-rsp.c +++ b/net/ncsi/ncsi-rsp.c @@ -596,6 +596,47 @@ static int ncsi_rsp_handler_snfc(struct ncsi_request *nr) return 0; } +static struct ncsi_rsp_oem_handler { + unsigned int mfr_id; + int (*handler)(struct ncsi_request *nr); +} ncsi_rsp_oem_handlers[] = { + { NCSI_OEM_MFR_MLX_ID, NULL }, + { NCSI_OEM_MFR_BCM_ID, NULL } +}; + +/* Response handler for OEM command */ +static int ncsi_rsp_handler_oem(struct ncsi_request *nr) +{ + struct ncsi_rsp_oem_pkt *rsp; + struct ncsi_rsp_oem_handler *nrh = NULL; + unsigned int mfr_id, i; + +
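/*
 * Illustration, not part of this patch: the lookup below dispatches on
 * the manufacturer ID extracted from the response. A vendor would hook
 * in by pointing its ncsi_rsp_oem_handlers[] entry at a function of
 * this shape (the name ncsi_rsp_handler_oem_bcm is hypothetical here):
 *
 *	static int ncsi_rsp_handler_oem_bcm(struct ncsi_request *nr)
 *	{
 *		struct ncsi_rsp_oem_pkt *rsp;
 *
 *		rsp = (struct ncsi_rsp_oem_pkt *)skb_network_header(nr->rsp);
 *		// vendor-specific parsing of rsp->data would go here
 *		return 0;
 *	}
 *
 * Both table entries are still NULL at this point in the series, so
 * every OEM response currently takes the -ENOENT path below.
 */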
/* Get the response header */ + rsp = (struct ncsi_rsp_oem_pkt *)skb_network_header(nr->rsp); + mfr_id = ntohl(rsp->mfr_id); + + /* Check the manufacturer ID and find the handler */ + for (i = 0; i < ARRAY_SIZE(ncsi_rsp_oem_handlers); i++) { + if (ncsi_rsp_oem_handlers[i].mfr_id == mfr_id) { + if (ncsi_rsp_oem_handlers[i].handler) + nrh = &ncsi_rsp_oem_handlers[i]; + else + nrh = NULL; + + break; + } + } + + if (!nrh) { + netdev_err(nr->ndp->ndev.dev, "Received unrecognized OEM packet with MFR-ID (0x%x)\n", + mfr_id); + return -ENOENT; + } + + /* Process the packet */ + return nrh->handler(nr); +} + static int ncsi_rsp_handler_gvi(struct ncsi_request *nr) { struct ncsi_rsp_gvi_pkt *rsp; @@ -932,7 +973,7 @@ static struct ncsi_rsp_handler { { NCSI_PKT_RSP_GNS, 172, ncsi_rsp_handler_gns }, { NCSI_PKT_RSP_GNPTS, 172, ncsi_rsp_handler_gnpts }, { NCSI_PKT_RSP_GPS, 8, ncsi_rsp_handler_gps }, - { NCSI_PKT_RSP_OEM, 0, NULL }, + { NCSI_PKT_RSP_OEM, -1, ncsi_rsp_handler_oem }, { NCSI_PKT_RSP_PLDM, 0, NULL }, { NCSI_PKT_RSP_GPUUID, 20, ncsi_rsp_handler_gpuuid } }; diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index f61c306de1d0..2ab870ef233a 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig @@ -625,6 +625,13 @@ config NFT_FIB_INET The lookup will be delegated to the IPv4 or IPv6 FIB depending on the protocol of the packet. +config NFT_XFRM + tristate "Netfilter nf_tables xfrm/IPSec security association matching" + depends on XFRM + help + This option adds an expression that you can use to extract properties + of a packet's security association. + config NFT_SOCKET tristate "Netfilter nf_tables socket match support" depends on IPV6 || IPV6=n diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile index 16895e045b66..4ddf3ef51ece 100644 --- a/net/netfilter/Makefile +++ b/net/netfilter/Makefile @@ -113,6 +113,7 @@ obj-$(CONFIG_NFT_FIB_NETDEV) += nft_fib_netdev.o obj-$(CONFIG_NFT_SOCKET) += nft_socket.o obj-$(CONFIG_NFT_OSF) += nft_osf.o obj-$(CONFIG_NFT_TPROXY) += nft_tproxy.o +obj-$(CONFIG_NFT_XFRM) += nft_xfrm.o # nf_tables netdev obj-$(CONFIG_NFT_DUP_NETDEV) += nft_dup_netdev.o diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index 7ca926a03b81..fe9abf3cc10a 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c @@ -1686,8 +1686,7 @@ ip_vs_in_icmp(struct netns_ipvs *ipvs, struct sk_buff *skb, int *related, skb_reset_network_header(skb); IP_VS_DBG(12, "ICMP for IPIP %pI4->%pI4: mtu=%u\n", &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr, mtu); - ipv4_update_pmtu(skb, ipvs->net, - mtu, 0, 0, 0, 0); + ipv4_update_pmtu(skb, ipvs->net, mtu, 0, 0); /* Client uses PMTUD?
*/ if (!(frag_off & htons(IP_DF))) goto ignore_ipip; diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index 62eefea48973..83395bf6dc35 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -3234,7 +3234,7 @@ static int ip_vs_genl_dump_dests(struct sk_buff *skb, /* Try to find the service for which to dump destinations */ if (nlmsg_parse(cb->nlh, GENL_HDRLEN, attrs, IPVS_CMD_ATTR_MAX, - ip_vs_cmd_policy, NULL)) + ip_vs_cmd_policy, cb->extack)) goto out_err; diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index a676d5f76bdc..ca1168d67fac 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -379,7 +379,7 @@ bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff, return false; } - l4proto = __nf_ct_l4proto_find(l3num, protonum); + l4proto = __nf_ct_l4proto_find(protonum); ret = nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, net, tuple, l4proto); @@ -539,7 +539,7 @@ destroy_conntrack(struct nf_conntrack *nfct) nf_ct_tmpl_free(ct); return; } - l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct)); + l4proto = __nf_ct_l4proto_find(nf_ct_protonum(ct)); if (l4proto->destroy) l4proto->destroy(ct); @@ -840,7 +840,7 @@ static int nf_ct_resolve_clash(struct net *net, struct sk_buff *skb, enum ip_conntrack_info oldinfo; struct nf_conn *loser_ct = nf_ct_get(skb, &oldinfo); - l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct)); + l4proto = __nf_ct_l4proto_find(nf_ct_protonum(ct)); if (l4proto->allow_clash && !nf_ct_is_dying(ct) && atomic_inc_not_zero(&ct->ct_general.use)) { @@ -1109,7 +1109,7 @@ static bool gc_worker_can_early_drop(const struct nf_conn *ct) if (!test_bit(IPS_ASSURED_BIT, &ct->status)) return true; - l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct)); + l4proto = __nf_ct_l4proto_find(nf_ct_protonum(ct)); if (l4proto->can_early_drop && l4proto->can_early_drop(ct)) return true; @@ -1370,12 +1370,6 @@ init_conntrack(struct net *net, struct nf_conn *tmpl, timeout_ext = tmpl ? 
nf_ct_timeout_find(tmpl) : NULL; - if (!l4proto->new(ct, skb, dataoff)) { - nf_conntrack_free(ct); - pr_debug("can't track with proto module\n"); - return NULL; - } - if (timeout_ext) nf_ct_timeout_ext_add(ct, rcu_dereference(timeout_ext->timeout), GFP_ATOMIC); @@ -1436,12 +1430,12 @@ init_conntrack(struct net *net, struct nf_conn *tmpl, /* On success, returns 0, sets skb->_nfct | ctinfo */ static int -resolve_normal_ct(struct net *net, struct nf_conn *tmpl, +resolve_normal_ct(struct nf_conn *tmpl, struct sk_buff *skb, unsigned int dataoff, - u_int16_t l3num, u_int8_t protonum, - const struct nf_conntrack_l4proto *l4proto) + const struct nf_conntrack_l4proto *l4proto, + const struct nf_hook_state *state) { const struct nf_conntrack_zone *zone; struct nf_conntrack_tuple tuple; @@ -1452,17 +1446,18 @@ resolve_normal_ct(struct net *net, struct nf_conn *tmpl, u32 hash; if (!nf_ct_get_tuple(skb, skb_network_offset(skb), - dataoff, l3num, protonum, net, &tuple, l4proto)) { + dataoff, state->pf, protonum, state->net, + &tuple, l4proto)) { pr_debug("Can't get tuple\n"); return 0; } /* look for tuple match */ zone = nf_ct_zone_tmpl(tmpl, skb, &tmp); - hash = hash_conntrack_raw(&tuple, net); - h = __nf_conntrack_find_get(net, zone, &tuple, hash); + hash = hash_conntrack_raw(&tuple, state->net); + h = __nf_conntrack_find_get(state->net, zone, &tuple, hash); if (!h) { - h = init_conntrack(net, tmpl, &tuple, l4proto, + h = init_conntrack(state->net, tmpl, &tuple, l4proto, skb, dataoff, hash); if (!h) return 0; @@ -1491,13 +1486,45 @@ resolve_normal_ct(struct net *net, struct nf_conn *tmpl, return 0; } +/* + * icmp packets need special treatment to handle error messages that are + * related to a connection. + * + * Callers need to check if skb has a conntrack assigned when this + * helper returns; in such case skb belongs to an already known connection. + */ +static unsigned int __cold +nf_conntrack_handle_icmp(struct nf_conn *tmpl, + struct sk_buff *skb, + unsigned int dataoff, + u8 protonum, + const struct nf_hook_state *state) +{ + int ret; + + if (state->pf == NFPROTO_IPV4 && protonum == IPPROTO_ICMP) + ret = nf_conntrack_icmpv4_error(tmpl, skb, dataoff, state); +#if IS_ENABLED(CONFIG_IPV6) + else if (state->pf == NFPROTO_IPV6 && protonum == IPPROTO_ICMPV6) + ret = nf_conntrack_icmpv6_error(tmpl, skb, dataoff, state); +#endif + else + return NF_ACCEPT; + + if (ret <= 0) { + NF_CT_STAT_INC_ATOMIC(state->net, error); + NF_CT_STAT_INC_ATOMIC(state->net, invalid); + } + + return ret; +} + unsigned int -nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum, - struct sk_buff *skb) +nf_conntrack_in(struct sk_buff *skb, const struct nf_hook_state *state) { const struct nf_conntrack_l4proto *l4proto; - struct nf_conn *ct, *tmpl; enum ip_conntrack_info ctinfo; + struct nf_conn *ct, *tmpl; u_int8_t protonum; int dataoff, ret; @@ -1506,32 +1533,28 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum, /* Previously seen (loopback or untracked)? Ignore. 
*/ if ((tmpl && !nf_ct_is_template(tmpl)) || ctinfo == IP_CT_UNTRACKED) { - NF_CT_STAT_INC_ATOMIC(net, ignore); + NF_CT_STAT_INC_ATOMIC(state->net, ignore); return NF_ACCEPT; } skb->_nfct = 0; } /* rcu_read_lock()ed by nf_hook_thresh */ - dataoff = get_l4proto(skb, skb_network_offset(skb), pf, &protonum); + dataoff = get_l4proto(skb, skb_network_offset(skb), state->pf, &protonum); if (dataoff <= 0) { pr_debug("not prepared to track yet or error occurred\n"); - NF_CT_STAT_INC_ATOMIC(net, error); - NF_CT_STAT_INC_ATOMIC(net, invalid); + NF_CT_STAT_INC_ATOMIC(state->net, error); + NF_CT_STAT_INC_ATOMIC(state->net, invalid); ret = NF_ACCEPT; goto out; } - l4proto = __nf_ct_l4proto_find(pf, protonum); + l4proto = __nf_ct_l4proto_find(protonum); - /* It may be an special packet, error, unclean... - * inverse of the return code tells to the netfilter - * core what to do with the packet. */ - if (l4proto->error != NULL) { - ret = l4proto->error(net, tmpl, skb, dataoff, pf, hooknum); + if (protonum == IPPROTO_ICMP || protonum == IPPROTO_ICMPV6) { + ret = nf_conntrack_handle_icmp(tmpl, skb, dataoff, + protonum, state); if (ret <= 0) { - NF_CT_STAT_INC_ATOMIC(net, error); - NF_CT_STAT_INC_ATOMIC(net, invalid); ret = -ret; goto out; } @@ -1540,10 +1563,11 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum, goto out; } repeat: - ret = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum, l4proto); + ret = resolve_normal_ct(tmpl, skb, dataoff, + protonum, l4proto, state); if (ret < 0) { /* Too stressed to deal. */ - NF_CT_STAT_INC_ATOMIC(net, drop); + NF_CT_STAT_INC_ATOMIC(state->net, drop); ret = NF_DROP; goto out; } @@ -1551,21 +1575,21 @@ repeat: ct = nf_ct_get(skb, &ctinfo); if (!ct) { /* Not valid part of a connection */ - NF_CT_STAT_INC_ATOMIC(net, invalid); + NF_CT_STAT_INC_ATOMIC(state->net, invalid); ret = NF_ACCEPT; goto out; } - ret = l4proto->packet(ct, skb, dataoff, ctinfo); + ret = l4proto->packet(ct, skb, dataoff, ctinfo, state); if (ret <= 0) { /* Invalid: inverse of the return code tells * the netfilter core what to do */ pr_debug("nf_conntrack_in: Can't track with proto module\n"); nf_conntrack_put(&ct->ct_general); skb->_nfct = 0; - NF_CT_STAT_INC_ATOMIC(net, invalid); + NF_CT_STAT_INC_ATOMIC(state->net, invalid); if (ret == -NF_DROP) - NF_CT_STAT_INC_ATOMIC(net, drop); + NF_CT_STAT_INC_ATOMIC(state->net, drop); /* Special case: TCP tracker reports an attempt to reopen a * closed/aborted connection. We have to go back and create a * fresh conntrack. 
@@ -1594,8 +1618,7 @@ bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse, rcu_read_lock(); ret = nf_ct_invert_tuple(inverse, orig, - __nf_ct_l4proto_find(orig->src.l3num, - orig->dst.protonum)); + __nf_ct_l4proto_find(orig->dst.protonum)); rcu_read_unlock(); return ret; } @@ -1752,7 +1775,7 @@ static int nf_conntrack_update(struct net *net, struct sk_buff *skb) if (dataoff <= 0) return -1; - l4proto = nf_ct_l4proto_find_get(l3num, l4num); + l4proto = nf_ct_l4proto_find_get(l4num); if (!nf_ct_get_tuple(skb, skb_network_offset(skb), dataoff, l3num, l4num, net, &tuple, l4proto)) diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c index 27b84231db10..3034038bfdf0 100644 --- a/net/netfilter/nf_conntrack_expect.c +++ b/net/netfilter/nf_conntrack_expect.c @@ -610,8 +610,7 @@ static int exp_seq_show(struct seq_file *s, void *v) expect->tuple.src.l3num, expect->tuple.dst.protonum); print_tuple(s, &expect->tuple, - __nf_ct_l4proto_find(expect->tuple.src.l3num, - expect->tuple.dst.protonum)); + __nf_ct_l4proto_find(expect->tuple.dst.protonum)); if (expect->flags & NF_CT_EXPECT_PERMANENT) { seq_puts(s, "PERMANENT"); diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 036207ecaf16..4ae8e528943a 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -135,8 +135,7 @@ static int ctnetlink_dump_tuples(struct sk_buff *skb, ret = ctnetlink_dump_tuples_ip(skb, tuple); if (ret >= 0) { - l4proto = __nf_ct_l4proto_find(tuple->src.l3num, - tuple->dst.protonum); + l4proto = __nf_ct_l4proto_find(tuple->dst.protonum); ret = ctnetlink_dump_tuples_proto(skb, tuple, l4proto); } rcu_read_unlock(); @@ -184,7 +183,7 @@ static int ctnetlink_dump_protoinfo(struct sk_buff *skb, struct nf_conn *ct) struct nlattr *nest_proto; int ret; - l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct)); + l4proto = __nf_ct_l4proto_find(nf_ct_protonum(ct)); if (!l4proto->to_nlattr) return 0; @@ -592,7 +591,7 @@ static size_t ctnetlink_proto_size(const struct nf_conn *ct) len = nla_policy_len(cta_ip_nla_policy, CTA_IP_MAX + 1); len *= 3u; /* ORIG, REPLY, MASTER */ - l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct)); + l4proto = __nf_ct_l4proto_find(nf_ct_protonum(ct)); len += l4proto->nlattr_size; if (l4proto->nlattr_tuple_size) { len4 = l4proto->nlattr_tuple_size(); @@ -821,6 +820,7 @@ static int ctnetlink_done(struct netlink_callback *cb) } struct ctnetlink_filter { + u8 family; struct { u_int32_t val; u_int32_t mask; @@ -828,31 +828,39 @@ struct ctnetlink_filter { }; static struct ctnetlink_filter * -ctnetlink_alloc_filter(const struct nlattr * const cda[]) +ctnetlink_alloc_filter(const struct nlattr * const cda[], u8 family) { -#ifdef CONFIG_NF_CONNTRACK_MARK struct ctnetlink_filter *filter; +#ifndef CONFIG_NF_CONNTRACK_MARK + if (cda[CTA_MARK] && cda[CTA_MARK_MASK]) + return ERR_PTR(-EOPNOTSUPP); +#endif + filter = kzalloc(sizeof(*filter), GFP_KERNEL); if (filter == NULL) return ERR_PTR(-ENOMEM); - filter->mark.val = ntohl(nla_get_be32(cda[CTA_MARK])); - filter->mark.mask = ntohl(nla_get_be32(cda[CTA_MARK_MASK])); + filter->family = family; - return filter; -#else - return ERR_PTR(-EOPNOTSUPP); +#ifdef CONFIG_NF_CONNTRACK_MARK + if (cda[CTA_MARK] && cda[CTA_MARK_MASK]) { + filter->mark.val = ntohl(nla_get_be32(cda[CTA_MARK])); + filter->mark.mask = ntohl(nla_get_be32(cda[CTA_MARK_MASK])); + } #endif + return filter; } static int ctnetlink_start(struct netlink_callback *cb) { 
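/*
 * Hypothetical usage sketch, not part of this patch: with
 * ctnetlink_alloc_filter() now taking the address family, a dump
 * request filters by L3 protocol whenever nfgen_family is non-zero,
 * in addition to the optional mark/mask pair. From userspace (a
 * libmnl-style request), asking for IPv4 entries only would look like:
 *
 *	struct nfgenmsg *nfh;
 *
 *	nfh = mnl_nlmsg_put_extra_header(nlh, sizeof(*nfh));
 *	nfh->nfgen_family = AF_INET;	// matched in ctnetlink_filter_match()
 *	nfh->version = NFNETLINK_V0;
 *	nfh->res_id = 0;
 *
 * Previously the family check was applied inline in
 * ctnetlink_dump_table(); moving it into the filter lets flush
 * requests honour it as well.
 */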
const struct nlattr * const *cda = cb->data; struct ctnetlink_filter *filter = NULL; + struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh); + u8 family = nfmsg->nfgen_family; - if (cda[CTA_MARK] && cda[CTA_MARK_MASK]) { - filter = ctnetlink_alloc_filter(cda); + if (family || (cda[CTA_MARK] && cda[CTA_MARK_MASK])) { + filter = ctnetlink_alloc_filter(cda, family); if (IS_ERR(filter)) return PTR_ERR(filter); } @@ -866,13 +874,24 @@ static int ctnetlink_filter_match(struct nf_conn *ct, void *data) struct ctnetlink_filter *filter = data; if (filter == NULL) - return 1; + goto out; + + /* Match entries of a given L3 protocol number. + * If it is not specified, ie. l3proto == 0, + * then match everything. + */ + if (filter->family && nf_ct_l3num(ct) != filter->family) + goto ignore_entry; #ifdef CONFIG_NF_CONNTRACK_MARK - if ((ct->mark & filter->mark.mask) == filter->mark.val) - return 1; + if ((ct->mark & filter->mark.mask) != filter->mark.val) + goto ignore_entry; #endif +out: + return 1; + +ignore_entry: return 0; } @@ -883,8 +902,6 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb) struct nf_conn *ct, *last; struct nf_conntrack_tuple_hash *h; struct hlist_nulls_node *n; - struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh); - u_int8_t l3proto = nfmsg->nfgen_family; struct nf_conn *nf_ct_evict[8]; int res, i; spinlock_t *lockp; @@ -923,11 +940,6 @@ restart: if (!net_eq(net, nf_ct_net(ct))) continue; - /* Dump entries of a given L3 protocol number. - * If it is not specified, ie. l3proto == 0, - * then dump everything. */ - if (l3proto && nf_ct_l3num(ct) != l3proto) - continue; if (cb->args[1]) { if (ct != last) continue; @@ -1048,7 +1060,7 @@ static int ctnetlink_parse_tuple_proto(struct nlattr *attr, tuple->dst.protonum = nla_get_u8(tb[CTA_PROTO_NUM]); rcu_read_lock(); - l4proto = __nf_ct_l4proto_find(tuple->src.l3num, tuple->dst.protonum); + l4proto = __nf_ct_l4proto_find(tuple->dst.protonum); if (likely(l4proto->nlattr_to_tuple)) { ret = nla_validate_nested(attr, CTA_PROTO_MAX, @@ -1213,12 +1225,12 @@ static int ctnetlink_flush_iterate(struct nf_conn *ct, void *data) static int ctnetlink_flush_conntrack(struct net *net, const struct nlattr * const cda[], - u32 portid, int report) + u32 portid, int report, u8 family) { struct ctnetlink_filter *filter = NULL; - if (cda[CTA_MARK] && cda[CTA_MARK_MASK]) { - filter = ctnetlink_alloc_filter(cda); + if (family || (cda[CTA_MARK] && cda[CTA_MARK_MASK])) { + filter = ctnetlink_alloc_filter(cda, family); if (IS_ERR(filter)) return PTR_ERR(filter); } @@ -1257,7 +1269,7 @@ static int ctnetlink_del_conntrack(struct net *net, struct sock *ctnl, else { return ctnetlink_flush_conntrack(net, cda, NETLINK_CB(skb).portid, - nlmsg_report(nlh)); + nlmsg_report(nlh), u3); } if (err < 0) @@ -1696,7 +1708,7 @@ static int ctnetlink_change_protoinfo(struct nf_conn *ct, return err; rcu_read_lock(); - l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct)); + l4proto = __nf_ct_l4proto_find(nf_ct_protonum(ct)); if (l4proto->from_nlattr) err = l4proto->from_nlattr(tb, ct); rcu_read_unlock(); @@ -2656,8 +2668,7 @@ static int ctnetlink_exp_dump_mask(struct sk_buff *skb, rcu_read_lock(); ret = ctnetlink_dump_tuples_ip(skb, &m); if (ret >= 0) { - l4proto = __nf_ct_l4proto_find(tuple->src.l3num, - tuple->dst.protonum); + l4proto = __nf_ct_l4proto_find(tuple->dst.protonum); ret = ctnetlink_dump_tuples_proto(skb, &m, l4proto); } rcu_read_unlock(); diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c index 
51c5d7eec0a3..40643af7137e 100644 --- a/net/netfilter/nf_conntrack_proto.c +++ b/net/netfilter/nf_conntrack_proto.c @@ -43,7 +43,7 @@ extern unsigned int nf_conntrack_net_id; -static struct nf_conntrack_l4proto __rcu **nf_ct_protos[NFPROTO_NUMPROTO] __read_mostly; +static struct nf_conntrack_l4proto __rcu *nf_ct_protos[MAX_NF_CT_PROTO + 1] __read_mostly; static DEFINE_MUTEX(nf_ct_proto_mutex); @@ -124,23 +124,21 @@ void nf_ct_l4proto_log_invalid(const struct sk_buff *skb, EXPORT_SYMBOL_GPL(nf_ct_l4proto_log_invalid); #endif -const struct nf_conntrack_l4proto * -__nf_ct_l4proto_find(u_int16_t l3proto, u_int8_t l4proto) +const struct nf_conntrack_l4proto *__nf_ct_l4proto_find(u8 l4proto) { - if (unlikely(l3proto >= NFPROTO_NUMPROTO || nf_ct_protos[l3proto] == NULL)) + if (unlikely(l4proto >= ARRAY_SIZE(nf_ct_protos))) return &nf_conntrack_l4proto_generic; - return rcu_dereference(nf_ct_protos[l3proto][l4proto]); + return rcu_dereference(nf_ct_protos[l4proto]); } EXPORT_SYMBOL_GPL(__nf_ct_l4proto_find); -const struct nf_conntrack_l4proto * -nf_ct_l4proto_find_get(u_int16_t l3num, u_int8_t l4num) +const struct nf_conntrack_l4proto *nf_ct_l4proto_find_get(u8 l4num) { const struct nf_conntrack_l4proto *p; rcu_read_lock(); - p = __nf_ct_l4proto_find(l3num, l4num); + p = __nf_ct_l4proto_find(l4num); if (!try_module_get(p->me)) p = &nf_conntrack_l4proto_generic; rcu_read_unlock(); @@ -159,8 +157,7 @@ static int kill_l4proto(struct nf_conn *i, void *data) { const struct nf_conntrack_l4proto *l4proto; l4proto = data; - return nf_ct_protonum(i) == l4proto->l4proto && - nf_ct_l3num(i) == l4proto->l3proto; + return nf_ct_protonum(i) == l4proto->l4proto; } static struct nf_proto_net *nf_ct_l4proto_net(struct net *net, @@ -219,48 +216,20 @@ int nf_ct_l4proto_register_one(const struct nf_conntrack_l4proto *l4proto) { int ret = 0; - if (l4proto->l3proto >= ARRAY_SIZE(nf_ct_protos)) - return -EBUSY; - if ((l4proto->to_nlattr && l4proto->nlattr_size == 0) || (l4proto->tuple_to_nlattr && !l4proto->nlattr_tuple_size)) return -EINVAL; mutex_lock(&nf_ct_proto_mutex); - if (!nf_ct_protos[l4proto->l3proto]) { - /* l3proto may be loaded latter. */ - struct nf_conntrack_l4proto __rcu **proto_array; - int i; - - proto_array = - kmalloc_array(MAX_NF_CT_PROTO, - sizeof(struct nf_conntrack_l4proto *), - GFP_KERNEL); - if (proto_array == NULL) { - ret = -ENOMEM; - goto out_unlock; - } - - for (i = 0; i < MAX_NF_CT_PROTO; i++) - RCU_INIT_POINTER(proto_array[i], - &nf_conntrack_l4proto_generic); - - /* Before making proto_array visible to lockless readers, - * we must make sure its content is committed to memory. 
- */ - smp_wmb(); - - nf_ct_protos[l4proto->l3proto] = proto_array; - } else if (rcu_dereference_protected( - nf_ct_protos[l4proto->l3proto][l4proto->l4proto], + if (rcu_dereference_protected( + nf_ct_protos[l4proto->l4proto], lockdep_is_held(&nf_ct_proto_mutex) ) != &nf_conntrack_l4proto_generic) { ret = -EBUSY; goto out_unlock; } - rcu_assign_pointer(nf_ct_protos[l4proto->l3proto][l4proto->l4proto], - l4proto); + rcu_assign_pointer(nf_ct_protos[l4proto->l4proto], l4proto); out_unlock: mutex_unlock(&nf_ct_proto_mutex); return ret; @@ -274,7 +243,7 @@ int nf_ct_l4proto_pernet_register_one(struct net *net, struct nf_proto_net *pn = NULL; if (l4proto->init_net) { - ret = l4proto->init_net(net, l4proto->l3proto); + ret = l4proto->init_net(net); if (ret < 0) goto out; } @@ -296,13 +265,13 @@ EXPORT_SYMBOL_GPL(nf_ct_l4proto_pernet_register_one); static void __nf_ct_l4proto_unregister_one(const struct nf_conntrack_l4proto *l4proto) { - BUG_ON(l4proto->l3proto >= ARRAY_SIZE(nf_ct_protos)); + BUG_ON(l4proto->l4proto >= ARRAY_SIZE(nf_ct_protos)); BUG_ON(rcu_dereference_protected( - nf_ct_protos[l4proto->l3proto][l4proto->l4proto], + nf_ct_protos[l4proto->l4proto], lockdep_is_held(&nf_ct_proto_mutex) ) != l4proto); - rcu_assign_pointer(nf_ct_protos[l4proto->l3proto][l4proto->l4proto], + rcu_assign_pointer(nf_ct_protos[l4proto->l4proto], &nf_conntrack_l4proto_generic); } @@ -352,7 +321,7 @@ static int nf_ct_l4proto_register(const struct nf_conntrack_l4proto * const l4proto[], unsigned int num_proto) { - int ret = -EINVAL, ver; + int ret = -EINVAL; unsigned int i; for (i = 0; i < num_proto; i++) { @@ -361,9 +330,8 @@ nf_ct_l4proto_register(const struct nf_conntrack_l4proto * const l4proto[], break; } if (i != num_proto) { - ver = l4proto[i]->l3proto == PF_INET6 ? 6 : 4; - pr_err("nf_conntrack_ipv%d: can't register l4 %d proto.\n", - ver, l4proto[i]->l4proto); + pr_err("nf_conntrack: can't register l4 %d proto.\n", + l4proto[i]->l4proto); nf_ct_l4proto_unregister(l4proto, i); } return ret; @@ -382,9 +350,8 @@ int nf_ct_l4proto_pernet_register(struct net *net, break; } if (i != num_proto) { - pr_err("nf_conntrack_proto_%d %d: pernet registration failed\n", - l4proto[i]->l4proto, - l4proto[i]->l3proto == PF_INET6 ? 
6 : 4); + pr_err("nf_conntrack %d: pernet registration failed\n", + l4proto[i]->l4proto); nf_ct_l4proto_pernet_unregister(net, l4proto, i); } return ret; @@ -455,7 +422,7 @@ static unsigned int ipv4_conntrack_in(void *priv, struct sk_buff *skb, const struct nf_hook_state *state) { - return nf_conntrack_in(state->net, PF_INET, state->hook, skb); + return nf_conntrack_in(skb, state); } static unsigned int ipv4_conntrack_local(void *priv, @@ -477,7 +444,7 @@ static unsigned int ipv4_conntrack_local(void *priv, return NF_ACCEPT; } - return nf_conntrack_in(state->net, PF_INET, state->hook, skb); + return nf_conntrack_in(skb, state); } /* Connection tracking may drop packets, but never alters them, so @@ -690,14 +657,14 @@ static unsigned int ipv6_conntrack_in(void *priv, struct sk_buff *skb, const struct nf_hook_state *state) { - return nf_conntrack_in(state->net, PF_INET6, state->hook, skb); + return nf_conntrack_in(skb, state); } static unsigned int ipv6_conntrack_local(void *priv, struct sk_buff *skb, const struct nf_hook_state *state) { - return nf_conntrack_in(state->net, PF_INET6, state->hook, skb); + return nf_conntrack_in(skb, state); } static unsigned int ipv6_helper(void *priv, @@ -911,37 +878,26 @@ void nf_ct_netns_put(struct net *net, uint8_t nfproto) EXPORT_SYMBOL_GPL(nf_ct_netns_put); static const struct nf_conntrack_l4proto * const builtin_l4proto[] = { - &nf_conntrack_l4proto_tcp4, - &nf_conntrack_l4proto_udp4, + &nf_conntrack_l4proto_tcp, + &nf_conntrack_l4proto_udp, &nf_conntrack_l4proto_icmp, #ifdef CONFIG_NF_CT_PROTO_DCCP - &nf_conntrack_l4proto_dccp4, + &nf_conntrack_l4proto_dccp, #endif #ifdef CONFIG_NF_CT_PROTO_SCTP - &nf_conntrack_l4proto_sctp4, + &nf_conntrack_l4proto_sctp, #endif #ifdef CONFIG_NF_CT_PROTO_UDPLITE - &nf_conntrack_l4proto_udplite4, + &nf_conntrack_l4proto_udplite, #endif #if IS_ENABLED(CONFIG_IPV6) - &nf_conntrack_l4proto_tcp6, - &nf_conntrack_l4proto_udp6, &nf_conntrack_l4proto_icmpv6, -#ifdef CONFIG_NF_CT_PROTO_DCCP - &nf_conntrack_l4proto_dccp6, -#endif -#ifdef CONFIG_NF_CT_PROTO_SCTP - &nf_conntrack_l4proto_sctp6, -#endif -#ifdef CONFIG_NF_CT_PROTO_UDPLITE - &nf_conntrack_l4proto_udplite6, -#endif #endif /* CONFIG_IPV6 */ }; int nf_conntrack_proto_init(void) { - int ret = 0; + int ret = 0, i; ret = nf_register_sockopt(&so_getorigdst); if (ret < 0) @@ -952,6 +908,11 @@ int nf_conntrack_proto_init(void) if (ret < 0) goto cleanup_sockopt; #endif + + for (i = 0; i < ARRAY_SIZE(nf_ct_protos); i++) + RCU_INIT_POINTER(nf_ct_protos[i], + &nf_conntrack_l4proto_generic); + ret = nf_ct_l4proto_register(builtin_l4proto, ARRAY_SIZE(builtin_l4proto)); if (ret < 0) @@ -969,17 +930,10 @@ cleanup_sockopt: void nf_conntrack_proto_fini(void) { - unsigned int i; - nf_unregister_sockopt(&so_getorigdst); #if IS_ENABLED(CONFIG_IPV6) nf_unregister_sockopt(&so_getorigdst6); #endif - /* No need to call nf_ct_l4proto_unregister(), the register - * tables are free'd here anyway. 
- */ - for (i = 0; i < ARRAY_SIZE(nf_ct_protos); i++) - kfree(nf_ct_protos[i]); } int nf_conntrack_proto_pernet_init(struct net *net) @@ -988,8 +942,7 @@ int nf_conntrack_proto_pernet_init(struct net *net) struct nf_proto_net *pn = nf_ct_l4proto_net(net, &nf_conntrack_l4proto_generic); - err = nf_conntrack_l4proto_generic.init_net(net, - nf_conntrack_l4proto_generic.l3proto); + err = nf_conntrack_l4proto_generic.init_net(net); if (err < 0) return err; err = nf_ct_l4proto_register_sysctl(net, diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c index f3f91ed2c21a..171e9e122e5f 100644 --- a/net/netfilter/nf_conntrack_proto_dccp.c +++ b/net/netfilter/nf_conntrack_proto_dccp.c @@ -389,18 +389,15 @@ static inline struct nf_dccp_net *dccp_pernet(struct net *net) return &net->ct.nf_ct_proto.dccp; } -static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb, - unsigned int dataoff) +static noinline bool +dccp_new(struct nf_conn *ct, const struct sk_buff *skb, + const struct dccp_hdr *dh) { struct net *net = nf_ct_net(ct); struct nf_dccp_net *dn; - struct dccp_hdr _dh, *dh; const char *msg; u_int8_t state; - dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh); - BUG_ON(dh == NULL); - state = dccp_state_table[CT_DCCP_ROLE_CLIENT][dh->dccph_type][CT_DCCP_NONE]; switch (state) { default: @@ -438,8 +435,51 @@ static u64 dccp_ack_seq(const struct dccp_hdr *dh) ntohl(dhack->dccph_ack_nr_low); } -static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb, - unsigned int dataoff, enum ip_conntrack_info ctinfo) +static bool dccp_error(const struct dccp_hdr *dh, + struct sk_buff *skb, unsigned int dataoff, + const struct nf_hook_state *state) +{ + unsigned int dccp_len = skb->len - dataoff; + unsigned int cscov; + const char *msg; + + if (dh->dccph_doff * 4 < sizeof(struct dccp_hdr) || + dh->dccph_doff * 4 > dccp_len) { + msg = "nf_ct_dccp: truncated/malformed packet "; + goto out_invalid; + } + + cscov = dccp_len; + if (dh->dccph_cscov) { + cscov = (dh->dccph_cscov - 1) * 4; + if (cscov > dccp_len) { + msg = "nf_ct_dccp: bad checksum coverage "; + goto out_invalid; + } + } + + if (state->hook == NF_INET_PRE_ROUTING && + state->net->ct.sysctl_checksum && + nf_checksum_partial(skb, state->hook, dataoff, cscov, + IPPROTO_DCCP, state->pf)) { + msg = "nf_ct_dccp: bad checksum "; + goto out_invalid; + } + + if (dh->dccph_type >= DCCP_PKT_INVALID) { + msg = "nf_ct_dccp: reserved packet type "; + goto out_invalid; + } + return false; +out_invalid: + nf_l4proto_log_invalid(skb, state->net, state->pf, + IPPROTO_DCCP, "%s", msg); + return true; +} + +static int dccp_packet(struct nf_conn *ct, struct sk_buff *skb, + unsigned int dataoff, enum ip_conntrack_info ctinfo, + const struct nf_hook_state *state) { enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); struct dccp_hdr _dh, *dh; @@ -448,8 +488,15 @@ static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb, unsigned int *timeouts; dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh); - BUG_ON(dh == NULL); + if (!dh) + return NF_DROP; + + if (dccp_error(dh, skb, dataoff, state)) + return -NF_ACCEPT; + type = dh->dccph_type; + if (!nf_ct_is_confirmed(ct) && !dccp_new(ct, skb, dh)) + return -NF_ACCEPT; if (type == DCCP_PKT_RESET && !test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) { @@ -527,55 +574,6 @@ static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb, return NF_ACCEPT; } -static int dccp_error(struct net *net, struct nf_conn *tmpl, - struct sk_buff *skb, unsigned int dataoff, - 
u_int8_t pf, unsigned int hooknum) -{ - struct dccp_hdr _dh, *dh; - unsigned int dccp_len = skb->len - dataoff; - unsigned int cscov; - const char *msg; - - dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh); - if (dh == NULL) { - msg = "nf_ct_dccp: short packet "; - goto out_invalid; - } - - if (dh->dccph_doff * 4 < sizeof(struct dccp_hdr) || - dh->dccph_doff * 4 > dccp_len) { - msg = "nf_ct_dccp: truncated/malformed packet "; - goto out_invalid; - } - - cscov = dccp_len; - if (dh->dccph_cscov) { - cscov = (dh->dccph_cscov - 1) * 4; - if (cscov > dccp_len) { - msg = "nf_ct_dccp: bad checksum coverage "; - goto out_invalid; - } - } - - if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING && - nf_checksum_partial(skb, hooknum, dataoff, cscov, IPPROTO_DCCP, - pf)) { - msg = "nf_ct_dccp: bad checksum "; - goto out_invalid; - } - - if (dh->dccph_type >= DCCP_PKT_INVALID) { - msg = "nf_ct_dccp: reserved packet type "; - goto out_invalid; - } - - return NF_ACCEPT; - -out_invalid: - nf_l4proto_log_invalid(skb, net, pf, IPPROTO_DCCP, "%s", msg); - return -NF_ACCEPT; -} - static bool dccp_can_early_drop(const struct nf_conn *ct) { switch (ct->proto.dccp.state) { @@ -814,7 +812,7 @@ static int dccp_kmemdup_sysctl_table(struct net *net, struct nf_proto_net *pn, return 0; } -static int dccp_init_net(struct net *net, u_int16_t proto) +static int dccp_init_net(struct net *net) { struct nf_dccp_net *dn = dccp_pernet(net); struct nf_proto_net *pn = &dn->pn; @@ -844,45 +842,9 @@ static struct nf_proto_net *dccp_get_net_proto(struct net *net) return &net->ct.nf_ct_proto.dccp.pn; } -const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4 = { - .l3proto = AF_INET, - .l4proto = IPPROTO_DCCP, - .new = dccp_new, - .packet = dccp_packet, - .error = dccp_error, - .can_early_drop = dccp_can_early_drop, -#ifdef CONFIG_NF_CONNTRACK_PROCFS - .print_conntrack = dccp_print_conntrack, -#endif -#if IS_ENABLED(CONFIG_NF_CT_NETLINK) - .nlattr_size = DCCP_NLATTR_SIZE, - .to_nlattr = dccp_to_nlattr, - .from_nlattr = nlattr_to_dccp, - .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, - .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, - .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, - .nla_policy = nf_ct_port_nla_policy, -#endif -#ifdef CONFIG_NF_CONNTRACK_TIMEOUT - .ctnl_timeout = { - .nlattr_to_obj = dccp_timeout_nlattr_to_obj, - .obj_to_nlattr = dccp_timeout_obj_to_nlattr, - .nlattr_max = CTA_TIMEOUT_DCCP_MAX, - .obj_size = sizeof(unsigned int) * CT_DCCP_MAX, - .nla_policy = dccp_timeout_nla_policy, - }, -#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ - .init_net = dccp_init_net, - .get_net_proto = dccp_get_net_proto, -}; -EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_dccp4); - -const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6 = { - .l3proto = AF_INET6, +const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp = { .l4proto = IPPROTO_DCCP, - .new = dccp_new, .packet = dccp_packet, - .error = dccp_error, .can_early_drop = dccp_can_early_drop, #ifdef CONFIG_NF_CONNTRACK_PROCFS .print_conntrack = dccp_print_conntrack, @@ -908,4 +870,3 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6 = { .init_net = dccp_init_net, .get_net_proto = dccp_get_net_proto, }; -EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_dccp6); diff --git a/net/netfilter/nf_conntrack_proto_generic.c b/net/netfilter/nf_conntrack_proto_generic.c index 1df3244ecd07..e10e867e0b55 100644 --- a/net/netfilter/nf_conntrack_proto_generic.c +++ b/net/netfilter/nf_conntrack_proto_generic.c @@ -44,12 +44,19 @@ static bool generic_pkt_to_tuple(const 
struct sk_buff *skb, /* Returns verdict for packet, or -1 for invalid. */ static int generic_packet(struct nf_conn *ct, - const struct sk_buff *skb, + struct sk_buff *skb, unsigned int dataoff, - enum ip_conntrack_info ctinfo) + enum ip_conntrack_info ctinfo, + const struct nf_hook_state *state) { const unsigned int *timeout = nf_ct_timeout_lookup(ct); + if (!nf_generic_should_process(nf_ct_protonum(ct))) { + pr_warn_once("conntrack: generic helper won't handle protocol %d. Please consider loading the specific helper module.\n", + nf_ct_protonum(ct)); + return -NF_ACCEPT; + } + if (!timeout) timeout = &generic_pernet(nf_ct_net(ct))->timeout; @@ -57,19 +64,6 @@ static int generic_packet(struct nf_conn *ct, return NF_ACCEPT; } -/* Called when a new connection for this protocol found. */ -static bool generic_new(struct nf_conn *ct, const struct sk_buff *skb, - unsigned int dataoff) -{ - bool ret; - - ret = nf_generic_should_process(nf_ct_protonum(ct)); - if (!ret) - pr_warn_once("conntrack: generic helper won't handle protocol %d. Please consider loading the specific helper module.\n", - nf_ct_protonum(ct)); - return ret; -} - #ifdef CONFIG_NF_CONNTRACK_TIMEOUT #include <linux/netfilter/nfnetlink.h> @@ -142,7 +136,7 @@ static int generic_kmemdup_sysctl_table(struct nf_proto_net *pn, return 0; } -static int generic_init_net(struct net *net, u_int16_t proto) +static int generic_init_net(struct net *net) { struct nf_generic_net *gn = generic_pernet(net); struct nf_proto_net *pn = &gn->pn; @@ -159,11 +153,9 @@ static struct nf_proto_net *generic_get_net_proto(struct net *net) const struct nf_conntrack_l4proto nf_conntrack_l4proto_generic = { - .l3proto = PF_UNSPEC, .l4proto = 255, .pkt_to_tuple = generic_pkt_to_tuple, .packet = generic_packet, - .new = generic_new, #ifdef CONFIG_NF_CONNTRACK_TIMEOUT .ctnl_timeout = { .nlattr_to_obj = generic_timeout_nlattr_to_obj, diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c index 650eb4fba2c5..9b48dc8b4b88 100644 --- a/net/netfilter/nf_conntrack_proto_gre.c +++ b/net/netfilter/nf_conntrack_proto_gre.c @@ -233,10 +233,26 @@ static unsigned int *gre_get_timeouts(struct net *net) /* Returns verdict for packet, and may modify conntrack */ static int gre_packet(struct nf_conn *ct, - const struct sk_buff *skb, + struct sk_buff *skb, unsigned int dataoff, - enum ip_conntrack_info ctinfo) + enum ip_conntrack_info ctinfo, + const struct nf_hook_state *state) { + if (state->pf != NFPROTO_IPV4) + return -NF_ACCEPT; + + if (!nf_ct_is_confirmed(ct)) { + unsigned int *timeouts = nf_ct_timeout_lookup(ct); + + if (!timeouts) + timeouts = gre_get_timeouts(nf_ct_net(ct)); + + /* initialize to sane value. Ideally a conntrack helper + * (e.g. in case of pptp) is increasing them */ + ct->proto.gre.stream_timeout = timeouts[GRE_CT_REPLIED]; + ct->proto.gre.timeout = timeouts[GRE_CT_UNREPLIED]; + } + /* If we've seen traffic both ways, this is a GRE connection. * Extend timeout. */ if (ct->status & IPS_SEEN_REPLY) { @@ -252,26 +268,6 @@ static int gre_packet(struct nf_conn *ct, return NF_ACCEPT; } -/* Called when a new connection for this protocol found. */ -static bool gre_new(struct nf_conn *ct, const struct sk_buff *skb, - unsigned int dataoff) -{ - unsigned int *timeouts = nf_ct_timeout_lookup(ct); - - if (!timeouts) - timeouts = gre_get_timeouts(nf_ct_net(ct)); - - pr_debug(": "); - nf_ct_dump_tuple(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); - - /* initialize to sane value. Ideally a conntrack helper - * (e.g. 
in case of pptp) is increasing them */ - ct->proto.gre.stream_timeout = timeouts[GRE_CT_REPLIED]; - ct->proto.gre.timeout = timeouts[GRE_CT_UNREPLIED]; - - return true; -} - /* Called when a conntrack entry has already been removed from the hashes * and is about to be deleted from memory */ static void gre_destroy(struct nf_conn *ct) @@ -336,7 +332,7 @@ gre_timeout_nla_policy[CTA_TIMEOUT_GRE_MAX+1] = { }; #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ -static int gre_init_net(struct net *net, u_int16_t proto) +static int gre_init_net(struct net *net) { struct netns_proto_gre *net_gre = gre_pernet(net); int i; @@ -351,14 +347,12 @@ static int gre_init_net(struct net *net, u_int16_t proto) /* protocol helper struct */ static const struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 = { - .l3proto = AF_INET, .l4proto = IPPROTO_GRE, .pkt_to_tuple = gre_pkt_to_tuple, #ifdef CONFIG_NF_CONNTRACK_PROCFS .print_conntrack = gre_print_conntrack, #endif .packet = gre_packet, - .new = gre_new, .destroy = gre_destroy, .me = THIS_MODULE, #if IS_ENABLED(CONFIG_NF_CT_NETLINK) diff --git a/net/netfilter/nf_conntrack_proto_icmp.c b/net/netfilter/nf_conntrack_proto_icmp.c index 43c7e1a217b9..3598520bd19b 100644 --- a/net/netfilter/nf_conntrack_proto_icmp.c +++ b/net/netfilter/nf_conntrack_proto_icmp.c @@ -72,34 +72,17 @@ static bool icmp_invert_tuple(struct nf_conntrack_tuple *tuple, return true; } -static unsigned int *icmp_get_timeouts(struct net *net) -{ - return &icmp_pernet(net)->timeout; -} - /* Returns verdict for packet, or -1 for invalid. */ static int icmp_packet(struct nf_conn *ct, - const struct sk_buff *skb, + struct sk_buff *skb, unsigned int dataoff, - enum ip_conntrack_info ctinfo) + enum ip_conntrack_info ctinfo, + const struct nf_hook_state *state) { /* Do not immediately delete the connection after the first successful reply to avoid excessive conntrackd traffic and also to handle correctly ICMP echo reply duplicates. */ unsigned int *timeout = nf_ct_timeout_lookup(ct); - - if (!timeout) - timeout = icmp_get_timeouts(nf_ct_net(ct)); - - nf_ct_refresh_acct(ct, ctinfo, skb, *timeout); - - return NF_ACCEPT; -} - -/* Called when a new connection for this protocol found. */ -static bool icmp_new(struct nf_conn *ct, const struct sk_buff *skb, - unsigned int dataoff) -{ static const u_int8_t valid_new[] = { [ICMP_ECHO] = 1, [ICMP_TIMESTAMP] = 1, @@ -107,21 +90,29 @@ static bool icmp_new(struct nf_conn *ct, const struct sk_buff *skb, [ICMP_ADDRESS] = 1 }; + if (state->pf != NFPROTO_IPV4) + return -NF_ACCEPT; + if (ct->tuplehash[0].tuple.dst.u.icmp.type >= sizeof(valid_new) || !valid_new[ct->tuplehash[0].tuple.dst.u.icmp.type]) { /* Can't create a new ICMP `conn' with this. 
*/ pr_debug("icmp: can't create new conn with type %u\n", ct->tuplehash[0].tuple.dst.u.icmp.type); nf_ct_dump_tuple_ip(&ct->tuplehash[0].tuple); - return false; + return -NF_ACCEPT; } - return true; + + if (!timeout) + timeout = &icmp_pernet(nf_ct_net(ct))->timeout; + + nf_ct_refresh_acct(ct, ctinfo, skb, *timeout); + return NF_ACCEPT; } /* Returns conntrack if it dealt with ICMP, and filled in skb fields */ static int -icmp_error_message(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb, - unsigned int hooknum) +icmp_error_message(struct nf_conn *tmpl, struct sk_buff *skb, + const struct nf_hook_state *state) { struct nf_conntrack_tuple innertuple, origtuple; const struct nf_conntrack_l4proto *innerproto; @@ -137,13 +128,13 @@ icmp_error_message(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb, if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb) + ip_hdrlen(skb) + sizeof(struct icmphdr), - PF_INET, net, &origtuple)) { + PF_INET, state->net, &origtuple)) { pr_debug("icmp_error_message: failed to get tuple\n"); return -NF_ACCEPT; } /* rcu_read_lock()ed by nf_hook_thresh */ - innerproto = __nf_ct_l4proto_find(PF_INET, origtuple.dst.protonum); + innerproto = __nf_ct_l4proto_find(origtuple.dst.protonum); /* Ordinarily, we'd expect the inverted tupleproto, but it's been preserved inside the ICMP. */ @@ -154,7 +145,7 @@ icmp_error_message(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb, ctinfo = IP_CT_RELATED; - h = nf_conntrack_find_get(net, zone, &innertuple); + h = nf_conntrack_find_get(state->net, zone, &innertuple); if (!h) { pr_debug("icmp_error_message: no match\n"); return -NF_ACCEPT; @@ -168,17 +159,18 @@ icmp_error_message(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb, return NF_ACCEPT; } -static void icmp_error_log(const struct sk_buff *skb, struct net *net, - u8 pf, const char *msg) +static void icmp_error_log(const struct sk_buff *skb, + const struct nf_hook_state *state, + const char *msg) { - nf_l4proto_log_invalid(skb, net, pf, IPPROTO_ICMP, "%s", msg); + nf_l4proto_log_invalid(skb, state->net, state->pf, + IPPROTO_ICMP, "%s", msg); } /* Small and modified version of icmp_rcv */ -static int -icmp_error(struct net *net, struct nf_conn *tmpl, - struct sk_buff *skb, unsigned int dataoff, - u8 pf, unsigned int hooknum) +int nf_conntrack_icmpv4_error(struct nf_conn *tmpl, + struct sk_buff *skb, unsigned int dataoff, + const struct nf_hook_state *state) { const struct icmphdr *icmph; struct icmphdr _ih; @@ -186,14 +178,15 @@ icmp_error(struct net *net, struct nf_conn *tmpl, /* Not enough header? */ icmph = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_ih), &_ih); if (icmph == NULL) { - icmp_error_log(skb, net, pf, "short packet"); + icmp_error_log(skb, state, "short packet"); return -NF_ACCEPT; } /* See ip_conntrack_proto_tcp.c */ - if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING && - nf_ip_checksum(skb, hooknum, dataoff, 0)) { - icmp_error_log(skb, net, pf, "bad hw icmp checksum"); + if (state->net->ct.sysctl_checksum && + state->hook == NF_INET_PRE_ROUTING && + nf_ip_checksum(skb, state->hook, dataoff, 0)) { + icmp_error_log(skb, state, "bad hw icmp checksum"); return -NF_ACCEPT; } @@ -204,7 +197,7 @@ icmp_error(struct net *net, struct nf_conn *tmpl, * discarded. 
*/ if (icmph->type > NR_ICMP_TYPES) { - icmp_error_log(skb, net, pf, "invalid icmp type"); + icmp_error_log(skb, state, "invalid icmp type"); return -NF_ACCEPT; } @@ -216,7 +209,7 @@ icmp_error(struct net *net, struct nf_conn *tmpl, icmph->type != ICMP_REDIRECT) return NF_ACCEPT; - return icmp_error_message(net, tmpl, skb, hooknum); + return icmp_error_message(tmpl, skb, state); } #if IS_ENABLED(CONFIG_NF_CT_NETLINK) @@ -342,7 +335,7 @@ static int icmp_kmemdup_sysctl_table(struct nf_proto_net *pn, return 0; } -static int icmp_init_net(struct net *net, u_int16_t proto) +static int icmp_init_net(struct net *net) { struct nf_icmp_net *in = icmp_pernet(net); struct nf_proto_net *pn = &in->pn; @@ -359,13 +352,10 @@ static struct nf_proto_net *icmp_get_net_proto(struct net *net) const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp = { - .l3proto = PF_INET, .l4proto = IPPROTO_ICMP, .pkt_to_tuple = icmp_pkt_to_tuple, .invert_tuple = icmp_invert_tuple, .packet = icmp_packet, - .new = icmp_new, - .error = icmp_error, .destroy = NULL, .me = NULL, #if IS_ENABLED(CONFIG_NF_CT_NETLINK) diff --git a/net/netfilter/nf_conntrack_proto_icmpv6.c b/net/netfilter/nf_conntrack_proto_icmpv6.c index 97e40f77d678..378618feed5d 100644 --- a/net/netfilter/nf_conntrack_proto_icmpv6.c +++ b/net/netfilter/nf_conntrack_proto_icmpv6.c @@ -92,11 +92,31 @@ static unsigned int *icmpv6_get_timeouts(struct net *net) /* Returns verdict for packet, or -1 for invalid. */ static int icmpv6_packet(struct nf_conn *ct, - const struct sk_buff *skb, - unsigned int dataoff, - enum ip_conntrack_info ctinfo) + struct sk_buff *skb, + unsigned int dataoff, + enum ip_conntrack_info ctinfo, + const struct nf_hook_state *state) { unsigned int *timeout = nf_ct_timeout_lookup(ct); + static const u8 valid_new[] = { + [ICMPV6_ECHO_REQUEST - 128] = 1, + [ICMPV6_NI_QUERY - 128] = 1 + }; + + if (state->pf != NFPROTO_IPV6) + return -NF_ACCEPT; + + if (!nf_ct_is_confirmed(ct)) { + int type = ct->tuplehash[0].tuple.dst.u.icmp.type - 128; + + if (type < 0 || type >= sizeof(valid_new) || !valid_new[type]) { + /* Can't create a new ICMPv6 `conn' with this. */ + pr_debug("icmpv6: can't create new conn with type %u\n", + type + 128); + nf_ct_dump_tuple_ipv6(&ct->tuplehash[0].tuple); + return -NF_ACCEPT; + } + } if (!timeout) timeout = icmpv6_get_timeouts(nf_ct_net(ct)); @@ -109,26 +129,6 @@ static int icmpv6_packet(struct nf_conn *ct, return NF_ACCEPT; } -/* Called when a new connection for this protocol found. */ -static bool icmpv6_new(struct nf_conn *ct, const struct sk_buff *skb, - unsigned int dataoff) -{ - static const u_int8_t valid_new[] = { - [ICMPV6_ECHO_REQUEST - 128] = 1, - [ICMPV6_NI_QUERY - 128] = 1 - }; - int type = ct->tuplehash[0].tuple.dst.u.icmp.type - 128; - - if (type < 0 || type >= sizeof(valid_new) || !valid_new[type]) { - /* Can't create a new ICMPv6 `conn' with this. */ - pr_debug("icmpv6: can't create new conn with type %u\n", - type + 128); - nf_ct_dump_tuple_ipv6(&ct->tuplehash[0].tuple); - return false; - } - return true; -} - static int icmpv6_error_message(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb, @@ -153,7 +153,7 @@ icmpv6_error_message(struct net *net, struct nf_conn *tmpl, } /* rcu_read_lock()ed by nf_hook_thresh */ - inproto = __nf_ct_l4proto_find(PF_INET6, origtuple.dst.protonum); + inproto = __nf_ct_l4proto_find(origtuple.dst.protonum); /* Ordinarily, we'd expect the inverted tupleproto, but it's been preserved inside the ICMP. 
*/ @@ -179,16 +179,18 @@ icmpv6_error_message(struct net *net, struct nf_conn *tmpl, return NF_ACCEPT; } -static void icmpv6_error_log(const struct sk_buff *skb, struct net *net, - u8 pf, const char *msg) +static void icmpv6_error_log(const struct sk_buff *skb, + const struct nf_hook_state *state, + const char *msg) { - nf_l4proto_log_invalid(skb, net, pf, IPPROTO_ICMPV6, "%s", msg); + nf_l4proto_log_invalid(skb, state->net, state->pf, + IPPROTO_ICMPV6, "%s", msg); } -static int -icmpv6_error(struct net *net, struct nf_conn *tmpl, - struct sk_buff *skb, unsigned int dataoff, - u8 pf, unsigned int hooknum) +int nf_conntrack_icmpv6_error(struct nf_conn *tmpl, + struct sk_buff *skb, + unsigned int dataoff, + const struct nf_hook_state *state) { const struct icmp6hdr *icmp6h; struct icmp6hdr _ih; @@ -196,13 +198,14 @@ icmpv6_error(struct net *net, struct nf_conn *tmpl, icmp6h = skb_header_pointer(skb, dataoff, sizeof(_ih), &_ih); if (icmp6h == NULL) { - icmpv6_error_log(skb, net, pf, "short packet"); + icmpv6_error_log(skb, state, "short packet"); return -NF_ACCEPT; } - if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING && - nf_ip6_checksum(skb, hooknum, dataoff, IPPROTO_ICMPV6)) { - icmpv6_error_log(skb, net, pf, "ICMPv6 checksum failed"); + if (state->hook == NF_INET_PRE_ROUTING && + state->net->ct.sysctl_checksum && + nf_ip6_checksum(skb, state->hook, dataoff, IPPROTO_ICMPV6)) { + icmpv6_error_log(skb, state, "ICMPv6 checksum failed"); return -NF_ACCEPT; } @@ -217,7 +220,7 @@ icmpv6_error(struct net *net, struct nf_conn *tmpl, if (icmp6h->icmp6_type >= 128) return NF_ACCEPT; - return icmpv6_error_message(net, tmpl, skb, dataoff); + return icmpv6_error_message(state->net, tmpl, skb, dataoff); } #if IS_ENABLED(CONFIG_NF_CT_NETLINK) @@ -343,7 +346,7 @@ static int icmpv6_kmemdup_sysctl_table(struct nf_proto_net *pn, return 0; } -static int icmpv6_init_net(struct net *net, u_int16_t proto) +static int icmpv6_init_net(struct net *net) { struct nf_icmp_net *in = icmpv6_pernet(net); struct nf_proto_net *pn = &in->pn; @@ -360,13 +363,10 @@ static struct nf_proto_net *icmpv6_get_net_proto(struct net *net) const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 = { - .l3proto = PF_INET6, .l4proto = IPPROTO_ICMPV6, .pkt_to_tuple = icmpv6_pkt_to_tuple, .invert_tuple = icmpv6_invert_tuple, .packet = icmpv6_packet, - .new = icmpv6_new, - .error = icmpv6_error, #if IS_ENABLED(CONFIG_NF_CT_NETLINK) .tuple_to_nlattr = icmpv6_tuple_to_nlattr, .nlattr_tuple_size = icmpv6_nlattr_tuple_size, diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c index e4d738d34cd0..3d719d3eb9a3 100644 --- a/net/netfilter/nf_conntrack_proto_sctp.c +++ b/net/netfilter/nf_conntrack_proto_sctp.c @@ -273,11 +273,100 @@ static int sctp_new_state(enum ip_conntrack_dir dir, return sctp_conntracks[dir][i][cur_state]; } +/* Don't need lock here: this conntrack not in circulation yet */ +static noinline bool +sctp_new(struct nf_conn *ct, const struct sk_buff *skb, + const struct sctphdr *sh, unsigned int dataoff) +{ + enum sctp_conntrack new_state; + const struct sctp_chunkhdr *sch; + struct sctp_chunkhdr _sch; + u32 offset, count; + + memset(&ct->proto.sctp, 0, sizeof(ct->proto.sctp)); + new_state = SCTP_CONNTRACK_MAX; + for_each_sctp_chunk(skb, sch, _sch, offset, dataoff, count) { + new_state = sctp_new_state(IP_CT_DIR_ORIGINAL, + SCTP_CONNTRACK_NONE, sch->type); + + /* Invalid: delete conntrack */ + if (new_state == SCTP_CONNTRACK_NONE || + new_state == SCTP_CONNTRACK_MAX) { + 
pr_debug("nf_conntrack_sctp: invalid new deleting.\n"); + return false; + } + + /* Copy the vtag into the state info */ + if (sch->type == SCTP_CID_INIT) { + struct sctp_inithdr _inithdr, *ih; + /* Sec 8.5.1 (A) */ + if (sh->vtag) + return false; + + ih = skb_header_pointer(skb, offset + sizeof(_sch), + sizeof(_inithdr), &_inithdr); + if (!ih) + return false; + + pr_debug("Setting vtag %x for new conn\n", + ih->init_tag); + + ct->proto.sctp.vtag[IP_CT_DIR_REPLY] = ih->init_tag; + } else if (sch->type == SCTP_CID_HEARTBEAT) { + pr_debug("Setting vtag %x for secondary conntrack\n", + sh->vtag); + ct->proto.sctp.vtag[IP_CT_DIR_ORIGINAL] = sh->vtag; + } else { + /* If it is a shutdown ack OOTB packet, we expect a return + shutdown complete, otherwise an ABORT Sec 8.4 (5) and (8) */ + pr_debug("Setting vtag %x for new conn OOTB\n", + sh->vtag); + ct->proto.sctp.vtag[IP_CT_DIR_REPLY] = sh->vtag; + } + + ct->proto.sctp.state = new_state; + } + + return true; +} + +static bool sctp_error(struct sk_buff *skb, + unsigned int dataoff, + const struct nf_hook_state *state) +{ + const struct sctphdr *sh; + const char *logmsg; + + if (skb->len < dataoff + sizeof(struct sctphdr)) { + logmsg = "nf_ct_sctp: short packet "; + goto out_invalid; + } + if (state->hook == NF_INET_PRE_ROUTING && + state->net->ct.sysctl_checksum && + skb->ip_summed == CHECKSUM_NONE) { + if (!skb_make_writable(skb, dataoff + sizeof(struct sctphdr))) { + logmsg = "nf_ct_sctp: failed to read header "; + goto out_invalid; + } + sh = (const struct sctphdr *)(skb->data + dataoff); + if (sh->checksum != sctp_compute_cksum(skb, dataoff)) { + logmsg = "nf_ct_sctp: bad CRC "; + goto out_invalid; + } + skb->ip_summed = CHECKSUM_UNNECESSARY; + } + return false; +out_invalid: + nf_l4proto_log_invalid(skb, state->net, state->pf, IPPROTO_SCTP, "%s", logmsg); + return true; +} + /* Returns verdict for packet, or -NF_ACCEPT for invalid. */ static int sctp_packet(struct nf_conn *ct, - const struct sk_buff *skb, + struct sk_buff *skb, unsigned int dataoff, - enum ip_conntrack_info ctinfo) + enum ip_conntrack_info ctinfo, + const struct nf_hook_state *state) { enum sctp_conntrack new_state, old_state; enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); @@ -289,6 +378,9 @@ static int sctp_packet(struct nf_conn *ct, unsigned int *timeouts; unsigned long map[256 / sizeof(unsigned long)] = { 0 }; + if (sctp_error(skb, dataoff, state)) + return -NF_ACCEPT; + sh = skb_header_pointer(skb, dataoff, sizeof(_sctph), &_sctph); if (sh == NULL) goto out; @@ -296,6 +388,17 @@ static int sctp_packet(struct nf_conn *ct, if (do_basic_checks(ct, skb, dataoff, map) != 0) goto out; + if (!nf_ct_is_confirmed(ct)) { + /* If an OOTB packet has any of these chunks discard (Sec 8.4) */ + if (test_bit(SCTP_CID_ABORT, map) || + test_bit(SCTP_CID_SHUTDOWN_COMPLETE, map) || + test_bit(SCTP_CID_COOKIE_ACK, map)) + return -NF_ACCEPT; + + if (!sctp_new(ct, skb, sh, dataoff)) + return -NF_ACCEPT; + } + /* Check the verification tag (Sec 8.5) */ if (!test_bit(SCTP_CID_INIT, map) && !test_bit(SCTP_CID_SHUTDOWN_COMPLETE, map) && @@ -397,110 +500,6 @@ out: return -NF_ACCEPT; } -/* Called when a new connection for this protocol found. 
*/ -static bool sctp_new(struct nf_conn *ct, const struct sk_buff *skb, - unsigned int dataoff) -{ - enum sctp_conntrack new_state; - const struct sctphdr *sh; - struct sctphdr _sctph; - const struct sctp_chunkhdr *sch; - struct sctp_chunkhdr _sch; - u_int32_t offset, count; - unsigned long map[256 / sizeof(unsigned long)] = { 0 }; - - sh = skb_header_pointer(skb, dataoff, sizeof(_sctph), &_sctph); - if (sh == NULL) - return false; - - if (do_basic_checks(ct, skb, dataoff, map) != 0) - return false; - - /* If an OOTB packet has any of these chunks discard (Sec 8.4) */ - if (test_bit(SCTP_CID_ABORT, map) || - test_bit(SCTP_CID_SHUTDOWN_COMPLETE, map) || - test_bit(SCTP_CID_COOKIE_ACK, map)) - return false; - - memset(&ct->proto.sctp, 0, sizeof(ct->proto.sctp)); - new_state = SCTP_CONNTRACK_MAX; - for_each_sctp_chunk (skb, sch, _sch, offset, dataoff, count) { - /* Don't need lock here: this conntrack not in circulation yet */ - new_state = sctp_new_state(IP_CT_DIR_ORIGINAL, - SCTP_CONNTRACK_NONE, sch->type); - - /* Invalid: delete conntrack */ - if (new_state == SCTP_CONNTRACK_NONE || - new_state == SCTP_CONNTRACK_MAX) { - pr_debug("nf_conntrack_sctp: invalid new deleting.\n"); - return false; - } - - /* Copy the vtag into the state info */ - if (sch->type == SCTP_CID_INIT) { - struct sctp_inithdr _inithdr, *ih; - /* Sec 8.5.1 (A) */ - if (sh->vtag) - return false; - - ih = skb_header_pointer(skb, offset + sizeof(_sch), - sizeof(_inithdr), &_inithdr); - if (!ih) - return false; - - pr_debug("Setting vtag %x for new conn\n", - ih->init_tag); - - ct->proto.sctp.vtag[IP_CT_DIR_REPLY] = ih->init_tag; - } else if (sch->type == SCTP_CID_HEARTBEAT) { - pr_debug("Setting vtag %x for secondary conntrack\n", - sh->vtag); - ct->proto.sctp.vtag[IP_CT_DIR_ORIGINAL] = sh->vtag; - } - /* If it is a shutdown ack OOTB packet, we expect a return - shutdown complete, otherwise an ABORT Sec 8.4 (5) and (8) */ - else { - pr_debug("Setting vtag %x for new conn OOTB\n", - sh->vtag); - ct->proto.sctp.vtag[IP_CT_DIR_REPLY] = sh->vtag; - } - - ct->proto.sctp.state = new_state; - } - - return true; -} - -static int sctp_error(struct net *net, struct nf_conn *tpl, struct sk_buff *skb, - unsigned int dataoff, - u8 pf, unsigned int hooknum) -{ - const struct sctphdr *sh; - const char *logmsg; - - if (skb->len < dataoff + sizeof(struct sctphdr)) { - logmsg = "nf_ct_sctp: short packet "; - goto out_invalid; - } - if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING && - skb->ip_summed == CHECKSUM_NONE) { - if (!skb_make_writable(skb, dataoff + sizeof(struct sctphdr))) { - logmsg = "nf_ct_sctp: failed to read header "; - goto out_invalid; - } - sh = (const struct sctphdr *)(skb->data + dataoff); - if (sh->checksum != sctp_compute_cksum(skb, dataoff)) { - logmsg = "nf_ct_sctp: bad CRC "; - goto out_invalid; - } - skb->ip_summed = CHECKSUM_UNNECESSARY; - } - return NF_ACCEPT; -out_invalid: - nf_l4proto_log_invalid(skb, net, pf, IPPROTO_SCTP, "%s", logmsg); - return -NF_ACCEPT; -} - static bool sctp_can_early_drop(const struct nf_conn *ct) { switch (ct->proto.sctp.state) { @@ -735,7 +734,7 @@ static int sctp_kmemdup_sysctl_table(struct nf_proto_net *pn, return 0; } -static int sctp_init_net(struct net *net, u_int16_t proto) +static int sctp_init_net(struct net *net) { struct nf_sctp_net *sn = sctp_pernet(net); struct nf_proto_net *pn = &sn->pn; @@ -760,49 +759,12 @@ static struct nf_proto_net *sctp_get_net_proto(struct net *net) return &net->ct.nf_ct_proto.sctp.pn; } -const struct nf_conntrack_l4proto 
nf_conntrack_l4proto_sctp4 = { - .l3proto = PF_INET, - .l4proto = IPPROTO_SCTP, -#ifdef CONFIG_NF_CONNTRACK_PROCFS - .print_conntrack = sctp_print_conntrack, -#endif - .packet = sctp_packet, - .new = sctp_new, - .error = sctp_error, - .can_early_drop = sctp_can_early_drop, - .me = THIS_MODULE, -#if IS_ENABLED(CONFIG_NF_CT_NETLINK) - .nlattr_size = SCTP_NLATTR_SIZE, - .to_nlattr = sctp_to_nlattr, - .from_nlattr = nlattr_to_sctp, - .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, - .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, - .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, - .nla_policy = nf_ct_port_nla_policy, -#endif -#ifdef CONFIG_NF_CONNTRACK_TIMEOUT - .ctnl_timeout = { - .nlattr_to_obj = sctp_timeout_nlattr_to_obj, - .obj_to_nlattr = sctp_timeout_obj_to_nlattr, - .nlattr_max = CTA_TIMEOUT_SCTP_MAX, - .obj_size = sizeof(unsigned int) * SCTP_CONNTRACK_MAX, - .nla_policy = sctp_timeout_nla_policy, - }, -#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ - .init_net = sctp_init_net, - .get_net_proto = sctp_get_net_proto, -}; -EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_sctp4); - -const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 = { - .l3proto = PF_INET6, +const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp = { .l4proto = IPPROTO_SCTP, #ifdef CONFIG_NF_CONNTRACK_PROCFS .print_conntrack = sctp_print_conntrack, #endif .packet = sctp_packet, - .new = sctp_new, - .error = sctp_error, .can_early_drop = sctp_can_early_drop, .me = THIS_MODULE, #if IS_ENABLED(CONFIG_NF_CT_NETLINK) @@ -826,4 +788,3 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 = { .init_net = sctp_init_net, .get_net_proto = sctp_get_net_proto, }; -EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_sctp6); diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c index 247b89784a6f..1bcf9984d45e 100644 --- a/net/netfilter/nf_conntrack_proto_tcp.c +++ b/net/netfilter/nf_conntrack_proto_tcp.c @@ -717,35 +717,26 @@ static const u8 tcp_valid_flags[(TCPHDR_FIN|TCPHDR_SYN|TCPHDR_RST|TCPHDR_ACK| [TCPHDR_ACK|TCPHDR_URG] = 1, }; -static void tcp_error_log(const struct sk_buff *skb, struct net *net, - u8 pf, const char *msg) +static void tcp_error_log(const struct sk_buff *skb, + const struct nf_hook_state *state, + const char *msg) { - nf_l4proto_log_invalid(skb, net, pf, IPPROTO_TCP, "%s", msg); + nf_l4proto_log_invalid(skb, state->net, state->pf, IPPROTO_TCP, "%s", msg); } /* Protect conntrack agaist broken packets. Code taken from ipt_unclean.c. */ -static int tcp_error(struct net *net, struct nf_conn *tmpl, - struct sk_buff *skb, - unsigned int dataoff, - u_int8_t pf, - unsigned int hooknum) +static bool tcp_error(const struct tcphdr *th, + struct sk_buff *skb, + unsigned int dataoff, + const struct nf_hook_state *state) { - const struct tcphdr *th; - struct tcphdr _tcph; unsigned int tcplen = skb->len - dataoff; - u_int8_t tcpflags; - - /* Smaller that minimal TCP header? */ - th = skb_header_pointer(skb, dataoff, sizeof(_tcph), &_tcph); - if (th == NULL) { - tcp_error_log(skb, net, pf, "short packet"); - return -NF_ACCEPT; - } + u8 tcpflags; /* Not whole TCP header or malformed packet */ if (th->doff*4 < sizeof(struct tcphdr) || tcplen < th->doff*4) { - tcp_error_log(skb, net, pf, "truncated packet"); - return -NF_ACCEPT; + tcp_error_log(skb, state, "truncated packet"); + return true; } /* Checksum invalid? Ignore. @@ -753,27 +744,101 @@ static int tcp_error(struct net *net, struct nf_conn *tmpl, * because the checksum is assumed to be correct. 
*/ /* FIXME: Source route IP option packets --RR */ - if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING && - nf_checksum(skb, hooknum, dataoff, IPPROTO_TCP, pf)) { - tcp_error_log(skb, net, pf, "bad checksum"); - return -NF_ACCEPT; + if (state->net->ct.sysctl_checksum && + state->hook == NF_INET_PRE_ROUTING && + nf_checksum(skb, state->hook, dataoff, IPPROTO_TCP, state->pf)) { + tcp_error_log(skb, state, "bad checksum"); + return true; } /* Check TCP flags. */ tcpflags = (tcp_flag_byte(th) & ~(TCPHDR_ECE|TCPHDR_CWR|TCPHDR_PSH)); if (!tcp_valid_flags[tcpflags]) { - tcp_error_log(skb, net, pf, "invalid tcp flag combination"); - return -NF_ACCEPT; + tcp_error_log(skb, state, "invalid tcp flag combination"); + return true; } - return NF_ACCEPT; + return false; +} + +static noinline bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb, + unsigned int dataoff, + const struct tcphdr *th) +{ + enum tcp_conntrack new_state; + struct net *net = nf_ct_net(ct); + const struct nf_tcp_net *tn = tcp_pernet(net); + const struct ip_ct_tcp_state *sender = &ct->proto.tcp.seen[0]; + const struct ip_ct_tcp_state *receiver = &ct->proto.tcp.seen[1]; + + /* Don't need lock here: this conntrack not in circulation yet */ + new_state = tcp_conntracks[0][get_conntrack_index(th)][TCP_CONNTRACK_NONE]; + + /* Invalid: delete conntrack */ + if (new_state >= TCP_CONNTRACK_MAX) { + pr_debug("nf_ct_tcp: invalid new deleting.\n"); + return false; + } + + if (new_state == TCP_CONNTRACK_SYN_SENT) { + memset(&ct->proto.tcp, 0, sizeof(ct->proto.tcp)); + /* SYN packet */ + ct->proto.tcp.seen[0].td_end = + segment_seq_plus_len(ntohl(th->seq), skb->len, + dataoff, th); + ct->proto.tcp.seen[0].td_maxwin = ntohs(th->window); + if (ct->proto.tcp.seen[0].td_maxwin == 0) + ct->proto.tcp.seen[0].td_maxwin = 1; + ct->proto.tcp.seen[0].td_maxend = + ct->proto.tcp.seen[0].td_end; + + tcp_options(skb, dataoff, th, &ct->proto.tcp.seen[0]); + } else if (tn->tcp_loose == 0) { + /* Don't try to pick up connections. */ + return false; + } else { + memset(&ct->proto.tcp, 0, sizeof(ct->proto.tcp)); + /* + * We are in the middle of a connection, + * its history is lost for us. + * Let's try to use the data from the packet. + */ + ct->proto.tcp.seen[0].td_end = + segment_seq_plus_len(ntohl(th->seq), skb->len, + dataoff, th); + ct->proto.tcp.seen[0].td_maxwin = ntohs(th->window); + if (ct->proto.tcp.seen[0].td_maxwin == 0) + ct->proto.tcp.seen[0].td_maxwin = 1; + ct->proto.tcp.seen[0].td_maxend = + ct->proto.tcp.seen[0].td_end + + ct->proto.tcp.seen[0].td_maxwin; + + /* We assume SACK and liberal window checking to handle + * window scaling */ + ct->proto.tcp.seen[0].flags = + ct->proto.tcp.seen[1].flags = IP_CT_TCP_FLAG_SACK_PERM | + IP_CT_TCP_FLAG_BE_LIBERAL; + } + + /* tcp_packet will set them */ + ct->proto.tcp.last_index = TCP_NONE_SET; + + pr_debug("%s: sender end=%u maxend=%u maxwin=%u scale=%i " + "receiver end=%u maxend=%u maxwin=%u scale=%i\n", + __func__, + sender->td_end, sender->td_maxend, sender->td_maxwin, + sender->td_scale, + receiver->td_end, receiver->td_maxend, receiver->td_maxwin, + receiver->td_scale); + return true; } /* Returns verdict for packet, or -1 for invalid. 
*/ static int tcp_packet(struct nf_conn *ct, - const struct sk_buff *skb, + struct sk_buff *skb, unsigned int dataoff, - enum ip_conntrack_info ctinfo) + enum ip_conntrack_info ctinfo, + const struct nf_hook_state *state) { struct net *net = nf_ct_net(ct); struct nf_tcp_net *tn = tcp_pernet(net); @@ -786,7 +851,14 @@ static int tcp_packet(struct nf_conn *ct, unsigned long timeout; th = skb_header_pointer(skb, dataoff, sizeof(_tcph), &_tcph); - BUG_ON(th == NULL); + if (th == NULL) + return -NF_ACCEPT; + + if (tcp_error(th, skb, dataoff, state)) + return -NF_ACCEPT; + + if (!nf_ct_is_confirmed(ct) && !tcp_new(ct, skb, dataoff, th)) + return -NF_ACCEPT; spin_lock_bh(&ct->lock); old_state = ct->proto.tcp.state; @@ -1067,82 +1139,6 @@ static int tcp_packet(struct nf_conn *ct, return NF_ACCEPT; } -/* Called when a new connection for this protocol found. */ -static bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb, - unsigned int dataoff) -{ - enum tcp_conntrack new_state; - const struct tcphdr *th; - struct tcphdr _tcph; - struct net *net = nf_ct_net(ct); - struct nf_tcp_net *tn = tcp_pernet(net); - const struct ip_ct_tcp_state *sender = &ct->proto.tcp.seen[0]; - const struct ip_ct_tcp_state *receiver = &ct->proto.tcp.seen[1]; - - th = skb_header_pointer(skb, dataoff, sizeof(_tcph), &_tcph); - BUG_ON(th == NULL); - - /* Don't need lock here: this conntrack not in circulation yet */ - new_state = tcp_conntracks[0][get_conntrack_index(th)][TCP_CONNTRACK_NONE]; - - /* Invalid: delete conntrack */ - if (new_state >= TCP_CONNTRACK_MAX) { - pr_debug("nf_ct_tcp: invalid new deleting.\n"); - return false; - } - - if (new_state == TCP_CONNTRACK_SYN_SENT) { - memset(&ct->proto.tcp, 0, sizeof(ct->proto.tcp)); - /* SYN packet */ - ct->proto.tcp.seen[0].td_end = - segment_seq_plus_len(ntohl(th->seq), skb->len, - dataoff, th); - ct->proto.tcp.seen[0].td_maxwin = ntohs(th->window); - if (ct->proto.tcp.seen[0].td_maxwin == 0) - ct->proto.tcp.seen[0].td_maxwin = 1; - ct->proto.tcp.seen[0].td_maxend = - ct->proto.tcp.seen[0].td_end; - - tcp_options(skb, dataoff, th, &ct->proto.tcp.seen[0]); - } else if (tn->tcp_loose == 0) { - /* Don't try to pick up connections. */ - return false; - } else { - memset(&ct->proto.tcp, 0, sizeof(ct->proto.tcp)); - /* - * We are in the middle of a connection, - * its history is lost for us. - * Let's try to use the data from the packet. 
- */ - ct->proto.tcp.seen[0].td_end = - segment_seq_plus_len(ntohl(th->seq), skb->len, - dataoff, th); - ct->proto.tcp.seen[0].td_maxwin = ntohs(th->window); - if (ct->proto.tcp.seen[0].td_maxwin == 0) - ct->proto.tcp.seen[0].td_maxwin = 1; - ct->proto.tcp.seen[0].td_maxend = - ct->proto.tcp.seen[0].td_end + - ct->proto.tcp.seen[0].td_maxwin; - - /* We assume SACK and liberal window checking to handle - * window scaling */ - ct->proto.tcp.seen[0].flags = - ct->proto.tcp.seen[1].flags = IP_CT_TCP_FLAG_SACK_PERM | - IP_CT_TCP_FLAG_BE_LIBERAL; - } - - /* tcp_packet will set them */ - ct->proto.tcp.last_index = TCP_NONE_SET; - - pr_debug("tcp_new: sender end=%u maxend=%u maxwin=%u scale=%i " - "receiver end=%u maxend=%u maxwin=%u scale=%i\n", - sender->td_end, sender->td_maxend, sender->td_maxwin, - sender->td_scale, - receiver->td_end, receiver->td_maxend, receiver->td_maxwin, - receiver->td_scale); - return true; -} - static bool tcp_can_early_drop(const struct nf_conn *ct) { switch (ct->proto.tcp.state) { @@ -1510,7 +1506,7 @@ static int tcp_kmemdup_sysctl_table(struct nf_proto_net *pn, return 0; } -static int tcp_init_net(struct net *net, u_int16_t proto) +static int tcp_init_net(struct net *net) { struct nf_tcp_net *tn = tcp_pernet(net); struct nf_proto_net *pn = &tn->pn; @@ -1538,16 +1534,13 @@ static struct nf_proto_net *tcp_get_net_proto(struct net *net) return &net->ct.nf_ct_proto.tcp.pn; } -const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 = +const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp = { - .l3proto = PF_INET, .l4proto = IPPROTO_TCP, #ifdef CONFIG_NF_CONNTRACK_PROCFS .print_conntrack = tcp_print_conntrack, #endif .packet = tcp_packet, - .new = tcp_new, - .error = tcp_error, .can_early_drop = tcp_can_early_drop, #if IS_ENABLED(CONFIG_NF_CT_NETLINK) .to_nlattr = tcp_to_nlattr, @@ -1571,39 +1564,3 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 = .init_net = tcp_init_net, .get_net_proto = tcp_get_net_proto, }; -EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_tcp4); - -const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 = -{ - .l3proto = PF_INET6, - .l4proto = IPPROTO_TCP, -#ifdef CONFIG_NF_CONNTRACK_PROCFS - .print_conntrack = tcp_print_conntrack, -#endif - .packet = tcp_packet, - .new = tcp_new, - .error = tcp_error, - .can_early_drop = tcp_can_early_drop, -#if IS_ENABLED(CONFIG_NF_CT_NETLINK) - .nlattr_size = TCP_NLATTR_SIZE, - .to_nlattr = tcp_to_nlattr, - .from_nlattr = nlattr_to_tcp, - .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, - .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, - .nlattr_tuple_size = tcp_nlattr_tuple_size, - .nla_policy = nf_ct_port_nla_policy, -#endif -#ifdef CONFIG_NF_CONNTRACK_TIMEOUT - .ctnl_timeout = { - .nlattr_to_obj = tcp_timeout_nlattr_to_obj, - .obj_to_nlattr = tcp_timeout_obj_to_nlattr, - .nlattr_max = CTA_TIMEOUT_TCP_MAX, - .obj_size = sizeof(unsigned int) * - TCP_CONNTRACK_TIMEOUT_MAX, - .nla_policy = tcp_timeout_nla_policy, - }, -#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ - .init_net = tcp_init_net, - .get_net_proto = tcp_get_net_proto, -}; -EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_tcp6); diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c index 3065fb8ef91b..a7aa70370913 100644 --- a/net/netfilter/nf_conntrack_proto_udp.c +++ b/net/netfilter/nf_conntrack_proto_udp.c @@ -42,14 +42,65 @@ static unsigned int *udp_get_timeouts(struct net *net) return udp_pernet(net)->timeouts; } +static void udp_error_log(const struct sk_buff *skb, + const struct nf_hook_state *state, + const 
char *msg) +{ + nf_l4proto_log_invalid(skb, state->net, state->pf, + IPPROTO_UDP, "%s", msg); +} + +static bool udp_error(struct sk_buff *skb, + unsigned int dataoff, + const struct nf_hook_state *state) +{ + unsigned int udplen = skb->len - dataoff; + const struct udphdr *hdr; + struct udphdr _hdr; + + /* Header is too small? */ + hdr = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr); + if (!hdr) { + udp_error_log(skb, state, "short packet"); + return true; + } + + /* Truncated/malformed packets */ + if (ntohs(hdr->len) > udplen || ntohs(hdr->len) < sizeof(*hdr)) { + udp_error_log(skb, state, "truncated/malformed packet"); + return true; + } + + /* Packet with no checksum */ + if (!hdr->check) + return false; + + /* Checksum invalid? Ignore. + * We skip checking packets on the outgoing path + * because the checksum is assumed to be correct. + * FIXME: Source route IP option packets --RR */ + if (state->hook == NF_INET_PRE_ROUTING && + state->net->ct.sysctl_checksum && + nf_checksum(skb, state->hook, dataoff, IPPROTO_UDP, state->pf)) { + udp_error_log(skb, state, "bad checksum"); + return true; + } + + return false; +} + /* Returns verdict for packet, and may modify conntracktype */ static int udp_packet(struct nf_conn *ct, - const struct sk_buff *skb, + struct sk_buff *skb, unsigned int dataoff, - enum ip_conntrack_info ctinfo) + enum ip_conntrack_info ctinfo, + const struct nf_hook_state *state) { unsigned int *timeouts; + if (udp_error(skb, dataoff, state)) + return -NF_ACCEPT; + timeouts = nf_ct_timeout_lookup(ct); if (!timeouts) timeouts = udp_get_timeouts(nf_ct_net(ct)); @@ -69,24 +120,18 @@ static int udp_packet(struct nf_conn *ct, return NF_ACCEPT; } -/* Called when a new connection for this protocol found. */ -static bool udp_new(struct nf_conn *ct, const struct sk_buff *skb, - unsigned int dataoff) -{ - return true; -} - #ifdef CONFIG_NF_CT_PROTO_UDPLITE -static void udplite_error_log(const struct sk_buff *skb, struct net *net, - u8 pf, const char *msg) +static void udplite_error_log(const struct sk_buff *skb, + const struct nf_hook_state *state, + const char *msg) { - nf_l4proto_log_invalid(skb, net, pf, IPPROTO_UDPLITE, "%s", msg); + nf_l4proto_log_invalid(skb, state->net, state->pf, + IPPROTO_UDPLITE, "%s", msg); } -static int udplite_error(struct net *net, struct nf_conn *tmpl, - struct sk_buff *skb, - unsigned int dataoff, - u8 pf, unsigned int hooknum) +static bool udplite_error(struct sk_buff *skb, + unsigned int dataoff, + const struct nf_hook_state *state) { unsigned int udplen = skb->len - dataoff; const struct udphdr *hdr; @@ -96,80 +141,67 @@ static int udplite_error(struct net *net, struct nf_conn *tmpl, /* Header is too small? */ hdr = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr); if (!hdr) { - udplite_error_log(skb, net, pf, "short packet"); - return -NF_ACCEPT; + udplite_error_log(skb, state, "short packet"); + return true; } cscov = ntohs(hdr->len); if (cscov == 0) { cscov = udplen; } else if (cscov < sizeof(*hdr) || cscov > udplen) { - udplite_error_log(skb, net, pf, "invalid checksum coverage"); - return -NF_ACCEPT; + udplite_error_log(skb, state, "invalid checksum coverage"); + return true; } /* UDPLITE mandates checksums */ if (!hdr->check) { - udplite_error_log(skb, net, pf, "checksum missing"); - return -NF_ACCEPT; + udplite_error_log(skb, state, "checksum missing"); + return true; } /* Checksum invalid? Ignore. 
*/ - if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING && - nf_checksum_partial(skb, hooknum, dataoff, cscov, IPPROTO_UDP, - pf)) { - udplite_error_log(skb, net, pf, "bad checksum"); - return -NF_ACCEPT; + if (state->hook == NF_INET_PRE_ROUTING && + state->net->ct.sysctl_checksum && + nf_checksum_partial(skb, state->hook, dataoff, cscov, IPPROTO_UDP, + state->pf)) { + udplite_error_log(skb, state, "bad checksum"); + return true; } - return NF_ACCEPT; -} -#endif - -static void udp_error_log(const struct sk_buff *skb, struct net *net, - u8 pf, const char *msg) -{ - nf_l4proto_log_invalid(skb, net, pf, IPPROTO_UDP, "%s", msg); + return false; } -static int udp_error(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb, - unsigned int dataoff, - u_int8_t pf, - unsigned int hooknum) +/* Returns verdict for packet, and may modify conntracktype */ +static int udplite_packet(struct nf_conn *ct, + struct sk_buff *skb, + unsigned int dataoff, + enum ip_conntrack_info ctinfo, + const struct nf_hook_state *state) { - unsigned int udplen = skb->len - dataoff; - const struct udphdr *hdr; - struct udphdr _hdr; - - /* Header is too small? */ - hdr = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr); - if (hdr == NULL) { - udp_error_log(skb, net, pf, "short packet"); - return -NF_ACCEPT; - } + unsigned int *timeouts; - /* Truncated/malformed packets */ - if (ntohs(hdr->len) > udplen || ntohs(hdr->len) < sizeof(*hdr)) { - udp_error_log(skb, net, pf, "truncated/malformed packet"); + if (udplite_error(skb, dataoff, state)) return -NF_ACCEPT; - } - /* Packet with no checksum */ - if (!hdr->check) - return NF_ACCEPT; + timeouts = nf_ct_timeout_lookup(ct); + if (!timeouts) + timeouts = udp_get_timeouts(nf_ct_net(ct)); - /* Checksum invalid? Ignore. - * We skip checking packets on the outgoing path - * because the checksum is assumed to be correct. - * FIXME: Source route IP option packets --RR */ - if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING && - nf_checksum(skb, hooknum, dataoff, IPPROTO_UDP, pf)) { - udp_error_log(skb, net, pf, "bad checksum"); - return -NF_ACCEPT; + /* If we've seen traffic both ways, this is some kind of UDP + stream. Extend timeout. 
*/ + if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) { + nf_ct_refresh_acct(ct, ctinfo, skb, + timeouts[UDP_CT_REPLIED]); + /* Also, more likely to be important, and not a probe */ + if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status)) + nf_conntrack_event_cache(IPCT_ASSURED, ct); + } else { + nf_ct_refresh_acct(ct, ctinfo, skb, + timeouts[UDP_CT_UNREPLIED]); } - return NF_ACCEPT; } +#endif #ifdef CONFIG_NF_CONNTRACK_TIMEOUT @@ -258,7 +290,7 @@ static int udp_kmemdup_sysctl_table(struct nf_proto_net *pn, return 0; } -static int udp_init_net(struct net *net, u_int16_t proto) +static int udp_init_net(struct net *net) { struct nf_udp_net *un = udp_pernet(net); struct nf_proto_net *pn = &un->pn; @@ -278,72 +310,11 @@ static struct nf_proto_net *udp_get_net_proto(struct net *net) return &net->ct.nf_ct_proto.udp.pn; } -const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 = -{ - .l3proto = PF_INET, - .l4proto = IPPROTO_UDP, - .allow_clash = true, - .packet = udp_packet, - .new = udp_new, - .error = udp_error, -#if IS_ENABLED(CONFIG_NF_CT_NETLINK) - .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, - .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, - .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, - .nla_policy = nf_ct_port_nla_policy, -#endif -#ifdef CONFIG_NF_CONNTRACK_TIMEOUT - .ctnl_timeout = { - .nlattr_to_obj = udp_timeout_nlattr_to_obj, - .obj_to_nlattr = udp_timeout_obj_to_nlattr, - .nlattr_max = CTA_TIMEOUT_UDP_MAX, - .obj_size = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX, - .nla_policy = udp_timeout_nla_policy, - }, -#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ - .init_net = udp_init_net, - .get_net_proto = udp_get_net_proto, -}; -EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udp4); - -#ifdef CONFIG_NF_CT_PROTO_UDPLITE -const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 = -{ - .l3proto = PF_INET, - .l4proto = IPPROTO_UDPLITE, - .allow_clash = true, - .packet = udp_packet, - .new = udp_new, - .error = udplite_error, -#if IS_ENABLED(CONFIG_NF_CT_NETLINK) - .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, - .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, - .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, - .nla_policy = nf_ct_port_nla_policy, -#endif -#ifdef CONFIG_NF_CONNTRACK_TIMEOUT - .ctnl_timeout = { - .nlattr_to_obj = udp_timeout_nlattr_to_obj, - .obj_to_nlattr = udp_timeout_obj_to_nlattr, - .nlattr_max = CTA_TIMEOUT_UDP_MAX, - .obj_size = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX, - .nla_policy = udp_timeout_nla_policy, - }, -#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ - .init_net = udp_init_net, - .get_net_proto = udp_get_net_proto, -}; -EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udplite4); -#endif - -const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 = +const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp = { - .l3proto = PF_INET6, .l4proto = IPPROTO_UDP, .allow_clash = true, .packet = udp_packet, - .new = udp_new, - .error = udp_error, #if IS_ENABLED(CONFIG_NF_CT_NETLINK) .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, @@ -362,17 +333,13 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 = .init_net = udp_init_net, .get_net_proto = udp_get_net_proto, }; -EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udp6); #ifdef CONFIG_NF_CT_PROTO_UDPLITE -const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 = +const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite = { - .l3proto = PF_INET6, .l4proto = IPPROTO_UDPLITE, .allow_clash = true, - .packet = udp_packet, - .new = udp_new, - .error = udplite_error, + 
.packet = udplite_packet, #if IS_ENABLED(CONFIG_NF_CT_NETLINK) .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, @@ -391,5 +358,4 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 = .init_net = udp_init_net, .get_net_proto = udp_get_net_proto, }; -EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udplite6); #endif diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c index 13279f683da9..463d17d349c1 100644 --- a/net/netfilter/nf_conntrack_standalone.c +++ b/net/netfilter/nf_conntrack_standalone.c @@ -292,7 +292,7 @@ static int ct_seq_show(struct seq_file *s, void *v) if (!net_eq(nf_ct_net(ct), net)) goto release; - l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct)); + l4proto = __nf_ct_l4proto_find(nf_ct_protonum(ct)); WARN_ON(!l4proto); ret = -ENOSPC; @@ -720,10 +720,3 @@ static void __exit nf_conntrack_standalone_fini(void) module_init(nf_conntrack_standalone_init); module_exit(nf_conntrack_standalone_fini); - -/* Some modules need us, but don't depend directly on any symbol. - They should call this. */ -void need_conntrack(void) -{ -} -EXPORT_SYMBOL_GPL(need_conntrack); diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c index d8125616edc7..185c633b6872 100644 --- a/net/netfilter/nf_flow_table_core.c +++ b/net/netfilter/nf_flow_table_core.c @@ -120,7 +120,7 @@ static void flow_offload_fixup_ct_state(struct nf_conn *ct) if (l4num == IPPROTO_TCP) flow_offload_fixup_tcp(&ct->proto.tcp); - l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), l4num); + l4proto = __nf_ct_l4proto_find(l4num); if (!l4proto) return; @@ -233,8 +233,8 @@ flow_offload_lookup(struct nf_flowtable *flow_table, struct flow_offload *flow; int dir; - tuplehash = rhashtable_lookup_fast(&flow_table->rhashtable, tuple, - nf_flow_offload_rhash_params); + tuplehash = rhashtable_lookup(&flow_table->rhashtable, tuple, + nf_flow_offload_rhash_params); if (!tuplehash) return NULL; @@ -254,20 +254,17 @@ int nf_flow_table_iterate(struct nf_flowtable *flow_table, struct flow_offload_tuple_rhash *tuplehash; struct rhashtable_iter hti; struct flow_offload *flow; - int err; - - err = rhashtable_walk_init(&flow_table->rhashtable, &hti, GFP_KERNEL); - if (err) - return err; + int err = 0; + rhashtable_walk_enter(&flow_table->rhashtable, &hti); rhashtable_walk_start(&hti); while ((tuplehash = rhashtable_walk_next(&hti))) { if (IS_ERR(tuplehash)) { - err = PTR_ERR(tuplehash); - if (err != -EAGAIN) - goto out; - + if (PTR_ERR(tuplehash) != -EAGAIN) { + err = PTR_ERR(tuplehash); + break; + } continue; } if (tuplehash->tuple.dir) @@ -277,7 +274,6 @@ int nf_flow_table_iterate(struct nf_flowtable *flow_table, iter(flow, data); } -out: rhashtable_walk_stop(&hti); rhashtable_walk_exit(&hti); @@ -290,25 +286,19 @@ static inline bool nf_flow_has_expired(const struct flow_offload *flow) return (__s32)(flow->timeout - (u32)jiffies) <= 0; } -static int nf_flow_offload_gc_step(struct nf_flowtable *flow_table) +static void nf_flow_offload_gc_step(struct nf_flowtable *flow_table) { struct flow_offload_tuple_rhash *tuplehash; struct rhashtable_iter hti; struct flow_offload *flow; - int err; - - err = rhashtable_walk_init(&flow_table->rhashtable, &hti, GFP_KERNEL); - if (err) - return 0; + rhashtable_walk_enter(&flow_table->rhashtable, &hti); rhashtable_walk_start(&hti); while ((tuplehash = rhashtable_walk_next(&hti))) { if (IS_ERR(tuplehash)) { - err = PTR_ERR(tuplehash); - if (err != -EAGAIN) - goto out; - + if 
(PTR_ERR(tuplehash) != -EAGAIN) + break; continue; } if (tuplehash->tuple.dir) @@ -321,11 +311,8 @@ static int nf_flow_offload_gc_step(struct nf_flowtable *flow_table) FLOW_OFFLOAD_TEARDOWN))) flow_offload_del(flow_table, flow); } -out: rhashtable_walk_stop(&hti); rhashtable_walk_exit(&hti); - - return 1; } static void nf_flow_offload_work_gc(struct work_struct *work) @@ -514,7 +501,7 @@ void nf_flow_table_free(struct nf_flowtable *flow_table) mutex_unlock(&flowtable_lock); cancel_delayed_work_sync(&flow_table->gc_work); nf_flow_table_iterate(flow_table, nf_flow_table_do_cleanup, NULL); - WARN_ON(!nf_flow_offload_gc_step(flow_table)); + nf_flow_offload_gc_step(flow_table); rhashtable_destroy(&flow_table->rhashtable); } EXPORT_SYMBOL_GPL(nf_flow_table_free); diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c index 15ed91309992..1d291a51cd45 100644 --- a/net/netfilter/nf_flow_table_ip.c +++ b/net/netfilter/nf_flow_table_ip.c @@ -254,8 +254,7 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb, if (nf_flow_state_check(flow, ip_hdr(skb)->protocol, skb, thoff)) return NF_ACCEPT; - if (flow->flags & (FLOW_OFFLOAD_SNAT | FLOW_OFFLOAD_DNAT) && - nf_flow_nat_ip(flow, skb, thoff, dir) < 0) + if (nf_flow_nat_ip(flow, skb, thoff, dir) < 0) return NF_DROP; flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT; @@ -471,8 +470,7 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb, if (skb_try_make_writable(skb, sizeof(*ip6h))) return NF_DROP; - if (flow->flags & (FLOW_OFFLOAD_SNAT | FLOW_OFFLOAD_DNAT) && - nf_flow_nat_ipv6(flow, skb, dir) < 0) + if (nf_flow_nat_ipv6(flow, skb, dir) < 0) return NF_DROP; flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT; diff --git a/net/netfilter/nf_nat_helper.c b/net/netfilter/nf_nat_helper.c index 99606baedda4..38793b95d9bc 100644 --- a/net/netfilter/nf_nat_helper.c +++ b/net/netfilter/nf_nat_helper.c @@ -37,7 +37,7 @@ static void mangle_contents(struct sk_buff *skb, { unsigned char *data; - BUG_ON(skb_is_nonlinear(skb)); + SKB_LINEAR_ASSERT(skb); data = skb_network_header(skb) + dataoff; /* move post-replacement */ @@ -110,8 +110,6 @@ bool __nf_nat_mangle_tcp_packet(struct sk_buff *skb, !enlarge_skb(skb, rep_len - match_len)) return false; - SKB_LINEAR_ASSERT(skb); - tcph = (void *)skb->data + protoff; oldlen = skb->len - protoff; diff --git a/net/netfilter/nf_nat_redirect.c b/net/netfilter/nf_nat_redirect.c index adee04af8d43..78a9e6454ff3 100644 --- a/net/netfilter/nf_nat_redirect.c +++ b/net/netfilter/nf_nat_redirect.c @@ -52,13 +52,11 @@ nf_nat_redirect_ipv4(struct sk_buff *skb, newdst = 0; - rcu_read_lock(); indev = __in_dev_get_rcu(skb->dev); if (indev && indev->ifa_list) { ifa = indev->ifa_list; newdst = ifa->ifa_local; } - rcu_read_unlock(); if (!newdst) return NF_DROP; @@ -97,7 +95,6 @@ nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range, struct inet6_ifaddr *ifa; bool addr = false; - rcu_read_lock(); idev = __in6_dev_get(skb->dev); if (idev != NULL) { read_lock_bh(&idev->lock); @@ -108,7 +105,6 @@ nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range, } read_unlock_bh(&idev->lock); } - rcu_read_unlock(); if (!addr) return NF_DROP; diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 2cfb173cd0b2..f0159eea2978 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -27,6 +27,8 @@ static LIST_HEAD(nf_tables_expressions); static LIST_HEAD(nf_tables_objects); static LIST_HEAD(nf_tables_flowtables); +static 
LIST_HEAD(nf_tables_destroy_list); +static DEFINE_SPINLOCK(nf_tables_destroy_list_lock); static u64 table_handle; enum { @@ -64,6 +66,8 @@ static void nft_validate_state_update(struct net *net, u8 new_validate_state) net->nft.validate_state = new_validate_state; } +static void nf_tables_trans_destroy_work(struct work_struct *w); +static DECLARE_WORK(trans_destroy_work, nf_tables_trans_destroy_work); static void nft_ctx_init(struct nft_ctx *ctx, struct net *net, @@ -207,6 +211,18 @@ static int nft_delchain(struct nft_ctx *ctx) return err; } +/* either expr ops provide both activate/deactivate, or neither */ +static bool nft_expr_check_ops(const struct nft_expr_ops *ops) +{ + if (!ops) + return true; + + if (WARN_ON_ONCE((!ops->activate ^ !ops->deactivate))) + return false; + + return true; +} + static void nft_rule_expr_activate(const struct nft_ctx *ctx, struct nft_rule *rule) { @@ -298,7 +314,7 @@ static int nft_delrule_by_chain(struct nft_ctx *ctx) return 0; } -static int nft_trans_set_add(struct nft_ctx *ctx, int msg_type, +static int nft_trans_set_add(const struct nft_ctx *ctx, int msg_type, struct nft_set *set) { struct nft_trans *trans; @@ -318,7 +334,7 @@ static int nft_trans_set_add(struct nft_ctx *ctx, int msg_type, return 0; } -static int nft_delset(struct nft_ctx *ctx, struct nft_set *set) +static int nft_delset(const struct nft_ctx *ctx, struct nft_set *set) { int err; @@ -1005,7 +1021,8 @@ static int nf_tables_deltable(struct net *net, struct sock *nlsk, static void nf_tables_table_destroy(struct nft_ctx *ctx) { - BUG_ON(ctx->table->use > 0); + if (WARN_ON(ctx->table->use > 0)) + return; rhltable_destroy(&ctx->table->chains_ht); kfree(ctx->table->name); @@ -1412,7 +1429,8 @@ static void nf_tables_chain_destroy(struct nft_ctx *ctx) { struct nft_chain *chain = ctx->chain; - BUG_ON(chain->use > 0); + if (WARN_ON(chain->use > 0)) + return; /* no concurrent access possible anymore */ nf_tables_chain_free_chain_rules(chain); @@ -1907,6 +1925,9 @@ static int nf_tables_delchain(struct net *net, struct sock *nlsk, */ int nft_register_expr(struct nft_expr_type *type) { + if (!nft_expr_check_ops(type->ops)) + return -EINVAL; + nfnl_lock(NFNL_SUBSYS_NFTABLES); if (type->family == NFPROTO_UNSPEC) list_add_tail_rcu(&type->list, &nf_tables_expressions); @@ -2054,6 +2075,10 @@ static int nf_tables_expr_parse(const struct nft_ctx *ctx, err = PTR_ERR(ops); goto err1; } + if (!nft_expr_check_ops(ops)) { + err = -EINVAL; + goto err1; + } } else ops = type->ops; @@ -2434,7 +2459,6 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx, { struct nft_expr *expr; - lockdep_assert_held(&ctx->net->nft.commit_mutex); /* * Careful: some expressions might not be initialized in case this * is called on error from nf_tables_newrule(). 
@@ -3567,13 +3591,6 @@ static void nft_set_destroy(struct nft_set *set) kvfree(set); } -static void nf_tables_set_destroy(const struct nft_ctx *ctx, struct nft_set *set) -{ - list_del_rcu(&set->list); - nf_tables_set_notify(ctx, set, NFT_MSG_DELSET, GFP_ATOMIC); - nft_set_destroy(set); -} - static int nf_tables_delset(struct net *net, struct sock *nlsk, struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const nla[], @@ -3668,17 +3685,38 @@ bind: } EXPORT_SYMBOL_GPL(nf_tables_bind_set); -void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set, +void nf_tables_rebind_set(const struct nft_ctx *ctx, struct nft_set *set, struct nft_set_binding *binding) { + if (list_empty(&set->bindings) && nft_set_is_anonymous(set) && + nft_is_active(ctx->net, set)) + list_add_tail_rcu(&set->list, &ctx->table->sets); + + list_add_tail_rcu(&binding->list, &set->bindings); +} +EXPORT_SYMBOL_GPL(nf_tables_rebind_set); + +void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set, + struct nft_set_binding *binding) +{ list_del_rcu(&binding->list); if (list_empty(&set->bindings) && nft_set_is_anonymous(set) && nft_is_active(ctx->net, set)) - nf_tables_set_destroy(ctx, set); + list_del_rcu(&set->list); } EXPORT_SYMBOL_GPL(nf_tables_unbind_set); +void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set) +{ + if (list_empty(&set->bindings) && nft_set_is_anonymous(set) && + nft_is_active(ctx->net, set)) { + nf_tables_set_notify(ctx, set, NFT_MSG_DELSET, GFP_ATOMIC); + nft_set_destroy(set); + } +} +EXPORT_SYMBOL_GPL(nf_tables_destroy_set); + const struct nft_set_ext_type nft_set_ext_types[] = { [NFT_SET_EXT_KEY] = { .align = __alignof__(u32), @@ -6191,19 +6229,28 @@ static void nft_commit_release(struct nft_trans *trans) nf_tables_flowtable_destroy(nft_trans_flowtable(trans)); break; } + + if (trans->put_net) + put_net(trans->ctx.net); + kfree(trans); } -static void nf_tables_commit_release(struct net *net) +static void nf_tables_trans_destroy_work(struct work_struct *w) { struct nft_trans *trans, *next; + LIST_HEAD(head); - if (list_empty(&net->nft.commit_list)) + spin_lock(&nf_tables_destroy_list_lock); + list_splice_init(&nf_tables_destroy_list, &head); + spin_unlock(&nf_tables_destroy_list_lock); + + if (list_empty(&head)) return; synchronize_rcu(); - list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) { + list_for_each_entry_safe(trans, next, &head, list) { list_del(&trans->list); nft_commit_release(trans); } @@ -6334,6 +6381,37 @@ static void nft_chain_del(struct nft_chain *chain) list_del_rcu(&chain->list); } +static void nf_tables_commit_release(struct net *net) +{ + struct nft_trans *trans; + + /* all side effects have to be made visible. + * For example, if a chain named 'foo' has been deleted, a + * new transaction must not find it anymore. + * + * Memory reclaim happens asynchronously from work queue + * to prevent expensive synchronize_rcu() in commit phase. 
+ */ + if (list_empty(&net->nft.commit_list)) { + mutex_unlock(&net->nft.commit_mutex); + return; + } + + trans = list_last_entry(&net->nft.commit_list, + struct nft_trans, list); + get_net(trans->ctx.net); + WARN_ON_ONCE(trans->put_net); + + trans->put_net = true; + spin_lock(&nf_tables_destroy_list_lock); + list_splice_tail_init(&net->nft.commit_list, &nf_tables_destroy_list); + spin_unlock(&nf_tables_destroy_list_lock); + + mutex_unlock(&net->nft.commit_mutex); + + schedule_work(&trans_destroy_work); +} + static int nf_tables_commit(struct net *net, struct sk_buff *skb) { struct nft_trans *trans, *next; @@ -6495,9 +6573,8 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) } } - nf_tables_commit_release(net); nf_tables_gen_notify(net, skb, NFT_MSG_NEWGEN); - mutex_unlock(&net->nft.commit_mutex); + nf_tables_commit_release(net); return 0; } @@ -7168,7 +7245,8 @@ int __nft_release_basechain(struct nft_ctx *ctx) { struct nft_rule *rule, *nr; - BUG_ON(!nft_is_base_chain(ctx->chain)); + if (WARN_ON(!nft_is_base_chain(ctx->chain))) + return 0; nf_tables_unregister_hook(ctx->net, ctx->chain->table, ctx->chain); list_for_each_entry_safe(rule, nr, &ctx->chain->rules, list) { @@ -7271,6 +7349,7 @@ static int __init nf_tables_module_init(void) { int err; + spin_lock_init(&nf_tables_destroy_list_lock); err = register_pernet_subsys(&nf_tables_net_ops); if (err < 0) return err; @@ -7310,6 +7389,7 @@ static void __exit nf_tables_module_exit(void) unregister_netdevice_notifier(&nf_tables_flowtable_notifier); nft_chain_filter_fini(); unregister_pernet_subsys(&nf_tables_net_ops); + cancel_work_sync(&trans_destroy_work); rcu_barrier(); nf_tables_core_module_exit(); } diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c index ffd5c0f9412b..3fbce3b9c5ec 100644 --- a/net/netfilter/nf_tables_core.c +++ b/net/netfilter/nf_tables_core.c @@ -249,12 +249,24 @@ static struct nft_expr_type *nft_basic_types[] = { &nft_exthdr_type, }; +static struct nft_object_type *nft_basic_objects[] = { +#ifdef CONFIG_NETWORK_SECMARK + &nft_secmark_obj_type, +#endif +}; + int __init nf_tables_core_module_init(void) { - int err, i; + int err, i, j = 0; + + for (i = 0; i < ARRAY_SIZE(nft_basic_objects); i++) { + err = nft_register_obj(nft_basic_objects[i]); + if (err) + goto err; + } - for (i = 0; i < ARRAY_SIZE(nft_basic_types); i++) { - err = nft_register_expr(nft_basic_types[i]); + for (j = 0; j < ARRAY_SIZE(nft_basic_types); j++) { + err = nft_register_expr(nft_basic_types[j]); if (err) goto err; } @@ -262,8 +274,12 @@ int __init nf_tables_core_module_init(void) return 0; err: + while (j-- > 0) + nft_unregister_expr(nft_basic_types[j]); + while (i-- > 0) - nft_unregister_expr(nft_basic_types[i]); + nft_unregister_obj(nft_basic_objects[i]); + return err; } @@ -274,4 +290,8 @@ void nf_tables_core_module_exit(void) i = ARRAY_SIZE(nft_basic_types); while (i-- > 0) nft_unregister_expr(nft_basic_types[i]); + + i = ARRAY_SIZE(nft_basic_objects); + while (i-- > 0) + nft_unregister_obj(nft_basic_objects[i]); } diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c index a30f8ba4b89a..b48545b84ce8 100644 --- a/net/netfilter/nfnetlink_cttimeout.c +++ b/net/netfilter/nfnetlink_cttimeout.c @@ -53,9 +53,6 @@ ctnl_timeout_parse_policy(void *timeout, struct nlattr **tb; int ret = 0; - if (!l4proto->ctnl_timeout.nlattr_to_obj) - return 0; - tb = kcalloc(l4proto->ctnl_timeout.nlattr_max + 1, sizeof(*tb), GFP_KERNEL); @@ -125,7 +122,7 @@ static int 
cttimeout_new_timeout(struct net *net, struct sock *ctnl, return -EBUSY; } - l4proto = nf_ct_l4proto_find_get(l3num, l4num); + l4proto = nf_ct_l4proto_find_get(l4num); /* This protocol is not supportted, skip. */ if (l4proto->l4proto != l4num) { @@ -167,6 +164,8 @@ ctnl_timeout_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type, struct nfgenmsg *nfmsg; unsigned int flags = portid ? NLM_F_MULTI : 0; const struct nf_conntrack_l4proto *l4proto = timeout->timeout.l4proto; + struct nlattr *nest_parms; + int ret; event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK_TIMEOUT, event); nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags); @@ -186,22 +185,15 @@ ctnl_timeout_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type, htonl(refcount_read(&timeout->refcnt)))) goto nla_put_failure; - if (likely(l4proto->ctnl_timeout.obj_to_nlattr)) { - struct nlattr *nest_parms; - int ret; - - nest_parms = nla_nest_start(skb, - CTA_TIMEOUT_DATA | NLA_F_NESTED); - if (!nest_parms) - goto nla_put_failure; + nest_parms = nla_nest_start(skb, CTA_TIMEOUT_DATA | NLA_F_NESTED); + if (!nest_parms) + goto nla_put_failure; - ret = l4proto->ctnl_timeout.obj_to_nlattr(skb, - &timeout->timeout.data); - if (ret < 0) - goto nla_put_failure; + ret = l4proto->ctnl_timeout.obj_to_nlattr(skb, &timeout->timeout.data); + if (ret < 0) + goto nla_put_failure; - nla_nest_end(skb, nest_parms); - } + nla_nest_end(skb, nest_parms); nlmsg_end(skb, nlh); return skb->len; @@ -369,7 +361,7 @@ static int cttimeout_default_set(struct net *net, struct sock *ctnl, l3num = ntohs(nla_get_be16(cda[CTA_TIMEOUT_L3PROTO])); l4num = nla_get_u8(cda[CTA_TIMEOUT_L4PROTO]); - l4proto = nf_ct_l4proto_find_get(l3num, l4num); + l4proto = nf_ct_l4proto_find_get(l4num); /* This protocol is not supported, skip. */ if (l4proto->l4proto != l4num) { @@ -391,12 +383,14 @@ err: static int cttimeout_default_fill_info(struct net *net, struct sk_buff *skb, u32 portid, - u32 seq, u32 type, int event, + u32 seq, u32 type, int event, u16 l3num, const struct nf_conntrack_l4proto *l4proto) { struct nlmsghdr *nlh; struct nfgenmsg *nfmsg; unsigned int flags = portid ? 
NLM_F_MULTI : 0; + struct nlattr *nest_parms; + int ret; event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK_TIMEOUT, event); nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags); @@ -408,25 +402,19 @@ cttimeout_default_fill_info(struct net *net, struct sk_buff *skb, u32 portid, nfmsg->version = NFNETLINK_V0; nfmsg->res_id = 0; - if (nla_put_be16(skb, CTA_TIMEOUT_L3PROTO, htons(l4proto->l3proto)) || + if (nla_put_be16(skb, CTA_TIMEOUT_L3PROTO, htons(l3num)) || nla_put_u8(skb, CTA_TIMEOUT_L4PROTO, l4proto->l4proto)) goto nla_put_failure; - if (likely(l4proto->ctnl_timeout.obj_to_nlattr)) { - struct nlattr *nest_parms; - int ret; - - nest_parms = nla_nest_start(skb, - CTA_TIMEOUT_DATA | NLA_F_NESTED); - if (!nest_parms) - goto nla_put_failure; + nest_parms = nla_nest_start(skb, CTA_TIMEOUT_DATA | NLA_F_NESTED); + if (!nest_parms) + goto nla_put_failure; - ret = l4proto->ctnl_timeout.obj_to_nlattr(skb, NULL); - if (ret < 0) - goto nla_put_failure; + ret = l4proto->ctnl_timeout.obj_to_nlattr(skb, NULL); + if (ret < 0) + goto nla_put_failure; - nla_nest_end(skb, nest_parms); - } + nla_nest_end(skb, nest_parms); nlmsg_end(skb, nlh); return skb->len; @@ -454,7 +442,7 @@ static int cttimeout_default_get(struct net *net, struct sock *ctnl, l3num = ntohs(nla_get_be16(cda[CTA_TIMEOUT_L3PROTO])); l4num = nla_get_u8(cda[CTA_TIMEOUT_L4PROTO]); - l4proto = nf_ct_l4proto_find_get(l3num, l4num); + l4proto = nf_ct_l4proto_find_get(l4num); /* This protocol is not supported, skip. */ if (l4proto->l4proto != l4num) { @@ -472,6 +460,7 @@ static int cttimeout_default_get(struct net *net, struct sock *ctnl, nlh->nlmsg_seq, NFNL_MSG_TYPE(nlh->nlmsg_type), IPCTNL_MSG_TIMEOUT_DEFAULT_SET, + l3num, l4proto); if (ret <= 0) { kfree_skb(skb2); diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index d33094f4ec41..43041f087eb3 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c @@ -765,7 +765,7 @@ __nfqnl_enqueue_packet_gso(struct net *net, struct nfqnl_instance *queue, return ret; } - skb->next = NULL; + skb_mark_not_on_list(skb); entry_seg = nf_queue_entry_dup(entry); if (entry_seg) { diff --git a/net/netfilter/nft_cmp.c b/net/netfilter/nft_cmp.c index fa90a8402845..79d48c1d06f4 100644 --- a/net/netfilter/nft_cmp.c +++ b/net/netfilter/nft_cmp.c @@ -79,7 +79,8 @@ static int nft_cmp_init(const struct nft_ctx *ctx, const struct nft_expr *expr, err = nft_data_init(NULL, &priv->data, sizeof(priv->data), &desc, tb[NFTA_CMP_DATA]); - BUG_ON(err < 0); + if (err < 0) + return err; priv->sreg = nft_parse_register(tb[NFTA_CMP_SREG]); err = nft_validate_register_load(priv->sreg, desc.len); @@ -129,7 +130,8 @@ static int nft_cmp_fast_init(const struct nft_ctx *ctx, err = nft_data_init(NULL, &data, sizeof(data), &desc, tb[NFTA_CMP_DATA]); - BUG_ON(err < 0); + if (err < 0) + return err; priv->sreg = nft_parse_register(tb[NFTA_CMP_SREG]); err = nft_validate_register_load(priv->sreg, desc.len); diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c index 5dd87748afa8..586627c361df 100644 --- a/net/netfilter/nft_ct.c +++ b/net/netfilter/nft_ct.c @@ -279,7 +279,7 @@ static void nft_ct_set_eval(const struct nft_expr *expr, { const struct nft_ct *priv = nft_expr_priv(expr); struct sk_buff *skb = pkt->skb; -#ifdef CONFIG_NF_CONNTRACK_MARK +#if defined(CONFIG_NF_CONNTRACK_MARK) || defined(CONFIG_NF_CONNTRACK_SECMARK) u32 value = regs->data[priv->sreg]; #endif enum ip_conntrack_info ctinfo; @@ -298,6 +298,14 @@ static void nft_ct_set_eval(const struct nft_expr *expr, } break; 
#endif +#ifdef CONFIG_NF_CONNTRACK_SECMARK + case NFT_CT_SECMARK: + if (ct->secmark != value) { + ct->secmark = value; + nf_conntrack_event_cache(IPCT_SECMARK, ct); + } + break; +#endif #ifdef CONFIG_NF_CONNTRACK_LABELS case NFT_CT_LABELS: nf_connlabels_replace(ct, @@ -565,6 +573,13 @@ static int nft_ct_set_init(const struct nft_ctx *ctx, len = sizeof(u32); break; #endif +#ifdef CONFIG_NF_CONNTRACK_SECMARK + case NFT_CT_SECMARK: + if (tb[NFTA_CT_DIRECTION]) + return -EINVAL; + len = sizeof(u32); + break; +#endif default: return -EOPNOTSUPP; } @@ -776,9 +791,6 @@ nft_ct_timeout_parse_policy(void *timeouts, struct nlattr **tb; int ret = 0; - if (!l4proto->ctnl_timeout.nlattr_to_obj) - return 0; - tb = kcalloc(l4proto->ctnl_timeout.nlattr_max + 1, sizeof(*tb), GFP_KERNEL); @@ -858,7 +870,7 @@ static int nft_ct_timeout_obj_init(const struct nft_ctx *ctx, l4num = nla_get_u8(tb[NFTA_CT_TIMEOUT_L4PROTO]); priv->l4proto = l4num; - l4proto = nf_ct_l4proto_find_get(l3num, l4num); + l4proto = nf_ct_l4proto_find_get(l4num); if (l4proto->l4proto != l4num) { ret = -EOPNOTSUPP; diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c index 6e91a37d57f2..07d4efd3d851 100644 --- a/net/netfilter/nft_dynset.c +++ b/net/netfilter/nft_dynset.c @@ -235,14 +235,31 @@ err1: return err; } +static void nft_dynset_activate(const struct nft_ctx *ctx, + const struct nft_expr *expr) +{ + struct nft_dynset *priv = nft_expr_priv(expr); + + nf_tables_rebind_set(ctx, priv->set, &priv->binding); +} + +static void nft_dynset_deactivate(const struct nft_ctx *ctx, + const struct nft_expr *expr) +{ + struct nft_dynset *priv = nft_expr_priv(expr); + + nf_tables_unbind_set(ctx, priv->set, &priv->binding); +} + static void nft_dynset_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr) { struct nft_dynset *priv = nft_expr_priv(expr); - nf_tables_unbind_set(ctx, priv->set, &priv->binding); if (priv->expr != NULL) nft_expr_destroy(ctx, priv->expr); + + nf_tables_destroy_set(ctx, priv->set); } static int nft_dynset_dump(struct sk_buff *skb, const struct nft_expr *expr) @@ -279,6 +296,8 @@ static const struct nft_expr_ops nft_dynset_ops = { .eval = nft_dynset_eval, .init = nft_dynset_init, .destroy = nft_dynset_destroy, + .activate = nft_dynset_activate, + .deactivate = nft_dynset_deactivate, .dump = nft_dynset_dump, }; diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c index ad13e8643599..227b2b15a19c 100644 --- a/net/netfilter/nft_lookup.c +++ b/net/netfilter/nft_lookup.c @@ -121,12 +121,28 @@ static int nft_lookup_init(const struct nft_ctx *ctx, return 0; } +static void nft_lookup_activate(const struct nft_ctx *ctx, + const struct nft_expr *expr) +{ + struct nft_lookup *priv = nft_expr_priv(expr); + + nf_tables_rebind_set(ctx, priv->set, &priv->binding); +} + +static void nft_lookup_deactivate(const struct nft_ctx *ctx, + const struct nft_expr *expr) +{ + struct nft_lookup *priv = nft_expr_priv(expr); + + nf_tables_unbind_set(ctx, priv->set, &priv->binding); +} + static void nft_lookup_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr) { struct nft_lookup *priv = nft_expr_priv(expr); - nf_tables_unbind_set(ctx, priv->set, &priv->binding); + nf_tables_destroy_set(ctx, priv->set); } static int nft_lookup_dump(struct sk_buff *skb, const struct nft_expr *expr) @@ -209,6 +225,8 @@ static const struct nft_expr_ops nft_lookup_ops = { .size = NFT_EXPR_SIZE(sizeof(struct nft_lookup)), .eval = nft_lookup_eval, .init = nft_lookup_init, + .activate = nft_lookup_activate, + .deactivate = 
nft_lookup_deactivate, .destroy = nft_lookup_destroy, .dump = nft_lookup_dump, .validate = nft_lookup_validate, diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c index 297fe7d97c18..6180626c3f80 100644 --- a/net/netfilter/nft_meta.c +++ b/net/netfilter/nft_meta.c @@ -284,6 +284,11 @@ static void nft_meta_set_eval(const struct nft_expr *expr, skb->nf_trace = !!value8; break; +#ifdef CONFIG_NETWORK_SECMARK + case NFT_META_SECMARK: + skb->secmark = value; + break; +#endif default: WARN_ON(1); } @@ -436,6 +441,9 @@ static int nft_meta_set_init(const struct nft_ctx *ctx, switch (priv->key) { case NFT_META_MARK: case NFT_META_PRIORITY: +#ifdef CONFIG_NETWORK_SECMARK + case NFT_META_SECMARK: +#endif len = sizeof(u32); break; case NFT_META_NFTRACE: @@ -543,3 +551,111 @@ struct nft_expr_type nft_meta_type __read_mostly = { .maxattr = NFTA_META_MAX, .owner = THIS_MODULE, }; + +#ifdef CONFIG_NETWORK_SECMARK +struct nft_secmark { + u32 secid; + char *ctx; +}; + +static const struct nla_policy nft_secmark_policy[NFTA_SECMARK_MAX + 1] = { + [NFTA_SECMARK_CTX] = { .type = NLA_STRING, .len = NFT_SECMARK_CTX_MAXLEN }, +}; + +static int nft_secmark_compute_secid(struct nft_secmark *priv) +{ + u32 tmp_secid = 0; + int err; + + err = security_secctx_to_secid(priv->ctx, strlen(priv->ctx), &tmp_secid); + if (err) + return err; + + if (!tmp_secid) + return -ENOENT; + + err = security_secmark_relabel_packet(tmp_secid); + if (err) + return err; + + priv->secid = tmp_secid; + return 0; +} + +static void nft_secmark_obj_eval(struct nft_object *obj, struct nft_regs *regs, + const struct nft_pktinfo *pkt) +{ + const struct nft_secmark *priv = nft_obj_data(obj); + struct sk_buff *skb = pkt->skb; + + skb->secmark = priv->secid; +} + +static int nft_secmark_obj_init(const struct nft_ctx *ctx, + const struct nlattr * const tb[], + struct nft_object *obj) +{ + struct nft_secmark *priv = nft_obj_data(obj); + int err; + + if (tb[NFTA_SECMARK_CTX] == NULL) + return -EINVAL; + + priv->ctx = nla_strdup(tb[NFTA_SECMARK_CTX], GFP_KERNEL); + if (!priv->ctx) + return -ENOMEM; + + err = nft_secmark_compute_secid(priv); + if (err) { + kfree(priv->ctx); + return err; + } + + security_secmark_refcount_inc(); + + return 0; +} + +static int nft_secmark_obj_dump(struct sk_buff *skb, struct nft_object *obj, + bool reset) +{ + struct nft_secmark *priv = nft_obj_data(obj); + int err; + + if (nla_put_string(skb, NFTA_SECMARK_CTX, priv->ctx)) + return -1; + + if (reset) { + err = nft_secmark_compute_secid(priv); + if (err) + return err; + } + + return 0; +} + +static void nft_secmark_obj_destroy(const struct nft_ctx *ctx, struct nft_object *obj) +{ + struct nft_secmark *priv = nft_obj_data(obj); + + security_secmark_refcount_dec(); + + kfree(priv->ctx); +} + +static const struct nft_object_ops nft_secmark_obj_ops = { + .type = &nft_secmark_obj_type, + .size = sizeof(struct nft_secmark), + .init = nft_secmark_obj_init, + .eval = nft_secmark_obj_eval, + .dump = nft_secmark_obj_dump, + .destroy = nft_secmark_obj_destroy, +}; +struct nft_object_type nft_secmark_obj_type __read_mostly = { + .type = NFT_OBJECT_SECMARK, + .ops = &nft_secmark_obj_ops, + .maxattr = NFTA_SECMARK_MAX, + .policy = nft_secmark_policy, + .owner = THIS_MODULE, +}; +#endif /* CONFIG_NETWORK_SECMARK */ diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c index cdf348f751ec..a3185ca2a3a9 100644 --- a/net/netfilter/nft_objref.c +++ b/net/netfilter/nft_objref.c @@ -155,12 +155,28 @@ nla_put_failure: return -1; } +static void 
nft_objref_map_activate(const struct nft_ctx *ctx, + const struct nft_expr *expr) +{ + struct nft_objref_map *priv = nft_expr_priv(expr); + + nf_tables_rebind_set(ctx, priv->set, &priv->binding); +} + +static void nft_objref_map_deactivate(const struct nft_ctx *ctx, + const struct nft_expr *expr) +{ + struct nft_objref_map *priv = nft_expr_priv(expr); + + nf_tables_unbind_set(ctx, priv->set, &priv->binding); +} + static void nft_objref_map_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr) { struct nft_objref_map *priv = nft_expr_priv(expr); - nf_tables_unbind_set(ctx, priv->set, &priv->binding); + nf_tables_destroy_set(ctx, priv->set); } static struct nft_expr_type nft_objref_type; @@ -169,6 +185,8 @@ static const struct nft_expr_ops nft_objref_map_ops = { .size = NFT_EXPR_SIZE(sizeof(struct nft_objref_map)), .eval = nft_objref_map_eval, .init = nft_objref_map_init, + .activate = nft_objref_map_activate, + .deactivate = nft_objref_map_deactivate, .destroy = nft_objref_map_destroy, .dump = nft_objref_map_dump, }; diff --git a/net/netfilter/nft_reject.c b/net/netfilter/nft_reject.c index 29f5bd2377b0..b48e58cceeb7 100644 --- a/net/netfilter/nft_reject.c +++ b/net/netfilter/nft_reject.c @@ -94,7 +94,8 @@ static u8 icmp_code_v4[NFT_REJECT_ICMPX_MAX + 1] = { int nft_reject_icmp_code(u8 code) { - BUG_ON(code > NFT_REJECT_ICMPX_MAX); + if (WARN_ON_ONCE(code > NFT_REJECT_ICMPX_MAX)) + return ICMP_NET_UNREACH; return icmp_code_v4[code]; } @@ -111,7 +112,8 @@ static u8 icmp_code_v6[NFT_REJECT_ICMPX_MAX + 1] = { int nft_reject_icmpv6_code(u8 code) { - BUG_ON(code > NFT_REJECT_ICMPX_MAX); + if (WARN_ON_ONCE(code > NFT_REJECT_ICMPX_MAX)) + return ICMPV6_NOROUTE; return icmp_code_v6[code]; } diff --git a/net/netfilter/nft_rt.c b/net/netfilter/nft_rt.c index 76dba9f6b6f6..f35fa33913ae 100644 --- a/net/netfilter/nft_rt.c +++ b/net/netfilter/nft_rt.c @@ -90,6 +90,11 @@ static void nft_rt_get_eval(const struct nft_expr *expr, case NFT_RT_TCPMSS: nft_reg_store16(dest, get_tcpmss(pkt, dst)); break; +#ifdef CONFIG_XFRM + case NFT_RT_XFRM: + nft_reg_store8(dest, !!dst->xfrm); + break; +#endif default: WARN_ON(1); goto err; @@ -130,6 +135,11 @@ static int nft_rt_get_init(const struct nft_ctx *ctx, case NFT_RT_TCPMSS: len = sizeof(u16); break; +#ifdef CONFIG_XFRM + case NFT_RT_XFRM: + len = sizeof(u8); + break; +#endif default: return -EOPNOTSUPP; } @@ -164,6 +174,7 @@ static int nft_rt_validate(const struct nft_ctx *ctx, const struct nft_expr *exp case NFT_RT_NEXTHOP4: case NFT_RT_NEXTHOP6: case NFT_RT_CLASSID: + case NFT_RT_XFRM: return 0; case NFT_RT_TCPMSS: hooks = (1 << NF_INET_FORWARD) | diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c index 015124e649cb..339a9dd1c832 100644 --- a/net/netfilter/nft_set_hash.c +++ b/net/netfilter/nft_set_hash.c @@ -88,7 +88,7 @@ static bool nft_rhash_lookup(const struct net *net, const struct nft_set *set, .key = key, }; - he = rhashtable_lookup_fast(&priv->ht, &arg, nft_rhash_params); + he = rhashtable_lookup(&priv->ht, &arg, nft_rhash_params); if (he != NULL) *ext = &he->ext; @@ -106,7 +106,7 @@ static void *nft_rhash_get(const struct net *net, const struct nft_set *set, .key = elem->key.val.data, }; - he = rhashtable_lookup_fast(&priv->ht, &arg, nft_rhash_params); + he = rhashtable_lookup(&priv->ht, &arg, nft_rhash_params); if (he != NULL) return he; @@ -129,7 +129,7 @@ static bool nft_rhash_update(struct nft_set *set, const u32 *key, .key = key, }; - he = rhashtable_lookup_fast(&priv->ht, &arg, nft_rhash_params); + he = 
rhashtable_lookup(&priv->ht, &arg, nft_rhash_params); if (he != NULL) goto out; @@ -217,7 +217,7 @@ static void *nft_rhash_deactivate(const struct net *net, }; rcu_read_lock(); - he = rhashtable_lookup_fast(&priv->ht, &arg, nft_rhash_params); + he = rhashtable_lookup(&priv->ht, &arg, nft_rhash_params); if (he != NULL && !nft_rhash_flush(net, set, he)) he = NULL; @@ -244,21 +244,15 @@ static void nft_rhash_walk(const struct nft_ctx *ctx, struct nft_set *set, struct nft_rhash_elem *he; struct rhashtable_iter hti; struct nft_set_elem elem; - int err; - - err = rhashtable_walk_init(&priv->ht, &hti, GFP_ATOMIC); - iter->err = err; - if (err) - return; + rhashtable_walk_enter(&priv->ht, &hti); rhashtable_walk_start(&hti); while ((he = rhashtable_walk_next(&hti))) { if (IS_ERR(he)) { - err = PTR_ERR(he); - if (err != -EAGAIN) { - iter->err = err; - goto out; + if (PTR_ERR(he) != -EAGAIN) { + iter->err = PTR_ERR(he); + break; } continue; @@ -275,13 +269,11 @@ static void nft_rhash_walk(const struct nft_ctx *ctx, struct nft_set *set, iter->err = iter->fn(ctx, set, iter, &elem); if (iter->err < 0) - goto out; + break; cont: iter->count++; } - -out: rhashtable_walk_stop(&hti); rhashtable_walk_exit(&hti); } @@ -293,21 +285,17 @@ static void nft_rhash_gc(struct work_struct *work) struct nft_rhash *priv; struct nft_set_gc_batch *gcb = NULL; struct rhashtable_iter hti; - int err; priv = container_of(work, struct nft_rhash, gc_work.work); set = nft_set_container_of(priv); - err = rhashtable_walk_init(&priv->ht, &hti, GFP_KERNEL); - if (err) - goto schedule; - + rhashtable_walk_enter(&priv->ht, &hti); rhashtable_walk_start(&hti); while ((he = rhashtable_walk_next(&hti))) { if (IS_ERR(he)) { if (PTR_ERR(he) != -EAGAIN) - goto out; + break; continue; } @@ -326,17 +314,15 @@ gc: gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC); if (gcb == NULL) - goto out; + break; rhashtable_remove_fast(&priv->ht, &he->node, nft_rhash_params); atomic_dec(&set->nelems); nft_set_gc_batch_add(gcb, he); } -out: rhashtable_walk_stop(&hti); rhashtable_walk_exit(&hti); nft_set_gc_batch_complete(gcb); -schedule: queue_delayed_work(system_power_efficient_wq, &priv->gc_work, nft_set_gc_interval(set)); } diff --git a/net/netfilter/nft_xfrm.c b/net/netfilter/nft_xfrm.c new file mode 100644 index 000000000000..3cf71a2e375b --- /dev/null +++ b/net/netfilter/nft_xfrm.c @@ -0,0 +1,293 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Generic part shared by ipv4 and ipv6 backends. 
+ */ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/netlink.h> +#include <linux/netfilter.h> +#include <linux/netfilter/nf_tables.h> +#include <net/netfilter/nf_tables_core.h> +#include <net/netfilter/nf_tables.h> +#include <linux/in.h> +#include <net/xfrm.h> + +static const struct nla_policy nft_xfrm_policy[NFTA_XFRM_MAX + 1] = { + [NFTA_XFRM_KEY] = { .type = NLA_U32 }, + [NFTA_XFRM_DIR] = { .type = NLA_U8 }, + [NFTA_XFRM_SPNUM] = { .type = NLA_U32 }, + [NFTA_XFRM_DREG] = { .type = NLA_U32 }, +}; + +struct nft_xfrm { + enum nft_xfrm_keys key:8; + enum nft_registers dreg:8; + u8 dir; + u8 spnum; +}; + +static int nft_xfrm_get_init(const struct nft_ctx *ctx, + const struct nft_expr *expr, + const struct nlattr * const tb[]) +{ + struct nft_xfrm *priv = nft_expr_priv(expr); + unsigned int len = 0; + u32 spnum = 0; + u8 dir; + + if (!tb[NFTA_XFRM_KEY] || !tb[NFTA_XFRM_DIR] || !tb[NFTA_XFRM_DREG]) + return -EINVAL; + + switch (ctx->family) { + case NFPROTO_IPV4: + case NFPROTO_IPV6: + case NFPROTO_INET: + break; + default: + return -EOPNOTSUPP; + } + + priv->key = ntohl(nla_get_u32(tb[NFTA_XFRM_KEY])); + switch (priv->key) { + case NFT_XFRM_KEY_REQID: + case NFT_XFRM_KEY_SPI: + len = sizeof(u32); + break; + case NFT_XFRM_KEY_DADDR_IP4: + case NFT_XFRM_KEY_SADDR_IP4: + len = sizeof(struct in_addr); + break; + case NFT_XFRM_KEY_DADDR_IP6: + case NFT_XFRM_KEY_SADDR_IP6: + len = sizeof(struct in6_addr); + break; + default: + return -EINVAL; + } + + dir = nla_get_u8(tb[NFTA_XFRM_DIR]); + switch (dir) { + case XFRM_POLICY_IN: + case XFRM_POLICY_OUT: + priv->dir = dir; + break; + default: + return -EINVAL; + } + + if (tb[NFTA_XFRM_SPNUM]) + spnum = ntohl(nla_get_be32(tb[NFTA_XFRM_SPNUM])); + + if (spnum >= XFRM_MAX_DEPTH) + return -ERANGE; + + priv->spnum = spnum; + + priv->dreg = nft_parse_register(tb[NFTA_XFRM_DREG]); + return nft_validate_register_store(ctx, priv->dreg, NULL, + NFT_DATA_VALUE, len); +} + +/* Return true if key asks for daddr/saddr and current + * state does have a valid address (BEET, TUNNEL). 
+ */ +static bool xfrm_state_addr_ok(enum nft_xfrm_keys k, u8 family, u8 mode) +{ + switch (k) { + case NFT_XFRM_KEY_DADDR_IP4: + case NFT_XFRM_KEY_SADDR_IP4: + if (family == NFPROTO_IPV4) + break; + return false; + case NFT_XFRM_KEY_DADDR_IP6: + case NFT_XFRM_KEY_SADDR_IP6: + if (family == NFPROTO_IPV6) + break; + return false; + default: + return true; + } + + return mode == XFRM_MODE_BEET || mode == XFRM_MODE_TUNNEL; +} + +static void nft_xfrm_state_get_key(const struct nft_xfrm *priv, + struct nft_regs *regs, + const struct xfrm_state *state, + u8 family) +{ + u32 *dest = ®s->data[priv->dreg]; + + if (!xfrm_state_addr_ok(priv->key, family, state->props.mode)) { + regs->verdict.code = NFT_BREAK; + return; + } + + switch (priv->key) { + case NFT_XFRM_KEY_UNSPEC: + case __NFT_XFRM_KEY_MAX: + WARN_ON_ONCE(1); + break; + case NFT_XFRM_KEY_DADDR_IP4: + *dest = state->id.daddr.a4; + return; + case NFT_XFRM_KEY_DADDR_IP6: + memcpy(dest, &state->id.daddr.in6, sizeof(struct in6_addr)); + return; + case NFT_XFRM_KEY_SADDR_IP4: + *dest = state->props.saddr.a4; + return; + case NFT_XFRM_KEY_SADDR_IP6: + memcpy(dest, &state->props.saddr.in6, sizeof(struct in6_addr)); + return; + case NFT_XFRM_KEY_REQID: + *dest = state->props.reqid; + return; + case NFT_XFRM_KEY_SPI: + *dest = state->id.spi; + return; + } + + regs->verdict.code = NFT_BREAK; +} + +static void nft_xfrm_get_eval_in(const struct nft_xfrm *priv, + struct nft_regs *regs, + const struct nft_pktinfo *pkt) +{ + const struct sec_path *sp = pkt->skb->sp; + const struct xfrm_state *state; + + if (sp == NULL || sp->len <= priv->spnum) { + regs->verdict.code = NFT_BREAK; + return; + } + + state = sp->xvec[priv->spnum]; + nft_xfrm_state_get_key(priv, regs, state, nft_pf(pkt)); +} + +static void nft_xfrm_get_eval_out(const struct nft_xfrm *priv, + struct nft_regs *regs, + const struct nft_pktinfo *pkt) +{ + const struct dst_entry *dst = skb_dst(pkt->skb); + int i; + + for (i = 0; dst && dst->xfrm; + dst = ((const struct xfrm_dst *)dst)->child, i++) { + if (i < priv->spnum) + continue; + + nft_xfrm_state_get_key(priv, regs, dst->xfrm, nft_pf(pkt)); + return; + } + + regs->verdict.code = NFT_BREAK; +} + +static void nft_xfrm_get_eval(const struct nft_expr *expr, + struct nft_regs *regs, + const struct nft_pktinfo *pkt) +{ + const struct nft_xfrm *priv = nft_expr_priv(expr); + + switch (priv->dir) { + case XFRM_POLICY_IN: + nft_xfrm_get_eval_in(priv, regs, pkt); + break; + case XFRM_POLICY_OUT: + nft_xfrm_get_eval_out(priv, regs, pkt); + break; + default: + WARN_ON_ONCE(1); + regs->verdict.code = NFT_BREAK; + break; + } +} + +static int nft_xfrm_get_dump(struct sk_buff *skb, + const struct nft_expr *expr) +{ + const struct nft_xfrm *priv = nft_expr_priv(expr); + + if (nft_dump_register(skb, NFTA_XFRM_DREG, priv->dreg)) + return -1; + + if (nla_put_be32(skb, NFTA_XFRM_KEY, htonl(priv->key))) + return -1; + if (nla_put_u8(skb, NFTA_XFRM_DIR, priv->dir)) + return -1; + if (nla_put_be32(skb, NFTA_XFRM_SPNUM, htonl(priv->spnum))) + return -1; + + return 0; +} + +static int nft_xfrm_validate(const struct nft_ctx *ctx, const struct nft_expr *expr, + const struct nft_data **data) +{ + const struct nft_xfrm *priv = nft_expr_priv(expr); + unsigned int hooks; + + switch (priv->dir) { + case XFRM_POLICY_IN: + hooks = (1 << NF_INET_FORWARD) | + (1 << NF_INET_LOCAL_IN) | + (1 << NF_INET_PRE_ROUTING); + break; + case XFRM_POLICY_OUT: + hooks = (1 << NF_INET_FORWARD) | + (1 << NF_INET_LOCAL_OUT) | + (1 << NF_INET_POST_ROUTING); + break; + default: + WARN_ON_ONCE(1); 
+ return -EINVAL; + } + + return nft_chain_validate_hooks(ctx->chain, hooks); +} + + +static struct nft_expr_type nft_xfrm_type; +static const struct nft_expr_ops nft_xfrm_get_ops = { + .type = &nft_xfrm_type, + .size = NFT_EXPR_SIZE(sizeof(struct nft_xfrm)), + .eval = nft_xfrm_get_eval, + .init = nft_xfrm_get_init, + .dump = nft_xfrm_get_dump, + .validate = nft_xfrm_validate, +}; + +static struct nft_expr_type nft_xfrm_type __read_mostly = { + .name = "xfrm", + .ops = &nft_xfrm_get_ops, + .policy = nft_xfrm_policy, + .maxattr = NFTA_XFRM_MAX, + .owner = THIS_MODULE, +}; + +static int __init nft_xfrm_module_init(void) +{ + return nft_register_expr(&nft_xfrm_type); +} + +static void __exit nft_xfrm_module_exit(void) +{ + nft_unregister_expr(&nft_xfrm_type); +} + +module_init(nft_xfrm_module_init); +module_exit(nft_xfrm_module_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("nf_tables: xfrm/IPSec matching"); +MODULE_AUTHOR("Florian Westphal <fw@strlen.de>"); +MODULE_AUTHOR("Máté Eckl <ecklm94@gmail.com>"); +MODULE_ALIAS_NFT_EXPR("xfrm"); diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c index 89457efd2e00..2c7a4b80206f 100644 --- a/net/netfilter/xt_CT.c +++ b/net/netfilter/xt_CT.c @@ -159,7 +159,7 @@ xt_ct_set_timeout(struct nf_conn *ct, const struct xt_tgchk_param *par, /* Make sure the timeout policy matches any existing protocol tracker, * otherwise default to generic. */ - l4proto = __nf_ct_l4proto_find(par->family, proto); + l4proto = __nf_ct_l4proto_find(proto); if (timeout->l4proto->l4proto != l4proto->l4proto) { ret = -EINVAL; pr_info_ratelimited("Timeout policy `%s' can only be used by L%d protocol number %d\n", diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c index 5ee859193783..c6acfc2d9c84 100644 --- a/net/netfilter/xt_IDLETIMER.c +++ b/net/netfilter/xt_IDLETIMER.c @@ -68,8 +68,6 @@ struct idletimer_tg *__idletimer_tg_find_by_label(const char *label) { struct idletimer_tg *entry; - BUG_ON(!label); - list_for_each_entry(entry, &idletimer_tg_list, entry) { if (!strcmp(label, entry->attr.attr.name)) return entry; @@ -172,8 +170,6 @@ static unsigned int idletimer_tg_target(struct sk_buff *skb, pr_debug("resetting timer %s, timeout period %u\n", info->label, info->timeout); - BUG_ON(!info->timer); - mod_timer(&info->timer->timer, msecs_to_jiffies(info->timeout * 1000) + jiffies); diff --git a/net/netfilter/xt_SECMARK.c b/net/netfilter/xt_SECMARK.c index 4ad5fe27e08b..f16202d26c20 100644 --- a/net/netfilter/xt_SECMARK.c +++ b/net/netfilter/xt_SECMARK.c @@ -35,8 +35,6 @@ secmark_tg(struct sk_buff *skb, const struct xt_action_param *par) u32 secmark = 0; const struct xt_secmark_target_info *info = par->targinfo; - BUG_ON(info->mode != mode); - switch (mode) { case SECMARK_MODE_SEL: secmark = info->secid; diff --git a/net/netfilter/xt_cgroup.c b/net/netfilter/xt_cgroup.c index 5d92e1781980..5cb1ecb29ea4 100644 --- a/net/netfilter/xt_cgroup.c +++ b/net/netfilter/xt_cgroup.c @@ -68,6 +68,38 @@ static int cgroup_mt_check_v1(const struct xt_mtchk_param *par) return 0; } +static int cgroup_mt_check_v2(const struct xt_mtchk_param *par) +{ + struct xt_cgroup_info_v2 *info = par->matchinfo; + struct cgroup *cgrp; + + if ((info->invert_path & ~1) || (info->invert_classid & ~1)) + return -EINVAL; + + if (!info->has_path && !info->has_classid) { + pr_info("xt_cgroup: no path or classid specified\n"); + return -EINVAL; + } + + if (info->has_path && info->has_classid) { + pr_info_ratelimited("path and classid specified\n"); + return -EINVAL; + } + + info->priv = 
NULL; + if (info->has_path) { + cgrp = cgroup_get_from_path(info->path); + if (IS_ERR(cgrp)) { + pr_info_ratelimited("invalid path, errno=%ld\n", + PTR_ERR(cgrp)); + return -EINVAL; + } + info->priv = cgrp; + } + + return 0; +} + static bool cgroup_mt_v0(const struct sk_buff *skb, struct xt_action_param *par) { @@ -99,6 +131,24 @@ static bool cgroup_mt_v1(const struct sk_buff *skb, struct xt_action_param *par) info->invert_classid; } +static bool cgroup_mt_v2(const struct sk_buff *skb, struct xt_action_param *par) +{ + const struct xt_cgroup_info_v2 *info = par->matchinfo; + struct sock_cgroup_data *skcd = &skb->sk->sk_cgrp_data; + struct cgroup *ancestor = info->priv; + struct sock *sk = skb->sk; + + if (!sk || !sk_fullsock(sk) || !net_eq(xt_net(par), sock_net(sk))) + return false; + + if (ancestor) + return cgroup_is_descendant(sock_cgroup_ptr(skcd), ancestor) ^ + info->invert_path; + else + return (info->classid == sock_cgroup_classid(skcd)) ^ + info->invert_classid; +} + static void cgroup_mt_destroy_v1(const struct xt_mtdtor_param *par) { struct xt_cgroup_info_v1 *info = par->matchinfo; @@ -107,6 +157,14 @@ static void cgroup_mt_destroy_v1(const struct xt_mtdtor_param *par) cgroup_put(info->priv); } +static void cgroup_mt_destroy_v2(const struct xt_mtdtor_param *par) +{ + struct xt_cgroup_info_v2 *info = par->matchinfo; + + if (info->priv) + cgroup_put(info->priv); +} + static struct xt_match cgroup_mt_reg[] __read_mostly = { { .name = "cgroup", @@ -134,6 +192,20 @@ static struct xt_match cgroup_mt_reg[] __read_mostly = { (1 << NF_INET_POST_ROUTING) | (1 << NF_INET_LOCAL_IN), }, + { + .name = "cgroup", + .revision = 2, + .family = NFPROTO_UNSPEC, + .checkentry = cgroup_mt_check_v2, + .match = cgroup_mt_v2, + .matchsize = sizeof(struct xt_cgroup_info_v2), + .usersize = offsetof(struct xt_cgroup_info_v2, priv), + .destroy = cgroup_mt_destroy_v2, + .me = THIS_MODULE, + .hooks = (1 << NF_INET_LOCAL_OUT) | + (1 << NF_INET_POST_ROUTING) | + (1 << NF_INET_LOCAL_IN), + }, }; static int __init cgroup_mt_init(void) diff --git a/net/netfilter/xt_quota.c b/net/netfilter/xt_quota.c index 10d61a6eed71..fceae245eb03 100644 --- a/net/netfilter/xt_quota.c +++ b/net/netfilter/xt_quota.c @@ -11,11 +11,6 @@ #include <linux/netfilter/xt_quota.h> #include <linux/module.h> -struct xt_quota_priv { - spinlock_t lock; - uint64_t quota; -}; - MODULE_LICENSE("GPL"); MODULE_AUTHOR("Sam Johnston <samj@samj.net>"); MODULE_DESCRIPTION("Xtables: countdown quota match"); @@ -26,54 +21,48 @@ static bool quota_mt(const struct sk_buff *skb, struct xt_action_param *par) { struct xt_quota_info *q = (void *)par->matchinfo; - struct xt_quota_priv *priv = q->master; + u64 current_count = atomic64_read(&q->counter); bool ret = q->flags & XT_QUOTA_INVERT; - - spin_lock_bh(&priv->lock); - if (priv->quota >= skb->len) { - priv->quota -= skb->len; - ret = !ret; - } else { - /* we do not allow even small packets from now on */ - priv->quota = 0; - } - spin_unlock_bh(&priv->lock); - - return ret; + u64 old_count, new_count; + + do { + if (current_count == 1) + return ret; + if (current_count <= skb->len) { + atomic64_set(&q->counter, 1); + return ret; + } + old_count = current_count; + new_count = current_count - skb->len; + current_count = atomic64_cmpxchg(&q->counter, old_count, + new_count); + } while (current_count != old_count); + return !ret; } static int quota_mt_check(const struct xt_mtchk_param *par) { struct xt_quota_info *q = par->matchinfo; + BUILD_BUG_ON(sizeof(atomic64_t) != sizeof(__u64)); + if (q->flags & 
~XT_QUOTA_MASK) return -EINVAL; + if (atomic64_read(&q->counter) > q->quota + 1) + return -ERANGE; - q->master = kmalloc(sizeof(*q->master), GFP_KERNEL); - if (q->master == NULL) - return -ENOMEM; - - spin_lock_init(&q->master->lock); - q->master->quota = q->quota; + if (atomic64_read(&q->counter) == 0) + atomic64_set(&q->counter, q->quota + 1); return 0; } -static void quota_mt_destroy(const struct xt_mtdtor_param *par) -{ - const struct xt_quota_info *q = par->matchinfo; - - kfree(q->master); -} - static struct xt_match quota_mt_reg __read_mostly = { .name = "quota", .revision = 0, .family = NFPROTO_UNSPEC, .match = quota_mt, .checkentry = quota_mt_check, - .destroy = quota_mt_destroy, .matchsize = sizeof(struct xt_quota_info), - .usersize = offsetof(struct xt_quota_info, master), .me = THIS_MODULE, }; diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 930d17fa906c..e613a9f89600 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -574,11 +574,6 @@ static int netlink_insert(struct sock *sk, u32 portid) if (nlk_sk(sk)->bound) goto err; - err = -ENOMEM; - if (BITS_PER_LONG > 32 && - unlikely(atomic_read(&table->hash.nelems) >= UINT_MAX)) - goto err; - nlk_sk(sk)->portid = portid; sock_hold(sk); @@ -993,7 +988,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr, struct netlink_sock *nlk = nlk_sk(sk); struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr; int err = 0; - long unsigned int groups = nladdr->nl_groups; + unsigned long groups = nladdr->nl_groups; bool bound; if (addr_len < sizeof(struct sockaddr_nl)) @@ -1011,9 +1006,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr, return err; } - if (nlk->ngroups == 0) - groups = 0; - else if (nlk->ngroups < 8*sizeof(groups)) + if (nlk->ngroups < BITS_PER_LONG) groups &= (1UL << nlk->ngroups) - 1; bound = nlk->bound; @@ -1713,6 +1706,13 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname, nlk->flags &= ~NETLINK_F_EXT_ACK; err = 0; break; + case NETLINK_DUMP_STRICT_CHK: + if (val) + nlk->flags |= NETLINK_F_STRICT_CHK; + else + nlk->flags &= ~NETLINK_F_STRICT_CHK; + err = 0; + break; default: err = -ENOPROTOOPT; } @@ -1806,6 +1806,15 @@ static int netlink_getsockopt(struct socket *sock, int level, int optname, return -EFAULT; err = 0; break; + case NETLINK_DUMP_STRICT_CHK: + if (len < sizeof(int)) + return -EINVAL; + len = sizeof(int); + val = nlk->flags & NETLINK_F_STRICT_CHK ? 
1 : 0; + if (put_user(len, optlen) || put_user(val, optval)) + return -EFAULT; + err = 0; + break; default: err = -ENOPROTOOPT; } @@ -2178,6 +2187,7 @@ EXPORT_SYMBOL(__nlmsg_put); static int netlink_dump(struct sock *sk) { struct netlink_sock *nlk = nlk_sk(sk); + struct netlink_ext_ack extack = {}; struct netlink_callback *cb; struct sk_buff *skb = NULL; struct nlmsghdr *nlh; @@ -2229,8 +2239,11 @@ static int netlink_dump(struct sock *sk) skb_reserve(skb, skb_tailroom(skb) - alloc_size); netlink_skb_set_owner_r(skb, sk); - if (nlk->dump_done_errno > 0) + if (nlk->dump_done_errno > 0) { + cb->extack = &extack; nlk->dump_done_errno = cb->dump(skb, cb); + cb->extack = NULL; + } if (nlk->dump_done_errno > 0 || skb_tailroom(skb) < nlmsg_total_size(sizeof(nlk->dump_done_errno))) { @@ -2253,6 +2266,12 @@ static int netlink_dump(struct sock *sk) memcpy(nlmsg_data(nlh), &nlk->dump_done_errno, sizeof(nlk->dump_done_errno)); + if (extack._msg && nlk->flags & NETLINK_F_EXT_ACK) { + nlh->nlmsg_flags |= NLM_F_ACK_TLVS; + if (!nla_put_string(skb, NLMSGERR_ATTR_MSG, extack._msg)) + nlmsg_end(skb, nlh); + } + if (sk_filter(sk, skb)) kfree_skb(skb); else @@ -2279,9 +2298,9 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb, const struct nlmsghdr *nlh, struct netlink_dump_control *control) { + struct netlink_sock *nlk, *nlk2; struct netlink_callback *cb; struct sock *sk; - struct netlink_sock *nlk; int ret; refcount_inc(&skb->users); @@ -2315,6 +2334,9 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb, cb->min_dump_alloc = control->min_dump_alloc; cb->skb = skb; + nlk2 = nlk_sk(NETLINK_CB(skb).sk); + cb->strict_check = !!(nlk2->flags & NETLINK_F_STRICT_CHK); + if (control->start) { ret = control->start(cb); if (ret) diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h index 962de7b3c023..5f454c8de6a4 100644 --- a/net/netlink/af_netlink.h +++ b/net/netlink/af_netlink.h @@ -15,6 +15,7 @@ #define NETLINK_F_LISTEN_ALL_NSID 0x10 #define NETLINK_F_CAP_ACK 0x20 #define NETLINK_F_EXT_ACK 0x40 +#define NETLINK_F_STRICT_CHK 0x80 #define NLGRPSZ(x) (ALIGN(x, sizeof(unsigned long) * 8) / 8) #define NLGRPLONGS(x) (NLGRPSZ(x)/sizeof(unsigned long)) diff --git a/net/nfc/nci/uart.c b/net/nfc/nci/uart.c index a66f102c6c01..4503937915ad 100644 --- a/net/nfc/nci/uart.c +++ b/net/nfc/nci/uart.c @@ -192,10 +192,8 @@ static void nci_uart_tty_close(struct tty_struct *tty) if (!nu) return; - if (nu->tx_skb) - kfree_skb(nu->tx_skb); - if (nu->rx_skb) - kfree_skb(nu->rx_skb); + kfree_skb(nu->tx_skb); + kfree_skb(nu->rx_skb); skb_queue_purge(&nu->tx_q); diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index 35ae64cbef33..6bec37ab4472 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c @@ -933,6 +933,11 @@ static int __ovs_ct_lookup(struct net *net, struct sw_flow_key *key, struct nf_conn *ct; if (!cached) { + struct nf_hook_state state = { + .hook = NF_INET_PRE_ROUTING, + .pf = info->family, + .net = net, + }; struct nf_conn *tmpl = info->ct; int err; @@ -944,8 +949,7 @@ static int __ovs_ct_lookup(struct net *net, struct sw_flow_key *key, nf_ct_set(skb, tmpl, IP_CT_NEW); } - err = nf_conntrack_in(net, info->family, - NF_INET_PRE_ROUTING, skb); + err = nf_conntrack_in(skb, &state); if (err != NF_ACCEPT) return -ENOENT; diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index 0f5ce77460d4..6679e96ab1dc 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -1182,14 +1182,14 @@ static int ovs_flow_cmd_set(struct 
sk_buff *skb, struct genl_info *info) ovs_header->dp_ifindex, reply, info->snd_portid, info->snd_seq, 0, - OVS_FLOW_CMD_NEW, + OVS_FLOW_CMD_SET, ufid_flags); BUG_ON(error < 0); } } else { /* Could not alloc without acts before locking. */ reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex, - info, OVS_FLOW_CMD_NEW, false, + info, OVS_FLOW_CMD_SET, false, ufid_flags); if (IS_ERR(reply)) { @@ -1265,7 +1265,7 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info) } reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex, info, - OVS_FLOW_CMD_NEW, true, ufid_flags); + OVS_FLOW_CMD_GET, true, ufid_flags); if (IS_ERR(reply)) { err = PTR_ERR(reply); goto unlock; @@ -1389,7 +1389,7 @@ static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb) if (ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex, skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NLM_F_MULTI, - OVS_FLOW_CMD_NEW, ufid_flags) < 0) + OVS_FLOW_CMD_GET, ufid_flags) < 0) break; cb->args[0] = bucket; @@ -1730,7 +1730,7 @@ static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info) ovs_dp_change(dp, info->attrs); err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid, - info->snd_seq, 0, OVS_DP_CMD_NEW); + info->snd_seq, 0, OVS_DP_CMD_SET); BUG_ON(err < 0); ovs_unlock(); @@ -1761,7 +1761,7 @@ static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info) goto err_unlock_free; } err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid, - info->snd_seq, 0, OVS_DP_CMD_NEW); + info->snd_seq, 0, OVS_DP_CMD_GET); BUG_ON(err < 0); ovs_unlock(); @@ -1785,7 +1785,7 @@ static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb) if (i >= skip && ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NLM_F_MULTI, - OVS_DP_CMD_NEW) < 0) + OVS_DP_CMD_GET) < 0) break; i++; } @@ -2101,7 +2101,7 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info) err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info), info->snd_portid, info->snd_seq, 0, - OVS_VPORT_CMD_NEW); + OVS_VPORT_CMD_SET); BUG_ON(err < 0); ovs_unlock(); @@ -2182,7 +2182,7 @@ static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info) goto exit_unlock_free; err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info), info->snd_portid, info->snd_seq, 0, - OVS_VPORT_CMD_NEW); + OVS_VPORT_CMD_GET); BUG_ON(err < 0); rcu_read_unlock(); @@ -2218,7 +2218,7 @@ static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb) NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NLM_F_MULTI, - OVS_VPORT_CMD_NEW) < 0) + OVS_VPORT_CMD_GET) < 0) goto out; j++; diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index 56b8e7167790..35966da84769 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c @@ -254,21 +254,18 @@ static bool icmphdr_ok(struct sk_buff *skb) static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key) { + unsigned short frag_off; + unsigned int payload_ofs = 0; unsigned int nh_ofs = skb_network_offset(skb); unsigned int nh_len; - int payload_ofs; struct ipv6hdr *nh; - uint8_t nexthdr; - __be16 frag_off; - int err; + int err, nexthdr, flags = 0; err = check_header(skb, nh_ofs + sizeof(*nh)); if (unlikely(err)) return err; nh = ipv6_hdr(skb); - nexthdr = nh->nexthdr; - payload_ofs = (u8 *)(nh + 1) - skb->data; key->ip.proto = NEXTHDR_NONE; key->ip.tos = ipv6_get_dsfield(nh); @@ -277,10 +274,9 @@ static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key) key->ipv6.addr.src = 
nh->saddr; key->ipv6.addr.dst = nh->daddr; - payload_ofs = ipv6_skip_exthdr(skb, payload_ofs, &nexthdr, &frag_off); - - if (frag_off) { - if (frag_off & htons(~0x7)) + nexthdr = ipv6_find_hdr(skb, &payload_ofs, -1, &frag_off, &flags); + if (flags & IP6_FH_F_FRAG) { + if (frag_off) key->ip.frag = OVS_FRAG_TYPE_LATER; else key->ip.frag = OVS_FRAG_TYPE_FIRST; @@ -288,11 +284,11 @@ static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key) key->ip.frag = OVS_FRAG_TYPE_NONE; } - /* Delayed handling of error in ipv6_skip_exthdr() as it - * always sets frag_off to a valid value which may be + /* Delayed handling of error in ipv6_find_hdr() as it + * always sets flags and frag_off to a valid value which may be * used to set key->ip.frag above. */ - if (unlikely(payload_ofs < 0)) + if (unlikely(nexthdr < 0)) return -EPROTO; nh_len = payload_ofs - nh_ofs; diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c index bb95c43aae76..26f71cbf7527 100644 --- a/net/openvswitch/vport-internal_dev.c +++ b/net/openvswitch/vport-internal_dev.c @@ -43,7 +43,8 @@ static struct internal_dev *internal_dev_priv(struct net_device *netdev) } /* Called with rcu_read_lock_bh. */ -static int internal_dev_xmit(struct sk_buff *skb, struct net_device *netdev) +static netdev_tx_t +internal_dev_xmit(struct sk_buff *skb, struct net_device *netdev) { int len, err; @@ -62,7 +63,7 @@ static int internal_dev_xmit(struct sk_buff *skb, struct net_device *netdev) } else { netdev->stats.tx_errors++; } - return 0; + return NETDEV_TX_OK; } static int internal_dev_open(struct net_device *netdev) diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index d6e94dc7e290..ec3095f13aae 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -3808,6 +3808,20 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv return fanout_set_data(po, optval, optlen); } + case PACKET_IGNORE_OUTGOING: + { + int val; + + if (optlen != sizeof(val)) + return -EINVAL; + if (copy_from_user(&val, optval, sizeof(val))) + return -EFAULT; + if (val < 0 || val > 1) + return -EINVAL; + + po->prot_hook.ignore_outgoing = !!val; + return 0; + } case PACKET_TX_HAS_OFF: { unsigned int val; @@ -3931,6 +3945,9 @@ static int packet_getsockopt(struct socket *sock, int level, int optname, ((u32)po->fanout->flags << 24)) : 0); break; + case PACKET_IGNORE_OUTGOING: + val = po->prot_hook.ignore_outgoing; + break; case PACKET_ROLLOVER_STATS: if (!po->rollover) return -EINVAL; diff --git a/net/rds/rds.h b/net/rds/rds.h index c4dcf654d8fe..6bfaf05b63b2 100644 --- a/net/rds/rds.h +++ b/net/rds/rds.h @@ -278,7 +278,7 @@ struct rds_incoming { struct in6_addr i_saddr; rds_rdma_cookie_t i_rdma_cookie; - struct timeval i_rx_tstamp; + ktime_t i_rx_tstamp; u64 i_rx_lat_trace[RDS_RX_MAX_TRACES]; }; diff --git a/net/rds/recv.c b/net/rds/recv.c index 504cd6bcc54c..727639dac8a7 100644 --- a/net/rds/recv.c +++ b/net/rds/recv.c @@ -43,18 +43,14 @@ void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn, struct in6_addr *saddr) { - int i; - refcount_set(&inc->i_refcount, 1); INIT_LIST_HEAD(&inc->i_item); inc->i_conn = conn; inc->i_saddr = *saddr; inc->i_rdma_cookie = 0; - inc->i_rx_tstamp.tv_sec = 0; - inc->i_rx_tstamp.tv_usec = 0; + inc->i_rx_tstamp = ktime_set(0, 0); - for (i = 0; i < RDS_RX_MAX_TRACES; i++) - inc->i_rx_lat_trace[i] = 0; + memset(inc->i_rx_lat_trace, 0, sizeof(inc->i_rx_lat_trace)); } EXPORT_SYMBOL_GPL(rds_inc_init); @@ -67,8 +63,7 @@ void rds_inc_path_init(struct 
rds_incoming *inc, struct rds_conn_path *cp, inc->i_conn_path = cp; inc->i_saddr = *saddr; inc->i_rdma_cookie = 0; - inc->i_rx_tstamp.tv_sec = 0; - inc->i_rx_tstamp.tv_usec = 0; + inc->i_rx_tstamp = ktime_set(0, 0); } EXPORT_SYMBOL_GPL(rds_inc_path_init); @@ -385,7 +380,7 @@ void rds_recv_incoming(struct rds_connection *conn, struct in6_addr *saddr, be32_to_cpu(inc->i_hdr.h_len), inc->i_hdr.h_dport); if (sock_flag(sk, SOCK_RCVTSTAMP)) - do_gettimeofday(&inc->i_rx_tstamp); + inc->i_rx_tstamp = ktime_get_real(); rds_inc_addref(inc); inc->i_rx_lat_trace[RDS_MSG_RX_END] = local_clock(); list_add_tail(&inc->i_item, &rs->rs_recv_queue); @@ -552,11 +547,11 @@ static int rds_cmsg_recv(struct rds_incoming *inc, struct msghdr *msg, goto out; } - if ((inc->i_rx_tstamp.tv_sec != 0) && + if ((inc->i_rx_tstamp != 0) && sock_flag(rds_rs_to_sk(rs), SOCK_RCVTSTAMP)) { + struct timeval tv = ktime_to_timeval(inc->i_rx_tstamp); ret = put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMP, - sizeof(struct timeval), - &inc->i_rx_tstamp); + sizeof(tv), &tv); if (ret) goto out; } diff --git a/net/rfkill/core.c b/net/rfkill/core.c index 1355f5ca8d22..abca57040f37 100644 --- a/net/rfkill/core.c +++ b/net/rfkill/core.c @@ -510,8 +510,8 @@ void rfkill_remove_epo_lock(void) /** * rfkill_is_epo_lock_active - returns true EPO is active * - * Returns 0 (false) if there is NOT an active EPO contidion, - * and 1 (true) if there is an active EPO contition, which + * Returns 0 (false) if there is NOT an active EPO condition, + * and 1 (true) if there is an active EPO condition, which * locks all radios in one of the BLOCKED states. * * Can be called in atomic context. diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c index ac44d8afffb1..013dbcb052e5 100644 --- a/net/rxrpc/af_rxrpc.c +++ b/net/rxrpc/af_rxrpc.c @@ -97,7 +97,8 @@ static int rxrpc_validate_address(struct rxrpc_sock *rx, srx->transport_len > len) return -EINVAL; - if (srx->transport.family != rx->family) + if (srx->transport.family != rx->family && + srx->transport.family == AF_INET && rx->family != AF_INET6) return -EAFNOSUPPORT; switch (srx->transport.family) { @@ -385,6 +386,20 @@ u32 rxrpc_kernel_check_life(struct socket *sock, struct rxrpc_call *call) EXPORT_SYMBOL(rxrpc_kernel_check_life); /** + * rxrpc_kernel_get_epoch - Retrieve the epoch value from a call. + * @sock: The socket the call is on + * @call: The call to query + * + * Allow a kernel service to retrieve the epoch value from a service call to + * see if the client at the other end rebooted. 
+ */ +u32 rxrpc_kernel_get_epoch(struct socket *sock, struct rxrpc_call *call) +{ + return call->conn->proto.epoch; +} +EXPORT_SYMBOL(rxrpc_kernel_get_epoch); + +/** * rxrpc_kernel_check_call - Check a call's state * @sock: The socket the call is on * @call: The call to check diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index a6e6cae82c30..8cee7644965c 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -1098,7 +1098,6 @@ void rxrpc_new_skb(struct sk_buff *, enum rxrpc_skb_trace); void rxrpc_see_skb(struct sk_buff *, enum rxrpc_skb_trace); void rxrpc_get_skb(struct sk_buff *, enum rxrpc_skb_trace); void rxrpc_free_skb(struct sk_buff *, enum rxrpc_skb_trace); -void rxrpc_lose_skb(struct sk_buff *, enum rxrpc_skb_trace); void rxrpc_purge_queue(struct sk_buff_head *); /* @@ -1115,8 +1114,7 @@ static inline void rxrpc_sysctl_exit(void) {} /* * utils.c */ -int rxrpc_extract_addr_from_skb(struct rxrpc_local *, struct sockaddr_rxrpc *, - struct sk_buff *); +int rxrpc_extract_addr_from_skb(struct sockaddr_rxrpc *, struct sk_buff *); static inline bool before(u32 seq1, u32 seq2) { diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c index 652e314de38e..e0d8ca03169a 100644 --- a/net/rxrpc/call_accept.c +++ b/net/rxrpc/call_accept.c @@ -280,7 +280,7 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx, peer = NULL; if (!peer) { peer = b->peer_backlog[peer_tail]; - if (rxrpc_extract_addr_from_skb(local, &peer->srx, skb) < 0) + if (rxrpc_extract_addr_from_skb(&peer->srx, skb) < 0) return NULL; b->peer_backlog[peer_tail] = NULL; smp_store_release(&b->peer_backlog_tail, diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c index 885dae829f4a..c332722820c2 100644 --- a/net/rxrpc/conn_object.c +++ b/net/rxrpc/conn_object.c @@ -86,11 +86,12 @@ struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *local, _enter(",%x", sp->hdr.cid & RXRPC_CIDMASK); - if (rxrpc_extract_addr_from_skb(local, &srx, skb) < 0) + if (rxrpc_extract_addr_from_skb(&srx, skb) < 0) goto not_found; - /* We may have to handle mixing IPv4 and IPv6 */ - if (srx.transport.family != local->srx.transport.family) { + if (srx.transport.family != local->srx.transport.family && + (srx.transport.family == AF_INET && + local->srx.transport.family != AF_INET6)) { pr_warn_ratelimited("AF_RXRPC: Protocol mismatch %u not %u\n", srx.transport.family, local->srx.transport.family); diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c index 570b49d2da42..9128aa0e40aa 100644 --- a/net/rxrpc/input.c +++ b/net/rxrpc/input.c @@ -262,7 +262,7 @@ static bool rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to, while (list) { skb = list; list = skb->next; - skb->next = NULL; + skb_mark_not_on_list(skb); rxrpc_free_skb(skb, rxrpc_skb_tx_freed); } diff --git a/net/rxrpc/local_event.c b/net/rxrpc/local_event.c index 13bd8a4dfac7..927ead43df42 100644 --- a/net/rxrpc/local_event.c +++ b/net/rxrpc/local_event.c @@ -39,7 +39,7 @@ static void rxrpc_send_version_request(struct rxrpc_local *local, _enter(""); - if (rxrpc_extract_addr_from_skb(local, &srx, skb) < 0) + if (rxrpc_extract_addr_from_skb(&srx, skb) < 0) return; msg.msg_name = &srx.transport; diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c index e8fb8922bca8..0f0b499d1202 100644 --- a/net/rxrpc/output.c +++ b/net/rxrpc/output.c @@ -378,11 +378,13 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb, if ((lose++ & 7) == 7) { ret = 0; lost = true; - goto done; } } - _proto("Tx DATA %%%u 
{ #%u }", serial, sp->hdr.seq); + trace_rxrpc_tx_data(call, sp->hdr.seq, serial, whdr.flags, + retrans, lost); + if (lost) + goto done; /* send the packet with the don't fragment bit set if we currently * think it's small enough */ @@ -415,8 +417,6 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb, goto send_fragmentable; done: - trace_rxrpc_tx_data(call, sp->hdr.seq, serial, whdr.flags, - retrans, lost); if (ret >= 0) { if (whdr.flags & RXRPC_REQUEST_ACK) { call->peer->rtt_last_req = skb->tstamp; @@ -561,7 +561,7 @@ void rxrpc_reject_packets(struct rxrpc_local *local) continue; } - if (rxrpc_extract_addr_from_skb(local, &srx, skb) == 0) { + if (rxrpc_extract_addr_from_skb(&srx, skb) == 0) { msg.msg_namelen = srx.transport_len; whdr.epoch = htonl(sp->hdr.epoch); diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c index 05b51bdbdd41..7feb611c7258 100644 --- a/net/rxrpc/peer_event.c +++ b/net/rxrpc/peer_event.c @@ -47,6 +47,8 @@ static struct rxrpc_peer *rxrpc_lookup_peer_icmp_rcu(struct rxrpc_local *local, */ switch (srx->transport.family) { case AF_INET: + srx->transport_len = sizeof(srx->transport.sin); + srx->transport.family = AF_INET; srx->transport.sin.sin_port = serr->port; switch (serr->ee.ee_origin) { case SO_EE_ORIGIN_ICMP: @@ -70,20 +72,20 @@ static struct rxrpc_peer *rxrpc_lookup_peer_icmp_rcu(struct rxrpc_local *local, #ifdef CONFIG_AF_RXRPC_IPV6 case AF_INET6: - srx->transport.sin6.sin6_port = serr->port; switch (serr->ee.ee_origin) { case SO_EE_ORIGIN_ICMP6: _net("Rx ICMP6"); + srx->transport.sin6.sin6_port = serr->port; memcpy(&srx->transport.sin6.sin6_addr, skb_network_header(skb) + serr->addr_offset, sizeof(struct in6_addr)); break; case SO_EE_ORIGIN_ICMP: _net("Rx ICMP on v6 sock"); - srx->transport.sin6.sin6_addr.s6_addr32[0] = 0; - srx->transport.sin6.sin6_addr.s6_addr32[1] = 0; - srx->transport.sin6.sin6_addr.s6_addr32[2] = htonl(0xffff); - memcpy(srx->transport.sin6.sin6_addr.s6_addr + 12, + srx->transport_len = sizeof(srx->transport.sin); + srx->transport.family = AF_INET; + srx->transport.sin.sin_port = serr->port; + memcpy(&srx->transport.sin.sin_addr, skb_network_header(skb) + serr->addr_offset, sizeof(struct in_addr)); break; diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c index 816b19a78809..eaf19ebaa964 100644 --- a/net/rxrpc/recvmsg.c +++ b/net/rxrpc/recvmsg.c @@ -715,3 +715,46 @@ call_complete: goto out; } EXPORT_SYMBOL(rxrpc_kernel_recv_data); + +/** + * rxrpc_kernel_get_reply_time - Get timestamp on first reply packet + * @sock: The socket that the call exists on + * @call: The call to query + * @_ts: Where to put the timestamp + * + * Retrieve the timestamp from the first DATA packet of the reply if it is + * in the ring. Returns true if successful, false if not. 
+ */
+bool rxrpc_kernel_get_reply_time(struct socket *sock, struct rxrpc_call *call,
+				 ktime_t *_ts)
+{
+	struct sk_buff *skb;
+	rxrpc_seq_t hard_ack, top, seq;
+	bool success = false;
+
+	mutex_lock(&call->user_mutex);
+
+	if (READ_ONCE(call->state) != RXRPC_CALL_CLIENT_RECV_REPLY)
+		goto out;
+
+	hard_ack = call->rx_hard_ack;
+	if (hard_ack != 0)
+		goto out;
+
+	seq = hard_ack + 1;
+	top = smp_load_acquire(&call->rx_top);
+	if (after(seq, top))
+		goto out;
+
+	skb = call->rxtx_buffer[seq & RXRPC_RXTX_BUFF_MASK];
+	if (!skb)
+		goto out;
+
+	*_ts = skb_get_ktime(skb);
+	success = true;
+
+out:
+	mutex_unlock(&call->user_mutex);
+	return success;
+}
+EXPORT_SYMBOL(rxrpc_kernel_get_reply_time);
diff --git a/net/rxrpc/skbuff.c b/net/rxrpc/skbuff.c
index b8985d01876a..913dca65cc65 100644
--- a/net/rxrpc/skbuff.c
+++ b/net/rxrpc/skbuff.c
@@ -69,21 +69,6 @@ void rxrpc_free_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
 }
 
 /*
- * Note the injected loss of a socket buffer.
- */
-void rxrpc_lose_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
-{
-	const void *here = __builtin_return_address(0);
-	if (skb) {
-		int n;
-		CHECK_SLAB_OKAY(&skb->users);
-		n = atomic_dec_return(select_skb_count(op));
-		trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
-		kfree_skb(skb);
-	}
-}
-
-/*
  * Clear a queue of socket buffers.
  */
 void rxrpc_purge_queue(struct sk_buff_head *list)
diff --git a/net/rxrpc/utils.c b/net/rxrpc/utils.c
index e801171fa351..ff7af71c4b49 100644
--- a/net/rxrpc/utils.c
+++ b/net/rxrpc/utils.c
@@ -17,28 +17,17 @@
 /*
  * Fill out a peer address from a socket buffer containing a packet.
  */
-int rxrpc_extract_addr_from_skb(struct rxrpc_local *local,
-				struct sockaddr_rxrpc *srx,
-				struct sk_buff *skb)
+int rxrpc_extract_addr_from_skb(struct sockaddr_rxrpc *srx, struct sk_buff *skb)
 {
 	memset(srx, 0, sizeof(*srx));
 	switch (ntohs(skb->protocol)) {
 	case ETH_P_IP:
-		if (local->srx.transport.family == AF_INET6) {
-			srx->transport_type = SOCK_DGRAM;
-			srx->transport_len = sizeof(srx->transport.sin6);
-			srx->transport.sin6.sin6_family = AF_INET6;
-			srx->transport.sin6.sin6_port = udp_hdr(skb)->source;
-			srx->transport.sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
-			srx->transport.sin6.sin6_addr.s6_addr32[3] = ip_hdr(skb)->saddr;
-		} else {
-			srx->transport_type = SOCK_DGRAM;
-			srx->transport_len = sizeof(srx->transport.sin);
-			srx->transport.sin.sin_family = AF_INET;
-			srx->transport.sin.sin_port = udp_hdr(skb)->source;
-			srx->transport.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
-		}
+		srx->transport_type = SOCK_DGRAM;
+		srx->transport_len = sizeof(srx->transport.sin);
+		srx->transport.sin.sin_family = AF_INET;
+		srx->transport.sin.sin_port = udp_hdr(skb)->source;
+		srx->transport.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
 		return 0;
 
 #ifdef CONFIG_AF_RXRPC_IPV6
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index e95741388311..1b9afdee5ba9 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -194,6 +194,17 @@ config NET_SCH_ETF
 	  To compile this code as a module, choose M here: the
 	  module will be called sch_etf.
 
+config NET_SCH_TAPRIO
+	tristate "Time Aware Priority (taprio) Scheduler"
+	help
+	  Say Y here if you want to use the Time Aware Priority (taprio) packet
+	  scheduling algorithm.
+
+	  See the top of <file:net/sched/sch_taprio.c> for more details.
+
+	  To compile this code as a module, choose M here: the
+	  module will be called sch_taprio.
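The NET_SCH_TAPRIO entry above only names the algorithm, so a rough illustration may help. The stand-alone sketch below evaluates the kind of repeating gate schedule a time-aware shaper is built around; it is not code from sch_taprio.c, and every identifier in it is hypothetical.

#include <stdbool.h>
#include <stdint.h>

struct gate_entry {
	uint64_t interval_ns;	/* how long this entry stays active */
	uint8_t  gate_mask;	/* bit N set: traffic class N may transmit */
};

/*
 * Return the gate mask active at 'now_ns', assuming now_ns >= base_ns and
 * cycle_ns equal to the sum of all interval_ns values in the schedule.
 */
static uint8_t gate_open_at(const struct gate_entry *sched, int n_entries,
			    uint64_t base_ns, uint64_t cycle_ns,
			    uint64_t now_ns)
{
	uint64_t offset = (now_ns - base_ns) % cycle_ns;

	for (int i = 0; i < n_entries; i++) {
		if (offset < sched[i].interval_ns)
			return sched[i].gate_mask;
		offset -= sched[i].interval_ns;
	}
	return 0;	/* unreachable when cycle_ns matches the entries */
}

static bool tc_may_transmit(uint8_t gate_mask, int tc)
{
	return gate_mask & (1u << tc);
}

The real qdisc has to do this against a clock and per-class queues, but the schedule evaluation itself reduces to the modulo walk shown here.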
+ config NET_SCH_GRED tristate "Generic Random Early Detection (GRED)" ---help--- diff --git a/net/sched/Makefile b/net/sched/Makefile index f0403f49edcb..8a40431d7b5c 100644 --- a/net/sched/Makefile +++ b/net/sched/Makefile @@ -57,6 +57,7 @@ obj-$(CONFIG_NET_SCH_HHF) += sch_hhf.o obj-$(CONFIG_NET_SCH_PIE) += sch_pie.o obj-$(CONFIG_NET_SCH_CBS) += sch_cbs.o obj-$(CONFIG_NET_SCH_ETF) += sch_etf.o +obj-$(CONFIG_NET_SCH_TAPRIO) += sch_taprio.o obj-$(CONFIG_NET_CLS_U32) += cls_u32.o obj-$(CONFIG_NET_CLS_ROUTE4) += cls_route.o diff --git a/net/sched/act_api.c b/net/sched/act_api.c index e12f8ef7baa4..9c1b0729aebf 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -81,6 +81,7 @@ static void tcf_set_action_cookie(struct tc_cookie __rcu **old_cookie, static void free_tcf(struct tc_action *p) { free_percpu(p->cpu_bstats); + free_percpu(p->cpu_bstats_hw); free_percpu(p->cpu_qstats); tcf_set_action_cookie(&p->act_cookie, NULL); @@ -103,11 +104,11 @@ static int __tcf_action_put(struct tc_action *p, bool bind) { struct tcf_idrinfo *idrinfo = p->idrinfo; - if (refcount_dec_and_lock(&p->tcfa_refcnt, &idrinfo->lock)) { + if (refcount_dec_and_mutex_lock(&p->tcfa_refcnt, &idrinfo->lock)) { if (bind) atomic_dec(&p->tcfa_bindcnt); idr_remove(&idrinfo->action_idr, p->tcfa_index); - spin_unlock(&idrinfo->lock); + mutex_unlock(&idrinfo->lock); tcf_action_cleanup(p); return 1; @@ -199,7 +200,7 @@ static int tcf_dump_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb, struct tc_action *p; unsigned long id = 1; - spin_lock(&idrinfo->lock); + mutex_lock(&idrinfo->lock); s_i = cb->args[0]; @@ -234,7 +235,7 @@ done: if (index >= 0) cb->args[0] = index + 1; - spin_unlock(&idrinfo->lock); + mutex_unlock(&idrinfo->lock); if (n_i) { if (act_flags & TCA_FLAG_LARGE_DUMP_ON) cb->args[1] = n_i; @@ -246,6 +247,20 @@ nla_put_failure: goto done; } +static int tcf_idr_release_unsafe(struct tc_action *p) +{ + if (atomic_read(&p->tcfa_bindcnt) > 0) + return -EPERM; + + if (refcount_dec_and_test(&p->tcfa_refcnt)) { + idr_remove(&p->idrinfo->action_idr, p->tcfa_index); + tcf_action_cleanup(p); + return ACT_P_DELETED; + } + + return 0; +} + static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb, const struct tc_action_ops *ops) { @@ -262,15 +277,19 @@ static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb, if (nla_put_string(skb, TCA_KIND, ops->kind)) goto nla_put_failure; + mutex_lock(&idrinfo->lock); idr_for_each_entry_ul(idr, p, id) { - ret = __tcf_idr_release(p, false, true); + ret = tcf_idr_release_unsafe(p); if (ret == ACT_P_DELETED) { module_put(ops->owner); n_i++; } else if (ret < 0) { + mutex_unlock(&idrinfo->lock); goto nla_put_failure; } } + mutex_unlock(&idrinfo->lock); + if (nla_put_u32(skb, TCA_FCNT, n_i)) goto nla_put_failure; nla_nest_end(skb, nest); @@ -305,13 +324,13 @@ int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index) struct tcf_idrinfo *idrinfo = tn->idrinfo; struct tc_action *p; - spin_lock(&idrinfo->lock); + mutex_lock(&idrinfo->lock); p = idr_find(&idrinfo->action_idr, index); if (IS_ERR(p)) p = NULL; else if (p) refcount_inc(&p->tcfa_refcnt); - spin_unlock(&idrinfo->lock); + mutex_unlock(&idrinfo->lock); if (p) { *a = p; @@ -326,10 +345,10 @@ static int tcf_idr_delete_index(struct tcf_idrinfo *idrinfo, u32 index) struct tc_action *p; int ret = 0; - spin_lock(&idrinfo->lock); + mutex_lock(&idrinfo->lock); p = idr_find(&idrinfo->action_idr, index); if (!p) { - spin_unlock(&idrinfo->lock); + mutex_unlock(&idrinfo->lock); return 
-ENOENT; } @@ -339,7 +358,7 @@ static int tcf_idr_delete_index(struct tcf_idrinfo *idrinfo, u32 index) WARN_ON(p != idr_remove(&idrinfo->action_idr, p->tcfa_index)); - spin_unlock(&idrinfo->lock); + mutex_unlock(&idrinfo->lock); tcf_action_cleanup(p); module_put(owner); @@ -350,7 +369,7 @@ static int tcf_idr_delete_index(struct tcf_idrinfo *idrinfo, u32 index) ret = -EPERM; } - spin_unlock(&idrinfo->lock); + mutex_unlock(&idrinfo->lock); return ret; } @@ -372,9 +391,12 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est, p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu); if (!p->cpu_bstats) goto err1; + p->cpu_bstats_hw = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu); + if (!p->cpu_bstats_hw) + goto err2; p->cpu_qstats = alloc_percpu(struct gnet_stats_queue); if (!p->cpu_qstats) - goto err2; + goto err3; } spin_lock_init(&p->tcfa_lock); p->tcfa_index = index; @@ -386,15 +408,17 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est, &p->tcfa_rate_est, &p->tcfa_lock, NULL, est); if (err) - goto err3; + goto err4; } p->idrinfo = idrinfo; p->ops = ops; *a = p; return 0; -err3: +err4: free_percpu(p->cpu_qstats); +err3: + free_percpu(p->cpu_bstats_hw); err2: free_percpu(p->cpu_bstats); err1: @@ -407,10 +431,10 @@ void tcf_idr_insert(struct tc_action_net *tn, struct tc_action *a) { struct tcf_idrinfo *idrinfo = tn->idrinfo; - spin_lock(&idrinfo->lock); + mutex_lock(&idrinfo->lock); /* Replace ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc */ WARN_ON(!IS_ERR(idr_replace(&idrinfo->action_idr, a, a->tcfa_index))); - spin_unlock(&idrinfo->lock); + mutex_unlock(&idrinfo->lock); } EXPORT_SYMBOL(tcf_idr_insert); @@ -420,10 +444,10 @@ void tcf_idr_cleanup(struct tc_action_net *tn, u32 index) { struct tcf_idrinfo *idrinfo = tn->idrinfo; - spin_lock(&idrinfo->lock); + mutex_lock(&idrinfo->lock); /* Remove ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc */ WARN_ON(!IS_ERR(idr_remove(&idrinfo->action_idr, index))); - spin_unlock(&idrinfo->lock); + mutex_unlock(&idrinfo->lock); } EXPORT_SYMBOL(tcf_idr_cleanup); @@ -441,14 +465,14 @@ int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index, int ret; again: - spin_lock(&idrinfo->lock); + mutex_lock(&idrinfo->lock); if (*index) { p = idr_find(&idrinfo->action_idr, *index); if (IS_ERR(p)) { /* This means that another process allocated * index but did not assign the pointer yet. 
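The comment above refers to the placeholder idiom that makes the retry loop work: a slot is claimed in the IDR with ERR_PTR(-EBUSY) so concurrent lookups can tell "being created" apart from "absent", and the real object is installed later with idr_replace(). A compressed, hypothetical sketch of that idiom (not the actual act_api code) under the new mutex:

#include <linux/err.h>
#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/mutex.h>

static DEFINE_IDR(obj_idr);
static DEFINE_MUTEX(obj_idr_lock);

/* Reserve an index; sleeping allocations are fine under a mutex. */
static int obj_reserve_index(u32 *index)
{
	int err;

	mutex_lock(&obj_idr_lock);
	err = idr_alloc_u32(&obj_idr, NULL, index, UINT_MAX, GFP_KERNEL);
	if (!err)
		idr_replace(&obj_idr, ERR_PTR(-EBUSY), *index);
	mutex_unlock(&obj_idr_lock);
	return err;
}

/* Publish the fully initialised object over the -EBUSY placeholder. */
static void obj_publish(void *obj, u32 index)
{
	mutex_lock(&obj_idr_lock);
	WARN_ON(!IS_ERR(idr_replace(&obj_idr, obj, index)));
	mutex_unlock(&obj_idr_lock);
}

Lookups that find an IS_ERR() entry simply retry, which is the loop the surrounding hunk implements.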
*/ - spin_unlock(&idrinfo->lock); + mutex_unlock(&idrinfo->lock); goto again; } @@ -461,7 +485,7 @@ again: } else { *a = NULL; ret = idr_alloc_u32(&idrinfo->action_idr, NULL, index, - *index, GFP_ATOMIC); + *index, GFP_KERNEL); if (!ret) idr_replace(&idrinfo->action_idr, ERR_PTR(-EBUSY), *index); @@ -470,12 +494,12 @@ again: *index = 1; *a = NULL; ret = idr_alloc_u32(&idrinfo->action_idr, NULL, index, - UINT_MAX, GFP_ATOMIC); + UINT_MAX, GFP_KERNEL); if (!ret) idr_replace(&idrinfo->action_idr, ERR_PTR(-EBUSY), *index); } - spin_unlock(&idrinfo->lock); + mutex_unlock(&idrinfo->lock); return ret; } EXPORT_SYMBOL(tcf_idr_check_alloc); @@ -979,6 +1003,8 @@ int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *p, goto errout; if (gnet_stats_copy_basic(NULL, &d, p->cpu_bstats, &p->tcfa_bstats) < 0 || + gnet_stats_copy_basic_hw(NULL, &d, p->cpu_bstats_hw, + &p->tcfa_bstats_hw) < 0 || gnet_stats_copy_rate_est(&d, &p->tcfa_rate_est) < 0 || gnet_stats_copy_queue(&d, p->cpu_qstats, &p->tcfa_qstats, @@ -1073,12 +1099,14 @@ static struct tc_action *tcf_action_get_1(struct net *net, struct nlattr *nla, err = -EINVAL; ops = tc_lookup_action(tb[TCA_ACT_KIND]); if (!ops) { /* could happen in batch of actions */ - NL_SET_ERR_MSG(extack, "Specified TC action not found"); + NL_SET_ERR_MSG(extack, "Specified TC action kind not found"); goto err_out; } err = -ENOENT; - if (ops->lookup(net, &a, index, extack) == 0) + if (ops->lookup(net, &a, index) == 0) { + NL_SET_ERR_MSG(extack, "TC action with specified index not found"); goto err_mod; + } module_put(ops->owner); return a; @@ -1424,7 +1452,7 @@ static int tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb) u32 act_count = 0; ret = nlmsg_parse(cb->nlh, sizeof(struct tcamsg), tb, TCA_ROOT_MAX, - tcaa_policy, NULL); + tcaa_policy, cb->extack); if (ret < 0) return ret; diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c index 0c68bc9cf0b4..c7633843e223 100644 --- a/net/sched/act_bpf.c +++ b/net/sched/act_bpf.c @@ -387,8 +387,7 @@ static int tcf_bpf_walker(struct net *net, struct sk_buff *skb, return tcf_generic_walker(tn, skb, cb, type, ops, extack); } -static int tcf_bpf_search(struct net *net, struct tc_action **a, u32 index, - struct netlink_ext_ack *extack) +static int tcf_bpf_search(struct net *net, struct tc_action **a, u32 index) { struct tc_action_net *tn = net_generic(net, bpf_net_id); diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c index 6f0f273f1139..8475913f2070 100644 --- a/net/sched/act_connmark.c +++ b/net/sched/act_connmark.c @@ -143,8 +143,10 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla, return -EEXIST; } /* replacing action and zone */ + spin_lock_bh(&ci->tcf_lock); ci->tcf_action = parm->action; ci->zone = parm->zone; + spin_unlock_bh(&ci->tcf_lock); ret = 0; } @@ -156,16 +158,16 @@ static inline int tcf_connmark_dump(struct sk_buff *skb, struct tc_action *a, { unsigned char *b = skb_tail_pointer(skb); struct tcf_connmark_info *ci = to_connmark(a); - struct tc_connmark opt = { .index = ci->tcf_index, .refcnt = refcount_read(&ci->tcf_refcnt) - ref, .bindcnt = atomic_read(&ci->tcf_bindcnt) - bind, - .action = ci->tcf_action, - .zone = ci->zone, }; struct tcf_t t; + spin_lock_bh(&ci->tcf_lock); + opt.action = ci->tcf_action; + opt.zone = ci->zone; if (nla_put(skb, TCA_CONNMARK_PARMS, sizeof(opt), &opt)) goto nla_put_failure; @@ -173,9 +175,12 @@ static inline int tcf_connmark_dump(struct sk_buff *skb, struct tc_action *a, if (nla_put_64bit(skb, TCA_CONNMARK_TM, sizeof(t), &t, 
TCA_CONNMARK_PAD)) goto nla_put_failure; + spin_unlock_bh(&ci->tcf_lock); return skb->len; + nla_put_failure: + spin_unlock_bh(&ci->tcf_lock); nlmsg_trim(skb, b); return -1; } @@ -190,8 +195,7 @@ static int tcf_connmark_walker(struct net *net, struct sk_buff *skb, return tcf_generic_walker(tn, skb, cb, type, ops, extack); } -static int tcf_connmark_search(struct net *net, struct tc_action **a, u32 index, - struct netlink_ext_ack *extack) +static int tcf_connmark_search(struct net *net, struct tc_action **a, u32 index) { struct tc_action_net *tn = net_generic(net, connmark_net_id); diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c index b8a67ae3105a..3dc25b7806d7 100644 --- a/net/sched/act_csum.c +++ b/net/sched/act_csum.c @@ -646,8 +646,7 @@ static int tcf_csum_walker(struct net *net, struct sk_buff *skb, return tcf_generic_walker(tn, skb, cb, type, ops, extack); } -static int tcf_csum_search(struct net *net, struct tc_action **a, u32 index, - struct netlink_ext_ack *extack) +static int tcf_csum_search(struct net *net, struct tc_action **a, u32 index) { struct tc_action_net *tn = net_generic(net, csum_net_id); diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c index cd1d9bd32ef9..c89a7fa43d1b 100644 --- a/net/sched/act_gact.c +++ b/net/sched/act_gact.c @@ -157,7 +157,7 @@ static int tcf_gact_act(struct sk_buff *skb, const struct tc_action *a, } static void tcf_gact_stats_update(struct tc_action *a, u64 bytes, u32 packets, - u64 lastuse) + u64 lastuse, bool hw) { struct tcf_gact *gact = to_gact(a); int action = READ_ONCE(gact->tcf_action); @@ -168,6 +168,10 @@ static void tcf_gact_stats_update(struct tc_action *a, u64 bytes, u32 packets, if (action == TC_ACT_SHOT) this_cpu_ptr(gact->common.cpu_qstats)->drops += packets; + if (hw) + _bstats_cpu_update(this_cpu_ptr(gact->common.cpu_bstats_hw), + bytes, packets); + tm->lastuse = max_t(u64, tm->lastuse, lastuse); } @@ -222,8 +226,7 @@ static int tcf_gact_walker(struct net *net, struct sk_buff *skb, return tcf_generic_walker(tn, skb, cb, type, ops, extack); } -static int tcf_gact_search(struct net *net, struct tc_action **a, u32 index, - struct netlink_ext_ack *extack) +static int tcf_gact_search(struct net *net, struct tc_action **a, u32 index) { struct tc_action_net *tn = net_generic(net, gact_net_id); diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c index 06a3d4801878..30b63fa23ee2 100644 --- a/net/sched/act_ife.c +++ b/net/sched/act_ife.c @@ -855,8 +855,7 @@ static int tcf_ife_walker(struct net *net, struct sk_buff *skb, return tcf_generic_walker(tn, skb, cb, type, ops, extack); } -static int tcf_ife_search(struct net *net, struct tc_action **a, u32 index, - struct netlink_ext_ack *extack) +static int tcf_ife_search(struct net *net, struct tc_action **a, u32 index) { struct tc_action_net *tn = net_generic(net, ife_net_id); diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c index 8525de811616..8af6c11d2482 100644 --- a/net/sched/act_ipt.c +++ b/net/sched/act_ipt.c @@ -329,8 +329,7 @@ static int tcf_ipt_walker(struct net *net, struct sk_buff *skb, return tcf_generic_walker(tn, skb, cb, type, ops, extack); } -static int tcf_ipt_search(struct net *net, struct tc_action **a, u32 index, - struct netlink_ext_ack *extack) +static int tcf_ipt_search(struct net *net, struct tc_action **a, u32 index) { struct tc_action_net *tn = net_generic(net, ipt_net_id); @@ -379,8 +378,7 @@ static int tcf_xt_walker(struct net *net, struct sk_buff *skb, return tcf_generic_walker(tn, skb, cb, type, ops, extack); } -static int 
tcf_xt_search(struct net *net, struct tc_action **a, u32 index, - struct netlink_ext_ack *extack) +static int tcf_xt_search(struct net *net, struct tc_action **a, u32 index) { struct tc_action_net *tn = net_generic(net, xt_net_id); diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index 8bf66d0a6800..1dae5f2b358f 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c @@ -283,12 +283,15 @@ out: } static void tcf_stats_update(struct tc_action *a, u64 bytes, u32 packets, - u64 lastuse) + u64 lastuse, bool hw) { struct tcf_mirred *m = to_mirred(a); struct tcf_t *tm = &m->tcf_tm; _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets); + if (hw) + _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats_hw), + bytes, packets); tm->lastuse = max_t(u64, tm->lastuse, lastuse); } @@ -338,8 +341,7 @@ static int tcf_mirred_walker(struct net *net, struct sk_buff *skb, return tcf_generic_walker(tn, skb, cb, type, ops, extack); } -static int tcf_mirred_search(struct net *net, struct tc_action **a, u32 index, - struct netlink_ext_ack *extack) +static int tcf_mirred_search(struct net *net, struct tc_action **a, u32 index) { struct tc_action_net *tn = net_generic(net, mirred_net_id); diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c index 4313aa102440..c5c1e23add77 100644 --- a/net/sched/act_nat.c +++ b/net/sched/act_nat.c @@ -256,28 +256,31 @@ static int tcf_nat_dump(struct sk_buff *skb, struct tc_action *a, unsigned char *b = skb_tail_pointer(skb); struct tcf_nat *p = to_tcf_nat(a); struct tc_nat opt = { - .old_addr = p->old_addr, - .new_addr = p->new_addr, - .mask = p->mask, - .flags = p->flags, - .index = p->tcf_index, - .action = p->tcf_action, .refcnt = refcount_read(&p->tcf_refcnt) - ref, .bindcnt = atomic_read(&p->tcf_bindcnt) - bind, }; struct tcf_t t; + spin_lock_bh(&p->tcf_lock); + opt.old_addr = p->old_addr; + opt.new_addr = p->new_addr; + opt.mask = p->mask; + opt.flags = p->flags; + opt.action = p->tcf_action; + if (nla_put(skb, TCA_NAT_PARMS, sizeof(opt), &opt)) goto nla_put_failure; tcf_tm_dump(&t, &p->tcf_tm); if (nla_put_64bit(skb, TCA_NAT_TM, sizeof(t), &t, TCA_NAT_PAD)) goto nla_put_failure; + spin_unlock_bh(&p->tcf_lock); return skb->len; nla_put_failure: + spin_unlock_bh(&p->tcf_lock); nlmsg_trim(skb, b); return -1; } @@ -292,8 +295,7 @@ static int tcf_nat_walker(struct net *net, struct sk_buff *skb, return tcf_generic_walker(tn, skb, cb, type, ops, extack); } -static int tcf_nat_search(struct net *net, struct tc_action **a, u32 index, - struct netlink_ext_ack *extack) +static int tcf_nat_search(struct net *net, struct tc_action **a, u32 index) { struct tc_action_net *tn = net_generic(net, nat_net_id); diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index ad99a99f11f6..da3dd0f68cc2 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c @@ -460,8 +460,7 @@ static int tcf_pedit_walker(struct net *net, struct sk_buff *skb, return tcf_generic_walker(tn, skb, cb, type, ops, extack); } -static int tcf_pedit_search(struct net *net, struct tc_action **a, u32 index, - struct netlink_ext_ack *extack) +static int tcf_pedit_search(struct net *net, struct tc_action **a, u32 index) { struct tc_action_net *tn = net_generic(net, pedit_net_id); diff --git a/net/sched/act_police.c b/net/sched/act_police.c index 5d8bfa878477..92649d2667ed 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c @@ -22,8 +22,7 @@ #include <net/act_api.h> #include <net/netlink.h> -struct tcf_police { - struct tc_action common; +struct tcf_police_params { int tcfp_result; 
u32 tcfp_ewma_rate; s64 tcfp_burst; @@ -36,6 +35,12 @@ struct tcf_police { bool rate_present; struct psched_ratecfg peak; bool peak_present; + struct rcu_head rcu; +}; + +struct tcf_police { + struct tc_action common; + struct tcf_police_params __rcu *params; }; #define to_police(pc) ((struct tcf_police *)pc) @@ -84,6 +89,7 @@ static int tcf_police_init(struct net *net, struct nlattr *nla, struct tcf_police *police; struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL; struct tc_action_net *tn = net_generic(net, police_net_id); + struct tcf_police_params *new; bool exists = false; int size; @@ -110,7 +116,7 @@ static int tcf_police_init(struct net *net, struct nlattr *nla, if (!exists) { ret = tcf_idr_create(tn, parm->index, NULL, a, - &act_police_ops, bind, false); + &act_police_ops, bind, true); if (ret) { tcf_idr_cleanup(tn, parm->index); return ret; @@ -137,7 +143,8 @@ static int tcf_police_init(struct net *net, struct nlattr *nla, } if (est) { - err = gen_replace_estimator(&police->tcf_bstats, NULL, + err = gen_replace_estimator(&police->tcf_bstats, + police->common.cpu_bstats, &police->tcf_rate_est, &police->tcf_lock, NULL, est); @@ -150,50 +157,60 @@ static int tcf_police_init(struct net *net, struct nlattr *nla, goto failure; } - spin_lock_bh(&police->tcf_lock); + new = kzalloc(sizeof(*new), GFP_KERNEL); + if (unlikely(!new)) { + err = -ENOMEM; + goto failure; + } + /* No failure allowed after this point */ - police->tcfp_mtu = parm->mtu; - if (police->tcfp_mtu == 0) { - police->tcfp_mtu = ~0; + new->tcfp_mtu = parm->mtu; + if (!new->tcfp_mtu) { + new->tcfp_mtu = ~0; if (R_tab) - police->tcfp_mtu = 255 << R_tab->rate.cell_log; + new->tcfp_mtu = 255 << R_tab->rate.cell_log; } if (R_tab) { - police->rate_present = true; - psched_ratecfg_precompute(&police->rate, &R_tab->rate, 0); + new->rate_present = true; + psched_ratecfg_precompute(&new->rate, &R_tab->rate, 0); qdisc_put_rtab(R_tab); } else { - police->rate_present = false; + new->rate_present = false; } if (P_tab) { - police->peak_present = true; - psched_ratecfg_precompute(&police->peak, &P_tab->rate, 0); + new->peak_present = true; + psched_ratecfg_precompute(&new->peak, &P_tab->rate, 0); qdisc_put_rtab(P_tab); } else { - police->peak_present = false; + new->peak_present = false; } if (tb[TCA_POLICE_RESULT]) - police->tcfp_result = nla_get_u32(tb[TCA_POLICE_RESULT]); - police->tcfp_burst = PSCHED_TICKS2NS(parm->burst); - police->tcfp_toks = police->tcfp_burst; - if (police->peak_present) { - police->tcfp_mtu_ptoks = (s64) psched_l2t_ns(&police->peak, - police->tcfp_mtu); - police->tcfp_ptoks = police->tcfp_mtu_ptoks; + new->tcfp_result = nla_get_u32(tb[TCA_POLICE_RESULT]); + new->tcfp_burst = PSCHED_TICKS2NS(parm->burst); + new->tcfp_toks = new->tcfp_burst; + if (new->peak_present) { + new->tcfp_mtu_ptoks = (s64)psched_l2t_ns(&new->peak, + new->tcfp_mtu); + new->tcfp_ptoks = new->tcfp_mtu_ptoks; } - police->tcf_action = parm->action; if (tb[TCA_POLICE_AVRATE]) - police->tcfp_ewma_rate = nla_get_u32(tb[TCA_POLICE_AVRATE]); + new->tcfp_ewma_rate = nla_get_u32(tb[TCA_POLICE_AVRATE]); + spin_lock_bh(&police->tcf_lock); + new->tcfp_t_c = ktime_get_ns(); + police->tcf_action = parm->action; + rcu_swap_protected(police->params, + new, + lockdep_is_held(&police->tcf_lock)); spin_unlock_bh(&police->tcf_lock); - if (ret != ACT_P_CREATED) - return ret; - police->tcfp_t_c = ktime_get_ns(); - tcf_idr_insert(tn, *a); + if (new) + kfree_rcu(new, rcu); + if (ret == ACT_P_CREATED) + tcf_idr_insert(tn, *a); return ret; failure: @@ -207,64 
+224,69 @@ static int tcf_police_act(struct sk_buff *skb, const struct tc_action *a, struct tcf_result *res) { struct tcf_police *police = to_police(a); - s64 now; - s64 toks; - s64 ptoks = 0; - - spin_lock(&police->tcf_lock); + struct tcf_police_params *p; + s64 now, toks, ptoks = 0; + int ret; - bstats_update(&police->tcf_bstats, skb); tcf_lastuse_update(&police->tcf_tm); + bstats_cpu_update(this_cpu_ptr(police->common.cpu_bstats), skb); - if (police->tcfp_ewma_rate) { + ret = READ_ONCE(police->tcf_action); + p = rcu_dereference_bh(police->params); + + if (p->tcfp_ewma_rate) { struct gnet_stats_rate_est64 sample; if (!gen_estimator_read(&police->tcf_rate_est, &sample) || - sample.bps >= police->tcfp_ewma_rate) { - police->tcf_qstats.overlimits++; - if (police->tcf_action == TC_ACT_SHOT) - police->tcf_qstats.drops++; - spin_unlock(&police->tcf_lock); - return police->tcf_action; - } + sample.bps >= p->tcfp_ewma_rate) + goto inc_overlimits; } - if (qdisc_pkt_len(skb) <= police->tcfp_mtu) { - if (!police->rate_present) { - spin_unlock(&police->tcf_lock); - return police->tcfp_result; + if (qdisc_pkt_len(skb) <= p->tcfp_mtu) { + if (!p->rate_present) { + ret = p->tcfp_result; + goto end; } now = ktime_get_ns(); - toks = min_t(s64, now - police->tcfp_t_c, - police->tcfp_burst); - if (police->peak_present) { - ptoks = toks + police->tcfp_ptoks; - if (ptoks > police->tcfp_mtu_ptoks) - ptoks = police->tcfp_mtu_ptoks; - ptoks -= (s64) psched_l2t_ns(&police->peak, - qdisc_pkt_len(skb)); + toks = min_t(s64, now - p->tcfp_t_c, p->tcfp_burst); + if (p->peak_present) { + ptoks = toks + p->tcfp_ptoks; + if (ptoks > p->tcfp_mtu_ptoks) + ptoks = p->tcfp_mtu_ptoks; + ptoks -= (s64)psched_l2t_ns(&p->peak, + qdisc_pkt_len(skb)); } - toks += police->tcfp_toks; - if (toks > police->tcfp_burst) - toks = police->tcfp_burst; - toks -= (s64) psched_l2t_ns(&police->rate, qdisc_pkt_len(skb)); + toks += p->tcfp_toks; + if (toks > p->tcfp_burst) + toks = p->tcfp_burst; + toks -= (s64)psched_l2t_ns(&p->rate, qdisc_pkt_len(skb)); if ((toks|ptoks) >= 0) { - police->tcfp_t_c = now; - police->tcfp_toks = toks; - police->tcfp_ptoks = ptoks; - if (police->tcfp_result == TC_ACT_SHOT) - police->tcf_qstats.drops++; - spin_unlock(&police->tcf_lock); - return police->tcfp_result; + p->tcfp_t_c = now; + p->tcfp_toks = toks; + p->tcfp_ptoks = ptoks; + ret = p->tcfp_result; + goto inc_drops; } } - police->tcf_qstats.overlimits++; - if (police->tcf_action == TC_ACT_SHOT) - police->tcf_qstats.drops++; - spin_unlock(&police->tcf_lock); - return police->tcf_action; +inc_overlimits: + qstats_overlimit_inc(this_cpu_ptr(police->common.cpu_qstats)); +inc_drops: + if (ret == TC_ACT_SHOT) + qstats_drop_inc(this_cpu_ptr(police->common.cpu_qstats)); +end: + return ret; +} + +static void tcf_police_cleanup(struct tc_action *a) +{ + struct tcf_police *police = to_police(a); + struct tcf_police_params *p; + + p = rcu_dereference_protected(police->params, 1); + if (p) + kfree_rcu(p, rcu); } static int tcf_police_dump(struct sk_buff *skb, struct tc_action *a, @@ -272,6 +294,7 @@ static int tcf_police_dump(struct sk_buff *skb, struct tc_action *a, { unsigned char *b = skb_tail_pointer(skb); struct tcf_police *police = to_police(a); + struct tcf_police_params *p; struct tc_police opt = { .index = police->tcf_index, .refcnt = refcount_read(&police->tcf_refcnt) - ref, @@ -281,19 +304,21 @@ static int tcf_police_dump(struct sk_buff *skb, struct tc_action *a, spin_lock_bh(&police->tcf_lock); opt.action = police->tcf_action; - opt.mtu = police->tcfp_mtu; 
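The act_police rework above follows a pattern worth spelling out: the softirq datapath reads an immutable, RCU-managed parameter block without taking the action lock, while the control path builds a complete replacement and swaps it in under the lock. A minimal sketch with hypothetical names ('foo', 'foo_params'), using the same rcu_swap_protected()/kfree_rcu() pair as the patch:

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo_params {
	u32 rate;
	struct rcu_head rcu;
};

struct foo {
	spinlock_t lock;
	struct foo_params __rcu *params;
};

/* Datapath: runs in BH context under the RCU read lock, no spinlock. */
static u32 foo_read_rate(struct foo *f)
{
	return rcu_dereference_bh(f->params)->rate;
}

/* Control path: publish a whole new parameter block atomically. */
static int foo_set_rate(struct foo *f, u32 rate)
{
	struct foo_params *new = kzalloc(sizeof(*new), GFP_KERNEL);

	if (!new)
		return -ENOMEM;
	new->rate = rate;

	spin_lock_bh(&f->lock);
	/* After the swap, 'new' holds the previous block (or NULL). */
	rcu_swap_protected(f->params, new, lockdep_is_held(&f->lock));
	spin_unlock_bh(&f->lock);

	if (new)
		kfree_rcu(new, rcu);
	return 0;
}

Readers never see a half-updated block: they observe either the old or the new pointer, and the old memory outlives them by a grace period.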
- opt.burst = PSCHED_NS2TICKS(police->tcfp_burst); - if (police->rate_present) - psched_ratecfg_getrate(&opt.rate, &police->rate); - if (police->peak_present) - psched_ratecfg_getrate(&opt.peakrate, &police->peak); + p = rcu_dereference_protected(police->params, + lockdep_is_held(&police->tcf_lock)); + opt.mtu = p->tcfp_mtu; + opt.burst = PSCHED_NS2TICKS(p->tcfp_burst); + if (p->rate_present) + psched_ratecfg_getrate(&opt.rate, &p->rate); + if (p->peak_present) + psched_ratecfg_getrate(&opt.peakrate, &p->peak); if (nla_put(skb, TCA_POLICE_TBF, sizeof(opt), &opt)) goto nla_put_failure; - if (police->tcfp_result && - nla_put_u32(skb, TCA_POLICE_RESULT, police->tcfp_result)) + if (p->tcfp_result && + nla_put_u32(skb, TCA_POLICE_RESULT, p->tcfp_result)) goto nla_put_failure; - if (police->tcfp_ewma_rate && - nla_put_u32(skb, TCA_POLICE_AVRATE, police->tcfp_ewma_rate)) + if (p->tcfp_ewma_rate && + nla_put_u32(skb, TCA_POLICE_AVRATE, p->tcfp_ewma_rate)) goto nla_put_failure; t.install = jiffies_to_clock_t(jiffies - police->tcf_tm.install); @@ -312,8 +337,7 @@ nla_put_failure: return -1; } -static int tcf_police_search(struct net *net, struct tc_action **a, u32 index, - struct netlink_ext_ack *extack) +static int tcf_police_search(struct net *net, struct tc_action **a, u32 index) { struct tc_action_net *tn = net_generic(net, police_net_id); @@ -333,6 +357,7 @@ static struct tc_action_ops act_police_ops = { .init = tcf_police_init, .walk = tcf_police_walker, .lookup = tcf_police_search, + .cleanup = tcf_police_cleanup, .size = sizeof(struct tcf_police), }; diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c index 6b67aa13d2dd..1a0c682fd734 100644 --- a/net/sched/act_sample.c +++ b/net/sched/act_sample.c @@ -224,8 +224,7 @@ static int tcf_sample_walker(struct net *net, struct sk_buff *skb, return tcf_generic_walker(tn, skb, cb, type, ops, extack); } -static int tcf_sample_search(struct net *net, struct tc_action **a, u32 index, - struct netlink_ext_ack *extack) +static int tcf_sample_search(struct net *net, struct tc_action **a, u32 index) { struct tc_action_net *tn = net_generic(net, sample_net_id); diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c index 52400d49f81f..902957beceb3 100644 --- a/net/sched/act_simple.c +++ b/net/sched/act_simple.c @@ -188,8 +188,7 @@ static int tcf_simp_walker(struct net *net, struct sk_buff *skb, return tcf_generic_walker(tn, skb, cb, type, ops, extack); } -static int tcf_simp_search(struct net *net, struct tc_action **a, u32 index, - struct netlink_ext_ack *extack) +static int tcf_simp_search(struct net *net, struct tc_action **a, u32 index) { struct tc_action_net *tn = net_generic(net, simp_net_id); diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c index 73e44ce2a883..64dba3708fce 100644 --- a/net/sched/act_skbedit.c +++ b/net/sched/act_skbedit.c @@ -99,7 +99,7 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla, struct netlink_ext_ack *extack) { struct tc_action_net *tn = net_generic(net, skbedit_net_id); - struct tcf_skbedit_params *params_old, *params_new; + struct tcf_skbedit_params *params_new; struct nlattr *tb[TCA_SKBEDIT_MAX + 1]; struct tc_skbedit *parm; struct tcf_skbedit *d; @@ -187,8 +187,6 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla, } } - ASSERT_RTNL(); - params_new = kzalloc(sizeof(*params_new), GFP_KERNEL); if (unlikely(!params_new)) { if (ret == ACT_P_CREATED) @@ -210,11 +208,13 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla, if (flags & SKBEDIT_F_MASK) 
params_new->mask = *mask; + spin_lock_bh(&d->tcf_lock); d->tcf_action = parm->action; - params_old = rtnl_dereference(d->params); - rcu_assign_pointer(d->params, params_new); - if (params_old) - kfree_rcu(params_old, rcu); + rcu_swap_protected(d->params, params_new, + lockdep_is_held(&d->tcf_lock)); + spin_unlock_bh(&d->tcf_lock); + if (params_new) + kfree_rcu(params_new, rcu); if (ret == ACT_P_CREATED) tcf_idr_insert(tn, *a); @@ -231,12 +231,14 @@ static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a, .index = d->tcf_index, .refcnt = refcount_read(&d->tcf_refcnt) - ref, .bindcnt = atomic_read(&d->tcf_bindcnt) - bind, - .action = d->tcf_action, }; u64 pure_flags = 0; struct tcf_t t; - params = rtnl_dereference(d->params); + spin_lock_bh(&d->tcf_lock); + params = rcu_dereference_protected(d->params, + lockdep_is_held(&d->tcf_lock)); + opt.action = d->tcf_action; if (nla_put(skb, TCA_SKBEDIT_PARMS, sizeof(opt), &opt)) goto nla_put_failure; @@ -264,9 +266,12 @@ static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a, tcf_tm_dump(&t, &d->tcf_tm); if (nla_put_64bit(skb, TCA_SKBEDIT_TM, sizeof(t), &t, TCA_SKBEDIT_PAD)) goto nla_put_failure; + spin_unlock_bh(&d->tcf_lock); + return skb->len; nla_put_failure: + spin_unlock_bh(&d->tcf_lock); nlmsg_trim(skb, b); return -1; } @@ -291,8 +296,7 @@ static int tcf_skbedit_walker(struct net *net, struct sk_buff *skb, return tcf_generic_walker(tn, skb, cb, type, ops, extack); } -static int tcf_skbedit_search(struct net *net, struct tc_action **a, u32 index, - struct netlink_ext_ack *extack) +static int tcf_skbedit_search(struct net *net, struct tc_action **a, u32 index) { struct tc_action_net *tn = net_generic(net, skbedit_net_id); diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c index 588077fafd6c..59710a183bd3 100644 --- a/net/sched/act_skbmod.c +++ b/net/sched/act_skbmod.c @@ -251,8 +251,7 @@ static int tcf_skbmod_walker(struct net *net, struct sk_buff *skb, return tcf_generic_walker(tn, skb, cb, type, ops, extack); } -static int tcf_skbmod_search(struct net *net, struct tc_action **a, u32 index, - struct netlink_ext_ack *extack) +static int tcf_skbmod_search(struct net *net, struct tc_action **a, u32 index) { struct tc_action_net *tn = net_generic(net, skbmod_net_id); diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c index 681f6f04e7da..4cca8f274662 100644 --- a/net/sched/act_tunnel_key.c +++ b/net/sched/act_tunnel_key.c @@ -548,8 +548,7 @@ static int tunnel_key_walker(struct net *net, struct sk_buff *skb, return tcf_generic_walker(tn, skb, cb, type, ops, extack); } -static int tunnel_key_search(struct net *net, struct tc_action **a, u32 index, - struct netlink_ext_ack *extack) +static int tunnel_key_search(struct net *net, struct tc_action **a, u32 index) { struct tc_action_net *tn = net_generic(net, tunnel_key_net_id); diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c index 033d273afe50..ba677d54a7af 100644 --- a/net/sched/act_vlan.c +++ b/net/sched/act_vlan.c @@ -288,8 +288,7 @@ static int tcf_vlan_walker(struct net *net, struct sk_buff *skb, return tcf_generic_walker(tn, skb, cb, type, ops, extack); } -static int tcf_vlan_search(struct net *net, struct tc_action **a, u32 index, - struct netlink_ext_ack *extack) +static int tcf_vlan_search(struct net *net, struct tc_action **a, u32 index) { struct tc_action_net *tn = net_generic(net, vlan_net_id); diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 0a75cb2e5e7b..43c8559aca56 100644 --- a/net/sched/cls_api.c +++ 
b/net/sched/cls_api.c @@ -240,8 +240,8 @@ static void tcf_chain_destroy(struct tcf_chain *chain) if (!chain->index) block->chain0.chain = NULL; kfree(chain); - if (list_empty(&block->chain_list) && block->refcnt == 0) - kfree(block); + if (list_empty(&block->chain_list) && !refcount_read(&block->refcnt)) + kfree_rcu(block, rcu); } static void tcf_chain_hold(struct tcf_chain *chain) @@ -473,6 +473,7 @@ tcf_chain0_head_change_cb_del(struct tcf_block *block, } struct tcf_net { + spinlock_t idr_lock; /* Protects idr */ struct idr idr; }; @@ -482,16 +483,25 @@ static int tcf_block_insert(struct tcf_block *block, struct net *net, struct netlink_ext_ack *extack) { struct tcf_net *tn = net_generic(net, tcf_net_id); + int err; + + idr_preload(GFP_KERNEL); + spin_lock(&tn->idr_lock); + err = idr_alloc_u32(&tn->idr, block, &block->index, block->index, + GFP_NOWAIT); + spin_unlock(&tn->idr_lock); + idr_preload_end(); - return idr_alloc_u32(&tn->idr, block, &block->index, block->index, - GFP_KERNEL); + return err; } static void tcf_block_remove(struct tcf_block *block, struct net *net) { struct tcf_net *tn = net_generic(net, tcf_net_id); + spin_lock(&tn->idr_lock); idr_remove(&tn->idr, block->index); + spin_unlock(&tn->idr_lock); } static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q, @@ -510,7 +520,7 @@ static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q, INIT_LIST_HEAD(&block->owner_list); INIT_LIST_HEAD(&block->chain0.filter_chain_list); - block->refcnt = 1; + refcount_set(&block->refcnt, 1); block->net = net; block->index = block_index; @@ -527,6 +537,78 @@ static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index) return idr_find(&tn->idr, block_index); } +static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index) +{ + struct tcf_block *block; + + rcu_read_lock(); + block = tcf_block_lookup(net, block_index); + if (block && !refcount_inc_not_zero(&block->refcnt)) + block = NULL; + rcu_read_unlock(); + + return block; +} + +static void tcf_block_flush_all_chains(struct tcf_block *block) +{ + struct tcf_chain *chain; + + /* Hold a refcnt for all chains, so that they don't disappear + * while we are iterating. + */ + list_for_each_entry(chain, &block->chain_list, list) + tcf_chain_hold(chain); + + list_for_each_entry(chain, &block->chain_list, list) + tcf_chain_flush(chain); +} + +static void tcf_block_put_all_chains(struct tcf_block *block) +{ + struct tcf_chain *chain, *tmp; + + /* At this point, all the chains should have refcnt >= 1. */ + list_for_each_entry_safe(chain, tmp, &block->chain_list, list) { + tcf_chain_put_explicitly_created(chain); + tcf_chain_put(chain); + } +} + +static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q, + struct tcf_block_ext_info *ei) +{ + if (refcount_dec_and_test(&block->refcnt)) { + /* Flushing/putting all chains will cause the block to be + * deallocated when last chain is freed. However, if chain_list + * is empty, block has to be manually deallocated. After block + * reference counter reached 0, it is no longer possible to + * increment it or add new chains to block. 
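The guarantee described above rests on tcf_block_refcnt_get(): under rcu_read_lock(), an object found in the shared IDR is returned only if its refcount can be raised from a non-zero value, so a concurrent release can never hand out a block that is already being torn down. A hypothetical stand-alone version of that lookup/release pair (locking around the IDR elided for brevity):

#include <linux/idr.h>
#include <linux/rcupdate.h>
#include <linux/refcount.h>
#include <linux/slab.h>

struct blk {
	u32 index;
	refcount_t refcnt;
	struct rcu_head rcu;
};

static struct blk *blk_get(struct idr *blk_idr, u32 index)
{
	struct blk *b;

	rcu_read_lock();
	b = idr_find(blk_idr, index);
	if (b && !refcount_inc_not_zero(&b->refcnt))
		b = NULL;		/* found, but already dying */
	rcu_read_unlock();

	return b;
}

static void blk_put(struct idr *blk_idr, struct blk *b)
{
	if (refcount_dec_and_test(&b->refcnt)) {
		idr_remove(blk_idr, b->index);
		kfree_rcu(b, rcu);	/* readers may still hold the pointer */
	}
}

The kfree_rcu() is what makes the idr_find() dereference safe: memory cannot be reused while any rcu_read_lock() section that found it is still running.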
+ */ + bool free_block = list_empty(&block->chain_list); + + if (tcf_block_shared(block)) + tcf_block_remove(block, block->net); + if (!free_block) + tcf_block_flush_all_chains(block); + + if (q) + tcf_block_offload_unbind(block, q, ei); + + if (free_block) + kfree_rcu(block, rcu); + else + tcf_block_put_all_chains(block); + } else if (q) { + tcf_block_offload_unbind(block, q, ei); + } +} + +static void tcf_block_refcnt_put(struct tcf_block *block) +{ + __tcf_block_put(block, NULL, NULL); +} + /* Find tcf block. * Set q, parent, cl when appropriate. */ @@ -537,9 +619,10 @@ static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q, struct netlink_ext_ack *extack) { struct tcf_block *block; + int err = 0; if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) { - block = tcf_block_lookup(net, block_index); + block = tcf_block_refcnt_get(net, block_index); if (!block) { NL_SET_ERR_MSG(extack, "Block of given index was not found"); return ERR_PTR(-EINVAL); @@ -548,55 +631,106 @@ static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q, const struct Qdisc_class_ops *cops; struct net_device *dev; + rcu_read_lock(); + /* Find link */ - dev = __dev_get_by_index(net, ifindex); - if (!dev) + dev = dev_get_by_index_rcu(net, ifindex); + if (!dev) { + rcu_read_unlock(); return ERR_PTR(-ENODEV); + } /* Find qdisc */ if (!*parent) { *q = dev->qdisc; *parent = (*q)->handle; } else { - *q = qdisc_lookup(dev, TC_H_MAJ(*parent)); + *q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent)); if (!*q) { NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exists"); - return ERR_PTR(-EINVAL); + err = -EINVAL; + goto errout_rcu; } } + *q = qdisc_refcount_inc_nz(*q); + if (!*q) { + NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exists"); + err = -EINVAL; + goto errout_rcu; + } + /* Is it classful? */ cops = (*q)->ops->cl_ops; if (!cops) { NL_SET_ERR_MSG(extack, "Qdisc not classful"); - return ERR_PTR(-EINVAL); + err = -EINVAL; + goto errout_rcu; } if (!cops->tcf_block) { NL_SET_ERR_MSG(extack, "Class doesn't support blocks"); - return ERR_PTR(-EOPNOTSUPP); + err = -EOPNOTSUPP; + goto errout_rcu; } + /* At this point we know that qdisc is not noop_qdisc, + * which means that qdisc holds a reference to net_device + * and we hold a reference to qdisc, so it is safe to release + * rcu read lock. + */ + rcu_read_unlock(); + /* Do we search for filter, attached to class? */ if (TC_H_MIN(*parent)) { *cl = cops->find(*q, *parent); if (*cl == 0) { NL_SET_ERR_MSG(extack, "Specified class doesn't exist"); - return ERR_PTR(-ENOENT); + err = -ENOENT; + goto errout_qdisc; } } /* And the last stroke */ block = cops->tcf_block(*q, *cl, extack); - if (!block) - return ERR_PTR(-EINVAL); + if (!block) { + err = -EINVAL; + goto errout_qdisc; + } if (tcf_block_shared(block)) { NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters"); - return ERR_PTR(-EOPNOTSUPP); + err = -EOPNOTSUPP; + goto errout_qdisc; } + + /* Always take reference to block in order to support execution + * of rules update path of cls API without rtnl lock. Caller + * must release block when it is finished using it. 'if' block + * of this conditional obtain reference to block by calling + * tcf_block_refcnt_get(). 
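Because tcf_block_find() now returns with references held on both the qdisc and the block, every failure path has to release exactly what was acquired so far, which is what the errout_rcu/errout_qdisc cascade and the new tcf_block_release() helper are for. The shape of that idiom, reduced to a self-contained, purely illustrative example:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct ctx { char *q; FILE *blk; };	/* stand-ins for qdisc/block refs */

static int ctx_acquire(struct ctx *c, const char *path)
{
	int err;

	c->q = malloc(64);		/* first resource */
	if (!c->q)
		return -ENOMEM;

	c->blk = fopen(path, "r");	/* second resource */
	if (!c->blk) {
		err = -errno;
		goto err_free_q;
	}
	return 0;

err_free_q:
	free(c->q);
	c->q = NULL;
	return err;
}

/* Single release helper, tolerant of partially acquired state. */
static void ctx_release(struct ctx *c)
{
	if (c->blk)
		fclose(c->blk);
	if (c->q)
		free(c->q);
}

Centralising the release, as tcf_block_release() does, keeps the growing number of error exits in the callers from leaking one of the two references.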
+ */ + refcount_inc(&block->refcnt); } return block; + +errout_rcu: + rcu_read_unlock(); +errout_qdisc: + if (*q) { + qdisc_put(*q); + *q = NULL; + } + return ERR_PTR(err); +} + +static void tcf_block_release(struct Qdisc *q, struct tcf_block *block) +{ + if (!IS_ERR_OR_NULL(block)) + tcf_block_refcnt_put(block); + + if (q) + qdisc_put(q); } struct tcf_block_owner_item { @@ -664,21 +798,16 @@ int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q, { struct net *net = qdisc_net(q); struct tcf_block *block = NULL; - bool created = false; int err; - if (ei->block_index) { + if (ei->block_index) /* block_index not 0 means the shared block is requested */ - block = tcf_block_lookup(net, ei->block_index); - if (block) - block->refcnt++; - } + block = tcf_block_refcnt_get(net, ei->block_index); if (!block) { block = tcf_block_create(net, q, ei->block_index, extack); if (IS_ERR(block)) return PTR_ERR(block); - created = true; if (tcf_block_shared(block)) { err = tcf_block_insert(block, net, extack); if (err) @@ -708,14 +837,8 @@ err_block_offload_bind: err_chain0_head_change_cb_add: tcf_block_owner_del(block, q, ei->binder_type); err_block_owner_add: - if (created) { - if (tcf_block_shared(block)) - tcf_block_remove(block, net); err_block_insert: - kfree(block); - } else { - block->refcnt--; - } + tcf_block_refcnt_put(block); return err; } EXPORT_SYMBOL(tcf_block_get_ext); @@ -747,42 +870,12 @@ EXPORT_SYMBOL(tcf_block_get); void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q, struct tcf_block_ext_info *ei) { - struct tcf_chain *chain, *tmp; - if (!block) return; tcf_chain0_head_change_cb_del(block, ei); tcf_block_owner_del(block, q, ei->binder_type); - if (block->refcnt == 1) { - if (tcf_block_shared(block)) - tcf_block_remove(block, block->net); - - /* Hold a refcnt for all chains, so that they don't disappear - * while we are iterating. - */ - list_for_each_entry(chain, &block->chain_list, list) - tcf_chain_hold(chain); - - list_for_each_entry(chain, &block->chain_list, list) - tcf_chain_flush(chain); - } - - tcf_block_offload_unbind(block, q, ei); - - if (block->refcnt == 1) { - /* At this point, all the chains should have refcnt >= 1. */ - list_for_each_entry_safe(chain, tmp, &block->chain_list, list) { - tcf_chain_put_explicitly_created(chain); - tcf_chain_put(chain); - } - - block->refcnt--; - if (list_empty(&block->chain_list)) - kfree(block); - } else { - block->refcnt--; - } + __tcf_block_put(block, q, ei); } EXPORT_SYMBOL(tcf_block_put_ext); @@ -1332,6 +1425,7 @@ replay: errout: if (chain) tcf_chain_put(chain); + tcf_block_release(q, block); if (err == -EAGAIN) /* Replay the request. 
*/ goto replay; @@ -1453,6 +1547,7 @@ static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n, errout: if (chain) tcf_chain_put(chain); + tcf_block_release(q, block); return err; } @@ -1538,6 +1633,7 @@ static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n, errout: if (chain) tcf_chain_put(chain); + tcf_block_release(q, block); return err; } @@ -1631,12 +1727,13 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) if (nlmsg_len(cb->nlh) < sizeof(*tcm)) return skb->len; - err = nlmsg_parse(cb->nlh, sizeof(*tcm), tca, TCA_MAX, NULL, NULL); + err = nlmsg_parse(cb->nlh, sizeof(*tcm), tca, TCA_MAX, NULL, + cb->extack); if (err) return err; if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) { - block = tcf_block_lookup(net, tcm->tcm_block_index); + block = tcf_block_refcnt_get(net, tcm->tcm_block_index); if (!block) goto out; /* If we work with block index, q is NULL and parent value @@ -1695,6 +1792,8 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) } } + if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) + tcf_block_refcnt_put(block); cb->args[0] = index; out: @@ -1854,7 +1953,8 @@ replay: chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0; if (chain_index > TC_ACT_EXT_VAL_MASK) { NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit"); - return -EINVAL; + err = -EINVAL; + goto errout_block; } chain = tcf_chain_lookup(block, chain_index); if (n->nlmsg_type == RTM_NEWCHAIN) { @@ -1866,23 +1966,27 @@ replay: tcf_chain_hold(chain); } else { NL_SET_ERR_MSG(extack, "Filter chain already exists"); - return -EEXIST; + err = -EEXIST; + goto errout_block; } } else { if (!(n->nlmsg_flags & NLM_F_CREATE)) { NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain"); - return -ENOENT; + err = -ENOENT; + goto errout_block; } chain = tcf_chain_create(block, chain_index); if (!chain) { NL_SET_ERR_MSG(extack, "Failed to create filter chain"); - return -ENOMEM; + err = -ENOMEM; + goto errout_block; } } } else { if (!chain || tcf_chain_held_by_acts_only(chain)) { NL_SET_ERR_MSG(extack, "Cannot find specified filter chain"); - return -EINVAL; + err = -EINVAL; + goto errout_block; } tcf_chain_hold(chain); } @@ -1926,6 +2030,8 @@ replay: errout: tcf_chain_put(chain); +errout_block: + tcf_block_release(q, block); if (err == -EAGAIN) /* Replay the request. 
*/ goto replay; @@ -1949,12 +2055,13 @@ static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb) if (nlmsg_len(cb->nlh) < sizeof(*tcm)) return skb->len; - err = nlmsg_parse(cb->nlh, sizeof(*tcm), tca, TCA_MAX, NULL, NULL); + err = nlmsg_parse(cb->nlh, sizeof(*tcm), tca, TCA_MAX, NULL, + cb->extack); if (err) return err; if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) { - block = tcf_block_lookup(net, tcm->tcm_block_index); + block = tcf_block_refcnt_get(net, tcm->tcm_block_index); if (!block) goto out; /* If we work with block index, q is NULL and parent value @@ -2021,6 +2128,8 @@ static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb) index++; } + if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) + tcf_block_refcnt_put(block); cb->args[0] = index; out: @@ -2213,6 +2322,7 @@ static __net_init int tcf_net_init(struct net *net) { struct tcf_net *tn = net_generic(net, tcf_net_id); + spin_lock_init(&tn->idr_lock); idr_init(&tn->idr); return 0; } diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index 6fd9bdd93796..9aada2d0ef06 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -98,7 +98,7 @@ struct cls_fl_filter { struct list_head list; u32 handle; u32 flags; - unsigned int in_hw_count; + u32 in_hw_count; struct rcu_work rwork; struct net_device *hw_dev; }; @@ -993,7 +993,7 @@ static int fl_init_mask_hashtable(struct fl_flow_mask *mask) } #define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member) -#define FL_KEY_MEMBER_SIZE(member) (sizeof(((struct fl_flow_key *) 0)->member)) +#define FL_KEY_MEMBER_SIZE(member) FIELD_SIZEOF(struct fl_flow_key, member) #define FL_KEY_IS_MASKED(mask, member) \ memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member), \ @@ -1880,6 +1880,9 @@ static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh, if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags)) goto nla_put_failure; + if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count)) + goto nla_put_failure; + if (tcf_exts_dump(skb, &f->exts)) goto nla_put_failure; diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index b2c3406a2cf2..4b28fd44576d 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c @@ -68,7 +68,6 @@ struct tc_u_knode { u32 mask; u32 __percpu *pcpu_success; #endif - struct tcf_proto *tp; struct rcu_work rwork; /* The 'sel' field MUST be the last field in structure to allow for * tc_u32_keys allocated at end of structure. 
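The "MUST be the last field" comment above is the classic flexible-array constraint: tc_u_knode and tc_u_hnode both end in a variable-length region, so one allocation carries the fixed header plus its trailing elements. A generic stand-alone sketch of that layout (hypothetical names):

#include <stdlib.h>

struct key { unsigned int val, mask; };

struct knode {
	int nkeys;
	struct key keys[];	/* flexible array member: must be last */
};

static struct knode *knode_alloc(int nkeys)
{
	struct knode *n = calloc(1, sizeof(*n) + nkeys * sizeof(struct key));

	if (n)
		n->nkeys = nkeys;
	return n;
}

Any field placed after the flexible array would overlap the trailing keys, which is why the structure layout is load-bearing here.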
@@ -80,10 +79,10 @@ struct tc_u_hnode { struct tc_u_hnode __rcu *next; u32 handle; u32 prio; - struct tc_u_common *tp_c; int refcnt; unsigned int divisor; struct idr handle_idr; + bool is_root; struct rcu_head rcu; u32 flags; /* The 'ht' field MUST be the last field in structure to allow for @@ -98,7 +97,7 @@ struct tc_u_common { int refcnt; struct idr handle_idr; struct hlist_node hnode; - struct rcu_head rcu; + long knodes; }; static inline unsigned int u32_hash_fold(__be32 key, @@ -344,19 +343,16 @@ static void *tc_u_common_ptr(const struct tcf_proto *tp) return block->q; } -static unsigned int tc_u_hash(const struct tcf_proto *tp) +static struct hlist_head *tc_u_hash(void *key) { - return hash_ptr(tc_u_common_ptr(tp), U32_HASH_SHIFT); + return tc_u_common_hash + hash_ptr(key, U32_HASH_SHIFT); } -static struct tc_u_common *tc_u_common_find(const struct tcf_proto *tp) +static struct tc_u_common *tc_u_common_find(void *key) { struct tc_u_common *tc; - unsigned int h; - - h = tc_u_hash(tp); - hlist_for_each_entry(tc, &tc_u_common_hash[h], hnode) { - if (tc->ptr == tc_u_common_ptr(tp)) + hlist_for_each_entry(tc, tc_u_hash(key), hnode) { + if (tc->ptr == key) return tc; } return NULL; @@ -365,10 +361,8 @@ static struct tc_u_common *tc_u_common_find(const struct tcf_proto *tp) static int u32_init(struct tcf_proto *tp) { struct tc_u_hnode *root_ht; - struct tc_u_common *tp_c; - unsigned int h; - - tp_c = tc_u_common_find(tp); + void *key = tc_u_common_ptr(tp); + struct tc_u_common *tp_c = tc_u_common_find(key); root_ht = kzalloc(sizeof(*root_ht), GFP_KERNEL); if (root_ht == NULL) @@ -377,6 +371,7 @@ static int u32_init(struct tcf_proto *tp) root_ht->refcnt++; root_ht->handle = tp_c ? gen_new_htid(tp_c, root_ht) : 0x80000000; root_ht->prio = tp->prio; + root_ht->is_root = true; idr_init(&root_ht->handle_idr); if (tp_c == NULL) { @@ -385,18 +380,16 @@ static int u32_init(struct tcf_proto *tp) kfree(root_ht); return -ENOBUFS; } - tp_c->ptr = tc_u_common_ptr(tp); + tp_c->ptr = key; INIT_HLIST_NODE(&tp_c->hnode); idr_init(&tp_c->handle_idr); - h = tc_u_hash(tp); - hlist_add_head(&tp_c->hnode, &tc_u_common_hash[h]); + hlist_add_head(&tp_c->hnode, tc_u_hash(key)); } tp_c->refcnt++; RCU_INIT_POINTER(root_ht->next, tp_c->hlist); rcu_assign_pointer(tp_c->hlist, root_ht); - root_ht->tp_c = tp_c; root_ht->refcnt++; rcu_assign_pointer(tp->root, root_ht); @@ -404,8 +397,7 @@ static int u32_init(struct tcf_proto *tp) return 0; } -static int u32_destroy_key(struct tcf_proto *tp, struct tc_u_knode *n, - bool free_pf) +static int u32_destroy_key(struct tc_u_knode *n, bool free_pf) { struct tc_u_hnode *ht = rtnl_dereference(n->ht_down); @@ -439,7 +431,7 @@ static void u32_delete_key_work(struct work_struct *work) struct tc_u_knode, rwork); rtnl_lock(); - u32_destroy_key(key->tp, key, false); + u32_destroy_key(key, false); rtnl_unlock(); } @@ -456,12 +448,13 @@ static void u32_delete_key_freepf_work(struct work_struct *work) struct tc_u_knode, rwork); rtnl_lock(); - u32_destroy_key(key->tp, key, true); + u32_destroy_key(key, true); rtnl_unlock(); } static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key) { + struct tc_u_common *tp_c = tp->data; struct tc_u_knode __rcu **kp; struct tc_u_knode *pkp; struct tc_u_hnode *ht = rtnl_dereference(key->ht_up); @@ -472,6 +465,7 @@ static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key) kp = &pkp->next, pkp = rtnl_dereference(*kp)) { if (pkp == key) { RCU_INIT_POINTER(*kp, key->next); + tp_c->knodes--; tcf_unbind_filter(tp, &key->res); 
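Both removal paths above now decrement tp_c->knodes, which is what later lets u32_delete() answer "is anything left?" in O(1) instead of walking every hash bucket the way the deleted ht_empty() did. The invariant, in miniature (illustrative only):

#include <stdbool.h>

struct table {
	long nitems;	/* maintained on every insert and remove */
};

static void table_insert(struct table *t) { t->nitems++; }
static void table_remove(struct table *t) { t->nitems--; }

static bool table_empty(const struct table *t)
{
	return t->nitems == 0;	/* no bucket scan needed */
}

The cost is one counter update on each mutation; the benefit is that emptiness checks on teardown no longer scale with table size.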
idr_remove(&ht->handle_idr, key->handle); @@ -586,6 +580,7 @@ static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n, static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht, struct netlink_ext_ack *extack) { + struct tc_u_common *tp_c = tp->data; struct tc_u_knode *n; unsigned int h; @@ -593,13 +588,14 @@ static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht, while ((n = rtnl_dereference(ht->ht[h])) != NULL) { RCU_INIT_POINTER(ht->ht[h], rtnl_dereference(n->next)); + tp_c->knodes--; tcf_unbind_filter(tp, &n->res); u32_remove_hw_knode(tp, n, extack); idr_remove(&ht->handle_idr, n->handle); if (tcf_exts_get_net(&n->exts)) tcf_queue_work(&n->rwork, u32_delete_key_freepf_work); else - u32_destroy_key(n->tp, n, true); + u32_destroy_key(n, true); } } } @@ -632,17 +628,6 @@ static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht, return -ENOENT; } -static bool ht_empty(struct tc_u_hnode *ht) -{ - unsigned int h; - - for (h = 0; h <= ht->divisor; h++) - if (rcu_access_pointer(ht->ht[h])) - return false; - - return true; -} - static void u32_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack) { struct tc_u_common *tp_c = tp->data; @@ -680,20 +665,16 @@ static int u32_delete(struct tcf_proto *tp, void *arg, bool *last, struct netlink_ext_ack *extack) { struct tc_u_hnode *ht = arg; - struct tc_u_hnode *root_ht = rtnl_dereference(tp->root); struct tc_u_common *tp_c = tp->data; int ret = 0; - if (ht == NULL) - goto out; - if (TC_U32_KEY(ht->handle)) { u32_remove_hw_knode(tp, (struct tc_u_knode *)ht, extack); ret = u32_delete_key(tp, (struct tc_u_knode *)ht); goto out; } - if (root_ht == ht) { + if (ht->is_root) { NL_SET_ERR_MSG_MOD(extack, "Not allowed to delete root node"); return -EINVAL; } @@ -706,38 +687,7 @@ static int u32_delete(struct tcf_proto *tp, void *arg, bool *last, } out: - *last = true; - if (root_ht) { - if (root_ht->refcnt > 2) { - *last = false; - goto ret; - } - if (root_ht->refcnt == 2) { - if (!ht_empty(root_ht)) { - *last = false; - goto ret; - } - } - } - - if (tp_c->refcnt > 1) { - *last = false; - goto ret; - } - - if (tp_c->refcnt == 1) { - struct tc_u_hnode *ht; - - for (ht = rtnl_dereference(tp_c->hlist); - ht; - ht = rtnl_dereference(ht->next)) - if (!ht_empty(ht)) { - *last = false; - break; - } - } - -ret: + *last = tp_c->refcnt == 1 && tp_c->knodes == 0; return ret; } @@ -768,7 +718,7 @@ static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = { }; static int u32_set_parms(struct net *net, struct tcf_proto *tp, - unsigned long base, struct tc_u_hnode *ht, + unsigned long base, struct tc_u_knode *n, struct nlattr **tb, struct nlattr *est, bool ovr, struct netlink_ext_ack *extack) @@ -789,12 +739,16 @@ static int u32_set_parms(struct net *net, struct tcf_proto *tp, } if (handle) { - ht_down = u32_lookup_ht(ht->tp_c, handle); + ht_down = u32_lookup_ht(tp->data, handle); if (!ht_down) { NL_SET_ERR_MSG_MOD(extack, "Link hash table not found"); return -EINVAL; } + if (ht_down->is_root) { + NL_SET_ERR_MSG_MOD(extack, "Not linking to root node"); + return -EINVAL; + } ht_down->refcnt++; } @@ -891,7 +845,6 @@ static struct tc_u_knode *u32_init_knode(struct tcf_proto *tp, /* Similarly success statistics must be moved as pointers */ new->pcpu_success = n->pcpu_success; #endif - new->tp = tp; memcpy(&new->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key)); if (tcf_exts_init(&new->exts, TCA_U32_ACT, TCA_U32_POLICE)) { @@ -960,18 +913,17 @@ static int u32_change(struct net *net, struct sk_buff 
*in_skb, if (!new) return -ENOMEM; - err = u32_set_parms(net, tp, base, - rtnl_dereference(n->ht_up), new, tb, + err = u32_set_parms(net, tp, base, new, tb, tca[TCA_RATE], ovr, extack); if (err) { - u32_destroy_key(tp, new, false); + u32_destroy_key(new, false); return err; } err = u32_replace_hw_knode(tp, new, flags, extack); if (err) { - u32_destroy_key(tp, new, false); + u32_destroy_key(new, false); return err; } @@ -988,7 +940,11 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, if (tb[TCA_U32_DIVISOR]) { unsigned int divisor = nla_get_u32(tb[TCA_U32_DIVISOR]); - if (--divisor > 0x100) { + if (!is_power_of_2(divisor)) { + NL_SET_ERR_MSG_MOD(extack, "Divisor is not a power of 2"); + return -EINVAL; + } + if (divisor-- > 0x100) { NL_SET_ERR_MSG_MOD(extack, "Exceeded maximum 256 hash buckets"); return -EINVAL; } @@ -1013,7 +969,6 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, return err; } } - ht->tp_c = tp_c; ht->refcnt = 1; ht->divisor = divisor; ht->handle = handle; @@ -1103,7 +1058,6 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, n->handle = handle; n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0; n->flags = flags; - n->tp = tp; err = tcf_exts_init(&n->exts, TCA_U32_ACT, TCA_U32_POLICE); if (err < 0) @@ -1125,7 +1079,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, } #endif - err = u32_set_parms(net, tp, base, ht, n, tb, tca[TCA_RATE], ovr, + err = u32_set_parms(net, tp, base, n, tb, tca[TCA_RATE], ovr, extack); if (err == 0) { struct tc_u_knode __rcu **ins; @@ -1146,6 +1100,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, RCU_INIT_POINTER(n->next, pins); rcu_assign_pointer(*ins, n); + tp_c->knodes++; *arg = n; return 0; } diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 85e73f48e48f..cf5c714ae786 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -27,7 +27,6 @@ #include <linux/kmod.h> #include <linux/list.h> #include <linux/hrtimer.h> -#include <linux/lockdep.h> #include <linux/slab.h> #include <linux/hashtable.h> @@ -315,6 +314,24 @@ out: return q; } +struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle) +{ + struct netdev_queue *nq; + struct Qdisc *q; + + if (!handle) + return NULL; + q = qdisc_match_from_root(dev->qdisc, handle); + if (q) + goto out; + + nq = dev_ingress_queue_rcu(dev); + if (nq) + q = qdisc_match_from_root(nq->qdisc_sleeping, handle); +out: + return q; +} + static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid) { unsigned long cl; @@ -921,7 +938,7 @@ static void notify_and_destroy(struct net *net, struct sk_buff *skb, qdisc_notify(net, skb, n, clid, old, new); if (old) - qdisc_destroy(old); + qdisc_put(old); } /* Graft qdisc "new" to class "classid" of qdisc "parent" or @@ -974,7 +991,7 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent, qdisc_refcount_inc(new); if (!ingress) - qdisc_destroy(old); + qdisc_put(old); } skip: @@ -1053,10 +1070,6 @@ static int qdisc_block_indexes_set(struct Qdisc *sch, struct nlattr **tca, return 0; } -/* lockdep annotation is needed for ingress; egress gets it only for name */ -static struct lock_class_key qdisc_tx_lock; -static struct lock_class_key qdisc_rx_lock; - /* Allocate and initialize new qdisc. 
@@ -1121,7 +1134,6 @@ static struct Qdisc *qdisc_create(struct net_device *dev, if (handle == TC_H_INGRESS) { sch->flags |= TCQ_F_INGRESS; handle = TC_H_MAKE(TC_H_INGRESS, 0); - lockdep_set_class(qdisc_lock(sch), &qdisc_rx_lock); } else { if (handle == 0) { handle = qdisc_alloc_handle(dev); @@ -1129,7 +1141,6 @@ static struct Qdisc *qdisc_create(struct net_device *dev, if (handle == 0) goto err_out3; } - lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock); if (!netif_is_multiqueue(dev)) sch->flags |= TCQ_F_ONETXQUEUE; } @@ -1582,7 +1593,7 @@ graft: err = qdisc_graft(dev, p, skb, n, clid, q, NULL, extack); if (err) { if (q) - qdisc_destroy(q); + qdisc_put(q); return err; } @@ -1660,7 +1671,7 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb) ASSERT_RTNL(); err = nlmsg_parse(nlh, sizeof(struct tcmsg), tca, TCA_MAX, - rtm_tca_policy, NULL); + rtm_tca_policy, cb->extack); if (err < 0) return err; diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c index cd49afca9617..d714d3747bcb 100644 --- a/net/sched/sch_atm.c +++ b/net/sched/sch_atm.c @@ -150,7 +150,7 @@ static void atm_tc_put(struct Qdisc *sch, unsigned long cl) pr_debug("atm_tc_put: destroying\n"); list_del_init(&flow->list); pr_debug("atm_tc_put: qdisc %p\n", flow->q); - qdisc_destroy(flow->q); + qdisc_put(flow->q); tcf_block_put(flow->block); if (flow->sock) { pr_debug("atm_tc_put: f_count %ld\n", diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c index 793016d722ec..b910cd5c56f7 100644 --- a/net/sched/sch_cake.c +++ b/net/sched/sch_cake.c @@ -812,7 +812,7 @@ static struct sk_buff *dequeue_head(struct cake_flow *flow) if (skb) { flow->head = skb->next; - skb->next = NULL; + skb_mark_not_on_list(skb); } return skb; @@ -1252,7 +1252,7 @@ found: else flow->head = elig_ack->next; - elig_ack->next = NULL; + skb_mark_not_on_list(elig_ack); return elig_ack; } @@ -1675,7 +1675,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch, while (segs) { nskb = segs->next; - segs->next = NULL; + skb_mark_not_on_list(segs); qdisc_skb_cb(segs)->pkt_len = segs->len; cobalt_set_enqueue_time(segs, now); get_cobalt_cb(segs)->adjusted_len = cake_overhead(q, diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index f42025d53cfe..4dc05409e3fb 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c @@ -1418,7 +1418,7 @@ static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl) WARN_ON(cl->filters); tcf_block_put(cl->block); - qdisc_destroy(cl->q); + qdisc_put(cl->q); qdisc_put_rtab(cl->R_tab); gen_kill_estimator(&cl->rate_est); if (cl != &q->link) diff --git a/net/sched/sch_cbs.c b/net/sched/sch_cbs.c index e26a24017faa..e689e11b6d0f 100644 --- a/net/sched/sch_cbs.c +++ b/net/sched/sch_cbs.c @@ -379,7 +379,7 @@ static void cbs_destroy(struct Qdisc *sch) cbs_disable_offload(dev, q); if (q->qdisc) - qdisc_destroy(q->qdisc); + qdisc_put(q->qdisc); } static int cbs_dump(struct Qdisc *sch, struct sk_buff *skb) diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c index e0b0cf8a9939..cdebaed0f8cf 100644 --- a/net/sched/sch_drr.c +++ b/net/sched/sch_drr.c @@ -134,7 +134,7 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid, tca[TCA_RATE]); if (err) { NL_SET_ERR_MSG(extack, "Failed to replace estimator"); - qdisc_destroy(cl->qdisc); + qdisc_put(cl->qdisc); kfree(cl); return err; } @@ -153,7 +153,7 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid, static void drr_destroy_class(struct Qdisc *sch, struct drr_class *cl) { gen_kill_estimator(&cl->rate_est); 
- qdisc_destroy(cl->qdisc); + qdisc_put(cl->qdisc); kfree(cl); } diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c index 049714c57075..f6f480784bc6 100644 --- a/net/sched/sch_dsmark.c +++ b/net/sched/sch_dsmark.c @@ -412,7 +412,7 @@ static void dsmark_destroy(struct Qdisc *sch) pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p); tcf_block_put(p->block); - qdisc_destroy(p->q); + qdisc_put(p->q); if (p->mv != p->embedded) kfree(p->mv); } diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c index 24893d3b5d22..3809c9bf8896 100644 --- a/net/sched/sch_fifo.c +++ b/net/sched/sch_fifo.c @@ -177,7 +177,7 @@ struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops, if (q) { err = fifo_set_limit(q, limit); if (err < 0) { - qdisc_destroy(q); + qdisc_put(q); q = NULL; } } diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c index 4808713c73b9..338222a6c664 100644 --- a/net/sched/sch_fq.c +++ b/net/sched/sch_fq.c @@ -106,7 +106,6 @@ struct fq_sched_data { u64 stat_gc_flows; u64 stat_internal_packets; - u64 stat_tcp_retrans; u64 stat_throttled; u64 stat_flows_plimit; u64 stat_pkts_too_long; @@ -319,7 +318,7 @@ static struct sk_buff *fq_dequeue_head(struct Qdisc *sch, struct fq_flow *flow) if (skb) { flow->head = skb->next; - skb->next = NULL; + skb_mark_not_on_list(skb); flow->qlen--; qdisc_qstats_backlog_dec(sch, skb); sch->q.qlen--; @@ -327,62 +326,17 @@ static struct sk_buff *fq_dequeue_head(struct Qdisc *sch, struct fq_flow *flow) return skb; } -/* We might add in the future detection of retransmits - * For the time being, just return false - */ -static bool skb_is_retransmit(struct sk_buff *skb) -{ - return false; -} - -/* add skb to flow queue - * flow queue is a linked list, kind of FIFO, except for TCP retransmits - * We special case tcp retransmits to be transmitted before other packets. - * We rely on fact that TCP retransmits are unlikely, so we do not waste - * a separate queue or a pointer. 
- * head-> [retrans pkt 1] - * [retrans pkt 2] - * [ normal pkt 1] - * [ normal pkt 2] - * [ normal pkt 3] - * tail-> [ normal pkt 4] - */ static void flow_queue_add(struct fq_flow *flow, struct sk_buff *skb) { - struct sk_buff *prev, *head = flow->head; + struct sk_buff *head = flow->head; skb->next = NULL; - if (!head) { + if (!head) flow->head = skb; - flow->tail = skb; - return; - } - if (likely(!skb_is_retransmit(skb))) { + else flow->tail->next = skb; - flow->tail = skb; - return; - } - /* This skb is a tcp retransmit, - * find the last retrans packet in the queue - */ - prev = NULL; - while (skb_is_retransmit(head)) { - prev = head; - head = head->next; - if (!head) - break; - } - if (!prev) { /* no rtx packet in queue, become the new head */ - skb->next = flow->head; - flow->head = skb; - } else { - if (prev == flow->tail) - flow->tail = skb; - else - skb->next = prev->next; - prev->next = skb; - } + flow->tail = skb; } static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch, @@ -401,8 +355,6 @@ static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch, } f->qlen++; - if (skb_is_retransmit(skb)) - q->stat_tcp_retrans++; qdisc_qstats_backlog_inc(sch, skb); if (fq_flow_is_detached(f)) { struct sock *sk = skb->sk; @@ -491,11 +443,16 @@ begin: } skb = f->head; - if (unlikely(skb && now < f->time_next_packet && - !skb_is_tcp_pure_ack(skb))) { - head->first = f->next; - fq_flow_set_throttled(q, f); - goto begin; + if (skb && !skb_is_tcp_pure_ack(skb)) { + u64 time_next_packet = max_t(u64, ktime_to_ns(skb->tstamp), + f->time_next_packet); + + if (now < time_next_packet) { + head->first = f->next; + f->time_next_packet = time_next_packet; + fq_flow_set_throttled(q, f); + goto begin; + } } skb = fq_dequeue_head(sch, f); @@ -513,11 +470,7 @@ begin: prefetch(&skb->end); f->credit -= qdisc_pkt_len(skb); - if (!q->rate_enable) - goto out; - - /* Do not pace locally generated ack packets */ - if (skb_is_tcp_pure_ack(skb)) + if (ktime_to_ns(skb->tstamp) || !q->rate_enable) goto out; rate = q->flow_max_rate; @@ -823,7 +776,7 @@ static int fq_init(struct Qdisc *sch, struct nlattr *opt, q->fq_trees_log = ilog2(1024); q->orphan_mask = 1024 - 1; q->low_rate_threshold = 550000 / 8; - qdisc_watchdog_init(&q->watchdog, sch); + qdisc_watchdog_init_clockid(&q->watchdog, sch, CLOCK_MONOTONIC); if (opt) err = fq_change(sch, opt, extack); @@ -873,7 +826,7 @@ static int fq_dump_stats(struct Qdisc *sch, struct gnet_dump *d) st.gc_flows = q->stat_gc_flows; st.highprio_packets = q->stat_internal_packets; - st.tcp_retrans = q->stat_tcp_retrans; + st.tcp_retrans = 0; st.throttled = q->stat_throttled; st.flows_plimit = q->stat_flows_plimit; st.pkts_too_long = q->stat_pkts_too_long; diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c index 6c0a9d5dbf94..cd04d40c30b6 100644 --- a/net/sched/sch_fq_codel.c +++ b/net/sched/sch_fq_codel.c @@ -124,7 +124,7 @@ static inline struct sk_buff *dequeue_head(struct fq_codel_flow *flow) struct sk_buff *skb = flow->head; flow->head = skb->next; - skb->next = NULL; + skb_mark_not_on_list(skb); return skb; } diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 69078c82963e..de1663f7d3ad 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -184,7 +184,7 @@ static void try_bulk_dequeue_skb(struct Qdisc *q, skb = nskb; (*packets)++; /* GSO counts as one pkt */ } - skb->next = NULL; + skb_mark_not_on_list(skb); } /* This variant of try_bulk_dequeue_skb() makes sure @@ -210,7 +210,7 @@ static void try_bulk_dequeue_skb_slow(struct Qdisc 
*q,
 		skb = nskb;
 	} while (++cnt < 8);
 	(*packets) += cnt;
-	skb->next = NULL;
+	skb_mark_not_on_list(skb);
 }
 
 /* Note that dequeue_skb can possibly return a SKB list (via skb->next).
@@ -572,6 +572,18 @@ struct Qdisc noop_qdisc = {
 	.dev_queue	=	&noop_netdev_queue,
 	.running	=	SEQCNT_ZERO(noop_qdisc.running),
 	.busylock	=	__SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
+	.gso_skb = {
+		.next = (struct sk_buff *)&noop_qdisc.gso_skb,
+		.prev = (struct sk_buff *)&noop_qdisc.gso_skb,
+		.qlen = 0,
+		.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.gso_skb.lock),
+	},
+	.skb_bad_txq = {
+		.next = (struct sk_buff *)&noop_qdisc.skb_bad_txq,
+		.prev = (struct sk_buff *)&noop_qdisc.skb_bad_txq,
+		.qlen = 0,
+		.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.skb_bad_txq.lock),
+	},
 };
 EXPORT_SYMBOL(noop_qdisc);
 
@@ -901,7 +913,7 @@ struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
 	if (!ops->init || ops->init(sch, NULL, extack) == 0)
 		return sch;
 
-	qdisc_destroy(sch);
+	qdisc_put(sch);
 	return NULL;
 }
 EXPORT_SYMBOL(qdisc_create_dflt);
@@ -941,15 +953,18 @@ void qdisc_free(struct Qdisc *qdisc)
 	kfree((char *) qdisc - qdisc->padded);
 }
 
-void qdisc_destroy(struct Qdisc *qdisc)
+static void qdisc_free_cb(struct rcu_head *head)
+{
+	struct Qdisc *q = container_of(head, struct Qdisc, rcu);
+
+	qdisc_free(q);
+}
+
+static void qdisc_destroy(struct Qdisc *qdisc)
 {
 	const struct Qdisc_ops *ops = qdisc->ops;
 	struct sk_buff *skb, *tmp;
 
-	if (qdisc->flags & TCQ_F_BUILTIN ||
-	    !refcount_dec_and_test(&qdisc->refcnt))
-		return;
-
 #ifdef CONFIG_NET_SCHED
 	qdisc_hash_del(qdisc);
 
@@ -974,9 +989,34 @@ void qdisc_destroy(struct Qdisc *qdisc)
 		kfree_skb_list(skb);
 	}
 
-	qdisc_free(qdisc);
+	call_rcu(&qdisc->rcu, qdisc_free_cb);
+}
+
+void qdisc_put(struct Qdisc *qdisc)
+{
+	if (qdisc->flags & TCQ_F_BUILTIN ||
+	    !refcount_dec_and_test(&qdisc->refcnt))
+		return;
+
+	qdisc_destroy(qdisc);
+}
+EXPORT_SYMBOL(qdisc_put);
+
+/* Version of qdisc_put() that is called with rtnl mutex unlocked.
+ * Intended to be used as an optimization, this function only takes the
+ * rtnl lock if the qdisc reference counter reaches zero.
+ */
+
+void qdisc_put_unlocked(struct Qdisc *qdisc)
+{
+	if (qdisc->flags & TCQ_F_BUILTIN ||
+	    !refcount_dec_and_rtnl_lock(&qdisc->refcnt))
+		return;
+
+	qdisc_destroy(qdisc);
+	rtnl_unlock();
 }
-EXPORT_SYMBOL(qdisc_destroy);
+EXPORT_SYMBOL(qdisc_put_unlocked);
 
 /* Attach toplevel qdisc to device queue.
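 * (Grafting returns the previously attached qdisc; with this patch
 * callers release it through the helpers above rather than the old
 * qdisc_destroy(). A rough sketch, assuming caller context:
 *
 *	old = dev_graft_qdisc(dev_queue, new);
 *	if (old)
 *		qdisc_put(old);		// rtnl already held
 *
 * or, from a path that does not hold rtnl:
 *
 *	qdisc_put_unlocked(old);	// takes rtnl only on the final ref
 *
 * Either way the final free is deferred through call_rcu(), so RCU
 * readers such as qdisc_lookup_rcu() never see freed memory.)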
*/ struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue, @@ -1245,8 +1285,6 @@ static void dev_init_scheduler_queue(struct net_device *dev, rcu_assign_pointer(dev_queue->qdisc, qdisc); dev_queue->qdisc_sleeping = qdisc; - __skb_queue_head_init(&qdisc->gso_skb); - __skb_queue_head_init(&qdisc->skb_bad_txq); } void dev_init_scheduler(struct net_device *dev) @@ -1270,7 +1308,7 @@ static void shutdown_scheduler_queue(struct net_device *dev, rcu_assign_pointer(dev_queue->qdisc, qdisc_default); dev_queue->qdisc_sleeping = qdisc_default; - qdisc_destroy(qdisc); + qdisc_put(qdisc); } } @@ -1279,7 +1317,7 @@ void dev_shutdown(struct net_device *dev) netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc); if (dev_ingress_queue(dev)) shutdown_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc); - qdisc_destroy(dev->qdisc); + qdisc_put(dev->qdisc); dev->qdisc = &noop_qdisc; WARN_ON(timer_pending(&dev->watchdog_timer)); diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index 3278a76f6861..b18ec1f6de60 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c @@ -1092,7 +1092,7 @@ hfsc_destroy_class(struct Qdisc *sch, struct hfsc_class *cl) struct hfsc_sched *q = qdisc_priv(sch); tcf_block_put(cl->block); - qdisc_destroy(cl->qdisc); + qdisc_put(cl->qdisc); gen_kill_estimator(&cl->rate_est); if (cl != &q->root) kfree(cl); diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c index c3a8388dcdf6..9d6a47697406 100644 --- a/net/sched/sch_hhf.c +++ b/net/sched/sch_hhf.c @@ -330,7 +330,7 @@ static struct sk_buff *dequeue_head(struct wdrr_bucket *bucket) struct sk_buff *skb = bucket->head; bucket->head = skb->next; - skb->next = NULL; + skb_mark_not_on_list(skb); return skb; } diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 43c4bfe625a9..58b449490757 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c @@ -132,7 +132,7 @@ struct htb_class { struct htb_class_inner { struct htb_prio clprio[TC_HTB_NUMPRIO]; } inner; - } un; + }; s64 pq_key; int prio_activity; /* for which prios are we active */ @@ -411,13 +411,13 @@ static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl) int prio = ffz(~m); m &= ~(1 << prio); - if (p->un.inner.clprio[prio].feed.rb_node) + if (p->inner.clprio[prio].feed.rb_node) /* parent already has its feed in use so that * reset bit in mask as parent is already ok */ mask &= ~(1 << prio); - htb_add_to_id_tree(&p->un.inner.clprio[prio].feed, cl, prio); + htb_add_to_id_tree(&p->inner.clprio[prio].feed, cl, prio); } p->prio_activity |= mask; cl = p; @@ -447,19 +447,19 @@ static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl) int prio = ffz(~m); m &= ~(1 << prio); - if (p->un.inner.clprio[prio].ptr == cl->node + prio) { + if (p->inner.clprio[prio].ptr == cl->node + prio) { /* we are removing child which is pointed to from * parent feed - forget the pointer but remember * classid */ - p->un.inner.clprio[prio].last_ptr_id = cl->common.classid; - p->un.inner.clprio[prio].ptr = NULL; + p->inner.clprio[prio].last_ptr_id = cl->common.classid; + p->inner.clprio[prio].ptr = NULL; } htb_safe_rb_erase(cl->node + prio, - &p->un.inner.clprio[prio].feed); + &p->inner.clprio[prio].feed); - if (!p->un.inner.clprio[prio].feed.rb_node) + if (!p->inner.clprio[prio].feed.rb_node) mask |= 1 << prio; } @@ -555,7 +555,7 @@ htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff) */ static inline void htb_activate(struct htb_sched *q, struct htb_class *cl) { - WARN_ON(cl->level || !cl->un.leaf.q || 
!cl->un.leaf.q->q.qlen); + WARN_ON(cl->level || !cl->leaf.q || !cl->leaf.q->q.qlen); if (!cl->prio_activity) { cl->prio_activity = 1 << cl->prio; @@ -577,22 +577,6 @@ static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl) cl->prio_activity = 0; } -static void htb_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch, - struct qdisc_skb_head *qh) -{ - struct sk_buff *last = qh->tail; - - if (last) { - skb->next = NULL; - last->next = skb; - qh->tail = skb; - } else { - qh->tail = skb; - qh->head = skb; - } - qh->qlen++; -} - static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { @@ -603,7 +587,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch, if (cl == HTB_DIRECT) { /* enqueue to helper queue */ if (q->direct_queue.qlen < q->direct_qlen) { - htb_enqueue_tail(skb, sch, &q->direct_queue); + __qdisc_enqueue_tail(skb, &q->direct_queue); q->direct_pkts++; } else { return qdisc_drop(skb, sch, to_free); @@ -615,7 +599,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch, __qdisc_drop(skb, to_free); return ret; #endif - } else if ((ret = qdisc_enqueue(skb, cl->un.leaf.q, + } else if ((ret = qdisc_enqueue(skb, cl->leaf.q, to_free)) != NET_XMIT_SUCCESS) { if (net_xmit_drop_count(ret)) { qdisc_qstats_drop(sch); @@ -823,7 +807,7 @@ static struct htb_class *htb_lookup_leaf(struct htb_prio *hprio, const int prio) cl = rb_entry(*sp->pptr, struct htb_class, node[prio]); if (!cl->level) return cl; - clp = &cl->un.inner.clprio[prio]; + clp = &cl->inner.clprio[prio]; (++sp)->root = clp->feed.rb_node; sp->pptr = &clp->ptr; sp->pid = &clp->last_ptr_id; @@ -857,7 +841,7 @@ next: * graft operation on the leaf since last dequeue; * simply deactivate and skip such class */ - if (unlikely(cl->un.leaf.q->q.qlen == 0)) { + if (unlikely(cl->leaf.q->q.qlen == 0)) { struct htb_class *next; htb_deactivate(q, cl); @@ -873,12 +857,12 @@ next: goto next; } - skb = cl->un.leaf.q->dequeue(cl->un.leaf.q); + skb = cl->leaf.q->dequeue(cl->leaf.q); if (likely(skb != NULL)) break; - qdisc_warn_nonwc("htb", cl->un.leaf.q); - htb_next_rb_node(level ? &cl->parent->un.inner.clprio[prio].ptr: + qdisc_warn_nonwc("htb", cl->leaf.q); + htb_next_rb_node(level ? &cl->parent->inner.clprio[prio].ptr: &q->hlevel[0].hprio[prio].ptr); cl = htb_lookup_leaf(hprio, prio); @@ -886,16 +870,16 @@ next: if (likely(skb != NULL)) { bstats_update(&cl->bstats, skb); - cl->un.leaf.deficit[level] -= qdisc_pkt_len(skb); - if (cl->un.leaf.deficit[level] < 0) { - cl->un.leaf.deficit[level] += cl->quantum; - htb_next_rb_node(level ? &cl->parent->un.inner.clprio[prio].ptr : + cl->leaf.deficit[level] -= qdisc_pkt_len(skb); + if (cl->leaf.deficit[level] < 0) { + cl->leaf.deficit[level] += cl->quantum; + htb_next_rb_node(level ? 
&cl->parent->inner.clprio[prio].ptr :
+					 &q->hlevel[0].hprio[prio].ptr);
 		}
 		/* this used to be after charge_class but this constellation
 		 * gives us slightly better performance
 		 */
-		if (!cl->un.leaf.q->q.qlen)
+		if (!cl->leaf.q->q.qlen)
 			htb_deactivate(q, cl);
 		htb_charge_class(q, cl, level, skb);
 	}
@@ -972,10 +956,10 @@ static void htb_reset(struct Qdisc *sch)
 	for (i = 0; i < q->clhash.hashsize; i++) {
 		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
 			if (cl->level)
-				memset(&cl->un.inner, 0, sizeof(cl->un.inner));
+				memset(&cl->inner, 0, sizeof(cl->inner));
 			else {
-				if (cl->un.leaf.q)
-					qdisc_reset(cl->un.leaf.q);
+				if (cl->leaf.q)
+					qdisc_reset(cl->leaf.q);
 			}
 			cl->prio_activity = 0;
 			cl->cmode = HTB_CAN_SEND;
@@ -1098,8 +1082,8 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
 	 */
 	tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT;
 	tcm->tcm_handle = cl->common.classid;
-	if (!cl->level && cl->un.leaf.q)
-		tcm->tcm_info = cl->un.leaf.q->handle;
+	if (!cl->level && cl->leaf.q)
+		tcm->tcm_info = cl->leaf.q->handle;
 
 	nest = nla_nest_start(skb, TCA_OPTIONS);
 	if (nest == NULL)
@@ -1142,9 +1126,9 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
 	};
 	__u32 qlen = 0;
 
-	if (!cl->level && cl->un.leaf.q) {
-		qlen = cl->un.leaf.q->q.qlen;
-		qs.backlog = cl->un.leaf.q->qstats.backlog;
+	if (!cl->level && cl->leaf.q) {
+		qlen = cl->leaf.q->q.qlen;
+		qs.backlog = cl->leaf.q->qstats.backlog;
 	}
 	cl->xstats.tokens = clamp_t(s64, PSCHED_NS2TICKS(cl->tokens),
 				    INT_MIN, INT_MAX);
@@ -1172,14 +1156,14 @@ static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
 					     cl->common.classid, extack)) == NULL)
 		return -ENOBUFS;
 
-	*old = qdisc_replace(sch, new, &cl->un.leaf.q);
+	*old = qdisc_replace(sch, new, &cl->leaf.q);
 	return 0;
 }
 
 static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg)
 {
 	struct htb_class *cl = (struct htb_class *)arg;
-	return !cl->level ? cl->un.leaf.q : NULL;
+	return !cl->level ? cl->leaf.q : NULL;
 }
 
 static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)
@@ -1205,15 +1189,15 @@ static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl,
 {
 	struct htb_class *parent = cl->parent;
 
-	WARN_ON(cl->level || !cl->un.leaf.q || cl->prio_activity);
+	WARN_ON(cl->level || !cl->leaf.q || cl->prio_activity);
 
 	if (parent->cmode != HTB_CAN_SEND)
 		htb_safe_rb_erase(&parent->pq_node,
 				  &q->hlevel[parent->level].wait_pq);
 
 	parent->level = 0;
-	memset(&parent->un.inner, 0, sizeof(parent->un.inner));
-	parent->un.leaf.q = new_q ? new_q : &noop_qdisc;
+	memset(&parent->inner, 0, sizeof(parent->inner));
+	parent->leaf.q = new_q ?
new_q : &noop_qdisc; parent->tokens = parent->buffer; parent->ctokens = parent->cbuffer; parent->t_c = ktime_get_ns(); @@ -1223,8 +1207,8 @@ static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl, static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl) { if (!cl->level) { - WARN_ON(!cl->un.leaf.q); - qdisc_destroy(cl->un.leaf.q); + WARN_ON(!cl->leaf.q); + qdisc_put(cl->leaf.q); } gen_kill_estimator(&cl->rate_est); tcf_block_put(cl->block); @@ -1286,11 +1270,11 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg) sch_tree_lock(sch); if (!cl->level) { - unsigned int qlen = cl->un.leaf.q->q.qlen; - unsigned int backlog = cl->un.leaf.q->qstats.backlog; + unsigned int qlen = cl->leaf.q->q.qlen; + unsigned int backlog = cl->leaf.q->qstats.backlog; - qdisc_reset(cl->un.leaf.q); - qdisc_tree_reduce_backlog(cl->un.leaf.q, qlen, backlog); + qdisc_reset(cl->leaf.q); + qdisc_tree_reduce_backlog(cl->leaf.q, qlen, backlog); } /* delete from hash and active; remainder in destroy_class */ @@ -1419,13 +1403,13 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, classid, NULL); sch_tree_lock(sch); if (parent && !parent->level) { - unsigned int qlen = parent->un.leaf.q->q.qlen; - unsigned int backlog = parent->un.leaf.q->qstats.backlog; + unsigned int qlen = parent->leaf.q->q.qlen; + unsigned int backlog = parent->leaf.q->qstats.backlog; /* turn parent into inner node */ - qdisc_reset(parent->un.leaf.q); - qdisc_tree_reduce_backlog(parent->un.leaf.q, qlen, backlog); - qdisc_destroy(parent->un.leaf.q); + qdisc_reset(parent->leaf.q); + qdisc_tree_reduce_backlog(parent->leaf.q, qlen, backlog); + qdisc_put(parent->leaf.q); if (parent->prio_activity) htb_deactivate(q, parent); @@ -1436,10 +1420,10 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, } parent->level = (parent->parent ? parent->parent->level : TC_HTB_MAXDEPTH) - 1; - memset(&parent->un.inner, 0, sizeof(parent->un.inner)); + memset(&parent->inner, 0, sizeof(parent->inner)); } /* leaf (we) needs elementary qdisc */ - cl->un.leaf.q = new_q ? new_q : &noop_qdisc; + cl->leaf.q = new_q ? new_q : &noop_qdisc; cl->common.classid = classid; cl->parent = parent; @@ -1455,8 +1439,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, qdisc_class_hash_insert(&q->clhash, &cl->common); if (parent) parent->children++; - if (cl->un.leaf.q != &noop_qdisc) - qdisc_hash_add(cl->un.leaf.q, true); + if (cl->leaf.q != &noop_qdisc) + qdisc_hash_add(cl->leaf.q, true); } else { if (tca[TCA_RATE]) { err = gen_replace_estimator(&cl->bstats, NULL, @@ -1478,7 +1462,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, psched_ratecfg_precompute(&cl->ceil, &hopt->ceil, ceil64); /* it used to be a nasty bug here, we have to check that node - * is really leaf before changing cl->un.leaf ! + * is really leaf before changing cl->leaf ! 
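+	 * (The leaf/inner union became anonymous earlier in this patch,
+	 * so the same storage is now reached as cl->leaf / cl->inner
+	 * rather than cl->un.leaf / cl->un.inner; the cl->level check
+	 * below still decides which view is valid.)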
*/ if (!cl->level) { u64 quantum = cl->rate.rate_bytes_ps; diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c index d6b8ae4ed7a3..f20f3a0f8424 100644 --- a/net/sched/sch_mq.c +++ b/net/sched/sch_mq.c @@ -65,7 +65,7 @@ static void mq_destroy(struct Qdisc *sch) if (!priv->qdiscs) return; for (ntx = 0; ntx < dev->num_tx_queues && priv->qdiscs[ntx]; ntx++) - qdisc_destroy(priv->qdiscs[ntx]); + qdisc_put(priv->qdiscs[ntx]); kfree(priv->qdiscs); } @@ -119,7 +119,7 @@ static void mq_attach(struct Qdisc *sch) qdisc = priv->qdiscs[ntx]; old = dev_graft_qdisc(qdisc->dev_queue, qdisc); if (old) - qdisc_destroy(old); + qdisc_put(old); #ifdef CONFIG_NET_SCHED if (ntx < dev->real_num_tx_queues) qdisc_hash_add(qdisc, false); diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c index 0e9d761cdd80..d364e63c396d 100644 --- a/net/sched/sch_mqprio.c +++ b/net/sched/sch_mqprio.c @@ -40,7 +40,7 @@ static void mqprio_destroy(struct Qdisc *sch) for (ntx = 0; ntx < dev->num_tx_queues && priv->qdiscs[ntx]; ntx++) - qdisc_destroy(priv->qdiscs[ntx]); + qdisc_put(priv->qdiscs[ntx]); kfree(priv->qdiscs); } @@ -300,7 +300,7 @@ static void mqprio_attach(struct Qdisc *sch) qdisc = priv->qdiscs[ntx]; old = dev_graft_qdisc(qdisc->dev_queue, qdisc); if (old) - qdisc_destroy(old); + qdisc_put(old); if (ntx < dev->real_num_tx_queues) qdisc_hash_add(qdisc, false); } diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c index 1da7ea8de0ad..7410ce4d0321 100644 --- a/net/sched/sch_multiq.c +++ b/net/sched/sch_multiq.c @@ -175,7 +175,7 @@ multiq_destroy(struct Qdisc *sch) tcf_block_put(q->block); for (band = 0; band < q->bands; band++) - qdisc_destroy(q->queues[band]); + qdisc_put(q->queues[band]); kfree(q->queues); } @@ -204,7 +204,7 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt, q->queues[i] = &noop_qdisc; qdisc_tree_reduce_backlog(child, child->q.qlen, child->qstats.backlog); - qdisc_destroy(child); + qdisc_put(child); } } @@ -228,7 +228,7 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt, qdisc_tree_reduce_backlog(old, old->q.qlen, old->qstats.backlog); - qdisc_destroy(old); + qdisc_put(old); } sch_tree_unlock(sch); } diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index ad18a2052416..57b3ad9394ad 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -412,16 +412,6 @@ static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch, return segs; } -static void netem_enqueue_skb_head(struct qdisc_skb_head *qh, struct sk_buff *skb) -{ - skb->next = qh->head; - - if (!qh->head) - qh->tail = skb; - qh->head = skb; - qh->qlen++; -} - /* * Insert one skb into qdisc. * Note: parent depends on return value to account for queue length. 
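 * The head requeue below now goes through the generic
 * __qdisc_enqueue_head() instead of a private helper; in essence
 * (a sketch of the assumed qdisc_skb_head semantics, matching the
 * removed netem_enqueue_skb_head() above):
 *
 *	skb->next = qh->head;
 *	if (!qh->head)
 *		qh->tail = skb;
 *	qh->head = skb;
 *	qh->qlen++;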
@@ -570,7 +560,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch, cb->time_to_send = ktime_get_ns(); q->counter = 0; - netem_enqueue_skb_head(&sch->q, skb); + __qdisc_enqueue_head(skb, &sch->q); sch->qstats.requeues++; } @@ -578,7 +568,7 @@ finish_segs: if (segs) { while (segs) { skb2 = segs->next; - segs->next = NULL; + skb_mark_not_on_list(segs); qdisc_skb_cb(segs)->pkt_len = segs->len; last_len = segs->len; rc = qdisc_enqueue(segs, sch, to_free); @@ -1032,7 +1022,7 @@ static void netem_destroy(struct Qdisc *sch) qdisc_watchdog_cancel(&q->watchdog); if (q->qdisc) - qdisc_destroy(q->qdisc); + qdisc_put(q->qdisc); dist_free(q->delay_dist); dist_free(q->slot_dist); } diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c index 18d30bb86881..d1429371592f 100644 --- a/net/sched/sch_pie.c +++ b/net/sched/sch_pie.c @@ -110,8 +110,8 @@ static bool drop_early(struct Qdisc *sch, u32 packet_size) /* If current delay is less than half of target, and * if drop prob is low already, disable early_drop */ - if ((q->vars.qdelay < q->params.target / 2) - && (q->vars.prob < MAX_PROB / 5)) + if ((q->vars.qdelay < q->params.target / 2) && + (q->vars.prob < MAX_PROB / 5)) return false; /* If we have fewer than 2 mtu-sized packets, disable drop_early, @@ -209,7 +209,8 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt, /* tupdate is in jiffies */ if (tb[TCA_PIE_TUPDATE]) - q->params.tupdate = usecs_to_jiffies(nla_get_u32(tb[TCA_PIE_TUPDATE])); + q->params.tupdate = + usecs_to_jiffies(nla_get_u32(tb[TCA_PIE_TUPDATE])); if (tb[TCA_PIE_LIMIT]) { u32 limit = nla_get_u32(tb[TCA_PIE_LIMIT]); @@ -247,7 +248,6 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt, static void pie_process_dequeue(struct Qdisc *sch, struct sk_buff *skb) { - struct pie_sched_data *q = qdisc_priv(sch); int qlen = sch->qstats.backlog; /* current queue size in bytes */ @@ -294,9 +294,9 @@ static void pie_process_dequeue(struct Qdisc *sch, struct sk_buff *skb) * dq_count to 0 to re-enter the if block when the next * packet is dequeued */ - if (qlen < QUEUE_THRESHOLD) + if (qlen < QUEUE_THRESHOLD) { q->vars.dq_count = DQCOUNT_INVALID; - else { + } else { q->vars.dq_count = 0; q->vars.dq_tstamp = psched_get_time(); } @@ -370,7 +370,7 @@ static void calculate_probability(struct Qdisc *sch) oldprob = q->vars.prob; /* to ensure we increase probability in steps of no more than 2% */ - if (delta > (s32) (MAX_PROB / (100 / 2)) && + if (delta > (s32)(MAX_PROB / (100 / 2)) && q->vars.prob >= MAX_PROB / 10) delta = (MAX_PROB / 100) * 2; @@ -405,7 +405,7 @@ static void calculate_probability(struct Qdisc *sch) * delay is 0 for 2 consecutive Tupdate periods. 
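 * Each such period multiplies the drop probability by 98/100, so
 * once the standing queue is gone the probability decays
 * geometrically: p, 0.98p, 0.9604p, ..., reaching about p/2 after
 * roughly 34 Tupdate periods (0.98^34 ~= 0.5).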
*/ - if ((qdelay == 0) && (qdelay_old == 0) && update_prob) + if (qdelay == 0 && qdelay_old == 0 && update_prob) q->vars.prob = (q->vars.prob * 98) / 100; q->vars.qdelay = qdelay; @@ -419,8 +419,8 @@ static void calculate_probability(struct Qdisc *sch) */ if ((q->vars.qdelay < q->params.target / 2) && (q->vars.qdelay_old < q->params.target / 2) && - (q->vars.prob == 0) && - (q->vars.avg_dq_rate > 0)) + q->vars.prob == 0 && + q->vars.avg_dq_rate > 0) pie_vars_init(&q->vars); } @@ -437,7 +437,6 @@ static void pie_timer(struct timer_list *t) if (q->params.tupdate) mod_timer(&q->adapt_timer, jiffies + q->params.tupdate); spin_unlock(root_lock); - } static int pie_init(struct Qdisc *sch, struct nlattr *opt, @@ -469,15 +468,16 @@ static int pie_dump(struct Qdisc *sch, struct sk_buff *skb) struct nlattr *opts; opts = nla_nest_start(skb, TCA_OPTIONS); - if (opts == NULL) + if (!opts) goto nla_put_failure; /* convert target from pschedtime to us */ if (nla_put_u32(skb, TCA_PIE_TARGET, - ((u32) PSCHED_TICKS2NS(q->params.target)) / + ((u32)PSCHED_TICKS2NS(q->params.target)) / NSEC_PER_USEC) || nla_put_u32(skb, TCA_PIE_LIMIT, sch->limit) || - nla_put_u32(skb, TCA_PIE_TUPDATE, jiffies_to_usecs(q->params.tupdate)) || + nla_put_u32(skb, TCA_PIE_TUPDATE, + jiffies_to_usecs(q->params.tupdate)) || nla_put_u32(skb, TCA_PIE_ALPHA, q->params.alpha) || nla_put_u32(skb, TCA_PIE_BETA, q->params.beta) || nla_put_u32(skb, TCA_PIE_ECN, q->params.ecn) || @@ -489,7 +489,6 @@ static int pie_dump(struct Qdisc *sch, struct sk_buff *skb) nla_put_failure: nla_nest_cancel(skb, opts); return -1; - } static int pie_dump_stats(struct Qdisc *sch, struct gnet_dump *d) @@ -497,7 +496,7 @@ static int pie_dump_stats(struct Qdisc *sch, struct gnet_dump *d) struct pie_sched_data *q = qdisc_priv(sch); struct tc_pie_xstats st = { .prob = q->vars.prob, - .delay = ((u32) PSCHED_TICKS2NS(q->vars.qdelay)) / + .delay = ((u32)PSCHED_TICKS2NS(q->vars.qdelay)) / NSEC_PER_USEC, /* unscale and return dq_rate in bytes per sec */ .avg_dq_rate = q->vars.avg_dq_rate * @@ -514,8 +513,7 @@ static int pie_dump_stats(struct Qdisc *sch, struct gnet_dump *d) static struct sk_buff *pie_qdisc_dequeue(struct Qdisc *sch) { - struct sk_buff *skb; - skb = qdisc_dequeue_head(sch); + struct sk_buff *skb = qdisc_dequeue_head(sch); if (!skb) return NULL; @@ -527,6 +525,7 @@ static struct sk_buff *pie_qdisc_dequeue(struct Qdisc *sch) static void pie_reset(struct Qdisc *sch) { struct pie_sched_data *q = qdisc_priv(sch); + qdisc_reset_queue(sch); pie_vars_init(&q->vars); } @@ -534,6 +533,7 @@ static void pie_reset(struct Qdisc *sch) static void pie_destroy(struct Qdisc *sch) { struct pie_sched_data *q = qdisc_priv(sch); + q->params.tupdate = 0; del_timer_sync(&q->adapt_timer); } diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c index 222e53d3d27a..f8af98621179 100644 --- a/net/sched/sch_prio.c +++ b/net/sched/sch_prio.c @@ -175,7 +175,7 @@ prio_destroy(struct Qdisc *sch) tcf_block_put(q->block); prio_offload(sch, NULL); for (prio = 0; prio < q->bands; prio++) - qdisc_destroy(q->queues[prio]); + qdisc_put(q->queues[prio]); } static int prio_tune(struct Qdisc *sch, struct nlattr *opt, @@ -205,7 +205,7 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt, extack); if (!queues[i]) { while (i > oldbands) - qdisc_destroy(queues[--i]); + qdisc_put(queues[--i]); return -ENOMEM; } } @@ -220,7 +220,7 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt, qdisc_tree_reduce_backlog(child, child->q.qlen, child->qstats.backlog); - qdisc_destroy(child); + 
qdisc_put(child); } for (i = oldbands; i < q->bands; i++) { diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c index bb1a9c11fc54..dc37c4ead439 100644 --- a/net/sched/sch_qfq.c +++ b/net/sched/sch_qfq.c @@ -526,7 +526,7 @@ set_change_agg: return 0; destroy_class: - qdisc_destroy(cl->qdisc); + qdisc_put(cl->qdisc); kfree(cl); return err; } @@ -537,7 +537,7 @@ static void qfq_destroy_class(struct Qdisc *sch, struct qfq_class *cl) qfq_rm_from_agg(q, cl); gen_kill_estimator(&cl->rate_est); - qdisc_destroy(cl->qdisc); + qdisc_put(cl->qdisc); kfree(cl); } diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c index 56c181c3feeb..3ce6c0a2c493 100644 --- a/net/sched/sch_red.c +++ b/net/sched/sch_red.c @@ -181,7 +181,7 @@ static void red_destroy(struct Qdisc *sch) del_timer_sync(&q->adapt_timer); red_offload(sch, false); - qdisc_destroy(q->qdisc); + qdisc_put(q->qdisc); } static const struct nla_policy red_policy[TCA_RED_MAX + 1] = { @@ -233,7 +233,7 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt, if (child) { qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen, q->qdisc->qstats.backlog); - qdisc_destroy(q->qdisc); + qdisc_put(q->qdisc); q->qdisc = child; } diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c index 7cbdad8419b7..bab506b01a32 100644 --- a/net/sched/sch_sfb.c +++ b/net/sched/sch_sfb.c @@ -469,7 +469,7 @@ static void sfb_destroy(struct Qdisc *sch) struct sfb_sched_data *q = qdisc_priv(sch); tcf_block_put(q->block); - qdisc_destroy(q->qdisc); + qdisc_put(q->qdisc); } static const struct nla_policy sfb_policy[TCA_SFB_MAX + 1] = { @@ -523,7 +523,7 @@ static int sfb_change(struct Qdisc *sch, struct nlattr *opt, qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen, q->qdisc->qstats.backlog); - qdisc_destroy(q->qdisc); + qdisc_put(q->qdisc); q->qdisc = child; q->rehash_interval = msecs_to_jiffies(ctl->rehash_interval); diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c new file mode 100644 index 000000000000..206e4dbed12f --- /dev/null +++ b/net/sched/sch_taprio.c @@ -0,0 +1,962 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* net/sched/sch_taprio.c Time Aware Priority Scheduler + * + * Authors: Vinicius Costa Gomes <vinicius.gomes@intel.com> + * + */ + +#include <linux/types.h> +#include <linux/slab.h> +#include <linux/kernel.h> +#include <linux/string.h> +#include <linux/list.h> +#include <linux/errno.h> +#include <linux/skbuff.h> +#include <linux/module.h> +#include <linux/spinlock.h> +#include <net/netlink.h> +#include <net/pkt_sched.h> +#include <net/pkt_cls.h> +#include <net/sch_generic.h> + +#define TAPRIO_ALL_GATES_OPEN -1 + +struct sched_entry { + struct list_head list; + + /* The instant that this entry "closes" and the next one + * should open, the qdisc will make some effort so that no + * packet leaves after this time. 
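+	 * close_time and the budget are refreshed by the advance timer
+	 * each time this entry (re)opens; the budget is the byte count
+	 * that fits in the interval at line rate, roughly
+	 *	interval * 1000 / picos_per_byte
+	 * (interval is in ns, so the product is in ps), which lets
+	 * dequeue refuse packets that would overrun the window.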
+	 */
+	ktime_t close_time;
+	atomic_t budget;
+	int index;
+	u32 gate_mask;
+	u32 interval;
+	u8 command;
+};
+
+struct taprio_sched {
+	struct Qdisc **qdiscs;
+	struct Qdisc *root;
+	s64 base_time;
+	int clockid;
+	int picos_per_byte; /* Using picoseconds because for 10Gbps+
+			     * speeds it's sub-nanoseconds per byte
+			     */
+	size_t num_entries;
+
+	/* Protects the update side of the RCU protected current_entry */
+	spinlock_t current_entry_lock;
+	struct sched_entry __rcu *current_entry;
+	struct list_head entries;
+	ktime_t (*get_time)(void);
+	struct hrtimer advance_timer;
+};
+
+static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+			  struct sk_buff **to_free)
+{
+	struct taprio_sched *q = qdisc_priv(sch);
+	struct Qdisc *child;
+	int queue;
+
+	queue = skb_get_queue_mapping(skb);
+
+	child = q->qdiscs[queue];
+	if (unlikely(!child))
+		return qdisc_drop(skb, sch, to_free);
+
+	qdisc_qstats_backlog_inc(sch, skb);
+	sch->q.qlen++;
+
+	return qdisc_enqueue(skb, child, to_free);
+}
+
+static struct sk_buff *taprio_peek(struct Qdisc *sch)
+{
+	struct taprio_sched *q = qdisc_priv(sch);
+	struct net_device *dev = qdisc_dev(sch);
+	struct sched_entry *entry;
+	struct sk_buff *skb;
+	u32 gate_mask;
+	int i;
+
+	rcu_read_lock();
+	entry = rcu_dereference(q->current_entry);
+	gate_mask = entry ? entry->gate_mask : -1;
+	rcu_read_unlock();
+
+	if (!gate_mask)
+		return NULL;
+
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		struct Qdisc *child = q->qdiscs[i];
+		int prio;
+		u8 tc;
+
+		if (unlikely(!child))
+			continue;
+
+		skb = child->ops->peek(child);
+		if (!skb)
+			continue;
+
+		prio = skb->priority;
+		tc = netdev_get_prio_tc_map(dev, prio);
+
+		if (!(gate_mask & BIT(tc)))
+			return NULL;
+
+		return skb;
+	}
+
+	return NULL;
+}
+
+static inline int length_to_duration(struct taprio_sched *q, int len)
+{
+	return (len * q->picos_per_byte) / 1000;
+}
+
+static struct sk_buff *taprio_dequeue(struct Qdisc *sch)
+{
+	struct taprio_sched *q = qdisc_priv(sch);
+	struct net_device *dev = qdisc_dev(sch);
+	struct sched_entry *entry;
+	struct sk_buff *skb;
+	u32 gate_mask;
+	int i;
+
+	rcu_read_lock();
+	entry = rcu_dereference(q->current_entry);
+	/* if there's no entry, it means that the schedule didn't
+	 * start yet, so force all gates to be open, this is in
+	 * accordance with IEEE 802.1Qbv-2015 Section 8.6.9.4.5
+	 * "AdminGateStates"
+	 */
+	gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN;
+	rcu_read_unlock();
+
+	if (!gate_mask)
+		return NULL;
+
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		struct Qdisc *child = q->qdiscs[i];
+		ktime_t guard;
+		int prio;
+		int len;
+		u8 tc;
+
+		if (unlikely(!child))
+			continue;
+
+		skb = child->ops->peek(child);
+		if (!skb)
+			continue;
+
+		prio = skb->priority;
+		tc = netdev_get_prio_tc_map(dev, prio);
+
+		if (!(gate_mask & BIT(tc)))
+			continue;
+
+		len = qdisc_pkt_len(skb);
+		guard = ktime_add_ns(q->get_time(),
+				     length_to_duration(q, len));
+
+		/* In the case that there's no gate entry, there's no
+		 * guard band ...
+		 */
+		if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
+		    ktime_after(guard, entry->close_time))
+			return NULL;
+
+		/* ... and no budget.
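+		 * The budget is decremented as packets are taken; a
+		 * negative result means this entry's share of the cycle
+		 * is already spent. For example, at 1Gbps picos_per_byte
+		 * is 8000, so a 1us gate interval yields a budget of
+		 * 1000 * 1000 / 8000 = 125 bytes.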
*/ + if (gate_mask != TAPRIO_ALL_GATES_OPEN && + atomic_sub_return(len, &entry->budget) < 0) + return NULL; + + skb = child->ops->dequeue(child); + if (unlikely(!skb)) + return NULL; + + qdisc_bstats_update(sch, skb); + qdisc_qstats_backlog_dec(sch, skb); + sch->q.qlen--; + + return skb; + } + + return NULL; +} + +static bool should_restart_cycle(const struct taprio_sched *q, + const struct sched_entry *entry) +{ + WARN_ON(!entry); + + return list_is_last(&entry->list, &q->entries); +} + +static enum hrtimer_restart advance_sched(struct hrtimer *timer) +{ + struct taprio_sched *q = container_of(timer, struct taprio_sched, + advance_timer); + struct sched_entry *entry, *next; + struct Qdisc *sch = q->root; + ktime_t close_time; + + spin_lock(&q->current_entry_lock); + entry = rcu_dereference_protected(q->current_entry, + lockdep_is_held(&q->current_entry_lock)); + + /* This is the case that it's the first time that the schedule + * runs, so it only happens once per schedule. The first entry + * is pre-calculated during the schedule initialization. + */ + if (unlikely(!entry)) { + next = list_first_entry(&q->entries, struct sched_entry, + list); + close_time = next->close_time; + goto first_run; + } + + if (should_restart_cycle(q, entry)) + next = list_first_entry(&q->entries, struct sched_entry, + list); + else + next = list_next_entry(entry, list); + + close_time = ktime_add_ns(entry->close_time, next->interval); + + next->close_time = close_time; + atomic_set(&next->budget, + (next->interval * 1000) / q->picos_per_byte); + +first_run: + rcu_assign_pointer(q->current_entry, next); + spin_unlock(&q->current_entry_lock); + + hrtimer_set_expires(&q->advance_timer, close_time); + + rcu_read_lock(); + __netif_schedule(sch); + rcu_read_unlock(); + + return HRTIMER_RESTART; +} + +static const struct nla_policy entry_policy[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = { + [TCA_TAPRIO_SCHED_ENTRY_INDEX] = { .type = NLA_U32 }, + [TCA_TAPRIO_SCHED_ENTRY_CMD] = { .type = NLA_U8 }, + [TCA_TAPRIO_SCHED_ENTRY_GATE_MASK] = { .type = NLA_U32 }, + [TCA_TAPRIO_SCHED_ENTRY_INTERVAL] = { .type = NLA_U32 }, +}; + +static const struct nla_policy entry_list_policy[TCA_TAPRIO_SCHED_MAX + 1] = { + [TCA_TAPRIO_SCHED_ENTRY] = { .type = NLA_NESTED }, +}; + +static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = { + [TCA_TAPRIO_ATTR_PRIOMAP] = { + .len = sizeof(struct tc_mqprio_qopt) + }, + [TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST] = { .type = NLA_NESTED }, + [TCA_TAPRIO_ATTR_SCHED_BASE_TIME] = { .type = NLA_S64 }, + [TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY] = { .type = NLA_NESTED }, + [TCA_TAPRIO_ATTR_SCHED_CLOCKID] = { .type = NLA_S32 }, +}; + +static int fill_sched_entry(struct nlattr **tb, struct sched_entry *entry, + struct netlink_ext_ack *extack) +{ + u32 interval = 0; + + if (tb[TCA_TAPRIO_SCHED_ENTRY_CMD]) + entry->command = nla_get_u8( + tb[TCA_TAPRIO_SCHED_ENTRY_CMD]); + + if (tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK]) + entry->gate_mask = nla_get_u32( + tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK]); + + if (tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]) + interval = nla_get_u32( + tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]); + + if (interval == 0) { + NL_SET_ERR_MSG(extack, "Invalid interval for schedule entry"); + return -EINVAL; + } + + entry->interval = interval; + + return 0; +} + +static int parse_sched_entry(struct nlattr *n, struct sched_entry *entry, + int index, struct netlink_ext_ack *extack) +{ + struct nlattr *tb[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = { }; + int err; + + err = nla_parse_nested(tb, TCA_TAPRIO_SCHED_ENTRY_MAX, n, + 
entry_policy, NULL);
+	if (err < 0) {
+		NL_SET_ERR_MSG(extack, "Could not parse nested entry");
+		return -EINVAL;
+	}
+
+	entry->index = index;
+
+	return fill_sched_entry(tb, entry, extack);
+}
+
+/* Returns the number of entries in case of success */
+static int parse_sched_single_entry(struct nlattr *n,
+				    struct taprio_sched *q,
+				    struct netlink_ext_ack *extack)
+{
+	struct nlattr *tb_entry[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = { };
+	struct nlattr *tb_list[TCA_TAPRIO_SCHED_MAX + 1] = { };
+	struct sched_entry *entry;
+	bool found = false;
+	u32 index;
+	int err;
+
+	err = nla_parse_nested(tb_list, TCA_TAPRIO_SCHED_MAX,
+			       n, entry_list_policy, NULL);
+	if (err < 0) {
+		NL_SET_ERR_MSG(extack, "Could not parse nested entry");
+		return -EINVAL;
+	}
+
+	if (!tb_list[TCA_TAPRIO_SCHED_ENTRY]) {
+		NL_SET_ERR_MSG(extack, "Single-entry must include an entry");
+		return -EINVAL;
+	}
+
+	err = nla_parse_nested(tb_entry, TCA_TAPRIO_SCHED_ENTRY_MAX,
+			       tb_list[TCA_TAPRIO_SCHED_ENTRY],
+			       entry_policy, NULL);
+	if (err < 0) {
+		NL_SET_ERR_MSG(extack, "Could not parse nested entry");
+		return -EINVAL;
+	}
+
+	if (!tb_entry[TCA_TAPRIO_SCHED_ENTRY_INDEX]) {
+		NL_SET_ERR_MSG(extack, "Entry must specify an index");
+		return -EINVAL;
+	}
+
+	index = nla_get_u32(tb_entry[TCA_TAPRIO_SCHED_ENTRY_INDEX]);
+	if (index >= q->num_entries) {
+		NL_SET_ERR_MSG(extack, "Index for single entry exceeds number of entries in schedule");
+		return -EINVAL;
+	}
+
+	list_for_each_entry(entry, &q->entries, list) {
+		if (entry->index == index) {
+			found = true;
+			break;
+		}
+	}
+
+	if (!found) {
+		NL_SET_ERR_MSG(extack, "Could not find entry");
+		return -ENOENT;
+	}
+
+	err = fill_sched_entry(tb_entry, entry, extack);
+	if (err < 0)
+		return err;
+
+	return q->num_entries;
+}
+
+static int parse_sched_list(struct nlattr *list,
+			    struct taprio_sched *q,
+			    struct netlink_ext_ack *extack)
+{
+	struct nlattr *n;
+	int err, rem;
+	int i = 0;
+
+	if (!list)
+		return -EINVAL;
+
+	nla_for_each_nested(n, list, rem) {
+		struct sched_entry *entry;
+
+		if (nla_type(n) != TCA_TAPRIO_SCHED_ENTRY) {
+			NL_SET_ERR_MSG(extack, "Attribute is not of type 'entry'");
+			continue;
+		}
+
+		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+		if (!entry) {
+			NL_SET_ERR_MSG(extack, "Not enough memory for entry");
+			return -ENOMEM;
+		}
+
+		err = parse_sched_entry(n, entry, i, extack);
+		if (err < 0) {
+			kfree(entry);
+			return err;
+		}
+
+		list_add_tail(&entry->list, &q->entries);
+		i++;
+	}
+
+	q->num_entries = i;
+
+	return i;
+}
+
+/* Returns the number of entries in case of success */
+static int parse_taprio_opt(struct nlattr **tb, struct taprio_sched *q,
+			    struct netlink_ext_ack *extack)
+{
+	int err = 0;
+	int clockid;
+
+	if (tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST] &&
+	    tb[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY])
+		return -EINVAL;
+
+	if (tb[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY] && q->num_entries == 0)
+		return -EINVAL;
+
+	if (q->clockid == -1 && !tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID])
+		return -EINVAL;
+
+	if (tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME])
+		q->base_time = nla_get_s64(
+			tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME]);
+
+	if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) {
+		clockid = nla_get_s32(tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]);
+
+		/* We only support static clockids and we don't allow
+		 * for it to be modified after the first init.
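+		 * (q->clockid starts out as -1, the "unset" sentinel set
+		 * in taprio_init(), so the first change must supply a
+		 * clockid and later changes may only repeat it;
+		 * taprio_change() then maps CLOCK_REALTIME, MONOTONIC,
+		 * BOOTTIME and TAI onto the matching ktime_get*() helper
+		 * and rejects anything else.)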
+		 */
+		if (clockid < 0 ||
+		    (q->clockid != -1 && q->clockid != clockid))
+			return -EINVAL;
+
+		q->clockid = clockid;
+	}
+
+	if (tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST])
+		err = parse_sched_list(
+			tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST], q, extack);
+	else if (tb[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY])
+		err = parse_sched_single_entry(
+			tb[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY], q, extack);
+
+	/* parse_sched_* return the number of entries in the schedule,
+	 * a schedule with zero entries is an error.
+	 */
+	if (err == 0) {
+		NL_SET_ERR_MSG(extack, "The schedule should contain at least one entry");
+		return -EINVAL;
+	}
+
+	return err;
+}
+
+static int taprio_parse_mqprio_opt(struct net_device *dev,
+				   struct tc_mqprio_qopt *qopt,
+				   struct netlink_ext_ack *extack)
+{
+	int i, j;
+
+	if (!qopt) {
+		NL_SET_ERR_MSG(extack, "'mqprio' configuration is necessary");
+		return -EINVAL;
+	}
+
+	/* Verify num_tc is not out of max range */
+	if (qopt->num_tc > TC_MAX_QUEUE) {
+		NL_SET_ERR_MSG(extack, "Number of traffic classes is outside valid range");
+		return -EINVAL;
+	}
+
+	/* taprio imposes that traffic classes map 1:n to tx queues */
+	if (qopt->num_tc > dev->num_tx_queues) {
+		NL_SET_ERR_MSG(extack, "Number of traffic classes is greater than number of HW queues");
+		return -EINVAL;
+	}
+
+	/* Verify priority mapping uses valid tcs */
+	for (i = 0; i < TC_BITMASK + 1; i++) {
+		if (qopt->prio_tc_map[i] >= qopt->num_tc) {
+			NL_SET_ERR_MSG(extack, "Invalid traffic class in priority to traffic class mapping");
+			return -EINVAL;
+		}
+	}
+
+	for (i = 0; i < qopt->num_tc; i++) {
+		unsigned int last = qopt->offset[i] + qopt->count[i];
+
+		/* Verify the queue count is in tx range; being equal to
+		 * real_num_tx_queues indicates the last queue is in use.
+		 */
+		if (qopt->offset[i] >= dev->num_tx_queues ||
+		    !qopt->count[i] ||
+		    last > dev->real_num_tx_queues) {
+			NL_SET_ERR_MSG(extack, "Invalid queue in traffic class to queue mapping");
+			return -EINVAL;
+		}
+
+		/* Verify that the offset and counts do not overlap */
+		for (j = i + 1; j < qopt->num_tc; j++) {
+			if (last > qopt->offset[j]) {
+				NL_SET_ERR_MSG(extack, "Detected overlap in the traffic class to queue mapping");
+				return -EINVAL;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static ktime_t taprio_get_start_time(struct Qdisc *sch)
+{
+	struct taprio_sched *q = qdisc_priv(sch);
+	struct sched_entry *entry;
+	ktime_t now, base, cycle;
+	s64 n;
+
+	base = ns_to_ktime(q->base_time);
+	cycle = 0;
+
+	/* Calculate the cycle_time, by summing all the intervals.
+	 */
+	list_for_each_entry(entry, &q->entries, list)
+		cycle = ktime_add_ns(cycle, entry->interval);
+
+	if (!cycle)
+		return base;
+
+	now = q->get_time();
+
+	if (ktime_after(base, now))
+		return base;
+
+	/* Schedule the start time for the beginning of the next
+	 * cycle.
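+	 * With n complete cycles elapsed since base:
+	 *	n = div64_s64(now - base, cycle);
+	 *	start = base + (n + 1) * cycle;
+	 * e.g. (hypothetical numbers) base = 0, cycle = 1ms and
+	 * now = 2.4ms give n = 2 and a start at 3ms, the next cycle
+	 * boundary.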
+ */ + n = div64_s64(ktime_sub_ns(now, base), cycle); + + return ktime_add_ns(base, (n + 1) * cycle); +} + +static void taprio_start_sched(struct Qdisc *sch, ktime_t start) +{ + struct taprio_sched *q = qdisc_priv(sch); + struct sched_entry *first; + unsigned long flags; + + spin_lock_irqsave(&q->current_entry_lock, flags); + + first = list_first_entry(&q->entries, struct sched_entry, + list); + + first->close_time = ktime_add_ns(start, first->interval); + atomic_set(&first->budget, + (first->interval * 1000) / q->picos_per_byte); + rcu_assign_pointer(q->current_entry, NULL); + + spin_unlock_irqrestore(&q->current_entry_lock, flags); + + hrtimer_start(&q->advance_timer, start, HRTIMER_MODE_ABS); +} + +static int taprio_change(struct Qdisc *sch, struct nlattr *opt, + struct netlink_ext_ack *extack) +{ + struct nlattr *tb[TCA_TAPRIO_ATTR_MAX + 1] = { }; + struct taprio_sched *q = qdisc_priv(sch); + struct net_device *dev = qdisc_dev(sch); + struct tc_mqprio_qopt *mqprio = NULL; + struct ethtool_link_ksettings ecmd; + int i, err, size; + s64 link_speed; + ktime_t start; + + err = nla_parse_nested(tb, TCA_TAPRIO_ATTR_MAX, opt, + taprio_policy, extack); + if (err < 0) + return err; + + err = -EINVAL; + if (tb[TCA_TAPRIO_ATTR_PRIOMAP]) + mqprio = nla_data(tb[TCA_TAPRIO_ATTR_PRIOMAP]); + + err = taprio_parse_mqprio_opt(dev, mqprio, extack); + if (err < 0) + return err; + + /* A schedule with less than one entry is an error */ + size = parse_taprio_opt(tb, q, extack); + if (size < 0) + return size; + + hrtimer_init(&q->advance_timer, q->clockid, HRTIMER_MODE_ABS); + q->advance_timer.function = advance_sched; + + switch (q->clockid) { + case CLOCK_REALTIME: + q->get_time = ktime_get_real; + break; + case CLOCK_MONOTONIC: + q->get_time = ktime_get; + break; + case CLOCK_BOOTTIME: + q->get_time = ktime_get_boottime; + break; + case CLOCK_TAI: + q->get_time = ktime_get_clocktai; + break; + default: + return -ENOTSUPP; + } + + for (i = 0; i < dev->num_tx_queues; i++) { + struct netdev_queue *dev_queue; + struct Qdisc *qdisc; + + dev_queue = netdev_get_tx_queue(dev, i); + qdisc = qdisc_create_dflt(dev_queue, + &pfifo_qdisc_ops, + TC_H_MAKE(TC_H_MAJ(sch->handle), + TC_H_MIN(i + 1)), + extack); + if (!qdisc) + return -ENOMEM; + + if (i < dev->real_num_tx_queues) + qdisc_hash_add(qdisc, false); + + q->qdiscs[i] = qdisc; + } + + if (mqprio) { + netdev_set_num_tc(dev, mqprio->num_tc); + for (i = 0; i < mqprio->num_tc; i++) + netdev_set_tc_queue(dev, i, + mqprio->count[i], + mqprio->offset[i]); + + /* Always use supplied priority mappings */ + for (i = 0; i < TC_BITMASK + 1; i++) + netdev_set_prio_tc_map(dev, i, + mqprio->prio_tc_map[i]); + } + + if (!__ethtool_get_link_ksettings(dev, &ecmd)) + link_speed = ecmd.base.speed; + else + link_speed = SPEED_1000; + + q->picos_per_byte = div64_s64(NSEC_PER_SEC * 1000LL * 8, + link_speed * 1000 * 1000); + + start = taprio_get_start_time(sch); + if (!start) + return 0; + + taprio_start_sched(sch, start); + + return 0; +} + +static void taprio_destroy(struct Qdisc *sch) +{ + struct taprio_sched *q = qdisc_priv(sch); + struct net_device *dev = qdisc_dev(sch); + struct sched_entry *entry, *n; + unsigned int i; + + hrtimer_cancel(&q->advance_timer); + + if (q->qdiscs) { + for (i = 0; i < dev->num_tx_queues && q->qdiscs[i]; i++) + qdisc_put(q->qdiscs[i]); + + kfree(q->qdiscs); + } + q->qdiscs = NULL; + + netdev_set_num_tc(dev, 0); + + list_for_each_entry_safe(entry, n, &q->entries, list) { + list_del(&entry->list); + kfree(entry); + } +} + +static int taprio_init(struct 
Qdisc *sch, struct nlattr *opt, + struct netlink_ext_ack *extack) +{ + struct taprio_sched *q = qdisc_priv(sch); + struct net_device *dev = qdisc_dev(sch); + + INIT_LIST_HEAD(&q->entries); + spin_lock_init(&q->current_entry_lock); + + /* We may overwrite the configuration later */ + hrtimer_init(&q->advance_timer, CLOCK_TAI, HRTIMER_MODE_ABS); + + q->root = sch; + + /* We only support static clockids. Use an invalid value as default + * and get the valid one on taprio_change(). + */ + q->clockid = -1; + + if (sch->parent != TC_H_ROOT) + return -EOPNOTSUPP; + + if (!netif_is_multiqueue(dev)) + return -EOPNOTSUPP; + + /* pre-allocate qdisc, attachment can't fail */ + q->qdiscs = kcalloc(dev->num_tx_queues, + sizeof(q->qdiscs[0]), + GFP_KERNEL); + + if (!q->qdiscs) + return -ENOMEM; + + if (!opt) + return -EINVAL; + + return taprio_change(sch, opt, extack); +} + +static struct netdev_queue *taprio_queue_get(struct Qdisc *sch, + unsigned long cl) +{ + struct net_device *dev = qdisc_dev(sch); + unsigned long ntx = cl - 1; + + if (ntx >= dev->num_tx_queues) + return NULL; + + return netdev_get_tx_queue(dev, ntx); +} + +static int taprio_graft(struct Qdisc *sch, unsigned long cl, + struct Qdisc *new, struct Qdisc **old, + struct netlink_ext_ack *extack) +{ + struct taprio_sched *q = qdisc_priv(sch); + struct net_device *dev = qdisc_dev(sch); + struct netdev_queue *dev_queue = taprio_queue_get(sch, cl); + + if (!dev_queue) + return -EINVAL; + + if (dev->flags & IFF_UP) + dev_deactivate(dev); + + *old = q->qdiscs[cl - 1]; + q->qdiscs[cl - 1] = new; + + if (new) + new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT; + + if (dev->flags & IFF_UP) + dev_activate(dev); + + return 0; +} + +static int dump_entry(struct sk_buff *msg, + const struct sched_entry *entry) +{ + struct nlattr *item; + + item = nla_nest_start(msg, TCA_TAPRIO_SCHED_ENTRY); + if (!item) + return -ENOSPC; + + if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INDEX, entry->index)) + goto nla_put_failure; + + if (nla_put_u8(msg, TCA_TAPRIO_SCHED_ENTRY_CMD, entry->command)) + goto nla_put_failure; + + if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_GATE_MASK, + entry->gate_mask)) + goto nla_put_failure; + + if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INTERVAL, + entry->interval)) + goto nla_put_failure; + + return nla_nest_end(msg, item); + +nla_put_failure: + nla_nest_cancel(msg, item); + return -1; +} + +static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb) +{ + struct taprio_sched *q = qdisc_priv(sch); + struct net_device *dev = qdisc_dev(sch); + struct tc_mqprio_qopt opt = { 0 }; + struct nlattr *nest, *entry_list; + struct sched_entry *entry; + unsigned int i; + + opt.num_tc = netdev_get_num_tc(dev); + memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map)); + + for (i = 0; i < netdev_get_num_tc(dev); i++) { + opt.count[i] = dev->tc_to_txq[i].count; + opt.offset[i] = dev->tc_to_txq[i].offset; + } + + nest = nla_nest_start(skb, TCA_OPTIONS); + if (!nest) + return -ENOSPC; + + if (nla_put(skb, TCA_TAPRIO_ATTR_PRIOMAP, sizeof(opt), &opt)) + goto options_error; + + if (nla_put_s64(skb, TCA_TAPRIO_ATTR_SCHED_BASE_TIME, + q->base_time, TCA_TAPRIO_PAD)) + goto options_error; + + if (nla_put_s32(skb, TCA_TAPRIO_ATTR_SCHED_CLOCKID, q->clockid)) + goto options_error; + + entry_list = nla_nest_start(skb, TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST); + if (!entry_list) + goto options_error; + + list_for_each_entry(entry, &q->entries, list) { + if (dump_entry(skb, entry) < 0) + goto options_error; + } + + nla_nest_end(skb, entry_list); + + return 
nla_nest_end(skb, nest); + +options_error: + nla_nest_cancel(skb, nest); + return -1; +} + +static struct Qdisc *taprio_leaf(struct Qdisc *sch, unsigned long cl) +{ + struct netdev_queue *dev_queue = taprio_queue_get(sch, cl); + + if (!dev_queue) + return NULL; + + return dev_queue->qdisc_sleeping; +} + +static unsigned long taprio_find(struct Qdisc *sch, u32 classid) +{ + unsigned int ntx = TC_H_MIN(classid); + + if (!taprio_queue_get(sch, ntx)) + return 0; + return ntx; +} + +static int taprio_dump_class(struct Qdisc *sch, unsigned long cl, + struct sk_buff *skb, struct tcmsg *tcm) +{ + struct netdev_queue *dev_queue = taprio_queue_get(sch, cl); + + tcm->tcm_parent = TC_H_ROOT; + tcm->tcm_handle |= TC_H_MIN(cl); + tcm->tcm_info = dev_queue->qdisc_sleeping->handle; + + return 0; +} + +static int taprio_dump_class_stats(struct Qdisc *sch, unsigned long cl, + struct gnet_dump *d) + __releases(d->lock) + __acquires(d->lock) +{ + struct netdev_queue *dev_queue = taprio_queue_get(sch, cl); + + sch = dev_queue->qdisc_sleeping; + if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 || + gnet_stats_copy_queue(d, NULL, &sch->qstats, sch->q.qlen) < 0) + return -1; + return 0; +} + +static void taprio_walk(struct Qdisc *sch, struct qdisc_walker *arg) +{ + struct net_device *dev = qdisc_dev(sch); + unsigned long ntx; + + if (arg->stop) + return; + + arg->count = arg->skip; + for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++) { + if (arg->fn(sch, ntx + 1, arg) < 0) { + arg->stop = 1; + break; + } + arg->count++; + } +} + +static struct netdev_queue *taprio_select_queue(struct Qdisc *sch, + struct tcmsg *tcm) +{ + return taprio_queue_get(sch, TC_H_MIN(tcm->tcm_parent)); +} + +static const struct Qdisc_class_ops taprio_class_ops = { + .graft = taprio_graft, + .leaf = taprio_leaf, + .find = taprio_find, + .walk = taprio_walk, + .dump = taprio_dump_class, + .dump_stats = taprio_dump_class_stats, + .select_queue = taprio_select_queue, +}; + +static struct Qdisc_ops taprio_qdisc_ops __read_mostly = { + .cl_ops = &taprio_class_ops, + .id = "taprio", + .priv_size = sizeof(struct taprio_sched), + .init = taprio_init, + .destroy = taprio_destroy, + .peek = taprio_peek, + .dequeue = taprio_dequeue, + .enqueue = taprio_enqueue, + .dump = taprio_dump, + .owner = THIS_MODULE, +}; + +static int __init taprio_module_init(void) +{ + return register_qdisc(&taprio_qdisc_ops); +} + +static void __exit taprio_module_exit(void) +{ + unregister_qdisc(&taprio_qdisc_ops); +} + +module_init(taprio_module_init); +module_exit(taprio_module_exit); +MODULE_LICENSE("GPL"); diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c index 6f74a426f159..942dcca09cf2 100644 --- a/net/sched/sch_tbf.c +++ b/net/sched/sch_tbf.c @@ -162,7 +162,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch, nb = 0; while (segs) { nskb = segs->next; - segs->next = NULL; + skb_mark_not_on_list(segs); qdisc_skb_cb(segs)->pkt_len = segs->len; len += segs->len; ret = qdisc_enqueue(segs, q->qdisc, to_free); @@ -392,7 +392,7 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt, if (child) { qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen, q->qdisc->qstats.backlog); - qdisc_destroy(q->qdisc); + qdisc_put(q->qdisc); q->qdisc = child; } q->limit = qopt->limit; @@ -438,7 +438,7 @@ static void tbf_destroy(struct Qdisc *sch) struct tbf_sched_data *q = qdisc_priv(sch); qdisc_watchdog_cancel(&q->watchdog); - qdisc_destroy(q->qdisc); + qdisc_put(q->qdisc); } static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb) diff 
--git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c index 0b427100b0d4..331cc734e3db 100644 --- a/net/sctp/ulpqueue.c +++ b/net/sctp/ulpqueue.c @@ -459,7 +459,7 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ul * element in the queue, then count it towards * possible PD. */ - if (pos == ulpq->reasm.next) { + if (skb_queue_is_first(&ulpq->reasm, pos)) { pd_first = pos; pd_last = pos; pd_len = pos->len; diff --git a/net/socket.c b/net/socket.c index 01f3f8f32d6f..713dc4833d40 100644 --- a/net/socket.c +++ b/net/socket.c @@ -1475,7 +1475,7 @@ int __sys_bind(int fd, struct sockaddr __user *umyaddr, int addrlen) sock = sockfd_lookup_light(fd, &err, &fput_needed); if (sock) { err = move_addr_to_kernel(umyaddr, addrlen, &address); - if (err >= 0) { + if (!err) { err = security_socket_bind(sock, (struct sockaddr *)&address, addrlen); diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c index 645c16052052..e65c3a8551e4 100644 --- a/net/tipc/bearer.c +++ b/net/tipc/bearer.c @@ -577,7 +577,7 @@ static int tipc_l2_rcv_msg(struct sk_buff *skb, struct net_device *dev, rcu_dereference_rtnl(orig_dev->tipc_ptr); if (likely(b && test_bit(0, &b->up) && (skb->pkt_type <= PACKET_MULTICAST))) { - skb->next = NULL; + skb_mark_not_on_list(skb); tipc_rcv(dev_net(b->pt.dev), skb, b); rcu_read_unlock(); return NET_RX_SUCCESS; diff --git a/net/tipc/msg.c b/net/tipc/msg.c index b61891054709..f48e5857210f 100644 --- a/net/tipc/msg.c +++ b/net/tipc/msg.c @@ -499,54 +499,56 @@ bool tipc_msg_make_bundle(struct sk_buff **skb, struct tipc_msg *msg, /** * tipc_msg_reverse(): swap source and destination addresses and add error code * @own_node: originating node id for reversed message - * @skb: buffer containing message to be reversed; may be replaced. + * @skb: buffer containing message to be reversed; will be consumed * @err: error code to be set in message, if any - * Consumes buffer at failure + * Replaces consumed buffer with new one when successful * Returns true if success, otherwise false */ bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err) { struct sk_buff *_skb = *skb; - struct tipc_msg *hdr; - struct tipc_msg ohdr; - int dlen; + struct tipc_msg *_hdr, *hdr; + int hlen, dlen; if (skb_linearize(_skb)) goto exit; - hdr = buf_msg(_skb); - dlen = min_t(uint, msg_data_sz(hdr), MAX_FORWARD_SIZE); - if (msg_dest_droppable(hdr)) + _hdr = buf_msg(_skb); + dlen = min_t(uint, msg_data_sz(_hdr), MAX_FORWARD_SIZE); + hlen = msg_hdr_sz(_hdr); + + if (msg_dest_droppable(_hdr)) goto exit; - if (msg_errcode(hdr)) + if (msg_errcode(_hdr)) goto exit; - /* Take a copy of original header before altering message */ - memcpy(&ohdr, hdr, msg_hdr_sz(hdr)); - - /* Never return SHORT header; expand by replacing buffer if necessary */ - if (msg_short(hdr)) { - *skb = tipc_buf_acquire(BASIC_H_SIZE + dlen, GFP_ATOMIC); - if (!*skb) - goto exit; - memcpy((*skb)->data + BASIC_H_SIZE, msg_data(hdr), dlen); - kfree_skb(_skb); - _skb = *skb; - hdr = buf_msg(_skb); - memcpy(hdr, &ohdr, BASIC_H_SIZE); - msg_set_hdr_sz(hdr, BASIC_H_SIZE); - } + /* Never return SHORT header */ + if (hlen == SHORT_H_SIZE) + hlen = BASIC_H_SIZE; + + /* Don't return data along with SYN+, - sender has a clone */ + if (msg_is_syn(_hdr) && err == TIPC_ERR_OVERLOAD) + dlen = 0; + + /* Allocate new buffer to return */ + *skb = tipc_buf_acquire(hlen + dlen, GFP_ATOMIC); + if (!*skb) + goto exit; + memcpy((*skb)->data, _skb->data, msg_hdr_sz(_hdr)); + memcpy((*skb)->data + hlen, msg_data(_hdr), dlen); - /* Now reverse the concerned 
fields */ + /* Build reverse header in new buffer */ + hdr = buf_msg(*skb); + msg_set_hdr_sz(hdr, hlen); msg_set_errcode(hdr, err); msg_set_non_seq(hdr, 0); - msg_set_origport(hdr, msg_destport(&ohdr)); - msg_set_destport(hdr, msg_origport(&ohdr)); - msg_set_destnode(hdr, msg_prevnode(&ohdr)); + msg_set_origport(hdr, msg_destport(_hdr)); + msg_set_destport(hdr, msg_origport(_hdr)); + msg_set_destnode(hdr, msg_prevnode(_hdr)); msg_set_prevnode(hdr, own_node); msg_set_orignode(hdr, own_node); - msg_set_size(hdr, msg_hdr_sz(hdr) + dlen); - skb_trim(_skb, msg_size(hdr)); + msg_set_size(hdr, hlen + dlen); skb_orphan(_skb); + kfree_skb(_skb); return true; exit: kfree_skb(_skb); @@ -554,6 +556,22 @@ exit: return false; } +bool tipc_msg_skb_clone(struct sk_buff_head *msg, struct sk_buff_head *cpy) +{ + struct sk_buff *skb, *_skb; + + skb_queue_walk(msg, skb) { + _skb = skb_clone(skb, GFP_ATOMIC); + if (!_skb) { + __skb_queue_purge(cpy); + pr_err_ratelimited("Failed to clone buffer chain\n"); + return false; + } + __skb_queue_tail(cpy, _skb); + } + return true; +} + /** * tipc_msg_lookup_dest(): try to find new destination for named message * @skb: the buffer containing the message. diff --git a/net/tipc/msg.h b/net/tipc/msg.h index a4e944d59394..a2879e6ec5b6 100644 --- a/net/tipc/msg.h +++ b/net/tipc/msg.h @@ -216,6 +216,16 @@ static inline void msg_set_non_seq(struct tipc_msg *m, u32 n) msg_set_bits(m, 0, 20, 1, n); } +static inline int msg_is_syn(struct tipc_msg *m) +{ + return msg_bits(m, 0, 17, 1); +} + +static inline void msg_set_syn(struct tipc_msg *m, u32 d) +{ + msg_set_bits(m, 0, 17, 1, d); +} + static inline int msg_dest_droppable(struct tipc_msg *m) { return msg_bits(m, 0, 19, 1); @@ -970,6 +980,7 @@ bool tipc_msg_pskb_copy(u32 dst, struct sk_buff_head *msg, struct sk_buff_head *cpy); void __tipc_skb_queue_sorted(struct sk_buff_head *list, u16 seqno, struct sk_buff *skb); +bool tipc_msg_skb_clone(struct sk_buff_head *msg, struct sk_buff_head *cpy); static inline u16 buf_seqno(struct sk_buff *skb) { diff --git a/net/tipc/node.h b/net/tipc/node.h index 48b3298a248d..03f5efb62cfb 100644 --- a/net/tipc/node.h +++ b/net/tipc/node.h @@ -45,6 +45,7 @@ /* Optional capabilities supported by this code version */ enum { + TIPC_SYN_BIT = (1), TIPC_BCAST_SYNCH = (1 << 1), TIPC_BCAST_STATE_NACK = (1 << 2), TIPC_BLOCK_FLOWCTL = (1 << 3), @@ -53,11 +54,12 @@ enum { TIPC_LINK_PROTO_SEQNO = (1 << 6) }; -#define TIPC_NODE_CAPABILITIES (TIPC_BCAST_SYNCH | \ - TIPC_BCAST_STATE_NACK | \ - TIPC_BCAST_RCAST | \ - TIPC_BLOCK_FLOWCTL | \ - TIPC_NODE_ID128 | \ +#define TIPC_NODE_CAPABILITIES (TIPC_SYN_BIT | \ + TIPC_BCAST_SYNCH | \ + TIPC_BCAST_STATE_NACK | \ + TIPC_BCAST_RCAST | \ + TIPC_BLOCK_FLOWCTL | \ + TIPC_NODE_ID128 | \ TIPC_LINK_PROTO_SEQNO) #define INVALID_BEARER_ID -1 diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 49810fdff4c5..de09f514428c 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -47,7 +47,7 @@ #include "netlink.h" #include "group.h" -#define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */ +#define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */ #define CONN_PROBING_INTV msecs_to_jiffies(3600000) /* [ms] => 1 h */ #define TIPC_FWD_MSG 1 #define TIPC_MAX_PORT 0xffffffff @@ -80,7 +80,6 @@ struct sockaddr_pair { * @publications: list of publications for port * @blocking_link: address of the congested link we are currently sleeping on * @pub_count: total # of publications port has made during its lifetime - * @probing_state: * @conn_timeout: the time we 
can wait for an unresponded setup request * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue * @cong_link_cnt: number of congested links @@ -102,8 +101,8 @@ struct tipc_sock { struct list_head cong_links; struct list_head publications; u32 pub_count; - uint conn_timeout; atomic_t dupl_rcvcnt; + u16 conn_timeout; bool probe_unacked; u16 cong_link_cnt; u16 snt_unacked; @@ -507,6 +506,9 @@ static void __tipc_shutdown(struct socket *sock, int error) tipc_wait_for_cond(sock, &timeout, (!tsk->cong_link_cnt && !tsk_conn_cong(tsk))); + /* Remove any pending SYN message */ + __skb_queue_purge(&sk->sk_write_queue); + /* Reject all unreceived messages, except on an active connection * (which disconnects locally & sends a 'FIN+' to peer). */ @@ -1329,6 +1331,7 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen) tsk->conn_type = dest->addr.name.name.type; tsk->conn_instance = dest->addr.name.name.instance; } + msg_set_syn(hdr, 1); } seq = &dest->addr.nameseq; @@ -1371,6 +1374,8 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen) rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts); if (unlikely(rc != dlen)) return rc; + if (unlikely(syn && !tipc_msg_skb_clone(&pkts, &sk->sk_write_queue))) + return -ENOMEM; rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid); if (unlikely(rc == -ELINKCONG)) { @@ -1490,6 +1495,7 @@ static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port, struct net *net = sock_net(sk); struct tipc_msg *msg = &tsk->phdr; + msg_set_syn(msg, 0); msg_set_destnode(msg, peer_node); msg_set_destport(msg, peer_port); msg_set_type(msg, TIPC_CONN_MSG); @@ -1501,6 +1507,7 @@ static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port, tipc_node_add_conn(net, peer_node, tsk->portid, peer_port); tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid); tsk->peer_caps = tipc_node_get_capabilities(net, peer_node); + __skb_queue_purge(&sk->sk_write_queue); if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL) return; @@ -1971,91 +1978,90 @@ static void tipc_sk_proto_rcv(struct sock *sk, } /** - * tipc_filter_connect - Handle incoming message for a connection-based socket + * tipc_sk_filter_connect - check incoming message for a connection-based socket * @tsk: TIPC socket - * @skb: pointer to message buffer. Set to NULL if buffer is consumed - * - * Returns true if everything ok, false otherwise + * @skb: pointer to message buffer. 
+ * Returns true if message should be added to receive queue, false otherwise */ static bool tipc_sk_filter_connect(struct tipc_sock *tsk, struct sk_buff *skb) { struct sock *sk = &tsk->sk; struct net *net = sock_net(sk); struct tipc_msg *hdr = buf_msg(skb); - u32 pport = msg_origport(hdr); - u32 pnode = msg_orignode(hdr); + bool con_msg = msg_connected(hdr); + u32 pport = tsk_peer_port(tsk); + u32 pnode = tsk_peer_node(tsk); + u32 oport = msg_origport(hdr); + u32 onode = msg_orignode(hdr); + int err = msg_errcode(hdr); + unsigned long delay; if (unlikely(msg_mcast(hdr))) return false; switch (sk->sk_state) { case TIPC_CONNECTING: - /* Accept only ACK or NACK message */ - if (unlikely(!msg_connected(hdr))) { - if (pport != tsk_peer_port(tsk) || - pnode != tsk_peer_node(tsk)) - return false; - - tipc_set_sk_state(sk, TIPC_DISCONNECTING); - sk->sk_err = ECONNREFUSED; - sk->sk_state_change(sk); - return true; - } - - if (unlikely(msg_errcode(hdr))) { - tipc_set_sk_state(sk, TIPC_DISCONNECTING); - sk->sk_err = ECONNREFUSED; - sk->sk_state_change(sk); - return true; - } - - if (unlikely(!msg_isdata(hdr))) { - tipc_set_sk_state(sk, TIPC_DISCONNECTING); - sk->sk_err = EINVAL; - sk->sk_state_change(sk); - return true; + /* Setup ACK */ + if (likely(con_msg)) { + if (err) + break; + tipc_sk_finish_conn(tsk, oport, onode); + msg_set_importance(&tsk->phdr, msg_importance(hdr)); + /* ACK+ message with data is added to receive queue */ + if (msg_data_sz(hdr)) + return true; + /* Empty ACK-, - wake up sleeping connect() and drop */ + sk->sk_data_ready(sk); + msg_set_dest_droppable(hdr, 1); + return false; } + /* Ignore connectionless message if not from listening socket */ + if (oport != pport || onode != pnode) + return false; - tipc_sk_finish_conn(tsk, msg_origport(hdr), msg_orignode(hdr)); - msg_set_importance(&tsk->phdr, msg_importance(hdr)); - - /* If 'ACK+' message, add to socket receive queue */ - if (msg_data_sz(hdr)) - return true; - - /* If empty 'ACK-' message, wake up sleeping connect() */ - sk->sk_data_ready(sk); + /* Rejected SYN */ + if (err != TIPC_ERR_OVERLOAD) + break; - /* 'ACK-' message is neither accepted nor rejected: */ - msg_set_dest_droppable(hdr, 1); + /* Prepare for new setup attempt if we have a SYN clone */ + if (skb_queue_empty(&sk->sk_write_queue)) + break; + get_random_bytes(&delay, 2); + delay %= (tsk->conn_timeout / 4); + delay = msecs_to_jiffies(delay + 100); + sk_reset_timer(sk, &sk->sk_timer, jiffies + delay); return false; - case TIPC_OPEN: case TIPC_DISCONNECTING: - break; + return false; case TIPC_LISTEN: /* Accept only SYN message */ - if (!msg_connected(hdr) && !(msg_errcode(hdr))) + if (!msg_is_syn(hdr) && + tipc_node_get_capabilities(net, onode) & TIPC_SYN_BIT) + return false; + if (!con_msg && !err) return true; - break; + return false; case TIPC_ESTABLISHED: /* Accept only connection-based messages sent by peer */ - if (unlikely(!tsk_peer_msg(tsk, hdr))) + if (likely(con_msg && !err && pport == oport && pnode == onode)) + return true; + if (!tsk_peer_msg(tsk, hdr)) return false; - - if (unlikely(msg_errcode(hdr))) { - tipc_set_sk_state(sk, TIPC_DISCONNECTING); - /* Let timer expire on it's own */ - tipc_node_remove_conn(net, tsk_peer_node(tsk), - tsk->portid); - sk->sk_state_change(sk); - } + if (!err) + return true; + tipc_set_sk_state(sk, TIPC_DISCONNECTING); + tipc_node_remove_conn(net, pnode, tsk->portid); + sk->sk_state_change(sk); return true; default: pr_err("Unknown sk_state %u\n", sk->sk_state); } - - return false; + /* Abort connection setup 
attempt */ + tipc_set_sk_state(sk, TIPC_DISCONNECTING); + sk->sk_err = ECONNREFUSED; + sk->sk_state_change(sk); + return true; } /** @@ -2557,43 +2563,78 @@ static int tipc_shutdown(struct socket *sock, int how) return res; } +static void tipc_sk_check_probing_state(struct sock *sk, + struct sk_buff_head *list) +{ + struct tipc_sock *tsk = tipc_sk(sk); + u32 pnode = tsk_peer_node(tsk); + u32 pport = tsk_peer_port(tsk); + u32 self = tsk_own_node(tsk); + u32 oport = tsk->portid; + struct sk_buff *skb; + + if (tsk->probe_unacked) { + tipc_set_sk_state(sk, TIPC_DISCONNECTING); + sk->sk_err = ECONNABORTED; + tipc_node_remove_conn(sock_net(sk), pnode, pport); + sk->sk_state_change(sk); + return; + } + /* Prepare new probe */ + skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE, 0, + pnode, self, pport, oport, TIPC_OK); + if (skb) + __skb_queue_tail(list, skb); + tsk->probe_unacked = true; + sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV); +} + +static void tipc_sk_retry_connect(struct sock *sk, struct sk_buff_head *list) +{ + struct tipc_sock *tsk = tipc_sk(sk); + + /* Try again later if dest link is congested */ + if (tsk->cong_link_cnt) { + sk_reset_timer(sk, &sk->sk_timer, msecs_to_jiffies(100)); + return; + } + /* Prepare SYN for retransmit */ + tipc_msg_skb_clone(&sk->sk_write_queue, list); +} + static void tipc_sk_timeout(struct timer_list *t) { struct sock *sk = from_timer(sk, t, sk_timer); struct tipc_sock *tsk = tipc_sk(sk); - u32 peer_port = tsk_peer_port(tsk); - u32 peer_node = tsk_peer_node(tsk); - u32 own_node = tsk_own_node(tsk); - u32 own_port = tsk->portid; - struct net *net = sock_net(sk); - struct sk_buff *skb = NULL; + u32 pnode = tsk_peer_node(tsk); + struct sk_buff_head list; + int rc = 0; + skb_queue_head_init(&list); bh_lock_sock(sk); - if (!tipc_sk_connected(sk)) - goto exit; /* Try again later if socket is busy */ if (sock_owned_by_user(sk)) { sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 20); - goto exit; + bh_unlock_sock(sk); + return; } - if (tsk->probe_unacked) { - tipc_set_sk_state(sk, TIPC_DISCONNECTING); - tipc_node_remove_conn(net, peer_node, peer_port); - sk->sk_state_change(sk); - goto exit; - } - /* Send new probe */ - skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE, 0, - peer_node, own_node, peer_port, own_port, - TIPC_OK); - tsk->probe_unacked = true; - sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV); -exit: + if (sk->sk_state == TIPC_ESTABLISHED) + tipc_sk_check_probing_state(sk, &list); + else if (sk->sk_state == TIPC_CONNECTING) + tipc_sk_retry_connect(sk, &list); + bh_unlock_sock(sk); - if (skb) - tipc_node_xmit_skb(net, skb, peer_node, own_port); + + if (!skb_queue_empty(&list)) + rc = tipc_node_xmit(sock_net(sk), &list, pnode, tsk->portid); + + /* SYN messages may cause link congestion */ + if (rc == -ELINKCONG) { + tipc_dest_push(&tsk->cong_links, pnode, 0); + tsk->cong_link_cnt = 1; + } sock_put(sk); } diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c index 2627b5d812e9..d8956f7daac4 100644 --- a/net/tipc/topsrv.c +++ b/net/tipc/topsrv.c @@ -57,16 +57,12 @@ * @idr_lock: protect the connection identifier set * @idr_in_use: amount of allocated identifier entry * @net: network namespace instance - * @rcvbuf_cache: memory cache of server receive buffer + * @awork: accept work item * @rcv_wq: receive workqueue * @send_wq: send workqueue * @max_rcvbuf_size: maximum permitted receive message length - * @tipc_conn_new: callback will be called when new connection is incoming - * @tipc_conn_release: callback 
will be called before releasing the connection - * @tipc_conn_recvmsg: callback will be called when message arrives + * @listener: topsrv listener socket * @name: server name - * @imp: message importance - * @type: socket type */ struct tipc_topsrv { struct idr conn_idr; @@ -90,9 +86,7 @@ * @server: pointer to connected server * @sub_list: list of all pertaining subscriptions * @sub_lock: lock protecting the subscription list - * @outqueue_lock: control access to the outqueue * @rwork: receive work item - * @rx_action: what to do when connection socket is active * @outqueue: pointer to first outbound message in queue * @outqueue_lock: control access to the outqueue * @swork: send work item diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c index 523622dc74f8..b428069a1b05 100644 --- a/net/tls/tls_main.c +++ b/net/tls/tls_main.c @@ -141,7 +141,6 @@ retry: size = sg->length; } - clear_bit(TLS_PENDING_CLOSED_RECORD, &ctx->flags); ctx->in_tcp_sendpages = false; ctx->sk_write_space(sk); @@ -193,15 +192,12 @@ int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg, return rc; } -int tls_push_pending_closed_record(struct sock *sk, struct tls_context *ctx, - int flags, long *timeo) +int tls_push_partial_record(struct sock *sk, struct tls_context *ctx, + int flags) { struct scatterlist *sg; u16 offset; - if (!tls_is_partially_sent_record(ctx)) - return ctx->push_pending_record(sk, flags); - sg = ctx->partially_sent_record; offset = ctx->partially_sent_offset; @@ -209,9 +205,23 @@ int tls_push_pending_closed_record(struct sock *sk, struct tls_context *ctx, return tls_push_sg(sk, ctx, sg, offset, flags); } +int tls_push_pending_closed_record(struct sock *sk, + struct tls_context *tls_ctx, + int flags, long *timeo) +{ + struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); + + if (tls_is_partially_sent_record(tls_ctx) || + !list_empty(&ctx->tx_list)) + return tls_tx_records(sk, flags); + else + return tls_ctx->push_pending_record(sk, flags); +} + static void tls_write_space(struct sock *sk) { struct tls_context *ctx = tls_get_ctx(sk); + struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx); /* If in_tcp_sendpages call lower protocol write space handler * to ensure we wake up any waiting operations there. 
For example @@ -222,20 +232,11 @@ static void tls_write_space(struct sock *sk) return; } - if (!sk->sk_write_pending && tls_is_pending_closed_record(ctx)) { - gfp_t sk_allocation = sk->sk_allocation; - int rc; - long timeo = 0; - - sk->sk_allocation = GFP_ATOMIC; - rc = tls_push_pending_closed_record(sk, ctx, - MSG_DONTWAIT | - MSG_NOSIGNAL, - &timeo); - sk->sk_allocation = sk_allocation; - - if (rc < 0) - return; + /* Schedule the transmission if tx list is ready */ + if (is_tx_ready(tx_ctx) && !sk->sk_write_pending) { + /* Schedule the transmission */ + if (!test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask)) + schedule_delayed_work(&tx_ctx->tx_work.work, 0); } ctx->sk_write_space(sk); @@ -270,19 +271,6 @@ static void tls_sk_proto_close(struct sock *sk, long timeout) if (!tls_complete_pending_work(sk, ctx, 0, &timeo)) tls_handle_open_record(sk, 0); - if (ctx->partially_sent_record) { - struct scatterlist *sg = ctx->partially_sent_record; - - while (1) { - put_page(sg_page(sg)); - sk_mem_uncharge(sk, sg->length); - - if (sg_is_last(sg)) - break; - sg++; - } - } - /* We need these for tls_sw_fallback handling of other packets */ if (ctx->tx_conf == TLS_SW) { kfree(ctx->tx.rec_seq); diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index b9c6ecfbcfea..aa9fdce272b6 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -43,12 +43,133 @@ #define MAX_IV_SIZE TLS_CIPHER_AES_GCM_128_IV_SIZE +static int __skb_nsg(struct sk_buff *skb, int offset, int len, + unsigned int recursion_level) +{ + int start = skb_headlen(skb); + int i, chunk = start - offset; + struct sk_buff *frag_iter; + int elt = 0; + + if (unlikely(recursion_level >= 24)) + return -EMSGSIZE; + + if (chunk > 0) { + if (chunk > len) + chunk = len; + elt++; + len -= chunk; + if (len == 0) + return elt; + offset += chunk; + } + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + int end; + + WARN_ON(start > offset + len); + + end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); + chunk = end - offset; + if (chunk > 0) { + if (chunk > len) + chunk = len; + elt++; + len -= chunk; + if (len == 0) + return elt; + offset += chunk; + } + start = end; + } + + if (unlikely(skb_has_frag_list(skb))) { + skb_walk_frags(skb, frag_iter) { + int end, ret; + + WARN_ON(start > offset + len); + + end = start + frag_iter->len; + chunk = end - offset; + if (chunk > 0) { + if (chunk > len) + chunk = len; + ret = __skb_nsg(frag_iter, offset - start, chunk, + recursion_level + 1); + if (unlikely(ret < 0)) + return ret; + elt += ret; + len -= chunk; + if (len == 0) + return elt; + offset += chunk; + } + start = end; + } + } + BUG_ON(len); + return elt; +} + +/* Return the number of scatterlist elements required to completely map the + * skb, or -EMSGSIZE if the recursion depth is exceeded. 
+ */ +static int skb_nsg(struct sk_buff *skb, int offset, int len) +{ + return __skb_nsg(skb, offset, len, 0); +} + +static void tls_decrypt_done(struct crypto_async_request *req, int err) +{ + struct aead_request *aead_req = (struct aead_request *)req; + struct scatterlist *sgout = aead_req->dst; + struct tls_sw_context_rx *ctx; + struct tls_context *tls_ctx; + struct scatterlist *sg; + struct sk_buff *skb; + unsigned int pages; + int pending; + + skb = (struct sk_buff *)req->data; + tls_ctx = tls_get_ctx(skb->sk); + ctx = tls_sw_ctx_rx(tls_ctx); + pending = atomic_dec_return(&ctx->decrypt_pending); + + /* Propagate if there was an err */ + if (err) { + ctx->async_wait.err = err; + tls_err_abort(skb->sk, err); + } + + /* After using skb->sk to propagate sk through crypto async callback + * we need to NULL it again. + */ + skb->sk = NULL; + + /* Release the skb, pages and memory allocated for crypto req */ + kfree_skb(skb); + + /* Skip the first S/G entry as it points to AAD */ + for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) { + if (!sg) + break; + put_page(sg_page(sg)); + } + + kfree(aead_req); + + if (!pending && READ_ONCE(ctx->async_notify)) + complete(&ctx->async_wait.completion); +} + static int tls_do_decryption(struct sock *sk, + struct sk_buff *skb, struct scatterlist *sgin, struct scatterlist *sgout, char *iv_recv, size_t data_len, - struct aead_request *aead_req) + struct aead_request *aead_req, + bool async) { struct tls_context *tls_ctx = tls_get_ctx(sk); struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); @@ -59,10 +180,36 @@ static int tls_do_decryption(struct sock *sk, aead_request_set_crypt(aead_req, sgin, sgout, data_len + tls_ctx->rx.tag_size, (u8 *)iv_recv); - aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG, - crypto_req_done, &ctx->async_wait); - ret = crypto_wait_req(crypto_aead_decrypt(aead_req), &ctx->async_wait); + if (async) { + /* Using skb->sk to push sk through to crypto async callback + * handler. This allows propagating errors up to the socket + * if needed. It _must_ be cleared in the async handler + * before kfree_skb is called. We _know_ skb->sk is NULL + * because it is a clone from strparser. 
+ */ + skb->sk = sk; + aead_request_set_callback(aead_req, + CRYPTO_TFM_REQ_MAY_BACKLOG, + tls_decrypt_done, skb); + atomic_inc(&ctx->decrypt_pending); + } else { + aead_request_set_callback(aead_req, + CRYPTO_TFM_REQ_MAY_BACKLOG, + crypto_req_done, &ctx->async_wait); + } + + ret = crypto_aead_decrypt(aead_req); + if (ret == -EINPROGRESS) { + if (async) + return ret; + + ret = crypto_wait_req(ret, &ctx->async_wait); + } + + if (async) + atomic_dec(&ctx->decrypt_pending); + return ret; } @@ -99,18 +246,19 @@ static void trim_both_sgl(struct sock *sk, int target_size) { struct tls_context *tls_ctx = tls_get_ctx(sk); struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); + struct tls_rec *rec = ctx->open_rec; - trim_sg(sk, ctx->sg_plaintext_data, - &ctx->sg_plaintext_num_elem, - &ctx->sg_plaintext_size, + trim_sg(sk, &rec->sg_plaintext_data[1], + &rec->sg_plaintext_num_elem, + &rec->sg_plaintext_size, target_size); if (target_size > 0) target_size += tls_ctx->tx.overhead_size; - trim_sg(sk, ctx->sg_encrypted_data, - &ctx->sg_encrypted_num_elem, - &ctx->sg_encrypted_size, + trim_sg(sk, &rec->sg_encrypted_data[1], + &rec->sg_encrypted_num_elem, + &rec->sg_encrypted_size, target_size); } @@ -118,33 +266,87 @@ static int alloc_encrypted_sg(struct sock *sk, int len) { struct tls_context *tls_ctx = tls_get_ctx(sk); struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); + struct tls_rec *rec = ctx->open_rec; int rc = 0; rc = sk_alloc_sg(sk, len, - ctx->sg_encrypted_data, 0, - &ctx->sg_encrypted_num_elem, - &ctx->sg_encrypted_size, 0); + &rec->sg_encrypted_data[1], 0, + &rec->sg_encrypted_num_elem, + &rec->sg_encrypted_size, 0); if (rc == -ENOSPC) - ctx->sg_encrypted_num_elem = ARRAY_SIZE(ctx->sg_encrypted_data); + rec->sg_encrypted_num_elem = + ARRAY_SIZE(rec->sg_encrypted_data) - 1; return rc; } -static int alloc_plaintext_sg(struct sock *sk, int len) +static int move_to_plaintext_sg(struct sock *sk, int required_size) { struct tls_context *tls_ctx = tls_get_ctx(sk); struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); - int rc = 0; + struct tls_rec *rec = ctx->open_rec; + struct scatterlist *plain_sg = &rec->sg_plaintext_data[1]; + struct scatterlist *enc_sg = &rec->sg_encrypted_data[1]; + int enc_sg_idx = 0; + int skip, len; + + if (rec->sg_plaintext_num_elem == MAX_SKB_FRAGS) + return -ENOSPC; + + /* We add page references worth len bytes from enc_sg at the + * end of plain_sg. It is guaranteed that sg_encrypted_data + * has enough required room (ensured by caller). + */ + len = required_size - rec->sg_plaintext_size; - rc = sk_alloc_sg(sk, len, ctx->sg_plaintext_data, 0, - &ctx->sg_plaintext_num_elem, &ctx->sg_plaintext_size, - tls_ctx->pending_open_record_frags); + /* Skip initial bytes in sg_encrypted_data to be able + * to use same offset of both plain and encrypted data. 
+ */ + skip = tls_ctx->tx.prepend_size + rec->sg_plaintext_size; - if (rc == -ENOSPC) - ctx->sg_plaintext_num_elem = ARRAY_SIZE(ctx->sg_plaintext_data); + while (enc_sg_idx < rec->sg_encrypted_num_elem) { + if (enc_sg[enc_sg_idx].length > skip) + break; - return rc; + skip -= enc_sg[enc_sg_idx].length; + enc_sg_idx++; + } + + /* unmark the end of plain_sg*/ + sg_unmark_end(plain_sg + rec->sg_plaintext_num_elem - 1); + + while (len) { + struct page *page = sg_page(&enc_sg[enc_sg_idx]); + int bytes = enc_sg[enc_sg_idx].length - skip; + int offset = enc_sg[enc_sg_idx].offset + skip; + + if (bytes > len) + bytes = len; + else + enc_sg_idx++; + + /* Skipping is required only one time */ + skip = 0; + + /* Increment page reference */ + get_page(page); + + sg_set_page(&plain_sg[rec->sg_plaintext_num_elem], page, + bytes, offset); + + sk_mem_charge(sk, bytes); + + len -= bytes; + rec->sg_plaintext_size += bytes; + + rec->sg_plaintext_num_elem++; + + if (rec->sg_plaintext_num_elem == MAX_SKB_FRAGS) + return -ENOSPC; + } + + return 0; } static void free_sg(struct sock *sk, struct scatterlist *sg, @@ -160,41 +362,192 @@ static void free_sg(struct sock *sk, struct scatterlist *sg, *sg_size = 0; } -static void tls_free_both_sg(struct sock *sk) +static void tls_free_open_rec(struct sock *sk) { struct tls_context *tls_ctx = tls_get_ctx(sk); struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); + struct tls_rec *rec = ctx->open_rec; - free_sg(sk, ctx->sg_encrypted_data, &ctx->sg_encrypted_num_elem, - &ctx->sg_encrypted_size); + /* Return if there is no open record */ + if (!rec) + return; - free_sg(sk, ctx->sg_plaintext_data, &ctx->sg_plaintext_num_elem, - &ctx->sg_plaintext_size); + free_sg(sk, &rec->sg_encrypted_data[1], + &rec->sg_encrypted_num_elem, + &rec->sg_encrypted_size); + + free_sg(sk, &rec->sg_plaintext_data[1], + &rec->sg_plaintext_num_elem, + &rec->sg_plaintext_size); + + kfree(rec); } -static int tls_do_encryption(struct tls_context *tls_ctx, +int tls_tx_records(struct sock *sk, int flags) +{ + struct tls_context *tls_ctx = tls_get_ctx(sk); + struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); + struct tls_rec *rec, *tmp; + int tx_flags, rc = 0; + + if (tls_is_partially_sent_record(tls_ctx)) { + rec = list_first_entry(&ctx->tx_list, + struct tls_rec, list); + + if (flags == -1) + tx_flags = rec->tx_flags; + else + tx_flags = flags; + + rc = tls_push_partial_record(sk, tls_ctx, tx_flags); + if (rc) + goto tx_err; + + /* Full record has been transmitted. 
+ * Remove the head of tx_list + */ + list_del(&rec->list); + free_sg(sk, &rec->sg_plaintext_data[1], + &rec->sg_plaintext_num_elem, &rec->sg_plaintext_size); + + kfree(rec); + } + + /* Tx all ready records */ + list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) { + if (READ_ONCE(rec->tx_ready)) { + if (flags == -1) + tx_flags = rec->tx_flags; + else + tx_flags = flags; + + rc = tls_push_sg(sk, tls_ctx, + &rec->sg_encrypted_data[1], + 0, tx_flags); + if (rc) + goto tx_err; + + list_del(&rec->list); + free_sg(sk, &rec->sg_plaintext_data[1], + &rec->sg_plaintext_num_elem, + &rec->sg_plaintext_size); + + kfree(rec); + } else { + break; + } + } + +tx_err: + if (rc < 0 && rc != -EAGAIN) + tls_err_abort(sk, EBADMSG); + + return rc; +} + +static void tls_encrypt_done(struct crypto_async_request *req, int err) +{ + struct aead_request *aead_req = (struct aead_request *)req; + struct sock *sk = req->data; + struct tls_context *tls_ctx = tls_get_ctx(sk); + struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); + struct tls_rec *rec; + bool ready = false; + int pending; + + rec = container_of(aead_req, struct tls_rec, aead_req); + + rec->sg_encrypted_data[1].offset -= tls_ctx->tx.prepend_size; + rec->sg_encrypted_data[1].length += tls_ctx->tx.prepend_size; + + + /* Check if error is previously set on socket */ + if (err || sk->sk_err) { + rec = NULL; + + /* If err is already set on socket, return the same code */ + if (sk->sk_err) { + ctx->async_wait.err = sk->sk_err; + } else { + ctx->async_wait.err = err; + tls_err_abort(sk, err); + } + } + + if (rec) { + struct tls_rec *first_rec; + + /* Mark the record as ready for transmission */ + smp_store_mb(rec->tx_ready, true); + + /* If received record is at head of tx_list, schedule tx */ + first_rec = list_first_entry(&ctx->tx_list, + struct tls_rec, list); + if (rec == first_rec) + ready = true; + } + + pending = atomic_dec_return(&ctx->encrypt_pending); + + if (!pending && READ_ONCE(ctx->async_notify)) + complete(&ctx->async_wait.completion); + + if (!ready) + return; + + /* Schedule the transmission */ + if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) + schedule_delayed_work(&ctx->tx_work.work, 2); +} + +static int tls_do_encryption(struct sock *sk, + struct tls_context *tls_ctx, struct tls_sw_context_tx *ctx, struct aead_request *aead_req, size_t data_len) { + struct tls_rec *rec = ctx->open_rec; + struct scatterlist *plain_sg = rec->sg_plaintext_data; + struct scatterlist *enc_sg = rec->sg_encrypted_data; int rc; - ctx->sg_encrypted_data[0].offset += tls_ctx->tx.prepend_size; - ctx->sg_encrypted_data[0].length -= tls_ctx->tx.prepend_size; + /* Skip the first index as it contains AAD data */ + rec->sg_encrypted_data[1].offset += tls_ctx->tx.prepend_size; + rec->sg_encrypted_data[1].length -= tls_ctx->tx.prepend_size; + + /* If it is inplace crypto, then pass same SG list as both src, dst */ + if (rec->inplace_crypto) + plain_sg = enc_sg; aead_request_set_tfm(aead_req, ctx->aead_send); aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE); - aead_request_set_crypt(aead_req, ctx->sg_aead_in, ctx->sg_aead_out, + aead_request_set_crypt(aead_req, plain_sg, enc_sg, data_len, tls_ctx->tx.iv); aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG, - crypto_req_done, &ctx->async_wait); + tls_encrypt_done, sk); - rc = crypto_wait_req(crypto_aead_encrypt(aead_req), &ctx->async_wait); + /* Add the record in tx_list */ + list_add_tail((struct list_head *)&rec->list, &ctx->tx_list); + atomic_inc(&ctx->encrypt_pending); - 
ctx->sg_encrypted_data[0].offset -= tls_ctx->tx.prepend_size; - ctx->sg_encrypted_data[0].length += tls_ctx->tx.prepend_size; + rc = crypto_aead_encrypt(aead_req); + if (!rc || rc != -EINPROGRESS) { + atomic_dec(&ctx->encrypt_pending); + rec->sg_encrypted_data[1].offset -= tls_ctx->tx.prepend_size; + rec->sg_encrypted_data[1].length += tls_ctx->tx.prepend_size; + } + + if (!rc) { + WRITE_ONCE(rec->tx_ready, true); + } else if (rc != -EINPROGRESS) { + list_del(&rec->list); + return rc; + } + /* Unhook the record from context if encryption did not fail */ + ctx->open_rec = NULL; + tls_advance_record_sn(sk, &tls_ctx->tx); return rc; } @@ -203,53 +556,40 @@ static int tls_push_record(struct sock *sk, int flags, { struct tls_context *tls_ctx = tls_get_ctx(sk); struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); + struct tls_rec *rec = ctx->open_rec; struct aead_request *req; int rc; - req = aead_request_alloc(ctx->aead_send, sk->sk_allocation); - if (!req) - return -ENOMEM; + if (!rec) + return 0; + + rec->tx_flags = flags; + req = &rec->aead_req; - sg_mark_end(ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem - 1); - sg_mark_end(ctx->sg_encrypted_data + ctx->sg_encrypted_num_elem - 1); + sg_mark_end(rec->sg_plaintext_data + rec->sg_plaintext_num_elem); + sg_mark_end(rec->sg_encrypted_data + rec->sg_encrypted_num_elem); - tls_make_aad(ctx->aad_space, ctx->sg_plaintext_size, + tls_make_aad(rec->aad_space, rec->sg_plaintext_size, tls_ctx->tx.rec_seq, tls_ctx->tx.rec_seq_size, record_type); tls_fill_prepend(tls_ctx, - page_address(sg_page(&ctx->sg_encrypted_data[0])) + - ctx->sg_encrypted_data[0].offset, - ctx->sg_plaintext_size, record_type); + page_address(sg_page(&rec->sg_encrypted_data[1])) + + rec->sg_encrypted_data[1].offset, + rec->sg_plaintext_size, record_type); tls_ctx->pending_open_record_frags = 0; - set_bit(TLS_PENDING_CLOSED_RECORD, &tls_ctx->flags); - rc = tls_do_encryption(tls_ctx, ctx, req, ctx->sg_plaintext_size); - if (rc < 0) { - /* If we are called from write_space and - * we fail, we need to set this SOCK_NOSPACE - * to trigger another write_space in the future. 
- */ - set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); - goto out_req; - } + rc = tls_do_encryption(sk, tls_ctx, ctx, req, rec->sg_plaintext_size); + if (rc == -EINPROGRESS) + return -EINPROGRESS; - free_sg(sk, ctx->sg_plaintext_data, &ctx->sg_plaintext_num_elem, - &ctx->sg_plaintext_size); - - ctx->sg_encrypted_num_elem = 0; - ctx->sg_encrypted_size = 0; - - /* Only pass through MSG_DONTWAIT and MSG_NOSIGNAL flags */ - rc = tls_push_sg(sk, tls_ctx, ctx->sg_encrypted_data, 0, flags); - if (rc < 0 && rc != -EAGAIN) + if (rc < 0) { tls_err_abort(sk, EBADMSG); + return rc; + } - tls_advance_record_sn(sk, &tls_ctx->tx); -out_req: - aead_request_free(req); - return rc; + return tls_tx_records(sk, flags); } static int tls_sw_push_pending_record(struct sock *sk, int flags) @@ -326,11 +666,12 @@ static int memcopy_from_iter(struct sock *sk, struct iov_iter *from, { struct tls_context *tls_ctx = tls_get_ctx(sk); struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); - struct scatterlist *sg = ctx->sg_plaintext_data; + struct tls_rec *rec = ctx->open_rec; + struct scatterlist *sg = &rec->sg_plaintext_data[1]; int copy, i, rc = 0; for (i = tls_ctx->pending_open_record_frags; - i < ctx->sg_plaintext_num_elem; ++i) { + i < rec->sg_plaintext_num_elem; ++i) { copy = sg[i].length; if (copy_from_iter( page_address(sg_page(&sg[i])) + sg[i].offset, @@ -350,33 +691,79 @@ out: return rc; } -int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) +static struct tls_rec *get_rec(struct sock *sk) { struct tls_context *tls_ctx = tls_get_ctx(sk); struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); - int ret = 0; - int required_size; + struct tls_rec *rec; + int mem_size; + + /* Return if we already have an open record */ + if (ctx->open_rec) + return ctx->open_rec; + + mem_size = sizeof(struct tls_rec) + crypto_aead_reqsize(ctx->aead_send); + + rec = kzalloc(mem_size, sk->sk_allocation); + if (!rec) + return NULL; + + sg_init_table(&rec->sg_plaintext_data[0], + ARRAY_SIZE(rec->sg_plaintext_data)); + sg_init_table(&rec->sg_encrypted_data[0], + ARRAY_SIZE(rec->sg_encrypted_data)); + + sg_set_buf(&rec->sg_plaintext_data[0], rec->aad_space, + sizeof(rec->aad_space)); + sg_set_buf(&rec->sg_encrypted_data[0], rec->aad_space, + sizeof(rec->aad_space)); + + ctx->open_rec = rec; + rec->inplace_crypto = 1; + + return rec; +} + +int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) +{ long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); + struct tls_context *tls_ctx = tls_get_ctx(sk); + struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); + struct crypto_tfm *tfm = crypto_aead_tfm(ctx->aead_send); + bool async_capable = tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC; + unsigned char record_type = TLS_RECORD_TYPE_DATA; + bool is_kvec = msg->msg_iter.type & ITER_KVEC; bool eor = !(msg->msg_flags & MSG_MORE); size_t try_to_copy, copied = 0; - unsigned char record_type = TLS_RECORD_TYPE_DATA; - int record_room; + struct tls_rec *rec; + int required_size; + int num_async = 0; bool full_record; + int record_room; + int num_zc = 0; int orig_size; - bool is_kvec = msg->msg_iter.type & ITER_KVEC; + int ret = 0; if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL)) return -ENOTSUPP; lock_sock(sk); - if (tls_complete_pending_work(sk, tls_ctx, msg->msg_flags, &timeo)) - goto send_end; + /* Wait till there is any pending write on socket */ + if (unlikely(sk->sk_write_pending)) { + ret = wait_on_pending_writer(sk, &timeo); + if (unlikely(ret)) + goto send_end; + } if 
(unlikely(msg->msg_controllen)) { ret = tls_proccess_cmsg(sk, msg, &record_type); - if (ret) - goto send_end; + if (ret) { + if (ret == -EINPROGRESS) + num_async++; + else if (ret != -EAGAIN) + goto send_end; + } } while (msg_data_left(msg)) { @@ -385,20 +772,27 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) goto send_end; } - orig_size = ctx->sg_plaintext_size; + rec = get_rec(sk); + if (!rec) { + ret = -ENOMEM; + goto send_end; + } + + orig_size = rec->sg_plaintext_size; full_record = false; try_to_copy = msg_data_left(msg); - record_room = TLS_MAX_PAYLOAD_SIZE - ctx->sg_plaintext_size; + record_room = TLS_MAX_PAYLOAD_SIZE - rec->sg_plaintext_size; if (try_to_copy >= record_room) { try_to_copy = record_room; full_record = true; } - required_size = ctx->sg_plaintext_size + try_to_copy + + required_size = rec->sg_plaintext_size + try_to_copy + tls_ctx->tx.overhead_size; if (!sk_stream_memory_free(sk)) goto wait_for_sndbuf; + alloc_encrypted: ret = alloc_encrypted_sg(sk, required_size); if (ret) { @@ -409,50 +803,58 @@ alloc_encrypted: * actually allocated. The difference is due * to max sg elements limit */ - try_to_copy -= required_size - ctx->sg_encrypted_size; + try_to_copy -= required_size - rec->sg_encrypted_size; full_record = true; } - if (!is_kvec && (full_record || eor)) { + + if (!is_kvec && (full_record || eor) && !async_capable) { ret = zerocopy_from_iter(sk, &msg->msg_iter, - try_to_copy, &ctx->sg_plaintext_num_elem, - &ctx->sg_plaintext_size, - ctx->sg_plaintext_data, - ARRAY_SIZE(ctx->sg_plaintext_data), + try_to_copy, &rec->sg_plaintext_num_elem, + &rec->sg_plaintext_size, + &rec->sg_plaintext_data[1], + ARRAY_SIZE(rec->sg_plaintext_data) - 1, true); if (ret) goto fallback_to_reg_send; + rec->inplace_crypto = 0; + + num_zc++; copied += try_to_copy; ret = tls_push_record(sk, msg->msg_flags, record_type); - if (ret) - goto send_end; + if (ret) { + if (ret == -EINPROGRESS) + num_async++; + else if (ret != -EAGAIN) + goto send_end; + } continue; fallback_to_reg_send: - trim_sg(sk, ctx->sg_plaintext_data, - &ctx->sg_plaintext_num_elem, - &ctx->sg_plaintext_size, + trim_sg(sk, &rec->sg_plaintext_data[1], + &rec->sg_plaintext_num_elem, + &rec->sg_plaintext_size, orig_size); } - required_size = ctx->sg_plaintext_size + try_to_copy; -alloc_plaintext: - ret = alloc_plaintext_sg(sk, required_size); + required_size = rec->sg_plaintext_size + try_to_copy; + + ret = move_to_plaintext_sg(sk, required_size); if (ret) { if (ret != -ENOSPC) - goto wait_for_memory; + goto send_end; /* Adjust try_to_copy according to the amount that was * actually allocated. 
The difference is due * to max sg elements limit */ - try_to_copy -= required_size - ctx->sg_plaintext_size; + try_to_copy -= required_size - rec->sg_plaintext_size; full_record = true; - trim_sg(sk, ctx->sg_encrypted_data, - &ctx->sg_encrypted_num_elem, - &ctx->sg_encrypted_size, - ctx->sg_plaintext_size + + trim_sg(sk, &rec->sg_encrypted_data[1], + &rec->sg_encrypted_num_elem, + &rec->sg_encrypted_size, + rec->sg_plaintext_size + tls_ctx->tx.overhead_size); } @@ -462,13 +864,12 @@ alloc_plaintext: copied += try_to_copy; if (full_record || eor) { -push_record: ret = tls_push_record(sk, msg->msg_flags, record_type); if (ret) { - if (ret == -ENOMEM) - goto wait_for_memory; - - goto send_end; + if (ret == -EINPROGRESS) + num_async++; + else if (ret != -EAGAIN) + goto send_end; } } @@ -484,13 +885,33 @@ trim_sgl: goto send_end; } - if (tls_is_pending_closed_record(tls_ctx)) - goto push_record; - - if (ctx->sg_encrypted_size < required_size) + if (rec->sg_encrypted_size < required_size) goto alloc_encrypted; + } + + if (!num_async) { + goto send_end; + } else if (num_zc) { + /* Wait for pending encryptions to get completed */ + smp_store_mb(ctx->async_notify, true); + + if (atomic_read(&ctx->encrypt_pending)) + crypto_wait_req(-EINPROGRESS, &ctx->async_wait); + else + reinit_completion(&ctx->async_wait.completion); + + WRITE_ONCE(ctx->async_notify, false); - goto alloc_plaintext; + if (ctx->async_wait.err) { + ret = ctx->async_wait.err; + copied = 0; + } + } + + /* Transmit if any encryptions have completed */ + if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) { + cancel_delayed_work(&ctx->tx_work.work); + tls_tx_records(sk, msg->msg_flags); } send_end: @@ -503,16 +924,18 @@ send_end: int tls_sw_sendpage(struct sock *sk, struct page *page, int offset, size_t size, int flags) { + long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); struct tls_context *tls_ctx = tls_get_ctx(sk); struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); - int ret = 0; - long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); - bool eor; - size_t orig_size = size; unsigned char record_type = TLS_RECORD_TYPE_DATA; + size_t orig_size = size; struct scatterlist *sg; + struct tls_rec *rec; + int num_async = 0; bool full_record; int record_room; + int ret = 0; + bool eor; if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | MSG_SENDPAGE_NOTLAST)) @@ -525,8 +948,12 @@ int tls_sw_sendpage(struct sock *sk, struct page *page, sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); - if (tls_complete_pending_work(sk, tls_ctx, flags, &timeo)) - goto sendpage_end; + /* Wait till there is any pending write on socket */ + if (unlikely(sk->sk_write_pending)) { + ret = wait_on_pending_writer(sk, &timeo); + if (unlikely(ret)) + goto sendpage_end; + } /* Call the sk_stream functions to manage the sndbuf mem. */ while (size > 0) { @@ -537,14 +964,20 @@ int tls_sw_sendpage(struct sock *sk, struct page *page, goto sendpage_end; } + rec = get_rec(sk); + if (!rec) { + ret = -ENOMEM; + goto sendpage_end; + } + full_record = false; - record_room = TLS_MAX_PAYLOAD_SIZE - ctx->sg_plaintext_size; + record_room = TLS_MAX_PAYLOAD_SIZE - rec->sg_plaintext_size; copy = size; if (copy >= record_room) { copy = record_room; full_record = true; } - required_size = ctx->sg_plaintext_size + copy + + required_size = rec->sg_plaintext_size + copy + tls_ctx->tx.overhead_size; if (!sk_stream_memory_free(sk)) @@ -559,33 +992,33 @@ alloc_payload: * actually allocated. 
The difference is due * to max sg elements limit */ - copy -= required_size - ctx->sg_plaintext_size; + copy -= required_size - rec->sg_plaintext_size; full_record = true; } get_page(page); - sg = ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem; + sg = &rec->sg_plaintext_data[1] + rec->sg_plaintext_num_elem; sg_set_page(sg, page, copy, offset); sg_unmark_end(sg); - ctx->sg_plaintext_num_elem++; + rec->sg_plaintext_num_elem++; sk_mem_charge(sk, copy); offset += copy; size -= copy; - ctx->sg_plaintext_size += copy; - tls_ctx->pending_open_record_frags = ctx->sg_plaintext_num_elem; + rec->sg_plaintext_size += copy; + tls_ctx->pending_open_record_frags = rec->sg_plaintext_num_elem; if (full_record || eor || - ctx->sg_plaintext_num_elem == - ARRAY_SIZE(ctx->sg_plaintext_data)) { -push_record: + rec->sg_plaintext_num_elem == + ARRAY_SIZE(rec->sg_plaintext_data) - 1) { + rec->inplace_crypto = 0; ret = tls_push_record(sk, flags, record_type); if (ret) { - if (ret == -ENOMEM) - goto wait_for_memory; - - goto sendpage_end; + if (ret == -EINPROGRESS) + num_async++; + else if (ret != -EAGAIN) + goto sendpage_end; } } continue; @@ -594,16 +1027,20 @@ wait_for_sndbuf: wait_for_memory: ret = sk_stream_wait_memory(sk, &timeo); if (ret) { - trim_both_sgl(sk, ctx->sg_plaintext_size); + trim_both_sgl(sk, rec->sg_plaintext_size); goto sendpage_end; } - if (tls_is_pending_closed_record(tls_ctx)) - goto push_record; - goto alloc_payload; } + if (num_async) { + /* Transmit if any encryptions have completed */ + if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) { + cancel_delayed_work(&ctx->tx_work.work); + tls_tx_records(sk, flags); + } + } sendpage_end: if (orig_size > size) ret = orig_size - size; @@ -684,12 +1121,14 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb, n_sgout = iov_iter_npages(out_iov, INT_MAX) + 1; else n_sgout = sg_nents(out_sg); + n_sgin = skb_nsg(skb, rxm->offset + tls_ctx->rx.prepend_size, + rxm->full_len - tls_ctx->rx.prepend_size); } else { n_sgout = 0; *zc = false; + n_sgin = skb_cow_data(skb, 0, &unused); } - n_sgin = skb_cow_data(skb, 0, &unused); if (n_sgin < 1) return -EBADMSG; @@ -769,7 +1208,10 @@ fallback_to_reg_recv: } /* Prepare and submit AEAD request */ - err = tls_do_decryption(sk, sgin, sgout, iv, data_len, aead_req); + err = tls_do_decryption(sk, skb, sgin, sgout, iv, + data_len, aead_req, *zc); + if (err == -EINPROGRESS) + return err; /* Release the pages in case iov was mapped to pages */ for (; pages > 0; pages--) @@ -794,8 +1236,12 @@ static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb, #endif if (!ctx->decrypted) { err = decrypt_internal(sk, skb, dest, NULL, chunk, zc); - if (err < 0) + if (err < 0) { + if (err == -EINPROGRESS) + tls_advance_record_sn(sk, &tls_ctx->rx); + return err; + } } else { *zc = false; } @@ -823,18 +1269,20 @@ static bool tls_sw_advance_skb(struct sock *sk, struct sk_buff *skb, { struct tls_context *tls_ctx = tls_get_ctx(sk); struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); - struct strp_msg *rxm = strp_msg(skb); - if (len < rxm->full_len) { - rxm->offset += len; - rxm->full_len -= len; + if (skb) { + struct strp_msg *rxm = strp_msg(skb); - return false; + if (len < rxm->full_len) { + rxm->offset += len; + rxm->full_len -= len; + return false; + } + kfree_skb(skb); } /* Finished with message */ ctx->recv_pkt = NULL; - kfree_skb(skb); __strp_unpause(&ctx->strp); return true; @@ -857,6 +1305,7 @@ int tls_sw_recvmsg(struct sock *sk, int target, err = 0; long timeo; bool is_kvec = 
msg->msg_iter.type & ITER_KVEC; + int num_async = 0; flags |= nonblock; @@ -869,6 +1318,7 @@ int tls_sw_recvmsg(struct sock *sk, timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); do { bool zc = false; + bool async = false; int chunk = 0; skb = tls_wait_data(sk, flags, timeo, &err); @@ -876,6 +1326,7 @@ int tls_sw_recvmsg(struct sock *sk, goto recv_end; rxm = strp_msg(skb); + if (!cmsg) { int cerr; @@ -902,26 +1353,39 @@ int tls_sw_recvmsg(struct sock *sk, err = decrypt_skb_update(sk, skb, &msg->msg_iter, &chunk, &zc); - if (err < 0) { + if (err < 0 && err != -EINPROGRESS) { tls_err_abort(sk, EBADMSG); goto recv_end; } + + if (err == -EINPROGRESS) { + async = true; + num_async++; + goto pick_next_record; + } + ctx->decrypted = true; } if (!zc) { chunk = min_t(unsigned int, rxm->full_len, len); + err = skb_copy_datagram_msg(skb, rxm->offset, msg, chunk); if (err < 0) goto recv_end; } +pick_next_record: copied += chunk; len -= chunk; if (likely(!(flags & MSG_PEEK))) { u8 control = ctx->control; + /* For async, drop current skb reference */ + if (async) + skb = NULL; + if (tls_sw_advance_skb(sk, skb, chunk)) { /* Return full control message to * userspace before trying to parse @@ -930,6 +1394,8 @@ int tls_sw_recvmsg(struct sock *sk, msg->msg_flags |= MSG_EOR; if (control != TLS_RECORD_TYPE_DATA) goto recv_end; + } else { + break; } } else { /* MSG_PEEK right now cannot look beyond current skb @@ -946,6 +1412,22 @@ int tls_sw_recvmsg(struct sock *sk, } while (len); recv_end: + if (num_async) { + /* Wait for all previously submitted records to be decrypted */ + smp_store_mb(ctx->async_notify, true); + if (atomic_read(&ctx->decrypt_pending)) { + err = crypto_wait_req(-EINPROGRESS, &ctx->async_wait); + if (err) { + /* one of async decrypt failed */ + tls_err_abort(sk, err); + copied = 0; + } + } else { + reinit_completion(&ctx->async_wait.completion); + } + WRITE_ONCE(ctx->async_notify, false); + } + release_sock(sk); return copied ? : err; } @@ -1106,9 +1588,61 @@ void tls_sw_free_resources_tx(struct sock *sk) { struct tls_context *tls_ctx = tls_get_ctx(sk); struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); + struct tls_rec *rec, *tmp; + + /* Wait for any pending async encryptions to complete */ + smp_store_mb(ctx->async_notify, true); + if (atomic_read(&ctx->encrypt_pending)) + crypto_wait_req(-EINPROGRESS, &ctx->async_wait); + + cancel_delayed_work_sync(&ctx->tx_work.work); + + /* Tx whatever records we can transmit and abandon the rest */ + tls_tx_records(sk, -1); + + /* Free up un-sent records in tx_list. First, free + * the partially sent record if any at head of tx_list. 
+ */ + if (tls_ctx->partially_sent_record) { + struct scatterlist *sg = tls_ctx->partially_sent_record; + + while (1) { + put_page(sg_page(sg)); + sk_mem_uncharge(sk, sg->length); + + if (sg_is_last(sg)) + break; + sg++; + } + + tls_ctx->partially_sent_record = NULL; + + rec = list_first_entry(&ctx->tx_list, + struct tls_rec, list); + + free_sg(sk, &rec->sg_plaintext_data[1], + &rec->sg_plaintext_num_elem, + &rec->sg_plaintext_size); + + list_del(&rec->list); + kfree(rec); + } + + list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) { + free_sg(sk, &rec->sg_encrypted_data[1], + &rec->sg_encrypted_num_elem, + &rec->sg_encrypted_size); + + free_sg(sk, &rec->sg_plaintext_data[1], + &rec->sg_plaintext_num_elem, + &rec->sg_plaintext_size); + + list_del(&rec->list); + kfree(rec); + } crypto_free_aead(ctx->aead_send); - tls_free_both_sg(sk); + tls_free_open_rec(sk); kfree(ctx); } @@ -1142,6 +1676,24 @@ void tls_sw_free_resources_rx(struct sock *sk) kfree(ctx); } +/* The work handler to transmit the encrypted records in tx_list */ +static void tx_work_handler(struct work_struct *work) +{ + struct delayed_work *delayed_work = to_delayed_work(work); + struct tx_work *tx_work = container_of(delayed_work, + struct tx_work, work); + struct sock *sk = tx_work->sk; + struct tls_context *tls_ctx = tls_get_ctx(sk); + struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); + + if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) + return; + + lock_sock(sk); + tls_tx_records(sk, -1); + release_sock(sk); +} + int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx) { struct tls_crypto_info *crypto_info; @@ -1191,6 +1743,9 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx) crypto_info = &ctx->crypto_send.info; cctx = &ctx->tx; aead = &sw_ctx_tx->aead_send; + INIT_LIST_HEAD(&sw_ctx_tx->tx_list); + INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler); + sw_ctx_tx->tx_work.sk = sk; } else { crypto_init_wait(&sw_ctx_rx->async_wait); crypto_info = &ctx->crypto_recv.info; @@ -1241,26 +1796,6 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx) goto free_iv; } - if (sw_ctx_tx) { - sg_init_table(sw_ctx_tx->sg_encrypted_data, - ARRAY_SIZE(sw_ctx_tx->sg_encrypted_data)); - sg_init_table(sw_ctx_tx->sg_plaintext_data, - ARRAY_SIZE(sw_ctx_tx->sg_plaintext_data)); - - sg_init_table(sw_ctx_tx->sg_aead_in, 2); - sg_set_buf(&sw_ctx_tx->sg_aead_in[0], sw_ctx_tx->aad_space, - sizeof(sw_ctx_tx->aad_space)); - sg_unmark_end(&sw_ctx_tx->sg_aead_in[1]); - sg_chain(sw_ctx_tx->sg_aead_in, 2, - sw_ctx_tx->sg_plaintext_data); - sg_init_table(sw_ctx_tx->sg_aead_out, 2); - sg_set_buf(&sw_ctx_tx->sg_aead_out[0], sw_ctx_tx->aad_space, - sizeof(sw_ctx_tx->aad_space)); - sg_unmark_end(&sw_ctx_tx->sg_aead_out[1]); - sg_chain(sw_ctx_tx->sg_aead_out, 2, - sw_ctx_tx->sg_encrypted_data); - } - if (!*aead) { *aead = crypto_alloc_aead("gcm(aes)", 0, 0); if (IS_ERR(*aead)) { diff --git a/net/wireless/core.c b/net/wireless/core.c index a88551f3bc43..5bd01058b9e6 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -1019,36 +1019,49 @@ void cfg80211_cqm_config_free(struct wireless_dev *wdev) wdev->cqm_config = NULL; } -void cfg80211_unregister_wdev(struct wireless_dev *wdev) +static void __cfg80211_unregister_wdev(struct wireless_dev *wdev, bool sync) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); ASSERT_RTNL(); - if (WARN_ON(wdev->netdev)) - return; - nl80211_notify_iface(rdev, wdev, NL80211_CMD_DEL_INTERFACE); 
list_del_rcu(&wdev->list); - synchronize_rcu(); + if (sync) + synchronize_rcu(); rdev->devlist_generation++; + cfg80211_mlme_purge_registrations(wdev); + switch (wdev->iftype) { case NL80211_IFTYPE_P2P_DEVICE: - cfg80211_mlme_purge_registrations(wdev); cfg80211_stop_p2p_device(rdev, wdev); break; case NL80211_IFTYPE_NAN: cfg80211_stop_nan(rdev, wdev); break; default: - WARN_ON_ONCE(1); break; } +#ifdef CONFIG_CFG80211_WEXT + kzfree(wdev->wext.keys); +#endif + /* only initialized if we have a netdev */ + if (wdev->netdev) + flush_work(&wdev->disconnect_wk); + cfg80211_cqm_config_free(wdev); } + +void cfg80211_unregister_wdev(struct wireless_dev *wdev) +{ + if (WARN_ON(wdev->netdev)) + return; + + __cfg80211_unregister_wdev(wdev, true); +} EXPORT_SYMBOL(cfg80211_unregister_wdev); static const struct device_type wiphy_type = { @@ -1153,6 +1166,30 @@ void cfg80211_stop_iface(struct wiphy *wiphy, struct wireless_dev *wdev, } EXPORT_SYMBOL(cfg80211_stop_iface); +void cfg80211_init_wdev(struct cfg80211_registered_device *rdev, + struct wireless_dev *wdev) +{ + mutex_init(&wdev->mtx); + INIT_LIST_HEAD(&wdev->event_list); + spin_lock_init(&wdev->event_lock); + INIT_LIST_HEAD(&wdev->mgmt_registrations); + spin_lock_init(&wdev->mgmt_registrations_lock); + + /* + * We get here also when the interface changes network namespaces, + * as it's registered into the new one, but we don't want it to + * change ID in that case. Checking if the ID is already assigned + * works, because 0 isn't considered a valid ID and the memory is + * 0-initialized. + */ + if (!wdev->identifier) + wdev->identifier = ++rdev->wdev_id; + list_add_rcu(&wdev->list, &rdev->wiphy.wdev_list); + rdev->devlist_generation++; + + nl80211_notify_iface(rdev, wdev, NL80211_CMD_NEW_INTERFACE); +} + static int cfg80211_netdev_notifier_call(struct notifier_block *nb, unsigned long state, void *ptr) { @@ -1178,23 +1215,6 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, * called within code protected by it when interfaces * are added with nl80211. */ - mutex_init(&wdev->mtx); - INIT_LIST_HEAD(&wdev->event_list); - spin_lock_init(&wdev->event_lock); - INIT_LIST_HEAD(&wdev->mgmt_registrations); - spin_lock_init(&wdev->mgmt_registrations_lock); - - /* - * We get here also when the interface changes network namespaces, - * as it's registered into the new one, but we don't want it to - * change ID in that case. Checking if the ID is already assigned - * works, because 0 isn't considered a valid ID and the memory is - * 0-initialized. - */ - if (!wdev->identifier) - wdev->identifier = ++rdev->wdev_id; - list_add_rcu(&wdev->list, &rdev->wiphy.wdev_list); - rdev->devlist_generation++; /* can only change netns with wiphy */ dev->features |= NETIF_F_NETNS_LOCAL; @@ -1223,7 +1243,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, INIT_WORK(&wdev->disconnect_wk, cfg80211_autodisconnect_wk); - nl80211_notify_iface(rdev, wdev, NL80211_CMD_NEW_INTERFACE); + cfg80211_init_wdev(rdev, wdev); break; case NETDEV_GOING_DOWN: cfg80211_leave(rdev, wdev); @@ -1238,7 +1258,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, list_for_each_entry_safe(pos, tmp, &rdev->sched_scan_req_list, list) { - if (WARN_ON(pos && pos->dev == wdev->netdev)) + if (WARN_ON(pos->dev == wdev->netdev)) cfg80211_stop_sched_scan_req(rdev, pos, false); } @@ -1302,17 +1322,8 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, * remove and clean it up. 
*/ if (!list_empty(&wdev->list)) { - nl80211_notify_iface(rdev, wdev, - NL80211_CMD_DEL_INTERFACE); + __cfg80211_unregister_wdev(wdev, false); sysfs_remove_link(&dev->dev.kobj, "phy80211"); - list_del_rcu(&wdev->list); - rdev->devlist_generation++; - cfg80211_mlme_purge_registrations(wdev); -#ifdef CONFIG_CFG80211_WEXT - kzfree(wdev->wext.keys); -#endif - flush_work(&wdev->disconnect_wk); - cfg80211_cqm_config_free(wdev); } /* * synchronise (so that we won't find this netdev diff --git a/net/wireless/core.h b/net/wireless/core.h index 7f52ef569320..c61dbba8bf47 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h @@ -66,6 +66,7 @@ struct cfg80211_registered_device { /* protected by RTNL only */ int num_running_ifaces; int num_running_monitor_ifaces; + u64 cookie_counter; /* BSSes/scanning */ spinlock_t bss_lock; @@ -133,6 +134,16 @@ cfg80211_rdev_free_wowlan(struct cfg80211_registered_device *rdev) #endif } +static inline u64 cfg80211_assign_cookie(struct cfg80211_registered_device *rdev) +{ + u64 r = ++rdev->cookie_counter; + + if (WARN_ON(r == 0)) + r = ++rdev->cookie_counter; + + return r; +} + extern struct workqueue_struct *cfg80211_wq; extern struct list_head cfg80211_rdev_list; extern int cfg80211_rdev_list_generation; @@ -187,6 +198,9 @@ struct wiphy *wiphy_idx_to_wiphy(int wiphy_idx); int cfg80211_switch_netns(struct cfg80211_registered_device *rdev, struct net *net); +void cfg80211_init_wdev(struct cfg80211_registered_device *rdev, + struct wireless_dev *wdev); + static inline void wdev_lock(struct wireless_dev *wdev) __acquires(wdev) { diff --git a/net/wireless/lib80211_crypt_tkip.c b/net/wireless/lib80211_crypt_tkip.c index e6bce1f130c9..b5e235573c8a 100644 --- a/net/wireless/lib80211_crypt_tkip.c +++ b/net/wireless/lib80211_crypt_tkip.c @@ -30,7 +30,7 @@ #include <net/iw_handler.h> #include <crypto/hash.h> -#include <crypto/skcipher.h> +#include <linux/crypto.h> #include <linux/crc32.h> #include <net/lib80211.h> @@ -64,9 +64,9 @@ struct lib80211_tkip_data { int key_idx; - struct crypto_skcipher *rx_tfm_arc4; + struct crypto_cipher *rx_tfm_arc4; struct crypto_shash *rx_tfm_michael; - struct crypto_skcipher *tx_tfm_arc4; + struct crypto_cipher *tx_tfm_arc4; struct crypto_shash *tx_tfm_michael; /* scratch buffers for virt_to_page() (crypto API) */ @@ -99,8 +99,7 @@ static void *lib80211_tkip_init(int key_idx) priv->key_idx = key_idx; - priv->tx_tfm_arc4 = crypto_alloc_skcipher("ecb(arc4)", 0, - CRYPTO_ALG_ASYNC); + priv->tx_tfm_arc4 = crypto_alloc_cipher("arc4", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(priv->tx_tfm_arc4)) { priv->tx_tfm_arc4 = NULL; goto fail; @@ -112,8 +111,7 @@ static void *lib80211_tkip_init(int key_idx) goto fail; } - priv->rx_tfm_arc4 = crypto_alloc_skcipher("ecb(arc4)", 0, - CRYPTO_ALG_ASYNC); + priv->rx_tfm_arc4 = crypto_alloc_cipher("arc4", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(priv->rx_tfm_arc4)) { priv->rx_tfm_arc4 = NULL; goto fail; @@ -130,9 +128,9 @@ static void *lib80211_tkip_init(int key_idx) fail: if (priv) { crypto_free_shash(priv->tx_tfm_michael); - crypto_free_skcipher(priv->tx_tfm_arc4); + crypto_free_cipher(priv->tx_tfm_arc4); crypto_free_shash(priv->rx_tfm_michael); - crypto_free_skcipher(priv->rx_tfm_arc4); + crypto_free_cipher(priv->rx_tfm_arc4); kfree(priv); } @@ -144,9 +142,9 @@ static void lib80211_tkip_deinit(void *priv) struct lib80211_tkip_data *_priv = priv; if (_priv) { crypto_free_shash(_priv->tx_tfm_michael); - crypto_free_skcipher(_priv->tx_tfm_arc4); + crypto_free_cipher(_priv->tx_tfm_arc4); crypto_free_shash(_priv->rx_tfm_michael); - 
crypto_free_skcipher(_priv->rx_tfm_arc4); + crypto_free_cipher(_priv->rx_tfm_arc4); } kfree(priv); } @@ -344,12 +342,10 @@ static int lib80211_tkip_hdr(struct sk_buff *skb, int hdr_len, static int lib80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv) { struct lib80211_tkip_data *tkey = priv; - SKCIPHER_REQUEST_ON_STACK(req, tkey->tx_tfm_arc4); int len; u8 rc4key[16], *pos, *icv; u32 crc; - struct scatterlist sg; - int err; + int i; if (tkey->flags & IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; @@ -374,14 +370,10 @@ static int lib80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv) icv[2] = crc >> 16; icv[3] = crc >> 24; - crypto_skcipher_setkey(tkey->tx_tfm_arc4, rc4key, 16); - sg_init_one(&sg, pos, len + 4); - skcipher_request_set_tfm(req, tkey->tx_tfm_arc4); - skcipher_request_set_callback(req, 0, NULL, NULL); - skcipher_request_set_crypt(req, &sg, &sg, len + 4, NULL); - err = crypto_skcipher_encrypt(req); - skcipher_request_zero(req); - return err; + crypto_cipher_setkey(tkey->tx_tfm_arc4, rc4key, 16); + for (i = 0; i < len + 4; i++) + crypto_cipher_encrypt_one(tkey->tx_tfm_arc4, pos + i, pos + i); + return 0; } /* @@ -400,7 +392,6 @@ static inline int tkip_replay_check(u32 iv32_n, u16 iv16_n, static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv) { struct lib80211_tkip_data *tkey = priv; - SKCIPHER_REQUEST_ON_STACK(req, tkey->rx_tfm_arc4); u8 rc4key[16]; u8 keyidx, *pos; u32 iv32; @@ -408,9 +399,8 @@ static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv) struct ieee80211_hdr *hdr; u8 icv[4]; u32 crc; - struct scatterlist sg; int plen; - int err; + int i; hdr = (struct ieee80211_hdr *)skb->data; @@ -463,18 +453,9 @@ static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv) plen = skb->len - hdr_len - 12; - crypto_skcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16); - sg_init_one(&sg, pos, plen + 4); - skcipher_request_set_tfm(req, tkey->rx_tfm_arc4); - skcipher_request_set_callback(req, 0, NULL, NULL); - skcipher_request_set_crypt(req, &sg, &sg, plen + 4, NULL); - err = crypto_skcipher_decrypt(req); - skcipher_request_zero(req); - if (err) { - net_dbg_ratelimited("TKIP: failed to decrypt received packet from %pM\n", - hdr->addr2); - return -7; - } + crypto_cipher_setkey(tkey->rx_tfm_arc4, rc4key, 16); + for (i = 0; i < plen + 4; i++) + crypto_cipher_decrypt_one(tkey->rx_tfm_arc4, pos + i, pos + i); crc = ~crc32_le(~0, pos, plen); icv[0] = crc; @@ -660,9 +641,9 @@ static int lib80211_tkip_set_key(void *key, int len, u8 * seq, void *priv) struct lib80211_tkip_data *tkey = priv; int keyidx; struct crypto_shash *tfm = tkey->tx_tfm_michael; - struct crypto_skcipher *tfm2 = tkey->tx_tfm_arc4; + struct crypto_cipher *tfm2 = tkey->tx_tfm_arc4; struct crypto_shash *tfm3 = tkey->rx_tfm_michael; - struct crypto_skcipher *tfm4 = tkey->rx_tfm_arc4; + struct crypto_cipher *tfm4 = tkey->rx_tfm_arc4; keyidx = tkey->key_idx; memset(tkey, 0, sizeof(*tkey)); diff --git a/net/wireless/lib80211_crypt_wep.c b/net/wireless/lib80211_crypt_wep.c index d05f58b0fd04..6015f6b542a6 100644 --- a/net/wireless/lib80211_crypt_wep.c +++ b/net/wireless/lib80211_crypt_wep.c @@ -22,7 +22,7 @@ #include <net/lib80211.h> -#include <crypto/skcipher.h> +#include <linux/crypto.h> #include <linux/crc32.h> MODULE_AUTHOR("Jouni Malinen"); @@ -35,8 +35,8 @@ struct lib80211_wep_data { u8 key[WEP_KEY_LEN + 1]; u8 key_len; u8 key_idx; - struct crypto_skcipher *tx_tfm; - struct 
crypto_skcipher *rx_tfm; + struct crypto_cipher *tx_tfm; + struct crypto_cipher *rx_tfm; }; static void *lib80211_wep_init(int keyidx) @@ -48,13 +48,13 @@ static void *lib80211_wep_init(int keyidx) goto fail; priv->key_idx = keyidx; - priv->tx_tfm = crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC); + priv->tx_tfm = crypto_alloc_cipher("arc4", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(priv->tx_tfm)) { priv->tx_tfm = NULL; goto fail; } - priv->rx_tfm = crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC); + priv->rx_tfm = crypto_alloc_cipher("arc4", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(priv->rx_tfm)) { priv->rx_tfm = NULL; goto fail; @@ -66,8 +66,8 @@ static void *lib80211_wep_init(int keyidx) fail: if (priv) { - crypto_free_skcipher(priv->tx_tfm); - crypto_free_skcipher(priv->rx_tfm); + crypto_free_cipher(priv->tx_tfm); + crypto_free_cipher(priv->rx_tfm); kfree(priv); } return NULL; @@ -77,8 +77,8 @@ static void lib80211_wep_deinit(void *priv) { struct lib80211_wep_data *_priv = priv; if (_priv) { - crypto_free_skcipher(_priv->tx_tfm); - crypto_free_skcipher(_priv->rx_tfm); + crypto_free_cipher(_priv->tx_tfm); + crypto_free_cipher(_priv->rx_tfm); } kfree(priv); } @@ -129,12 +129,10 @@ static int lib80211_wep_build_iv(struct sk_buff *skb, int hdr_len, static int lib80211_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv) { struct lib80211_wep_data *wep = priv; - SKCIPHER_REQUEST_ON_STACK(req, wep->tx_tfm); u32 crc, klen, len; u8 *pos, *icv; - struct scatterlist sg; u8 key[WEP_KEY_LEN + 3]; - int err; + int i; /* other checks are in lib80211_wep_build_iv */ if (skb_tailroom(skb) < 4) @@ -162,14 +160,12 @@ static int lib80211_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv) icv[2] = crc >> 16; icv[3] = crc >> 24; - crypto_skcipher_setkey(wep->tx_tfm, key, klen); - sg_init_one(&sg, pos, len + 4); - skcipher_request_set_tfm(req, wep->tx_tfm); - skcipher_request_set_callback(req, 0, NULL, NULL); - skcipher_request_set_crypt(req, &sg, &sg, len + 4, NULL); - err = crypto_skcipher_encrypt(req); - skcipher_request_zero(req); - return err; + crypto_cipher_setkey(wep->tx_tfm, key, klen); + + for (i = 0; i < len + 4; i++) + crypto_cipher_encrypt_one(wep->tx_tfm, pos + i, pos + i); + + return 0; } /* Perform WEP decryption on given buffer. 
Buffer includes whole WEP part of @@ -182,12 +178,10 @@ static int lib80211_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv) static int lib80211_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv) { struct lib80211_wep_data *wep = priv; - SKCIPHER_REQUEST_ON_STACK(req, wep->rx_tfm); u32 crc, klen, plen; u8 key[WEP_KEY_LEN + 3]; u8 keyidx, *pos, icv[4]; - struct scatterlist sg; - int err; + int i; if (skb->len < hdr_len + 8) return -1; @@ -208,15 +202,9 @@ static int lib80211_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv) /* Apply RC4 to data and compute CRC32 over decrypted data */ plen = skb->len - hdr_len - 8; - crypto_skcipher_setkey(wep->rx_tfm, key, klen); - sg_init_one(&sg, pos, plen + 4); - skcipher_request_set_tfm(req, wep->rx_tfm); - skcipher_request_set_callback(req, 0, NULL, NULL); - skcipher_request_set_crypt(req, &sg, &sg, plen + 4, NULL); - err = crypto_skcipher_decrypt(req); - skcipher_request_zero(req); - if (err) - return -7; + crypto_cipher_setkey(wep->rx_tfm, key, klen); + for (i = 0; i < plen + 4; i++) + crypto_cipher_decrypt_one(wep->rx_tfm, pos + i, pos + i); crc = ~crc32_le(~0, pos, plen); icv[0] = crc; diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 176edfefcbaa..744b5851bbf9 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -200,7 +200,46 @@ cfg80211_get_dev_from_info(struct net *netns, struct genl_info *info) return __cfg80211_rdev_from_attrs(netns, info->attrs); } +static int validate_ie_attr(const struct nlattr *attr, + struct netlink_ext_ack *extack) +{ + const u8 *pos; + int len; + + pos = nla_data(attr); + len = nla_len(attr); + + while (len) { + u8 elemlen; + + if (len < 2) + goto error; + len -= 2; + + elemlen = pos[1]; + if (elemlen > len) + goto error; + + len -= elemlen; + pos += 2 + elemlen; + } + + return 0; +error: + NL_SET_ERR_MSG_ATTR(extack, attr, "malformed information elements"); + return -EINVAL; +} + /* policy for the attributes */ +static const struct nla_policy +nl80211_ftm_responder_policy[NL80211_FTM_RESP_ATTR_MAX + 1] = { + [NL80211_FTM_RESP_ATTR_ENABLED] = { .type = NLA_FLAG, }, + [NL80211_FTM_RESP_ATTR_LCI] = { .type = NLA_BINARY, + .len = U8_MAX }, + [NL80211_FTM_RESP_ATTR_CIVICLOC] = { .type = NLA_BINARY, + .len = U8_MAX }, +}; + static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { [NL80211_ATTR_WIPHY] = { .type = NLA_U32 }, [NL80211_ATTR_WIPHY_NAME] = { .type = NLA_NUL_STRING, @@ -213,14 +252,14 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { [NL80211_ATTR_CENTER_FREQ1] = { .type = NLA_U32 }, [NL80211_ATTR_CENTER_FREQ2] = { .type = NLA_U32 }, - [NL80211_ATTR_WIPHY_RETRY_SHORT] = { .type = NLA_U8 }, - [NL80211_ATTR_WIPHY_RETRY_LONG] = { .type = NLA_U8 }, + [NL80211_ATTR_WIPHY_RETRY_SHORT] = NLA_POLICY_MIN(NLA_U8, 1), + [NL80211_ATTR_WIPHY_RETRY_LONG] = NLA_POLICY_MIN(NLA_U8, 1), [NL80211_ATTR_WIPHY_FRAG_THRESHOLD] = { .type = NLA_U32 }, [NL80211_ATTR_WIPHY_RTS_THRESHOLD] = { .type = NLA_U32 }, [NL80211_ATTR_WIPHY_COVERAGE_CLASS] = { .type = NLA_U8 }, [NL80211_ATTR_WIPHY_DYN_ACK] = { .type = NLA_FLAG }, - [NL80211_ATTR_IFTYPE] = { .type = NLA_U32 }, + [NL80211_ATTR_IFTYPE] = NLA_POLICY_MAX(NLA_U32, NL80211_IFTYPE_MAX), [NL80211_ATTR_IFINDEX] = { .type = NLA_U32 }, [NL80211_ATTR_IFNAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ-1 }, @@ -230,24 +269,28 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { [NL80211_ATTR_KEY] = { .type = NLA_NESTED, }, [NL80211_ATTR_KEY_DATA] = { .type = NLA_BINARY, .len = WLAN_MAX_KEY_LEN 
}, - [NL80211_ATTR_KEY_IDX] = { .type = NLA_U8 }, + [NL80211_ATTR_KEY_IDX] = NLA_POLICY_MAX(NLA_U8, 5), [NL80211_ATTR_KEY_CIPHER] = { .type = NLA_U32 }, [NL80211_ATTR_KEY_DEFAULT] = { .type = NLA_FLAG }, [NL80211_ATTR_KEY_SEQ] = { .type = NLA_BINARY, .len = 16 }, - [NL80211_ATTR_KEY_TYPE] = { .type = NLA_U32 }, + [NL80211_ATTR_KEY_TYPE] = + NLA_POLICY_MAX(NLA_U32, NUM_NL80211_KEYTYPES), [NL80211_ATTR_BEACON_INTERVAL] = { .type = NLA_U32 }, [NL80211_ATTR_DTIM_PERIOD] = { .type = NLA_U32 }, [NL80211_ATTR_BEACON_HEAD] = { .type = NLA_BINARY, .len = IEEE80211_MAX_DATA_LEN }, - [NL80211_ATTR_BEACON_TAIL] = { .type = NLA_BINARY, - .len = IEEE80211_MAX_DATA_LEN }, - [NL80211_ATTR_STA_AID] = { .type = NLA_U16 }, + [NL80211_ATTR_BEACON_TAIL] = + NLA_POLICY_VALIDATE_FN(NLA_BINARY, validate_ie_attr, + IEEE80211_MAX_DATA_LEN), + [NL80211_ATTR_STA_AID] = + NLA_POLICY_RANGE(NLA_U16, 1, IEEE80211_MAX_AID), [NL80211_ATTR_STA_FLAGS] = { .type = NLA_NESTED }, [NL80211_ATTR_STA_LISTEN_INTERVAL] = { .type = NLA_U16 }, [NL80211_ATTR_STA_SUPPORTED_RATES] = { .type = NLA_BINARY, .len = NL80211_MAX_SUPP_RATES }, - [NL80211_ATTR_STA_PLINK_ACTION] = { .type = NLA_U8 }, + [NL80211_ATTR_STA_PLINK_ACTION] = + NLA_POLICY_MAX(NLA_U8, NUM_NL80211_PLINK_ACTIONS - 1), [NL80211_ATTR_STA_VLAN] = { .type = NLA_U32 }, [NL80211_ATTR_MNTR_FLAGS] = { /* NLA_NESTED can't be empty */ }, [NL80211_ATTR_MESH_ID] = { .type = NLA_BINARY, @@ -270,8 +313,9 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { [NL80211_ATTR_HT_CAPABILITY] = { .len = NL80211_HT_CAPABILITY_LEN }, [NL80211_ATTR_MGMT_SUBTYPE] = { .type = NLA_U8 }, - [NL80211_ATTR_IE] = { .type = NLA_BINARY, - .len = IEEE80211_MAX_DATA_LEN }, + [NL80211_ATTR_IE] = NLA_POLICY_VALIDATE_FN(NLA_BINARY, + validate_ie_attr, + IEEE80211_MAX_DATA_LEN), [NL80211_ATTR_SCAN_FREQUENCIES] = { .type = NLA_NESTED }, [NL80211_ATTR_SCAN_SSIDS] = { .type = NLA_NESTED }, @@ -281,7 +325,9 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { [NL80211_ATTR_REASON_CODE] = { .type = NLA_U16 }, [NL80211_ATTR_FREQ_FIXED] = { .type = NLA_FLAG }, [NL80211_ATTR_TIMED_OUT] = { .type = NLA_FLAG }, - [NL80211_ATTR_USE_MFP] = { .type = NLA_U32 }, + [NL80211_ATTR_USE_MFP] = NLA_POLICY_RANGE(NLA_U32, + NL80211_MFP_NO, + NL80211_MFP_OPTIONAL), [NL80211_ATTR_STA_FLAGS2] = { .len = sizeof(struct nl80211_sta_flag_update), }, @@ -301,7 +347,9 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { [NL80211_ATTR_FRAME] = { .type = NLA_BINARY, .len = IEEE80211_MAX_DATA_LEN }, [NL80211_ATTR_FRAME_MATCH] = { .type = NLA_BINARY, }, - [NL80211_ATTR_PS_STATE] = { .type = NLA_U32 }, + [NL80211_ATTR_PS_STATE] = NLA_POLICY_RANGE(NLA_U32, + NL80211_PS_DISABLED, + NL80211_PS_ENABLED), [NL80211_ATTR_CQM] = { .type = NLA_NESTED, }, [NL80211_ATTR_LOCAL_STATE_CHANGE] = { .type = NLA_FLAG }, [NL80211_ATTR_AP_ISOLATE] = { .type = NLA_U8 }, @@ -314,15 +362,23 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { [NL80211_ATTR_OFFCHANNEL_TX_OK] = { .type = NLA_FLAG }, [NL80211_ATTR_KEY_DEFAULT_TYPES] = { .type = NLA_NESTED }, [NL80211_ATTR_WOWLAN_TRIGGERS] = { .type = NLA_NESTED }, - [NL80211_ATTR_STA_PLINK_STATE] = { .type = NLA_U8 }, + [NL80211_ATTR_STA_PLINK_STATE] = + NLA_POLICY_MAX(NLA_U8, NUM_NL80211_PLINK_STATES - 1), + [NL80211_ATTR_MESH_PEER_AID] = + NLA_POLICY_RANGE(NLA_U16, 1, IEEE80211_MAX_AID), [NL80211_ATTR_SCHED_SCAN_INTERVAL] = { .type = NLA_U32 }, [NL80211_ATTR_REKEY_DATA] = { .type = NLA_NESTED }, [NL80211_ATTR_SCAN_SUPP_RATES] = { .type = NLA_NESTED }, 
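The conversions in this policy table rely on the NLA_POLICY_MIN/MAX/RANGE and NLA_POLICY_VALIDATE_FN helpers, which make nla_parse() reject out-of-range attributes before any command handler runs; that is what lets the patch delete the open-coded checks further down. A reduced sketch of the pattern, with invented DEMO_ATTR_* names (the entries in this table are the real instances):

	/* Illustrative only: DEMO_ATTR_* and demo_policy do not exist. */
	static const struct nla_policy demo_policy[DEMO_ATTR_MAX + 1] = {
		/* the core parser guarantees the u8 lies in 1..100 */
		[DEMO_ATTR_LEVEL] = NLA_POLICY_RANGE(NLA_U8, 1, 100),
		/* binary blob checked by a callback, as for the IE
		 * attributes validated with validate_ie_attr() here */
		[DEMO_ATTR_ELEMS] = NLA_POLICY_VALIDATE_FN(NLA_BINARY,
							   validate_ie_attr,
							   IEEE80211_MAX_DATA_LEN),
	};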
- [NL80211_ATTR_HIDDEN_SSID] = { .type = NLA_U32 }, - [NL80211_ATTR_IE_PROBE_RESP] = { .type = NLA_BINARY, - .len = IEEE80211_MAX_DATA_LEN }, - [NL80211_ATTR_IE_ASSOC_RESP] = { .type = NLA_BINARY, - .len = IEEE80211_MAX_DATA_LEN }, + [NL80211_ATTR_HIDDEN_SSID] = + NLA_POLICY_RANGE(NLA_U32, + NL80211_HIDDEN_SSID_NOT_IN_USE, + NL80211_HIDDEN_SSID_ZERO_CONTENTS), + [NL80211_ATTR_IE_PROBE_RESP] = + NLA_POLICY_VALIDATE_FN(NLA_BINARY, validate_ie_attr, + IEEE80211_MAX_DATA_LEN), + [NL80211_ATTR_IE_ASSOC_RESP] = + NLA_POLICY_VALIDATE_FN(NLA_BINARY, validate_ie_attr, + IEEE80211_MAX_DATA_LEN), [NL80211_ATTR_ROAM_SUPPORT] = { .type = NLA_FLAG }, [NL80211_ATTR_SCHED_SCAN_MATCH] = { .type = NLA_NESTED }, [NL80211_ATTR_TX_NO_CCK_RATE] = { .type = NLA_FLAG }, @@ -348,9 +404,12 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { [NL80211_ATTR_AUTH_DATA] = { .type = NLA_BINARY, }, [NL80211_ATTR_VHT_CAPABILITY] = { .len = NL80211_VHT_CAPABILITY_LEN }, [NL80211_ATTR_SCAN_FLAGS] = { .type = NLA_U32 }, - [NL80211_ATTR_P2P_CTWINDOW] = { .type = NLA_U8 }, - [NL80211_ATTR_P2P_OPPPS] = { .type = NLA_U8 }, - [NL80211_ATTR_LOCAL_MESH_POWER_MODE] = {. type = NLA_U32 }, + [NL80211_ATTR_P2P_CTWINDOW] = NLA_POLICY_MAX(NLA_U8, 127), + [NL80211_ATTR_P2P_OPPPS] = NLA_POLICY_MAX(NLA_U8, 1), + [NL80211_ATTR_LOCAL_MESH_POWER_MODE] = + NLA_POLICY_RANGE(NLA_U32, + NL80211_MESH_POWER_UNKNOWN + 1, + NL80211_MESH_POWER_MAX), [NL80211_ATTR_ACL_POLICY] = {. type = NLA_U32 }, [NL80211_ATTR_MAC_ADDRS] = { .type = NLA_NESTED }, [NL80211_ATTR_STA_CAPABILITY] = { .type = NLA_U16 }, @@ -363,7 +422,8 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { [NL80211_ATTR_MDID] = { .type = NLA_U16 }, [NL80211_ATTR_IE_RIC] = { .type = NLA_BINARY, .len = IEEE80211_MAX_DATA_LEN }, - [NL80211_ATTR_PEER_AID] = { .type = NLA_U16 }, + [NL80211_ATTR_PEER_AID] = + NLA_POLICY_RANGE(NLA_U16, 1, IEEE80211_MAX_AID), [NL80211_ATTR_CH_SWITCH_COUNT] = { .type = NLA_U32 }, [NL80211_ATTR_CH_SWITCH_BLOCK_TX] = { .type = NLA_FLAG }, [NL80211_ATTR_CSA_IES] = { .type = NLA_NESTED }, @@ -384,8 +444,9 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { [NL80211_ATTR_SOCKET_OWNER] = { .type = NLA_FLAG }, [NL80211_ATTR_CSA_C_OFFSETS_TX] = { .type = NLA_BINARY }, [NL80211_ATTR_USE_RRM] = { .type = NLA_FLAG }, - [NL80211_ATTR_TSID] = { .type = NLA_U8 }, - [NL80211_ATTR_USER_PRIO] = { .type = NLA_U8 }, + [NL80211_ATTR_TSID] = NLA_POLICY_MAX(NLA_U8, IEEE80211_NUM_TIDS - 1), + [NL80211_ATTR_USER_PRIO] = + NLA_POLICY_MAX(NLA_U8, IEEE80211_NUM_UPS - 1), [NL80211_ATTR_ADMITTED_TIME] = { .type = NLA_U16 }, [NL80211_ATTR_SMPS_MODE] = { .type = NLA_U8 }, [NL80211_ATTR_MAC_MASK] = { .len = ETH_ALEN }, @@ -395,12 +456,13 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { [NL80211_ATTR_REG_INDOOR] = { .type = NLA_FLAG }, [NL80211_ATTR_PBSS] = { .type = NLA_FLAG }, [NL80211_ATTR_BSS_SELECT] = { .type = NLA_NESTED }, - [NL80211_ATTR_STA_SUPPORT_P2P_PS] = { .type = NLA_U8 }, + [NL80211_ATTR_STA_SUPPORT_P2P_PS] = + NLA_POLICY_MAX(NLA_U8, NUM_NL80211_P2P_PS_STATUS - 1), [NL80211_ATTR_MU_MIMO_GROUP_DATA] = { .len = VHT_MUMIMO_GROUPS_DATA_LEN }, [NL80211_ATTR_MU_MIMO_FOLLOW_MAC_ADDR] = { .len = ETH_ALEN }, - [NL80211_ATTR_NAN_MASTER_PREF] = { .type = NLA_U8 }, + [NL80211_ATTR_NAN_MASTER_PREF] = NLA_POLICY_MIN(NLA_U8, 1), [NL80211_ATTR_BANDS] = { .type = NLA_U32 }, [NL80211_ATTR_NAN_FUNC] = { .type = NLA_NESTED }, [NL80211_ATTR_FILS_KEK] = { .type = NLA_BINARY, @@ -430,6 +492,11 @@ static const struct nla_policy 
nl80211_policy[NUM_NL80211_ATTR] = { [NL80211_ATTR_TXQ_QUANTUM] = { .type = NLA_U32 }, [NL80211_ATTR_HE_CAPABILITY] = { .type = NLA_BINARY, .len = NL80211_HE_MAX_CAPABILITY_LEN }, + + [NL80211_ATTR_FTM_RESPONDER] = { + .type = NLA_NESTED, + .validation_data = nl80211_ftm_responder_policy, + }, }; /* policy for the key attributes */ @@ -440,7 +507,7 @@ static const struct nla_policy nl80211_key_policy[NL80211_KEY_MAX + 1] = { [NL80211_KEY_SEQ] = { .type = NLA_BINARY, .len = 16 }, [NL80211_KEY_DEFAULT] = { .type = NLA_FLAG }, [NL80211_KEY_DEFAULT_MGMT] = { .type = NLA_FLAG }, - [NL80211_KEY_TYPE] = { .type = NLA_U32 }, + [NL80211_KEY_TYPE] = NLA_POLICY_MAX(NLA_U32, NUM_NL80211_KEYTYPES - 1), [NL80211_KEY_DEFAULT_TYPES] = { .type = NLA_NESTED }, }; @@ -491,7 +558,10 @@ nl80211_wowlan_tcp_policy[NUM_NL80211_WOWLAN_TCP] = { static const struct nla_policy nl80211_coalesce_policy[NUM_NL80211_ATTR_COALESCE_RULE] = { [NL80211_ATTR_COALESCE_RULE_DELAY] = { .type = NLA_U32 }, - [NL80211_ATTR_COALESCE_RULE_CONDITION] = { .type = NLA_U32 }, + [NL80211_ATTR_COALESCE_RULE_CONDITION] = + NLA_POLICY_RANGE(NLA_U32, + NL80211_COALESCE_CONDITION_MATCH, + NL80211_COALESCE_CONDITION_NO_MATCH), [NL80211_ATTR_COALESCE_RULE_PKT_PATTERN] = { .type = NLA_NESTED }, }; @@ -567,8 +637,7 @@ nl80211_packet_pattern_policy[MAX_NL80211_PKTPAT + 1] = { [NL80211_PKTPAT_OFFSET] = { .type = NLA_U32 }, }; -static int nl80211_prepare_wdev_dump(struct sk_buff *skb, - struct netlink_callback *cb, +static int nl80211_prepare_wdev_dump(struct netlink_callback *cb, struct cfg80211_registered_device **rdev, struct wireless_dev **wdev) { @@ -582,7 +651,7 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb, return err; *wdev = __cfg80211_wdev_from_attrs( - sock_net(skb->sk), + sock_net(cb->skb->sk), genl_family_attrbuf(&nl80211_fam)); if (IS_ERR(*wdev)) return PTR_ERR(*wdev); @@ -614,36 +683,6 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb, return 0; } -/* IE validation */ -static bool is_valid_ie_attr(const struct nlattr *attr) -{ - const u8 *pos; - int len; - - if (!attr) - return true; - - pos = nla_data(attr); - len = nla_len(attr); - - while (len) { - u8 elemlen; - - if (len < 2) - return false; - len -= 2; - - elemlen = pos[1]; - if (elemlen > len) - return false; - - len -= elemlen; - pos += 2 + elemlen; - } - - return true; -} - /* message building helper */ static inline void *nl80211hdr_put(struct sk_buff *skb, u32 portid, u32 seq, int flags, u8 cmd) @@ -858,12 +897,8 @@ static int nl80211_parse_key_new(struct genl_info *info, struct nlattr *key, if (tb[NL80211_KEY_CIPHER]) k->p.cipher = nla_get_u32(tb[NL80211_KEY_CIPHER]); - if (tb[NL80211_KEY_TYPE]) { + if (tb[NL80211_KEY_TYPE]) k->type = nla_get_u32(tb[NL80211_KEY_TYPE]); - if (k->type < 0 || k->type >= NUM_NL80211_KEYTYPES) - return genl_err_attr(info, -EINVAL, - tb[NL80211_KEY_TYPE]); - } if (tb[NL80211_KEY_DEFAULT_TYPES]) { struct nlattr *kdt[NUM_NL80211_KEY_DEFAULT_TYPES]; @@ -910,13 +945,8 @@ static int nl80211_parse_key_old(struct genl_info *info, struct key_parse *k) if (k->defmgmt) k->def_multi = true; - if (info->attrs[NL80211_ATTR_KEY_TYPE]) { + if (info->attrs[NL80211_ATTR_KEY_TYPE]) k->type = nla_get_u32(info->attrs[NL80211_ATTR_KEY_TYPE]); - if (k->type < 0 || k->type >= NUM_NL80211_KEYTYPES) { - GENL_SET_ERR_MSG(info, "key type out of range"); - return -EINVAL; - } - } if (info->attrs[NL80211_ATTR_KEY_DEFAULT_TYPES]) { struct nlattr *kdt[NUM_NL80211_KEY_DEFAULT_TYPES]; @@ -2292,12 +2322,14 @@ static int nl80211_parse_chandef(struct 
cfg80211_registered_device *rdev, struct genl_info *info, struct cfg80211_chan_def *chandef) { + struct netlink_ext_ack *extack = info->extack; + struct nlattr **attrs = info->attrs; u32 control_freq; - if (!info->attrs[NL80211_ATTR_WIPHY_FREQ]) + if (!attrs[NL80211_ATTR_WIPHY_FREQ]) return -EINVAL; - control_freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]); + control_freq = nla_get_u32(attrs[NL80211_ATTR_WIPHY_FREQ]); chandef->chan = ieee80211_get_channel(&rdev->wiphy, control_freq); chandef->width = NL80211_CHAN_WIDTH_20_NOHT; @@ -2305,14 +2337,16 @@ static int nl80211_parse_chandef(struct cfg80211_registered_device *rdev, chandef->center_freq2 = 0; /* Primary channel not allowed */ - if (!chandef->chan || chandef->chan->flags & IEEE80211_CHAN_DISABLED) + if (!chandef->chan || chandef->chan->flags & IEEE80211_CHAN_DISABLED) { + NL_SET_ERR_MSG_ATTR(extack, attrs[NL80211_ATTR_WIPHY_FREQ], + "Channel is disabled"); return -EINVAL; + } - if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) { + if (attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) { enum nl80211_channel_type chantype; - chantype = nla_get_u32( - info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]); + chantype = nla_get_u32(attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]); switch (chantype) { case NL80211_CHAN_NO_HT: @@ -2322,42 +2356,56 @@ static int nl80211_parse_chandef(struct cfg80211_registered_device *rdev, cfg80211_chandef_create(chandef, chandef->chan, chantype); /* user input for center_freq is incorrect */ - if (info->attrs[NL80211_ATTR_CENTER_FREQ1] && - chandef->center_freq1 != nla_get_u32( - info->attrs[NL80211_ATTR_CENTER_FREQ1])) + if (attrs[NL80211_ATTR_CENTER_FREQ1] && + chandef->center_freq1 != nla_get_u32(attrs[NL80211_ATTR_CENTER_FREQ1])) { + NL_SET_ERR_MSG_ATTR(extack, + attrs[NL80211_ATTR_CENTER_FREQ1], + "bad center frequency 1"); return -EINVAL; + } /* center_freq2 must be zero */ - if (info->attrs[NL80211_ATTR_CENTER_FREQ2] && - nla_get_u32(info->attrs[NL80211_ATTR_CENTER_FREQ2])) + if (attrs[NL80211_ATTR_CENTER_FREQ2] && + nla_get_u32(attrs[NL80211_ATTR_CENTER_FREQ2])) { + NL_SET_ERR_MSG_ATTR(extack, + attrs[NL80211_ATTR_CENTER_FREQ2], + "center frequency 2 can't be used"); return -EINVAL; + } break; default: + NL_SET_ERR_MSG_ATTR(extack, + attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE], + "invalid channel type"); return -EINVAL; } - } else if (info->attrs[NL80211_ATTR_CHANNEL_WIDTH]) { + } else if (attrs[NL80211_ATTR_CHANNEL_WIDTH]) { chandef->width = - nla_get_u32(info->attrs[NL80211_ATTR_CHANNEL_WIDTH]); - if (info->attrs[NL80211_ATTR_CENTER_FREQ1]) + nla_get_u32(attrs[NL80211_ATTR_CHANNEL_WIDTH]); + if (attrs[NL80211_ATTR_CENTER_FREQ1]) chandef->center_freq1 = - nla_get_u32( - info->attrs[NL80211_ATTR_CENTER_FREQ1]); - if (info->attrs[NL80211_ATTR_CENTER_FREQ2]) + nla_get_u32(attrs[NL80211_ATTR_CENTER_FREQ1]); + if (attrs[NL80211_ATTR_CENTER_FREQ2]) chandef->center_freq2 = - nla_get_u32( - info->attrs[NL80211_ATTR_CENTER_FREQ2]); + nla_get_u32(attrs[NL80211_ATTR_CENTER_FREQ2]); } - if (!cfg80211_chandef_valid(chandef)) + if (!cfg80211_chandef_valid(chandef)) { + NL_SET_ERR_MSG(extack, "invalid channel definition"); return -EINVAL; + } if (!cfg80211_chandef_usable(&rdev->wiphy, chandef, - IEEE80211_CHAN_DISABLED)) + IEEE80211_CHAN_DISABLED)) { + NL_SET_ERR_MSG(extack, "(extension) channel is disabled"); return -EINVAL; + } if ((chandef->width == NL80211_CHAN_WIDTH_5 || chandef->width == NL80211_CHAN_WIDTH_10) && - !(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_5_10_MHZ)) + !(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_5_10_MHZ)) { + 
NL_SET_ERR_MSG(extack, "5/10 MHz not supported"); return -EINVAL; + } return 0; } @@ -2617,8 +2665,6 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info) if (info->attrs[NL80211_ATTR_WIPHY_RETRY_SHORT]) { retry_short = nla_get_u8( info->attrs[NL80211_ATTR_WIPHY_RETRY_SHORT]); - if (retry_short == 0) - return -EINVAL; changed |= WIPHY_PARAM_RETRY_SHORT; } @@ -2626,8 +2672,6 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info) if (info->attrs[NL80211_ATTR_WIPHY_RETRY_LONG]) { retry_long = nla_get_u8( info->attrs[NL80211_ATTR_WIPHY_RETRY_LONG]); - if (retry_long == 0) - return -EINVAL; changed |= WIPHY_PARAM_RETRY_LONG; } @@ -3119,8 +3163,6 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info) ntype = nla_get_u32(info->attrs[NL80211_ATTR_IFTYPE]); if (otype != ntype) change = true; - if (ntype > NL80211_IFTYPE_MAX) - return -EINVAL; } if (info->attrs[NL80211_ATTR_MESH_ID]) { @@ -3185,11 +3227,8 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info) if (!info->attrs[NL80211_ATTR_IFNAME]) return -EINVAL; - if (info->attrs[NL80211_ATTR_IFTYPE]) { + if (info->attrs[NL80211_ATTR_IFTYPE]) type = nla_get_u32(info->attrs[NL80211_ATTR_IFTYPE]); - if (type > NL80211_IFTYPE_MAX) - return -EINVAL; - } if (!rdev->ops->add_virtual_intf || !(rdev->wiphy.interface_modes & (1 << type))) @@ -3252,15 +3291,7 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info) * P2P Device and NAN do not have a netdev, so don't go * through the netdev notifier and must be added here */ - mutex_init(&wdev->mtx); - INIT_LIST_HEAD(&wdev->event_list); - spin_lock_init(&wdev->event_lock); - INIT_LIST_HEAD(&wdev->mgmt_registrations); - spin_lock_init(&wdev->mgmt_registrations_lock); - - wdev->identifier = ++rdev->wdev_id; - list_add_rcu(&wdev->list, &rdev->wiphy.wdev_list); - rdev->devlist_generation++; + cfg80211_init_wdev(rdev, wdev); break; default: break; @@ -3272,15 +3303,6 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info) return -ENOBUFS; } - /* - * For wdevs which have no associated netdev object (e.g. of type - * NL80211_IFTYPE_P2P_DEVICE), emit the NEW_INTERFACE event here. 
- * For all other types, the event will be generated from the - * netdev notifier - */ - if (!wdev->netdev) - nl80211_notify_iface(rdev, wdev, NL80211_CMD_NEW_INTERFACE); - return genlmsg_reply(msg, info); } @@ -3359,7 +3381,7 @@ static void get_key_callback(void *c, struct key_params *params) params->cipher))) goto nla_put_failure; - if (nla_put_u8(cookie->msg, NL80211_ATTR_KEY_IDX, cookie->idx)) + if (nla_put_u8(cookie->msg, NL80211_KEY_IDX, cookie->idx)) goto nla_put_failure; nla_nest_end(cookie->msg, key); @@ -3386,9 +3408,6 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info) if (info->attrs[NL80211_ATTR_KEY_IDX]) key_idx = nla_get_u8(info->attrs[NL80211_ATTR_KEY_IDX]); - if (key_idx > 5) - return -EINVAL; - if (info->attrs[NL80211_ATTR_MAC]) mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); @@ -3396,8 +3415,6 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info) if (info->attrs[NL80211_ATTR_KEY_TYPE]) { u32 kt = nla_get_u32(info->attrs[NL80211_ATTR_KEY_TYPE]); - if (kt >= NUM_NL80211_KEYTYPES) - return -EINVAL; if (kt != NL80211_KEYTYPE_GROUP && kt != NL80211_KEYTYPE_PAIRWISE) return -EINVAL; @@ -3998,16 +4015,12 @@ static int validate_beacon_tx_rate(struct cfg80211_registered_device *rdev, return 0; } -static int nl80211_parse_beacon(struct nlattr *attrs[], +static int nl80211_parse_beacon(struct cfg80211_registered_device *rdev, + struct nlattr *attrs[], struct cfg80211_beacon_data *bcn) { bool haveinfo = false; - - if (!is_valid_ie_attr(attrs[NL80211_ATTR_BEACON_TAIL]) || - !is_valid_ie_attr(attrs[NL80211_ATTR_IE]) || - !is_valid_ie_attr(attrs[NL80211_ATTR_IE_PROBE_RESP]) || - !is_valid_ie_attr(attrs[NL80211_ATTR_IE_ASSOC_RESP])) - return -EINVAL; + int err; memset(bcn, 0, sizeof(*bcn)); @@ -4052,6 +4065,35 @@ static int nl80211_parse_beacon(struct nlattr *attrs[], bcn->probe_resp_len = nla_len(attrs[NL80211_ATTR_PROBE_RESP]); } + if (attrs[NL80211_ATTR_FTM_RESPONDER]) { + struct nlattr *tb[NL80211_FTM_RESP_ATTR_MAX + 1]; + + err = nla_parse_nested(tb, NL80211_FTM_RESP_ATTR_MAX, + attrs[NL80211_ATTR_FTM_RESPONDER], + NULL, NULL); + if (err) + return err; + + if (tb[NL80211_FTM_RESP_ATTR_ENABLED] && + wiphy_ext_feature_isset(&rdev->wiphy, + NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER)) + bcn->ftm_responder = 1; + else + return -EOPNOTSUPP; + + if (tb[NL80211_FTM_RESP_ATTR_LCI]) { + bcn->lci = nla_data(tb[NL80211_FTM_RESP_ATTR_LCI]); + bcn->lci_len = nla_len(tb[NL80211_FTM_RESP_ATTR_LCI]); + } + + if (tb[NL80211_FTM_RESP_ATTR_CIVICLOC]) { + bcn->civicloc = nla_data(tb[NL80211_FTM_RESP_ATTR_CIVICLOC]); + bcn->civicloc_len = nla_len(tb[NL80211_FTM_RESP_ATTR_CIVICLOC]); + } + } else { + bcn->ftm_responder = -1; + } + return 0; } @@ -4096,6 +4138,9 @@ static void nl80211_calculate_ap_params(struct cfg80211_ap_settings *params) cap = cfg80211_find_ie(WLAN_EID_VHT_CAPABILITY, ies, ies_len); if (cap && cap[1] >= sizeof(*params->vht_cap)) params->vht_cap = (void *)(cap + 2); + cap = cfg80211_find_ext_ie(WLAN_EID_EXT_HE_CAPABILITY, ies, ies_len); + if (cap && cap[1] >= sizeof(*params->he_cap) + 1) + params->he_cap = (void *)(cap + 3); } static bool nl80211_get_ap_channel(struct cfg80211_registered_device *rdev, @@ -4195,7 +4240,7 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info) !info->attrs[NL80211_ATTR_BEACON_HEAD]) return -EINVAL; - err = nl80211_parse_beacon(info->attrs, &params.beacon); + err = nl80211_parse_beacon(rdev, info->attrs, &params.beacon); if (err) return err; @@ -4225,14 +4270,9 @@ static int nl80211_start_ap(struct
sk_buff *skb, struct genl_info *info) return -EINVAL; } - if (info->attrs[NL80211_ATTR_HIDDEN_SSID]) { + if (info->attrs[NL80211_ATTR_HIDDEN_SSID]) params.hidden_ssid = nla_get_u32( info->attrs[NL80211_ATTR_HIDDEN_SSID]); - if (params.hidden_ssid != NL80211_HIDDEN_SSID_NOT_IN_USE && - params.hidden_ssid != NL80211_HIDDEN_SSID_ZERO_LEN && - params.hidden_ssid != NL80211_HIDDEN_SSID_ZERO_CONTENTS) - return -EINVAL; - } params.privacy = !!info->attrs[NL80211_ATTR_PRIVACY]; @@ -4262,8 +4302,6 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info) return -EINVAL; params.p2p_ctwindow = nla_get_u8(info->attrs[NL80211_ATTR_P2P_CTWINDOW]); - if (params.p2p_ctwindow > 127) - return -EINVAL; if (params.p2p_ctwindow != 0 && !(rdev->wiphy.features & NL80211_FEATURE_P2P_GO_CTWIN)) return -EINVAL; @@ -4275,8 +4313,6 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info) if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) return -EINVAL; tmp = nla_get_u8(info->attrs[NL80211_ATTR_P2P_OPPPS]); - if (tmp > 1) - return -EINVAL; params.p2p_opp_ps = tmp; if (params.p2p_opp_ps != 0 && !(rdev->wiphy.features & NL80211_FEATURE_P2P_GO_OPPPS)) @@ -4379,7 +4415,7 @@ static int nl80211_set_beacon(struct sk_buff *skb, struct genl_info *info) if (!wdev->beacon_interval) return -EINVAL; - err = nl80211_parse_beacon(info->attrs, &params); + err = nl80211_parse_beacon(rdev, info->attrs, &params); if (err) return err; @@ -4725,10 +4761,13 @@ static int nl80211_send_station(struct sk_buff *msg, u32 cmd, u32 portid, PUT_SINFO_U64(RX_DROP_MISC, rx_dropped_misc); PUT_SINFO_U64(BEACON_RX, rx_beacon); PUT_SINFO(BEACON_SIGNAL_AVG, rx_beacon_signal_avg, u8); - PUT_SINFO(ACK_SIGNAL, ack_signal, u8); + PUT_SINFO(RX_MPDUS, rx_mpdu_count, u32); + PUT_SINFO(FCS_ERROR_COUNT, fcs_err_count, u32); if (wiphy_ext_feature_isset(&rdev->wiphy, - NL80211_EXT_FEATURE_DATA_ACK_SIGNAL_SUPPORT)) - PUT_SINFO(DATA_ACK_SIGNAL_AVG, avg_ack_signal, s8); + NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT)) { + PUT_SINFO(ACK_SIGNAL, ack_signal, u8); + PUT_SINFO(ACK_SIGNAL_AVG, avg_ack_signal, s8); + } #undef PUT_SINFO #undef PUT_SINFO_U64 @@ -4807,7 +4846,7 @@ static int nl80211_dump_station(struct sk_buff *skb, int err; rtnl_lock(); - err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev); + err = nl80211_prepare_wdev_dump(cb, &rdev, &wdev); if (err) goto out_err; @@ -5212,17 +5251,11 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info) else params.listen_interval = -1; - if (info->attrs[NL80211_ATTR_STA_SUPPORT_P2P_PS]) { - u8 tmp; - - tmp = nla_get_u8(info->attrs[NL80211_ATTR_STA_SUPPORT_P2P_PS]); - if (tmp >= NUM_NL80211_P2P_PS_STATUS) - return -EINVAL; - - params.support_p2p_ps = tmp; - } else { + if (info->attrs[NL80211_ATTR_STA_SUPPORT_P2P_PS]) + params.support_p2p_ps = + nla_get_u8(info->attrs[NL80211_ATTR_STA_SUPPORT_P2P_PS]); + else params.support_p2p_ps = -1; - } if (!info->attrs[NL80211_ATTR_MAC]) return -EINVAL; @@ -5252,38 +5285,23 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info) if (parse_station_flags(info, dev->ieee80211_ptr->iftype, &params)) return -EINVAL; - if (info->attrs[NL80211_ATTR_STA_PLINK_ACTION]) { + if (info->attrs[NL80211_ATTR_STA_PLINK_ACTION]) params.plink_action = nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_ACTION]); - if (params.plink_action >= NUM_NL80211_PLINK_ACTIONS) - return -EINVAL; - } if (info->attrs[NL80211_ATTR_STA_PLINK_STATE]) { params.plink_state = nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_STATE]); - if (params.plink_state
>= NUM_NL80211_PLINK_STATES) - return -EINVAL; - if (info->attrs[NL80211_ATTR_MESH_PEER_AID]) { + if (info->attrs[NL80211_ATTR_MESH_PEER_AID]) params.peer_aid = nla_get_u16( info->attrs[NL80211_ATTR_MESH_PEER_AID]); - if (params.peer_aid > IEEE80211_MAX_AID) - return -EINVAL; - } params.sta_modify_mask |= STATION_PARAM_APPLY_PLINK_STATE; } - if (info->attrs[NL80211_ATTR_LOCAL_MESH_POWER_MODE]) { - enum nl80211_mesh_power_mode pm = nla_get_u32( + if (info->attrs[NL80211_ATTR_LOCAL_MESH_POWER_MODE]) + params.local_pm = nla_get_u32( info->attrs[NL80211_ATTR_LOCAL_MESH_POWER_MODE]); - if (pm <= NL80211_MESH_POWER_UNKNOWN || - pm > NL80211_MESH_POWER_MAX) - return -EINVAL; - - params.local_pm = pm; - } - if (info->attrs[NL80211_ATTR_OPMODE_NOTIF]) { params.opmode_notif_used = true; params.opmode_notif = @@ -5360,13 +5378,8 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info) nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]); if (info->attrs[NL80211_ATTR_STA_SUPPORT_P2P_PS]) { - u8 tmp; - - tmp = nla_get_u8(info->attrs[NL80211_ATTR_STA_SUPPORT_P2P_PS]); - if (tmp >= NUM_NL80211_P2P_PS_STATUS) - return -EINVAL; - - params.support_p2p_ps = tmp; + params.support_p2p_ps = + nla_get_u8(info->attrs[NL80211_ATTR_STA_SUPPORT_P2P_PS]); } else { /* * if not specified, assume it's supported for P2P GO interface, @@ -5380,8 +5393,6 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info) params.aid = nla_get_u16(info->attrs[NL80211_ATTR_PEER_AID]); else params.aid = nla_get_u16(info->attrs[NL80211_ATTR_STA_AID]); - if (!params.aid || params.aid > IEEE80211_MAX_AID) - return -EINVAL; if (info->attrs[NL80211_ATTR_STA_CAPABILITY]) { params.capability = @@ -5421,12 +5432,9 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info) nla_get_u8(info->attrs[NL80211_ATTR_OPMODE_NOTIF]); } - if (info->attrs[NL80211_ATTR_STA_PLINK_ACTION]) { + if (info->attrs[NL80211_ATTR_STA_PLINK_ACTION]) params.plink_action = nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_ACTION]); - if (params.plink_action >= NUM_NL80211_PLINK_ACTIONS) - return -EINVAL; - } err = nl80211_parse_sta_channel_info(info, &params); if (err) @@ -5658,7 +5666,7 @@ static int nl80211_dump_mpath(struct sk_buff *skb, int err; rtnl_lock(); - err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev); + err = nl80211_prepare_wdev_dump(cb, &rdev, &wdev); if (err) goto out_err; @@ -5854,7 +5862,7 @@ static int nl80211_dump_mpp(struct sk_buff *skb, int err; rtnl_lock(); - err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev); + err = nl80211_prepare_wdev_dump(cb, &rdev, &wdev); if (err) goto out_err; @@ -5936,9 +5944,7 @@ static int nl80211_set_bss(struct sk_buff *skb, struct genl_info *info) if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) return -EINVAL; params.p2p_ctwindow = - nla_get_s8(info->attrs[NL80211_ATTR_P2P_CTWINDOW]); - if (params.p2p_ctwindow < 0) - return -EINVAL; + nla_get_u8(info->attrs[NL80211_ATTR_P2P_CTWINDOW]); if (params.p2p_ctwindow != 0 && !(rdev->wiphy.features & NL80211_FEATURE_P2P_GO_CTWIN)) return -EINVAL; @@ -5950,8 +5956,6 @@ static int nl80211_set_bss(struct sk_buff *skb, struct genl_info *info) if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) return -EINVAL; tmp = nla_get_u8(info->attrs[NL80211_ATTR_P2P_OPPPS]); - if (tmp > 1) - return -EINVAL; params.p2p_opp_ps = tmp; if (params.p2p_opp_ps && !(rdev->wiphy.features & NL80211_FEATURE_P2P_GO_OPPPS)) @@ -6130,33 +6134,49 @@ static int nl80211_get_mesh_config(struct sk_buff *skb, return -ENOBUFS; }
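The mesh hunks below apply the same policy-based validation: the nl80211_check_{bool,u8,u16,u32,s32,power_mode} helpers are deleted, FILL_IN_MESH_PARAM_IF_SET() shrinks to a plain nla_get_*() copy, and only a few ranges stay open-coded in the parser. A sketch of the sequence the rewrite relies on — by the time the macro copies a value out of tb[], nla_parse_nested() has already enforced the ranges from nl80211_meshconf_params_policy (abbreviated; error paths as in the real parser):

	struct nlattr *tb[NL80211_MESHCONF_ATTR_MAX + 1];
	int err;

	err = nla_parse_nested(tb, NL80211_MESHCONF_ATTR_MAX,
			       info->attrs[NL80211_ATTR_MESH_CONFIG],
			       nl80211_meshconf_params_policy,
			       info->extack);
	if (err)
		return err;
	/* tb[] entries now satisfy the NLA_POLICY_* constraints */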
-static const struct nla_policy nl80211_meshconf_params_policy[NL80211_MESHCONF_ATTR_MAX+1] = { - [NL80211_MESHCONF_RETRY_TIMEOUT] = { .type = NLA_U16 }, - [NL80211_MESHCONF_CONFIRM_TIMEOUT] = { .type = NLA_U16 }, - [NL80211_MESHCONF_HOLDING_TIMEOUT] = { .type = NLA_U16 }, - [NL80211_MESHCONF_MAX_PEER_LINKS] = { .type = NLA_U16 }, - [NL80211_MESHCONF_MAX_RETRIES] = { .type = NLA_U8 }, - [NL80211_MESHCONF_TTL] = { .type = NLA_U8 }, - [NL80211_MESHCONF_ELEMENT_TTL] = { .type = NLA_U8 }, - [NL80211_MESHCONF_AUTO_OPEN_PLINKS] = { .type = NLA_U8 }, - [NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR] = { .type = NLA_U32 }, +static const struct nla_policy +nl80211_meshconf_params_policy[NL80211_MESHCONF_ATTR_MAX+1] = { + [NL80211_MESHCONF_RETRY_TIMEOUT] = + NLA_POLICY_RANGE(NLA_U16, 1, 255), + [NL80211_MESHCONF_CONFIRM_TIMEOUT] = + NLA_POLICY_RANGE(NLA_U16, 1, 255), + [NL80211_MESHCONF_HOLDING_TIMEOUT] = + NLA_POLICY_RANGE(NLA_U16, 1, 255), + [NL80211_MESHCONF_MAX_PEER_LINKS] = + NLA_POLICY_RANGE(NLA_U16, 0, 255), + [NL80211_MESHCONF_MAX_RETRIES] = NLA_POLICY_MAX(NLA_U8, 16), + [NL80211_MESHCONF_TTL] = NLA_POLICY_MIN(NLA_U8, 1), + [NL80211_MESHCONF_ELEMENT_TTL] = NLA_POLICY_MIN(NLA_U8, 1), + [NL80211_MESHCONF_AUTO_OPEN_PLINKS] = NLA_POLICY_MAX(NLA_U8, 1), + [NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR] = + NLA_POLICY_RANGE(NLA_U32, 1, 255), [NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES] = { .type = NLA_U8 }, [NL80211_MESHCONF_PATH_REFRESH_TIME] = { .type = NLA_U32 }, - [NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT] = { .type = NLA_U16 }, + [NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT] = NLA_POLICY_MIN(NLA_U16, 1), [NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT] = { .type = NLA_U32 }, - [NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL] = { .type = NLA_U16 }, - [NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL] = { .type = NLA_U16 }, - [NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME] = { .type = NLA_U16 }, - [NL80211_MESHCONF_HWMP_ROOTMODE] = { .type = NLA_U8 }, - [NL80211_MESHCONF_HWMP_RANN_INTERVAL] = { .type = NLA_U16 }, - [NL80211_MESHCONF_GATE_ANNOUNCEMENTS] = { .type = NLA_U8 }, - [NL80211_MESHCONF_FORWARDING] = { .type = NLA_U8 }, - [NL80211_MESHCONF_RSSI_THRESHOLD] = { .type = NLA_U32 }, + [NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL] = + NLA_POLICY_MIN(NLA_U16, 1), + [NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL] = + NLA_POLICY_MIN(NLA_U16, 1), + [NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME] = + NLA_POLICY_MIN(NLA_U16, 1), + [NL80211_MESHCONF_HWMP_ROOTMODE] = NLA_POLICY_MAX(NLA_U8, 4), + [NL80211_MESHCONF_HWMP_RANN_INTERVAL] = + NLA_POLICY_MIN(NLA_U16, 1), + [NL80211_MESHCONF_GATE_ANNOUNCEMENTS] = NLA_POLICY_MAX(NLA_U8, 1), + [NL80211_MESHCONF_FORWARDING] = NLA_POLICY_MAX(NLA_U8, 1), + [NL80211_MESHCONF_RSSI_THRESHOLD] = + NLA_POLICY_RANGE(NLA_S32, -255, 0), [NL80211_MESHCONF_HT_OPMODE] = { .type = NLA_U16 }, [NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT] = { .type = NLA_U32 }, - [NL80211_MESHCONF_HWMP_ROOT_INTERVAL] = { .type = NLA_U16 }, - [NL80211_MESHCONF_HWMP_CONFIRMATION_INTERVAL] = { .type = NLA_U16 }, - [NL80211_MESHCONF_POWER_MODE] = { .type = NLA_U32 }, + [NL80211_MESHCONF_HWMP_ROOT_INTERVAL] = + NLA_POLICY_MIN(NLA_U16, 1), + [NL80211_MESHCONF_HWMP_CONFIRMATION_INTERVAL] = + NLA_POLICY_MIN(NLA_U16, 1), + [NL80211_MESHCONF_POWER_MODE] = + NLA_POLICY_RANGE(NLA_U32, + NL80211_MESH_POWER_ACTIVE, + NL80211_MESH_POWER_MAX), [NL80211_MESHCONF_AWAKE_WINDOW] = { .type = NLA_U16 }, [NL80211_MESHCONF_PLINK_TIMEOUT] = { .type = NLA_U32 }, }; @@ -6169,68 +6189,12 @@ static const struct nla_policy [NL80211_MESH_SETUP_USERSPACE_AUTH] = { .type = NLA_FLAG }, 
[NL80211_MESH_SETUP_AUTH_PROTOCOL] = { .type = NLA_U8 }, [NL80211_MESH_SETUP_USERSPACE_MPM] = { .type = NLA_FLAG }, - [NL80211_MESH_SETUP_IE] = { .type = NLA_BINARY, - .len = IEEE80211_MAX_DATA_LEN }, + [NL80211_MESH_SETUP_IE] = + NLA_POLICY_VALIDATE_FN(NLA_BINARY, validate_ie_attr, + IEEE80211_MAX_DATA_LEN), [NL80211_MESH_SETUP_USERSPACE_AMPE] = { .type = NLA_FLAG }, }; -static int nl80211_check_bool(const struct nlattr *nla, u8 min, u8 max, bool *out) -{ - u8 val = nla_get_u8(nla); - if (val < min || val > max) - return -EINVAL; - *out = val; - return 0; -} - -static int nl80211_check_u8(const struct nlattr *nla, u8 min, u8 max, u8 *out) -{ - u8 val = nla_get_u8(nla); - if (val < min || val > max) - return -EINVAL; - *out = val; - return 0; -} - -static int nl80211_check_u16(const struct nlattr *nla, u16 min, u16 max, u16 *out) -{ - u16 val = nla_get_u16(nla); - if (val < min || val > max) - return -EINVAL; - *out = val; - return 0; -} - -static int nl80211_check_u32(const struct nlattr *nla, u32 min, u32 max, u32 *out) -{ - u32 val = nla_get_u32(nla); - if (val < min || val > max) - return -EINVAL; - *out = val; - return 0; -} - -static int nl80211_check_s32(const struct nlattr *nla, s32 min, s32 max, s32 *out) -{ - s32 val = nla_get_s32(nla); - if (val < min || val > max) - return -EINVAL; - *out = val; - return 0; -} - -static int nl80211_check_power_mode(const struct nlattr *nla, - enum nl80211_mesh_power_mode min, - enum nl80211_mesh_power_mode max, - enum nl80211_mesh_power_mode *out) -{ - u32 val = nla_get_u32(nla); - if (val < min || val > max) - return -EINVAL; - *out = val; - return 0; -} - static int nl80211_parse_mesh_config(struct genl_info *info, struct mesh_config *cfg, u32 *mask_out) @@ -6239,13 +6203,12 @@ static int nl80211_parse_mesh_config(struct genl_info *info, u32 mask = 0; u16 ht_opmode; -#define FILL_IN_MESH_PARAM_IF_SET(tb, cfg, param, min, max, mask, attr, fn) \ -do { \ - if (tb[attr]) { \ - if (fn(tb[attr], min, max, &cfg->param)) \ - return -EINVAL; \ - mask |= (1 << (attr - 1)); \ - } \ +#define FILL_IN_MESH_PARAM_IF_SET(tb, cfg, param, mask, attr, fn) \ +do { \ + if (tb[attr]) { \ + cfg->param = fn(tb[attr]); \ + mask |= BIT((attr) - 1); \ + } \ } while (0) if (!info->attrs[NL80211_ATTR_MESH_CONFIG]) @@ -6260,75 +6223,73 @@ do { \ BUILD_BUG_ON(NL80211_MESHCONF_ATTR_MAX > 32); /* Fill in the params struct */ - FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshRetryTimeout, 1, 255, - mask, NL80211_MESHCONF_RETRY_TIMEOUT, - nl80211_check_u16); - FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshConfirmTimeout, 1, 255, - mask, NL80211_MESHCONF_CONFIRM_TIMEOUT, - nl80211_check_u16); - FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHoldingTimeout, 1, 255, - mask, NL80211_MESHCONF_HOLDING_TIMEOUT, - nl80211_check_u16); - FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshMaxPeerLinks, 0, 255, - mask, NL80211_MESHCONF_MAX_PEER_LINKS, - nl80211_check_u16); - FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshMaxRetries, 0, 16, - mask, NL80211_MESHCONF_MAX_RETRIES, - nl80211_check_u8); - FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshTTL, 1, 255, - mask, NL80211_MESHCONF_TTL, nl80211_check_u8); - FILL_IN_MESH_PARAM_IF_SET(tb, cfg, element_ttl, 1, 255, - mask, NL80211_MESHCONF_ELEMENT_TTL, - nl80211_check_u8); - FILL_IN_MESH_PARAM_IF_SET(tb, cfg, auto_open_plinks, 0, 1, - mask, NL80211_MESHCONF_AUTO_OPEN_PLINKS, - nl80211_check_bool); + FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshRetryTimeout, mask, + NL80211_MESHCONF_RETRY_TIMEOUT, nla_get_u16); + FILL_IN_MESH_PARAM_IF_SET(tb, cfg, 
dot11MeshConfirmTimeout, mask, + NL80211_MESHCONF_CONFIRM_TIMEOUT, + nla_get_u16); + FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHoldingTimeout, mask, + NL80211_MESHCONF_HOLDING_TIMEOUT, + nla_get_u16); + FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshMaxPeerLinks, mask, + NL80211_MESHCONF_MAX_PEER_LINKS, + nla_get_u16); + FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshMaxRetries, mask, + NL80211_MESHCONF_MAX_RETRIES, nla_get_u8); + FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshTTL, mask, + NL80211_MESHCONF_TTL, nla_get_u8); + FILL_IN_MESH_PARAM_IF_SET(tb, cfg, element_ttl, mask, + NL80211_MESHCONF_ELEMENT_TTL, nla_get_u8); + FILL_IN_MESH_PARAM_IF_SET(tb, cfg, auto_open_plinks, mask, + NL80211_MESHCONF_AUTO_OPEN_PLINKS, + nla_get_u8); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshNbrOffsetMaxNeighbor, - 1, 255, mask, + mask, NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR, - nl80211_check_u32); - FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPmaxPREQretries, 0, 255, - mask, NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES, - nl80211_check_u8); - FILL_IN_MESH_PARAM_IF_SET(tb, cfg, path_refresh_time, 1, 65535, - mask, NL80211_MESHCONF_PATH_REFRESH_TIME, - nl80211_check_u32); - FILL_IN_MESH_PARAM_IF_SET(tb, cfg, min_discovery_timeout, 1, 65535, - mask, NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT, - nl80211_check_u16); + nla_get_u32); + FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPmaxPREQretries, mask, + NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES, + nla_get_u8); + FILL_IN_MESH_PARAM_IF_SET(tb, cfg, path_refresh_time, mask, + NL80211_MESHCONF_PATH_REFRESH_TIME, + nla_get_u32); + if (mask & BIT(NL80211_MESHCONF_PATH_REFRESH_TIME) && + (cfg->path_refresh_time < 1 || cfg->path_refresh_time > 65535)) + return -EINVAL; + FILL_IN_MESH_PARAM_IF_SET(tb, cfg, min_discovery_timeout, mask, + NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT, + nla_get_u16); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPactivePathTimeout, - 1, 65535, mask, + mask, NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT, - nl80211_check_u32); - FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPpreqMinInterval, - 1, 65535, mask, + nla_get_u32); + if (mask & BIT(NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT) && + (cfg->dot11MeshHWMPactivePathTimeout < 1 || + cfg->dot11MeshHWMPactivePathTimeout > 65535)) + return -EINVAL; + FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPpreqMinInterval, mask, NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL, - nl80211_check_u16); - FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPperrMinInterval, - 1, 65535, mask, + nla_get_u16); + FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPperrMinInterval, mask, NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL, - nl80211_check_u16); + nla_get_u16); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, - dot11MeshHWMPnetDiameterTraversalTime, - 1, 65535, mask, + dot11MeshHWMPnetDiameterTraversalTime, mask, NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME, - nl80211_check_u16); - FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPRootMode, 0, 4, - mask, NL80211_MESHCONF_HWMP_ROOTMODE, - nl80211_check_u8); - FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPRannInterval, 1, 65535, - mask, NL80211_MESHCONF_HWMP_RANN_INTERVAL, - nl80211_check_u16); - FILL_IN_MESH_PARAM_IF_SET(tb, cfg, - dot11MeshGateAnnouncementProtocol, 0, 1, + nla_get_u16); + FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPRootMode, mask, + NL80211_MESHCONF_HWMP_ROOTMODE, nla_get_u8); + FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPRannInterval, mask, + NL80211_MESHCONF_HWMP_RANN_INTERVAL, + nla_get_u16); + FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshGateAnnouncementProtocol, mask, 
NL80211_MESHCONF_GATE_ANNOUNCEMENTS, - nl80211_check_bool); - FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshForwarding, 0, 1, - mask, NL80211_MESHCONF_FORWARDING, - nl80211_check_bool); - FILL_IN_MESH_PARAM_IF_SET(tb, cfg, rssi_threshold, -255, 0, - mask, NL80211_MESHCONF_RSSI_THRESHOLD, - nl80211_check_s32); + nla_get_u8); + FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshForwarding, mask, + NL80211_MESHCONF_FORWARDING, nla_get_u8); + FILL_IN_MESH_PARAM_IF_SET(tb, cfg, rssi_threshold, mask, + NL80211_MESHCONF_RSSI_THRESHOLD, + nla_get_s32); /* * Check HT operation mode based on * IEEE 802.11-2016 9.4.2.57 HT Operation element. @@ -6347,29 +6308,27 @@ do { \ cfg->ht_opmode = ht_opmode; mask |= (1 << (NL80211_MESHCONF_HT_OPMODE - 1)); } - FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPactivePathToRootTimeout, - 1, 65535, mask, - NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT, - nl80211_check_u32); - FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMProotInterval, 1, 65535, - mask, NL80211_MESHCONF_HWMP_ROOT_INTERVAL, - nl80211_check_u16); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, - dot11MeshHWMPconfirmationInterval, - 1, 65535, mask, + dot11MeshHWMPactivePathToRootTimeout, mask, + NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT, + nla_get_u32); + if (mask & BIT(NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT) && + (cfg->dot11MeshHWMPactivePathToRootTimeout < 1 || + cfg->dot11MeshHWMPactivePathToRootTimeout > 65535)) + return -EINVAL; + FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMProotInterval, mask, + NL80211_MESHCONF_HWMP_ROOT_INTERVAL, + nla_get_u16); + FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPconfirmationInterval, + mask, NL80211_MESHCONF_HWMP_CONFIRMATION_INTERVAL, - nl80211_check_u16); - FILL_IN_MESH_PARAM_IF_SET(tb, cfg, power_mode, - NL80211_MESH_POWER_ACTIVE, - NL80211_MESH_POWER_MAX, - mask, NL80211_MESHCONF_POWER_MODE, - nl80211_check_power_mode); - FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshAwakeWindowDuration, - 0, 65535, mask, - NL80211_MESHCONF_AWAKE_WINDOW, nl80211_check_u16); - FILL_IN_MESH_PARAM_IF_SET(tb, cfg, plink_timeout, 0, 0xffffffff, - mask, NL80211_MESHCONF_PLINK_TIMEOUT, - nl80211_check_u32); + nla_get_u16); + FILL_IN_MESH_PARAM_IF_SET(tb, cfg, power_mode, mask, + NL80211_MESHCONF_POWER_MODE, nla_get_u32); + FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshAwakeWindowDuration, mask, + NL80211_MESHCONF_AWAKE_WINDOW, nla_get_u16); + FILL_IN_MESH_PARAM_IF_SET(tb, cfg, plink_timeout, mask, + NL80211_MESHCONF_PLINK_TIMEOUT, nla_get_u32); if (mask_out) *mask_out = mask; @@ -6412,8 +6371,6 @@ static int nl80211_parse_mesh_setup(struct genl_info *info, if (tb[NL80211_MESH_SETUP_IE]) { struct nlattr *ieattr = tb[NL80211_MESH_SETUP_IE]; - if (!is_valid_ie_attr(ieattr)) - return -EINVAL; setup->ie = nla_data(ieattr); setup->ie_len = nla_len(ieattr); } @@ -7046,9 +7003,6 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) int err, tmp, n_ssids = 0, n_channels, i; size_t ie_len; - if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE])) - return -EINVAL; - wiphy = &rdev->wiphy; if (wdev->iftype == NL80211_IFTYPE_NAN) @@ -7402,9 +7356,6 @@ nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev, struct nlattr *tb[NL80211_SCHED_SCAN_MATCH_ATTR_MAX + 1]; s32 default_match_rssi = NL80211_SCAN_RSSI_THOLD_OFF; - if (!is_valid_ie_attr(attrs[NL80211_ATTR_IE])) - return ERR_PTR(-EINVAL); - if (attrs[NL80211_ATTR_SCAN_FREQUENCIES]) { n_channels = validate_scan_freqs( attrs[NL80211_ATTR_SCAN_FREQUENCIES]); @@ -7764,7 +7715,7 @@ static int nl80211_start_sched_scan(struct 
sk_buff *skb, */ if (want_multi && rdev->wiphy.max_sched_scan_reqs > 1) { while (!sched_scan_req->reqid) - sched_scan_req->reqid = rdev->wiphy.cookie_counter++; + sched_scan_req->reqid = cfg80211_assign_cookie(rdev); } err = rdev_sched_scan_start(rdev, dev, sched_scan_req); @@ -7940,7 +7891,7 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info) if (!need_new_beacon) goto skip_beacons; - err = nl80211_parse_beacon(info->attrs, &params.beacon_after); + err = nl80211_parse_beacon(rdev, info->attrs, &params.beacon_after); if (err) return err; @@ -7950,7 +7901,7 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info) if (err) return err; - err = nl80211_parse_beacon(csa_attrs, &params.beacon_csa); + err = nl80211_parse_beacon(rdev, csa_attrs, &params.beacon_csa); if (err) return err; @@ -8187,7 +8138,7 @@ static int nl80211_dump_scan(struct sk_buff *skb, struct netlink_callback *cb) int err; rtnl_lock(); - err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev); + err = nl80211_prepare_wdev_dump(cb, &rdev, &wdev); if (err) { rtnl_unlock(); return err; @@ -8308,7 +8259,7 @@ static int nl80211_dump_survey(struct sk_buff *skb, struct netlink_callback *cb) bool radio_stats; rtnl_lock(); - res = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev); + res = nl80211_prepare_wdev_dump(cb, &rdev, &wdev); if (res) goto out_err; @@ -8372,9 +8323,6 @@ static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info) struct key_parse key; bool local_state_change; - if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE])) - return -EINVAL; - if (!info->attrs[NL80211_ATTR_MAC]) return -EINVAL; @@ -8613,9 +8561,6 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info) dev->ieee80211_ptr->conn_owner_nlportid != info->snd_portid) return -EPERM; - if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE])) - return -EINVAL; - if (!info->attrs[NL80211_ATTR_MAC] || !info->attrs[NL80211_ATTR_SSID] || !info->attrs[NL80211_ATTR_WIPHY_FREQ]) @@ -8739,9 +8684,6 @@ static int nl80211_deauthenticate(struct sk_buff *skb, struct genl_info *info) dev->ieee80211_ptr->conn_owner_nlportid != info->snd_portid) return -EPERM; - if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE])) - return -EINVAL; - if (!info->attrs[NL80211_ATTR_MAC]) return -EINVAL; @@ -8790,9 +8732,6 @@ static int nl80211_disassociate(struct sk_buff *skb, struct genl_info *info) dev->ieee80211_ptr->conn_owner_nlportid != info->snd_portid) return -EPERM; - if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE])) - return -EINVAL; - if (!info->attrs[NL80211_ATTR_MAC]) return -EINVAL; @@ -8867,9 +8806,6 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info) memset(&ibss, 0, sizeof(ibss)); - if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE])) - return -EINVAL; - if (!info->attrs[NL80211_ATTR_SSID] || !nla_len(info->attrs[NL80211_ATTR_SSID])) return -EINVAL; @@ -9307,9 +9243,6 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info) memset(&connect, 0, sizeof(connect)); - if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE])) - return -EINVAL; - if (!info->attrs[NL80211_ATTR_SSID] || !nla_len(info->attrs[NL80211_ATTR_SSID])) return -EINVAL; @@ -9368,11 +9301,6 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info) !wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_MFP_OPTIONAL)) return -EOPNOTSUPP; - - if (connect.mfp != NL80211_MFP_REQUIRED && - connect.mfp != NL80211_MFP_NO && - connect.mfp != NL80211_MFP_OPTIONAL) - return -EINVAL; } else {
connect.mfp = NL80211_MFP_NO; } @@ -9545,8 +9473,6 @@ static int nl80211_update_connect_params(struct sk_buff *skb, return -EOPNOTSUPP; if (info->attrs[NL80211_ATTR_IE]) { - if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE])) - return -EINVAL; connect.ie = nla_data(info->attrs[NL80211_ATTR_IE]); connect.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); changed |= UPDATE_ASSOC_IES; @@ -10131,9 +10057,6 @@ static int nl80211_set_power_save(struct sk_buff *skb, struct genl_info *info) ps_state = nla_get_u32(info->attrs[NL80211_ATTR_PS_STATE]); - if (ps_state != NL80211_PS_DISABLED && ps_state != NL80211_PS_ENABLED) - return -EINVAL; - wdev = dev->ieee80211_ptr; if (!rdev->ops->set_power_mgmt) @@ -10696,8 +10619,7 @@ static int nl80211_send_wowlan_nd(struct sk_buff *msg, if (!scan_plan) return -ENOBUFS; - if (!scan_plan || - nla_put_u32(msg, NL80211_SCHED_SCAN_PLAN_INTERVAL, + if (nla_put_u32(msg, NL80211_SCHED_SCAN_PLAN_INTERVAL, req->scan_plans[i].interval) || (req->scan_plans[i].iterations && nla_put_u32(msg, NL80211_SCHED_SCAN_PLAN_ITERATIONS, @@ -11295,9 +11217,6 @@ static int nl80211_parse_coalesce_rule(struct cfg80211_registered_device *rdev, if (tb[NL80211_ATTR_COALESCE_RULE_CONDITION]) new_rule->condition = nla_get_u32(tb[NL80211_ATTR_COALESCE_RULE_CONDITION]); - if (new_rule->condition != NL80211_COALESCE_CONDITION_MATCH && - new_rule->condition != NL80211_COALESCE_CONDITION_NO_MATCH) - return -EINVAL; if (!tb[NL80211_ATTR_COALESCE_RULE_PKT_PATTERN]) return -EINVAL; @@ -11650,8 +11569,6 @@ static int nl80211_start_nan(struct sk_buff *skb, struct genl_info *info) conf.master_pref = nla_get_u8(info->attrs[NL80211_ATTR_NAN_MASTER_PREF]); - if (!conf.master_pref) - return -EINVAL; if (info->attrs[NL80211_ATTR_BANDS]) { u32 bands = nla_get_u32(info->attrs[NL80211_ATTR_BANDS]); @@ -11769,7 +11686,7 @@ static int nl80211_nan_add_func(struct sk_buff *skb, if (!func) return -ENOMEM; - func->cookie = wdev->wiphy->cookie_counter++; + func->cookie = cfg80211_assign_cookie(rdev); if (!tb[NL80211_NAN_FUNC_TYPE] || nla_get_u8(tb[NL80211_NAN_FUNC_TYPE]) > NL80211_NAN_FUNC_MAX_TYPE) { @@ -12215,8 +12132,7 @@ static int nl80211_update_ft_ies(struct sk_buff *skb, struct genl_info *info) return -EOPNOTSUPP; if (!info->attrs[NL80211_ATTR_MDID] || - !info->attrs[NL80211_ATTR_IE] || - !is_valid_ie_attr(info->attrs[NL80211_ATTR_IE])) + !info->attrs[NL80211_ATTR_IE]) return -EINVAL; memset(&ft_params, 0, sizeof(ft_params)); @@ -12636,12 +12552,7 @@ static int nl80211_add_tx_ts(struct sk_buff *skb, struct genl_info *info) return -EINVAL; tsid = nla_get_u8(info->attrs[NL80211_ATTR_TSID]); - if (tsid >= IEEE80211_NUM_TIDS) - return -EINVAL; - up = nla_get_u8(info->attrs[NL80211_ATTR_USER_PRIO]); - if (up >= IEEE80211_NUM_UPS) - return -EINVAL; /* WMM uses TIDs 0-7 even for TSPEC */ if (tsid >= IEEE80211_FIRST_TSPEC_TSID) { @@ -12999,6 +12910,76 @@ static int nl80211_tx_control_port(struct sk_buff *skb, struct genl_info *info) return err; } +static int nl80211_get_ftm_responder_stats(struct sk_buff *skb, + struct genl_info *info) +{ + struct cfg80211_registered_device *rdev = info->user_ptr[0]; + struct net_device *dev = info->user_ptr[1]; + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct cfg80211_ftm_responder_stats ftm_stats = {}; + struct sk_buff *msg; + void *hdr; + struct nlattr *ftm_stats_attr; + int err; + + if (wdev->iftype != NL80211_IFTYPE_AP || !wdev->beacon_interval) + return -EOPNOTSUPP; + + err = rdev_get_ftm_responder_stats(rdev, dev, &ftm_stats); + if (err) + return err; + + if 
(!ftm_stats.filled) + return -ENODATA; + + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, + NL80211_CMD_GET_FTM_RESPONDER_STATS); + if (!hdr) + return -ENOBUFS; + + if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex)) + goto nla_put_failure; + + ftm_stats_attr = nla_nest_start(msg, NL80211_ATTR_FTM_RESPONDER_STATS); + if (!ftm_stats_attr) + goto nla_put_failure; + +#define SET_FTM(field, name, type) \ + do { if ((ftm_stats.filled & BIT(NL80211_FTM_STATS_ ## name)) && \ + nla_put_ ## type(msg, NL80211_FTM_STATS_ ## name, \ + ftm_stats.field)) \ + goto nla_put_failure; } while (0) +#define SET_FTM_U64(field, name) \ + do { if ((ftm_stats.filled & BIT(NL80211_FTM_STATS_ ## name)) && \ + nla_put_u64_64bit(msg, NL80211_FTM_STATS_ ## name, \ + ftm_stats.field, NL80211_FTM_STATS_PAD)) \ + goto nla_put_failure; } while (0) + + SET_FTM(success_num, SUCCESS_NUM, u32); + SET_FTM(partial_num, PARTIAL_NUM, u32); + SET_FTM(failed_num, FAILED_NUM, u32); + SET_FTM(asap_num, ASAP_NUM, u32); + SET_FTM(non_asap_num, NON_ASAP_NUM, u32); + SET_FTM_U64(total_duration_ms, TOTAL_DURATION_MSEC); + SET_FTM(unknown_triggers_num, UNKNOWN_TRIGGERS_NUM, u32); + SET_FTM(reschedule_requests_num, RESCHEDULE_REQUESTS_NUM, u32); + SET_FTM(out_of_window_triggers_num, OUT_OF_WINDOW_TRIGGERS_NUM, u32); +#undef SET_FTM + + nla_nest_end(msg, ftm_stats_attr); + + genlmsg_end(msg, hdr); + return genlmsg_reply(msg, info); + +nla_put_failure: + nlmsg_free(msg); + return -ENOBUFS; +} + #define NL80211_FLAG_NEED_WIPHY 0x01 #define NL80211_FLAG_NEED_NETDEV 0x02 #define NL80211_FLAG_NEED_RTNL 0x04 @@ -13910,6 +13891,13 @@ static const struct genl_ops nl80211_ops[] = { .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, + { + .cmd = NL80211_CMD_GET_FTM_RESPONDER_STATS, + .doit = nl80211_get_ftm_responder_stats, + .policy = nl80211_policy, + .internal_flags = NL80211_FLAG_NEED_NETDEV | + NL80211_FLAG_NEED_RTNL, + }, }; static struct genl_family nl80211_fam __ro_after_init = { diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h index 364f5d67f05b..51380b5c32f2 100644 --- a/net/wireless/rdev-ops.h +++ b/net/wireless/rdev-ops.h @@ -1232,4 +1232,19 @@ rdev_external_auth(struct cfg80211_registered_device *rdev, return ret; } +static inline int +rdev_get_ftm_responder_stats(struct cfg80211_registered_device *rdev, + struct net_device *dev, + struct cfg80211_ftm_responder_stats *ftm_stats) +{ + int ret = -EOPNOTSUPP; + + trace_rdev_get_ftm_responder_stats(&rdev->wiphy, dev, ftm_stats); + if (rdev->ops->get_ftm_responder_stats) + ret = rdev->ops->get_ftm_responder_stats(&rdev->wiphy, dev, + ftm_stats); + trace_rdev_return_int(&rdev->wiphy, ret); + return ret; +} + #endif /* __CFG80211_RDEV_OPS */ diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 24cfa2776f50..ecfb1a06dbb2 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -847,22 +847,36 @@ static bool valid_regdb(const u8 *data, unsigned int size) return true; } -static void set_wmm_rule(struct ieee80211_reg_rule *rrule, - struct fwdb_wmm_rule *wmm) -{ - struct ieee80211_wmm_rule *rule = &rrule->wmm_rule; - unsigned int i; +static void set_wmm_rule(const struct fwdb_header *db, + const struct fwdb_country *country, + const struct fwdb_rule *rule, + struct ieee80211_reg_rule *rrule) +{ + struct ieee80211_wmm_rule *wmm_rule = &rrule->wmm_rule; + struct fwdb_wmm_rule *wmm; + unsigned int i, wmm_ptr; + + wmm_ptr = be16_to_cpu(rule->wmm_ptr) << 2; 
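+	/* the stored wmm_ptr is a 16-bit count of 4-byte words; the shift above converts it into a byte offset into the firmware regdb blob, the same convention used for coll_ptr and the per-rule pointers */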
+ wmm = (void *)((u8 *)db + wmm_ptr); + + if (!valid_wmm(wmm)) { + pr_err("Invalid regulatory WMM rule %u-%u in domain %c%c\n", + be32_to_cpu(rule->start), be32_to_cpu(rule->end), + country->alpha2[0], country->alpha2[1]); + return; + } for (i = 0; i < IEEE80211_NUM_ACS; i++) { - rule->client[i].cw_min = + wmm_rule->client[i].cw_min = ecw2cw((wmm->client[i].ecw & 0xf0) >> 4); - rule->client[i].cw_max = ecw2cw(wmm->client[i].ecw & 0x0f); - rule->client[i].aifsn = wmm->client[i].aifsn; - rule->client[i].cot = 1000 * be16_to_cpu(wmm->client[i].cot); - rule->ap[i].cw_min = ecw2cw((wmm->ap[i].ecw & 0xf0) >> 4); - rule->ap[i].cw_max = ecw2cw(wmm->ap[i].ecw & 0x0f); - rule->ap[i].aifsn = wmm->ap[i].aifsn; - rule->ap[i].cot = 1000 * be16_to_cpu(wmm->ap[i].cot); + wmm_rule->client[i].cw_max = ecw2cw(wmm->client[i].ecw & 0x0f); + wmm_rule->client[i].aifsn = wmm->client[i].aifsn; + wmm_rule->client[i].cot = + 1000 * be16_to_cpu(wmm->client[i].cot); + wmm_rule->ap[i].cw_min = ecw2cw((wmm->ap[i].ecw & 0xf0) >> 4); + wmm_rule->ap[i].cw_max = ecw2cw(wmm->ap[i].ecw & 0x0f); + wmm_rule->ap[i].aifsn = wmm->ap[i].aifsn; + wmm_rule->ap[i].cot = 1000 * be16_to_cpu(wmm->ap[i].cot); } rrule->has_wmm = true; @@ -870,7 +884,7 @@ static void set_wmm_rule(struct ieee80211_reg_rule *rrule, static int __regdb_query_wmm(const struct fwdb_header *db, const struct fwdb_country *country, int freq, - struct ieee80211_reg_rule *rule) + struct ieee80211_reg_rule *rrule) { unsigned int ptr = be16_to_cpu(country->coll_ptr) << 2; struct fwdb_collection *coll = (void *)((u8 *)db + ptr); @@ -879,18 +893,14 @@ static int __regdb_query_wmm(const struct fwdb_header *db, for (i = 0; i < coll->n_rules; i++) { __be16 *rules_ptr = (void *)((u8 *)coll + ALIGN(coll->len, 2)); unsigned int rule_ptr = be16_to_cpu(rules_ptr[i]) << 2; - struct fwdb_rule *rrule = (void *)((u8 *)db + rule_ptr); - struct fwdb_wmm_rule *wmm; - unsigned int wmm_ptr; + struct fwdb_rule *rule = (void *)((u8 *)db + rule_ptr); - if (rrule->len < offsetofend(struct fwdb_rule, wmm_ptr)) + if (rule->len < offsetofend(struct fwdb_rule, wmm_ptr)) continue; - if (freq >= KHZ_TO_MHZ(be32_to_cpu(rrule->start)) && - freq <= KHZ_TO_MHZ(be32_to_cpu(rrule->end))) { - wmm_ptr = be16_to_cpu(rrule->wmm_ptr) << 2; - wmm = (void *)((u8 *)db + wmm_ptr); - set_wmm_rule(rule, wmm); + if (freq >= KHZ_TO_MHZ(be32_to_cpu(rule->start)) && + freq <= KHZ_TO_MHZ(be32_to_cpu(rule->end))) { + set_wmm_rule(db, country, rule, rrule); return 0; } } @@ -972,12 +982,8 @@ static int regdb_query_country(const struct fwdb_header *db, if (rule->len >= offsetofend(struct fwdb_rule, cac_timeout)) rrule->dfs_cac_ms = 1000 * be16_to_cpu(rule->cac_timeout); - if (rule->len >= offsetofend(struct fwdb_rule, wmm_ptr)) { - u32 wmm_ptr = be16_to_cpu(rule->wmm_ptr) << 2; - struct fwdb_wmm_rule *wmm = (void *)((u8 *)db + wmm_ptr); - - set_wmm_rule(rrule, wmm); - } + if (rule->len >= offsetofend(struct fwdb_rule, wmm_ptr)) + set_wmm_rule(db, country, rule, rrule); } return reg_schedule_apply(regdom); @@ -3186,13 +3192,59 @@ static void restore_regulatory_settings(bool reset_user) schedule_work(&reg_work); } +static bool is_wiphy_all_set_reg_flag(enum ieee80211_regulatory_flags flag) +{ + struct cfg80211_registered_device *rdev; + struct wireless_dev *wdev; + + list_for_each_entry(rdev, &cfg80211_rdev_list, list) { + list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) { + wdev_lock(wdev); + if (!(wdev->wiphy->regulatory_flags & flag)) { + wdev_unlock(wdev); + return false; + } + wdev_unlock(wdev); + } + } + + return
true; } + void regulatory_hint_disconnect(void) { + /* Restore of regulatory settings is not required when wiphy(s) + * ignore IE from connected access point but clearance of beacon hints + * is required when wiphy(s) supports beacon hints. + */ + if (is_wiphy_all_set_reg_flag(REGULATORY_COUNTRY_IE_IGNORE)) { + struct reg_beacon *reg_beacon, *btmp; + + if (is_wiphy_all_set_reg_flag(REGULATORY_DISABLE_BEACON_HINTS)) + return; + + spin_lock_bh(&reg_pending_beacons_lock); + list_for_each_entry_safe(reg_beacon, btmp, + &reg_pending_beacons, list) { + list_del(&reg_beacon->list); + kfree(reg_beacon); + } + spin_unlock_bh(&reg_pending_beacons_lock); + + list_for_each_entry_safe(reg_beacon, btmp, + &reg_beacon_list, list) { + list_del(&reg_beacon->list); + kfree(reg_beacon); + } + + return; + } + pr_debug("All devices are disconnected, going to restore regulatory settings\n"); restore_regulatory_settings(false); } -static bool freq_is_chan_12_13_14(u16 freq) +static bool freq_is_chan_12_13_14(u32 freq) { if (freq == ieee80211_channel_to_frequency(12, NL80211_BAND_2GHZ) || freq == ieee80211_channel_to_frequency(13, NL80211_BAND_2GHZ) || @@ -3779,6 +3831,15 @@ static int __init regulatory_init_db(void) { int err; + /* + * It's possible that - due to other bugs/issues - cfg80211 + * never called regulatory_init() below, or that it failed; + * in that case, don't try to do any further work here as + * it's doomed to lead to crashes. + */ + if (IS_ERR_OR_NULL(reg_pdev)) + return -EINVAL; + err = load_builtin_regdb_keys(); if (err) return err; diff --git a/net/wireless/trace.h b/net/wireless/trace.h index 7c73510b161f..c6a9446b4e6b 100644 --- a/net/wireless/trace.h +++ b/net/wireless/trace.h @@ -112,7 +112,7 @@ } while (0) #define CHAN_ENTRY __field(enum nl80211_band, band) \ - __field(u16, center_freq) + __field(u32, center_freq) #define CHAN_ASSIGN(chan) \ do { \ if (chan) { \ @@ -2368,6 +2368,140 @@ TRACE_EVENT(rdev_external_auth, __entry->bssid, __entry->ssid, __entry->status) ); +TRACE_EVENT(rdev_start_radar_detection, + TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, + struct cfg80211_chan_def *chandef, + u32 cac_time_ms), + TP_ARGS(wiphy, netdev, chandef, cac_time_ms), + TP_STRUCT__entry( + WIPHY_ENTRY + NETDEV_ENTRY + CHAN_DEF_ENTRY + __field(u32, cac_time_ms) + ), + TP_fast_assign( + WIPHY_ASSIGN; + NETDEV_ASSIGN; + CHAN_DEF_ASSIGN(chandef); + __entry->cac_time_ms = cac_time_ms; + ), + TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT + ", cac_time_ms=%u", + WIPHY_PR_ARG, NETDEV_PR_ARG, CHAN_DEF_PR_ARG, + __entry->cac_time_ms) +); + +TRACE_EVENT(rdev_set_mcast_rate, + TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, + int *mcast_rate), + TP_ARGS(wiphy, netdev, mcast_rate), + TP_STRUCT__entry( + WIPHY_ENTRY + NETDEV_ENTRY + __array(int, mcast_rate, NUM_NL80211_BANDS) + ), + TP_fast_assign( + WIPHY_ASSIGN; + NETDEV_ASSIGN; + memcpy(__entry->mcast_rate, mcast_rate, + sizeof(int) * NUM_NL80211_BANDS); + ), + TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " + "mcast_rates [2.4GHz=0x%x, 5.2GHz=0x%x, 60GHz=0x%x]", + WIPHY_PR_ARG, NETDEV_PR_ARG, + __entry->mcast_rate[NL80211_BAND_2GHZ], + __entry->mcast_rate[NL80211_BAND_5GHZ], + __entry->mcast_rate[NL80211_BAND_60GHZ]) +); + +TRACE_EVENT(rdev_set_coalesce, + TP_PROTO(struct wiphy *wiphy, struct cfg80211_coalesce *coalesce), + TP_ARGS(wiphy, coalesce), + TP_STRUCT__entry( + WIPHY_ENTRY + __field(int, n_rules) + ), + TP_fast_assign( + WIPHY_ASSIGN; + __entry->n_rules = coalesce ?
coalesce->n_rules : 0; + ), + TP_printk(WIPHY_PR_FMT ", n_rules=%d", + WIPHY_PR_ARG, __entry->n_rules) +); + +DEFINE_EVENT(wiphy_wdev_evt, rdev_abort_scan, + TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev), + TP_ARGS(wiphy, wdev) +); + +TRACE_EVENT(rdev_set_multicast_to_unicast, + TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, + const bool enabled), + TP_ARGS(wiphy, netdev, enabled), + TP_STRUCT__entry( + WIPHY_ENTRY + NETDEV_ENTRY + __field(bool, enabled) + ), + TP_fast_assign( + WIPHY_ASSIGN; + NETDEV_ASSIGN; + __entry->enabled = enabled; + ), + TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", unicast: %s", + WIPHY_PR_ARG, NETDEV_PR_ARG, + BOOL_TO_STR(__entry->enabled)) +); + +DEFINE_EVENT(wiphy_wdev_evt, rdev_get_txq_stats, + TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev), + TP_ARGS(wiphy, wdev) +); + +TRACE_EVENT(rdev_get_ftm_responder_stats, + TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, + struct cfg80211_ftm_responder_stats *ftm_stats), + + TP_ARGS(wiphy, netdev, ftm_stats), + + TP_STRUCT__entry( + WIPHY_ENTRY + NETDEV_ENTRY + __field(u64, timestamp) + __field(u32, success_num) + __field(u32, partial_num) + __field(u32, failed_num) + __field(u32, asap_num) + __field(u32, non_asap_num) + __field(u64, duration) + __field(u32, unknown_triggers) + __field(u32, reschedule) + __field(u32, out_of_window) + ), + + TP_fast_assign( + WIPHY_ASSIGN; + NETDEV_ASSIGN; + __entry->success_num = ftm_stats->success_num; + __entry->partial_num = ftm_stats->partial_num; + __entry->failed_num = ftm_stats->failed_num; + __entry->asap_num = ftm_stats->asap_num; + __entry->non_asap_num = ftm_stats->non_asap_num; + __entry->duration = ftm_stats->total_duration_ms; + __entry->unknown_triggers = ftm_stats->unknown_triggers_num; + __entry->reschedule = ftm_stats->reschedule_requests_num; + __entry->out_of_window = ftm_stats->out_of_window_triggers_num; + ), + + TP_printk(WIPHY_PR_FMT "Ftm responder stats: success %u, partial %u, " + "failed %u, asap %u, non asap %u, total duration %llu, unknown " + "triggers %u, rescheduled %u, out of window %u", WIPHY_PR_ARG, + __entry->success_num, __entry->partial_num, __entry->failed_num, + __entry->asap_num, __entry->non_asap_num, __entry->duration, + __entry->unknown_triggers, __entry->reschedule, + __entry->out_of_window) +); + /************************************************************* * cfg80211 exported functions traces * *************************************************************/ @@ -3160,105 +3294,6 @@ TRACE_EVENT(cfg80211_stop_iface, TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT, WIPHY_PR_ARG, WDEV_PR_ARG) ); - -TRACE_EVENT(rdev_start_radar_detection, - TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, - struct cfg80211_chan_def *chandef, - u32 cac_time_ms), - TP_ARGS(wiphy, netdev, chandef, cac_time_ms), - TP_STRUCT__entry( - WIPHY_ENTRY - NETDEV_ENTRY - CHAN_DEF_ENTRY - __field(u32, cac_time_ms) - ), - TP_fast_assign( - WIPHY_ASSIGN; - NETDEV_ASSIGN; - CHAN_DEF_ASSIGN(chandef); - __entry->cac_time_ms = cac_time_ms; - ), - TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT - ", cac_time_ms=%u", - WIPHY_PR_ARG, NETDEV_PR_ARG, CHAN_DEF_PR_ARG, - __entry->cac_time_ms) -); - -TRACE_EVENT(rdev_set_mcast_rate, - TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, - int *mcast_rate), - TP_ARGS(wiphy, netdev, mcast_rate), - TP_STRUCT__entry( - WIPHY_ENTRY - NETDEV_ENTRY - __array(int, mcast_rate, NUM_NL80211_BANDS) - ), - TP_fast_assign( - WIPHY_ASSIGN; - NETDEV_ASSIGN; - memcpy(__entry->mcast_rate, mcast_rate, 
- sizeof(int) * NUM_NL80211_BANDS); - ), - TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " - "mcast_rates [2.4GHz=0x%x, 5.2GHz=0x%x, 60GHz=0x%x]", - WIPHY_PR_ARG, NETDEV_PR_ARG, - __entry->mcast_rate[NL80211_BAND_2GHZ], - __entry->mcast_rate[NL80211_BAND_5GHZ], - __entry->mcast_rate[NL80211_BAND_60GHZ]) -); - -TRACE_EVENT(rdev_set_coalesce, - TP_PROTO(struct wiphy *wiphy, struct cfg80211_coalesce *coalesce), - TP_ARGS(wiphy, coalesce), - TP_STRUCT__entry( - WIPHY_ENTRY - __field(int, n_rules) - ), - TP_fast_assign( - WIPHY_ASSIGN; - __entry->n_rules = coalesce ? coalesce->n_rules : 0; - ), - TP_printk(WIPHY_PR_FMT ", n_rules=%d", - WIPHY_PR_ARG, __entry->n_rules) -); - -DEFINE_EVENT(wiphy_wdev_evt, rdev_abort_scan, - TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev), - TP_ARGS(wiphy, wdev) -); - -TRACE_EVENT(rdev_set_multicast_to_unicast, - TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, - const bool enabled), - TP_ARGS(wiphy, netdev, enabled), - TP_STRUCT__entry( - WIPHY_ENTRY - NETDEV_ENTRY - __field(bool, enabled) - ), - TP_fast_assign( - WIPHY_ASSIGN; - NETDEV_ASSIGN; - __entry->enabled = enabled; - ), - TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", unicast: %s", - WIPHY_PR_ARG, NETDEV_PR_ARG, - BOOL_TO_STR(__entry->enabled)) -); - -TRACE_EVENT(rdev_get_txq_stats, - TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev), - TP_ARGS(wiphy, wdev), - TP_STRUCT__entry( - WIPHY_ENTRY - WDEV_ENTRY - ), - TP_fast_assign( - WIPHY_ASSIGN; - WDEV_ASSIGN; - ), - TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT, WIPHY_PR_ARG, WDEV_PR_ARG) -); #endif /* !__RDEV_OPS_TRACE || TRACE_HEADER_MULTI_READ */ #undef TRACE_INCLUDE_PATH diff --git a/net/wireless/util.c b/net/wireless/util.c index 959ed3acd240..ef14d80ca03e 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -5,17 +5,20 @@ * Copyright 2007-2009 Johannes Berg <johannes@sipsolutions.net> * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright 2017 Intel Deutschland GmbH + * Copyright (C) 2018 Intel Corporation */ #include <linux/export.h> #include <linux/bitops.h> #include <linux/etherdevice.h> #include <linux/slab.h> +#include <linux/ieee80211.h> #include <net/cfg80211.h> #include <net/ip.h> #include <net/dsfield.h> #include <linux/if_vlan.h> #include <linux/mpls.h> #include <linux/gcd.h> +#include <linux/bitfield.h> #include "core.h" #include "rdev-ops.h" @@ -88,7 +91,7 @@ int ieee80211_channel_to_frequency(int chan, enum nl80211_band band) return 5000 + chan * 5; break; case NL80211_BAND_60GHZ: - if (chan < 5) + if (chan < 7) return 56160 + chan * 2160; break; default: @@ -109,7 +112,7 @@ int ieee80211_frequency_to_channel(int freq) return (freq - 4000) / 5; else if (freq <= 45000) /* DMG band lower limit */ return (freq - 5000) / 5; - else if (freq >= 58320 && freq <= 64800) + else if (freq >= 58320 && freq <= 70200) return (freq - 56160) / 2160; else return 0; @@ -1568,7 +1571,7 @@ bool ieee80211_chandef_to_operating_class(struct cfg80211_chan_def *chandef, } /* 56.16 GHz, channel 1..4 */ - if (freq >= 56160 + 2160 * 1 && freq <= 56160 + 2160 * 4) { + if (freq >= 56160 + 2160 * 1 && freq <= 56160 + 2160 * 6) { if (chandef->width >= NL80211_CHAN_WIDTH_40) return false; @@ -1893,3 +1896,154 @@ EXPORT_SYMBOL(rfc1042_header); const unsigned char bridge_tunnel_header[] __aligned(2) = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 }; EXPORT_SYMBOL(bridge_tunnel_header); + +/* Layer 2 Update frame (802.2 Type 1 LLC XID Update response) */ +struct iapp_layer2_update { + u8 da[ETH_ALEN]; /* broadcast */ + u8 sa[ETH_ALEN]; /* STA 
addr */ + __be16 len; /* 6 */ + u8 dsap; /* 0 */ + u8 ssap; /* 0 */ + u8 control; + u8 xid_info[3]; +} __packed; + +void cfg80211_send_layer2_update(struct net_device *dev, const u8 *addr) +{ + struct iapp_layer2_update *msg; + struct sk_buff *skb; + + /* Send Level 2 Update Frame to update forwarding tables in layer 2 + * bridge devices */ + + skb = dev_alloc_skb(sizeof(*msg)); + if (!skb) + return; + msg = skb_put(skb, sizeof(*msg)); + + /* 802.2 Type 1 Logical Link Control (LLC) Exchange Identifier (XID) + * Update response frame; IEEE Std 802.2-1998, 5.4.1.2.1 */ + + eth_broadcast_addr(msg->da); + ether_addr_copy(msg->sa, addr); + msg->len = htons(6); + msg->dsap = 0; + msg->ssap = 0x01; /* NULL LSAP, CR Bit: Response */ + msg->control = 0xaf; /* XID response lsb.1111F101. + * F=0 (no poll command; unsolicited frame) */ + msg->xid_info[0] = 0x81; /* XID format identifier */ + msg->xid_info[1] = 1; /* LLC types/classes: Type 1 LLC */ + msg->xid_info[2] = 0; /* XID sender's receive window size (RW) */ + + skb->dev = dev; + skb->protocol = eth_type_trans(skb, dev); + memset(skb->cb, 0, sizeof(skb->cb)); + netif_rx_ni(skb); +} +EXPORT_SYMBOL(cfg80211_send_layer2_update); + +int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap, + enum ieee80211_vht_chanwidth bw, + int mcs, bool ext_nss_bw_capable) +{ + u16 map = le16_to_cpu(cap->supp_mcs.rx_mcs_map); + int max_vht_nss = 0; + int ext_nss_bw; + int supp_width; + int i, mcs_encoding; + + if (map == 0xffff) + return 0; + + if (WARN_ON(mcs > 9)) + return 0; + if (mcs <= 7) + mcs_encoding = 0; + else if (mcs == 8) + mcs_encoding = 1; + else + mcs_encoding = 2; + + /* find max_vht_nss for the given MCS */ + for (i = 7; i >= 0; i--) { + int supp = (map >> (2 * i)) & 3; + + if (supp == 3) + continue; + + if (supp >= mcs_encoding) { + max_vht_nss = i; + break; + } + } + + if (!(cap->supp_mcs.tx_mcs_map & + cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE))) + return max_vht_nss; + + ext_nss_bw = le32_get_bits(cap->vht_cap_info, + IEEE80211_VHT_CAP_EXT_NSS_BW_MASK); + supp_width = le32_get_bits(cap->vht_cap_info, + IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK); + + /* if not capable, treat ext_nss_bw as 0 */ + if (!ext_nss_bw_capable) + ext_nss_bw = 0; + + /* This is invalid */ + if (supp_width == 3) + return 0; + + /* This is an invalid combination so pretend nothing is supported */ + if (supp_width == 2 && (ext_nss_bw == 1 || ext_nss_bw == 2)) + return 0; + + /* + * Cover all the special cases according to IEEE 802.11-2016 + * Table 9-250. All other cases are either factor of 1 or not + * valid/supported. 
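+	 * For example, a device advertising supp_width == 0 (80 MHz only) with + * ext_nss_bw == 3 can still operate at 160 MHz, but only at 3/4 of its + * nominal max NSS (rounded up), as computed in the 160 MHz case below.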
+ */ + switch (bw) { + case IEEE80211_VHT_CHANWIDTH_USE_HT: + case IEEE80211_VHT_CHANWIDTH_80MHZ: + if ((supp_width == 1 || supp_width == 2) && + ext_nss_bw == 3) + return 2 * max_vht_nss; + break; + case IEEE80211_VHT_CHANWIDTH_160MHZ: + if (supp_width == 0 && + (ext_nss_bw == 1 || ext_nss_bw == 2)) + return DIV_ROUND_UP(max_vht_nss, 2); + if (supp_width == 0 && + ext_nss_bw == 3) + return DIV_ROUND_UP(3 * max_vht_nss, 4); + if (supp_width == 1 && + ext_nss_bw == 3) + return 2 * max_vht_nss; + break; + case IEEE80211_VHT_CHANWIDTH_80P80MHZ: + if (supp_width == 0 && + (ext_nss_bw == 1 || ext_nss_bw == 2)) + return 0; /* not possible */ + if (supp_width == 0 && + ext_nss_bw == 2) + return DIV_ROUND_UP(max_vht_nss, 2); + if (supp_width == 0 && + ext_nss_bw == 3) + return DIV_ROUND_UP(3 * max_vht_nss, 4); + if (supp_width == 1 && + ext_nss_bw == 0) + return 0; /* not possible */ + if (supp_width == 1 && + ext_nss_bw == 1) + return DIV_ROUND_UP(max_vht_nss, 2); + if (supp_width == 1 && + ext_nss_bw == 2) + return DIV_ROUND_UP(3 * max_vht_nss, 4); + break; + } + + /* not covered or invalid combination received */ + return max_vht_nss; +} +EXPORT_SYMBOL(ieee80211_get_vht_max_nss); diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c index bfe2dbea480b..a264cf2accd0 100644 --- a/net/xdp/xdp_umem.c +++ b/net/xdp/xdp_umem.c @@ -32,37 +32,49 @@ void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs) { unsigned long flags; - if (xs->dev) { - spin_lock_irqsave(&umem->xsk_list_lock, flags); - list_del_rcu(&xs->list); - spin_unlock_irqrestore(&umem->xsk_list_lock, flags); - - if (umem->zc) - synchronize_net(); - } + spin_lock_irqsave(&umem->xsk_list_lock, flags); + list_del_rcu(&xs->list); + spin_unlock_irqrestore(&umem->xsk_list_lock, flags); } -int xdp_umem_query(struct net_device *dev, u16 queue_id) +/* The umem is stored both in the _rx struct and the _tx struct as we do + * not know if the device has more tx queues than rx, or the opposite. + * This might also change during run time. + */ +static void xdp_reg_umem_at_qid(struct net_device *dev, struct xdp_umem *umem, + u16 queue_id) { - struct netdev_bpf bpf; + if (queue_id < dev->real_num_rx_queues) + dev->_rx[queue_id].umem = umem; + if (queue_id < dev->real_num_tx_queues) + dev->_tx[queue_id].umem = umem; +} - ASSERT_RTNL(); +struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev, + u16 queue_id) +{ + if (queue_id < dev->real_num_rx_queues) + return dev->_rx[queue_id].umem; + if (queue_id < dev->real_num_tx_queues) + return dev->_tx[queue_id].umem; - memset(&bpf, 0, sizeof(bpf)); - bpf.command = XDP_QUERY_XSK_UMEM; - bpf.xsk.queue_id = queue_id; + return NULL; +} - if (!dev->netdev_ops->ndo_bpf) - return 0; - return dev->netdev_ops->ndo_bpf(dev, &bpf) ?: !!bpf.xsk.umem; +static void xdp_clear_umem_at_qid(struct net_device *dev, u16 queue_id) +{ + if (queue_id < dev->real_num_rx_queues) + dev->_rx[queue_id].umem = NULL; + if (queue_id < dev->real_num_tx_queues) + dev->_tx[queue_id].umem = NULL; } int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev, - u32 queue_id, u16 flags) + u16 queue_id, u16 flags) { bool force_zc, force_copy; struct netdev_bpf bpf; - int err; + int err = 0; force_zc = flags & XDP_ZEROCOPY; force_copy = flags & XDP_COPY; @@ -70,19 +82,23 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev, if (force_zc && force_copy) return -EINVAL; - if (force_copy) - return 0; - - if (!dev->netdev_ops->ndo_bpf || !dev->netdev_ops->ndo_xsk_async_xmit) - return force_zc ? 
-EOPNOTSUPP : 0; /* fail or fallback */ + rtnl_lock(); + if (xdp_get_umem_from_qid(dev, queue_id)) { + err = -EBUSY; + goto out_rtnl_unlock; + } - bpf.command = XDP_QUERY_XSK_UMEM; + xdp_reg_umem_at_qid(dev, umem, queue_id); + umem->dev = dev; + umem->queue_id = queue_id; + if (force_copy) + /* For copy-mode, we are done. */ + goto out_rtnl_unlock; - rtnl_lock(); - err = xdp_umem_query(dev, queue_id); - if (err) { - err = err < 0 ? -EOPNOTSUPP : -EBUSY; - goto err_rtnl_unlock; + if (!dev->netdev_ops->ndo_bpf || + !dev->netdev_ops->ndo_xsk_async_xmit) { + err = -EOPNOTSUPP; + goto err_unreg_umem; } bpf.command = XDP_SETUP_XSK_UMEM; @@ -91,18 +107,20 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev, err = dev->netdev_ops->ndo_bpf(dev, &bpf); if (err) - goto err_rtnl_unlock; + goto err_unreg_umem; rtnl_unlock(); dev_hold(dev); - umem->dev = dev; - umem->queue_id = queue_id; umem->zc = true; return 0; -err_rtnl_unlock: +err_unreg_umem: + xdp_clear_umem_at_qid(dev, queue_id); + if (!force_zc) + err = 0; /* fallback to copy mode */ +out_rtnl_unlock: rtnl_unlock(); - return force_zc ? err : 0; /* fail or fallback */ + return err; } static void xdp_umem_clear_dev(struct xdp_umem *umem) @@ -110,7 +128,7 @@ static void xdp_umem_clear_dev(struct xdp_umem *umem) struct netdev_bpf bpf; int err; - if (umem->dev) { + if (umem->zc) { bpf.command = XDP_SETUP_XSK_UMEM; bpf.xsk.umem = NULL; bpf.xsk.queue_id = umem->queue_id; @@ -121,9 +139,17 @@ static void xdp_umem_clear_dev(struct xdp_umem *umem) if (err) WARN(1, "failed to disable umem!\n"); + } + + if (umem->dev) { + rtnl_lock(); + xdp_clear_umem_at_qid(umem->dev, umem->queue_id); + rtnl_unlock(); + } + if (umem->zc) { dev_put(umem->dev); - umem->dev = NULL; + umem->zc = false; } } @@ -167,6 +193,8 @@ static void xdp_umem_release(struct xdp_umem *umem) umem->cq = NULL; } + xsk_reuseq_destroy(umem); + xdp_umem_unpin_pages(umem); task = get_pid_task(umem->pid, PIDTYPE_PID); @@ -314,8 +342,8 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr) umem->pid = get_task_pid(current, PIDTYPE_PID); umem->address = (unsigned long)addr; - umem->props.chunk_mask = ~((u64)chunk_size - 1); - umem->props.size = size; + umem->chunk_mask = ~((u64)chunk_size - 1); + umem->size = size; umem->headroom = headroom; umem->chunk_size_nohr = chunk_size - headroom; umem->npgs = size / PAGE_SIZE; diff --git a/net/xdp/xdp_umem.h b/net/xdp/xdp_umem.h index f11560334f88..27603227601b 100644 --- a/net/xdp/xdp_umem.h +++ b/net/xdp/xdp_umem.h @@ -8,18 +8,8 @@ #include <net/xdp_sock.h> -static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr) -{ - return umem->pages[addr >> PAGE_SHIFT].addr + (addr & (PAGE_SIZE - 1)); -} - -static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr) -{ - return umem->pages[addr >> PAGE_SHIFT].dma + (addr & (PAGE_SIZE - 1)); -} - int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev, - u32 queue_id, u16 flags); + u16 queue_id, u16 flags); bool xdp_umem_validate_queues(struct xdp_umem *umem); void xdp_get_umem(struct xdp_umem *umem); void xdp_put_umem(struct xdp_umem *umem); diff --git a/net/xdp/xdp_umem_props.h b/net/xdp/xdp_umem_props.h deleted file mode 100644 index 40eab10dfc49..000000000000 --- a/net/xdp/xdp_umem_props.h +++ /dev/null @@ -1,14 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* XDP user-space packet buffer - * Copyright(c) 2018 Intel Corporation. 
- */ - -#ifndef XDP_UMEM_PROPS_H_ -#define XDP_UMEM_PROPS_H_ - -struct xdp_umem_props { - u64 chunk_mask; - u64 size; -}; - -#endif /* XDP_UMEM_PROPS_H_ */ diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c index 4e937cd7c17d..0577cd49aa72 100644 --- a/net/xdp/xsk.c +++ b/net/xdp/xsk.c @@ -55,20 +55,30 @@ EXPORT_SYMBOL(xsk_umem_discard_addr); static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len) { - void *buffer; + void *to_buf, *from_buf; + u32 metalen; u64 addr; int err; if (!xskq_peek_addr(xs->umem->fq, &addr) || - len > xs->umem->chunk_size_nohr) { + len > xs->umem->chunk_size_nohr - XDP_PACKET_HEADROOM) { xs->rx_dropped++; return -ENOSPC; } addr += xs->umem->headroom; - buffer = xdp_umem_get_data(xs->umem, addr); - memcpy(buffer, xdp->data, len); + if (unlikely(xdp_data_meta_unsupported(xdp))) { + from_buf = xdp->data; + metalen = 0; + } else { + from_buf = xdp->data_meta; + metalen = xdp->data - xdp->data_meta; + } + + to_buf = xdp_umem_get_data(xs->umem, addr); + memcpy(to_buf, from_buf, len + metalen); + addr += metalen; err = xskq_produce_batch_desc(xs->rx, addr, len); if (!err) { xskq_discard_addr(xs->umem->fq); @@ -111,6 +121,7 @@ void xsk_flush(struct xdp_sock *xs) int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp) { + u32 metalen = xdp->data - xdp->data_meta; u32 len = xdp->data_end - xdp->data; void *buffer; u64 addr; @@ -120,7 +131,7 @@ int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp) return -EINVAL; if (!xskq_peek_addr(xs->umem->fq, &addr) || - len > xs->umem->chunk_size_nohr) { + len > xs->umem->chunk_size_nohr - XDP_PACKET_HEADROOM) { xs->rx_dropped++; return -ENOSPC; } @@ -128,7 +139,8 @@ int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp) addr += xs->umem->headroom; buffer = xdp_umem_get_data(xs->umem, addr); - memcpy(buffer, xdp->data, len); + memcpy(buffer, xdp->data_meta, len + metalen); + addr += metalen; err = xskq_produce_batch_desc(xs->rx, addr, len); if (!err) { xskq_discard_addr(xs->umem->fq); @@ -343,12 +355,18 @@ static int xsk_release(struct socket *sock) local_bh_enable(); if (xs->dev) { + struct net_device *dev = xs->dev; + /* Wait for driver to stop using the xdp socket. */ - synchronize_net(); - dev_put(xs->dev); + xdp_del_sk_umem(xs->umem, xs); xs->dev = NULL; + synchronize_net(); + dev_put(dev); } + xskq_destroy(xs->rx); + xskq_destroy(xs->tx); + sock_orphan(sk); sock->sk = NULL; @@ -407,13 +425,6 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len) } qid = sxdp->sxdp_queue_id; - - if ((xs->rx && qid >= dev->real_num_rx_queues) || - (xs->tx && qid >= dev->real_num_tx_queues)) { - err = -EINVAL; - goto out_unlock; - } - flags = sxdp->sxdp_flags; if (flags & XDP_SHARED_UMEM) { @@ -458,8 +469,10 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len) goto out_unlock; } else { /* This xsk has its own umem. 
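* Propagate the umem's size and chunk mask to its fill and completion rings before the umem is bound to the device below.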
*/ - xskq_set_umem(xs->umem->fq, &xs->umem->props); - xskq_set_umem(xs->umem->cq, &xs->umem->props); + xskq_set_umem(xs->umem->fq, xs->umem->size, + xs->umem->chunk_mask); + xskq_set_umem(xs->umem->cq, xs->umem->size, + xs->umem->chunk_mask); err = xdp_umem_assign_dev(xs->umem, dev, qid, flags); if (err) @@ -469,8 +482,8 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len) xs->dev = dev; xs->zc = xs->umem->zc; xs->queue_id = qid; - xskq_set_umem(xs->rx, &xs->umem->props); - xskq_set_umem(xs->tx, &xs->umem->props); + xskq_set_umem(xs->rx, xs->umem->size, xs->umem->chunk_mask); + xskq_set_umem(xs->tx, xs->umem->size, xs->umem->chunk_mask); xdp_add_sk_umem(xs->umem, xs); out_unlock: @@ -707,9 +720,6 @@ static void xsk_destruct(struct sock *sk) if (!sock_flag(sk, SOCK_DEAD)) return; - xskq_destroy(xs->rx); - xskq_destroy(xs->tx); - xdp_del_sk_umem(xs->umem, xs); xdp_put_umem(xs->umem); sk_refcnt_debug_dec(sk); diff --git a/net/xdp/xsk_queue.c b/net/xdp/xsk_queue.c index 6c32e92e98fc..b66504592d9b 100644 --- a/net/xdp/xsk_queue.c +++ b/net/xdp/xsk_queue.c @@ -3,16 +3,19 @@ * Copyright(c) 2018 Intel Corporation. */ +#include <linux/log2.h> #include <linux/slab.h> +#include <linux/overflow.h> #include "xsk_queue.h" -void xskq_set_umem(struct xsk_queue *q, struct xdp_umem_props *umem_props) +void xskq_set_umem(struct xsk_queue *q, u64 size, u64 chunk_mask) { if (!q) return; - q->umem_props = *umem_props; + q->size = size; + q->chunk_mask = chunk_mask; } static u32 xskq_umem_get_ring_size(struct xsk_queue *q) @@ -61,3 +64,56 @@ void xskq_destroy(struct xsk_queue *q) page_frag_free(q->ring); kfree(q); } + +struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries) +{ + struct xdp_umem_fq_reuse *newq; + + /* Check for overflow */ + if (nentries > (u32)roundup_pow_of_two(nentries)) + return NULL; + nentries = roundup_pow_of_two(nentries); + + newq = kvmalloc(struct_size(newq, handles, nentries), GFP_KERNEL); + if (!newq) + return NULL; + memset(newq, 0, offsetof(typeof(*newq), handles)); + + newq->nentries = nentries; + return newq; +} +EXPORT_SYMBOL_GPL(xsk_reuseq_prepare); + +struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem, + struct xdp_umem_fq_reuse *newq) +{ + struct xdp_umem_fq_reuse *oldq = umem->fq_reuse; + + if (!oldq) { + umem->fq_reuse = newq; + return NULL; + } + + if (newq->nentries < oldq->length) + return newq; + + memcpy(newq->handles, oldq->handles, + array_size(oldq->length, sizeof(u64))); + newq->length = oldq->length; + + umem->fq_reuse = newq; + return oldq; +} +EXPORT_SYMBOL_GPL(xsk_reuseq_swap); + +void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq) +{ + kvfree(rq); +} +EXPORT_SYMBOL_GPL(xsk_reuseq_free); + +void xsk_reuseq_destroy(struct xdp_umem *umem) +{ + xsk_reuseq_free(umem->fq_reuse); + umem->fq_reuse = NULL; +} diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h index 8a64b150be54..bcb5cbb40419 100644 --- a/net/xdp/xsk_queue.h +++ b/net/xdp/xsk_queue.h @@ -31,7 +31,8 @@ struct xdp_umem_ring { }; struct xsk_queue { - struct xdp_umem_props umem_props; + u64 chunk_mask; + u64 size; u32 ring_mask; u32 nentries; u32 prod_head; @@ -78,7 +79,7 @@ static inline u32 xskq_nb_free(struct xsk_queue *q, u32 producer, u32 dcnt) static inline bool xskq_is_valid_addr(struct xsk_queue *q, u64 addr) { - if (addr >= q->umem_props.size) { + if (addr >= q->size) { q->invalid_descs++; return false; } @@ -92,7 +93,7 @@ static inline u64 *xskq_validate_addr(struct xsk_queue *q, u64 *addr) struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring; 
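/* the consumer index is free-running; masking with ring_mask (nentries - 1, ring sizes are powers of two) selects the slot to read */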
unsigned int idx = q->cons_tail & q->ring_mask; - *addr = READ_ONCE(ring->desc[idx]) & q->umem_props.chunk_mask; + *addr = READ_ONCE(ring->desc[idx]) & q->chunk_mask; if (xskq_is_valid_addr(q, *addr)) return addr; @@ -173,8 +174,8 @@ static inline bool xskq_is_valid_desc(struct xsk_queue *q, struct xdp_desc *d) if (!xskq_is_valid_addr(q, d->addr)) return false; - if (((d->addr + d->len) & q->umem_props.chunk_mask) != - (d->addr & q->umem_props.chunk_mask)) { + if (((d->addr + d->len) & q->chunk_mask) != + (d->addr & q->chunk_mask)) { q->invalid_descs++; return false; } @@ -253,8 +254,11 @@ static inline bool xskq_empty_desc(struct xsk_queue *q) return xskq_nb_free(q, q->prod_tail, q->nentries) == q->nentries; } -void xskq_set_umem(struct xsk_queue *q, struct xdp_umem_props *umem_props); +void xskq_set_umem(struct xsk_queue *q, u64 size, u64 chunk_mask); struct xsk_queue *xskq_create(u32 nentries, bool umem_queue); void xskq_destroy(struct xsk_queue *q_ops); +/* Executed by the core when the entire UMEM gets freed */ +void xsk_reuseq_destroy(struct xdp_umem *umem); + #endif /* _LINUX_XSK_QUEUE_H */ diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c index 5611b7521020..144c137886b1 100644 --- a/net/xfrm/xfrm_device.c +++ b/net/xfrm/xfrm_device.c @@ -99,7 +99,7 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur do { struct sk_buff *nskb = skb2->next; - skb2->next = NULL; + skb_mark_not_on_list(skb2); xo = xfrm_offload(skb2); xo->flags |= XFRM_DEV_RESUME; @@ -192,9 +192,13 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, err = dev->xfrmdev_ops->xdo_dev_state_add(x); if (err) { + xso->num_exthdrs = 0; + xso->flags = 0; xso->dev = NULL; dev_put(dev); - return err; + + if (err != -EOPNOTSUPP) + return err; } return 0; diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c index 31acc6f33d98..dc5b20bf29cf 100644 --- a/net/xfrm/xfrm_interface.c +++ b/net/xfrm/xfrm_interface.c @@ -469,9 +469,9 @@ static int xfrmi4_err(struct sk_buff *skb, u32 info) } if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) - ipv4_update_pmtu(skb, net, info, 0, 0, protocol, 0); + ipv4_update_pmtu(skb, net, info, 0, protocol); else - ipv4_redirect(skb, net, 0, 0, protocol, 0); + ipv4_redirect(skb, net, 0, protocol); xfrm_state_put(x); return 0; @@ -742,7 +742,7 @@ nla_put_failure: return -EMSGSIZE; } -struct net *xfrmi_get_link_net(const struct net_device *dev) +static struct net *xfrmi_get_link_net(const struct net_device *dev) { struct xfrm_if *xi = netdev_priv(dev); diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c index 261995d37ced..4ae87c5ce2e3 100644 --- a/net/xfrm/xfrm_output.c +++ b/net/xfrm/xfrm_output.c @@ -193,7 +193,7 @@ static int xfrm_output_gso(struct net *net, struct sock *sk, struct sk_buff *skb struct sk_buff *nskb = segs->next; int err; - segs->next = NULL; + skb_mark_not_on_list(segs); err = xfrm_output2(net, sk, segs); if (unlikely(err)) { diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index df7ca2dabc48..ca7a207b81a9 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -1007,7 +1007,7 @@ static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb) int err; err = nlmsg_parse(cb->nlh, 0, attrs, XFRMA_MAX, xfrma_policy, - NULL); + cb->extack); if (err < 0) return err; diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile index 36f9f41d094b..be0a961450bc 100644 --- a/samples/bpf/Makefile +++ b/samples/bpf/Makefile @@ -153,6 +153,7 @@ always += tcp_cong_kern.o always += tcp_iw_kern.o always += 
tcp_clamp_kern.o always += tcp_basertt_kern.o +always += tcp_tos_reflect_kern.o always += xdp_redirect_kern.o always += xdp_redirect_map_kern.o always += xdp_redirect_cpu_kern.o diff --git a/samples/bpf/bpf_load.c b/samples/bpf/bpf_load.c index 904e775d1a44..e6d7e0fe155b 100644 --- a/samples/bpf/bpf_load.c +++ b/samples/bpf/bpf_load.c @@ -16,7 +16,6 @@ #include <linux/netlink.h> #include <linux/rtnetlink.h> #include <linux/types.h> -#include <sys/types.h> #include <sys/socket.h> #include <sys/syscall.h> #include <sys/ioctl.h> diff --git a/samples/bpf/sampleip_user.c b/samples/bpf/sampleip_user.c index 60c2b73d1b4d..216c7ecbbbe9 100644 --- a/samples/bpf/sampleip_user.c +++ b/samples/bpf/sampleip_user.c @@ -9,7 +9,6 @@ */ #include <stdio.h> #include <stdlib.h> -#include <stdio.h> #include <unistd.h> #include <errno.h> #include <signal.h> diff --git a/samples/bpf/sockex2_kern.c b/samples/bpf/sockex2_kern.c index f58acfc92556..f2f9dbc021b0 100644 --- a/samples/bpf/sockex2_kern.c +++ b/samples/bpf/sockex2_kern.c @@ -14,7 +14,7 @@ struct vlan_hdr { __be16 h_vlan_encapsulated_proto; }; -struct bpf_flow_keys { +struct flow_key_record { __be32 src; __be32 dst; union { @@ -59,7 +59,7 @@ static inline __u32 ipv6_addr_hash(struct __sk_buff *ctx, __u64 off) } static inline __u64 parse_ip(struct __sk_buff *skb, __u64 nhoff, __u64 *ip_proto, - struct bpf_flow_keys *flow) + struct flow_key_record *flow) { __u64 verlen; @@ -83,7 +83,7 @@ static inline __u64 parse_ip(struct __sk_buff *skb, __u64 nhoff, __u64 *ip_proto } static inline __u64 parse_ipv6(struct __sk_buff *skb, __u64 nhoff, __u64 *ip_proto, - struct bpf_flow_keys *flow) + struct flow_key_record *flow) { *ip_proto = load_byte(skb, nhoff + offsetof(struct ipv6hdr, nexthdr)); @@ -96,7 +96,8 @@ static inline __u64 parse_ipv6(struct __sk_buff *skb, __u64 nhoff, __u64 *ip_pro return nhoff; } -static inline bool flow_dissector(struct __sk_buff *skb, struct bpf_flow_keys *flow) +static inline bool flow_dissector(struct __sk_buff *skb, + struct flow_key_record *flow) { __u64 nhoff = ETH_HLEN; __u64 ip_proto; @@ -198,7 +199,7 @@ struct bpf_map_def SEC("maps") hash_map = { SEC("socket2") int bpf_prog2(struct __sk_buff *skb) { - struct bpf_flow_keys flow = {}; + struct flow_key_record flow = {}; struct pair *value; u32 key; diff --git a/samples/bpf/sockex3_kern.c b/samples/bpf/sockex3_kern.c index 95907f8d2b17..c527b57d3ec8 100644 --- a/samples/bpf/sockex3_kern.c +++ b/samples/bpf/sockex3_kern.c @@ -61,7 +61,7 @@ struct vlan_hdr { __be16 h_vlan_encapsulated_proto; }; -struct bpf_flow_keys { +struct flow_key_record { __be32 src; __be32 dst; union { @@ -88,7 +88,7 @@ static inline __u32 ipv6_addr_hash(struct __sk_buff *ctx, __u64 off) } struct globals { - struct bpf_flow_keys flow; + struct flow_key_record flow; }; struct bpf_map_def SEC("maps") percpu_map = { @@ -114,14 +114,14 @@ struct pair { struct bpf_map_def SEC("maps") hash_map = { .type = BPF_MAP_TYPE_HASH, - .key_size = sizeof(struct bpf_flow_keys), + .key_size = sizeof(struct flow_key_record), .value_size = sizeof(struct pair), .max_entries = 1024, }; static void update_stats(struct __sk_buff *skb, struct globals *g) { - struct bpf_flow_keys key = g->flow; + struct flow_key_record key = g->flow; struct pair *value; value = bpf_map_lookup_elem(&hash_map, &key); diff --git a/samples/bpf/sockex3_user.c b/samples/bpf/sockex3_user.c index 5ba3ae9d180b..9d02e0404719 100644 --- a/samples/bpf/sockex3_user.c +++ b/samples/bpf/sockex3_user.c @@ -13,7 +13,7 @@ #define PARSE_IP_PROG_FD (prog_fd[0]) #define 
PROG_ARRAY_FD (map_fd[0]) -struct bpf_flow_keys { +struct flow_key_record { __be32 src; __be32 dst; union { @@ -64,7 +64,7 @@ int main(int argc, char **argv) (void) f; for (i = 0; i < 5; i++) { - struct bpf_flow_keys key = {}, next_key; + struct flow_key_record key = {}, next_key; struct pair value; sleep(1); diff --git a/samples/bpf/tcp_tos_reflect_kern.c b/samples/bpf/tcp_tos_reflect_kern.c new file mode 100644 index 000000000000..d51dab19eca6 --- /dev/null +++ b/samples/bpf/tcp_tos_reflect_kern.c @@ -0,0 +1,87 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018 Facebook + * + * BPF program to automatically reflect TOS option from received syn packet + * + * Use load_sock_ops to load this BPF program. + */ + +#include <uapi/linux/bpf.h> +#include <uapi/linux/tcp.h> +#include <uapi/linux/if_ether.h> +#include <uapi/linux/if_packet.h> +#include <uapi/linux/ip.h> +#include <uapi/linux/ipv6.h> +#include <uapi/linux/in.h> +#include <linux/socket.h> +#include "bpf_helpers.h" +#include "bpf_endian.h" + +#define DEBUG 1 + +#define bpf_printk(fmt, ...) \ +({ \ + char ____fmt[] = fmt; \ + bpf_trace_printk(____fmt, sizeof(____fmt), \ + ##__VA_ARGS__); \ +}) + +SEC("sockops") +int bpf_basertt(struct bpf_sock_ops *skops) +{ + char header[sizeof(struct ipv6hdr)]; + struct ipv6hdr *hdr6; + struct iphdr *hdr; + int hdr_size = 0; + int save_syn = 1; + int tos = 0; + int rv = 0; + int op; + + op = (int) skops->op; + +#ifdef DEBUG + bpf_printk("BPF command: %d\n", op); +#endif + switch (op) { + case BPF_SOCK_OPS_TCP_LISTEN_CB: + rv = bpf_setsockopt(skops, SOL_TCP, TCP_SAVE_SYN, + &save_syn, sizeof(save_syn)); + break; + case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB: + if (skops->family == AF_INET) + hdr_size = sizeof(struct iphdr); + else + hdr_size = sizeof(struct ipv6hdr); + rv = bpf_getsockopt(skops, SOL_TCP, TCP_SAVED_SYN, + header, hdr_size); + if (!rv) { + if (skops->family == AF_INET) { + hdr = (struct iphdr *) header; + tos = hdr->tos; + if (tos != 0) + bpf_setsockopt(skops, SOL_IP, IP_TOS, + &tos, sizeof(tos)); + } else { + hdr6 = (struct ipv6hdr *) header; + tos = ((hdr6->priority) << 4 | + (hdr6->flow_lbl[0]) >> 4); + if (tos) + bpf_setsockopt(skops, SOL_IPV6, + IPV6_TCLASS, + &tos, sizeof(tos)); + } + rv = 0; + } + break; + default: + rv = -1; + } +#ifdef DEBUG + bpf_printk("Returning %d\n", rv); +#endif + skops->reply = rv; + return 1; +} +char _license[] SEC("license") = "GPL"; diff --git a/samples/bpf/test_cgrp2_attach2.c b/samples/bpf/test_cgrp2_attach2.c index 180f9d813bca..d7b68ef5ba79 100644 --- a/samples/bpf/test_cgrp2_attach2.c +++ b/samples/bpf/test_cgrp2_attach2.c @@ -209,7 +209,7 @@ static int map_fd = -1; static int prog_load_cnt(int verdict, int val) { - int cgroup_storage_fd; + int cgroup_storage_fd, percpu_cgroup_storage_fd; if (map_fd < 0) map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 8, 1, 0); @@ -225,6 +225,14 @@ static int prog_load_cnt(int verdict, int val) return -1; } + percpu_cgroup_storage_fd = bpf_create_map( + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE, + sizeof(struct bpf_cgroup_storage_key), 8, 0, 0); + if (percpu_cgroup_storage_fd < 0) { + printf("failed to create map '%s'\n", strerror(errno)); + return -1; + } + struct bpf_insn prog[] = { BPF_MOV32_IMM(BPF_REG_0, 0), BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4), /* *(u32 *)(fp - 4) = r0 */ @@ -235,11 +243,20 @@ static int prog_load_cnt(int verdict, int val) BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), BPF_MOV64_IMM(BPF_REG_1, val), /* r1 = 1 */ BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_0, BPF_REG_1, 0, 0), 
/* xadd r0 += r1 */ + BPF_LD_MAP_FD(BPF_REG_1, cgroup_storage_fd), BPF_MOV64_IMM(BPF_REG_2, 0), BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage), BPF_MOV64_IMM(BPF_REG_1, val), BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_0, BPF_REG_1, 0, 0), + + BPF_LD_MAP_FD(BPF_REG_1, percpu_cgroup_storage_fd), + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 0x1), + BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0), + BPF_MOV64_IMM(BPF_REG_0, verdict), /* r0 = verdict */ BPF_EXIT_INSN(), }; diff --git a/samples/bpf/test_current_task_under_cgroup_user.c b/samples/bpf/test_current_task_under_cgroup_user.c index 4be4874ca2bc..2259f997a26c 100644 --- a/samples/bpf/test_current_task_under_cgroup_user.c +++ b/samples/bpf/test_current_task_under_cgroup_user.c @@ -11,7 +11,6 @@ #include <unistd.h> #include <bpf/bpf.h> #include "bpf_load.h" -#include <linux/bpf.h> #include "cgroup_helpers.h" #define CGROUP_PATH "/my-cgroup" diff --git a/samples/bpf/tracex3_user.c b/samples/bpf/tracex3_user.c index 6c6b10f4c3ee..56466d010139 100644 --- a/samples/bpf/tracex3_user.c +++ b/samples/bpf/tracex3_user.c @@ -17,8 +17,6 @@ #include "bpf_load.h" #include "bpf_util.h" -#define ARRAY_SIZE(x) (sizeof(x) / sizeof(*(x))) - #define SLOTS 100 static void clear_stats(int fd) diff --git a/samples/bpf/xdpsock_kern.c b/samples/bpf/xdpsock_kern.c index d8806c41362e..b8ccd0802b3f 100644 --- a/samples/bpf/xdpsock_kern.c +++ b/samples/bpf/xdpsock_kern.c @@ -16,7 +16,7 @@ struct bpf_map_def SEC("maps") xsks_map = { .type = BPF_MAP_TYPE_XSKMAP, .key_size = sizeof(int), .value_size = sizeof(int), - .max_entries = 4, + .max_entries = MAX_SOCKS, }; struct bpf_map_def SEC("maps") rr_map = { diff --git a/samples/bpf/xdpsock_user.c b/samples/bpf/xdpsock_user.c index 4914788b6727..57ecadc58403 100644 --- a/samples/bpf/xdpsock_user.c +++ b/samples/bpf/xdpsock_user.c @@ -118,7 +118,6 @@ struct xdpsock { unsigned long prev_tx_npkts; }; -#define MAX_SOCKS 4 static int num_socks; struct xdpsock *xsks[MAX_SOCKS]; @@ -596,7 +595,7 @@ static void dump_stats(void) prev_time = now; - for (i = 0; i < num_socks; i++) { + for (i = 0; i < num_socks && xsks[i]; i++) { char *fmt = "%-15s %'-11.0f %'-11lu\n"; double rx_pps, tx_pps; @@ -649,6 +648,8 @@ static struct option long_options[] = { {"xdp-skb", no_argument, 0, 'S'}, {"xdp-native", no_argument, 0, 'N'}, {"interval", required_argument, 0, 'n'}, + {"zero-copy", no_argument, 0, 'z'}, + {"copy", no_argument, 0, 'c'}, {0, 0, 0, 0} }; @@ -667,6 +668,8 @@ static void usage(const char *prog) " -S, --xdp-skb=n Use XDP skb-mod\n" " -N, --xdp-native=n Enforce XDP native mode\n" " -n, --interval=n Specify statistics update interval (default 1 sec).\n" + " -z, --zero-copy Force zero-copy mode.\n" + " -c, --copy Force copy mode.\n" "\n"; fprintf(stderr, str, prog); exit(EXIT_FAILURE); @@ -679,7 +682,7 @@ static void parse_command_line(int argc, char **argv) opterr = 0; for (;;) { - c = getopt_long(argc, argv, "rtli:q:psSNn:", long_options, + c = getopt_long(argc, argv, "rtli:q:psSNn:cz", long_options, &option_index); if (c == -1) break; @@ -716,6 +719,12 @@ static void parse_command_line(int argc, char **argv) case 'n': opt_interval = atoi(optarg); break; + case 'z': + opt_xdp_bind_flags |= XDP_ZEROCOPY; + break; + case 'c': + opt_xdp_bind_flags |= XDP_COPY; + break; default: usage(basename(argv[0])); } diff --git a/tools/bpf/bpftool/Documentation/bpftool-net.rst
b/tools/bpf/bpftool/Documentation/bpftool-net.rst new file mode 100644 index 000000000000..408ec30d8872 --- /dev/null +++ b/tools/bpf/bpftool/Documentation/bpftool-net.rst @@ -0,0 +1,139 @@ +================ +bpftool-net +================ +------------------------------------------------------------------------------- +tool for inspection of netdev/tc related bpf prog attachments +------------------------------------------------------------------------------- + +:Manual section: 8 + +SYNOPSIS +======== + + **bpftool** [*OPTIONS*] **net** *COMMAND* + + *OPTIONS* := { [{ **-j** | **--json** }] [{ **-p** | **--pretty** }] } + + *COMMANDS* := + { **show** | **list** } [ **dev** name ] | **help** + +NET COMMANDS +============ + +| **bpftool** **net { show | list } [ dev name ]** +| **bpftool** **net help** + +DESCRIPTION +=========== + **bpftool net { show | list } [ dev name ]** + List bpf program attachments in the kernel networking subsystem. + + Currently, only device driver xdp attachments and tc filter + classification/action attachments are implemented, i.e., for + program types **BPF_PROG_TYPE_SCHED_CLS**, + **BPF_PROG_TYPE_SCHED_ACT** and **BPF_PROG_TYPE_XDP**. + For programs attached to a particular cgroup, e.g., + **BPF_PROG_TYPE_CGROUP_SKB**, **BPF_PROG_TYPE_CGROUP_SOCK**, + **BPF_PROG_TYPE_SOCK_OPS** and **BPF_PROG_TYPE_CGROUP_SOCK_ADDR**, + users can use **bpftool cgroup** to dump cgroup attachments. + For sk_{filter, skb, msg, reuseport} and lwt/seg6 + bpf programs, users should consult other tools, e.g., iproute2. + + The current output will start with all xdp program attachments, followed by + all tc class/qdisc bpf program attachments. Both xdp programs and + tc programs are ordered based on ifindex number. If multiple bpf + programs are attached to the same networking device through **tc filter**, + the order will be first all bpf programs attached to tc classes, then + all bpf programs attached to non-clsact qdiscs, and finally all + bpf programs attached to the root and clsact qdiscs. + + **bpftool net help** + Print short help message. + +OPTIONS +======= + -h, --help + Print short generic help message (similar to **bpftool help**). + + -v, --version + Print version number (similar to **bpftool version**). + + -j, --json + Generate JSON output. For commands that cannot produce JSON, this + option has no effect. + + -p, --pretty + Generate human-readable JSON output. Implies **-j**.
+ +EXAMPLES +======== + +| **# bpftool net** + +:: + + xdp: + eth0(2) driver id 198 + + tc: + eth0(2) htb name prefix_matcher.o:[cls_prefix_matcher_htb] id 111727 act [] + eth0(2) clsact/ingress fbflow_icmp id 130246 act [] + eth0(2) clsact/egress prefix_matcher.o:[cls_prefix_matcher_clsact] id 111726 + eth0(2) clsact/egress cls_fg_dscp id 108619 act [] + eth0(2) clsact/egress fbflow_egress id 130245 + +| +| **# bpftool -jp net** + +:: + + [{ + "xdp": [{ + "devname": "eth0", + "ifindex": 2, + "mode": "driver", + "id": 198 + } + ], + "tc": [{ + "devname": "eth0", + "ifindex": 2, + "kind": "htb", + "name": "prefix_matcher.o:[cls_prefix_matcher_htb]", + "id": 111727, + "act": [] + },{ + "devname": "eth0", + "ifindex": 2, + "kind": "clsact/ingress", + "name": "fbflow_icmp", + "id": 130246, + "act": [] + },{ + "devname": "eth0", + "ifindex": 2, + "kind": "clsact/egress", + "name": "prefix_matcher.o:[cls_prefix_matcher_clsact]", + "id": 111726 + },{ + "devname": "eth0", + "ifindex": 2, + "kind": "clsact/egress", + "name": "cls_fg_dscp", + "id": 108619, + "act": [] + },{ + "devname": "eth0", + "ifindex": 2, + "kind": "clsact/egress", + "name": "fbflow_egress", + "id": 130245 + } + ] + } + ] + + +SEE ALSO +======== + **bpftool**\ (8), **bpftool-prog**\ (8), **bpftool-map**\ (8) diff --git a/tools/bpf/bpftool/Documentation/bpftool.rst b/tools/bpf/bpftool/Documentation/bpftool.rst index b6f5d560460d..8dda77daeda9 100644 --- a/tools/bpf/bpftool/Documentation/bpftool.rst +++ b/tools/bpf/bpftool/Documentation/bpftool.rst @@ -16,7 +16,7 @@ SYNOPSIS **bpftool** **version** - *OBJECT* := { **map** | **program** | **cgroup** | **perf** } + *OBJECT* := { **map** | **program** | **cgroup** | **perf** | **net** } *OPTIONS* := { { **-V** | **--version** } | { **-h** | **--help** } | { **-j** | **--json** } [{ **-p** | **--pretty** }] } @@ -32,6 +32,8 @@ SYNOPSIS *PERF-COMMANDS* := { **show** | **list** | **help** } + *NET-COMMANDS* := { **show** | **list** | **help** } + DESCRIPTION =========== *bpftool* allows for inspection and simple modification of BPF objects @@ -58,4 +60,4 @@ OPTIONS SEE ALSO ======== **bpftool-map**\ (8), **bpftool-prog**\ (8), **bpftool-cgroup**\ (8) - **bpftool-perf**\ (8) + **bpftool-perf**\ (8), **bpftool-net**\ (8) diff --git a/tools/bpf/bpftool/bash-completion/bpftool b/tools/bpf/bpftool/bash-completion/bpftool index 598066c40191..df1060b852c1 100644 --- a/tools/bpf/bpftool/bash-completion/bpftool +++ b/tools/bpf/bpftool/bash-completion/bpftool @@ -494,10 +494,10 @@ _bpftool() _filedir return 0 ;; - tree) - _filedir - return 0 - ;; + tree) + _filedir + return 0 + ;; attach|detach) local ATTACH_TYPES='ingress egress sock_create sock_ops \ device bind4 bind6 post_bind4 post_bind6 connect4 \ @@ -552,6 +552,15 @@ _bpftool() ;; esac ;; + net) + case $command in + *) + [[ $prev == $object ]] && \ + COMPREPLY=( $( compgen -W 'help \ + show list' -- "$cur" ) ) + ;; + esac + ;; esac } && complete -F _bpftool bpftool diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c index d15a62be6cf0..79dc3f193547 100644 --- a/tools/bpf/bpftool/main.c +++ b/tools/bpf/bpftool/main.c @@ -85,7 +85,7 @@ static int do_help(int argc, char **argv) " %s batch file FILE\n" " %s version\n" "\n" - " OBJECT := { prog | map | cgroup | perf }\n" + " OBJECT := { prog | map | cgroup | perf | net }\n" " " HELP_SPEC_OPTIONS "\n" "", bin_name, bin_name, bin_name); @@ -215,6 +215,7 @@ static const struct cmd cmds[] = { { "map", do_map }, { "cgroup", do_cgroup }, { "perf", do_perf }, + { "net", do_net }, {
"version", do_version }, { 0 } }; diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h index 238e734d75b3..40492cdc4e53 100644 --- a/tools/bpf/bpftool/main.h +++ b/tools/bpf/bpftool/main.h @@ -136,6 +136,7 @@ int do_map(int argc, char **arg); int do_event_pipe(int argc, char **argv); int do_cgroup(int argc, char **arg); int do_perf(int argc, char **arg); +int do_net(int argc, char **arg); int prog_parse_fd(int *argc, char ***argv); int map_parse_fd(int *argc, char ***argv); @@ -165,4 +166,11 @@ struct btf_dumper { */ int btf_dumper_type(const struct btf_dumper *d, __u32 type_id, const void *data); + +struct nlattr; +struct ifinfomsg; +struct tcmsg; +int do_xdp_dump(struct ifinfomsg *ifinfo, struct nlattr **tb); +int do_filter_dump(struct tcmsg *ifinfo, struct nlattr **tb, const char *kind, + const char *devname, int ifindex); #endif diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c index b455930a3eaf..6003e9598973 100644 --- a/tools/bpf/bpftool/map.c +++ b/tools/bpf/bpftool/map.c @@ -71,13 +71,16 @@ static const char * const map_type_name[] = { [BPF_MAP_TYPE_XSKMAP] = "xskmap", [BPF_MAP_TYPE_SOCKHASH] = "sockhash", [BPF_MAP_TYPE_CGROUP_STORAGE] = "cgroup_storage", + [BPF_MAP_TYPE_REUSEPORT_SOCKARRAY] = "reuseport_sockarray", + [BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE] = "percpu_cgroup_storage", }; static bool map_is_per_cpu(__u32 type) { return type == BPF_MAP_TYPE_PERCPU_HASH || type == BPF_MAP_TYPE_PERCPU_ARRAY || - type == BPF_MAP_TYPE_LRU_PERCPU_HASH; + type == BPF_MAP_TYPE_LRU_PERCPU_HASH || + type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE; } static bool map_is_map_of_maps(__u32 type) @@ -170,9 +173,28 @@ static int do_dump_btf(const struct btf_dumper *d, if (ret) goto err_end_obj; - jsonw_name(d->jw, "value"); + if (!map_is_per_cpu(map_info->type)) { + jsonw_name(d->jw, "value"); + ret = btf_dumper_type(d, map_info->btf_value_type_id, value); + } else { + unsigned int i, n, step; - ret = btf_dumper_type(d, map_info->btf_value_type_id, value); + jsonw_name(d->jw, "values"); + jsonw_start_array(d->jw); + n = get_possible_cpus(); + step = round_up(map_info->value_size, 8); + for (i = 0; i < n; i++) { + jsonw_start_object(d->jw); + jsonw_int_field(d->jw, "cpu", i); + jsonw_name(d->jw, "value"); + ret = btf_dumper_type(d, map_info->btf_value_type_id, + value + i * step); + jsonw_end_object(d->jw); + if (ret) + break; + } + jsonw_end_array(d->jw); + } err_end_obj: /* end of key-value pair */ @@ -299,6 +321,16 @@ static void print_entry_json(struct bpf_map_info *info, unsigned char *key, jsonw_end_object(json_wtr); } jsonw_end_array(json_wtr); + if (btf) { + struct btf_dumper d = { + .btf = btf, + .jw = json_wtr, + .is_plain_text = false, + }; + + jsonw_name(json_wtr, "formatted"); + do_dump_btf(&d, info, key, value); + } } jsonw_end_object(json_wtr); @@ -644,12 +676,6 @@ static int do_dump(int argc, char **argv) if (fd < 0) return -1; - if (map_is_map_of_maps(info.type) || map_is_map_of_progs(info.type)) { - p_err("Dumping maps of maps and program maps not supported"); - close(fd); - return -1; - } - key = malloc(info.key_size); value = alloc_value(&info); if (!key || !value) { @@ -703,7 +729,9 @@ static int do_dump(int argc, char **argv) } else { print_entry_plain(&info, key, value); } - } else { + num_elems++; + } else if (!map_is_map_of_maps(info.type) && + !map_is_map_of_progs(info.type)) { if (json_output) { jsonw_name(json_wtr, "key"); print_hex_data_json(key, info.key_size); @@ -720,7 +748,6 @@ static int do_dump(int argc, char **argv) } prev_key = key; - 
num_elems++; } if (json_output) diff --git a/tools/bpf/bpftool/net.c b/tools/bpf/bpftool/net.c new file mode 100644 index 000000000000..d441bb7035ca --- /dev/null +++ b/tools/bpf/bpftool/net.c @@ -0,0 +1,275 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (C) 2018 Facebook + +#define _GNU_SOURCE +#include <errno.h> +#include <stdlib.h> +#include <string.h> +#include <unistd.h> +#include <libbpf.h> +#include <net/if.h> +#include <linux/if.h> +#include <linux/rtnetlink.h> +#include <linux/tc_act/tc_bpf.h> +#include <sys/socket.h> + +#include <bpf.h> +#include <nlattr.h> +#include "main.h" +#include "netlink_dumper.h" + +struct ip_devname_ifindex { + char devname[64]; + int ifindex; +}; + +struct bpf_netdev_t { + struct ip_devname_ifindex *devices; + int used_len; + int array_len; + int filter_idx; +}; + +struct tc_kind_handle { + char kind[64]; + int handle; +}; + +struct bpf_tcinfo_t { + struct tc_kind_handle *handle_array; + int used_len; + int array_len; + bool is_qdisc; +}; + +struct bpf_filter_t { + const char *kind; + const char *devname; + int ifindex; +}; + +static int dump_link_nlmsg(void *cookie, void *msg, struct nlattr **tb) +{ + struct bpf_netdev_t *netinfo = cookie; + struct ifinfomsg *ifinfo = msg; + + if (netinfo->filter_idx > 0 && netinfo->filter_idx != ifinfo->ifi_index) + return 0; + + if (netinfo->used_len == netinfo->array_len) { + netinfo->devices = realloc(netinfo->devices, + (netinfo->array_len + 16) * + sizeof(struct ip_devname_ifindex)); + if (!netinfo->devices) + return -ENOMEM; + + netinfo->array_len += 16; + } + netinfo->devices[netinfo->used_len].ifindex = ifinfo->ifi_index; + snprintf(netinfo->devices[netinfo->used_len].devname, + sizeof(netinfo->devices[netinfo->used_len].devname), + "%s", + tb[IFLA_IFNAME] + ? libbpf_nla_getattr_str(tb[IFLA_IFNAME]) + : ""); + netinfo->used_len++; + + return do_xdp_dump(ifinfo, tb); +} + +static int dump_class_qdisc_nlmsg(void *cookie, void *msg, struct nlattr **tb) +{ + struct bpf_tcinfo_t *tcinfo = cookie; + struct tcmsg *info = msg; + + if (tcinfo->is_qdisc) { + /* skip clsact qdisc */ + if (tb[TCA_KIND] && + strcmp(libbpf_nla_data(tb[TCA_KIND]), "clsact") == 0) + return 0; + if (info->tcm_handle == 0) + return 0; + } + + if (tcinfo->used_len == tcinfo->array_len) { + tcinfo->handle_array = realloc(tcinfo->handle_array, + (tcinfo->array_len + 16) * sizeof(struct tc_kind_handle)); + if (!tcinfo->handle_array) + return -ENOMEM; + + tcinfo->array_len += 16; + } + tcinfo->handle_array[tcinfo->used_len].handle = info->tcm_handle; + snprintf(tcinfo->handle_array[tcinfo->used_len].kind, + sizeof(tcinfo->handle_array[tcinfo->used_len].kind), + "%s", + tb[TCA_KIND] + ? 
libbpf_nla_getattr_str(tb[TCA_KIND])
+			 : "unknown");
+	tcinfo->used_len++;
+
+	return 0;
+}
+
+static int dump_filter_nlmsg(void *cookie, void *msg, struct nlattr **tb)
+{
+	const struct bpf_filter_t *filter_info = cookie;
+
+	return do_filter_dump((struct tcmsg *)msg, tb, filter_info->kind,
+			      filter_info->devname, filter_info->ifindex);
+}
+
+static int show_dev_tc_bpf(int sock, unsigned int nl_pid,
+			   struct ip_devname_ifindex *dev)
+{
+	struct bpf_filter_t filter_info;
+	struct bpf_tcinfo_t tcinfo;
+	int i, handle, ret = 0;
+
+	tcinfo.handle_array = NULL;
+	tcinfo.used_len = 0;
+	tcinfo.array_len = 0;
+
+	tcinfo.is_qdisc = false;
+	ret = libbpf_nl_get_class(sock, nl_pid, dev->ifindex,
+				  dump_class_qdisc_nlmsg, &tcinfo);
+	if (ret)
+		goto out;
+
+	tcinfo.is_qdisc = true;
+	ret = libbpf_nl_get_qdisc(sock, nl_pid, dev->ifindex,
+				  dump_class_qdisc_nlmsg, &tcinfo);
+	if (ret)
+		goto out;
+
+	filter_info.devname = dev->devname;
+	filter_info.ifindex = dev->ifindex;
+	for (i = 0; i < tcinfo.used_len; i++) {
+		filter_info.kind = tcinfo.handle_array[i].kind;
+		ret = libbpf_nl_get_filter(sock, nl_pid, dev->ifindex,
+					   tcinfo.handle_array[i].handle,
+					   dump_filter_nlmsg, &filter_info);
+		if (ret)
+			goto out;
+	}
+
+	/* root, ingress and egress handle */
+	handle = TC_H_ROOT;
+	filter_info.kind = "root";
+	ret = libbpf_nl_get_filter(sock, nl_pid, dev->ifindex, handle,
+				   dump_filter_nlmsg, &filter_info);
+	if (ret)
+		goto out;
+
+	handle = TC_H_MAKE(TC_H_CLSACT, TC_H_MIN_INGRESS);
+	filter_info.kind = "clsact/ingress";
+	ret = libbpf_nl_get_filter(sock, nl_pid, dev->ifindex, handle,
+				   dump_filter_nlmsg, &filter_info);
+	if (ret)
+		goto out;
+
+	handle = TC_H_MAKE(TC_H_CLSACT, TC_H_MIN_EGRESS);
+	filter_info.kind = "clsact/egress";
+	ret = libbpf_nl_get_filter(sock, nl_pid, dev->ifindex, handle,
+				   dump_filter_nlmsg, &filter_info);
+	if (ret)
+		goto out;
+
+out:
+	free(tcinfo.handle_array);
+	return ret;
+}
+
+static int do_show(int argc, char **argv)
+{
+	int i, sock, ret, filter_idx = -1;
+	struct bpf_netdev_t dev_array;
+	unsigned int nl_pid;
+	char err_buf[256];
+
+	if (argc == 2) {
+		if (strcmp(argv[0], "dev") != 0)
+			usage();
+		filter_idx = if_nametoindex(argv[1]);
+		if (filter_idx == 0) {
+			fprintf(stderr, "invalid dev name %s\n", argv[1]);
+			return -1;
+		}
+	} else if (argc != 0) {
+		usage();
+	}
+
+	sock = libbpf_netlink_open(&nl_pid);
+	if (sock < 0) {
+		fprintf(stderr, "failed to open netlink sock\n");
+		return -1;
+	}
+
+	dev_array.devices = NULL;
+	dev_array.used_len = 0;
+	dev_array.array_len = 0;
+	dev_array.filter_idx = filter_idx;
+
+	if (json_output)
+		jsonw_start_array(json_wtr);
+	NET_START_OBJECT;
+	NET_START_ARRAY("xdp", "%s:\n");
+	ret = libbpf_nl_get_link(sock, nl_pid, dump_link_nlmsg, &dev_array);
+	NET_END_ARRAY("\n");
+
+	if (!ret) {
+		NET_START_ARRAY("tc", "%s:\n");
+		for (i = 0; i < dev_array.used_len; i++) {
+			ret = show_dev_tc_bpf(sock, nl_pid,
+					      &dev_array.devices[i]);
+			if (ret)
+				break;
+		}
+		NET_END_ARRAY("\n");
+	}
+	NET_END_OBJECT;
+	if (json_output)
+		jsonw_end_array(json_wtr);
+
+	if (ret) {
+		if (json_output)
+			jsonw_null(json_wtr);
+		libbpf_strerror(ret, err_buf, sizeof(err_buf));
+		fprintf(stderr, "Error: %s\n", err_buf);
+	}
+	free(dev_array.devices);
+	close(sock);
+	return ret;
+}
+
+static int do_help(int argc, char **argv)
+{
+	if (json_output) {
+		jsonw_null(json_wtr);
+		return 0;
+	}
+
+	fprintf(stderr,
+		"Usage: %s %s { show | list } [dev <devname>]\n"
+		"       %s %s help\n"
+		"Note: Only xdp and tc attachments are supported now.\n"
+		"      For progs attached to
cgroups, use \"bpftool cgroup\"\n" + " to dump program attachments. For program types\n" + " sk_{filter,skb,msg,reuseport} and lwt/seg6, please\n" + " consult iproute2.\n", + bin_name, argv[-2], bin_name, argv[-2]); + + return 0; +} + +static const struct cmd cmds[] = { + { "show", do_show }, + { "list", do_show }, + { "help", do_help }, + { 0 } +}; + +int do_net(int argc, char **argv) +{ + return cmd_select(cmds, argc, argv, do_help); +} diff --git a/tools/bpf/bpftool/netlink_dumper.c b/tools/bpf/bpftool/netlink_dumper.c new file mode 100644 index 000000000000..4e9f4531269f --- /dev/null +++ b/tools/bpf/bpftool/netlink_dumper.c @@ -0,0 +1,178 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (C) 2018 Facebook + +#include <stdlib.h> +#include <string.h> +#include <libbpf.h> +#include <linux/rtnetlink.h> +#include <linux/tc_act/tc_bpf.h> + +#include <nlattr.h> +#include "main.h" +#include "netlink_dumper.h" + +static void xdp_dump_prog_id(struct nlattr **tb, int attr, + const char *mode, + bool new_json_object) +{ + if (!tb[attr]) + return; + + if (new_json_object) + NET_START_OBJECT + NET_DUMP_STR("mode", " %s", mode); + NET_DUMP_UINT("id", " id %u", libbpf_nla_getattr_u32(tb[attr])) + if (new_json_object) + NET_END_OBJECT +} + +static int do_xdp_dump_one(struct nlattr *attr, unsigned int ifindex, + const char *name) +{ + struct nlattr *tb[IFLA_XDP_MAX + 1]; + unsigned char mode; + + if (libbpf_nla_parse_nested(tb, IFLA_XDP_MAX, attr, NULL) < 0) + return -1; + + if (!tb[IFLA_XDP_ATTACHED]) + return 0; + + mode = libbpf_nla_getattr_u8(tb[IFLA_XDP_ATTACHED]); + if (mode == XDP_ATTACHED_NONE) + return 0; + + NET_START_OBJECT; + if (name) + NET_DUMP_STR("devname", "%s", name); + NET_DUMP_UINT("ifindex", "(%d)", ifindex); + + if (mode == XDP_ATTACHED_MULTI) { + if (json_output) { + jsonw_name(json_wtr, "multi_attachments"); + jsonw_start_array(json_wtr); + } + xdp_dump_prog_id(tb, IFLA_XDP_SKB_PROG_ID, "generic", true); + xdp_dump_prog_id(tb, IFLA_XDP_DRV_PROG_ID, "driver", true); + xdp_dump_prog_id(tb, IFLA_XDP_HW_PROG_ID, "offload", true); + if (json_output) + jsonw_end_array(json_wtr); + } else if (mode == XDP_ATTACHED_DRV) { + xdp_dump_prog_id(tb, IFLA_XDP_PROG_ID, "driver", false); + } else if (mode == XDP_ATTACHED_SKB) { + xdp_dump_prog_id(tb, IFLA_XDP_PROG_ID, "generic", false); + } else if (mode == XDP_ATTACHED_HW) { + xdp_dump_prog_id(tb, IFLA_XDP_PROG_ID, "offload", false); + } + + NET_END_OBJECT_FINAL; + return 0; +} + +int do_xdp_dump(struct ifinfomsg *ifinfo, struct nlattr **tb) +{ + if (!tb[IFLA_XDP]) + return 0; + + return do_xdp_dump_one(tb[IFLA_XDP], ifinfo->ifi_index, + libbpf_nla_getattr_str(tb[IFLA_IFNAME])); +} + +static int do_bpf_dump_one_act(struct nlattr *attr) +{ + struct nlattr *tb[TCA_ACT_BPF_MAX + 1]; + + if (libbpf_nla_parse_nested(tb, TCA_ACT_BPF_MAX, attr, NULL) < 0) + return -LIBBPF_ERRNO__NLPARSE; + + if (!tb[TCA_ACT_BPF_PARMS]) + return -LIBBPF_ERRNO__NLPARSE; + + NET_START_OBJECT_NESTED2; + if (tb[TCA_ACT_BPF_NAME]) + NET_DUMP_STR("name", "%s", + libbpf_nla_getattr_str(tb[TCA_ACT_BPF_NAME])); + if (tb[TCA_ACT_BPF_ID]) + NET_DUMP_UINT("id", " id %u", + libbpf_nla_getattr_u32(tb[TCA_ACT_BPF_ID])); + NET_END_OBJECT_NESTED; + return 0; +} + +static int do_dump_one_act(struct nlattr *attr) +{ + struct nlattr *tb[TCA_ACT_MAX + 1]; + + if (!attr) + return 0; + + if (libbpf_nla_parse_nested(tb, TCA_ACT_MAX, attr, NULL) < 0) + return -LIBBPF_ERRNO__NLPARSE; + + if (tb[TCA_ACT_KIND] && + strcmp(libbpf_nla_data(tb[TCA_ACT_KIND]), "bpf") == 0) + return 
do_bpf_dump_one_act(tb[TCA_ACT_OPTIONS]); + + return 0; +} + +static int do_bpf_act_dump(struct nlattr *attr) +{ + struct nlattr *tb[TCA_ACT_MAX_PRIO + 1]; + int act, ret; + + if (libbpf_nla_parse_nested(tb, TCA_ACT_MAX_PRIO, attr, NULL) < 0) + return -LIBBPF_ERRNO__NLPARSE; + + NET_START_ARRAY("act", " %s ["); + for (act = 0; act <= TCA_ACT_MAX_PRIO; act++) { + ret = do_dump_one_act(tb[act]); + if (ret) + break; + } + NET_END_ARRAY("] "); + + return ret; +} + +static int do_bpf_filter_dump(struct nlattr *attr) +{ + struct nlattr *tb[TCA_BPF_MAX + 1]; + int ret; + + if (libbpf_nla_parse_nested(tb, TCA_BPF_MAX, attr, NULL) < 0) + return -LIBBPF_ERRNO__NLPARSE; + + if (tb[TCA_BPF_NAME]) + NET_DUMP_STR("name", " %s", + libbpf_nla_getattr_str(tb[TCA_BPF_NAME])); + if (tb[TCA_BPF_ID]) + NET_DUMP_UINT("id", " id %u", + libbpf_nla_getattr_u32(tb[TCA_BPF_ID])); + if (tb[TCA_BPF_ACT]) { + ret = do_bpf_act_dump(tb[TCA_BPF_ACT]); + if (ret) + return ret; + } + + return 0; +} + +int do_filter_dump(struct tcmsg *info, struct nlattr **tb, const char *kind, + const char *devname, int ifindex) +{ + int ret = 0; + + if (tb[TCA_OPTIONS] && + strcmp(libbpf_nla_data(tb[TCA_KIND]), "bpf") == 0) { + NET_START_OBJECT; + if (devname[0] != '\0') + NET_DUMP_STR("devname", "%s", devname); + NET_DUMP_UINT("ifindex", "(%u)", ifindex); + NET_DUMP_STR("kind", " %s", kind); + ret = do_bpf_filter_dump(tb[TCA_OPTIONS]); + NET_END_OBJECT_FINAL; + } + + return ret; +} diff --git a/tools/bpf/bpftool/netlink_dumper.h b/tools/bpf/bpftool/netlink_dumper.h new file mode 100644 index 000000000000..e3516b586a34 --- /dev/null +++ b/tools/bpf/bpftool/netlink_dumper.h @@ -0,0 +1,95 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (C) 2018 Facebook + +#ifndef _NETLINK_DUMPER_H_ +#define _NETLINK_DUMPER_H_ + +#define NET_START_OBJECT \ +{ \ + if (json_output) \ + jsonw_start_object(json_wtr); \ +} + +#define NET_START_OBJECT_NESTED(name) \ +{ \ + if (json_output) { \ + jsonw_name(json_wtr, name); \ + jsonw_start_object(json_wtr); \ + } else { \ + fprintf(stdout, "%s {", name); \ + } \ +} + +#define NET_START_OBJECT_NESTED2 \ +{ \ + if (json_output) \ + jsonw_start_object(json_wtr); \ + else \ + fprintf(stdout, "{"); \ +} + +#define NET_END_OBJECT_NESTED \ +{ \ + if (json_output) \ + jsonw_end_object(json_wtr); \ + else \ + fprintf(stdout, "}"); \ +} + +#define NET_END_OBJECT \ +{ \ + if (json_output) \ + jsonw_end_object(json_wtr); \ +} + +#define NET_END_OBJECT_FINAL \ +{ \ + if (json_output) \ + jsonw_end_object(json_wtr); \ + else \ + fprintf(stdout, "\n"); \ +} + +#define NET_START_ARRAY(name, fmt_str) \ +{ \ + if (json_output) { \ + jsonw_name(json_wtr, name); \ + jsonw_start_array(json_wtr); \ + } else { \ + fprintf(stdout, fmt_str, name); \ + } \ +} + +#define NET_END_ARRAY(endstr) \ +{ \ + if (json_output) \ + jsonw_end_array(json_wtr); \ + else \ + fprintf(stdout, "%s", endstr); \ +} + +#define NET_DUMP_UINT(name, fmt_str, val) \ +{ \ + if (json_output) \ + jsonw_uint_field(json_wtr, name, val); \ + else \ + fprintf(stdout, fmt_str, val); \ +} + +#define NET_DUMP_STR(name, fmt_str, str) \ +{ \ + if (json_output) \ + jsonw_string_field(json_wtr, name, str);\ + else \ + fprintf(stdout, fmt_str, str); \ +} + +#define NET_DUMP_STR_ONLY(str) \ +{ \ + if (json_output) \ + jsonw_string(json_wtr, str); \ + else \ + fprintf(stdout, "%s ", str); \ +} + +#endif diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c index dce960d22106..b1cd3bc8db70 100644 --- a/tools/bpf/bpftool/prog.c +++ b/tools/bpf/bpftool/prog.c @@ 
-74,6 +74,7 @@ static const char * const prog_type_name[] = {
 	[BPF_PROG_TYPE_RAW_TRACEPOINT]	= "raw_tracepoint",
 	[BPF_PROG_TYPE_CGROUP_SOCK_ADDR] = "cgroup_sock_addr",
 	[BPF_PROG_TYPE_LIRC_MODE2]	= "lirc_mode2",
+	[BPF_PROG_TYPE_FLOW_DISSECTOR]	= "flow_dissector",
 };
 
 static void print_boot_time(__u64 nsecs, char *buf, unsigned int size)
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 66917a4eba27..f9187b41dff6 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -127,6 +127,7 @@ enum bpf_map_type {
 	BPF_MAP_TYPE_SOCKHASH,
 	BPF_MAP_TYPE_CGROUP_STORAGE,
 	BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
+	BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
 };
 
 enum bpf_prog_type {
@@ -152,6 +153,7 @@ enum bpf_prog_type {
 	BPF_PROG_TYPE_LWT_SEG6LOCAL,
 	BPF_PROG_TYPE_LIRC_MODE2,
 	BPF_PROG_TYPE_SK_REUSEPORT,
+	BPF_PROG_TYPE_FLOW_DISSECTOR,
 };
 
 enum bpf_attach_type {
@@ -172,6 +174,7 @@ enum bpf_attach_type {
 	BPF_CGROUP_UDP4_SENDMSG,
 	BPF_CGROUP_UDP6_SENDMSG,
 	BPF_LIRC_MODE2,
+	BPF_FLOW_DISSECTOR,
 
 	__MAX_BPF_ATTACH_TYPE
 };
@@ -2141,6 +2144,77 @@ union bpf_attr {
 *		request in the skb.
 *	Return
 *		0 on success, or a negative error in case of failure.
+ *
+ * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u32 netns, u64 flags)
+ *	Description
+ *		Look for a TCP socket matching *tuple*, optionally in a child
+ *		network namespace *netns*. The return value must be checked,
+ *		and if non-NULL, released via **bpf_sk_release**\ ().
+ *
+ *		The *ctx* should point to the context of the program, such as
+ *		the skb or socket (depending on the hook in use). This is used
+ *		to determine the base network namespace for the lookup.
+ *
+ *		*tuple_size* must be one of:
+ *
+ *		**sizeof**\ (*tuple*\ **->ipv4**)
+ *			Look for an IPv4 socket.
+ *		**sizeof**\ (*tuple*\ **->ipv6**)
+ *			Look for an IPv6 socket.
+ *
+ *		If *netns* is zero, then the socket lookup table in the
+ *		netns associated with the *ctx* will be used. For the TC hooks,
+ *		this is the netns of the device in the skb. For socket hooks,
+ *		this is the netns of the socket. If *netns* is non-zero, then
+ *		it specifies the ID of the netns relative to the netns
+ *		associated with the *ctx*.
+ *
+ *		All values for *flags* are reserved for future usage, and must
+ *		be left at zero.
+ *
+ *		This helper is available only if the kernel was compiled with
+ *		the **CONFIG_NET** configuration option.
+ *	Return
+ *		Pointer to *struct bpf_sock*, or NULL in case of failure.
+ *
+ * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u32 netns, u64 flags)
+ *	Description
+ *		Look for a UDP socket matching *tuple*, optionally in a child
+ *		network namespace *netns*. The return value must be checked,
+ *		and if non-NULL, released via **bpf_sk_release**\ ().
+ *
+ *		The *ctx* should point to the context of the program, such as
+ *		the skb or socket (depending on the hook in use). This is used
+ *		to determine the base network namespace for the lookup.
+ *
+ *		*tuple_size* must be one of:
+ *
+ *		**sizeof**\ (*tuple*\ **->ipv4**)
+ *			Look for an IPv4 socket.
+ *		**sizeof**\ (*tuple*\ **->ipv6**)
+ *			Look for an IPv6 socket.
+ *
+ *		If *netns* is zero, then the socket lookup table in the
+ *		netns associated with the *ctx* will be used. For the TC hooks,
+ *		this is the netns of the device in the skb. For socket hooks,
+ *		this is the netns of the socket. If *netns* is non-zero, then
+ *		it specifies the ID of the netns relative to the netns
+ *		associated with the *ctx*.
+ *
+ *		All values for *flags* are reserved for future usage, and must
+ *		be left at zero.
+ *
+ *		This helper is available only if the kernel was compiled with
+ *		the **CONFIG_NET** configuration option.
+ *	Return
+ *		Pointer to *struct bpf_sock*, or NULL in case of failure.
+ *
+ * int bpf_sk_release(struct bpf_sock *sk)
+ *	Description
+ *		Release the reference held by *sk*. *sk* must be a non-NULL
+ *		pointer that was returned from bpf_sk_lookup_xxx\ ().
+ *	Return
+ *		0 on success, or a negative error in case of failure.
 */
 #define __BPF_FUNC_MAPPER(FN)		\
 	FN(unspec),			\
@@ -2226,7 +2300,10 @@ union bpf_attr {
 	FN(get_current_cgroup_id),	\
 	FN(get_local_storage),		\
 	FN(sk_select_reuseport),	\
-	FN(skb_ancestor_cgroup_id),
+	FN(skb_ancestor_cgroup_id),	\
+	FN(sk_lookup_tcp),		\
+	FN(sk_lookup_udp),		\
+	FN(sk_release),
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
  * function eBPF program intends to call
@@ -2333,6 +2410,7 @@ struct __sk_buff {
 	/* ... here. */
 
 	__u32 data_meta;
+	struct bpf_flow_keys *flow_keys;
 };
 
 struct bpf_tunnel_key {
@@ -2395,6 +2473,23 @@ struct bpf_sock {
 				 */
 };
 
+struct bpf_sock_tuple {
+	union {
+		struct {
+			__be32 saddr;
+			__be32 daddr;
+			__be16 sport;
+			__be16 dport;
+		} ipv4;
+		struct {
+			__be32 saddr[4];
+			__be32 daddr[4];
+			__be16 sport;
+			__be16 dport;
+		} ipv6;
+	};
+};
+
 #define XDP_PACKET_HEADROOM 256
 
 /* User return codes for XDP prog type.
@@ -2778,4 +2873,27 @@ enum bpf_task_fd_type {
 	BPF_FD_TYPE_URETPROBE,		/* filename + offset */
 };
 
+struct bpf_flow_keys {
+	__u16	nhoff;
+	__u16	thoff;
+	__u16	addr_proto;			/* ETH_P_* of valid addrs */
+	__u8	is_frag;
+	__u8	is_first_frag;
+	__u8	is_encap;
+	__u8	ip_proto;
+	__be16	n_proto;
+	__be16	sport;
+	__be16	dport;
+	union {
+		struct {
+			__be32	ipv4_src;
+			__be32	ipv4_dst;
+		};
+		struct {
+			__u32	ipv6_src[4];	/* in6_addr; network order */
+			__u32	ipv6_dst[4];	/* in6_addr; network order */
+		};
+	};
+};
+
 #endif /* _UAPI__LINUX_BPF_H__ */
diff --git a/tools/include/uapi/linux/if_link.h b/tools/include/uapi/linux/if_link.h
index 43391e2d1153..58faab897201 100644
--- a/tools/include/uapi/linux/if_link.h
+++ b/tools/include/uapi/linux/if_link.h
@@ -161,6 +161,7 @@ enum {
 	IFLA_EVENT,
 	IFLA_NEW_NETNSID,
 	IFLA_IF_NETNSID,
+	IFLA_TARGET_NETNSID = IFLA_IF_NETNSID, /* new alias */
 	IFLA_CARRIER_UP_COUNT,
 	IFLA_CARRIER_DOWN_COUNT,
 	IFLA_NEW_IFINDEX,
@@ -554,6 +555,7 @@ enum {
 	IFLA_GENEVE_UDP_ZERO_CSUM6_TX,
 	IFLA_GENEVE_UDP_ZERO_CSUM6_RX,
 	IFLA_GENEVE_LABEL,
+	IFLA_GENEVE_TTL_INHERIT,
 	__IFLA_GENEVE_MAX
 };
 #define IFLA_GENEVE_MAX	(__IFLA_GENEVE_MAX - 1)
diff --git a/tools/lib/bpf/Build b/tools/lib/bpf/Build
index 6eb9bacd1948..7bc31c905018 100644
--- a/tools/lib/bpf/Build
+++ b/tools/lib/bpf/Build
@@ -1 +1 @@
-libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o str_error.o
+libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o str_error.o netlink.o
diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile
index d49902e818b5..6ad27257fd67 100644
--- a/tools/lib/bpf/Makefile
+++ b/tools/lib/bpf/Makefile
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
 # Most of this file is copied from tools/lib/traceevent/Makefile
 
 BPF_VERSION = 0
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index 60aa4ca8b2c5..d70a255cb05e 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier:
LGPL-2.1 +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) /* * common eBPF ELF operations. @@ -28,16 +28,8 @@ #include <linux/bpf.h> #include "bpf.h" #include "libbpf.h" -#include "nlattr.h" -#include <linux/rtnetlink.h> -#include <linux/if_link.h> -#include <sys/socket.h> #include <errno.h> -#ifndef SOL_NETLINK -#define SOL_NETLINK 270 -#endif - /* * When building perf, unistd.h is overridden. __NR_bpf is * required to be defined explicitly. @@ -499,127 +491,6 @@ int bpf_raw_tracepoint_open(const char *name, int prog_fd) return sys_bpf(BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr)); } -int bpf_set_link_xdp_fd(int ifindex, int fd, __u32 flags) -{ - struct sockaddr_nl sa; - int sock, seq = 0, len, ret = -1; - char buf[4096]; - struct nlattr *nla, *nla_xdp; - struct { - struct nlmsghdr nh; - struct ifinfomsg ifinfo; - char attrbuf[64]; - } req; - struct nlmsghdr *nh; - struct nlmsgerr *err; - socklen_t addrlen; - int one = 1; - - memset(&sa, 0, sizeof(sa)); - sa.nl_family = AF_NETLINK; - - sock = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE); - if (sock < 0) { - return -errno; - } - - if (setsockopt(sock, SOL_NETLINK, NETLINK_EXT_ACK, - &one, sizeof(one)) < 0) { - fprintf(stderr, "Netlink error reporting not supported\n"); - } - - if (bind(sock, (struct sockaddr *)&sa, sizeof(sa)) < 0) { - ret = -errno; - goto cleanup; - } - - addrlen = sizeof(sa); - if (getsockname(sock, (struct sockaddr *)&sa, &addrlen) < 0) { - ret = -errno; - goto cleanup; - } - - if (addrlen != sizeof(sa)) { - ret = -LIBBPF_ERRNO__INTERNAL; - goto cleanup; - } - - memset(&req, 0, sizeof(req)); - req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg)); - req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK; - req.nh.nlmsg_type = RTM_SETLINK; - req.nh.nlmsg_pid = 0; - req.nh.nlmsg_seq = ++seq; - req.ifinfo.ifi_family = AF_UNSPEC; - req.ifinfo.ifi_index = ifindex; - - /* started nested attribute for XDP */ - nla = (struct nlattr *)(((char *)&req) - + NLMSG_ALIGN(req.nh.nlmsg_len)); - nla->nla_type = NLA_F_NESTED | IFLA_XDP; - nla->nla_len = NLA_HDRLEN; - - /* add XDP fd */ - nla_xdp = (struct nlattr *)((char *)nla + nla->nla_len); - nla_xdp->nla_type = IFLA_XDP_FD; - nla_xdp->nla_len = NLA_HDRLEN + sizeof(int); - memcpy((char *)nla_xdp + NLA_HDRLEN, &fd, sizeof(fd)); - nla->nla_len += nla_xdp->nla_len; - - /* if user passed in any flags, add those too */ - if (flags) { - nla_xdp = (struct nlattr *)((char *)nla + nla->nla_len); - nla_xdp->nla_type = IFLA_XDP_FLAGS; - nla_xdp->nla_len = NLA_HDRLEN + sizeof(flags); - memcpy((char *)nla_xdp + NLA_HDRLEN, &flags, sizeof(flags)); - nla->nla_len += nla_xdp->nla_len; - } - - req.nh.nlmsg_len += NLA_ALIGN(nla->nla_len); - - if (send(sock, &req, req.nh.nlmsg_len, 0) < 0) { - ret = -errno; - goto cleanup; - } - - len = recv(sock, buf, sizeof(buf), 0); - if (len < 0) { - ret = -errno; - goto cleanup; - } - - for (nh = (struct nlmsghdr *)buf; NLMSG_OK(nh, len); - nh = NLMSG_NEXT(nh, len)) { - if (nh->nlmsg_pid != sa.nl_pid) { - ret = -LIBBPF_ERRNO__WRNGPID; - goto cleanup; - } - if (nh->nlmsg_seq != seq) { - ret = -LIBBPF_ERRNO__INVSEQ; - goto cleanup; - } - switch (nh->nlmsg_type) { - case NLMSG_ERROR: - err = (struct nlmsgerr *)NLMSG_DATA(nh); - if (!err->error) - continue; - ret = err->error; - nla_dump_errormsg(nh); - goto cleanup; - case NLMSG_DONE: - break; - default: - break; - } - } - - ret = 0; - -cleanup: - close(sock); - return ret; -} - int bpf_load_btf(void *btf, __u32 btf_size, char *log_buf, __u32 log_buf_size, bool do_log) { diff --git a/tools/lib/bpf/bpf.h 
b/tools/lib/bpf/bpf.h index 6f38164b2618..87520a87a75f 100644 --- a/tools/lib/bpf/bpf.h +++ b/tools/lib/bpf/bpf.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: LGPL-2.1 */ +/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ /* * common eBPF ELF operations. @@ -20,8 +20,8 @@ * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see <http://www.gnu.org/licenses> */ -#ifndef __BPF_BPF_H -#define __BPF_BPF_H +#ifndef __LIBBPF_BPF_H +#define __LIBBPF_BPF_H #include <linux/bpf.h> #include <stdbool.h> @@ -111,4 +111,4 @@ int bpf_load_btf(void *btf, __u32 btf_size, char *log_buf, __u32 log_buf_size, int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf, __u32 *buf_len, __u32 *prog_id, __u32 *fd_type, __u64 *probe_offset, __u64 *probe_addr); -#endif +#endif /* __LIBBPF_BPF_H */ diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c index cf94b0770522..449591aa9900 100644 --- a/tools/lib/bpf/btf.c +++ b/tools/lib/bpf/btf.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: LGPL-2.1 +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) /* Copyright (c) 2018 Facebook */ #include <stdlib.h> diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h index 4897e0724d4e..6db5462bb2ef 100644 --- a/tools/lib/bpf/btf.h +++ b/tools/lib/bpf/btf.h @@ -1,8 +1,8 @@ -/* SPDX-License-Identifier: LGPL-2.1 */ +/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ /* Copyright (c) 2018 Facebook */ -#ifndef __BPF_BTF_H -#define __BPF_BTF_H +#ifndef __LIBBPF_BTF_H +#define __LIBBPF_BTF_H #include <linux/types.h> @@ -23,4 +23,4 @@ int btf__resolve_type(const struct btf *btf, __u32 type_id); int btf__fd(const struct btf *btf); const char *btf__name_by_offset(const struct btf *btf, __u32 offset); -#endif +#endif /* __LIBBPF_BTF_H */ diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index bdb94939fd60..ceb918c14d80 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: LGPL-2.1 +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) /* * Common eBPF ELF object loading operations. @@ -7,19 +7,6 @@ * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com> * Copyright (C) 2015 Huawei Inc. * Copyright (C) 2017 Nicira, Inc. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; - * version 2.1 of the License (not later!) - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Lesser General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public - * License along with this program; if not, see <http://www.gnu.org/licenses> */ #define _GNU_SOURCE @@ -228,7 +215,7 @@ struct bpf_object { }; #define obj_elf_valid(o) ((o)->efile.elf) -static void bpf_program__unload(struct bpf_program *prog) +void bpf_program__unload(struct bpf_program *prog) { int i; @@ -470,7 +457,8 @@ static int bpf_object__elf_init(struct bpf_object *obj) obj->efile.fd = open(obj->path, O_RDONLY); if (obj->efile.fd < 0) { char errmsg[STRERR_BUFSIZE]; - char *cp = str_error(errno, errmsg, sizeof(errmsg)); + char *cp = libbpf_strerror_r(errno, errmsg, + sizeof(errmsg)); pr_warning("failed to open %s: %s\n", obj->path, cp); return -errno; @@ -811,7 +799,8 @@ static int bpf_object__elf_collect(struct bpf_object *obj) data->d_size, name, idx); if (err) { char errmsg[STRERR_BUFSIZE]; - char *cp = str_error(-err, errmsg, sizeof(errmsg)); + char *cp = libbpf_strerror_r(-err, errmsg, + sizeof(errmsg)); pr_warning("failed to alloc program %s (%s): %s", name, obj->path, cp); @@ -1140,7 +1129,7 @@ bpf_object__create_maps(struct bpf_object *obj) *pfd = bpf_create_map_xattr(&create_attr); if (*pfd < 0 && create_attr.btf_key_type_id) { - cp = str_error(errno, errmsg, sizeof(errmsg)); + cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); pr_warning("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n", map->name, cp, errno); create_attr.btf_fd = 0; @@ -1155,7 +1144,7 @@ bpf_object__create_maps(struct bpf_object *obj) size_t j; err = *pfd; - cp = str_error(errno, errmsg, sizeof(errmsg)); + cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); pr_warning("failed to create map (name: '%s'): %s\n", map->name, cp); for (j = 0; j < i; j++) @@ -1339,7 +1328,7 @@ load_program(enum bpf_prog_type type, enum bpf_attach_type expected_attach_type, } ret = -LIBBPF_ERRNO__LOAD; - cp = str_error(errno, errmsg, sizeof(errmsg)); + cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); pr_warning("load bpf program failed: %s\n", cp); if (log_buf && log_buf[0] != '\0') { @@ -1375,9 +1364,9 @@ out: return ret; } -static int +int bpf_program__load(struct bpf_program *prog, - char *license, u32 kern_version) + char *license, __u32 kern_version) { int err = 0, fd, i; @@ -1502,6 +1491,7 @@ static bool bpf_prog_type__needs_kver(enum bpf_prog_type type) case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: case BPF_PROG_TYPE_LIRC_MODE2: case BPF_PROG_TYPE_SK_REUSEPORT: + case BPF_PROG_TYPE_FLOW_DISSECTOR: return false; case BPF_PROG_TYPE_UNSPEC: case BPF_PROG_TYPE_KPROBE: @@ -1654,7 +1644,7 @@ static int check_path(const char *path) dir = dirname(dname); if (statfs(dir, &st_fs)) { - cp = str_error(errno, errmsg, sizeof(errmsg)); + cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); pr_warning("failed to statfs %s: %s\n", dir, cp); err = -errno; } @@ -1690,7 +1680,7 @@ int bpf_program__pin_instance(struct bpf_program *prog, const char *path, } if (bpf_obj_pin(prog->instances.fds[instance], path)) { - cp = str_error(errno, errmsg, sizeof(errmsg)); + cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); pr_warning("failed to pin program: %s\n", cp); return -errno; } @@ -1708,7 +1698,7 @@ static int make_dir(const char *path) err = -errno; if (err) { - cp = str_error(-err, errmsg, sizeof(errmsg)); + cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg)); pr_warning("failed to mkdir %s: %s\n", path, cp); } return err; @@ -1770,7 +1760,7 @@ int bpf_map__pin(struct bpf_map *map, const char *path) } if (bpf_obj_pin(map->fd, path)) { - cp = 
str_error(errno, errmsg, sizeof(errmsg)); + cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); pr_warning("failed to pin map: %s\n", cp); return -errno; } @@ -2084,57 +2074,90 @@ void bpf_program__set_expected_attach_type(struct bpf_program *prog, prog->expected_attach_type = type; } -#define BPF_PROG_SEC_FULL(string, ptype, atype) \ - { string, sizeof(string) - 1, ptype, atype } +#define BPF_PROG_SEC_IMPL(string, ptype, eatype, atype) \ + { string, sizeof(string) - 1, ptype, eatype, atype } + +/* Programs that can NOT be attached. */ +#define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, -EINVAL) -#define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_FULL(string, ptype, 0) +/* Programs that can be attached. */ +#define BPF_APROG_SEC(string, ptype, atype) \ + BPF_PROG_SEC_IMPL(string, ptype, 0, atype) -#define BPF_S_PROG_SEC(string, ptype) \ - BPF_PROG_SEC_FULL(string, BPF_PROG_TYPE_CGROUP_SOCK, ptype) +/* Programs that must specify expected attach type at load time. */ +#define BPF_EAPROG_SEC(string, ptype, eatype) \ + BPF_PROG_SEC_IMPL(string, ptype, eatype, eatype) -#define BPF_SA_PROG_SEC(string, ptype) \ - BPF_PROG_SEC_FULL(string, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, ptype) +/* Programs that can be attached but attach type can't be identified by section + * name. Kept for backward compatibility. + */ +#define BPF_APROG_COMPAT(string, ptype) BPF_PROG_SEC(string, ptype) static const struct { const char *sec; size_t len; enum bpf_prog_type prog_type; enum bpf_attach_type expected_attach_type; + enum bpf_attach_type attach_type; } section_names[] = { - BPF_PROG_SEC("socket", BPF_PROG_TYPE_SOCKET_FILTER), - BPF_PROG_SEC("kprobe/", BPF_PROG_TYPE_KPROBE), - BPF_PROG_SEC("kretprobe/", BPF_PROG_TYPE_KPROBE), - BPF_PROG_SEC("classifier", BPF_PROG_TYPE_SCHED_CLS), - BPF_PROG_SEC("action", BPF_PROG_TYPE_SCHED_ACT), - BPF_PROG_SEC("tracepoint/", BPF_PROG_TYPE_TRACEPOINT), - BPF_PROG_SEC("raw_tracepoint/", BPF_PROG_TYPE_RAW_TRACEPOINT), - BPF_PROG_SEC("xdp", BPF_PROG_TYPE_XDP), - BPF_PROG_SEC("perf_event", BPF_PROG_TYPE_PERF_EVENT), - BPF_PROG_SEC("cgroup/skb", BPF_PROG_TYPE_CGROUP_SKB), - BPF_PROG_SEC("cgroup/sock", BPF_PROG_TYPE_CGROUP_SOCK), - BPF_PROG_SEC("cgroup/dev", BPF_PROG_TYPE_CGROUP_DEVICE), - BPF_PROG_SEC("lwt_in", BPF_PROG_TYPE_LWT_IN), - BPF_PROG_SEC("lwt_out", BPF_PROG_TYPE_LWT_OUT), - BPF_PROG_SEC("lwt_xmit", BPF_PROG_TYPE_LWT_XMIT), - BPF_PROG_SEC("lwt_seg6local", BPF_PROG_TYPE_LWT_SEG6LOCAL), - BPF_PROG_SEC("sockops", BPF_PROG_TYPE_SOCK_OPS), - BPF_PROG_SEC("sk_skb", BPF_PROG_TYPE_SK_SKB), - BPF_PROG_SEC("sk_msg", BPF_PROG_TYPE_SK_MSG), - BPF_PROG_SEC("lirc_mode2", BPF_PROG_TYPE_LIRC_MODE2), - BPF_SA_PROG_SEC("cgroup/bind4", BPF_CGROUP_INET4_BIND), - BPF_SA_PROG_SEC("cgroup/bind6", BPF_CGROUP_INET6_BIND), - BPF_SA_PROG_SEC("cgroup/connect4", BPF_CGROUP_INET4_CONNECT), - BPF_SA_PROG_SEC("cgroup/connect6", BPF_CGROUP_INET6_CONNECT), - BPF_SA_PROG_SEC("cgroup/sendmsg4", BPF_CGROUP_UDP4_SENDMSG), - BPF_SA_PROG_SEC("cgroup/sendmsg6", BPF_CGROUP_UDP6_SENDMSG), - BPF_S_PROG_SEC("cgroup/post_bind4", BPF_CGROUP_INET4_POST_BIND), - BPF_S_PROG_SEC("cgroup/post_bind6", BPF_CGROUP_INET6_POST_BIND), + BPF_PROG_SEC("socket", BPF_PROG_TYPE_SOCKET_FILTER), + BPF_PROG_SEC("kprobe/", BPF_PROG_TYPE_KPROBE), + BPF_PROG_SEC("kretprobe/", BPF_PROG_TYPE_KPROBE), + BPF_PROG_SEC("classifier", BPF_PROG_TYPE_SCHED_CLS), + BPF_PROG_SEC("action", BPF_PROG_TYPE_SCHED_ACT), + BPF_PROG_SEC("tracepoint/", BPF_PROG_TYPE_TRACEPOINT), + BPF_PROG_SEC("raw_tracepoint/", BPF_PROG_TYPE_RAW_TRACEPOINT), + 
BPF_PROG_SEC("xdp", BPF_PROG_TYPE_XDP), + BPF_PROG_SEC("perf_event", BPF_PROG_TYPE_PERF_EVENT), + BPF_PROG_SEC("lwt_in", BPF_PROG_TYPE_LWT_IN), + BPF_PROG_SEC("lwt_out", BPF_PROG_TYPE_LWT_OUT), + BPF_PROG_SEC("lwt_xmit", BPF_PROG_TYPE_LWT_XMIT), + BPF_PROG_SEC("lwt_seg6local", BPF_PROG_TYPE_LWT_SEG6LOCAL), + BPF_APROG_SEC("cgroup_skb/ingress", BPF_PROG_TYPE_CGROUP_SKB, + BPF_CGROUP_INET_INGRESS), + BPF_APROG_SEC("cgroup_skb/egress", BPF_PROG_TYPE_CGROUP_SKB, + BPF_CGROUP_INET_EGRESS), + BPF_APROG_COMPAT("cgroup/skb", BPF_PROG_TYPE_CGROUP_SKB), + BPF_APROG_SEC("cgroup/sock", BPF_PROG_TYPE_CGROUP_SOCK, + BPF_CGROUP_INET_SOCK_CREATE), + BPF_EAPROG_SEC("cgroup/post_bind4", BPF_PROG_TYPE_CGROUP_SOCK, + BPF_CGROUP_INET4_POST_BIND), + BPF_EAPROG_SEC("cgroup/post_bind6", BPF_PROG_TYPE_CGROUP_SOCK, + BPF_CGROUP_INET6_POST_BIND), + BPF_APROG_SEC("cgroup/dev", BPF_PROG_TYPE_CGROUP_DEVICE, + BPF_CGROUP_DEVICE), + BPF_APROG_SEC("sockops", BPF_PROG_TYPE_SOCK_OPS, + BPF_CGROUP_SOCK_OPS), + BPF_APROG_SEC("sk_skb/stream_parser", BPF_PROG_TYPE_SK_SKB, + BPF_SK_SKB_STREAM_PARSER), + BPF_APROG_SEC("sk_skb/stream_verdict", BPF_PROG_TYPE_SK_SKB, + BPF_SK_SKB_STREAM_VERDICT), + BPF_APROG_COMPAT("sk_skb", BPF_PROG_TYPE_SK_SKB), + BPF_APROG_SEC("sk_msg", BPF_PROG_TYPE_SK_MSG, + BPF_SK_MSG_VERDICT), + BPF_APROG_SEC("lirc_mode2", BPF_PROG_TYPE_LIRC_MODE2, + BPF_LIRC_MODE2), + BPF_APROG_SEC("flow_dissector", BPF_PROG_TYPE_FLOW_DISSECTOR, + BPF_FLOW_DISSECTOR), + BPF_EAPROG_SEC("cgroup/bind4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, + BPF_CGROUP_INET4_BIND), + BPF_EAPROG_SEC("cgroup/bind6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, + BPF_CGROUP_INET6_BIND), + BPF_EAPROG_SEC("cgroup/connect4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, + BPF_CGROUP_INET4_CONNECT), + BPF_EAPROG_SEC("cgroup/connect6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, + BPF_CGROUP_INET6_CONNECT), + BPF_EAPROG_SEC("cgroup/sendmsg4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, + BPF_CGROUP_UDP4_SENDMSG), + BPF_EAPROG_SEC("cgroup/sendmsg6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, + BPF_CGROUP_UDP6_SENDMSG), }; +#undef BPF_PROG_SEC_IMPL #undef BPF_PROG_SEC -#undef BPF_PROG_SEC_FULL -#undef BPF_S_PROG_SEC -#undef BPF_SA_PROG_SEC +#undef BPF_APROG_SEC +#undef BPF_EAPROG_SEC +#undef BPF_APROG_COMPAT int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type, enum bpf_attach_type *expected_attach_type) @@ -2154,6 +2177,25 @@ int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type, return -EINVAL; } +int libbpf_attach_type_by_name(const char *name, + enum bpf_attach_type *attach_type) +{ + int i; + + if (!name) + return -EINVAL; + + for (i = 0; i < ARRAY_SIZE(section_names); i++) { + if (strncmp(name, section_names[i].sec, section_names[i].len)) + continue; + if (section_names[i].attach_type == -EINVAL) + return -EINVAL; + *attach_type = section_names[i].attach_type; + return 0; + } + return -EINVAL; +} + static int bpf_program__identify_section(struct bpf_program *prog, enum bpf_prog_type *prog_type, @@ -2336,7 +2378,7 @@ int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr, bpf_program__set_expected_attach_type(prog, expected_attach_type); - if (!bpf_program__is_function_storage(prog, obj) && !first_prog) + if (!first_prog) first_prog = prog; } diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index 96c55fac54c3..8af8d3663991 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: LGPL-2.1 */ +/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ /* * Common eBPF ELF object loading operations. 
@@ -6,22 +6,9 @@ * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org> * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com> * Copyright (C) 2015 Huawei Inc. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; - * version 2.1 of the License (not later!) - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this program; if not, see <http://www.gnu.org/licenses> */ -#ifndef __BPF_LIBBPF_H -#define __BPF_LIBBPF_H +#ifndef __LIBBPF_LIBBPF_H +#define __LIBBPF_LIBBPF_H #include <stdio.h> #include <stdint.h> @@ -46,6 +33,7 @@ enum libbpf_errno { LIBBPF_ERRNO__PROGTYPE, /* Kernel doesn't support this program type */ LIBBPF_ERRNO__WRNGPID, /* Wrong pid in netlink message */ LIBBPF_ERRNO__INVSEQ, /* Invalid netlink sequence */ + LIBBPF_ERRNO__NLPARSE, /* netlink parsing error */ __LIBBPF_ERRNO__END, }; @@ -103,6 +91,8 @@ void *bpf_object__priv(struct bpf_object *prog); int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type, enum bpf_attach_type *expected_attach_type); +int libbpf_attach_type_by_name(const char *name, + enum bpf_attach_type *attach_type); /* Accessors of bpf_program */ struct bpf_program; @@ -125,10 +115,13 @@ void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex); const char *bpf_program__title(struct bpf_program *prog, bool needs_copy); +int bpf_program__load(struct bpf_program *prog, char *license, + __u32 kern_version); int bpf_program__fd(struct bpf_program *prog); int bpf_program__pin_instance(struct bpf_program *prog, const char *path, int instance); int bpf_program__pin(struct bpf_program *prog, const char *path); +void bpf_program__unload(struct bpf_program *prog); struct bpf_insn; @@ -297,4 +290,16 @@ int bpf_perf_event_read_simple(void *mem, unsigned long size, unsigned long page_size, void **buf, size_t *buf_len, bpf_perf_event_print_t fn, void *priv); -#endif + +struct nlattr; +typedef int (*libbpf_dump_nlmsg_t)(void *cookie, void *msg, struct nlattr **tb); +int libbpf_netlink_open(unsigned int *nl_pid); +int libbpf_nl_get_link(int sock, unsigned int nl_pid, + libbpf_dump_nlmsg_t dump_link_nlmsg, void *cookie); +int libbpf_nl_get_class(int sock, unsigned int nl_pid, int ifindex, + libbpf_dump_nlmsg_t dump_class_nlmsg, void *cookie); +int libbpf_nl_get_qdisc(int sock, unsigned int nl_pid, int ifindex, + libbpf_dump_nlmsg_t dump_qdisc_nlmsg, void *cookie); +int libbpf_nl_get_filter(int sock, unsigned int nl_pid, int ifindex, int handle, + libbpf_dump_nlmsg_t dump_filter_nlmsg, void *cookie); +#endif /* __LIBBPF_LIBBPF_H */ diff --git a/tools/lib/bpf/libbpf_errno.c b/tools/lib/bpf/libbpf_errno.c index d9ba851bd7f9..d83b17f8435c 100644 --- a/tools/lib/bpf/libbpf_errno.c +++ b/tools/lib/bpf/libbpf_errno.c @@ -1,23 +1,10 @@ -// SPDX-License-Identifier: LGPL-2.1 +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) /* * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org> * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com> * Copyright (C) 2015 Huawei Inc. * Copyright (C) 2017 Nicira, Inc. 
- * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; - * version 2.1 of the License (not later!) - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this program; if not, see <http://www.gnu.org/licenses> */ #include <stdio.h> @@ -42,6 +29,7 @@ static const char *libbpf_strerror_table[NR_ERRNO] = { [ERRCODE_OFFSET(PROGTYPE)] = "Kernel doesn't support this program type", [ERRCODE_OFFSET(WRNGPID)] = "Wrong pid in netlink message", [ERRCODE_OFFSET(INVSEQ)] = "Invalid netlink sequence", + [ERRCODE_OFFSET(NLPARSE)] = "Incorrect netlink message parsing", }; int libbpf_strerror(int err, char *buf, size_t size) diff --git a/tools/lib/bpf/netlink.c b/tools/lib/bpf/netlink.c new file mode 100644 index 000000000000..0ce67aea8f3b --- /dev/null +++ b/tools/lib/bpf/netlink.c @@ -0,0 +1,337 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) +/* Copyright (c) 2018 Facebook */ + +#include <stdlib.h> +#include <memory.h> +#include <unistd.h> +#include <linux/bpf.h> +#include <linux/rtnetlink.h> +#include <sys/socket.h> +#include <errno.h> +#include <time.h> + +#include "bpf.h" +#include "libbpf.h" +#include "nlattr.h" + +#ifndef SOL_NETLINK +#define SOL_NETLINK 270 +#endif + +typedef int (*__dump_nlmsg_t)(struct nlmsghdr *nlmsg, libbpf_dump_nlmsg_t, + void *cookie); + +int libbpf_netlink_open(__u32 *nl_pid) +{ + struct sockaddr_nl sa; + socklen_t addrlen; + int one = 1, ret; + int sock; + + memset(&sa, 0, sizeof(sa)); + sa.nl_family = AF_NETLINK; + + sock = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE); + if (sock < 0) + return -errno; + + if (setsockopt(sock, SOL_NETLINK, NETLINK_EXT_ACK, + &one, sizeof(one)) < 0) { + fprintf(stderr, "Netlink error reporting not supported\n"); + } + + if (bind(sock, (struct sockaddr *)&sa, sizeof(sa)) < 0) { + ret = -errno; + goto cleanup; + } + + addrlen = sizeof(sa); + if (getsockname(sock, (struct sockaddr *)&sa, &addrlen) < 0) { + ret = -errno; + goto cleanup; + } + + if (addrlen != sizeof(sa)) { + ret = -LIBBPF_ERRNO__INTERNAL; + goto cleanup; + } + + *nl_pid = sa.nl_pid; + return sock; + +cleanup: + close(sock); + return ret; +} + +static int bpf_netlink_recv(int sock, __u32 nl_pid, int seq, + __dump_nlmsg_t _fn, libbpf_dump_nlmsg_t fn, + void *cookie) +{ + bool multipart = true; + struct nlmsgerr *err; + struct nlmsghdr *nh; + char buf[4096]; + int len, ret; + + while (multipart) { + multipart = false; + len = recv(sock, buf, sizeof(buf), 0); + if (len < 0) { + ret = -errno; + goto done; + } + + if (len == 0) + break; + + for (nh = (struct nlmsghdr *)buf; NLMSG_OK(nh, len); + nh = NLMSG_NEXT(nh, len)) { + if (nh->nlmsg_pid != nl_pid) { + ret = -LIBBPF_ERRNO__WRNGPID; + goto done; + } + if (nh->nlmsg_seq != seq) { + ret = -LIBBPF_ERRNO__INVSEQ; + goto done; + } + if (nh->nlmsg_flags & NLM_F_MULTI) + multipart = true; + switch (nh->nlmsg_type) { + case NLMSG_ERROR: + err = (struct nlmsgerr *)NLMSG_DATA(nh); + if (!err->error) + continue; + ret = err->error; + libbpf_nla_dump_errormsg(nh); + goto done; + case NLMSG_DONE: + return 0; + default: + break; + } + if (_fn) { + ret = _fn(nh, fn, cookie); + if (ret) + return ret; + } + } 
+ } + ret = 0; +done: + return ret; +} + +int bpf_set_link_xdp_fd(int ifindex, int fd, __u32 flags) +{ + int sock, seq = 0, ret; + struct nlattr *nla, *nla_xdp; + struct { + struct nlmsghdr nh; + struct ifinfomsg ifinfo; + char attrbuf[64]; + } req; + __u32 nl_pid; + + sock = libbpf_netlink_open(&nl_pid); + if (sock < 0) + return sock; + + memset(&req, 0, sizeof(req)); + req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg)); + req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK; + req.nh.nlmsg_type = RTM_SETLINK; + req.nh.nlmsg_pid = 0; + req.nh.nlmsg_seq = ++seq; + req.ifinfo.ifi_family = AF_UNSPEC; + req.ifinfo.ifi_index = ifindex; + + /* started nested attribute for XDP */ + nla = (struct nlattr *)(((char *)&req) + + NLMSG_ALIGN(req.nh.nlmsg_len)); + nla->nla_type = NLA_F_NESTED | IFLA_XDP; + nla->nla_len = NLA_HDRLEN; + + /* add XDP fd */ + nla_xdp = (struct nlattr *)((char *)nla + nla->nla_len); + nla_xdp->nla_type = IFLA_XDP_FD; + nla_xdp->nla_len = NLA_HDRLEN + sizeof(int); + memcpy((char *)nla_xdp + NLA_HDRLEN, &fd, sizeof(fd)); + nla->nla_len += nla_xdp->nla_len; + + /* if user passed in any flags, add those too */ + if (flags) { + nla_xdp = (struct nlattr *)((char *)nla + nla->nla_len); + nla_xdp->nla_type = IFLA_XDP_FLAGS; + nla_xdp->nla_len = NLA_HDRLEN + sizeof(flags); + memcpy((char *)nla_xdp + NLA_HDRLEN, &flags, sizeof(flags)); + nla->nla_len += nla_xdp->nla_len; + } + + req.nh.nlmsg_len += NLA_ALIGN(nla->nla_len); + + if (send(sock, &req, req.nh.nlmsg_len, 0) < 0) { + ret = -errno; + goto cleanup; + } + ret = bpf_netlink_recv(sock, nl_pid, seq, NULL, NULL, NULL); + +cleanup: + close(sock); + return ret; +} + +static int __dump_link_nlmsg(struct nlmsghdr *nlh, + libbpf_dump_nlmsg_t dump_link_nlmsg, void *cookie) +{ + struct nlattr *tb[IFLA_MAX + 1], *attr; + struct ifinfomsg *ifi = NLMSG_DATA(nlh); + int len; + + len = nlh->nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)); + attr = (struct nlattr *) ((void *) ifi + NLMSG_ALIGN(sizeof(*ifi))); + if (libbpf_nla_parse(tb, IFLA_MAX, attr, len, NULL) != 0) + return -LIBBPF_ERRNO__NLPARSE; + + return dump_link_nlmsg(cookie, ifi, tb); +} + +int libbpf_nl_get_link(int sock, unsigned int nl_pid, + libbpf_dump_nlmsg_t dump_link_nlmsg, void *cookie) +{ + struct { + struct nlmsghdr nlh; + struct ifinfomsg ifm; + } req = { + .nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg)), + .nlh.nlmsg_type = RTM_GETLINK, + .nlh.nlmsg_flags = NLM_F_DUMP | NLM_F_REQUEST, + .ifm.ifi_family = AF_PACKET, + }; + int seq = time(NULL); + + req.nlh.nlmsg_seq = seq; + if (send(sock, &req, req.nlh.nlmsg_len, 0) < 0) + return -errno; + + return bpf_netlink_recv(sock, nl_pid, seq, __dump_link_nlmsg, + dump_link_nlmsg, cookie); +} + +static int __dump_class_nlmsg(struct nlmsghdr *nlh, + libbpf_dump_nlmsg_t dump_class_nlmsg, + void *cookie) +{ + struct nlattr *tb[TCA_MAX + 1], *attr; + struct tcmsg *t = NLMSG_DATA(nlh); + int len; + + len = nlh->nlmsg_len - NLMSG_LENGTH(sizeof(*t)); + attr = (struct nlattr *) ((void *) t + NLMSG_ALIGN(sizeof(*t))); + if (libbpf_nla_parse(tb, TCA_MAX, attr, len, NULL) != 0) + return -LIBBPF_ERRNO__NLPARSE; + + return dump_class_nlmsg(cookie, t, tb); +} + +int libbpf_nl_get_class(int sock, unsigned int nl_pid, int ifindex, + libbpf_dump_nlmsg_t dump_class_nlmsg, void *cookie) +{ + struct { + struct nlmsghdr nlh; + struct tcmsg t; + } req = { + .nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg)), + .nlh.nlmsg_type = RTM_GETTCLASS, + .nlh.nlmsg_flags = NLM_F_DUMP | NLM_F_REQUEST, + .t.tcm_family = AF_UNSPEC, + .t.tcm_ifindex = ifindex, + }; 
+ int seq = time(NULL); + + req.nlh.nlmsg_seq = seq; + if (send(sock, &req, req.nlh.nlmsg_len, 0) < 0) + return -errno; + + return bpf_netlink_recv(sock, nl_pid, seq, __dump_class_nlmsg, + dump_class_nlmsg, cookie); +} + +static int __dump_qdisc_nlmsg(struct nlmsghdr *nlh, + libbpf_dump_nlmsg_t dump_qdisc_nlmsg, + void *cookie) +{ + struct nlattr *tb[TCA_MAX + 1], *attr; + struct tcmsg *t = NLMSG_DATA(nlh); + int len; + + len = nlh->nlmsg_len - NLMSG_LENGTH(sizeof(*t)); + attr = (struct nlattr *) ((void *) t + NLMSG_ALIGN(sizeof(*t))); + if (libbpf_nla_parse(tb, TCA_MAX, attr, len, NULL) != 0) + return -LIBBPF_ERRNO__NLPARSE; + + return dump_qdisc_nlmsg(cookie, t, tb); +} + +int libbpf_nl_get_qdisc(int sock, unsigned int nl_pid, int ifindex, + libbpf_dump_nlmsg_t dump_qdisc_nlmsg, void *cookie) +{ + struct { + struct nlmsghdr nlh; + struct tcmsg t; + } req = { + .nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg)), + .nlh.nlmsg_type = RTM_GETQDISC, + .nlh.nlmsg_flags = NLM_F_DUMP | NLM_F_REQUEST, + .t.tcm_family = AF_UNSPEC, + .t.tcm_ifindex = ifindex, + }; + int seq = time(NULL); + + req.nlh.nlmsg_seq = seq; + if (send(sock, &req, req.nlh.nlmsg_len, 0) < 0) + return -errno; + + return bpf_netlink_recv(sock, nl_pid, seq, __dump_qdisc_nlmsg, + dump_qdisc_nlmsg, cookie); +} + +static int __dump_filter_nlmsg(struct nlmsghdr *nlh, + libbpf_dump_nlmsg_t dump_filter_nlmsg, + void *cookie) +{ + struct nlattr *tb[TCA_MAX + 1], *attr; + struct tcmsg *t = NLMSG_DATA(nlh); + int len; + + len = nlh->nlmsg_len - NLMSG_LENGTH(sizeof(*t)); + attr = (struct nlattr *) ((void *) t + NLMSG_ALIGN(sizeof(*t))); + if (libbpf_nla_parse(tb, TCA_MAX, attr, len, NULL) != 0) + return -LIBBPF_ERRNO__NLPARSE; + + return dump_filter_nlmsg(cookie, t, tb); +} + +int libbpf_nl_get_filter(int sock, unsigned int nl_pid, int ifindex, int handle, + libbpf_dump_nlmsg_t dump_filter_nlmsg, void *cookie) +{ + struct { + struct nlmsghdr nlh; + struct tcmsg t; + } req = { + .nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg)), + .nlh.nlmsg_type = RTM_GETTFILTER, + .nlh.nlmsg_flags = NLM_F_DUMP | NLM_F_REQUEST, + .t.tcm_family = AF_UNSPEC, + .t.tcm_ifindex = ifindex, + .t.tcm_parent = handle, + }; + int seq = time(NULL); + + req.nlh.nlmsg_seq = seq; + if (send(sock, &req, req.nlh.nlmsg_len, 0) < 0) + return -errno; + + return bpf_netlink_recv(sock, nl_pid, seq, __dump_filter_nlmsg, + dump_filter_nlmsg, cookie); +} diff --git a/tools/lib/bpf/nlattr.c b/tools/lib/bpf/nlattr.c index 4719434278b2..1e69c0c8d413 100644 --- a/tools/lib/bpf/nlattr.c +++ b/tools/lib/bpf/nlattr.c @@ -1,13 +1,8 @@ -// SPDX-License-Identifier: LGPL-2.1 +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) /* * NETLINK Netlink attributes * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation version 2.1 - * of the License. 
- * * Copyright (c) 2003-2013 Thomas Graf <tgraf@suug.ch> */ @@ -17,20 +12,15 @@ #include <string.h> #include <stdio.h> -static uint16_t nla_attr_minlen[NLA_TYPE_MAX+1] = { - [NLA_U8] = sizeof(uint8_t), - [NLA_U16] = sizeof(uint16_t), - [NLA_U32] = sizeof(uint32_t), - [NLA_U64] = sizeof(uint64_t), - [NLA_STRING] = 1, - [NLA_FLAG] = 0, +static uint16_t nla_attr_minlen[LIBBPF_NLA_TYPE_MAX+1] = { + [LIBBPF_NLA_U8] = sizeof(uint8_t), + [LIBBPF_NLA_U16] = sizeof(uint16_t), + [LIBBPF_NLA_U32] = sizeof(uint32_t), + [LIBBPF_NLA_U64] = sizeof(uint64_t), + [LIBBPF_NLA_STRING] = 1, + [LIBBPF_NLA_FLAG] = 0, }; -static int nla_len(const struct nlattr *nla) -{ - return nla->nla_len - NLA_HDRLEN; -} - static struct nlattr *nla_next(const struct nlattr *nla, int *remaining) { int totlen = NLA_ALIGN(nla->nla_len); @@ -46,20 +36,15 @@ static int nla_ok(const struct nlattr *nla, int remaining) nla->nla_len <= remaining; } -static void *nla_data(const struct nlattr *nla) -{ - return (char *) nla + NLA_HDRLEN; -} - static int nla_type(const struct nlattr *nla) { return nla->nla_type & NLA_TYPE_MASK; } static int validate_nla(struct nlattr *nla, int maxtype, - struct nla_policy *policy) + struct libbpf_nla_policy *policy) { - struct nla_policy *pt; + struct libbpf_nla_policy *pt; unsigned int minlen = 0; int type = nla_type(nla); @@ -68,23 +53,24 @@ static int validate_nla(struct nlattr *nla, int maxtype, pt = &policy[type]; - if (pt->type > NLA_TYPE_MAX) + if (pt->type > LIBBPF_NLA_TYPE_MAX) return 0; if (pt->minlen) minlen = pt->minlen; - else if (pt->type != NLA_UNSPEC) + else if (pt->type != LIBBPF_NLA_UNSPEC) minlen = nla_attr_minlen[pt->type]; - if (nla_len(nla) < minlen) + if (libbpf_nla_len(nla) < minlen) return -1; - if (pt->maxlen && nla_len(nla) > pt->maxlen) + if (pt->maxlen && libbpf_nla_len(nla) > pt->maxlen) return -1; - if (pt->type == NLA_STRING) { - char *data = nla_data(nla); - if (data[nla_len(nla) - 1] != '\0') + if (pt->type == LIBBPF_NLA_STRING) { + char *data = libbpf_nla_data(nla); + + if (data[libbpf_nla_len(nla) - 1] != '\0') return -1; } @@ -114,15 +100,15 @@ static inline int nlmsg_len(const struct nlmsghdr *nlh) * @see nla_validate * @return 0 on success or a negative error code. */ -static int nla_parse(struct nlattr *tb[], int maxtype, struct nlattr *head, int len, - struct nla_policy *policy) +int libbpf_nla_parse(struct nlattr *tb[], int maxtype, struct nlattr *head, + int len, struct libbpf_nla_policy *policy) { struct nlattr *nla; int rem, err; memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1)); - nla_for_each_attr(nla, head, len, rem) { + libbpf_nla_for_each_attr(nla, head, len, rem) { int type = nla_type(nla); if (type > maxtype) @@ -146,12 +132,33 @@ errout: return err; } +/** + * Create attribute index based on nested attribute + * @arg tb Index array to be filled (maxtype+1 elements). + * @arg maxtype Maximum attribute type expected and accepted. + * @arg nla Nested Attribute. + * @arg policy Attribute validation policy. + * + * Feeds the stream of attributes nested into the specified attribute + * to libbpf_nla_parse(). + * + * @see libbpf_nla_parse + * @return 0 on success or a negative error code. 
+ */ +int libbpf_nla_parse_nested(struct nlattr *tb[], int maxtype, + struct nlattr *nla, + struct libbpf_nla_policy *policy) +{ + return libbpf_nla_parse(tb, maxtype, libbpf_nla_data(nla), + libbpf_nla_len(nla), policy); +} + /* dump netlink extended ack error message */ -int nla_dump_errormsg(struct nlmsghdr *nlh) +int libbpf_nla_dump_errormsg(struct nlmsghdr *nlh) { - struct nla_policy extack_policy[NLMSGERR_ATTR_MAX + 1] = { - [NLMSGERR_ATTR_MSG] = { .type = NLA_STRING }, - [NLMSGERR_ATTR_OFFS] = { .type = NLA_U32 }, + struct libbpf_nla_policy extack_policy[NLMSGERR_ATTR_MAX + 1] = { + [NLMSGERR_ATTR_MSG] = { .type = LIBBPF_NLA_STRING }, + [NLMSGERR_ATTR_OFFS] = { .type = LIBBPF_NLA_U32 }, }; struct nlattr *tb[NLMSGERR_ATTR_MAX + 1], *attr; struct nlmsgerr *err; @@ -172,14 +179,15 @@ int nla_dump_errormsg(struct nlmsghdr *nlh) attr = (struct nlattr *) ((void *) err + hlen); alen = nlh->nlmsg_len - hlen; - if (nla_parse(tb, NLMSGERR_ATTR_MAX, attr, alen, extack_policy) != 0) { + if (libbpf_nla_parse(tb, NLMSGERR_ATTR_MAX, attr, alen, + extack_policy) != 0) { fprintf(stderr, "Failed to parse extended error attributes\n"); return 0; } if (tb[NLMSGERR_ATTR_MSG]) - errmsg = (char *) nla_data(tb[NLMSGERR_ATTR_MSG]); + errmsg = (char *) libbpf_nla_data(tb[NLMSGERR_ATTR_MSG]); fprintf(stderr, "Kernel error message: %s\n", errmsg); diff --git a/tools/lib/bpf/nlattr.h b/tools/lib/bpf/nlattr.h index 931a71f68f93..6cc3ac91690f 100644 --- a/tools/lib/bpf/nlattr.h +++ b/tools/lib/bpf/nlattr.h @@ -1,18 +1,13 @@ -/* SPDX-License-Identifier: LGPL-2.1 */ +/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ /* * NETLINK Netlink attributes * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation version 2.1 - * of the License. - * * Copyright (c) 2003-2013 Thomas Graf <tgraf@suug.ch> */ -#ifndef __NLATTR_H -#define __NLATTR_H +#ifndef __LIBBPF_NLATTR_H +#define __LIBBPF_NLATTR_H #include <stdint.h> #include <linux/netlink.h> @@ -23,19 +18,19 @@ * Standard attribute types to specify validation policy */ enum { - NLA_UNSPEC, /**< Unspecified type, binary data chunk */ - NLA_U8, /**< 8 bit integer */ - NLA_U16, /**< 16 bit integer */ - NLA_U32, /**< 32 bit integer */ - NLA_U64, /**< 64 bit integer */ - NLA_STRING, /**< NUL terminated character string */ - NLA_FLAG, /**< Flag */ - NLA_MSECS, /**< Micro seconds (64bit) */ - NLA_NESTED, /**< Nested attributes */ - __NLA_TYPE_MAX, + LIBBPF_NLA_UNSPEC, /**< Unspecified type, binary data chunk */ + LIBBPF_NLA_U8, /**< 8 bit integer */ + LIBBPF_NLA_U16, /**< 16 bit integer */ + LIBBPF_NLA_U32, /**< 32 bit integer */ + LIBBPF_NLA_U64, /**< 64 bit integer */ + LIBBPF_NLA_STRING, /**< NUL terminated character string */ + LIBBPF_NLA_FLAG, /**< Flag */ + LIBBPF_NLA_MSECS, /**< Micro seconds (64bit) */ + LIBBPF_NLA_NESTED, /**< Nested attributes */ + __LIBBPF_NLA_TYPE_MAX, }; -#define NLA_TYPE_MAX (__NLA_TYPE_MAX - 1) +#define LIBBPF_NLA_TYPE_MAX (__LIBBPF_NLA_TYPE_MAX - 1) /** * @ingroup attr @@ -43,8 +38,8 @@ enum { * * See section @core_doc{core_attr_parse,Attribute Parsing} for more details. 
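/*
 * Companion sketch for libbpf_nla_parse_nested() above: re-parsing the
 * payload of one attribute (here a qdisc's TCA_OPTIONS) as its own
 * attribute stream. EX_OPT_MAX is hypothetical; passing a NULL policy
 * indexes the attributes without per-type validation, per the policy
 * check in the parser.
 */
	struct nlattr *opts[EX_OPT_MAX + 1];

	if (tb[TCA_OPTIONS] &&
	    libbpf_nla_parse_nested(opts, EX_OPT_MAX, tb[TCA_OPTIONS],
				    NULL) != 0)
		return -LIBBPF_ERRNO__NLPARSE;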
*/ -struct nla_policy { - /** Type of attribute or NLA_UNSPEC */ +struct libbpf_nla_policy { + /** Type of attribute or LIBBPF_NLA_UNSPEC */ uint16_t type; /** Minimal length of payload required */ @@ -62,11 +57,50 @@ struct nla_policy { * @arg len length of attribute stream * @arg rem initialized to len, holds bytes currently remaining in stream */ -#define nla_for_each_attr(pos, head, len, rem) \ +#define libbpf_nla_for_each_attr(pos, head, len, rem) \ for (pos = head, rem = len; \ nla_ok(pos, rem); \ pos = nla_next(pos, &(rem))) -int nla_dump_errormsg(struct nlmsghdr *nlh); +/** + * libbpf_nla_data - head of payload + * @nla: netlink attribute + */ +static inline void *libbpf_nla_data(const struct nlattr *nla) +{ + return (char *) nla + NLA_HDRLEN; +} + +static inline uint8_t libbpf_nla_getattr_u8(const struct nlattr *nla) +{ + return *(uint8_t *)libbpf_nla_data(nla); +} + +static inline uint32_t libbpf_nla_getattr_u32(const struct nlattr *nla) +{ + return *(uint32_t *)libbpf_nla_data(nla); +} + +static inline const char *libbpf_nla_getattr_str(const struct nlattr *nla) +{ + return (const char *)libbpf_nla_data(nla); +} + +/** + * libbpf_nla_len - length of payload + * @nla: netlink attribute + */ +static inline int libbpf_nla_len(const struct nlattr *nla) +{ + return nla->nla_len - NLA_HDRLEN; +} + +int libbpf_nla_parse(struct nlattr *tb[], int maxtype, struct nlattr *head, + int len, struct libbpf_nla_policy *policy); +int libbpf_nla_parse_nested(struct nlattr *tb[], int maxtype, + struct nlattr *nla, + struct libbpf_nla_policy *policy); + +int libbpf_nla_dump_errormsg(struct nlmsghdr *nlh); -#endif /* __NLATTR_H */ +#endif /* __LIBBPF_NLATTR_H */ diff --git a/tools/lib/bpf/str_error.c b/tools/lib/bpf/str_error.c index b8798114a357..00e48ac5b806 100644 --- a/tools/lib/bpf/str_error.c +++ b/tools/lib/bpf/str_error.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: LGPL-2.1 +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) #undef _GNU_SOURCE #include <string.h> #include <stdio.h> @@ -9,7 +9,7 @@ * libc, while checking strerror_r() return to avoid having to check this in * all places calling it. 
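/*
 * Usage sketch for the renamed helper below: a thread-safe strerror
 * wrapper that always leaves a printable string in the caller's buffer
 * and returns it, so call sites need no extra error handling.
 */
	char errbuf[128];

	fprintf(stderr, "map create failed: %s\n",
		libbpf_strerror_r(errno, errbuf, sizeof(errbuf)));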
*/ -char *str_error(int err, char *dst, int len) +char *libbpf_strerror_r(int err, char *dst, int len) { int ret = strerror_r(err, dst, len); if (ret) diff --git a/tools/lib/bpf/str_error.h b/tools/lib/bpf/str_error.h index 355b1db571d1..a139334d57b6 100644 --- a/tools/lib/bpf/str_error.h +++ b/tools/lib/bpf/str_error.h @@ -1,6 +1,6 @@ -// SPDX-License-Identifier: LGPL-2.1 -#ifndef BPF_STR_ERROR -#define BPF_STR_ERROR +/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ +#ifndef __LIBBPF_STR_ERROR_H +#define __LIBBPF_STR_ERROR_H -char *str_error(int err, char *dst, int len); -#endif // BPF_STR_ERROR +char *libbpf_strerror_r(int err, char *dst, int len); +#endif /* __LIBBPF_STR_ERROR_H */ diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore index 49938d72cf63..8a60c9b9892d 100644 --- a/tools/testing/selftests/bpf/.gitignore +++ b/tools/testing/selftests/bpf/.gitignore @@ -19,3 +19,9 @@ test_btf test_sockmap test_lirc_mode2_user get_cgroup_id_user +test_skb_cgroup_id_user +test_socket_cookie +test_cgroup_storage +test_select_reuseport +test_flow_dissector +flow_dissector_load diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index fff7fb1285fc..1381ab81099c 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -23,7 +23,8 @@ $(TEST_CUSTOM_PROGS): $(OUTPUT)/%: %.c TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \ test_align test_verifier_log test_dev_cgroup test_tcpbpf_user \ test_sock test_btf test_sockmap test_lirc_mode2_user get_cgroup_id_user \ - test_socket_cookie test_cgroup_storage test_select_reuseport + test_socket_cookie test_cgroup_storage test_select_reuseport test_section_names \ + test_netcnt TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o test_obj_id.o \ test_pkt_md_access.o test_xdp_redirect.o test_xdp_meta.o sockmap_parse_prog.o \ @@ -35,7 +36,7 @@ TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o test test_get_stack_rawtp.o test_sockmap_kern.o test_sockhash_kern.o \ test_lwt_seg6local.o sendmsg4_prog.o sendmsg6_prog.o test_lirc_mode2_kern.o \ get_cgroup_id_kern.o socket_cookie_prog.o test_select_reuseport_kern.o \ - test_skb_cgroup_id_kern.o + test_skb_cgroup_id_kern.o bpf_flow.o netcnt_prog.o test_sk_lookup_kern.o # Order correspond to 'make run_tests' order TEST_PROGS := test_kmod.sh \ @@ -47,10 +48,12 @@ TEST_PROGS := test_kmod.sh \ test_tunnel.sh \ test_lwt_seg6local.sh \ test_lirc_mode2.sh \ - test_skb_cgroup_id.sh + test_skb_cgroup_id.sh \ + test_flow_dissector.sh # Compile but not part of 'make run_tests' -TEST_GEN_PROGS_EXTENDED = test_libbpf_open test_sock_addr test_skb_cgroup_id_user +TEST_GEN_PROGS_EXTENDED = test_libbpf_open test_sock_addr test_skb_cgroup_id_user \ + flow_dissector_load test_flow_dissector include ../lib.mk @@ -70,6 +73,7 @@ $(OUTPUT)/test_tcpbpf_user: cgroup_helpers.c $(OUTPUT)/test_progs: trace_helpers.c $(OUTPUT)/get_cgroup_id_user: cgroup_helpers.c $(OUTPUT)/test_cgroup_storage: cgroup_helpers.c +$(OUTPUT)/test_netcnt: cgroup_helpers.c .PHONY: force diff --git a/tools/testing/selftests/bpf/bpf_flow.c b/tools/testing/selftests/bpf/bpf_flow.c new file mode 100644 index 000000000000..107350a7821d --- /dev/null +++ b/tools/testing/selftests/bpf/bpf_flow.c @@ -0,0 +1,373 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <limits.h> +#include <stddef.h> +#include <stdbool.h> +#include <string.h> +#include <linux/pkt_cls.h> +#include 
<linux/bpf.h> +#include <linux/in.h> +#include <linux/if_ether.h> +#include <linux/icmp.h> +#include <linux/ip.h> +#include <linux/ipv6.h> +#include <linux/tcp.h> +#include <linux/udp.h> +#include <linux/if_packet.h> +#include <sys/socket.h> +#include <linux/if_tunnel.h> +#include <linux/mpls.h> +#include "bpf_helpers.h" +#include "bpf_endian.h" + +int _version SEC("version") = 1; +#define PROG(F) SEC(#F) int bpf_func_##F + +/* These are the identifiers of the BPF programs that will be used in tail + * calls. Name is limited to 16 characters, with the terminating character and + * bpf_func_ above, we have only 6 to work with, anything after will be cropped. + */ +enum { + IP, + IPV6, + IPV6OP, /* Destination/Hop-by-Hop Options IPv6 Extension header */ + IPV6FR, /* Fragmentation IPv6 Extension Header */ + MPLS, + VLAN, +}; + +#define IP_MF 0x2000 +#define IP_OFFSET 0x1FFF +#define IP6_MF 0x0001 +#define IP6_OFFSET 0xFFF8 + +struct vlan_hdr { + __be16 h_vlan_TCI; + __be16 h_vlan_encapsulated_proto; +}; + +struct gre_hdr { + __be16 flags; + __be16 proto; +}; + +struct frag_hdr { + __u8 nexthdr; + __u8 reserved; + __be16 frag_off; + __be32 identification; +}; + +struct bpf_map_def SEC("maps") jmp_table = { + .type = BPF_MAP_TYPE_PROG_ARRAY, + .key_size = sizeof(__u32), + .value_size = sizeof(__u32), + .max_entries = 8 +}; + +static __always_inline void *bpf_flow_dissect_get_header(struct __sk_buff *skb, + __u16 hdr_size, + void *buffer) +{ + void *data_end = (void *)(long)skb->data_end; + void *data = (void *)(long)skb->data; + __u16 nhoff = skb->flow_keys->nhoff; + __u8 *hdr; + + /* Verifies this variable offset does not overflow */ + if (nhoff > (USHRT_MAX - hdr_size)) + return NULL; + + hdr = data + nhoff; + if (hdr + hdr_size <= data_end) + return hdr; + + if (bpf_skb_load_bytes(skb, nhoff, buffer, hdr_size)) + return NULL; + + return buffer; +} + +/* Dispatches on ETHERTYPE */ +static __always_inline int parse_eth_proto(struct __sk_buff *skb, __be16 proto) +{ + struct bpf_flow_keys *keys = skb->flow_keys; + + keys->n_proto = proto; + switch (proto) { + case bpf_htons(ETH_P_IP): + bpf_tail_call(skb, &jmp_table, IP); + break; + case bpf_htons(ETH_P_IPV6): + bpf_tail_call(skb, &jmp_table, IPV6); + break; + case bpf_htons(ETH_P_MPLS_MC): + case bpf_htons(ETH_P_MPLS_UC): + bpf_tail_call(skb, &jmp_table, MPLS); + break; + case bpf_htons(ETH_P_8021Q): + case bpf_htons(ETH_P_8021AD): + bpf_tail_call(skb, &jmp_table, VLAN); + break; + default: + /* Protocol not supported */ + return BPF_DROP; + } + + return BPF_DROP; +} + +SEC("dissect") +int _dissect(struct __sk_buff *skb) +{ + if (!skb->vlan_present) + return parse_eth_proto(skb, skb->protocol); + else + return parse_eth_proto(skb, skb->vlan_proto); +} + +/* Parses on IPPROTO_* */ +static __always_inline int parse_ip_proto(struct __sk_buff *skb, __u8 proto) +{ + struct bpf_flow_keys *keys = skb->flow_keys; + void *data_end = (void *)(long)skb->data_end; + struct icmphdr *icmp, _icmp; + struct gre_hdr *gre, _gre; + struct ethhdr *eth, _eth; + struct tcphdr *tcp, _tcp; + struct udphdr *udp, _udp; + + keys->ip_proto = proto; + switch (proto) { + case IPPROTO_ICMP: + icmp = bpf_flow_dissect_get_header(skb, sizeof(*icmp), &_icmp); + if (!icmp) + return BPF_DROP; + return BPF_OK; + case IPPROTO_IPIP: + keys->is_encap = true; + return parse_eth_proto(skb, bpf_htons(ETH_P_IP)); + case IPPROTO_IPV6: + keys->is_encap = true; + return parse_eth_proto(skb, bpf_htons(ETH_P_IPV6)); + case IPPROTO_GRE: + gre = bpf_flow_dissect_get_header(skb, sizeof(*gre), 
&_gre); + if (!gre) + return BPF_DROP; + + if (bpf_htons(gre->flags & GRE_VERSION)) + /* Only inspect standard GRE packets with version 0 */ + return BPF_OK; + + keys->nhoff += sizeof(*gre); /* Step over GRE Flags and Proto */ + if (GRE_IS_CSUM(gre->flags)) + keys->nhoff += 4; /* Step over chksum and Padding */ + if (GRE_IS_KEY(gre->flags)) + keys->nhoff += 4; /* Step over key */ + if (GRE_IS_SEQ(gre->flags)) + keys->nhoff += 4; /* Step over sequence number */ + + keys->is_encap = true; + + if (gre->proto == bpf_htons(ETH_P_TEB)) { + eth = bpf_flow_dissect_get_header(skb, sizeof(*eth), + &_eth); + if (!eth) + return BPF_DROP; + + keys->nhoff += sizeof(*eth); + + return parse_eth_proto(skb, eth->h_proto); + } else { + return parse_eth_proto(skb, gre->proto); + } + case IPPROTO_TCP: + tcp = bpf_flow_dissect_get_header(skb, sizeof(*tcp), &_tcp); + if (!tcp) + return BPF_DROP; + + if (tcp->doff < 5) + return BPF_DROP; + + if ((__u8 *)tcp + (tcp->doff << 2) > data_end) + return BPF_DROP; + + keys->thoff = keys->nhoff; + keys->sport = tcp->source; + keys->dport = tcp->dest; + return BPF_OK; + case IPPROTO_UDP: + case IPPROTO_UDPLITE: + udp = bpf_flow_dissect_get_header(skb, sizeof(*udp), &_udp); + if (!udp) + return BPF_DROP; + + keys->thoff = keys->nhoff; + keys->sport = udp->source; + keys->dport = udp->dest; + return BPF_OK; + default: + return BPF_DROP; + } + + return BPF_DROP; +} + +static __always_inline int parse_ipv6_proto(struct __sk_buff *skb, __u8 nexthdr) +{ + struct bpf_flow_keys *keys = skb->flow_keys; + + keys->ip_proto = nexthdr; + switch (nexthdr) { + case IPPROTO_HOPOPTS: + case IPPROTO_DSTOPTS: + bpf_tail_call(skb, &jmp_table, IPV6OP); + break; + case IPPROTO_FRAGMENT: + bpf_tail_call(skb, &jmp_table, IPV6FR); + break; + default: + return parse_ip_proto(skb, nexthdr); + } + + return BPF_DROP; +} + +PROG(IP)(struct __sk_buff *skb) +{ + void *data_end = (void *)(long)skb->data_end; + struct bpf_flow_keys *keys = skb->flow_keys; + void *data = (void *)(long)skb->data; + struct iphdr *iph, _iph; + bool done = false; + + iph = bpf_flow_dissect_get_header(skb, sizeof(*iph), &_iph); + if (!iph) + return BPF_DROP; + + /* IP header cannot be smaller than 20 bytes */ + if (iph->ihl < 5) + return BPF_DROP; + + keys->addr_proto = ETH_P_IP; + keys->ipv4_src = iph->saddr; + keys->ipv4_dst = iph->daddr; + + keys->nhoff += iph->ihl << 2; + if (data + keys->nhoff > data_end) + return BPF_DROP; + + if (iph->frag_off & bpf_htons(IP_MF | IP_OFFSET)) { + keys->is_frag = true; + if (iph->frag_off & bpf_htons(IP_OFFSET)) + /* From second fragment on, packets do not have headers + * we can parse. 
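/*
 * The contract these programs implement, in miniature (a sketch, not
 * part of the patch): a BPF_PROG_TYPE_FLOW_DISSECTOR program reads
 * skb->flow_keys, advances keys->nhoff across each header it consumes,
 * fills in addresses and ports, and returns BPF_OK on success or
 * BPF_DROP when dissection fails. An IPv4/UDP-only dissector without
 * the tail-call dispatch would look roughly like this:
 */
SEC("dissect_min")
int dissect_min(struct __sk_buff *skb)
{
	struct bpf_flow_keys *keys = skb->flow_keys;
	struct udphdr *udp, _udp;
	struct iphdr *iph, _iph;

	if (skb->protocol != bpf_htons(ETH_P_IP))
		return BPF_DROP;
	keys->n_proto = skb->protocol;

	iph = bpf_flow_dissect_get_header(skb, sizeof(*iph), &_iph);
	if (!iph || iph->ihl < 5 || iph->protocol != IPPROTO_UDP)
		return BPF_DROP;

	keys->addr_proto = ETH_P_IP;
	keys->ip_proto = IPPROTO_UDP;
	keys->ipv4_src = iph->saddr;
	keys->ipv4_dst = iph->daddr;
	keys->nhoff += iph->ihl << 2;

	udp = bpf_flow_dissect_get_header(skb, sizeof(*udp), &_udp);
	if (!udp)
		return BPF_DROP;

	keys->thoff = keys->nhoff;
	keys->sport = udp->source;
	keys->dport = udp->dest;
	return BPF_OK;
}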
+ */ + done = true; + else + keys->is_first_frag = true; + } + + if (done) + return BPF_OK; + + return parse_ip_proto(skb, iph->protocol); +} + +PROG(IPV6)(struct __sk_buff *skb) +{ + struct bpf_flow_keys *keys = skb->flow_keys; + struct ipv6hdr *ip6h, _ip6h; + + ip6h = bpf_flow_dissect_get_header(skb, sizeof(*ip6h), &_ip6h); + if (!ip6h) + return BPF_DROP; + + keys->addr_proto = ETH_P_IPV6; + memcpy(&keys->ipv6_src, &ip6h->saddr, 2*sizeof(ip6h->saddr)); + + keys->nhoff += sizeof(struct ipv6hdr); + + return parse_ipv6_proto(skb, ip6h->nexthdr); +} + +PROG(IPV6OP)(struct __sk_buff *skb) +{ + struct ipv6_opt_hdr *ip6h, _ip6h; + + ip6h = bpf_flow_dissect_get_header(skb, sizeof(*ip6h), &_ip6h); + if (!ip6h) + return BPF_DROP; + + /* hlen is in 8-octets and does not include the first 8 bytes + * of the header + */ + skb->flow_keys->nhoff += (1 + ip6h->hdrlen) << 3; + + return parse_ipv6_proto(skb, ip6h->nexthdr); +} + +PROG(IPV6FR)(struct __sk_buff *skb) +{ + struct bpf_flow_keys *keys = skb->flow_keys; + struct frag_hdr *fragh, _fragh; + + fragh = bpf_flow_dissect_get_header(skb, sizeof(*fragh), &_fragh); + if (!fragh) + return BPF_DROP; + + keys->nhoff += sizeof(*fragh); + keys->is_frag = true; + if (!(fragh->frag_off & bpf_htons(IP6_OFFSET))) + keys->is_first_frag = true; + + return parse_ipv6_proto(skb, fragh->nexthdr); +} + +PROG(MPLS)(struct __sk_buff *skb) +{ + struct mpls_label *mpls, _mpls; + + mpls = bpf_flow_dissect_get_header(skb, sizeof(*mpls), &_mpls); + if (!mpls) + return BPF_DROP; + + return BPF_OK; +} + +PROG(VLAN)(struct __sk_buff *skb) +{ + struct bpf_flow_keys *keys = skb->flow_keys; + struct vlan_hdr *vlan, _vlan; + __be16 proto; + + /* Peek back to see if single or double-tagging */ + if (bpf_skb_load_bytes(skb, keys->nhoff - sizeof(proto), &proto, + sizeof(proto))) + return BPF_DROP; + + /* Account for double-tagging */ + if (proto == bpf_htons(ETH_P_8021AD)) { + vlan = bpf_flow_dissect_get_header(skb, sizeof(*vlan), &_vlan); + if (!vlan) + return BPF_DROP; + + if (vlan->h_vlan_encapsulated_proto != bpf_htons(ETH_P_8021Q)) + return BPF_DROP; + + keys->nhoff += sizeof(*vlan); + } + + vlan = bpf_flow_dissect_get_header(skb, sizeof(*vlan), &_vlan); + if (!vlan) + return BPF_DROP; + + keys->nhoff += sizeof(*vlan); + /* Only allow 8021AD + 8021Q double tagging and no triple tagging.*/ + if (vlan->h_vlan_encapsulated_proto == bpf_htons(ETH_P_8021AD) || + vlan->h_vlan_encapsulated_proto == bpf_htons(ETH_P_8021Q)) + return BPF_DROP; + + return parse_eth_proto(skb, vlan->h_vlan_encapsulated_proto); +} + +char __license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/bpf_helpers.h b/tools/testing/selftests/bpf/bpf_helpers.h index e4be7730222d..1d407b3494f9 100644 --- a/tools/testing/selftests/bpf/bpf_helpers.h +++ b/tools/testing/selftests/bpf/bpf_helpers.h @@ -143,6 +143,18 @@ static unsigned long long (*bpf_skb_cgroup_id)(void *ctx) = (void *) BPF_FUNC_skb_cgroup_id; static unsigned long long (*bpf_skb_ancestor_cgroup_id)(void *ctx, int level) = (void *) BPF_FUNC_skb_ancestor_cgroup_id; +static struct bpf_sock *(*bpf_sk_lookup_tcp)(void *ctx, + struct bpf_sock_tuple *tuple, + int size, unsigned int netns_id, + unsigned long long flags) = + (void *) BPF_FUNC_sk_lookup_tcp; +static struct bpf_sock *(*bpf_sk_lookup_udp)(void *ctx, + struct bpf_sock_tuple *tuple, + int size, unsigned int netns_id, + unsigned long long flags) = + (void *) BPF_FUNC_sk_lookup_udp; +static int (*bpf_sk_release)(struct bpf_sock *sk) = + (void *) BPF_FUNC_sk_release; /* llvm builtin 
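/*
 * Sketch (not from the patch) of the socket-lookup helpers declared
 * above. A socket returned by bpf_sk_lookup_tcp()/bpf_sk_lookup_udp()
 * carries a reference; the verifier rejects programs that can reach
 * exit without dropping it via bpf_sk_release(). dst_ip/dst_port are
 * hypothetical inputs; netns_id and flags are left at 0 here (see the
 * helper's uapi documentation for their exact semantics).
 */
	struct bpf_sock_tuple tuple = {};
	struct bpf_sock *sk;

	tuple.ipv4.daddr = dst_ip;
	tuple.ipv4.dport = dst_port;

	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4), 0, 0);
	if (sk)
		bpf_sk_release(sk);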
functions that eBPF C program may use to * emit BPF_LD_ABS and BPF_LD_IND instructions diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config index b4994a94968b..3655508f95fd 100644 --- a/tools/testing/selftests/bpf/config +++ b/tools/testing/selftests/bpf/config @@ -18,3 +18,4 @@ CONFIG_CRYPTO_HMAC=m CONFIG_CRYPTO_SHA256=m CONFIG_VXLAN=y CONFIG_GENEVE=y +CONFIG_NET_CLS_FLOWER=m diff --git a/tools/testing/selftests/bpf/flow_dissector_load.c b/tools/testing/selftests/bpf/flow_dissector_load.c new file mode 100644 index 000000000000..d3273b5b3173 --- /dev/null +++ b/tools/testing/selftests/bpf/flow_dissector_load.c @@ -0,0 +1,140 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <error.h> +#include <errno.h> +#include <getopt.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <sys/stat.h> +#include <fcntl.h> +#include <unistd.h> +#include <bpf/bpf.h> +#include <bpf/libbpf.h> + +const char *cfg_pin_path = "/sys/fs/bpf/flow_dissector"; +const char *cfg_map_name = "jmp_table"; +bool cfg_attach = true; +char *cfg_section_name; +char *cfg_path_name; + +static void load_and_attach_program(void) +{ + struct bpf_program *prog, *main_prog; + struct bpf_map *prog_array; + int i, fd, prog_fd, ret; + struct bpf_object *obj; + int prog_array_fd; + + ret = bpf_prog_load(cfg_path_name, BPF_PROG_TYPE_FLOW_DISSECTOR, &obj, + &prog_fd); + if (ret) + error(1, 0, "bpf_prog_load %s", cfg_path_name); + + main_prog = bpf_object__find_program_by_title(obj, cfg_section_name); + if (!main_prog) + error(1, 0, "bpf_object__find_program_by_title %s", + cfg_section_name); + + prog_fd = bpf_program__fd(main_prog); + if (prog_fd < 0) + error(1, 0, "bpf_program__fd"); + + prog_array = bpf_object__find_map_by_name(obj, cfg_map_name); + if (!prog_array) + error(1, 0, "bpf_object__find_map_by_name %s", cfg_map_name); + + prog_array_fd = bpf_map__fd(prog_array); + if (prog_array_fd < 0) + error(1, 0, "bpf_map__fd %s", cfg_map_name); + + i = 0; + bpf_object__for_each_program(prog, obj) { + fd = bpf_program__fd(prog); + if (fd < 0) + error(1, 0, "bpf_program__fd"); + + if (fd != prog_fd) { + printf("%d: %s\n", i, bpf_program__title(prog, false)); + bpf_map_update_elem(prog_array_fd, &i, &fd, BPF_ANY); + ++i; + } + } + + ret = bpf_prog_attach(prog_fd, 0 /* Ignore */, BPF_FLOW_DISSECTOR, 0); + if (ret) + error(1, 0, "bpf_prog_attach %s", cfg_path_name); + + ret = bpf_object__pin(obj, cfg_pin_path); + if (ret) + error(1, 0, "bpf_object__pin %s", cfg_pin_path); + +} + +static void detach_program(void) +{ + char command[64]; + int ret; + + ret = bpf_prog_detach(0, BPF_FLOW_DISSECTOR); + if (ret) + error(1, 0, "bpf_prog_detach"); + + /* To unpin, it is necessary and sufficient to just remove this dir */ + sprintf(command, "rm -r %s", cfg_pin_path); + ret = system(command); + if (ret) + error(1, errno, command); +} + +static void parse_opts(int argc, char **argv) +{ + bool attach = false; + bool detach = false; + int c; + + while ((c = getopt(argc, argv, "adp:s:")) != -1) { + switch (c) { + case 'a': + if (detach) + error(1, 0, "attach/detach are exclusive"); + attach = true; + break; + case 'd': + if (attach) + error(1, 0, "attach/detach are exclusive"); + detach = true; + break; + case 'p': + if (cfg_path_name) + error(1, 0, "only one prog name can be given"); + + cfg_path_name = optarg; + break; + case 's': + if (cfg_section_name) + error(1, 0, "only one section can be given"); + + cfg_section_name = optarg; + break; + } + } + + if (detach) + cfg_attach = false; + + if (cfg_attach 
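/*
 * An observation on the loader above, not part of the patch: the
 * bpf_object__for_each_program() walk installs every non-main program
 * into jmp_table at consecutive indices (slot 0 <- bpf_func_IP,
 * slot 1 <- bpf_func_IPV6, ...), so the slot a sub-program lands in
 * follows its order in the object file, which must line up with the
 * IP/IPV6/IPV6OP/IPV6FR/MPLS/VLAN enum that bpf_flow.c tail-calls with.
 */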
&& !cfg_path_name) + error(1, 0, "must provide a path to the BPF program"); + + if (cfg_attach && !cfg_section_name) + error(1, 0, "must provide a section name"); +} + +int main(int argc, char **argv) +{ + parse_opts(argc, argv); + if (cfg_attach) + load_and_attach_program(); + else + detach_program(); + return 0; +} diff --git a/tools/testing/selftests/bpf/netcnt_common.h b/tools/testing/selftests/bpf/netcnt_common.h new file mode 100644 index 000000000000..81084c1c2c23 --- /dev/null +++ b/tools/testing/selftests/bpf/netcnt_common.h @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: GPL-2.0 +#ifndef __NETCNT_COMMON_H +#define __NETCNT_COMMON_H + +#include <linux/types.h> + +#define MAX_PERCPU_PACKETS 32 + +struct percpu_net_cnt { + __u64 packets; + __u64 bytes; + + __u64 prev_ts; + + __u64 prev_packets; + __u64 prev_bytes; +}; + +struct net_cnt { + __u64 packets; + __u64 bytes; +}; + +#endif diff --git a/tools/testing/selftests/bpf/netcnt_prog.c b/tools/testing/selftests/bpf/netcnt_prog.c new file mode 100644 index 000000000000..1198abca1360 --- /dev/null +++ b/tools/testing/selftests/bpf/netcnt_prog.c @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/bpf.h> +#include <linux/version.h> + +#include "bpf_helpers.h" +#include "netcnt_common.h" + +#define MAX_BPS (3 * 1024 * 1024) + +#define REFRESH_TIME_NS 100000000 +#define NS_PER_SEC 1000000000 + +struct bpf_map_def SEC("maps") percpu_netcnt = { + .type = BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE, + .key_size = sizeof(struct bpf_cgroup_storage_key), + .value_size = sizeof(struct percpu_net_cnt), +}; + +struct bpf_map_def SEC("maps") netcnt = { + .type = BPF_MAP_TYPE_CGROUP_STORAGE, + .key_size = sizeof(struct bpf_cgroup_storage_key), + .value_size = sizeof(struct net_cnt), +}; + +SEC("cgroup/skb") +int bpf_nextcnt(struct __sk_buff *skb) +{ + struct percpu_net_cnt *percpu_cnt; + char fmt[] = "%d %llu %llu\n"; + struct net_cnt *cnt; + __u64 ts, dt; + int ret; + + cnt = bpf_get_local_storage(&netcnt, 0); + percpu_cnt = bpf_get_local_storage(&percpu_netcnt, 0); + + percpu_cnt->packets++; + percpu_cnt->bytes += skb->len; + + if (percpu_cnt->packets > MAX_PERCPU_PACKETS) { + __sync_fetch_and_add(&cnt->packets, + percpu_cnt->packets); + percpu_cnt->packets = 0; + + __sync_fetch_and_add(&cnt->bytes, + percpu_cnt->bytes); + percpu_cnt->bytes = 0; + } + + ts = bpf_ktime_get_ns(); + dt = ts - percpu_cnt->prev_ts; + + dt *= MAX_BPS; + dt /= NS_PER_SEC; + + if (cnt->bytes + percpu_cnt->bytes - percpu_cnt->prev_bytes < dt) + ret = 1; + else + ret = 0; + + if (dt > REFRESH_TIME_NS) { + percpu_cnt->prev_ts = ts; + percpu_cnt->prev_packets = cnt->packets; + percpu_cnt->prev_bytes = cnt->bytes; + } + + return !!ret; +} + +char _license[] SEC("license") = "GPL"; +__u32 _version SEC("version") = LINUX_VERSION_CODE; diff --git a/tools/testing/selftests/bpf/test_btf.c b/tools/testing/selftests/bpf/test_btf.c index 6b5cfeb7a9cc..f42b3396d622 100644 --- a/tools/testing/selftests/bpf/test_btf.c +++ b/tools/testing/selftests/bpf/test_btf.c @@ -4,6 +4,7 @@ #include <linux/bpf.h> #include <linux/btf.h> #include <linux/err.h> +#include <linux/kernel.h> #include <bpf/bpf.h> #include <sys/resource.h> #include <libelf.h> @@ -45,7 +46,6 @@ static int count_result(int err) return err; } -#define min(a, b) ((a) < (b) ? 
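/*
 * Worked numbers for the rate check in netcnt_prog.c above: dt (ns) is
 * turned into a byte budget via dt * MAX_BPS / NS_PER_SEC. One refresh
 * window of REFRESH_TIME_NS = 100 ms at MAX_BPS = 3 MiB/s gives
 * 100000000 * 3145728 / 1000000000 ~= 314573 bytes; a packet passes
 * (ret = 1) only while the bytes accumulated since prev_bytes stay
 * below that budget.
 */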
(a) : (b)) #define __printf(a, b) __attribute__((format(printf, a, b))) __printf(1, 2) @@ -130,6 +130,7 @@ struct btf_raw_test { bool map_create_err; bool ordered_map; bool lossless_map; + bool percpu_map; int hdr_len_delta; int type_off_delta; int str_off_delta; @@ -2157,6 +2158,7 @@ static struct btf_pprint_test_meta { const char *map_name; bool ordered_map; bool lossless_map; + bool percpu_map; } pprint_tests_meta[] = { { .descr = "BTF pretty print array", @@ -2164,6 +2166,7 @@ static struct btf_pprint_test_meta { .map_name = "pprint_test_array", .ordered_map = true, .lossless_map = true, + .percpu_map = false, }, { @@ -2172,6 +2175,7 @@ static struct btf_pprint_test_meta { .map_name = "pprint_test_hash", .ordered_map = false, .lossless_map = true, + .percpu_map = false, }, { @@ -2180,30 +2184,83 @@ static struct btf_pprint_test_meta { .map_name = "pprint_test_lru_hash", .ordered_map = false, .lossless_map = false, + .percpu_map = false, +}, + +{ + .descr = "BTF pretty print percpu array", + .map_type = BPF_MAP_TYPE_PERCPU_ARRAY, + .map_name = "pprint_test_percpu_array", + .ordered_map = true, + .lossless_map = true, + .percpu_map = true, +}, + +{ + .descr = "BTF pretty print percpu hash", + .map_type = BPF_MAP_TYPE_PERCPU_HASH, + .map_name = "pprint_test_percpu_hash", + .ordered_map = false, + .lossless_map = true, + .percpu_map = true, +}, + +{ + .descr = "BTF pretty print lru percpu hash", + .map_type = BPF_MAP_TYPE_LRU_PERCPU_HASH, + .map_name = "pprint_test_lru_percpu_hash", + .ordered_map = false, + .lossless_map = false, + .percpu_map = true, }, }; -static void set_pprint_mapv(struct pprint_mapv *v, uint32_t i) +static void set_pprint_mapv(struct pprint_mapv *v, uint32_t i, + int num_cpus, int rounded_value_size) { - v->ui32 = i; - v->si32 = -i; - v->unused_bits2a = 3; - v->bits28 = i; - v->unused_bits2b = 3; - v->ui64 = i; - v->aenum = i & 0x03; + int cpu; + + for (cpu = 0; cpu < num_cpus; cpu++) { + v->ui32 = i + cpu; + v->si32 = -i; + v->unused_bits2a = 3; + v->bits28 = i; + v->unused_bits2b = 3; + v->ui64 = i; + v->aenum = i & 0x03; + v = (void *)v + rounded_value_size; + } } +static int check_line(const char *expected_line, int nexpected_line, + int expected_line_len, const char *line) +{ + if (CHECK(nexpected_line == expected_line_len, + "expected_line is too long")) + return -1; + + if (strcmp(expected_line, line)) { + fprintf(stderr, "unexpected pprint output\n"); + fprintf(stderr, "expected: %s", expected_line); + fprintf(stderr, " read: %s", line); + return -1; + } + + return 0; +} + + static int do_test_pprint(void) { const struct btf_raw_test *test = &pprint_test_template; struct bpf_create_map_attr create_attr = {}; + bool ordered_map, lossless_map, percpu_map; + int err, ret, num_cpus, rounded_value_size; + struct pprint_mapv *mapv = NULL; unsigned int key, nr_read_elems; - bool ordered_map, lossless_map; int map_fd = -1, btf_fd = -1; - struct pprint_mapv mapv = {}; unsigned int raw_btf_size; char expected_line[255]; FILE *pin_file = NULL; @@ -2212,7 +2269,6 @@ static int do_test_pprint(void) char *line = NULL; uint8_t *raw_btf; ssize_t nread; - int err, ret; fprintf(stderr, "%s......", test->descr); raw_btf = btf_raw_create(&hdr_tmpl, test->raw_types, @@ -2261,9 +2317,18 @@ static int do_test_pprint(void) if (CHECK(err, "bpf_obj_pin(%s): errno:%d.", pin_path, errno)) goto done; + percpu_map = test->percpu_map; + num_cpus = percpu_map ? 
bpf_num_possible_cpus() : 1; + rounded_value_size = round_up(sizeof(struct pprint_mapv), 8); + mapv = calloc(num_cpus, rounded_value_size); + if (CHECK(!mapv, "mapv allocation failure")) { + err = -1; + goto done; + } + for (key = 0; key < test->max_entries; key++) { - set_pprint_mapv(&mapv, key); - bpf_map_update_elem(map_fd, &key, &mapv, 0); + set_pprint_mapv(mapv, key, num_cpus, rounded_value_size); + bpf_map_update_elem(map_fd, &key, mapv, 0); } pin_file = fopen(pin_path, "r"); @@ -2286,33 +2351,74 @@ static int do_test_pprint(void) ordered_map = test->ordered_map; lossless_map = test->lossless_map; do { + struct pprint_mapv *cmapv; ssize_t nexpected_line; unsigned int next_key; + int cpu; next_key = ordered_map ? nr_read_elems : atoi(line); - set_pprint_mapv(&mapv, next_key); - nexpected_line = snprintf(expected_line, sizeof(expected_line), - "%u: {%u,0,%d,0x%x,0x%x,0x%x,{%lu|[%u,%u,%u,%u,%u,%u,%u,%u]},%s}\n", - next_key, - mapv.ui32, mapv.si32, - mapv.unused_bits2a, mapv.bits28, mapv.unused_bits2b, - mapv.ui64, - mapv.ui8a[0], mapv.ui8a[1], mapv.ui8a[2], mapv.ui8a[3], - mapv.ui8a[4], mapv.ui8a[5], mapv.ui8a[6], mapv.ui8a[7], - pprint_enum_str[mapv.aenum]); - - if (CHECK(nexpected_line == sizeof(expected_line), - "expected_line is too long")) { - err = -1; - goto done; + set_pprint_mapv(mapv, next_key, num_cpus, rounded_value_size); + cmapv = mapv; + + for (cpu = 0; cpu < num_cpus; cpu++) { + if (percpu_map) { + /* for percpu map, the format looks like: + * <key>: { + * cpu0: <value_on_cpu0> + * cpu1: <value_on_cpu1> + * ... + * cpun: <value_on_cpun> + * } + * + * let us verify the line containing the key here. + */ + if (cpu == 0) { + nexpected_line = snprintf(expected_line, + sizeof(expected_line), + "%u: {\n", + next_key); + + err = check_line(expected_line, nexpected_line, + sizeof(expected_line), line); + if (err == -1) + goto done; + } + + /* read value@cpu */ + nread = getline(&line, &line_len, pin_file); + if (nread < 0) + break; + } + + nexpected_line = snprintf(expected_line, sizeof(expected_line), + "%s%u: {%u,0,%d,0x%x,0x%x,0x%x," + "{%lu|[%u,%u,%u,%u,%u,%u,%u,%u]},%s}\n", + percpu_map ? "\tcpu" : "", + percpu_map ? 
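/*
 * Why the calloc(num_cpus, round_up(sizeof(struct pprint_mapv), 8))
 * above: lookups on per-cpu maps copy one value per possible CPU into
 * the user buffer, each at an 8-byte-aligned stride. A sketch of the
 * same pattern for a plain BPF_MAP_TYPE_PERCPU_ARRAY (map_fd and key
 * are hypothetical):
 */
	int ncpus = bpf_num_possible_cpus();
	size_t stride = round_up(sizeof(__u64), 8);
	void *values = calloc(ncpus, stride);

	if (values && !bpf_map_lookup_elem(map_fd, &key, values)) {
		for (int cpu = 0; cpu < ncpus; cpu++)
			printf("cpu%d: %llu\n", cpu,
			       *(__u64 *)(values + cpu * stride));
	}
	free(values);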
cpu : next_key, + cmapv->ui32, cmapv->si32, + cmapv->unused_bits2a, + cmapv->bits28, + cmapv->unused_bits2b, + cmapv->ui64, + cmapv->ui8a[0], cmapv->ui8a[1], + cmapv->ui8a[2], cmapv->ui8a[3], + cmapv->ui8a[4], cmapv->ui8a[5], + cmapv->ui8a[6], cmapv->ui8a[7], + pprint_enum_str[cmapv->aenum]); + + err = check_line(expected_line, nexpected_line, + sizeof(expected_line), line); + if (err == -1) + goto done; + + cmapv = (void *)cmapv + rounded_value_size; } - if (strcmp(expected_line, line)) { - err = -1; - fprintf(stderr, "unexpected pprint output\n"); - fprintf(stderr, "expected: %s", expected_line); - fprintf(stderr, " read: %s", line); - goto done; + if (percpu_map) { + /* skip the last bracket for the percpu map */ + nread = getline(&line, &line_len, pin_file); + if (nread < 0) + break; } nread = getline(&line, &line_len, pin_file); @@ -2334,6 +2440,8 @@ static int do_test_pprint(void) err = 0; done: + if (mapv) + free(mapv); if (!err) fprintf(stderr, "OK"); if (*btf_log_buf && (err || args.always_log)) @@ -2361,6 +2469,7 @@ static int test_pprint(void) pprint_test_template.map_name = pprint_tests_meta[i].map_name; pprint_test_template.ordered_map = pprint_tests_meta[i].ordered_map; pprint_test_template.lossless_map = pprint_tests_meta[i].lossless_map; + pprint_test_template.percpu_map = pprint_tests_meta[i].percpu_map; err |= count_result(do_test_pprint()); } diff --git a/tools/testing/selftests/bpf/test_cgroup_storage.c b/tools/testing/selftests/bpf/test_cgroup_storage.c index 4e196e3bfecf..f44834155f25 100644 --- a/tools/testing/selftests/bpf/test_cgroup_storage.c +++ b/tools/testing/selftests/bpf/test_cgroup_storage.c @@ -4,6 +4,7 @@ #include <linux/filter.h> #include <stdio.h> #include <stdlib.h> +#include <sys/sysinfo.h> #include "bpf_rlimit.h" #include "cgroup_helpers.h" @@ -15,6 +16,14 @@ char bpf_log_buf[BPF_LOG_BUF_SIZE]; int main(int argc, char **argv) { struct bpf_insn prog[] = { + BPF_LD_MAP_FD(BPF_REG_1, 0), /* percpu map fd */ + BPF_MOV64_IMM(BPF_REG_2, 0), /* flags, not used */ + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_get_local_storage), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 0x1), + BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), /* map fd */ BPF_MOV64_IMM(BPF_REG_2, 0), /* flags, not used */ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, @@ -28,9 +37,18 @@ int main(int argc, char **argv) }; size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn); int error = EXIT_FAILURE; - int map_fd, prog_fd, cgroup_fd; + int map_fd, percpu_map_fd, prog_fd, cgroup_fd; struct bpf_cgroup_storage_key key; unsigned long long value; + unsigned long long *percpu_value; + int cpu, nproc; + + nproc = get_nprocs_conf(); + percpu_value = malloc(sizeof(*percpu_value) * nproc); + if (!percpu_value) { + printf("Not enough memory for per-cpu area (%d cpus)\n", nproc); + goto err; + } map_fd = bpf_create_map(BPF_MAP_TYPE_CGROUP_STORAGE, sizeof(key), sizeof(value), 0, 0); @@ -39,7 +57,15 @@ int main(int argc, char **argv) goto out; } - prog[0].imm = map_fd; + percpu_map_fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE, + sizeof(key), sizeof(value), 0, 0); + if (percpu_map_fd < 0) { + printf("Failed to create map: %s\n", strerror(errno)); + goto out; + } + + prog[0].imm = percpu_map_fd; + prog[7].imm = map_fd; prog_fd = bpf_load_program(BPF_PROG_TYPE_CGROUP_SKB, prog, insns_cnt, "GPL", 0, bpf_log_buf, BPF_LOG_BUF_SIZE); @@ -77,7 +103,15 @@ int main(int argc, char **argv) } if (bpf_map_lookup_elem(map_fd, &key, 
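/*
 * The raw instructions prepended to the test program above amount to
 * roughly this restricted C (a sketch; the shared-map half of the
 * program is pre-existing context not shown in the hunk):
 */
	__u32 *percpu;

	percpu = bpf_get_local_storage(&percpu_map, 0);
	*percpu += 1;	/* plain, non-atomic add: each CPU owns its slot */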
&value)) { - printf("Failed to lookup cgroup storage\n"); + printf("Failed to lookup cgroup storage 0\n"); + goto err; + } + + for (cpu = 0; cpu < nproc; cpu++) + percpu_value[cpu] = 1000; + + if (bpf_map_update_elem(percpu_map_fd, &key, percpu_value, 0)) { + printf("Failed to update the data in the cgroup storage\n"); goto err; } @@ -120,11 +154,31 @@ int main(int argc, char **argv) goto err; } + /* Check the final value of the counter in the percpu local storage */ + + for (cpu = 0; cpu < nproc; cpu++) + percpu_value[cpu] = 0; + + if (bpf_map_lookup_elem(percpu_map_fd, &key, percpu_value)) { + printf("Failed to lookup the per-cpu cgroup storage\n"); + goto err; + } + + value = 0; + for (cpu = 0; cpu < nproc; cpu++) + value += percpu_value[cpu]; + + if (value != nproc * 1000 + 6) { + printf("Unexpected data in the per-cpu cgroup storage\n"); + goto err; + } + error = 0; printf("test_cgroup_storage:PASS\n"); err: cleanup_cgroup_environment(); + free(percpu_value); out: return error; diff --git a/tools/testing/selftests/bpf/test_flow_dissector.c b/tools/testing/selftests/bpf/test_flow_dissector.c new file mode 100644 index 000000000000..12b784afba31 --- /dev/null +++ b/tools/testing/selftests/bpf/test_flow_dissector.c @@ -0,0 +1,782 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Inject packets with all sorts of encapsulation into the kernel. + * + * IPv4/IPv6 outer layer 3 + * GRE/GUE/BARE outer layer 4, where bare is IPIP/SIT/IPv4-in-IPv6/.. + * IPv4/IPv6 inner layer 3 + */ + +#define _GNU_SOURCE + +#include <stddef.h> +#include <arpa/inet.h> +#include <asm/byteorder.h> +#include <error.h> +#include <errno.h> +#include <linux/if_packet.h> +#include <linux/if_ether.h> +#include <linux/if_packet.h> +#include <linux/ipv6.h> +#include <netinet/ip.h> +#include <netinet/in.h> +#include <netinet/udp.h> +#include <poll.h> +#include <stdbool.h> +#include <stdlib.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <sys/ioctl.h> +#include <sys/socket.h> +#include <sys/stat.h> +#include <sys/time.h> +#include <sys/types.h> +#include <unistd.h> + +#define CFG_PORT_INNER 8000 + +/* Add some protocol definitions that do not exist in userspace */ + +struct grehdr { + uint16_t unused; + uint16_t protocol; +} __attribute__((packed)); + +struct guehdr { + union { + struct { +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u8 hlen:5, + control:1, + version:2; +#elif defined (__BIG_ENDIAN_BITFIELD) + __u8 version:2, + control:1, + hlen:5; +#else +#error "Please fix <asm/byteorder.h>" +#endif + __u8 proto_ctype; + __be16 flags; + }; + __be32 word; + }; +}; + +static uint8_t cfg_dsfield_inner; +static uint8_t cfg_dsfield_outer; +static uint8_t cfg_encap_proto; +static bool cfg_expect_failure = false; +static int cfg_l3_extra = AF_UNSPEC; /* optional SIT prefix */ +static int cfg_l3_inner = AF_UNSPEC; +static int cfg_l3_outer = AF_UNSPEC; +static int cfg_num_pkt = 10; +static int cfg_num_secs = 0; +static char cfg_payload_char = 'a'; +static int cfg_payload_len = 100; +static int cfg_port_gue = 6080; +static bool cfg_only_rx; +static bool cfg_only_tx; +static int cfg_src_port = 9; + +static char buf[ETH_DATA_LEN]; + +#define INIT_ADDR4(name, addr4, port) \ + static struct sockaddr_in name = { \ + .sin_family = AF_INET, \ + .sin_port = __constant_htons(port), \ + .sin_addr.s_addr = __constant_htonl(addr4), \ + }; + +#define INIT_ADDR6(name, addr6, port) \ + static struct sockaddr_in6 name = { \ + .sin6_family = AF_INET6, \ + .sin6_port = __constant_htons(port), \ + .sin6_addr = addr6, \ + }; + 
+INIT_ADDR4(in_daddr4, INADDR_LOOPBACK, CFG_PORT_INNER) +INIT_ADDR4(in_saddr4, INADDR_LOOPBACK + 2, 0) +INIT_ADDR4(out_daddr4, INADDR_LOOPBACK, 0) +INIT_ADDR4(out_saddr4, INADDR_LOOPBACK + 1, 0) +INIT_ADDR4(extra_daddr4, INADDR_LOOPBACK, 0) +INIT_ADDR4(extra_saddr4, INADDR_LOOPBACK + 1, 0) + +INIT_ADDR6(in_daddr6, IN6ADDR_LOOPBACK_INIT, CFG_PORT_INNER) +INIT_ADDR6(in_saddr6, IN6ADDR_LOOPBACK_INIT, 0) +INIT_ADDR6(out_daddr6, IN6ADDR_LOOPBACK_INIT, 0) +INIT_ADDR6(out_saddr6, IN6ADDR_LOOPBACK_INIT, 0) +INIT_ADDR6(extra_daddr6, IN6ADDR_LOOPBACK_INIT, 0) +INIT_ADDR6(extra_saddr6, IN6ADDR_LOOPBACK_INIT, 0) + +static unsigned long util_gettime(void) +{ + struct timeval tv; + + gettimeofday(&tv, NULL); + return (tv.tv_sec * 1000) + (tv.tv_usec / 1000); +} + +static void util_printaddr(const char *msg, struct sockaddr *addr) +{ + unsigned long off = 0; + char nbuf[INET6_ADDRSTRLEN]; + + switch (addr->sa_family) { + case PF_INET: + off = __builtin_offsetof(struct sockaddr_in, sin_addr); + break; + case PF_INET6: + off = __builtin_offsetof(struct sockaddr_in6, sin6_addr); + break; + default: + error(1, 0, "printaddr: unsupported family %u\n", + addr->sa_family); + } + + if (!inet_ntop(addr->sa_family, ((void *) addr) + off, nbuf, + sizeof(nbuf))) + error(1, errno, "inet_ntop"); + + fprintf(stderr, "%s: %s\n", msg, nbuf); +} + +static unsigned long add_csum_hword(const uint16_t *start, int num_u16) +{ + unsigned long sum = 0; + int i; + + for (i = 0; i < num_u16; i++) + sum += start[i]; + + return sum; +} + +static uint16_t build_ip_csum(const uint16_t *start, int num_u16, + unsigned long sum) +{ + sum += add_csum_hword(start, num_u16); + + while (sum >> 16) + sum = (sum & 0xffff) + (sum >> 16); + + return ~sum; +} + +static void build_ipv4_header(void *header, uint8_t proto, + uint32_t src, uint32_t dst, + int payload_len, uint8_t tos) +{ + struct iphdr *iph = header; + + iph->ihl = 5; + iph->version = 4; + iph->tos = tos; + iph->ttl = 8; + iph->tot_len = htons(sizeof(*iph) + payload_len); + iph->id = htons(1337); + iph->protocol = proto; + iph->saddr = src; + iph->daddr = dst; + iph->check = build_ip_csum((void *) iph, iph->ihl << 1, 0); +} + +static void ipv6_set_dsfield(struct ipv6hdr *ip6h, uint8_t dsfield) +{ + uint16_t val, *ptr = (uint16_t *)ip6h; + + val = ntohs(*ptr); + val &= 0xF00F; + val |= ((uint16_t) dsfield) << 4; + *ptr = htons(val); +} + +static void build_ipv6_header(void *header, uint8_t proto, + struct sockaddr_in6 *src, + struct sockaddr_in6 *dst, + int payload_len, uint8_t dsfield) +{ + struct ipv6hdr *ip6h = header; + + ip6h->version = 6; + ip6h->payload_len = htons(payload_len); + ip6h->nexthdr = proto; + ip6h->hop_limit = 8; + ipv6_set_dsfield(ip6h, dsfield); + + memcpy(&ip6h->saddr, &src->sin6_addr, sizeof(ip6h->saddr)); + memcpy(&ip6h->daddr, &dst->sin6_addr, sizeof(ip6h->daddr)); +} + +static uint16_t build_udp_v4_csum(const struct iphdr *iph, + const struct udphdr *udph, + int num_words) +{ + unsigned long pseudo_sum; + int num_u16 = sizeof(iph->saddr); /* halfwords: twice byte len */ + + pseudo_sum = add_csum_hword((void *) &iph->saddr, num_u16); + pseudo_sum += htons(IPPROTO_UDP); + pseudo_sum += udph->len; + return build_ip_csum((void *) udph, num_words, pseudo_sum); +} + +static uint16_t build_udp_v6_csum(const struct ipv6hdr *ip6h, + const struct udphdr *udph, + int num_words) +{ + unsigned long pseudo_sum; + int num_u16 = sizeof(ip6h->saddr); /* halfwords: twice byte len */ + + pseudo_sum = add_csum_hword((void *) &ip6h->saddr, num_u16); + pseudo_sum += 
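/*
 * build_ip_csum() above is the standard Internet checksum: sum the
 * 16-bit words (plus any pseudo-header seed), fold the carries back in
 * with sum = (sum & 0xffff) + (sum >> 16) until none remain, then
 * invert. For example, a running sum of 0x2345f folds to
 * 0x345f + 0x2 = 0x3461 and yields a checksum of ~0x3461 = 0xcb9e.
 */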
htons(ip6h->nexthdr); + pseudo_sum += ip6h->payload_len; + return build_ip_csum((void *) udph, num_words, pseudo_sum); +} + +static void build_udp_header(void *header, int payload_len, + uint16_t dport, int family) +{ + struct udphdr *udph = header; + int len = sizeof(*udph) + payload_len; + + udph->source = htons(cfg_src_port); + udph->dest = htons(dport); + udph->len = htons(len); + udph->check = 0; + if (family == AF_INET) + udph->check = build_udp_v4_csum(header - sizeof(struct iphdr), + udph, len >> 1); + else + udph->check = build_udp_v6_csum(header - sizeof(struct ipv6hdr), + udph, len >> 1); +} + +static void build_gue_header(void *header, uint8_t proto) +{ + struct guehdr *gueh = header; + + gueh->proto_ctype = proto; +} + +static void build_gre_header(void *header, uint16_t proto) +{ + struct grehdr *greh = header; + + greh->protocol = htons(proto); +} + +static int l3_length(int family) +{ + if (family == AF_INET) + return sizeof(struct iphdr); + else + return sizeof(struct ipv6hdr); +} + +static int build_packet(void) +{ + int ol3_len = 0, ol4_len = 0, il3_len = 0, il4_len = 0; + int el3_len = 0; + + if (cfg_l3_extra) + el3_len = l3_length(cfg_l3_extra); + + /* calculate header offsets */ + if (cfg_encap_proto) { + ol3_len = l3_length(cfg_l3_outer); + + if (cfg_encap_proto == IPPROTO_GRE) + ol4_len = sizeof(struct grehdr); + else if (cfg_encap_proto == IPPROTO_UDP) + ol4_len = sizeof(struct udphdr) + sizeof(struct guehdr); + } + + il3_len = l3_length(cfg_l3_inner); + il4_len = sizeof(struct udphdr); + + if (el3_len + ol3_len + ol4_len + il3_len + il4_len + cfg_payload_len >= + sizeof(buf)) + error(1, 0, "packet too large\n"); + + /* + * Fill packet from inside out, to calculate correct checksums. + * But create ip before udp headers, as udp uses ip for pseudo-sum. + */ + memset(buf + el3_len + ol3_len + ol4_len + il3_len + il4_len, + cfg_payload_char, cfg_payload_len); + + /* add zero byte for udp csum padding */ + buf[el3_len + ol3_len + ol4_len + il3_len + il4_len + cfg_payload_len] = 0; + + switch (cfg_l3_inner) { + case PF_INET: + build_ipv4_header(buf + el3_len + ol3_len + ol4_len, + IPPROTO_UDP, + in_saddr4.sin_addr.s_addr, + in_daddr4.sin_addr.s_addr, + il4_len + cfg_payload_len, + cfg_dsfield_inner); + break; + case PF_INET6: + build_ipv6_header(buf + el3_len + ol3_len + ol4_len, + IPPROTO_UDP, + &in_saddr6, &in_daddr6, + il4_len + cfg_payload_len, + cfg_dsfield_inner); + break; + } + + build_udp_header(buf + el3_len + ol3_len + ol4_len + il3_len, + cfg_payload_len, CFG_PORT_INNER, cfg_l3_inner); + + if (!cfg_encap_proto) + return il3_len + il4_len + cfg_payload_len; + + switch (cfg_l3_outer) { + case PF_INET: + build_ipv4_header(buf + el3_len, cfg_encap_proto, + out_saddr4.sin_addr.s_addr, + out_daddr4.sin_addr.s_addr, + ol4_len + il3_len + il4_len + cfg_payload_len, + cfg_dsfield_outer); + break; + case PF_INET6: + build_ipv6_header(buf + el3_len, cfg_encap_proto, + &out_saddr6, &out_daddr6, + ol4_len + il3_len + il4_len + cfg_payload_len, + cfg_dsfield_outer); + break; + } + + switch (cfg_encap_proto) { + case IPPROTO_UDP: + build_gue_header(buf + el3_len + ol3_len + ol4_len - + sizeof(struct guehdr), + cfg_l3_inner == PF_INET ? IPPROTO_IPIP + : IPPROTO_IPV6); + build_udp_header(buf + el3_len + ol3_len, + sizeof(struct guehdr) + il3_len + il4_len + + cfg_payload_len, + cfg_port_gue, cfg_l3_outer); + break; + case IPPROTO_GRE: + build_gre_header(buf + el3_len + ol3_len, + cfg_l3_inner == PF_INET ? 
ETH_P_IP + : ETH_P_IPV6); + break; + } + + switch (cfg_l3_extra) { + case PF_INET: + build_ipv4_header(buf, + cfg_l3_outer == PF_INET ? IPPROTO_IPIP + : IPPROTO_IPV6, + extra_saddr4.sin_addr.s_addr, + extra_daddr4.sin_addr.s_addr, + ol3_len + ol4_len + il3_len + il4_len + + cfg_payload_len, 0); + break; + case PF_INET6: + build_ipv6_header(buf, + cfg_l3_outer == PF_INET ? IPPROTO_IPIP + : IPPROTO_IPV6, + &extra_saddr6, &extra_daddr6, + ol3_len + ol4_len + il3_len + il4_len + + cfg_payload_len, 0); + break; + } + + return el3_len + ol3_len + ol4_len + il3_len + il4_len + + cfg_payload_len; +} + +/* sender transmits encapsulated over RAW or unencap'd over UDP */ +static int setup_tx(void) +{ + int family, fd, ret; + + if (cfg_l3_extra) + family = cfg_l3_extra; + else if (cfg_l3_outer) + family = cfg_l3_outer; + else + family = cfg_l3_inner; + + fd = socket(family, SOCK_RAW, IPPROTO_RAW); + if (fd == -1) + error(1, errno, "socket tx"); + + if (cfg_l3_extra) { + if (cfg_l3_extra == PF_INET) + ret = connect(fd, (void *) &extra_daddr4, + sizeof(extra_daddr4)); + else + ret = connect(fd, (void *) &extra_daddr6, + sizeof(extra_daddr6)); + if (ret) + error(1, errno, "connect tx"); + } else if (cfg_l3_outer) { + /* connect to destination if not encapsulated */ + if (cfg_l3_outer == PF_INET) + ret = connect(fd, (void *) &out_daddr4, + sizeof(out_daddr4)); + else + ret = connect(fd, (void *) &out_daddr6, + sizeof(out_daddr6)); + if (ret) + error(1, errno, "connect tx"); + } else { + /* otherwise using loopback */ + if (cfg_l3_inner == PF_INET) + ret = connect(fd, (void *) &in_daddr4, + sizeof(in_daddr4)); + else + ret = connect(fd, (void *) &in_daddr6, + sizeof(in_daddr6)); + if (ret) + error(1, errno, "connect tx"); + } + + return fd; +} + +/* receiver reads unencapsulated UDP */ +static int setup_rx(void) +{ + int fd, ret; + + fd = socket(cfg_l3_inner, SOCK_DGRAM, 0); + if (fd == -1) + error(1, errno, "socket rx"); + + if (cfg_l3_inner == PF_INET) + ret = bind(fd, (void *) &in_daddr4, sizeof(in_daddr4)); + else + ret = bind(fd, (void *) &in_daddr6, sizeof(in_daddr6)); + if (ret) + error(1, errno, "bind rx"); + + return fd; +} + +static int do_tx(int fd, const char *pkt, int len) +{ + int ret; + + ret = write(fd, pkt, len); + if (ret == -1) + error(1, errno, "send"); + if (ret != len) + error(1, errno, "send: len (%d < %d)\n", ret, len); + + return 1; +} + +static int do_poll(int fd, short events, int timeout) +{ + struct pollfd pfd; + int ret; + + pfd.fd = fd; + pfd.events = events; + + ret = poll(&pfd, 1, timeout); + if (ret == -1) + error(1, errno, "poll"); + if (ret && !(pfd.revents & POLLIN)) + error(1, errno, "poll: unexpected event 0x%x\n", pfd.revents); + + return ret; +} + +static int do_rx(int fd) +{ + char rbuf; + int ret, num = 0; + + while (1) { + ret = recv(fd, &rbuf, 1, MSG_DONTWAIT); + if (ret == -1 && errno == EAGAIN) + break; + if (ret == -1) + error(1, errno, "recv"); + if (rbuf != cfg_payload_char) + error(1, 0, "recv: payload mismatch"); + num++; + }; + + return num; +} + +static int do_main(void) +{ + unsigned long tstop, treport, tcur; + int fdt = -1, fdr = -1, len, tx = 0, rx = 0; + + if (!cfg_only_tx) + fdr = setup_rx(); + if (!cfg_only_rx) + fdt = setup_tx(); + + len = build_packet(); + + tcur = util_gettime(); + treport = tcur + 1000; + tstop = tcur + (cfg_num_secs * 1000); + + while (1) { + if (!cfg_only_rx) + tx += do_tx(fdt, buf, len); + + if (!cfg_only_tx) + rx += do_rx(fdr); + + if (cfg_num_secs) { + tcur = util_gettime(); + if (tcur >= tstop) + break; + if (tcur >= 
treport) { + fprintf(stderr, "pkts: tx=%u rx=%u\n", tx, rx); + tx = 0; + rx = 0; + treport = tcur + 1000; + } + } else { + if (tx == cfg_num_pkt) + break; + } + } + + /* read straggler packets, if any */ + if (rx < tx) { + tstop = util_gettime() + 100; + while (rx < tx) { + tcur = util_gettime(); + if (tcur >= tstop) + break; + + do_poll(fdr, POLLIN, tstop - tcur); + rx += do_rx(fdr); + } + } + + fprintf(stderr, "pkts: tx=%u rx=%u\n", tx, rx); + + if (fdr != -1 && close(fdr)) + error(1, errno, "close rx"); + if (fdt != -1 && close(fdt)) + error(1, errno, "close tx"); + + /* + * success (== 0) only if received all packets + * unless failure is expected, in which case none must arrive. + */ + if (cfg_expect_failure) + return rx != 0; + else + return rx != tx; +} + + +static void __attribute__((noreturn)) usage(const char *filepath) +{ + fprintf(stderr, "Usage: %s [-e gre|gue|bare|none] [-i 4|6] [-l len] " + "[-O 4|6] [-o 4|6] [-n num] [-t secs] [-R] [-T] " + "[-s <osrc> [-d <odst>] [-S <isrc>] [-D <idst>] " + "[-x <otos>] [-X <itos>] [-f <isport>] [-F]\n", + filepath); + exit(1); +} + +static void parse_addr(int family, void *addr, const char *optarg) +{ + int ret; + + ret = inet_pton(family, optarg, addr); + if (ret == -1) + error(1, errno, "inet_pton"); + if (ret == 0) + error(1, 0, "inet_pton: bad string"); +} + +static void parse_addr4(struct sockaddr_in *addr, const char *optarg) +{ + parse_addr(AF_INET, &addr->sin_addr, optarg); +} + +static void parse_addr6(struct sockaddr_in6 *addr, const char *optarg) +{ + parse_addr(AF_INET6, &addr->sin6_addr, optarg); +} + +static int parse_protocol_family(const char *filepath, const char *optarg) +{ + if (!strcmp(optarg, "4")) + return PF_INET; + if (!strcmp(optarg, "6")) + return PF_INET6; + + usage(filepath); +} + +static void parse_opts(int argc, char **argv) +{ + int c; + + while ((c = getopt(argc, argv, "d:D:e:f:Fhi:l:n:o:O:Rs:S:t:Tx:X:")) != -1) { + switch (c) { + case 'd': + if (cfg_l3_outer == AF_UNSPEC) + error(1, 0, "-d must be preceded by -o"); + if (cfg_l3_outer == AF_INET) + parse_addr4(&out_daddr4, optarg); + else + parse_addr6(&out_daddr6, optarg); + break; + case 'D': + if (cfg_l3_inner == AF_UNSPEC) + error(1, 0, "-D must be preceded by -i"); + if (cfg_l3_inner == AF_INET) + parse_addr4(&in_daddr4, optarg); + else + parse_addr6(&in_daddr6, optarg); + break; + case 'e': + if (!strcmp(optarg, "gre")) + cfg_encap_proto = IPPROTO_GRE; + else if (!strcmp(optarg, "gue")) + cfg_encap_proto = IPPROTO_UDP; + else if (!strcmp(optarg, "bare")) + cfg_encap_proto = IPPROTO_IPIP; + else if (!strcmp(optarg, "none")) + cfg_encap_proto = IPPROTO_IP; /* == 0 */ + else + usage(argv[0]); + break; + case 'f': + cfg_src_port = strtol(optarg, NULL, 0); + break; + case 'F': + cfg_expect_failure = true; + break; + case 'h': + usage(argv[0]); + break; + case 'i': + if (!strcmp(optarg, "4")) + cfg_l3_inner = PF_INET; + else if (!strcmp(optarg, "6")) + cfg_l3_inner = PF_INET6; + else + usage(argv[0]); + break; + case 'l': + cfg_payload_len = strtol(optarg, NULL, 0); + break; + case 'n': + cfg_num_pkt = strtol(optarg, NULL, 0); + break; + case 'o': + cfg_l3_outer = parse_protocol_family(argv[0], optarg); + break; + case 'O': + cfg_l3_extra = parse_protocol_family(argv[0], optarg); + break; + case 'R': + cfg_only_rx = true; + break; + case 's': + if (cfg_l3_outer == AF_INET) + parse_addr4(&out_saddr4, optarg); + else + parse_addr6(&out_saddr6, optarg); + break; + case 'S': + if (cfg_l3_inner == AF_INET) + parse_addr4(&in_saddr4, optarg); + else + 
parse_addr6(&in_saddr6, optarg); + break; + case 't': + cfg_num_secs = strtol(optarg, NULL, 0); + break; + case 'T': + cfg_only_tx = true; + break; + case 'x': + cfg_dsfield_outer = strtol(optarg, NULL, 0); + break; + case 'X': + cfg_dsfield_inner = strtol(optarg, NULL, 0); + break; + } + } + + if (cfg_only_rx && cfg_only_tx) + error(1, 0, "options: cannot combine rx-only and tx-only"); + + if (cfg_encap_proto && cfg_l3_outer == AF_UNSPEC) + error(1, 0, "options: must specify outer with encap"); + else if ((!cfg_encap_proto) && cfg_l3_outer != AF_UNSPEC) + error(1, 0, "options: cannot combine no-encap and outer"); + else if ((!cfg_encap_proto) && cfg_l3_extra != AF_UNSPEC) + error(1, 0, "options: cannot combine no-encap and extra"); + + if (cfg_l3_inner == AF_UNSPEC) + cfg_l3_inner = AF_INET6; + if (cfg_l3_inner == AF_INET6 && cfg_encap_proto == IPPROTO_IPIP) + cfg_encap_proto = IPPROTO_IPV6; + + /* RFC 6040 4.2: + * on decap, if outer encountered congestion (CE == 0x3), + * but inner cannot encode ECN (NoECT == 0x0), then drop packet. + */ + if (((cfg_dsfield_outer & 0x3) == 0x3) && + ((cfg_dsfield_inner & 0x3) == 0x0)) + cfg_expect_failure = true; +} + +static void print_opts(void) +{ + if (cfg_l3_inner == PF_INET6) { + util_printaddr("inner.dest6", (void *) &in_daddr6); + util_printaddr("inner.source6", (void *) &in_saddr6); + } else { + util_printaddr("inner.dest4", (void *) &in_daddr4); + util_printaddr("inner.source4", (void *) &in_saddr4); + } + + if (!cfg_l3_outer) + return; + + fprintf(stderr, "encap proto: %u\n", cfg_encap_proto); + + if (cfg_l3_outer == PF_INET6) { + util_printaddr("outer.dest6", (void *) &out_daddr6); + util_printaddr("outer.source6", (void *) &out_saddr6); + } else { + util_printaddr("outer.dest4", (void *) &out_daddr4); + util_printaddr("outer.source4", (void *) &out_saddr4); + } + + if (!cfg_l3_extra) + return; + + if (cfg_l3_outer == PF_INET6) { + util_printaddr("extra.dest6", (void *) &extra_daddr6); + util_printaddr("extra.source6", (void *) &extra_saddr6); + } else { + util_printaddr("extra.dest4", (void *) &extra_daddr4); + util_printaddr("extra.source4", (void *) &extra_saddr4); + } + +} + +int main(int argc, char **argv) +{ + parse_opts(argc, argv); + print_opts(); + return do_main(); +} diff --git a/tools/testing/selftests/bpf/test_flow_dissector.sh b/tools/testing/selftests/bpf/test_flow_dissector.sh new file mode 100755 index 000000000000..c0fb073b5eab --- /dev/null +++ b/tools/testing/selftests/bpf/test_flow_dissector.sh @@ -0,0 +1,115 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 +# +# Load BPF flow dissector and verify it correctly dissects traffic +export TESTNAME=test_flow_dissector +unmount=0 + +# Kselftest framework requirement - SKIP code is 4. +ksft_skip=4 + +msg="skip all tests:" +if [ $UID != 0 ]; then + echo $msg please run this as root >&2 + exit $ksft_skip +fi + +# This test needs to be run in a network namespace with in_netns.sh. Check if +# this is the case and run it with in_netns.sh if it is being run in the root +# namespace. +if [[ -z $(ip netns identify $$) ]]; then + ../net/in_netns.sh "$0" "$@" + exit $? +fi + +# Determine selftest success via shell exit code +exit_handler() +{ + if (( $? 
== 0 )); then
+		echo "selftests: $TESTNAME [PASS]";
+	else
+		echo "selftests: $TESTNAME [FAILED]";
+	fi
+
+	set +e
+
+	# Cleanup
+	tc filter del dev lo ingress pref 1337 2> /dev/null
+	tc qdisc del dev lo ingress 2> /dev/null
+	./flow_dissector_load -d 2> /dev/null
+	if [ $unmount -ne 0 ]; then
+		umount bpffs 2> /dev/null
+	fi
+}
+
+# Exit the script immediately (well caught by the trap handler) if any
+# program exits with a non-zero status.
+set -e
+
+# (Use 'trap -l' to list the meaning of the numbers)
+trap exit_handler 0 2 3 6 9
+
+# Mount BPF file system
+if /bin/mount | grep /sys/fs/bpf > /dev/null; then
+	echo "bpffs already mounted"
+else
+	echo "bpffs not mounted. Mounting..."
+	unmount=1
+	/bin/mount bpffs /sys/fs/bpf -t bpf
+fi
+
+# Attach BPF program
+./flow_dissector_load -p bpf_flow.o -s dissect
+
+# Setup
+tc qdisc add dev lo ingress
+
+echo "Testing IPv4..."
+# Drops all IP/UDP packets coming from port 9
+tc filter add dev lo parent ffff: protocol ip pref 1337 flower ip_proto \
+	udp src_port 9 action drop
+
+# Send 10 IPv4/UDP packets from port 8. Filter should not drop any.
+./test_flow_dissector -i 4 -f 8
+# Send 10 IPv4/UDP packets from port 9. Filter should drop all.
+./test_flow_dissector -i 4 -f 9 -F
+# Send 10 IPv4/UDP packets from port 10. Filter should not drop any.
+./test_flow_dissector -i 4 -f 10
+
+echo "Testing IPIP..."
+# Send 10 IPv4/IPv4/UDP packets from port 8. Filter should not drop any.
+./with_addr.sh ./with_tunnels.sh ./test_flow_dissector -o 4 -e bare -i 4 \
+	-D 192.168.0.1 -S 1.1.1.1 -f 8
+# Send 10 IPv4/IPv4/UDP packets from port 9. Filter should drop all.
+./with_addr.sh ./with_tunnels.sh ./test_flow_dissector -o 4 -e bare -i 4 \
+	-D 192.168.0.1 -S 1.1.1.1 -f 9 -F
+# Send 10 IPv4/IPv4/UDP packets from port 10. Filter should not drop any.
+./with_addr.sh ./with_tunnels.sh ./test_flow_dissector -o 4 -e bare -i 4 \
+	-D 192.168.0.1 -S 1.1.1.1 -f 10
+
+echo "Testing IPv4 + GRE..."
+# Send 10 IPv4/GRE/IPv4/UDP packets from port 8. Filter should not drop any.
+./with_addr.sh ./with_tunnels.sh ./test_flow_dissector -o 4 -e gre -i 4 \
+	-D 192.168.0.1 -S 1.1.1.1 -f 8
+# Send 10 IPv4/GRE/IPv4/UDP packets from port 9. Filter should drop all.
+./with_addr.sh ./with_tunnels.sh ./test_flow_dissector -o 4 -e gre -i 4 \
+	-D 192.168.0.1 -S 1.1.1.1 -f 9 -F
+# Send 10 IPv4/GRE/IPv4/UDP packets from port 10. Filter should not drop any.
+./with_addr.sh ./with_tunnels.sh ./test_flow_dissector -o 4 -e gre -i 4 \
+	-D 192.168.0.1 -S 1.1.1.1 -f 10
+
+tc filter del dev lo ingress pref 1337
+
+echo "Testing IPv6..."
+# Drops all IPv6/UDP packets coming from port 9
+tc filter add dev lo parent ffff: protocol ipv6 pref 1337 flower ip_proto \
+	udp src_port 9 action drop
+
+# Send 10 IPv6/UDP packets from port 8. Filter should not drop any.
+./test_flow_dissector -i 6 -f 8
+# Send 10 IPv6/UDP packets from port 9. Filter should drop all.
+./test_flow_dissector -i 6 -f 9 -F
+# Send 10 IPv6/UDP packets from port 10. Filter should not drop any.
+./test_flow_dissector -i 6 -f 10
+
+exit 0
diff --git a/tools/testing/selftests/bpf/test_netcnt.c b/tools/testing/selftests/bpf/test_netcnt.c
new file mode 100644
index 000000000000..7887df693399
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_netcnt.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <assert.h>
+#include <sys/sysinfo.h>
+#include <sys/time.h>
+
+#include <linux/bpf.h>
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+
+#include "cgroup_helpers.h"
+#include "bpf_rlimit.h"
+#include "netcnt_common.h"
+
+#define BPF_PROG "./netcnt_prog.o"
+#define TEST_CGROUP "/test-network-counters/"
+
+static int bpf_find_map(const char *test, struct bpf_object *obj,
+			const char *name)
+{
+	struct bpf_map *map;
+
+	map = bpf_object__find_map_by_name(obj, name);
+	if (!map) {
+		printf("%s:FAIL:map '%s' not found\n", test, name);
+		return -1;
+	}
+	return bpf_map__fd(map);
+}
+
+int main(int argc, char **argv)
+{
+	struct percpu_net_cnt *percpu_netcnt;
+	struct bpf_cgroup_storage_key key;
+	int map_fd, percpu_map_fd;
+	int error = EXIT_FAILURE;
+	struct net_cnt netcnt;
+	struct bpf_object *obj;
+	int prog_fd, cgroup_fd;
+	unsigned long packets;
+	unsigned long bytes;
+	int cpu, nproc;
+	__u32 prog_cnt;
+
+	nproc = get_nprocs_conf();
+	percpu_netcnt = malloc(sizeof(*percpu_netcnt) * nproc);
+	if (!percpu_netcnt) {
+		printf("Not enough memory for per-cpu area (%d cpus)\n", nproc);
+		goto err;
+	}
+
+	if (bpf_prog_load(BPF_PROG, BPF_PROG_TYPE_CGROUP_SKB,
+			  &obj, &prog_fd)) {
+		printf("Failed to load bpf program\n");
+		goto out;
+	}
+
+	if (setup_cgroup_environment()) {
+		printf("Failed to setup cgroup environment\n");
+		goto err;
+	}
+
+	/* Create a cgroup, get fd, and join it */
+	cgroup_fd = create_and_get_cgroup(TEST_CGROUP);
+	if (!cgroup_fd) {
+		printf("Failed to create test cgroup\n");
+		goto err;
+	}
+
+	if (join_cgroup(TEST_CGROUP)) {
+		printf("Failed to join cgroup\n");
+		goto err;
+	}
+
+	/* Attach bpf program */
+	if (bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_INET_EGRESS, 0)) {
+		printf("Failed to attach bpf program\n");
+		goto err;
+	}
+
+	assert(system("ping localhost -6 -c 10000 -f -q > /dev/null") == 0);
+
+	if (bpf_prog_query(cgroup_fd, BPF_CGROUP_INET_EGRESS, 0, NULL, NULL,
+			   &prog_cnt)) {
+		printf("Failed to query attached programs\n");
+		goto err;
+	}
+
+	map_fd = bpf_find_map(__func__, obj, "netcnt");
+	if (map_fd < 0) {
+		printf("Failed to find bpf map with net counters\n");
+		goto err;
+	}
+
+	percpu_map_fd = bpf_find_map(__func__, obj, "percpu_netcnt");
+	if (percpu_map_fd < 0) {
+		printf("Failed to find bpf map with percpu net counters\n");
+		goto err;
+	}
+
+	if (bpf_map_get_next_key(map_fd, NULL, &key)) {
+		printf("Failed to get key in cgroup storage\n");
+		goto err;
+	}
+
+	if (bpf_map_lookup_elem(map_fd, &key, &netcnt)) {
+		printf("Failed to lookup cgroup storage\n");
+		goto err;
+	}
+
+	if (bpf_map_lookup_elem(percpu_map_fd, &key, &percpu_netcnt[0])) {
+		printf("Failed to lookup percpu cgroup storage\n");
+		goto err;
+	}
+
+	/* Some packets may still be in the per-cpu cache, but not more than
+	 * MAX_PERCPU_PACKETS.
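+	 *
+	 * Sketch of the accounting this implies (assuming netcnt_prog.o
+	 * only folds a per-cpu counter into the shared cgroup storage
+	 * once it crosses MAX_PERCPU_PACKETS):
+	 *
+	 *   total = netcnt.packets + sum over cpus(percpu_netcnt[cpu].packets)
+	 *
+	 * which is what the loop below computes before comparing against
+	 * the 10000 pings sent above.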
+	 */
+	packets = netcnt.packets;
+	bytes = netcnt.bytes;
+	for (cpu = 0; cpu < nproc; cpu++) {
+		if (percpu_netcnt[cpu].packets > MAX_PERCPU_PACKETS) {
+			printf("Unexpected percpu value: %llu\n",
+			       percpu_netcnt[cpu].packets);
+			goto err;
+		}
+
+		packets += percpu_netcnt[cpu].packets;
+		bytes += percpu_netcnt[cpu].bytes;
+	}
+
+	/* No packets should be lost */
+	if (packets != 10000) {
+		printf("Unexpected packet count: %lu\n", packets);
+		goto err;
+	}
+
+	/* Check that the bytes counter matches the number of packets
+	 * multiplied by the size of an IPv6 ICMP packet (104 bytes).
+	 */
+	if (bytes != packets * 104) {
+		printf("Unexpected bytes count: %lu\n", bytes);
+		goto err;
+	}
+
+	error = 0;
+	printf("test_netcnt:PASS\n");
+
+err:
+	cleanup_cgroup_environment();
+	free(percpu_netcnt);
+
+out:
+	return error;
+}
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
index 0ef68204c84b..e8becca9c521 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -112,13 +112,13 @@ static void test_pkt_access(void)
 
 	err = bpf_prog_test_run(prog_fd, 100000, &pkt_v4, sizeof(pkt_v4),
 				NULL, NULL, &retval, &duration);
-	CHECK(err || errno || retval, "ipv4",
+	CHECK(err || retval, "ipv4",
 	      "err %d errno %d retval %d duration %d\n",
 	      err, errno, retval, duration);
 
 	err = bpf_prog_test_run(prog_fd, 100000, &pkt_v6, sizeof(pkt_v6),
 				NULL, NULL, &retval, &duration);
-	CHECK(err || errno || retval, "ipv6",
+	CHECK(err || retval, "ipv6",
 	      "err %d errno %d retval %d duration %d\n",
 	      err, errno, retval, duration);
 	bpf_object__close(obj);
@@ -153,14 +153,14 @@ static void test_xdp(void)
 
 	err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
 				buf, &size, &retval, &duration);
-	CHECK(err || errno || retval != XDP_TX || size != 74 ||
+	CHECK(err || retval != XDP_TX || size != 74 ||
 	      iph->protocol != IPPROTO_IPIP, "ipv4",
 	      "err %d errno %d retval %d size %d\n",
 	      err, errno, retval, size);
 
 	err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6),
 				buf, &size, &retval, &duration);
-	CHECK(err || errno || retval != XDP_TX || size != 114 ||
+	CHECK(err || retval != XDP_TX || size != 114 ||
 	      iph6->nexthdr != IPPROTO_IPV6, "ipv6",
 	      "err %d errno %d retval %d size %d\n",
 	      err, errno, retval, size);
@@ -185,13 +185,13 @@ static void test_xdp_adjust_tail(void)
 
 	err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
 				buf, &size, &retval, &duration);
-	CHECK(err || errno || retval != XDP_DROP,
+	CHECK(err || retval != XDP_DROP,
 	      "ipv4", "err %d errno %d retval %d size %d\n",
 	      err, errno, retval, size);
 
 	err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6),
 				buf, &size, &retval, &duration);
-	CHECK(err || errno || retval != XDP_TX || size != 54,
+	CHECK(err || retval != XDP_TX || size != 54,
 	      "ipv6", "err %d errno %d retval %d size %d\n",
 	      err, errno, retval, size);
 	bpf_object__close(obj);
@@ -254,14 +254,14 @@ static void test_l4lb(const char *file)
 
 	err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4),
 				buf, &size, &retval, &duration);
-	CHECK(err || errno || retval != 7/*TC_ACT_REDIRECT*/ || size != 54 ||
+	CHECK(err || retval != 7/*TC_ACT_REDIRECT*/ || size != 54 ||
 	      *magic != MAGIC_VAL, "ipv4",
 	      "err %d errno %d retval %d size %d magic %x\n",
 	      err, errno, retval, size, *magic);
 
 	err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6),
 				buf, &size, &retval, &duration);
-	CHECK(err || errno || retval != 7/*TC_ACT_REDIRECT*/ || size != 74 ||
+	CHECK(err || retval != 7/*TC_ACT_REDIRECT*/ || size != 74 ||
 	      *magic != MAGIC_VAL, "ipv6",
"err %d errno %d retval %d size %d magic %x\n", err, errno, retval, size, *magic); @@ -343,14 +343,14 @@ static void test_xdp_noinline(void) err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4), buf, &size, &retval, &duration); - CHECK(err || errno || retval != 1 || size != 54 || + CHECK(err || retval != 1 || size != 54 || *magic != MAGIC_VAL, "ipv4", "err %d errno %d retval %d size %d magic %x\n", err, errno, retval, size, *magic); err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6), buf, &size, &retval, &duration); - CHECK(err || errno || retval != 1 || size != 74 || + CHECK(err || retval != 1 || size != 74 || *magic != MAGIC_VAL, "ipv6", "err %d errno %d retval %d size %d magic %x\n", err, errno, retval, size, *magic); @@ -1698,6 +1698,43 @@ static void test_task_fd_query_tp(void) "sys_enter_read"); } +static void test_reference_tracking() +{ + const char *file = "./test_sk_lookup_kern.o"; + struct bpf_object *obj; + struct bpf_program *prog; + __u32 duration; + int err = 0; + + obj = bpf_object__open(file); + if (IS_ERR(obj)) { + error_cnt++; + return; + } + + bpf_object__for_each_program(prog, obj) { + const char *title; + + /* Ignore .text sections */ + title = bpf_program__title(prog, false); + if (strstr(title, ".text") != NULL) + continue; + + bpf_program__set_type(prog, BPF_PROG_TYPE_SCHED_CLS); + + /* Expect verifier failure if test name has 'fail' */ + if (strstr(title, "fail") != NULL) { + libbpf_set_print(NULL, NULL, NULL); + err = !bpf_program__load(prog, "GPL", 0); + libbpf_set_print(printf, printf, NULL); + } else { + err = bpf_program__load(prog, "GPL", 0); + } + CHECK(err, title, "\n"); + } + bpf_object__close(obj); +} + int main(void) { jit_enabled = is_jit_enabled(); @@ -1719,6 +1756,7 @@ int main(void) test_get_stack_raw_tp(); test_task_fd_query_rawtp(); test_task_fd_query_tp(); + test_reference_tracking(); printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, error_cnt); return error_cnt ? 
EXIT_FAILURE : EXIT_SUCCESS; diff --git a/tools/testing/selftests/bpf/test_section_names.c b/tools/testing/selftests/bpf/test_section_names.c new file mode 100644 index 000000000000..7c4f41572b1c --- /dev/null +++ b/tools/testing/selftests/bpf/test_section_names.c @@ -0,0 +1,208 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2018 Facebook + +#include <err.h> +#include <bpf/libbpf.h> + +#include "bpf_util.h" + +struct sec_name_test { + const char sec_name[32]; + struct { + int rc; + enum bpf_prog_type prog_type; + enum bpf_attach_type expected_attach_type; + } expected_load; + struct { + int rc; + enum bpf_attach_type attach_type; + } expected_attach; +}; + +static struct sec_name_test tests[] = { + {"InvAliD", {-EINVAL, 0, 0}, {-EINVAL, 0} }, + {"cgroup", {-EINVAL, 0, 0}, {-EINVAL, 0} }, + {"socket", {0, BPF_PROG_TYPE_SOCKET_FILTER, 0}, {-EINVAL, 0} }, + {"kprobe/", {0, BPF_PROG_TYPE_KPROBE, 0}, {-EINVAL, 0} }, + {"kretprobe/", {0, BPF_PROG_TYPE_KPROBE, 0}, {-EINVAL, 0} }, + {"classifier", {0, BPF_PROG_TYPE_SCHED_CLS, 0}, {-EINVAL, 0} }, + {"action", {0, BPF_PROG_TYPE_SCHED_ACT, 0}, {-EINVAL, 0} }, + {"tracepoint/", {0, BPF_PROG_TYPE_TRACEPOINT, 0}, {-EINVAL, 0} }, + { + "raw_tracepoint/", + {0, BPF_PROG_TYPE_RAW_TRACEPOINT, 0}, + {-EINVAL, 0}, + }, + {"xdp", {0, BPF_PROG_TYPE_XDP, 0}, {-EINVAL, 0} }, + {"perf_event", {0, BPF_PROG_TYPE_PERF_EVENT, 0}, {-EINVAL, 0} }, + {"lwt_in", {0, BPF_PROG_TYPE_LWT_IN, 0}, {-EINVAL, 0} }, + {"lwt_out", {0, BPF_PROG_TYPE_LWT_OUT, 0}, {-EINVAL, 0} }, + {"lwt_xmit", {0, BPF_PROG_TYPE_LWT_XMIT, 0}, {-EINVAL, 0} }, + {"lwt_seg6local", {0, BPF_PROG_TYPE_LWT_SEG6LOCAL, 0}, {-EINVAL, 0} }, + { + "cgroup_skb/ingress", + {0, BPF_PROG_TYPE_CGROUP_SKB, 0}, + {0, BPF_CGROUP_INET_INGRESS}, + }, + { + "cgroup_skb/egress", + {0, BPF_PROG_TYPE_CGROUP_SKB, 0}, + {0, BPF_CGROUP_INET_EGRESS}, + }, + {"cgroup/skb", {0, BPF_PROG_TYPE_CGROUP_SKB, 0}, {-EINVAL, 0} }, + { + "cgroup/sock", + {0, BPF_PROG_TYPE_CGROUP_SOCK, 0}, + {0, BPF_CGROUP_INET_SOCK_CREATE}, + }, + { + "cgroup/post_bind4", + {0, BPF_PROG_TYPE_CGROUP_SOCK, BPF_CGROUP_INET4_POST_BIND}, + {0, BPF_CGROUP_INET4_POST_BIND}, + }, + { + "cgroup/post_bind6", + {0, BPF_PROG_TYPE_CGROUP_SOCK, BPF_CGROUP_INET6_POST_BIND}, + {0, BPF_CGROUP_INET6_POST_BIND}, + }, + { + "cgroup/dev", + {0, BPF_PROG_TYPE_CGROUP_DEVICE, 0}, + {0, BPF_CGROUP_DEVICE}, + }, + {"sockops", {0, BPF_PROG_TYPE_SOCK_OPS, 0}, {0, BPF_CGROUP_SOCK_OPS} }, + { + "sk_skb/stream_parser", + {0, BPF_PROG_TYPE_SK_SKB, 0}, + {0, BPF_SK_SKB_STREAM_PARSER}, + }, + { + "sk_skb/stream_verdict", + {0, BPF_PROG_TYPE_SK_SKB, 0}, + {0, BPF_SK_SKB_STREAM_VERDICT}, + }, + {"sk_skb", {0, BPF_PROG_TYPE_SK_SKB, 0}, {-EINVAL, 0} }, + {"sk_msg", {0, BPF_PROG_TYPE_SK_MSG, 0}, {0, BPF_SK_MSG_VERDICT} }, + {"lirc_mode2", {0, BPF_PROG_TYPE_LIRC_MODE2, 0}, {0, BPF_LIRC_MODE2} }, + { + "flow_dissector", + {0, BPF_PROG_TYPE_FLOW_DISSECTOR, 0}, + {0, BPF_FLOW_DISSECTOR}, + }, + { + "cgroup/bind4", + {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_BIND}, + {0, BPF_CGROUP_INET4_BIND}, + }, + { + "cgroup/bind6", + {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_BIND}, + {0, BPF_CGROUP_INET6_BIND}, + }, + { + "cgroup/connect4", + {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_CONNECT}, + {0, BPF_CGROUP_INET4_CONNECT}, + }, + { + "cgroup/connect6", + {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_CONNECT}, + {0, BPF_CGROUP_INET6_CONNECT}, + }, + { + "cgroup/sendmsg4", + {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_SENDMSG}, + {0, 
BPF_CGROUP_UDP4_SENDMSG}, + }, + { + "cgroup/sendmsg6", + {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_SENDMSG}, + {0, BPF_CGROUP_UDP6_SENDMSG}, + }, +}; + +static int test_prog_type_by_name(const struct sec_name_test *test) +{ + enum bpf_attach_type expected_attach_type; + enum bpf_prog_type prog_type; + int rc; + + rc = libbpf_prog_type_by_name(test->sec_name, &prog_type, + &expected_attach_type); + + if (rc != test->expected_load.rc) { + warnx("prog: unexpected rc=%d for %s", rc, test->sec_name); + return -1; + } + + if (rc) + return 0; + + if (prog_type != test->expected_load.prog_type) { + warnx("prog: unexpected prog_type=%d for %s", prog_type, + test->sec_name); + return -1; + } + + if (expected_attach_type != test->expected_load.expected_attach_type) { + warnx("prog: unexpected expected_attach_type=%d for %s", + expected_attach_type, test->sec_name); + return -1; + } + + return 0; +} + +static int test_attach_type_by_name(const struct sec_name_test *test) +{ + enum bpf_attach_type attach_type; + int rc; + + rc = libbpf_attach_type_by_name(test->sec_name, &attach_type); + + if (rc != test->expected_attach.rc) { + warnx("attach: unexpected rc=%d for %s", rc, test->sec_name); + return -1; + } + + if (rc) + return 0; + + if (attach_type != test->expected_attach.attach_type) { + warnx("attach: unexpected attach_type=%d for %s", attach_type, + test->sec_name); + return -1; + } + + return 0; +} + +static int run_test_case(const struct sec_name_test *test) +{ + if (test_prog_type_by_name(test)) + return -1; + if (test_attach_type_by_name(test)) + return -1; + return 0; +} + +static int run_tests(void) +{ + int passes = 0; + int fails = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(tests); ++i) { + if (run_test_case(&tests[i])) + ++fails; + else + ++passes; + } + printf("Summary: %d PASSED, %d FAILED\n", passes, fails); + return fails ? -1 : 0; +} + +int main(int argc, char **argv) +{ + return run_tests(); +} diff --git a/tools/testing/selftests/bpf/test_sk_lookup_kern.c b/tools/testing/selftests/bpf/test_sk_lookup_kern.c new file mode 100644 index 000000000000..b745bdc08c2b --- /dev/null +++ b/tools/testing/selftests/bpf/test_sk_lookup_kern.c @@ -0,0 +1,180 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (c) 2018 Covalent IO, Inc. http://covalent.io + +#include <stddef.h> +#include <stdbool.h> +#include <string.h> +#include <linux/bpf.h> +#include <linux/if_ether.h> +#include <linux/in.h> +#include <linux/ip.h> +#include <linux/ipv6.h> +#include <linux/pkt_cls.h> +#include <linux/tcp.h> +#include <sys/socket.h> +#include "bpf_helpers.h" +#include "bpf_endian.h" + +int _version SEC("version") = 1; +char _license[] SEC("license") = "GPL"; + +/* Fill 'tuple' with L3 info, and attempt to find L4. On fail, return NULL. 
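+ *
+ * Based on the callers below, *ipv4 is meant to size the tuple for the
+ * lookup, e.g. (sketch of the intended use):
+ *
+ *   tuple_len = ipv4 ? sizeof(tuple->ipv4) : sizeof(tuple->ipv6);
+ *   sk = bpf_sk_lookup_tcp(skb, tuple, tuple_len, 0, 0);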
+ */
+static struct bpf_sock_tuple *get_tuple(void *data, __u64 nh_off,
+					void *data_end, __u16 eth_proto,
+					bool *ipv4)
+{
+	struct bpf_sock_tuple *result;
+	__u8 proto = 0;
+	__u64 ihl_len;
+
+	if (eth_proto == bpf_htons(ETH_P_IP)) {
+		struct iphdr *iph = (struct iphdr *)(data + nh_off);
+
+		if (iph + 1 > data_end)
+			return NULL;
+		ihl_len = iph->ihl * 4;
+		proto = iph->protocol;
+		*ipv4 = true;
+		result = (struct bpf_sock_tuple *)&iph->saddr;
+	} else if (eth_proto == bpf_htons(ETH_P_IPV6)) {
+		struct ipv6hdr *ip6h = (struct ipv6hdr *)(data + nh_off);
+
+		if (ip6h + 1 > data_end)
+			return NULL;
+		ihl_len = sizeof(*ip6h);
+		proto = ip6h->nexthdr;
+		*ipv4 = false;
+		result = (struct bpf_sock_tuple *)&ip6h->saddr;
+	}
+
+	if (data + nh_off + ihl_len > data_end || proto != IPPROTO_TCP)
+		return NULL;
+
+	return result;
+}
+
+SEC("sk_lookup_success")
+int bpf_sk_lookup_test0(struct __sk_buff *skb)
+{
+	void *data_end = (void *)(long)skb->data_end;
+	void *data = (void *)(long)skb->data;
+	struct ethhdr *eth = (struct ethhdr *)(data);
+	struct bpf_sock_tuple *tuple;
+	struct bpf_sock *sk;
+	size_t tuple_len;
+	bool ipv4;
+
+	if (eth + 1 > data_end)
+		return TC_ACT_SHOT;
+
+	tuple = get_tuple(data, sizeof(*eth), data_end, eth->h_proto, &ipv4);
+	if (!tuple || tuple + sizeof *tuple > data_end)
+		return TC_ACT_SHOT;
+
+	tuple_len = ipv4 ? sizeof(tuple->ipv4) : sizeof(tuple->ipv6);
+	sk = bpf_sk_lookup_tcp(skb, tuple, tuple_len, 0, 0);
+	if (sk)
+		bpf_sk_release(sk);
+	return sk ? TC_ACT_OK : TC_ACT_UNSPEC;
+}
+
+SEC("sk_lookup_success_simple")
+int bpf_sk_lookup_test1(struct __sk_buff *skb)
+{
+	struct bpf_sock_tuple tuple = {};
+	struct bpf_sock *sk;
+
+	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0);
+	if (sk)
+		bpf_sk_release(sk);
+	return 0;
+}
+
+SEC("fail_use_after_free")
+int bpf_sk_lookup_uaf(struct __sk_buff *skb)
+{
+	struct bpf_sock_tuple tuple = {};
+	struct bpf_sock *sk;
+	__u32 family = 0;
+
+	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0);
+	if (sk) {
+		bpf_sk_release(sk);
+		family = sk->family;
+	}
+	return family;
+}
+
+SEC("fail_modify_sk_pointer")
+int bpf_sk_lookup_modptr(struct __sk_buff *skb)
+{
+	struct bpf_sock_tuple tuple = {};
+	struct bpf_sock *sk;
+	__u32 family;
+
+	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0);
+	if (sk) {
+		sk += 1;
+		bpf_sk_release(sk);
+	}
+	return 0;
+}
+
+SEC("fail_modify_sk_or_null_pointer")
+int bpf_sk_lookup_modptr_or_null(struct __sk_buff *skb)
+{
+	struct bpf_sock_tuple tuple = {};
+	struct bpf_sock *sk;
+	__u32 family;
+
+	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0);
+	sk += 1;
+	if (sk)
+		bpf_sk_release(sk);
+	return 0;
+}
+
+SEC("fail_no_release")
+int bpf_sk_lookup_test2(struct __sk_buff *skb)
+{
+	struct bpf_sock_tuple tuple = {};
+
+	bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0);
+	return 0;
+}
+
+SEC("fail_release_twice")
+int bpf_sk_lookup_test3(struct __sk_buff *skb)
+{
+	struct bpf_sock_tuple tuple = {};
+	struct bpf_sock *sk;
+
+	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0);
+	bpf_sk_release(sk);
+	bpf_sk_release(sk);
+	return 0;
+}
+
+SEC("fail_release_unchecked")
+int bpf_sk_lookup_test4(struct __sk_buff *skb)
+{
+	struct bpf_sock_tuple tuple = {};
+	struct bpf_sock *sk;
+
+	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0);
+	bpf_sk_release(sk);
+	return 0;
+}
+
+void lookup_no_release(struct __sk_buff *skb)
+{
+	struct bpf_sock_tuple tuple = {};
+	bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0);
+}
+
+SEC("fail_no_release_subcall")
+int
bpf_sk_lookup_test5(struct __sk_buff *skb) +{ + lookup_no_release(skb); + return 0; +} diff --git a/tools/testing/selftests/bpf/test_socket_cookie.c b/tools/testing/selftests/bpf/test_socket_cookie.c index 68e108e4687a..b6c2c605d8c0 100644 --- a/tools/testing/selftests/bpf/test_socket_cookie.c +++ b/tools/testing/selftests/bpf/test_socket_cookie.c @@ -158,11 +158,7 @@ static int run_test(int cgfd) bpf_object__for_each_program(prog, pobj) { prog_name = bpf_program__title(prog, /*needs_copy*/ false); - if (strcmp(prog_name, "cgroup/connect6") == 0) { - attach_type = BPF_CGROUP_INET6_CONNECT; - } else if (strcmp(prog_name, "sockops") == 0) { - attach_type = BPF_CGROUP_SOCK_OPS; - } else { + if (libbpf_attach_type_by_name(prog_name, &attach_type)) { log_err("Unexpected prog: %s", prog_name); goto err; } diff --git a/tools/testing/selftests/bpf/test_sockmap.c b/tools/testing/selftests/bpf/test_sockmap.c index 0c7d9e556b47..ac7de38e5c63 100644 --- a/tools/testing/selftests/bpf/test_sockmap.c +++ b/tools/testing/selftests/bpf/test_sockmap.c @@ -469,8 +469,6 @@ static int sendmsg_test(struct sockmap_options *opt) fprintf(stderr, "msg_loop_rx: iov_count %i iov_buf %i cnt %i err %i\n", iov_count, iov_buf, cnt, err); - shutdown(p2, SHUT_RDWR); - shutdown(p1, SHUT_RDWR); if (s.end.tv_sec - s.start.tv_sec) { sent_Bps = sentBps(s); recvd_Bps = recvdBps(s); @@ -500,7 +498,6 @@ static int sendmsg_test(struct sockmap_options *opt) fprintf(stderr, "msg_loop_tx: iov_count %i iov_buf %i cnt %i err %i\n", iov_count, iov_buf, cnt, err); - shutdown(c1, SHUT_RDWR); if (s.end.tv_sec - s.start.tv_sec) { sent_Bps = sentBps(s); recvd_Bps = recvdBps(s); @@ -1348,9 +1345,9 @@ static int populate_progs(char *bpf_file) return 0; } -static int __test_suite(char *bpf_file) +static int __test_suite(int cg_fd, char *bpf_file) { - int cg_fd, err; + int err, cleanup = cg_fd; err = populate_progs(bpf_file); if (err < 0) { @@ -1358,22 +1355,24 @@ static int __test_suite(char *bpf_file) return err; } - if (setup_cgroup_environment()) { - fprintf(stderr, "ERROR: cgroup env failed\n"); - return -EINVAL; - } - - cg_fd = create_and_get_cgroup(CG_PATH); if (cg_fd < 0) { - fprintf(stderr, - "ERROR: (%i) open cg path failed: %s\n", - cg_fd, optarg); - return cg_fd; - } + if (setup_cgroup_environment()) { + fprintf(stderr, "ERROR: cgroup env failed\n"); + return -EINVAL; + } + + cg_fd = create_and_get_cgroup(CG_PATH); + if (cg_fd < 0) { + fprintf(stderr, + "ERROR: (%i) open cg path failed: %s\n", + cg_fd, optarg); + return cg_fd; + } - if (join_cgroup(CG_PATH)) { - fprintf(stderr, "ERROR: failed to join cgroup\n"); - return -EINVAL; + if (join_cgroup(CG_PATH)) { + fprintf(stderr, "ERROR: failed to join cgroup\n"); + return -EINVAL; + } } /* Tests basic commands and APIs with range of iov values */ @@ -1394,20 +1393,24 @@ static int __test_suite(char *bpf_file) out: printf("Summary: %i PASSED %i FAILED\n", passed, failed); - cleanup_cgroup_environment(); - close(cg_fd); + if (cleanup < 0) { + cleanup_cgroup_environment(); + close(cg_fd); + } return err; } -static int test_suite(void) +static int test_suite(int cg_fd) { int err; - err = __test_suite(BPF_SOCKMAP_FILENAME); + err = __test_suite(cg_fd, BPF_SOCKMAP_FILENAME); if (err) goto out; - err = __test_suite(BPF_SOCKHASH_FILENAME); + err = __test_suite(cg_fd, BPF_SOCKHASH_FILENAME); out: + if (cg_fd > -1) + close(cg_fd); return err; } @@ -1420,7 +1423,7 @@ int main(int argc, char **argv) int test = PING_PONG; if (argc < 2) - return test_suite(); + return test_suite(-1); while ((opt = 
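+	/* For reference, the maps checked below are laid out as follows
+	 * (mirroring test_tcpbpf_kern.c above):
+	 *   global_map[0]      - struct tcpbpf_globals event counters
+	 *   sockopt_results[0] - setsockopt(TCP_SAVE_SYN) return value
+	 *   sockopt_results[1] - SYN flag read back via TCP_SAVED_SYN
+	 */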
getopt_long(argc, argv, ":dhvc:r:i:l:t:", long_options, &longindex)) != -1) { @@ -1486,6 +1489,9 @@ int main(int argc, char **argv) } } + if (argc <= 3 && cg_fd) + return test_suite(cg_fd); + if (!cg_fd) { fprintf(stderr, "%s requires cgroup option: --cgroup <path>\n", argv[0]); diff --git a/tools/testing/selftests/bpf/test_tcpbpf_kern.c b/tools/testing/selftests/bpf/test_tcpbpf_kern.c index 4b7fd540cea9..74f73b33a7b0 100644 --- a/tools/testing/selftests/bpf/test_tcpbpf_kern.c +++ b/tools/testing/selftests/bpf/test_tcpbpf_kern.c @@ -5,6 +5,7 @@ #include <linux/if_ether.h> #include <linux/if_packet.h> #include <linux/ip.h> +#include <linux/ipv6.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/tcp.h> @@ -17,6 +18,13 @@ struct bpf_map_def SEC("maps") global_map = { .type = BPF_MAP_TYPE_ARRAY, .key_size = sizeof(__u32), .value_size = sizeof(struct tcpbpf_globals), + .max_entries = 4, +}; + +struct bpf_map_def SEC("maps") sockopt_results = { + .type = BPF_MAP_TYPE_ARRAY, + .key_size = sizeof(__u32), + .value_size = sizeof(int), .max_entries = 2, }; @@ -45,11 +53,14 @@ int _version SEC("version") = 1; SEC("sockops") int bpf_testcb(struct bpf_sock_ops *skops) { - int rv = -1; - int bad_call_rv = 0; + char header[sizeof(struct ipv6hdr) + sizeof(struct tcphdr)]; + struct tcphdr *thdr; int good_call_rv = 0; - int op; + int bad_call_rv = 0; + int save_syn = 1; + int rv = -1; int v = 0; + int op; op = (int) skops->op; @@ -82,6 +93,21 @@ int bpf_testcb(struct bpf_sock_ops *skops) v = 0xff; rv = bpf_setsockopt(skops, SOL_IPV6, IPV6_TCLASS, &v, sizeof(v)); + if (skops->family == AF_INET6) { + v = bpf_getsockopt(skops, IPPROTO_TCP, TCP_SAVED_SYN, + header, (sizeof(struct ipv6hdr) + + sizeof(struct tcphdr))); + if (!v) { + int offset = sizeof(struct ipv6hdr); + + thdr = (struct tcphdr *)(header + offset); + v = thdr->syn; + __u32 key = 1; + + bpf_map_update_elem(&sockopt_results, &key, &v, + BPF_ANY); + } + } break; case BPF_SOCK_OPS_RTO_CB: break; @@ -111,6 +137,12 @@ int bpf_testcb(struct bpf_sock_ops *skops) break; case BPF_SOCK_OPS_TCP_LISTEN_CB: bpf_sock_ops_cb_flags_set(skops, BPF_SOCK_OPS_STATE_CB_FLAG); + v = bpf_setsockopt(skops, IPPROTO_TCP, TCP_SAVE_SYN, + &save_syn, sizeof(save_syn)); + /* Update global map w/ result of setsock opt */ + __u32 key = 0; + + bpf_map_update_elem(&sockopt_results, &key, &v, BPF_ANY); break; default: rv = -1; diff --git a/tools/testing/selftests/bpf/test_tcpbpf_user.c b/tools/testing/selftests/bpf/test_tcpbpf_user.c index a275c2971376..e6eebda7d112 100644 --- a/tools/testing/selftests/bpf/test_tcpbpf_user.c +++ b/tools/testing/selftests/bpf/test_tcpbpf_user.c @@ -54,6 +54,26 @@ err: return -1; } +int verify_sockopt_result(int sock_map_fd) +{ + __u32 key = 0; + int res; + int rv; + + /* check setsockopt for SAVE_SYN */ + rv = bpf_map_lookup_elem(sock_map_fd, &key, &res); + EXPECT_EQ(0, rv, "d"); + EXPECT_EQ(0, res, "d"); + key = 1; + /* check getsockopt for SAVED_SYN */ + rv = bpf_map_lookup_elem(sock_map_fd, &key, &res); + EXPECT_EQ(0, rv, "d"); + EXPECT_EQ(1, res, "d"); + return 0; +err: + return -1; +} + static int bpf_find_map(const char *test, struct bpf_object *obj, const char *name) { @@ -70,11 +90,11 @@ static int bpf_find_map(const char *test, struct bpf_object *obj, int main(int argc, char **argv) { const char *file = "test_tcpbpf_kern.o"; + int prog_fd, map_fd, sock_map_fd; struct tcpbpf_globals g = {0}; const char *cg_path = "/foo"; int error = EXIT_FAILURE; struct bpf_object *obj; - int prog_fd, map_fd; int cg_fd = -1; __u32 key = 0; 
int rv; @@ -110,6 +130,10 @@ int main(int argc, char **argv) if (map_fd < 0) goto err; + sock_map_fd = bpf_find_map(__func__, obj, "sockopt_results"); + if (sock_map_fd < 0) + goto err; + rv = bpf_map_lookup_elem(map_fd, &key, &g); if (rv != 0) { printf("FAILED: bpf_map_lookup_elem returns %d\n", rv); @@ -121,6 +145,11 @@ int main(int argc, char **argv) goto err; } + if (verify_sockopt_result(sock_map_fd)) { + printf("FAILED: Wrong sockopt stats\n"); + goto err; + } + printf("PASSED!\n"); error = 0; err: diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index 67c412d19c09..bc9cd8537467 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -3,6 +3,7 @@ * * Copyright (c) 2014 PLUMgrid, http://plumgrid.com * Copyright (c) 2017 Facebook + * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public @@ -68,6 +69,7 @@ struct bpf_test { int fixup_prog2[MAX_FIXUPS]; int fixup_map_in_map[MAX_FIXUPS]; int fixup_cgroup_storage[MAX_FIXUPS]; + int fixup_percpu_cgroup_storage[MAX_FIXUPS]; const char *errstr; const char *errstr_unpriv; uint32_t retval; @@ -177,6 +179,24 @@ static void bpf_fill_rand_ld_dw(struct bpf_test *self) self->retval = (uint32_t)res; } +/* BPF_SK_LOOKUP contains 13 instructions, if you need to fix up maps */ +#define BPF_SK_LOOKUP \ + /* struct bpf_sock_tuple tuple = {} */ \ + BPF_MOV64_IMM(BPF_REG_2, 0), \ + BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -8), \ + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -16), \ + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -24), \ + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -32), \ + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -40), \ + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -48), \ + /* sk = sk_lookup_tcp(ctx, &tuple, sizeof tuple, 0, 0) */ \ + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), \ + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48), \ + BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)), \ + BPF_MOV64_IMM(BPF_REG_4, 0), \ + BPF_MOV64_IMM(BPF_REG_5, 0), \ + BPF_EMIT_CALL(BPF_FUNC_sk_lookup_tcp) + static struct bpf_test tests[] = { { "add+sub+mul", @@ -2707,6 +2727,137 @@ static struct bpf_test tests[] = { .prog_type = BPF_PROG_TYPE_SCHED_CLS, }, { + "unpriv: spill/fill of different pointers stx - ctx and sock", + .insns = { + BPF_MOV64_REG(BPF_REG_8, BPF_REG_1), + /* struct bpf_sock *sock = bpf_sock_lookup(...); */ + BPF_SK_LOOKUP, + BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), + /* u64 foo; */ + /* void *target = &foo; */ + BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_8), + /* if (skb == NULL) *target = sock; */ + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), + BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0), + /* else *target = skb; */ + BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), + BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), + /* struct __sk_buff *skb = *target; */ + BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0), + /* skb->mark = 42; */ + BPF_MOV64_IMM(BPF_REG_3, 42), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3, + offsetof(struct __sk_buff, mark)), + /* if (sk) bpf_sk_release(sk) */ + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), + BPF_EMIT_CALL(BPF_FUNC_sk_release), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "type=ctx expected=sock", + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + }, + { + "unpriv: spill/fill of different 
pointers stx - leak sock", + .insns = { + BPF_MOV64_REG(BPF_REG_8, BPF_REG_1), + /* struct bpf_sock *sock = bpf_sock_lookup(...); */ + BPF_SK_LOOKUP, + BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), + /* u64 foo; */ + /* void *target = &foo; */ + BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_8), + /* if (skb == NULL) *target = sock; */ + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), + BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0), + /* else *target = skb; */ + BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), + BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), + /* struct __sk_buff *skb = *target; */ + BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0), + /* skb->mark = 42; */ + BPF_MOV64_IMM(BPF_REG_3, 42), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3, + offsetof(struct __sk_buff, mark)), + BPF_EXIT_INSN(), + }, + .result = REJECT, + //.errstr = "same insn cannot be used with different pointers", + .errstr = "Unreleased reference", + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + }, + { + "unpriv: spill/fill of different pointers stx - sock and ctx (read)", + .insns = { + BPF_MOV64_REG(BPF_REG_8, BPF_REG_1), + /* struct bpf_sock *sock = bpf_sock_lookup(...); */ + BPF_SK_LOOKUP, + BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), + /* u64 foo; */ + /* void *target = &foo; */ + BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_8), + /* if (skb) *target = skb */ + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), + BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), + /* else *target = sock */ + BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), + BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0), + /* struct bpf_sock *sk = *target; */ + BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0), + /* if (sk) u32 foo = sk->mark; bpf_sk_release(sk); */ + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct bpf_sock, mark)), + BPF_EMIT_CALL(BPF_FUNC_sk_release), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "same insn cannot be used with different pointers", + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + }, + { + "unpriv: spill/fill of different pointers stx - sock and ctx (write)", + .insns = { + BPF_MOV64_REG(BPF_REG_8, BPF_REG_1), + /* struct bpf_sock *sock = bpf_sock_lookup(...); */ + BPF_SK_LOOKUP, + BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), + /* u64 foo; */ + /* void *target = &foo; */ + BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_8), + /* if (skb) *target = skb */ + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), + BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), + /* else *target = sock */ + BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), + BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0), + /* struct bpf_sock *sk = *target; */ + BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0), + /* if (sk) sk->mark = 42; bpf_sk_release(sk); */ + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3), + BPF_MOV64_IMM(BPF_REG_3, 42), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3, + offsetof(struct bpf_sock, mark)), + BPF_EMIT_CALL(BPF_FUNC_sk_release), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + //.errstr = "same insn cannot be used with different pointers", + .errstr = "cannot write into socket", + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + }, + { "unpriv: spill/fill of different pointers ldx", .insns = { BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), @@ -3275,7 +3426,7 @@ static struct bpf_test tests[] = { BPF_ST_MEM(BPF_DW, 
BPF_REG_1, offsetof(struct __sk_buff, mark), 0), BPF_EXIT_INSN(), }, - .errstr = "BPF_ST stores into R1 context is not allowed", + .errstr = "BPF_ST stores into R1 inv is not allowed", .result = REJECT, .prog_type = BPF_PROG_TYPE_SCHED_CLS, }, @@ -3287,7 +3438,7 @@ static struct bpf_test tests[] = { BPF_REG_0, offsetof(struct __sk_buff, mark), 0), BPF_EXIT_INSN(), }, - .errstr = "BPF_XADD stores into R1 context is not allowed", + .errstr = "BPF_XADD stores into R1 inv is not allowed", .result = REJECT, .prog_type = BPF_PROG_TYPE_SCHED_CLS, }, @@ -3637,7 +3788,7 @@ static struct bpf_test tests[] = { BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, - .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END", + .errstr = "R3 pointer arithmetic on pkt_end", .result = REJECT, .prog_type = BPF_PROG_TYPE_SCHED_CLS, }, @@ -4676,7 +4827,7 @@ static struct bpf_test tests[] = { .prog_type = BPF_PROG_TYPE_CGROUP_SKB, }, { - "invalid per-cgroup storage access 3", + "invalid cgroup storage access 3", .insns = { BPF_MOV64_IMM(BPF_REG_2, 0), BPF_LD_MAP_FD(BPF_REG_1, 0), @@ -4744,6 +4895,121 @@ static struct bpf_test tests[] = { .prog_type = BPF_PROG_TYPE_CGROUP_SKB, }, { + "valid per-cpu cgroup storage access", + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_get_local_storage), + BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), + BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_percpu_cgroup_storage = { 1 }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_CGROUP_SKB, + }, + { + "invalid per-cpu cgroup storage access 1", + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_get_local_storage), + BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), + BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_map1 = { 1 }, + .result = REJECT, + .errstr = "cannot pass map_type 1 into func bpf_get_local_storage", + .prog_type = BPF_PROG_TYPE_CGROUP_SKB, + }, + { + "invalid per-cpu cgroup storage access 2", + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_LD_MAP_FD(BPF_REG_1, 1), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_get_local_storage), + BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "fd 1 is not pointing to valid bpf_map", + .prog_type = BPF_PROG_TYPE_CGROUP_SKB, + }, + { + "invalid per-cpu cgroup storage access 3", + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_get_local_storage), + BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 256), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_percpu_cgroup_storage = { 1 }, + .result = REJECT, + .errstr = "invalid access to map value, value_size=64 off=256 size=4", + .prog_type = BPF_PROG_TYPE_CGROUP_SKB, + }, + { + "invalid per-cpu cgroup storage access 4", + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_get_local_storage), + BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -2), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1), + BPF_EXIT_INSN(), + }, + .fixup_cgroup_storage = { 1 }, + .result = REJECT, + .errstr = "invalid access to map value, value_size=64 off=-2 size=4", + .prog_type = BPF_PROG_TYPE_CGROUP_SKB, + }, + { 
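+		/* The "access 5" and "access 6" cases below pass a non-zero
+		 * flags argument to bpf_get_local_storage() (an immediate 7
+		 * and a ctx pointer, respectively), which the helper rejects.
+		 */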
+ "invalid per-cpu cgroup storage access 5", + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 7), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_get_local_storage), + BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), + BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_percpu_cgroup_storage = { 1 }, + .result = REJECT, + .errstr = "get_local_storage() doesn't support non-zero flags", + .prog_type = BPF_PROG_TYPE_CGROUP_SKB, + }, + { + "invalid per-cpu cgroup storage access 6", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_1), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_get_local_storage), + BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), + BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_percpu_cgroup_storage = { 1 }, + .result = REJECT, + .errstr = "get_local_storage() doesn't support non-zero flags", + .prog_type = BPF_PROG_TYPE_CGROUP_SKB, + }, + { "multiple registers share map_lookup_elem result", .insns = { BPF_MOV64_IMM(BPF_REG_1, 10), @@ -4780,7 +5046,7 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .fixup_map1 = { 4 }, - .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL", + .errstr = "R4 pointer arithmetic on map_value_or_null", .result = REJECT, .prog_type = BPF_PROG_TYPE_SCHED_CLS }, @@ -4801,7 +5067,7 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .fixup_map1 = { 4 }, - .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL", + .errstr = "R4 pointer arithmetic on map_value_or_null", .result = REJECT, .prog_type = BPF_PROG_TYPE_SCHED_CLS }, @@ -4822,7 +5088,7 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .fixup_map1 = { 4 }, - .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL", + .errstr = "R4 pointer arithmetic on map_value_or_null", .result = REJECT, .prog_type = BPF_PROG_TYPE_SCHED_CLS }, @@ -5150,7 +5416,7 @@ static struct bpf_test tests[] = { .errstr_unpriv = "R2 leaks addr into mem", .result_unpriv = REJECT, .result = REJECT, - .errstr = "BPF_XADD stores into R1 context is not allowed", + .errstr = "BPF_XADD stores into R1 inv is not allowed", }, { "leak pointer into ctx 2", @@ -5165,7 +5431,7 @@ static struct bpf_test tests[] = { .errstr_unpriv = "R10 leaks addr into mem", .result_unpriv = REJECT, .result = REJECT, - .errstr = "BPF_XADD stores into R1 context is not allowed", + .errstr = "BPF_XADD stores into R1 inv is not allowed", }, { "leak pointer into ctx 3", @@ -7137,7 +7403,7 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .fixup_map_in_map = { 3 }, - .errstr = "R1 pointer arithmetic on CONST_PTR_TO_MAP prohibited", + .errstr = "R1 pointer arithmetic on map_ptr prohibited", .result = REJECT, }, { @@ -8811,7 +9077,7 @@ static struct bpf_test tests[] = { BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, - .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END", + .errstr = "R3 pointer arithmetic on pkt_end", .result = REJECT, .prog_type = BPF_PROG_TYPE_XDP, }, @@ -8830,7 +9096,7 @@ static struct bpf_test tests[] = { BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, - .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END", + .errstr = "R3 pointer arithmetic on pkt_end", .result = REJECT, .prog_type = BPF_PROG_TYPE_XDP, }, @@ -12114,7 +12380,7 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .result = REJECT, - .errstr = "BPF_XADD stores into R2 packet", + .errstr = "BPF_XADD stores into R2 ctx", .prog_type = 
BPF_PROG_TYPE_XDP, }, { @@ -12442,6 +12708,214 @@ static struct bpf_test tests[] = { .result = ACCEPT, }, { + "reference tracking: leak potential reference", + .insns = { + BPF_SK_LOOKUP, + BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), /* leak reference */ + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .errstr = "Unreleased reference", + .result = REJECT, + }, + { + "reference tracking: leak potential reference on stack", + .insns = { + BPF_SK_LOOKUP, + BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), + BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .errstr = "Unreleased reference", + .result = REJECT, + }, + { + "reference tracking: leak potential reference on stack 2", + .insns = { + BPF_SK_LOOKUP, + BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), + BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .errstr = "Unreleased reference", + .result = REJECT, + }, + { + "reference tracking: zero potential reference", + .insns = { + BPF_SK_LOOKUP, + BPF_MOV64_IMM(BPF_REG_0, 0), /* leak reference */ + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .errstr = "Unreleased reference", + .result = REJECT, + }, + { + "reference tracking: copy and zero potential references", + .insns = { + BPF_SK_LOOKUP, + BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_7, 0), /* leak reference */ + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .errstr = "Unreleased reference", + .result = REJECT, + }, + { + "reference tracking: release reference without check", + .insns = { + BPF_SK_LOOKUP, + /* reference in r0 may be NULL */ + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_EMIT_CALL(BPF_FUNC_sk_release), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .errstr = "type=sock_or_null expected=sock", + .result = REJECT, + }, + { + "reference tracking: release reference", + .insns = { + BPF_SK_LOOKUP, + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), + BPF_EMIT_CALL(BPF_FUNC_sk_release), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + }, + { + "reference tracking: release reference 2", + .insns = { + BPF_SK_LOOKUP, + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), + BPF_EXIT_INSN(), + BPF_EMIT_CALL(BPF_FUNC_sk_release), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + }, + { + "reference tracking: release reference twice", + .insns = { + BPF_SK_LOOKUP, + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), + BPF_EMIT_CALL(BPF_FUNC_sk_release), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), + BPF_EMIT_CALL(BPF_FUNC_sk_release), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .errstr = "type=inv expected=sock", + .result = REJECT, + }, + { + "reference tracking: release reference twice inside branch", + .insns = { + BPF_SK_LOOKUP, + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), /* goto end */ + BPF_EMIT_CALL(BPF_FUNC_sk_release), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), + BPF_EMIT_CALL(BPF_FUNC_sk_release), + BPF_EXIT_INSN(), + }, + 
.prog_type = BPF_PROG_TYPE_SCHED_CLS, + .errstr = "type=inv expected=sock", + .result = REJECT, + }, + { + "reference tracking: alloc, check, free in one subbranch", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16), + /* if (offsetof(skb, mark) > data_len) exit; */ + BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1), + BPF_EXIT_INSN(), + BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2, + offsetof(struct __sk_buff, mark)), + BPF_SK_LOOKUP, + BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 1), /* mark == 0? */ + /* Leak reference in R0 */ + BPF_EXIT_INSN(), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */ + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_EMIT_CALL(BPF_FUNC_sk_release), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .errstr = "Unreleased reference", + .result = REJECT, + }, + { + "reference tracking: alloc, check, free in both subbranches", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16), + /* if (offsetof(skb, mark) > data_len) exit; */ + BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1), + BPF_EXIT_INSN(), + BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2, + offsetof(struct __sk_buff, mark)), + BPF_SK_LOOKUP, + BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 4), /* mark == 0? */ + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */ + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_EMIT_CALL(BPF_FUNC_sk_release), + BPF_EXIT_INSN(), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? 
*/ + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_EMIT_CALL(BPF_FUNC_sk_release), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + }, + { + "reference tracking in call: free reference in subprog", + .insns = { + BPF_SK_LOOKUP, + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */ + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + + /* subprog 1 */ + BPF_MOV64_REG(BPF_REG_2, BPF_REG_1), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1), + BPF_EMIT_CALL(BPF_FUNC_sk_release), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + }, + { "pass modified ctx pointer to helper, 1", .insns = { BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612), @@ -12511,6 +12985,407 @@ static struct bpf_test tests[] = { .prog_type = BPF_PROG_TYPE_SCHED_CLS, .result = ACCEPT, }, + { + "reference tracking in call: free reference in subprog and outside", + .insns = { + BPF_SK_LOOKUP, + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */ + BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), + BPF_EMIT_CALL(BPF_FUNC_sk_release), + BPF_EXIT_INSN(), + + /* subprog 1 */ + BPF_MOV64_REG(BPF_REG_2, BPF_REG_1), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1), + BPF_EMIT_CALL(BPF_FUNC_sk_release), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .errstr = "type=inv expected=sock", + .result = REJECT, + }, + { + "reference tracking in call: alloc & leak reference in subprog", + .insns = { + BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + + /* subprog 1 */ + BPF_MOV64_REG(BPF_REG_6, BPF_REG_4), + BPF_SK_LOOKUP, + /* spill unchecked sk_ptr into stack of caller */ + BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .errstr = "Unreleased reference", + .result = REJECT, + }, + { + "reference tracking in call: alloc in subprog, release outside", + .insns = { + BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), + BPF_EMIT_CALL(BPF_FUNC_sk_release), + BPF_EXIT_INSN(), + + /* subprog 1 */ + BPF_SK_LOOKUP, + BPF_EXIT_INSN(), /* return sk */ + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .retval = POINTER_VALUE, + .result = ACCEPT, + }, + { + "reference tracking in call: sk_ptr leak into caller stack", + .insns = { + BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + + /* subprog 1 */ + BPF_MOV64_REG(BPF_REG_5, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8), + BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5), + /* spill unchecked sk_ptr into stack of caller */ + BPF_MOV64_REG(BPF_REG_5, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_5, 0), + BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0), + BPF_EXIT_INSN(), + + /* subprog 2 */ + BPF_SK_LOOKUP, + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .errstr = "Unreleased reference", + .result = REJECT, + }, + { + "reference tracking in call: sk_ptr spill into caller stack", + .insns = { 
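+		/* Same spill pattern as the "leak into caller stack" case
+		 * above, but the spilled pointer is NULL-checked and released,
+		 * so the verifier is expected to accept it.
+		 */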
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + + /* subprog 1 */ + BPF_MOV64_REG(BPF_REG_5, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8), + BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8), + /* spill unchecked sk_ptr into stack of caller */ + BPF_MOV64_REG(BPF_REG_5, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_5, 0), + BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), + /* now the sk_ptr is verified, free the reference */ + BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_4, 0), + BPF_EMIT_CALL(BPF_FUNC_sk_release), + BPF_EXIT_INSN(), + + /* subprog 2 */ + BPF_SK_LOOKUP, + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + }, + { + "reference tracking: allow LD_ABS", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_SK_LOOKUP, + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), + BPF_EMIT_CALL(BPF_FUNC_sk_release), + BPF_LD_ABS(BPF_B, 0), + BPF_LD_ABS(BPF_H, 0), + BPF_LD_ABS(BPF_W, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + }, + { + "reference tracking: forbid LD_ABS while holding reference", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_SK_LOOKUP, + BPF_LD_ABS(BPF_B, 0), + BPF_LD_ABS(BPF_H, 0), + BPF_LD_ABS(BPF_W, 0), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), + BPF_EMIT_CALL(BPF_FUNC_sk_release), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .errstr = "BPF_LD_[ABS|IND] cannot be mixed with socket references", + .result = REJECT, + }, + { + "reference tracking: allow LD_IND", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_SK_LOOKUP, + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), + BPF_EMIT_CALL(BPF_FUNC_sk_release), + BPF_MOV64_IMM(BPF_REG_7, 1), + BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_7), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .retval = 1, + }, + { + "reference tracking: forbid LD_IND while holding reference", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_SK_LOOKUP, + BPF_MOV64_REG(BPF_REG_4, BPF_REG_0), + BPF_MOV64_IMM(BPF_REG_7, 1), + BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_7), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_4), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), + BPF_EMIT_CALL(BPF_FUNC_sk_release), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .errstr = "BPF_LD_[ABS|IND] cannot be mixed with socket references", + .result = REJECT, + }, + { + "reference tracking: check reference or tail call", + .insns = { + BPF_MOV64_REG(BPF_REG_7, BPF_REG_1), + BPF_SK_LOOKUP, + /* if (sk) bpf_sk_release() */ + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 7), + /* bpf_tail_call() */ + BPF_MOV64_IMM(BPF_REG_3, 2), + BPF_LD_MAP_FD(BPF_REG_2, 0), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_tail_call), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_EMIT_CALL(BPF_FUNC_sk_release), + BPF_EXIT_INSN(), + }, + .fixup_prog1 = { 17 }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + }, + { + "reference tracking: release reference then tail call", + .insns = { + 
BPF_MOV64_REG(BPF_REG_7, BPF_REG_1), + BPF_SK_LOOKUP, + /* if (sk) bpf_sk_release() */ + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), + BPF_EMIT_CALL(BPF_FUNC_sk_release), + /* bpf_tail_call() */ + BPF_MOV64_IMM(BPF_REG_3, 2), + BPF_LD_MAP_FD(BPF_REG_2, 0), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_tail_call), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_prog1 = { 18 }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + }, + { + "reference tracking: leak possible reference over tail call", + .insns = { + BPF_MOV64_REG(BPF_REG_7, BPF_REG_1), + /* Look up socket and store in REG_6 */ + BPF_SK_LOOKUP, + /* bpf_tail_call() */ + BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), + BPF_MOV64_IMM(BPF_REG_3, 2), + BPF_LD_MAP_FD(BPF_REG_2, 0), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_tail_call), + BPF_MOV64_IMM(BPF_REG_0, 0), + /* if (sk) bpf_sk_release() */ + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), + BPF_EMIT_CALL(BPF_FUNC_sk_release), + BPF_EXIT_INSN(), + }, + .fixup_prog1 = { 16 }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .errstr = "tail_call would lead to reference leak", + .result = REJECT, + }, + { + "reference tracking: leak checked reference over tail call", + .insns = { + BPF_MOV64_REG(BPF_REG_7, BPF_REG_1), + /* Look up socket and store in REG_6 */ + BPF_SK_LOOKUP, + BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), + /* if (!sk) goto end */ + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), + /* bpf_tail_call() */ + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_LD_MAP_FD(BPF_REG_2, 0), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_tail_call), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), + BPF_EMIT_CALL(BPF_FUNC_sk_release), + BPF_EXIT_INSN(), + }, + .fixup_prog1 = { 17 }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .errstr = "tail_call would lead to reference leak", + .result = REJECT, + }, + { + "reference tracking: mangle and release sock_or_null", + .insns = { + BPF_SK_LOOKUP, + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), + BPF_EMIT_CALL(BPF_FUNC_sk_release), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .errstr = "R1 pointer arithmetic on sock_or_null prohibited", + .result = REJECT, + }, + { + "reference tracking: mangle and release sock", + .insns = { + BPF_SK_LOOKUP, + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5), + BPF_EMIT_CALL(BPF_FUNC_sk_release), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .errstr = "R1 pointer arithmetic on sock prohibited", + .result = REJECT, + }, + { + "reference tracking: access member", + .insns = { + BPF_SK_LOOKUP, + BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), + BPF_EMIT_CALL(BPF_FUNC_sk_release), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + }, + { + "reference tracking: write to member", + .insns = { + BPF_SK_LOOKUP, + BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), + BPF_LD_IMM64(BPF_REG_2, 42), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_2, + offsetof(struct bpf_sock, mark)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), + 
BPF_EMIT_CALL(BPF_FUNC_sk_release), + BPF_LD_IMM64(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .errstr = "cannot write into socket", + .result = REJECT, + }, + { + "reference tracking: invalid 64-bit access of member", + .insns = { + BPF_SK_LOOKUP, + BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), + BPF_EMIT_CALL(BPF_FUNC_sk_release), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .errstr = "invalid bpf_sock access off=0 size=8", + .result = REJECT, + }, + { + "reference tracking: access after release", + .insns = { + BPF_SK_LOOKUP, + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), + BPF_EMIT_CALL(BPF_FUNC_sk_release), + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .errstr = "!read_ok", + .result = REJECT, + }, + { + "reference tracking: direct access for lookup", + .insns = { + /* Check that the packet is at least 64B long */ + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9), + /* sk = sk_lookup_tcp(ctx, skb->data, ...) */ + BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)), + BPF_MOV64_IMM(BPF_REG_4, 0), + BPF_MOV64_IMM(BPF_REG_5, 0), + BPF_EMIT_CALL(BPF_FUNC_sk_lookup_tcp), + BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), + BPF_EMIT_CALL(BPF_FUNC_sk_release), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + }, }; static int probe_filter_length(const struct bpf_insn *fp) @@ -12536,18 +13411,18 @@ static int create_map(uint32_t type, uint32_t size_key, return fd; } -static int create_prog_dummy1(void) +static int create_prog_dummy1(enum bpf_map_type prog_type) { struct bpf_insn prog[] = { BPF_MOV64_IMM(BPF_REG_0, 42), BPF_EXIT_INSN(), }; - return bpf_load_program(BPF_PROG_TYPE_SOCKET_FILTER, prog, + return bpf_load_program(prog_type, prog, ARRAY_SIZE(prog), "GPL", 0, NULL, 0); } -static int create_prog_dummy2(int mfd, int idx) +static int create_prog_dummy2(enum bpf_map_type prog_type, int mfd, int idx) { struct bpf_insn prog[] = { BPF_MOV64_IMM(BPF_REG_3, idx), @@ -12558,11 +13433,12 @@ static int create_prog_dummy2(int mfd, int idx) BPF_EXIT_INSN(), }; - return bpf_load_program(BPF_PROG_TYPE_SOCKET_FILTER, prog, + return bpf_load_program(prog_type, prog, ARRAY_SIZE(prog), "GPL", 0, NULL, 0); } -static int create_prog_array(uint32_t max_elem, int p1key) +static int create_prog_array(enum bpf_map_type prog_type, uint32_t max_elem, + int p1key) { int p2key = 1; int mfd, p1fd, p2fd; @@ -12574,8 +13450,8 @@ static int create_prog_array(uint32_t max_elem, int p1key) return -1; } - p1fd = create_prog_dummy1(); - p2fd = create_prog_dummy2(mfd, p2key); + p1fd = create_prog_dummy1(prog_type); + p2fd = create_prog_dummy2(prog_type, mfd, p2key); if (p1fd < 0 || p2fd < 0) goto out; if (bpf_map_update_elem(mfd, &p1key, &p1fd, BPF_ANY) < 0) @@ -12615,23 +13491,25 @@ static int create_map_in_map(void) return outer_map_fd; } -static int create_cgroup_storage(void) +static int create_cgroup_storage(bool percpu) { + enum bpf_map_type type = percpu 
? BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE : + BPF_MAP_TYPE_CGROUP_STORAGE; int fd; - fd = bpf_create_map(BPF_MAP_TYPE_CGROUP_STORAGE, - sizeof(struct bpf_cgroup_storage_key), + fd = bpf_create_map(type, sizeof(struct bpf_cgroup_storage_key), TEST_DATA_LEN, 0, 0); if (fd < 0) - printf("Failed to create array '%s'!\n", strerror(errno)); + printf("Failed to create cgroup storage '%s'!\n", + strerror(errno)); return fd; } static char bpf_vlog[UINT_MAX >> 8]; -static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog, - int *map_fds) +static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type, + struct bpf_insn *prog, int *map_fds) { int *fixup_map1 = test->fixup_map1; int *fixup_map2 = test->fixup_map2; @@ -12641,6 +13519,7 @@ static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog, int *fixup_prog2 = test->fixup_prog2; int *fixup_map_in_map = test->fixup_map_in_map; int *fixup_cgroup_storage = test->fixup_cgroup_storage; + int *fixup_percpu_cgroup_storage = test->fixup_percpu_cgroup_storage; if (test->fill_helper) test->fill_helper(test); @@ -12686,7 +13565,7 @@ static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog, } if (*fixup_prog1) { - map_fds[4] = create_prog_array(4, 0); + map_fds[4] = create_prog_array(prog_type, 4, 0); do { prog[*fixup_prog1].imm = map_fds[4]; fixup_prog1++; @@ -12694,7 +13573,7 @@ static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog, } if (*fixup_prog2) { - map_fds[5] = create_prog_array(8, 7); + map_fds[5] = create_prog_array(prog_type, 8, 7); do { prog[*fixup_prog2].imm = map_fds[5]; fixup_prog2++; @@ -12710,12 +13589,20 @@ static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog, } if (*fixup_cgroup_storage) { - map_fds[7] = create_cgroup_storage(); + map_fds[7] = create_cgroup_storage(false); do { prog[*fixup_cgroup_storage].imm = map_fds[7]; fixup_cgroup_storage++; } while (*fixup_cgroup_storage); } + + if (*fixup_percpu_cgroup_storage) { + map_fds[8] = create_cgroup_storage(true); + do { + prog[*fixup_percpu_cgroup_storage].imm = map_fds[8]; + fixup_percpu_cgroup_storage++; + } while (*fixup_percpu_cgroup_storage); + } } static void do_test_single(struct bpf_test *test, bool unpriv, @@ -12732,11 +13619,13 @@ static void do_test_single(struct bpf_test *test, bool unpriv, for (i = 0; i < MAX_NR_MAPS; i++) map_fds[i] = -1; - do_test_fixup(test, prog, map_fds); + if (!prog_type) + prog_type = BPF_PROG_TYPE_SOCKET_FILTER; + do_test_fixup(test, prog_type, prog, map_fds); prog_len = probe_filter_length(prog); - fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER, - prog, prog_len, test->flags & F_LOAD_WITH_STRICT_ALIGNMENT, + fd_prog = bpf_verify_program(prog_type, prog, prog_len, + test->flags & F_LOAD_WITH_STRICT_ALIGNMENT, "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 1); expected_ret = unpriv && test->result_unpriv != UNDEF ?
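Taken together, the reference-tracking tests above pin down a simple verifier contract: a socket returned by the new bpf_sk_lookup_tcp() helper must be NULL-checked, may only be read while the reference is held, and must be released exactly once with bpf_sk_release(). For orientation, a rough C equivalent of the ACCEPT pattern follows. This is an illustrative sketch, not part of the patch; the hand-declared helper prototypes and the "classifier" section name are assumptions based on this series' uapi additions.

/* Sketch only: the C shape of the pattern the raw-instruction tests encode.
 * Built with clang -O2 -target bpf, loaded as BPF_PROG_TYPE_SCHED_CLS.
 */
#include <linux/bpf.h>
#include <linux/pkt_cls.h>

static struct bpf_sock *(*bpf_sk_lookup_tcp)(void *ctx,
					     struct bpf_sock_tuple *tuple,
					     __u32 tuple_size, __u64 netns,
					     __u64 flags) =
	(void *) BPF_FUNC_sk_lookup_tcp;
static int (*bpf_sk_release)(struct bpf_sock *sk) =
	(void *) BPF_FUNC_sk_release;

__attribute__((section("classifier"), used))
int reftrack_ok(struct __sk_buff *skb)
{
	struct bpf_sock_tuple tuple = {};	/* zeroed so the stack is initialized */
	struct bpf_sock *sk;

	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0);
	if (!sk)
		return TC_ACT_OK;
	/* Fields of *sk may only be read here, while the reference is
	 * held; any access after bpf_sk_release() is rejected ("!read_ok"),
	 * and returning without releasing sk fails verification too.
	 */
	bpf_sk_release(sk);
	return TC_ACT_OK;
}

char _license[] __attribute__((section("license"), used)) = "GPL";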
diff --git a/tools/testing/selftests/bpf/with_addr.sh b/tools/testing/selftests/bpf/with_addr.sh new file mode 100755 index 000000000000..ffcd3953f94c --- /dev/null +++ b/tools/testing/selftests/bpf/with_addr.sh @@ -0,0 +1,54 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 +# +# add private ipv4 and ipv6 addresses to loopback + +readonly V6_INNER='100::a/128' +readonly V4_INNER='192.168.0.1/32' + +if getopts ":s" opt; then + readonly SIT_DEV_NAME='sixtofourtest0' + readonly V6_SIT='2::/64' + readonly V4_SIT='172.17.0.1/32' + shift +fi + +fail() { + echo "error: $*" 1>&2 + exit 1 +} + +setup() { + ip -6 addr add "${V6_INNER}" dev lo || fail 'failed to setup v6 address' + ip -4 addr add "${V4_INNER}" dev lo || fail 'failed to setup v4 address' + + if [[ -n "${V6_SIT}" ]]; then + ip link add "${SIT_DEV_NAME}" type sit remote any local any \ + || fail 'failed to add sit' + ip link set dev "${SIT_DEV_NAME}" up \ + || fail 'failed to bring sit device up' + ip -6 addr add "${V6_SIT}" dev "${SIT_DEV_NAME}" \ + || fail 'failed to setup v6 SIT address' + ip -4 addr add "${V4_SIT}" dev "${SIT_DEV_NAME}" \ + || fail 'failed to setup v4 SIT address' + fi + + sleep 2 # avoid race causing bind to fail +} + +cleanup() { + if [[ -n "${V6_SIT}" ]]; then + ip -4 addr del "${V4_SIT}" dev "${SIT_DEV_NAME}" + ip -6 addr del "${V6_SIT}" dev "${SIT_DEV_NAME}" + ip link del "${SIT_DEV_NAME}" + fi + + ip -4 addr del "${V4_INNER}" dev lo + ip -6 addr del "${V6_INNER}" dev lo +} + +trap cleanup EXIT + +setup +"$@" +exit "$?" diff --git a/tools/testing/selftests/bpf/with_tunnels.sh b/tools/testing/selftests/bpf/with_tunnels.sh new file mode 100755 index 000000000000..e24949ed3a20 --- /dev/null +++ b/tools/testing/selftests/bpf/with_tunnels.sh @@ -0,0 +1,36 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 +# +# setup tunnels for flow dissection test + +readonly SUFFIX="test_$(mktemp -u XXXX)" +CONFIG="remote 127.0.0.2 local 127.0.0.1 dev lo" + +setup() { + ip link add "ipip_${SUFFIX}" type ipip ${CONFIG} + ip link add "gre_${SUFFIX}" type gre ${CONFIG} + ip link add "sit_${SUFFIX}" type sit ${CONFIG} + + echo "tunnels before test:" + ip tunnel show + + ip link set "ipip_${SUFFIX}" up + ip link set "gre_${SUFFIX}" up + ip link set "sit_${SUFFIX}" up +} + + +cleanup() { + ip tunnel del "ipip_${SUFFIX}" + ip tunnel del "gre_${SUFFIX}" + ip tunnel del "sit_${SUFFIX}" + + echo "tunnels after test:" + ip tunnel show +} + +trap cleanup EXIT + +setup +"$@" +exit "$?" diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh new file mode 100755 index 000000000000..0150bb2741eb --- /dev/null +++ b/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh @@ -0,0 +1,347 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 +# +# A test for switch behavior under MC overload. An issue in Spectrum chips +# causes throughput of UC traffic to drop severely when a switch is under heavy +# MC load. This issue can be overcome by putting the switch to MC-aware mode. +# This test verifies that UC performance stays intact even as the switch is +# under MC flood, and therefore that the MC-aware mode is enabled and correctly +# configured. +# +# Because mlxsw throttles CPU port, the traffic can't actually reach userspace +# at full speed. 
That makes it impossible to use iperf3 to simply measure the +# throughput, because many packets (that reach $h3) don't get to the kernel at +# all even in UDP mode (the situation is even worse in TCP mode, where one can't +# hope to see more than a couple Mbps). +# +# So instead we send traffic with mausezahn and use RX ethtool counters at $h3. +# Multicast traffic is untagged, unicast traffic is tagged with PCP 1. Therefore +# each gets a different priority and we can use per-prio ethtool counters to +# measure the throughput. In order to avoid prioritizing unicast traffic, prio +# qdisc is installed on $swp3 and maps all priorities to the same band #7 (and +# thus TC 0). +# +# Mausezahn can't actually saturate the links unless it's using large frames. +# Thus we set MTU to 10K on all involved interfaces. Then both unicast and +# multicast traffic uses 8K frames. +# +# +-----------------------+ +----------------------------------+ +# | H1 | | H2 | +# | | | unicast --> + $h2.111 | +# | | | traffic | 192.0.2.129/28 | +# | multicast | | | e-qos-map 0:1 | +# | traffic | | | | +# | $h1 + <----- | | + $h2 | +# +-----|-----------------+ +--------------|-------------------+ +# | | +# +-----|-------------------------------------------------|-------------------+ +# | + $swp1 + $swp2 | +# | | >1Gbps | >1Gbps | +# | +---|----------------+ +----------|----------------+ | +# | | + $swp1.1 | | + $swp2.111 | | +# | | BR1 | SW | BR111 | | +# | | + $swp3.1 | | + $swp3.111 | | +# | +---|----------------+ +----------|----------------+ | +# | \_________________________________________________/ | +# | | | +# | + $swp3 | +# | | 1Gbps bottleneck | +# | | prio qdisc: {0..7} -> 7 | +# +------------------------------------|--------------------------------------+ +# | +# +--|-----------------+ +# | + $h3 H3 | +# | | | +# | + $h3.111 | +# | 192.0.2.130/28 | +# +--------------------+ + +ALL_TESTS=" + ping_ipv4 + test_mc_aware +" + +lib_dir=$(dirname $0)/../../../net/forwarding + +NUM_NETIFS=6 +source $lib_dir/lib.sh + +h1_create() +{ + simple_if_init $h1 + mtu_set $h1 10000 +} + +h1_destroy() +{ + mtu_restore $h1 + simple_if_fini $h1 +} + +h2_create() +{ + simple_if_init $h2 + mtu_set $h2 10000 + + vlan_create $h2 111 v$h2 192.0.2.129/28 + ip link set dev $h2.111 type vlan egress-qos-map 0:1 +} + +h2_destroy() +{ + vlan_destroy $h2 111 + + mtu_restore $h2 + simple_if_fini $h2 +} + +h3_create() +{ + simple_if_init $h3 + mtu_set $h3 10000 + + vlan_create $h3 111 v$h3 192.0.2.130/28 +} + +h3_destroy() +{ + vlan_destroy $h3 111 + + mtu_restore $h3 + simple_if_fini $h3 +} + +switch_create() +{ + ip link set dev $swp1 up + mtu_set $swp1 10000 + + ip link set dev $swp2 up + mtu_set $swp2 10000 + + ip link set dev $swp3 up + mtu_set $swp3 10000 + + vlan_create $swp2 111 + vlan_create $swp3 111 + + ethtool -s $swp3 speed 1000 autoneg off + tc qdisc replace dev $swp3 root handle 3: \ + prio bands 8 priomap 7 7 7 7 7 7 7 7 + + ip link add name br1 type bridge vlan_filtering 0 + ip link set dev br1 up + ip link set dev $swp1 master br1 + ip link set dev $swp3 master br1 + + ip link add name br111 type bridge vlan_filtering 0 + ip link set dev br111 up + ip link set dev $swp2.111 master br111 + ip link set dev $swp3.111 master br111 +} + +switch_destroy() +{ + ip link del dev br111 + ip link del dev br1 + + tc qdisc del dev $swp3 root handle 3: + ethtool -s $swp3 autoneg on + + vlan_destroy $swp3 111 + vlan_destroy $swp2 111 + + mtu_restore $swp3 + ip link set dev $swp3 down + + mtu_restore $swp2 + ip link set dev $swp2 down + + 
mtu_restore $swp1 + ip link set dev $swp1 down +} + +setup_prepare() +{ + h1=${NETIFS[p1]} + swp1=${NETIFS[p2]} + + swp2=${NETIFS[p3]} + h2=${NETIFS[p4]} + + swp3=${NETIFS[p5]} + h3=${NETIFS[p6]} + + h3mac=$(mac_get $h3) + + vrf_prepare + + h1_create + h2_create + h3_create + switch_create +} + +cleanup() +{ + pre_cleanup + + switch_destroy + h3_destroy + h2_destroy + h1_destroy + + vrf_cleanup +} + +ping_ipv4() +{ + ping_test $h2 192.0.2.130 +} + +humanize() +{ + local speed=$1; shift + + for unit in bps Kbps Mbps Gbps; do + if (($(echo "$speed < 1024" | bc))); then + break + fi + + speed=$(echo "scale=1; $speed / 1024" | bc) + done + + echo "$speed${unit}" +} + +rate() +{ + local t0=$1; shift + local t1=$1; shift + local interval=$1; shift + + echo $((8 * (t1 - t0) / interval)) +} + +check_rate() +{ + local rate=$1; shift + local min=$1; shift + local what=$1; shift + + if ((rate > min)); then + return 0 + fi + + echo "$what $(humanize $rate) < $(humanize $min)" > /dev/stderr + return 1 +} + +measure_uc_rate() +{ + local what=$1; shift + + local interval=10 + local i + local ret=0 + + # Dips in performance might cause momentary ingress rate to drop below + # 1Gbps. That wouldn't saturate egress and MC would thus get through, + # seemingly winning bandwidth on account of UC. Demand at least 2Gbps + # average ingress rate to somewhat mitigate this. + local min_ingress=2147483648 + + mausezahn $h2.111 -p 8000 -A 192.0.2.129 -B 192.0.2.130 -c 0 \ + -a own -b $h3mac -t udp -q & + sleep 1 + + for i in {5..0}; do + local t0=$(ethtool_stats_get $h3 rx_octets_prio_1) + local u0=$(ethtool_stats_get $swp2 rx_octets_prio_1) + sleep $interval + local t1=$(ethtool_stats_get $h3 rx_octets_prio_1) + local u1=$(ethtool_stats_get $swp2 rx_octets_prio_1) + + local ir=$(rate $u0 $u1 $interval) + local er=$(rate $t0 $t1 $interval) + + if check_rate $ir $min_ingress "$what ingress rate"; then + break + fi + + # Fail the test if we can't get the throughput. + if ((i == 0)); then + ret=1 + fi + done + + # Suppress noise from killing mausezahn. + { kill %% && wait; } 2>/dev/null + + echo $ir $er + exit $ret +} + +test_mc_aware() +{ + RET=0 + + local -a uc_rate + uc_rate=($(measure_uc_rate "UC-only")) + check_err $? "Could not get high enough UC-only ingress rate" + local ucth1=${uc_rate[1]} + + mausezahn $h1 -p 8000 -c 0 -a own -b bc -t udp -q & + + local d0=$(date +%s) + local t0=$(ethtool_stats_get $h3 rx_octets_prio_0) + local u0=$(ethtool_stats_get $swp1 rx_octets_prio_0) + + local -a uc_rate_2 + uc_rate_2=($(measure_uc_rate "UC+MC")) + check_err $? "Could not get high enough UC+MC ingress rate" + local ucth2=${uc_rate_2[1]} + + local d1=$(date +%s) + local t1=$(ethtool_stats_get $h3 rx_octets_prio_0) + local u1=$(ethtool_stats_get $swp1 rx_octets_prio_0) + + local deg=$(bc <<< " + scale=2 + ret = 100 * ($ucth1 - $ucth2) / $ucth1 + if (ret > 0) { ret } else { 0 } + ") + check_err $(bc <<< "$deg > 10") + + local interval=$((d1 - d0)) + local mc_ir=$(rate $u0 $u1 $interval) + local mc_er=$(rate $t0 $t1 $interval) + + # Suppress noise from killing mausezahn.
+ { kill %% && wait; } 2>/dev/null + + log_test "UC performance under MC overload" + + echo "UC-only throughput $(humanize $ucth1)" + echo "UC+MC throughput $(humanize $ucth2)" + echo "Degradation $deg %" + echo + echo "Full report:" + echo " UC only:" + echo " ingress UC throughput $(humanize ${uc_rate[0]})" + echo " egress UC throughput $(humanize ${uc_rate[1]})" + echo " UC+MC:" + echo " ingress UC throughput $(humanize ${uc_rate_2[0]})" + echo " egress UC throughput $(humanize ${uc_rate_2[1]})" + echo " ingress MC throughput $(humanize $mc_ir)" + echo " egress MC throughput $(humanize $mc_er)" +} + +trap cleanup EXIT + +setup_prepare +setup_wait + +tests_run + +exit $EXIT_STATUS diff --git a/tools/testing/selftests/net/.gitignore b/tools/testing/selftests/net/.gitignore index 78b24cf76f40..8cf22b3c2563 100644 --- a/tools/testing/selftests/net/.gitignore +++ b/tools/testing/selftests/net/.gitignore @@ -14,3 +14,4 @@ udpgso_bench_rx udpgso_bench_tx tcp_inq tls +ip_defrag diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile index 919aa2ac00af..256d82d5fa87 100644 --- a/tools/testing/selftests/net/Makefile +++ b/tools/testing/selftests/net/Makefile @@ -5,13 +5,13 @@ CFLAGS = -Wall -Wl,--no-as-needed -O2 -g CFLAGS += -I../../../../usr/include/ TEST_PROGS := run_netsocktests run_afpackettests test_bpf.sh netdevice.sh rtnetlink.sh -TEST_PROGS += fib_tests.sh fib-onlink-tests.sh pmtu.sh udpgso.sh +TEST_PROGS += fib_tests.sh fib-onlink-tests.sh pmtu.sh udpgso.sh ip_defrag.sh TEST_PROGS += udpgso_bench.sh fib_rule_tests.sh msg_zerocopy.sh psock_snd.sh TEST_PROGS_EXTENDED := in_netns.sh TEST_GEN_FILES = socket TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy TEST_GEN_FILES += tcp_mmap tcp_inq psock_snd -TEST_GEN_FILES += udpgso udpgso_bench_tx udpgso_bench_rx +TEST_GEN_FILES += udpgso udpgso_bench_tx udpgso_bench_rx ip_defrag TEST_GEN_PROGS = reuseport_bpf reuseport_bpf_cpu reuseport_bpf_numa TEST_GEN_PROGS += reuseport_dualstack reuseaddr_conflict tls diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh index 0f45633bd634..802b4af18729 100755 --- a/tools/testing/selftests/net/fib_tests.sh +++ b/tools/testing/selftests/net/fib_tests.sh @@ -9,11 +9,11 @@ ret=0 ksft_skip=4 # all tests in this script.
Can be overridden with -t option -TESTS="unregister down carrier nexthop ipv6_rt ipv4_rt ipv6_addr_metric ipv4_addr_metric" +TESTS="unregister down carrier nexthop ipv6_rt ipv4_rt ipv6_addr_metric ipv4_addr_metric ipv6_route_metrics ipv4_route_metrics" VERBOSE=0 PAUSE_ON_FAIL=no PAUSE=no -IP="ip -netns testns" +IP="ip -netns ns1" log_test() { @@ -47,8 +47,10 @@ log_test() setup() { set -e - ip netns add testns + ip netns add ns1 $IP link set dev lo up + ip netns exec ns1 sysctl -qw net.ipv4.ip_forward=1 + ip netns exec ns1 sysctl -qw net.ipv6.conf.all.forwarding=1 $IP link add dummy0 type dummy $IP link set dev dummy0 up @@ -61,7 +63,8 @@ setup() cleanup() { $IP link del dev dummy0 &> /dev/null - ip netns del testns + ip netns del ns1 + ip netns del ns2 &> /dev/null } get_linklocal() @@ -639,11 +642,14 @@ add_initial_route6() check_route6() { - local pfx="2001:db8:104::/64" + local pfx local expected="$1" local out local rc=0 + set -- $expected + pfx=$1 + out=$($IP -6 ro ls match ${pfx} | sed -e 's/ pref medium//') [ "${out}" = "${expected}" ] && return 0 @@ -690,28 +696,33 @@ route_setup() [ "${VERBOSE}" = "1" ] && set -x set -e - $IP li add red up type vrf table 101 + ip netns add ns2 + ip -netns ns2 link set dev lo up + ip netns exec ns2 sysctl -qw net.ipv4.ip_forward=1 + ip netns exec ns2 sysctl -qw net.ipv6.conf.all.forwarding=1 + $IP li add veth1 type veth peer name veth2 $IP li add veth3 type veth peer name veth4 $IP li set veth1 up $IP li set veth3 up - $IP li set veth2 vrf red up - $IP li set veth4 vrf red up - $IP li add dummy1 type dummy - $IP li set dummy1 vrf red up - - $IP -6 addr add 2001:db8:101::1/64 dev veth1 - $IP -6 addr add 2001:db8:101::2/64 dev veth2 - $IP -6 addr add 2001:db8:103::1/64 dev veth3 - $IP -6 addr add 2001:db8:103::2/64 dev veth4 - $IP -6 addr add 2001:db8:104::1/64 dev dummy1 + $IP li set veth2 netns ns2 up + $IP li set veth4 netns ns2 up + ip -netns ns2 li add dummy1 type dummy + ip -netns ns2 li set dummy1 up + $IP -6 addr add 2001:db8:101::1/64 dev veth1 nodad + $IP -6 addr add 2001:db8:103::1/64 dev veth3 nodad $IP addr add 172.16.101.1/24 dev veth1 - $IP addr add 172.16.101.2/24 dev veth2 $IP addr add 172.16.103.1/24 dev veth3 - $IP addr add 172.16.103.2/24 dev veth4 - $IP addr add 172.16.104.1/24 dev dummy1 + + ip -netns ns2 -6 addr add 2001:db8:101::2/64 dev veth2 nodad + ip -netns ns2 -6 addr add 2001:db8:103::2/64 dev veth4 nodad + ip -netns ns2 -6 addr add 2001:db8:104::1/64 dev dummy1 nodad + + ip -netns ns2 addr add 172.16.101.2/24 dev veth2 + ip -netns ns2 addr add 172.16.103.2/24 dev veth4 + ip -netns ns2 addr add 172.16.104.1/24 dev dummy1 set +ex } @@ -944,7 +955,7 @@ ipv6_addr_metric_test() log_test $rc 0 "Modify metric of address" # verify prefix route removed on down - run_cmd "ip netns exec testns sysctl -qw net.ipv6.conf.all.keep_addr_on_down=1" + run_cmd "ip netns exec ns1 sysctl -qw net.ipv6.conf.all.keep_addr_on_down=1" run_cmd "$IP li set dev dummy2 down" rc=$? if [ $rc -eq 0 ]; then @@ -967,6 +978,77 @@ ipv6_addr_metric_test() cleanup } +ipv6_route_metrics_test() +{ + local rc + + echo + echo "IPv6 routes with metrics" + + route_setup + + # + # single path with metrics + # + run_cmd "$IP -6 ro add 2001:db8:111::/64 via 2001:db8:101::2 mtu 1400" + rc=$? + if [ $rc -eq 0 ]; then + check_route6 "2001:db8:111::/64 via 2001:db8:101::2 dev veth1 metric 1024 mtu 1400" + rc=$? 
+ fi + log_test $rc 0 "Single path route with mtu metric" + + + # + # multipath via separate routes with metrics + # + run_cmd "$IP -6 ro add 2001:db8:112::/64 via 2001:db8:101::2 mtu 1400" + run_cmd "$IP -6 ro append 2001:db8:112::/64 via 2001:db8:103::2" + rc=$? + if [ $rc -eq 0 ]; then + check_route6 "2001:db8:112::/64 metric 1024 mtu 1400 nexthop via 2001:db8:101::2 dev veth1 weight 1 nexthop via 2001:db8:103::2 dev veth3 weight 1" + rc=$? + fi + log_test $rc 0 "Multipath route via 2 single routes with mtu metric on first" + + # second route is coalesced to first to make a multipath route. + # MTU of the second path is hidden from display! + run_cmd "$IP -6 ro add 2001:db8:113::/64 via 2001:db8:101::2" + run_cmd "$IP -6 ro append 2001:db8:113::/64 via 2001:db8:103::2 mtu 1400" + rc=$? + if [ $rc -eq 0 ]; then + check_route6 "2001:db8:113::/64 metric 1024 nexthop via 2001:db8:101::2 dev veth1 weight 1 nexthop via 2001:db8:103::2 dev veth3 weight 1" + rc=$? + fi + log_test $rc 0 "Multipath route via 2 single routes with mtu metric on 2nd" + + run_cmd "$IP -6 ro del 2001:db8:113::/64 via 2001:db8:101::2" + if [ $? -eq 0 ]; then + check_route6 "2001:db8:113::/64 via 2001:db8:103::2 dev veth3 metric 1024 mtu 1400" + log_test $? 0 " MTU of second leg" + fi + + # + # multipath with metrics + # + run_cmd "$IP -6 ro add 2001:db8:115::/64 mtu 1400 nexthop via 2001:db8:101::2 nexthop via 2001:db8:103::2" + rc=$? + if [ $rc -eq 0 ]; then + check_route6 "2001:db8:115::/64 metric 1024 mtu 1400 nexthop via 2001:db8:101::2 dev veth1 weight 1 nexthop via 2001:db8:103::2 dev veth3 weight 1" + rc=$? + fi + log_test $rc 0 "Multipath route with mtu metric" + + $IP -6 ro add 2001:db8:104::/64 via 2001:db8:101::2 mtu 1300 + run_cmd "ip netns exec ns1 ping6 -w1 -c1 -s 1500 2001:db8:104::1" + log_test $? 0 "Using route with mtu metric" + + run_cmd "$IP -6 ro add 2001:db8:114::/64 via 2001:db8:101::2 congctl lock foo" + log_test $? 2 "Invalid metric (fails metric_convert)" + + route_cleanup +} + # add route for a prefix, flushing any existing routes first # expected to be the first step of a test add_route() @@ -1005,11 +1087,15 @@ add_initial_route() check_route() { - local pfx="172.16.104.0/24" + local pfx local expected="$1" local out local rc=0 + set -- $expected + pfx=$1 + [ "${pfx}" = "unreachable" ] && pfx=$2 + out=$($IP ro ls match ${pfx}) [ "${out}" = "${expected}" ] && return 0 @@ -1319,6 +1405,43 @@ ipv4_addr_metric_test() cleanup } +ipv4_route_metrics_test() +{ + local rc + + echo + echo "IPv4 routes with metrics" + + route_setup + + run_cmd "$IP ro add 172.16.111.0/24 via 172.16.101.2 mtu 1400" + rc=$? + if [ $rc -eq 0 ]; then + check_route "172.16.111.0/24 via 172.16.101.2 dev veth1 mtu 1400" + rc=$? + fi + log_test $rc 0 "Single path route with mtu metric" + + + run_cmd "$IP ro add 172.16.112.0/24 mtu 1400 nexthop via 172.16.101.2 nexthop via 172.16.103.2" + rc=$? + if [ $rc -eq 0 ]; then + check_route "172.16.112.0/24 mtu 1400 nexthop via 172.16.101.2 dev veth1 weight 1 nexthop via 172.16.103.2 dev veth3 weight 1" + rc=$? + fi + log_test $rc 0 "Multipath route with mtu metric" + + $IP ro add 172.16.104.0/24 via 172.16.101.2 mtu 1300 + run_cmd "ip netns exec ns1 ping -w1 -c1 -s 1500 172.16.104.1" + log_test $? 0 "Using route with mtu metric" + + run_cmd "$IP ro add 172.16.111.0/24 via 172.16.101.2 congctl lock foo" + log_test $?
2 "Invalid metric (fails metric_convert)" + + route_cleanup +} + + ################################################################################ # usage @@ -1385,6 +1508,8 @@ do ipv4_route_test|ipv4_rt) ipv4_route_test;; ipv6_addr_metric) ipv6_addr_metric_test;; ipv4_addr_metric) ipv4_addr_metric_test;; + ipv6_route_metrics) ipv6_route_metrics_test;; + ipv4_route_metrics) ipv4_route_metrics_test;; help) echo "Test names: $TESTS"; exit 0;; esac diff --git a/tools/testing/selftests/net/forwarding/bridge_sticky_fdb.sh b/tools/testing/selftests/net/forwarding/bridge_sticky_fdb.sh new file mode 100755 index 000000000000..1f8ef0eff862 --- /dev/null +++ b/tools/testing/selftests/net/forwarding/bridge_sticky_fdb.sh @@ -0,0 +1,69 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +ALL_TESTS="sticky" +NUM_NETIFS=4 +TEST_MAC=de:ad:be:ef:13:37 +source lib.sh + +switch_create() +{ + ip link add dev br0 type bridge + + ip link set dev $swp1 master br0 + ip link set dev $swp2 master br0 + + ip link set dev br0 up + ip link set dev $h1 up + ip link set dev $swp1 up + ip link set dev $h2 up + ip link set dev $swp2 up +} + +switch_destroy() +{ + ip link set dev $swp2 down + ip link set dev $h2 down + ip link set dev $swp1 down + ip link set dev $h1 down + + ip link del dev br0 +} + +setup_prepare() +{ + h1=${NETIFS[p1]} + swp1=${NETIFS[p2]} + h2=${NETIFS[p3]} + swp2=${NETIFS[p4]} + + switch_create +} + +cleanup() +{ + pre_cleanup + switch_destroy +} + +sticky() +{ + bridge fdb add $TEST_MAC dev $swp1 master static sticky + check_err $? "Could not add fdb entry" + bridge fdb del $TEST_MAC dev $swp1 vlan 1 master static sticky + $MZ $h2 -c 1 -a $TEST_MAC -t arp "request" -q + bridge -j fdb show br br0 brport $swp1\ + | jq -e ".[] | select(.mac == \"$TEST_MAC\")" &> /dev/null + check_err $? 
"Did not find FDB record when should" + + log_test "Sticky fdb entry" +} + +trap cleanup EXIT + +setup_prepare +setup_wait + +tests_run + +exit $EXIT_STATUS diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh index ca53b539aa2d..85d253546684 100644 --- a/tools/testing/selftests/net/forwarding/lib.sh +++ b/tools/testing/selftests/net/forwarding/lib.sh @@ -251,7 +251,7 @@ lldpad_app_wait_set() { local dev=$1; shift - while lldptool -t -i $dev -V APP -c app | grep -q pending; do + while lldptool -t -i $dev -V APP -c app | grep -Eq "pending|unknown"; do echo "$dev: waiting for lldpad to push pending APP updates" sleep 5 done @@ -494,6 +494,14 @@ tc_rule_stats_get() | jq '.[1].options.actions[].stats.packets' } +ethtool_stats_get() +{ + local dev=$1; shift + local stat=$1; shift + + ethtool -S $dev | grep "^ *$stat:" | head -n 1 | cut -d: -f2 +} + mac_get() { local if_name=$1 @@ -541,6 +549,23 @@ forwarding_restore() sysctl_restore net.ipv4.conf.all.forwarding } +declare -A MTU_ORIG +mtu_set() +{ + local dev=$1; shift + local mtu=$1; shift + + MTU_ORIG["$dev"]=$(ip -j link show dev $dev | jq -e '.[].mtu') + ip link set dev $dev mtu $mtu +} + +mtu_restore() +{ + local dev=$1; shift + + ip link set dev $dev mtu ${MTU_ORIG["$dev"]} +} + tc_offload_check() { local num_netifs=${1:-$NUM_NETIFS} diff --git a/tools/testing/selftests/net/ip_defrag.c b/tools/testing/selftests/net/ip_defrag.c new file mode 100644 index 000000000000..61ae2782388e --- /dev/null +++ b/tools/testing/selftests/net/ip_defrag.c @@ -0,0 +1,393 @@ +// SPDX-License-Identifier: GPL-2.0 + +#define _GNU_SOURCE + +#include <arpa/inet.h> +#include <errno.h> +#include <error.h> +#include <linux/in.h> +#include <netinet/ip.h> +#include <netinet/ip6.h> +#include <netinet/udp.h> +#include <stdbool.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <time.h> +#include <unistd.h> + +static bool cfg_do_ipv4; +static bool cfg_do_ipv6; +static bool cfg_verbose; +static bool cfg_overlap; +static unsigned short cfg_port = 9000; + +const struct in_addr addr4 = { .s_addr = __constant_htonl(INADDR_LOOPBACK + 2) }; +const struct in6_addr addr6 = IN6ADDR_LOOPBACK_INIT; + +#define IP4_HLEN (sizeof(struct iphdr)) +#define IP6_HLEN (sizeof(struct ip6_hdr)) +#define UDP_HLEN (sizeof(struct udphdr)) + +/* IPv6 fragment header lenth. */ +#define FRAG_HLEN 8 + +static int payload_len; +static int max_frag_len; + +#define MSG_LEN_MAX 60000 /* Max UDP payload length. */ + +#define IP4_MF (1u << 13) /* IPv4 MF flag. */ +#define IP6_MF (1) /* IPv6 MF flag. */ + +#define CSUM_MANGLED_0 (0xffff) + +static uint8_t udp_payload[MSG_LEN_MAX]; +static uint8_t ip_frame[IP_MAXPACKET]; +static uint32_t ip_id = 0xabcd; +static int msg_counter; +static int frag_counter; +static unsigned int seed; + +/* Receive a UDP packet. Validate it matches udp_payload. 
*/ +static void recv_validate_udp(int fd_udp) +{ + ssize_t ret; + static uint8_t recv_buff[MSG_LEN_MAX]; + + ret = recv(fd_udp, recv_buff, payload_len, 0); + msg_counter++; + + if (cfg_overlap) { + if (ret != -1) + error(1, 0, "recv: expected timeout; got %d", + (int)ret); + if (errno != ETIMEDOUT && errno != EAGAIN) + error(1, errno, "recv: expected timeout: %d", + errno); + return; /* OK */ + } + + if (ret == -1) + error(1, errno, "recv: payload_len = %d max_frag_len = %d", + payload_len, max_frag_len); + if (ret != payload_len) + error(1, 0, "recv: wrong size: %d vs %d", (int)ret, payload_len); + if (memcmp(udp_payload, recv_buff, payload_len)) + error(1, 0, "recv: wrong data"); +} + +static uint32_t raw_checksum(uint8_t *buf, int len, uint32_t sum) +{ + int i; + + for (i = 0; i < (len & ~1U); i += 2) { + sum += (u_int16_t)ntohs(*((u_int16_t *)(buf + i))); + if (sum > 0xffff) + sum -= 0xffff; + } + + if (i < len) { + sum += buf[i] << 8; + if (sum > 0xffff) + sum -= 0xffff; + } + + return sum; +} + +static uint16_t udp_checksum(struct ip *iphdr, struct udphdr *udphdr) +{ + uint32_t sum = 0; + uint16_t res; + + sum = raw_checksum((uint8_t *)&iphdr->ip_src, 2 * sizeof(iphdr->ip_src), + IPPROTO_UDP + (uint32_t)(UDP_HLEN + payload_len)); + sum = raw_checksum((uint8_t *)udphdr, UDP_HLEN, sum); + sum = raw_checksum((uint8_t *)udp_payload, payload_len, sum); + res = 0xffff & ~sum; + if (res) + return htons(res); + else + return CSUM_MANGLED_0; +} + +static uint16_t udp6_checksum(struct ip6_hdr *iphdr, struct udphdr *udphdr) +{ + uint32_t sum = 0; + uint16_t res; + + sum = raw_checksum((uint8_t *)&iphdr->ip6_src, 2 * sizeof(iphdr->ip6_src), + IPPROTO_UDP); + sum = raw_checksum((uint8_t *)&udphdr->len, sizeof(udphdr->len), sum); + sum = raw_checksum((uint8_t *)udphdr, UDP_HLEN, sum); + sum = raw_checksum((uint8_t *)udp_payload, payload_len, sum); + res = 0xffff & ~sum; + if (res) + return htons(res); + else + return CSUM_MANGLED_0; +} + +static void send_fragment(int fd_raw, struct sockaddr *addr, socklen_t alen, + int offset, bool ipv6) +{ + int frag_len; + int res; + int payload_offset = offset > 0 ? offset - UDP_HLEN : 0; + uint8_t *frag_start = ipv6 ? ip_frame + IP6_HLEN + FRAG_HLEN : + ip_frame + IP4_HLEN; + + if (offset == 0) { + struct udphdr udphdr; + udphdr.source = htons(cfg_port + 1); + udphdr.dest = htons(cfg_port); + udphdr.len = htons(UDP_HLEN + payload_len); + udphdr.check = 0; + if (ipv6) + udphdr.check = udp6_checksum((struct ip6_hdr *)ip_frame, &udphdr); + else + udphdr.check = udp_checksum((struct ip *)ip_frame, &udphdr); + memcpy(frag_start, &udphdr, UDP_HLEN); + } + + if (ipv6) { + struct ip6_hdr *ip6hdr = (struct ip6_hdr *)ip_frame; + struct ip6_frag *fraghdr = (struct ip6_frag *)(ip_frame + IP6_HLEN); + if (payload_len - payload_offset <= max_frag_len && offset > 0) { + /* This is the last fragment. */ + frag_len = FRAG_HLEN + payload_len - payload_offset; + fraghdr->ip6f_offlg = htons(offset); + } else { + frag_len = FRAG_HLEN + max_frag_len; + fraghdr->ip6f_offlg = htons(offset | IP6_MF); + } + ip6hdr->ip6_plen = htons(frag_len); + if (offset == 0) + memcpy(frag_start + UDP_HLEN, udp_payload, + frag_len - FRAG_HLEN - UDP_HLEN); + else + memcpy(frag_start, udp_payload + payload_offset, + frag_len - FRAG_HLEN); + frag_len += IP6_HLEN; + } else { + struct ip *iphdr = (struct ip *)ip_frame; + if (payload_len - payload_offset <= max_frag_len && offset > 0) { + /* This is the last fragment. 
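+			 * IPv4 carries the fragment offset in 8-byte
+			 * units, with MF left clear on the final
+			 * fragment; hence the offset / 8 below.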
*/ + frag_len = IP4_HLEN + payload_len - payload_offset; + iphdr->ip_off = htons(offset / 8); + } else { + frag_len = IP4_HLEN + max_frag_len; + iphdr->ip_off = htons(offset / 8 | IP4_MF); + } + iphdr->ip_len = htons(frag_len); + if (offset == 0) + memcpy(frag_start + UDP_HLEN, udp_payload, + frag_len - IP4_HLEN - UDP_HLEN); + else + memcpy(frag_start, udp_payload + payload_offset, + frag_len - IP4_HLEN); + } + + res = sendto(fd_raw, ip_frame, frag_len, 0, addr, alen); + if (res < 0) + error(1, errno, "send_fragment"); + if (res != frag_len) + error(1, 0, "send_fragment: %d vs %d", res, frag_len); + + frag_counter++; +} + +static void send_udp_frags(int fd_raw, struct sockaddr *addr, + socklen_t alen, bool ipv6) +{ + struct ip *iphdr = (struct ip *)ip_frame; + struct ip6_hdr *ip6hdr = (struct ip6_hdr *)ip_frame; + int res; + int offset; + int frag_len; + + /* Send the UDP datagram using raw IP fragments: the 0th fragment + * has the UDP header; other fragments are pieces of udp_payload + * split in chunks of frag_len size. + * + * Odd fragments (1st, 3rd, 5th, etc.) are sent out first, then + * even fragments (0th, 2nd, etc.) are sent out. + */ + if (ipv6) { + struct ip6_frag *fraghdr = (struct ip6_frag *)(ip_frame + IP6_HLEN); + ((struct sockaddr_in6 *)addr)->sin6_port = 0; + memset(ip6hdr, 0, sizeof(*ip6hdr)); + ip6hdr->ip6_flow = htonl(6<<28); /* Version. */ + ip6hdr->ip6_nxt = IPPROTO_FRAGMENT; + ip6hdr->ip6_hops = 255; + ip6hdr->ip6_src = addr6; + ip6hdr->ip6_dst = addr6; + fraghdr->ip6f_nxt = IPPROTO_UDP; + fraghdr->ip6f_reserved = 0; + fraghdr->ip6f_ident = htonl(ip_id++); + } else { + memset(iphdr, 0, sizeof(*iphdr)); + iphdr->ip_hl = 5; + iphdr->ip_v = 4; + iphdr->ip_tos = 0; + iphdr->ip_id = htons(ip_id++); + iphdr->ip_ttl = 0x40; + iphdr->ip_p = IPPROTO_UDP; + iphdr->ip_src.s_addr = htonl(INADDR_LOOPBACK); + iphdr->ip_dst = addr4; + iphdr->ip_sum = 0; + } + + /* Odd fragments. */ + offset = max_frag_len; + while (offset < (UDP_HLEN + payload_len)) { + send_fragment(fd_raw, addr, alen, offset, ipv6); + offset += 2 * max_frag_len; + } + + if (cfg_overlap) { + /* Send an extra random fragment. */ + offset = rand() % (UDP_HLEN + payload_len - 1); + /* sendto() returns EINVAL if offset + frag_len is too small. */ + if (ipv6) { + struct ip6_frag *fraghdr = (struct ip6_frag *)(ip_frame + IP6_HLEN); + frag_len = max_frag_len + rand() % 256; + /* In IPv6 if !!(frag_len % 8), the fragment is dropped. */ + frag_len &= ~0x7; + fraghdr->ip6f_offlg = htons(offset / 8 | IP6_MF); + ip6hdr->ip6_plen = htons(frag_len); + frag_len += IP6_HLEN; + } else { + frag_len = IP4_HLEN + UDP_HLEN + rand() % 256; + iphdr->ip_off = htons(offset / 8 | IP4_MF); + iphdr->ip_len = htons(frag_len); + } + res = sendto(fd_raw, ip_frame, frag_len, 0, addr, alen); + if (res < 0) + error(1, errno, "sendto overlap"); + if (res != frag_len) + error(1, 0, "sendto overlap: %d vs %d", (int)res, frag_len); + frag_counter++; + } + + /* Even fragments. */ + offset = 0; + while (offset < (UDP_HLEN + payload_len)) { + send_fragment(fd_raw, addr, alen, offset, ipv6); + offset += 2 * max_frag_len; + } +} + +static void run_test(struct sockaddr *addr, socklen_t alen, bool ipv6) +{ + int fd_tx_raw, fd_rx_udp; + struct timeval tv = { .tv_sec = 0, .tv_usec = 10 * 1000 }; + int idx; + int min_frag_len = ipv6 ? 1280 : 8; + + /* Initialize the payload. */ + for (idx = 0; idx < MSG_LEN_MAX; ++idx) + udp_payload[idx] = idx % 256; + + /* Open sockets.
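+ * A raw socket transmits the hand-built fragments; an ordinary
+ * UDP socket bound to the same address receives the datagram
+ * once the kernel has reassembled it.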
*/ + fd_tx_raw = socket(addr->sa_family, SOCK_RAW, IPPROTO_RAW); + if (fd_tx_raw == -1) + error(1, errno, "socket tx_raw"); + + fd_rx_udp = socket(addr->sa_family, SOCK_DGRAM, 0); + if (fd_rx_udp == -1) + error(1, errno, "socket rx_udp"); + if (bind(fd_rx_udp, addr, alen)) + error(1, errno, "bind"); + /* Fail fast. */ + if (setsockopt(fd_rx_udp, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv))) + error(1, errno, "setsockopt rcv timeout"); + + for (payload_len = min_frag_len; payload_len < MSG_LEN_MAX; + payload_len += (rand() % 4096)) { + if (cfg_verbose) + printf("payload_len: %d\n", payload_len); + max_frag_len = min_frag_len; + do { + send_udp_frags(fd_tx_raw, addr, alen, ipv6); + recv_validate_udp(fd_rx_udp); + max_frag_len += 8 * (rand() % 8); + } while (max_frag_len < (1500 - FRAG_HLEN) && max_frag_len <= payload_len); + } + + /* Cleanup. */ + if (close(fd_tx_raw)) + error(1, errno, "close tx_raw"); + if (close(fd_rx_udp)) + error(1, errno, "close rx_udp"); + + if (cfg_verbose) + printf("processed %d messages, %d fragments\n", + msg_counter, frag_counter); + + fprintf(stderr, "PASS\n"); +} + + +static void run_test_v4(void) +{ + struct sockaddr_in addr = {0}; + + addr.sin_family = AF_INET; + addr.sin_port = htons(cfg_port); + addr.sin_addr = addr4; + + run_test((void *)&addr, sizeof(addr), false /* !ipv6 */); +} + +static void run_test_v6(void) +{ + struct sockaddr_in6 addr = {0}; + + addr.sin6_family = AF_INET6; + addr.sin6_port = htons(cfg_port); + addr.sin6_addr = addr6; + + run_test((void *)&addr, sizeof(addr), true /* ipv6 */); +} + +static void parse_opts(int argc, char **argv) +{ + int c; + + while ((c = getopt(argc, argv, "46ov")) != -1) { + switch (c) { + case '4': + cfg_do_ipv4 = true; + break; + case '6': + cfg_do_ipv6 = true; + break; + case 'o': + cfg_overlap = true; + break; + case 'v': + cfg_verbose = true; + break; + default: + error(1, 0, "%s: parse error", argv[0]); + } + } +} + +int main(int argc, char **argv) +{ + parse_opts(argc, argv); + seed = time(NULL); + srand(seed); + /* Print the seed to track/reproduce potential failures. */ + printf("seed = %d\n", seed); + + if (cfg_do_ipv4) + run_test_v4(); + if (cfg_do_ipv6) + run_test_v6(); + + return 0; +} diff --git a/tools/testing/selftests/net/ip_defrag.sh b/tools/testing/selftests/net/ip_defrag.sh new file mode 100755 index 000000000000..f34672796044 --- /dev/null +++ b/tools/testing/selftests/net/ip_defrag.sh @@ -0,0 +1,39 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0 +# +# Run a couple of IP defragmentation tests. 
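+#
+# setup() below raises the ipv4/ipv6 reassembly memory thresholds,
+# presumably so the many large fragments these tests keep in flight
+# are not evicted before reassembly completes.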
+ +set +x +set -e + +readonly NETNS="ns-$(mktemp -u XXXXXX)" + +setup() { + ip netns add "${NETNS}" + ip -netns "${NETNS}" link set lo up + ip netns exec "${NETNS}" sysctl -w net.ipv4.ipfrag_high_thresh=9000000 >/dev/null 2>&1 + ip netns exec "${NETNS}" sysctl -w net.ipv4.ipfrag_low_thresh=7000000 >/dev/null 2>&1 + ip netns exec "${NETNS}" sysctl -w net.ipv6.ip6frag_high_thresh=9000000 >/dev/null 2>&1 + ip netns exec "${NETNS}" sysctl -w net.ipv6.ip6frag_low_thresh=7000000 >/dev/null 2>&1 +} + +cleanup() { + ip netns del "${NETNS}" +} + +trap cleanup EXIT +setup + +echo "ipv4 defrag" +ip netns exec "${NETNS}" ./ip_defrag -4 + + +echo "ipv4 defrag with overlaps" +ip netns exec "${NETNS}" ./ip_defrag -4o + +echo "ipv6 defrag" +ip netns exec "${NETNS}" ./ip_defrag -6 + +echo "ipv6 defrag with overlaps" +ip netns exec "${NETNS}" ./ip_defrag -6o + diff --git a/tools/testing/selftests/net/pmtu.sh b/tools/testing/selftests/net/pmtu.sh index 0ab9423d009f..b9cdb68df4c5 100755 --- a/tools/testing/selftests/net/pmtu.sh +++ b/tools/testing/selftests/net/pmtu.sh @@ -6,6 +6,26 @@ # # Tests currently implemented: # +# - pmtu_ipv4 +# Set up two namespaces, A and B, with two paths between them over routers +# R1 and R2 (also implemented with namespaces), with different MTUs: +# +# segment a_r1 segment b_r1 a_r1: 2000 +# .--------------R1--------------. a_r2: 1500 +# A B a_r3: 2000 +# '--------------R2--------------' a_r4: 1400 +# segment a_r2 segment b_r2 +# +# Check that PMTU exceptions with the correct PMTU are created. Then +# decrease and increase the MTU of the local link for one of the paths, +# A to R1, checking that route exception PMTU changes accordingly over +# this path. Also check that locked exceptions are created when an ICMP +# message advertising a PMTU smaller than net.ipv4.route.min_pmtu is +# received +# +# - pmtu_ipv6 +# Same as pmtu_ipv4, except for locked PMTU tests, using IPv6 +# # - pmtu_vti4_exception # Set up vti tunnel on top of veth, with xfrm states and policies, in two # namespaces with matching endpoints. Check that route exception is not @@ -50,6 +70,8 @@ ksft_skip=4 which ping6 > /dev/null 2>&1 && ping6=$(which ping6) || ping6=$(which ping) tests=" + pmtu_ipv4_exception ipv4: PMTU exceptions + pmtu_ipv6_exception ipv6: PMTU exceptions pmtu_vti6_exception vti6: PMTU exceptions pmtu_vti4_exception vti4: PMTU exceptions pmtu_vti4_default_mtu vti4: default MTU assignment @@ -60,8 +82,45 @@ tests=" NS_A="ns-$(mktemp -u XXXXXX)" NS_B="ns-$(mktemp -u XXXXXX)" +NS_R1="ns-$(mktemp -u XXXXXX)" +NS_R2="ns-$(mktemp -u XXXXXX)" ns_a="ip netns exec ${NS_A}" ns_b="ip netns exec ${NS_B}" +ns_r1="ip netns exec ${NS_R1}" +ns_r2="ip netns exec ${NS_R2}" + +# Addressing and routing for tests with routers: four network segments, with +# index SEGMENT between 1 and 4, a common prefix (PREFIX4 or PREFIX6) and an +# identifier ID, which is 1 for hosts (A and B), 2 for routers (R1 and R2). +# Addresses are: +# - IPv4: PREFIX4.SEGMENT.ID (/24) +# - IPv6: PREFIX6:SEGMENT::ID (/64) +prefix4="192.168" +prefix6="fd00" +a_r1=1 +a_r2=2 +b_r1=3 +b_r2=4 +# ns peer segment +routing_addrs=" + A R1 ${a_r1} + A R2 ${a_r2} + B R1 ${b_r1} + B R2 ${b_r2} +" +# Traffic from A to B goes through R1 by default, and through R2, if destined to +# B's address on the b_r2 segment. +# Traffic from B to A goes through R1. 
+# ns destination gateway +routes=" + A default ${prefix4}.${a_r1}.2 + A ${prefix4}.${b_r2}.1 ${prefix4}.${a_r2}.2 + B default ${prefix4}.${b_r1}.2 + + A default ${prefix6}:${a_r1}::2 + A ${prefix6}:${b_r2}::1 ${prefix6}:${a_r2}::2 + B default ${prefix6}:${b_r1}::2 +" veth4_a_addr="192.168.1.1" veth4_b_addr="192.168.1.2" @@ -94,9 +153,15 @@ err_flush() { err_buf= } +# Find the auto-generated name for this namespace +nsname() { + eval echo \$NS_$1 +} + setup_namespaces() { - ip netns add ${NS_A} || return 1 - ip netns add ${NS_B} + for n in ${NS_A} ${NS_B} ${NS_R1} ${NS_R2}; do + ip netns add ${n} || return 1 + done } setup_veth() { @@ -167,6 +232,49 @@ setup_xfrm6() { setup_xfrm 6 ${veth6_a_addr} ${veth6_b_addr} } +setup_routing() { + for i in ${NS_R1} ${NS_R2}; do + ip netns exec ${i} sysctl -q net/ipv4/ip_forward=1 + ip netns exec ${i} sysctl -q net/ipv6/conf/all/forwarding=1 + done + + for i in ${routing_addrs}; do + [ "${ns}" = "" ] && ns="${i}" && continue + [ "${peer}" = "" ] && peer="${i}" && continue + [ "${segment}" = "" ] && segment="${i}" + + ns_name="$(nsname ${ns})" + peer_name="$(nsname ${peer})" + if="veth_${ns}-${peer}" + ifpeer="veth_${peer}-${ns}" + + # Create veth links + ip link add ${if} up netns ${ns_name} type veth peer name ${ifpeer} netns ${peer_name} || return 1 + ip -n ${peer_name} link set dev ${ifpeer} up + + # Add addresses + ip -n ${ns_name} addr add ${prefix4}.${segment}.1/24 dev ${if} + ip -n ${ns_name} addr add ${prefix6}:${segment}::1/64 dev ${if} + + ip -n ${peer_name} addr add ${prefix4}.${segment}.2/24 dev ${ifpeer} + ip -n ${peer_name} addr add ${prefix6}:${segment}::2/64 dev ${ifpeer} + + ns=""; peer=""; segment="" + done + + for i in ${routes}; do + [ "${ns}" = "" ] && ns="${i}" && continue + [ "${addr}" = "" ] && addr="${i}" && continue + [ "${gw}" = "" ] && gw="${i}" + + ns_name="$(nsname ${ns})" + + ip -n ${ns_name} route add ${addr} via ${gw} + + ns=""; addr=""; gw="" + done +} + setup() { [ "$(id -u)" -ne 0 ] && echo " need to run as root" && return $ksft_skip @@ -178,8 +286,9 @@ setup() { cleanup() { [ ${cleanup_done} -eq 1 ] && return - ip netns del ${NS_A} 2> /dev/null - ip netns del ${NS_B} 2> /dev/null + for n in ${NS_A} ${NS_B} ${NS_R1} ${NS_R2}; do + ip netns del ${n} 2> /dev/null + done cleanup_done=1 } @@ -196,7 +305,9 @@ mtu_parse() { next=0 for i in ${input}; do + [ ${next} -eq 1 -a "${i}" = "lock" ] && next=2 && continue [ ${next} -eq 1 ] && echo "${i}" && return + [ ${next} -eq 2 ] && echo "lock ${i}" && return [ "${i}" = "mtu" ] && next=1 done } @@ -229,6 +340,109 @@ route_get_dst_pmtu_from_exception() { mtu_parse "$(route_get_dst_exception "${ns_cmd}" ${dst})" } +check_pmtu_value() { + expected="${1}" + value="${2}" + event="${3}" + + [ "${expected}" = "any" ] && [ -n "${value}" ] && return 0 + [ "${value}" = "${expected}" ] && return 0 + [ -z "${value}" ] && err " PMTU exception wasn't created after ${event}" && return 1 + [ -z "${expected}" ] && err " PMTU exception shouldn't exist after ${event}" && return 1 + err " found PMTU exception with incorrect MTU ${value}, expected ${expected}, after ${event}" + return 1 +} + +test_pmtu_ipvX() { + family=${1} + + setup namespaces routing || return 2 + + if [ ${family} -eq 4 ]; then + ping=ping + dst1="${prefix4}.${b_r1}.1" + dst2="${prefix4}.${b_r2}.1" + else + ping=${ping6} + dst1="${prefix6}:${b_r1}::1" + dst2="${prefix6}:${b_r2}::1" + fi + + # Set up initial MTU values + mtu "${ns_a}" veth_A-R1 2000 + mtu "${ns_r1}" veth_R1-A 2000 + mtu "${ns_r1}" veth_R1-B 1400 + mtu "${ns_b}" 
veth_B-R1 1400 + + mtu "${ns_a}" veth_A-R2 2000 + mtu "${ns_r2}" veth_R2-A 2000 + mtu "${ns_r2}" veth_R2-B 1500 + mtu "${ns_b}" veth_B-R2 1500 + + # Create route exceptions + ${ns_a} ${ping} -q -M want -i 0.1 -w 2 -s 1800 ${dst1} > /dev/null + ${ns_a} ${ping} -q -M want -i 0.1 -w 2 -s 1800 ${dst2} > /dev/null + + # Check that exceptions have been created with the correct PMTU + pmtu_1="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst1})" + check_pmtu_value "1400" "${pmtu_1}" "exceeding MTU" || return 1 + pmtu_2="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst2})" + check_pmtu_value "1500" "${pmtu_2}" "exceeding MTU" || return 1 + + # Decrease local MTU below PMTU, check for PMTU decrease in route exception + mtu "${ns_a}" veth_A-R1 1300 + mtu "${ns_r1}" veth_R1-A 1300 + pmtu_1="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst1})" + check_pmtu_value "1300" "${pmtu_1}" "decreasing local MTU" || return 1 + # Second exception shouldn't be modified + pmtu_2="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst2})" + check_pmtu_value "1500" "${pmtu_2}" "changing local MTU on a link not on this path" || return 1 + + # Increase MTU, check for PMTU increase in route exception + mtu "${ns_a}" veth_A-R1 1700 + mtu "${ns_r1}" veth_R1-A 1700 + pmtu_1="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst1})" + check_pmtu_value "1700" "${pmtu_1}" "increasing local MTU" || return 1 + # Second exception shouldn't be modified + pmtu_2="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst2})" + check_pmtu_value "1500" "${pmtu_2}" "changing local MTU on a link not on this path" || return 1 + + # Skip PMTU locking tests for IPv6 + [ $family -eq 6 ] && return 0 + + # Decrease remote MTU on path via R2, get new exception + mtu "${ns_r2}" veth_R2-B 400 + mtu "${ns_b}" veth_B-R2 400 + ${ns_a} ${ping} -q -M want -i 0.1 -w 2 -s 1400 ${dst2} > /dev/null + pmtu_2="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst2})" + check_pmtu_value "lock 552" "${pmtu_2}" "exceeding MTU, with MTU < min_pmtu" || return 1 + + # Decrease local MTU below PMTU + mtu "${ns_a}" veth_A-R2 500 + mtu "${ns_r2}" veth_R2-A 500 + pmtu_2="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst2})" + check_pmtu_value "500" "${pmtu_2}" "decreasing local MTU" || return 1 + + # Increase local MTU + mtu "${ns_a}" veth_A-R2 1500 + mtu "${ns_r2}" veth_R2-A 1500 + pmtu_2="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst2})" + check_pmtu_value "1500" "${pmtu_2}" "increasing local MTU" || return 1 + + # Get new exception + ${ns_a} ${ping} -q -M want -i 0.1 -w 2 -s 1400 ${dst2} > /dev/null + pmtu_2="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst2})" + check_pmtu_value "lock 552" "${pmtu_2}" "exceeding MTU, with MTU < min_pmtu" || return 1 +} + +test_pmtu_ipv4_exception() { + test_pmtu_ipvX 4 +} + +test_pmtu_ipv6_exception() { + test_pmtu_ipvX 6 +} + test_pmtu_vti4_exception() { setup namespaces veth vti4 xfrm4 || return 2 @@ -248,24 +462,13 @@ test_pmtu_vti4_exception() { # exception is created ${ns_a} ping -q -M want -i 0.1 -w 2 -s ${ping_payload} ${vti4_b_addr} > /dev/null pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${vti4_b_addr})" - if [ "${pmtu}" != "" ]; then - err " unexpected exception created with PMTU ${pmtu} for IP payload length ${esp_payload_rfc4106}" - return 1 - fi + check_pmtu_value "" "${pmtu}" "sending packet smaller than PMTU (IP payload length ${esp_payload_rfc4106})" || return 1 # Now exceed link layer MTU by one byte, check that exception is created + # with the right PMTU value ${ns_a} ping -q -M want -i 0.1 -w 2 
-s $((ping_payload + 1)) ${vti4_b_addr} > /dev/null pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${vti4_b_addr})" - if [ "${pmtu}" = "" ]; then - err " exception not created for IP payload length $((esp_payload_rfc4106 + 1))" - return 1 - fi - - # ...with the right PMTU value - if [ ${pmtu} -ne ${esp_payload_rfc4106} ]; then - err " wrong PMTU ${pmtu} in exception, expected: ${esp_payload_rfc4106}" - return 1 - fi + check_pmtu_value "${esp_payload_rfc4106}" "${pmtu}" "exceeding PMTU (IP payload length $((esp_payload_rfc4106 + 1)))" } test_pmtu_vti6_exception() { @@ -280,25 +483,18 @@ test_pmtu_vti6_exception() { ${ns_a} ${ping6} -q -i 0.1 -w 2 -s 60000 ${vti6_b_addr} > /dev/null # Check that exception was created - if [ "$(route_get_dst_pmtu_from_exception "${ns_a}" ${vti6_b_addr})" = "" ]; then - err " tunnel exceeding link layer MTU didn't create route exception" - return 1 - fi + pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${vti6_b_addr})" + check_pmtu_value any "${pmtu}" "creating tunnel exceeding link layer MTU" || return 1 # Decrease tunnel MTU, check for PMTU decrease in route exception mtu "${ns_a}" vti6_a 3000 - - if [ "$(route_get_dst_pmtu_from_exception "${ns_a}" ${vti6_b_addr})" -ne 3000 ]; then - err " decreasing tunnel MTU didn't decrease route exception PMTU" - fail=1 - fi + pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${vti6_b_addr})" + check_pmtu_value "3000" "${pmtu}" "decreasing tunnel MTU" || fail=1 # Increase tunnel MTU, check for PMTU increase in route exception mtu "${ns_a}" vti6_a 9000 - if [ "$(route_get_dst_pmtu_from_exception "${ns_a}" ${vti6_b_addr})" -ne 9000 ]; then - err " increasing tunnel MTU didn't increase route exception PMTU" - fail=1 - fi + pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${vti6_b_addr})" + check_pmtu_value "9000" "${pmtu}" "increasing tunnel MTU" || fail=1 return ${fail} } diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c index 8fdfeafaf8c0..fac68d710f35 100644 --- a/tools/testing/selftests/net/tls.c +++ b/tools/testing/selftests/net/tls.c @@ -121,11 +121,11 @@ TEST_F(tls, send_then_sendfile) buf = (char *)malloc(st.st_size); EXPECT_EQ(send(self->fd, test_str, to_send, 0), to_send); - EXPECT_EQ(recv(self->cfd, recv_buf, to_send, 0), to_send); + EXPECT_EQ(recv(self->cfd, recv_buf, to_send, MSG_WAITALL), to_send); EXPECT_EQ(memcmp(test_str, recv_buf, to_send), 0); EXPECT_GE(sendfile(self->fd, filefd, 0, st.st_size), 0); - EXPECT_EQ(recv(self->cfd, buf, st.st_size, 0), st.st_size); + EXPECT_EQ(recv(self->cfd, buf, st.st_size, MSG_WAITALL), st.st_size); } TEST_F(tls, recv_max) @@ -160,7 +160,7 @@ TEST_F(tls, msg_more) EXPECT_EQ(send(self->fd, test_str, send_len, MSG_MORE), send_len); EXPECT_EQ(recv(self->cfd, buf, send_len, MSG_DONTWAIT), -1); EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len); - EXPECT_EQ(recv(self->cfd, buf, send_len * 2, MSG_DONTWAIT), + EXPECT_EQ(recv(self->cfd, buf, send_len * 2, MSG_WAITALL), send_len * 2); EXPECT_EQ(memcmp(buf, test_str, send_len), 0); } @@ -180,7 +180,7 @@ TEST_F(tls, sendmsg_single) msg.msg_iov = &vec; msg.msg_iovlen = 1; EXPECT_EQ(sendmsg(self->fd, &msg, 0), send_len); - EXPECT_EQ(recv(self->cfd, buf, send_len, 0), send_len); + EXPECT_EQ(recv(self->cfd, buf, send_len, MSG_WAITALL), send_len); EXPECT_EQ(memcmp(buf, test_str, send_len), 0); } @@ -288,7 +288,7 @@ TEST_F(tls, splice_from_pipe) ASSERT_GE(pipe(p), 0); EXPECT_GE(write(p[1], mem_send, send_len), 0); EXPECT_GE(splice(p[0], NULL, self->fd, NULL, send_len, 0), 0); - 
EXPECT_GE(recv(self->cfd, mem_recv, send_len, 0), 0); + EXPECT_EQ(recv(self->cfd, mem_recv, send_len, MSG_WAITALL), send_len); EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0); } @@ -306,7 +306,7 @@ TEST_F(tls, splice_from_pipe2) EXPECT_GE(splice(p[0], NULL, self->fd, NULL, 8000, 0), 0); EXPECT_GE(write(p2[1], mem_send + 8000, 8000), 0); EXPECT_GE(splice(p2[0], NULL, self->fd, NULL, 8000, 0), 0); - EXPECT_GE(recv(self->cfd, mem_recv, send_len, 0), 0); + EXPECT_EQ(recv(self->cfd, mem_recv, send_len, MSG_WAITALL), send_len); EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0); } @@ -322,13 +322,13 @@ TEST_F(tls, send_and_splice) ASSERT_GE(pipe(p), 0); EXPECT_EQ(send(self->fd, test_str, send_len2, 0), send_len2); - EXPECT_NE(recv(self->cfd, buf, send_len2, 0), -1); + EXPECT_EQ(recv(self->cfd, buf, send_len2, MSG_WAITALL), send_len2); EXPECT_EQ(memcmp(test_str, buf, send_len2), 0); EXPECT_GE(write(p[1], mem_send, send_len), send_len); EXPECT_GE(splice(p[0], NULL, self->fd, NULL, send_len, 0), send_len); - EXPECT_GE(recv(self->cfd, mem_recv, send_len, 0), 0); + EXPECT_EQ(recv(self->cfd, mem_recv, send_len, MSG_WAITALL), send_len); EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0); } @@ -436,7 +436,7 @@ TEST_F(tls, multiple_send_single_recv) EXPECT_GE(send(self->fd, send_mem, send_len, 0), 0); EXPECT_GE(send(self->fd, send_mem, send_len, 0), 0); memset(recv_mem, 0, total_len); - EXPECT_EQ(recv(self->cfd, recv_mem, total_len, 0), total_len); + EXPECT_EQ(recv(self->cfd, recv_mem, total_len, MSG_WAITALL), total_len); EXPECT_EQ(memcmp(send_mem, recv_mem, send_len), 0); EXPECT_EQ(memcmp(send_mem, recv_mem + send_len, send_len), 0); @@ -516,17 +516,17 @@ TEST_F(tls, recv_peek_multiple_records) len = strlen(test_str_second) + 1; EXPECT_EQ(send(self->fd, test_str_second, len, 0), len); - len = sizeof(buf); + len = strlen(test_str_first); memset(buf, 0, len); - EXPECT_NE(recv(self->cfd, buf, len, MSG_PEEK), -1); + EXPECT_EQ(recv(self->cfd, buf, len, MSG_PEEK | MSG_WAITALL), len); /* MSG_PEEK can only peek into the current record. */ - len = strlen(test_str_first) + 1; + len = strlen(test_str_first); EXPECT_EQ(memcmp(test_str_first, buf, len), 0); - len = sizeof(buf); + len = strlen(test_str) + 1; memset(buf, 0, len); - EXPECT_NE(recv(self->cfd, buf, len, 0), -1); + EXPECT_EQ(recv(self->cfd, buf, len, MSG_WAITALL), len); /* Non-MSG_PEEK will advance strparser (and therefore record) * however. 
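The recurring change in the tls.c hunks above and below, recv(..., 0) becoming recv(..., MSG_WAITALL), removes a source of flakes: a plain recv() may legally return with fewer bytes than requested, so asserting recv(...) == len races against record delivery. MSG_WAITALL is shorthand for the retry loop one would otherwise write by hand; a minimal sketch of that loop follows (recv_all() is a hypothetical helper, not part of the patch):

#include <sys/types.h>
#include <sys/socket.h>

/* Keep reading until exactly len bytes have arrived, or error/EOF. */
static ssize_t recv_all(int fd, char *buf, size_t len)
{
	size_t off = 0;

	while (off < len) {
		ssize_t r = recv(fd, buf + off, len - off, 0);

		if (r <= 0)
			return r;	/* -1 on error, 0 on EOF */
		off += r;
	}
	return off;
}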
@@ -543,6 +543,28 @@ TEST_F(tls, recv_peek_multiple_records) len = strlen(test_str_second) + 1; EXPECT_EQ(send(self->fd, test_str_second, len, 0), len); + len = strlen(test_str) + 1; + memset(buf, 0, len); + EXPECT_EQ(recv(self->cfd, buf, len, MSG_PEEK | MSG_WAITALL), len); + + len = strlen(test_str) + 1; + EXPECT_EQ(memcmp(test_str, buf, len), 0); +} + +TEST_F(tls, recv_peek_large_buf_mult_recs) +{ + char const *test_str = "test_read_peek_mult_recs"; + char const *test_str_first = "test_read_peek"; + char const *test_str_second = "_mult_recs"; + int len; + char buf[64]; + + len = strlen(test_str_first); + EXPECT_EQ(send(self->fd, test_str_first, len, 0), len); + + len = strlen(test_str_second) + 1; + EXPECT_EQ(send(self->fd, test_str_second, len, 0), len); + len = sizeof(buf); memset(buf, 0, len); EXPECT_NE(recv(self->cfd, buf, len, MSG_PEEK), -1); @@ -551,6 +573,7 @@ TEST_F(tls, recv_peek_multiple_records) EXPECT_EQ(memcmp(test_str, buf, len), 0); } + TEST_F(tls, pollin) { char const *test_str = "test_poll"; @@ -564,7 +587,7 @@ TEST_F(tls, pollin) EXPECT_EQ(poll(&fd, 1, 20), 1); EXPECT_EQ(fd.revents & POLLIN, 1); - EXPECT_EQ(recv(self->cfd, buf, send_len, 0), send_len); + EXPECT_EQ(recv(self->cfd, buf, send_len, MSG_WAITALL), send_len); /* Test timing out */ EXPECT_EQ(poll(&fd, 1, 20), 0); } @@ -582,7 +605,7 @@ TEST_F(tls, poll_wait) /* Set timeout to inf. secs */ EXPECT_EQ(poll(&fd, 1, -1), 1); EXPECT_EQ(fd.revents & POLLIN, 1); - EXPECT_EQ(recv(self->cfd, recv_mem, send_len, 0), send_len); + EXPECT_EQ(recv(self->cfd, recv_mem, send_len, MSG_WAITALL), send_len); } TEST_F(tls, blocking) @@ -728,7 +751,7 @@ TEST_F(tls, control_msg) EXPECT_EQ(recv(self->cfd, buf, send_len, 0), -1); vec.iov_base = buf; - EXPECT_EQ(recvmsg(self->cfd, &msg, 0), send_len); + EXPECT_EQ(recvmsg(self->cfd, &msg, MSG_WAITALL), send_len); cmsg = CMSG_FIRSTHDR(&msg); EXPECT_NE(cmsg, NULL); EXPECT_EQ(cmsg->cmsg_level, SOL_TLS); diff --git a/tools/testing/selftests/tc-testing/README b/tools/testing/selftests/tc-testing/README index 49a6f8c3fdae..f9281e8aa313 100644 --- a/tools/testing/selftests/tc-testing/README +++ b/tools/testing/selftests/tc-testing/README @@ -232,6 +232,8 @@ directory: and the other is a test whether the command leaked memory or not. (This one is a preliminary version, it may not work quite right yet, but the overall template is there and it should only need tweaks.) + - buildebpfPlugin.py: + builds all programs in $EBPFDIR. ACKNOWLEDGEMENTS diff --git a/tools/testing/selftests/tc-testing/bpf/Makefile b/tools/testing/selftests/tc-testing/bpf/Makefile new file mode 100644 index 000000000000..dc92eb271d9a --- /dev/null +++ b/tools/testing/selftests/tc-testing/bpf/Makefile @@ -0,0 +1,29 @@ +# SPDX-License-Identifier: GPL-2.0 + +APIDIR := ../../../../include/uapi +TEST_GEN_FILES = action.o + +top_srcdir = ../../../../.. +include ../../lib.mk + +CLANG ?= clang +LLC ?= llc +PROBE := $(shell $(LLC) -march=bpf -mcpu=probe -filetype=null /dev/null 2>&1) + +ifeq ($(PROBE),) + CPU ?= probe +else + CPU ?= generic +endif + +CLANG_SYS_INCLUDES := $(shell $(CLANG) -v -E - </dev/null 2>&1 \ + | sed -n '/<...> search starts here:/,/End of search list./{ s| \(/.*\)|-idirafter \1|p }') + +CLANG_FLAGS = -I. 
-I$(APIDIR) \ + $(CLANG_SYS_INCLUDES) \ + -Wno-compare-distinct-pointer-types + +$(OUTPUT)/%.o: %.c + $(CLANG) $(CLANG_FLAGS) \ + -O2 -target bpf -emit-llvm -c $< -o - | \ + $(LLC) -march=bpf -mcpu=$(CPU) $(LLC_FLAGS) -filetype=obj -o $@ diff --git a/tools/testing/selftests/tc-testing/bpf/action.c b/tools/testing/selftests/tc-testing/bpf/action.c new file mode 100644 index 000000000000..c32b99b80e19 --- /dev/null +++ b/tools/testing/selftests/tc-testing/bpf/action.c @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 + * Copyright (c) 2018 Davide Caratti, Red Hat inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + */ + +#include <linux/bpf.h> +#include <linux/pkt_cls.h> + +__attribute__((section("action-ok"),used)) int action_ok(struct __sk_buff *s) +{ + return TC_ACT_OK; +} + +__attribute__((section("action-ko"),used)) int action_ko(struct __sk_buff *s) +{ + s->data = 0x0; + return TC_ACT_OK; +} + +char _license[] __attribute__((section("license"),used)) = "GPL"; diff --git a/tools/testing/selftests/tc-testing/plugin-lib/buildebpfPlugin.py b/tools/testing/selftests/tc-testing/plugin-lib/buildebpfPlugin.py new file mode 100644 index 000000000000..9f0ba10c44b4 --- /dev/null +++ b/tools/testing/selftests/tc-testing/plugin-lib/buildebpfPlugin.py @@ -0,0 +1,66 @@ +''' +build ebpf program +''' + +import os +import signal +from string import Template +import subprocess +import time +from TdcPlugin import TdcPlugin +from tdc_config import * + +class SubPlugin(TdcPlugin): + def __init__(self): + self.sub_class = 'buildebpf/SubPlugin' + self.tap = '' + super().__init__() + + def pre_suite(self, testcount, testidlist): + super().pre_suite(testcount, testidlist) + + if self.args.buildebpf: + self._ebpf_makeall() + + def post_suite(self, index): + super().post_suite(index) + + self._ebpf_makeclean() + + def add_args(self, parser): + super().add_args(parser) + + self.argparser_group = self.argparser.add_argument_group( + 'buildebpf', + 'options for buildebpfPlugin') + self.argparser_group.add_argument( + '-B', '--buildebpf', action='store_true', + help='build eBPF programs') + + return self.argparser + + def _ebpf_makeall(self): + if self.args.buildebpf: + self._make('all') + + def _ebpf_makeclean(self): + if self.args.buildebpf: + self._make('clean') + + def _make(self, target): + command = 'make -C {} {}'.format(self.args.NAMES['EBPFDIR'], target) + proc = subprocess.Popen(command, + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + env=ENVIR) + (rawout, serr) = proc.communicate() + + if proc.returncode != 0 and len(serr) > 0: + foutput = serr.decode("utf-8") + else: + foutput = rawout.decode("utf-8") + + proc.stdout.close() + proc.stderr.close() + return proc, foutput diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json b/tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json index 6f289a49e5ec..5970cee6d05f 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json +++ b/tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json @@ -55,7 +55,6 @@ "bpf" ], "setup": [ - "printf '#include <linux/bpf.h>\nchar l[] __attribute__((section(\"license\"),used))=\"GPL\"; __attribute__((section(\"action\"),used)) int m(struct __sk_buff *s) { return 2; }' | clang -O2 -x c -c - -target bpf -o _b.o", [ "$TC action flush action bpf", 0, @@ -63,14 +62,13 @@ 255 ] ], - "cmdUnderTest": "$TC action add 
action bpf object-file _b.o index 667", + "cmdUnderTest": "$TC action add action bpf object-file $EBPFDIR/action.o section action-ok index 667", "expExitCode": "0", "verifyCmd": "$TC action get action bpf index 667", - "matchPattern": "action order [0-9]*: bpf _b.o:\\[action\\] id [0-9]* tag 3b185187f1855c4c( jited)? default-action pipe.*index 667 ref", + "matchPattern": "action order [0-9]*: bpf action.o:\\[action-ok\\] id [0-9]* tag [0-9a-f]{16}( jited)? default-action pipe.*index 667 ref", "matchCount": "1", "teardown": [ - "$TC action flush action bpf", - "rm -f _b.o" + "$TC action flush action bpf" ] }, { @@ -81,7 +79,6 @@ "bpf" ], "setup": [ - "printf '#include <linux/bpf.h>\nchar l[] __attribute__((section(\"license\"),used))=\"GPL\"; __attribute__((section(\"action\"),used)) int m(struct __sk_buff *s) { s->data = 0x0; return 2; }' | clang -O2 -x c -c - -target bpf -o _c.o", [ "$TC action flush action bpf", 0, @@ -89,10 +86,10 @@ 255 ] ], - "cmdUnderTest": "$TC action add action bpf object-file _c.o index 667", + "cmdUnderTest": "$TC action add action bpf object-file $EBPFDIR/action.o section action-ko index 667", "expExitCode": "255", "verifyCmd": "$TC action get action bpf index 667", - "matchPattern": "action order [0-9]*: bpf _c.o:\\[action\\] id [0-9].*index 667 ref", + "matchPattern": "action order [0-9]*: bpf action.o:\\[action-ko\\] id [0-9].*index 667 ref", "matchCount": "0", "teardown": [ [ @@ -100,8 +97,7 @@ 0, 1, 255 - ], - "rm -f _c.o" + ] ] }, { diff --git a/tools/testing/selftests/tc-testing/tdc_config.py b/tools/testing/selftests/tc-testing/tdc_config.py index a023d0d62b25..d651bc1501bd 100644 --- a/tools/testing/selftests/tc-testing/tdc_config.py +++ b/tools/testing/selftests/tc-testing/tdc_config.py @@ -16,7 +16,9 @@ NAMES = { 'DEV2': '', 'BATCH_FILE': './batch.txt', # Name of the namespace to use - 'NS': 'tcut' + 'NS': 'tcut', + # Directory containing eBPF test programs + 'EBPFDIR': './bpf' } |
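With the JSON tests now loading pre-built sections of $EBPFDIR/action.o, extending the suite no longer means compiling throwaway objects in "setup": append a section to action.c, rebuild it through the buildebpfPlugin (-B), and reference it with "section <name>" in a new test. A hypothetical example of such an addition (not part of this patch; it reuses the _license section already in action.c):

/* Hypothetical extra program for action.c: drops every packet. A new
 * bpf.json test could load it with:
 *   $TC action add action bpf object-file $EBPFDIR/action.o \
 *       section action-drop index 667
 */
#include <linux/bpf.h>
#include <linux/pkt_cls.h>

__attribute__((section("action-drop"),used)) int action_drop(struct __sk_buff *s)
{
	return TC_ACT_SHOT;
}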