512 files changed, 12563 insertions, 4465 deletions
diff --git a/Documentation/ABI/testing/debugfs-hisi-migration b/Documentation/ABI/testing/debugfs-hisi-migration new file mode 100644 index 000000000000..2c01b2d387dd --- /dev/null +++ b/Documentation/ABI/testing/debugfs-hisi-migration @@ -0,0 +1,25 @@ +What: /sys/kernel/debug/vfio/<device>/migration/hisi_acc/dev_data +Date: Jan 2025 +KernelVersion: 6.13 +Contact: Longfang Liu <liulongfang@huawei.com> +Description: Read the configuration data and some status data + required for device live migration. These data include device + status data, queue configuration data, some task configuration + data and device attribute data. The output format of the data + is defined by the live migration driver. + +What: /sys/kernel/debug/vfio/<device>/migration/hisi_acc/migf_data +Date: Jan 2025 +KernelVersion: 6.13 +Contact: Longfang Liu <liulongfang@huawei.com> +Description: Read the data from the last completed live migration. + This data includes the same device status data as in "dev_data". + The migf_data is the dev_data that is migrated. + +What: /sys/kernel/debug/vfio/<device>/migration/hisi_acc/cmd_state +Date: Jan 2025 +KernelVersion: 6.13 +Contact: Longfang Liu <liulongfang@huawei.com> +Description: Used to obtain the device command sending and receiving + channel status. Returns failure or success logs based on the + results. diff --git a/Documentation/ABI/testing/sysfs-bus-event_source-devices-vpa-pmu b/Documentation/ABI/testing/sysfs-bus-event_source-devices-vpa-pmu index 8285263ff78d..a116aee9709a 100644 --- a/Documentation/ABI/testing/sysfs-bus-event_source-devices-vpa-pmu +++ b/Documentation/ABI/testing/sysfs-bus-event_source-devices-vpa-pmu @@ -8,17 +8,18 @@ Description: Read-only. Attribute group to describe the magic bits Each attribute under this group defines a bit range of the perf_event_attr.config. Supported attribute are listed below:: - event = "config:0-31" - event ID - For example:: + event = "config:0-31" - event ID - l1_to_l2_lat = "event=0x1" + For example:: + + l1_to_l2_lat = "event=0x1" What: /sys/bus/event_source/devices/vpa_pmu/events Date: November 2024 Contact: Linux on PowerPC Developer List <linuxppc-dev@lists.ozlabs.org> -Description: Read-only. Attribute group to describe performance monitoring +Description: Read-only. Attribute group to describe performance monitoring events for the Virtual Processor Area events. Each attribute - in this group describes a single performance monitoring event - supported by vpa_pmu. The name of the file is the name of - the event (See ABI/testing/sysfs-bus-event_source-devices-events). + in this group describes a single performance monitoring event + supported by vpa_pmu. The name of the file is the name of + the event (See ABI/testing/sysfs-bus-event_source-devices-events). diff --git a/Documentation/admin-guide/kernel-parameters.rst b/Documentation/admin-guide/kernel-parameters.rst index 6247bbc9547c..59931f21c974 100644 --- a/Documentation/admin-guide/kernel-parameters.rst +++ b/Documentation/admin-guide/kernel-parameters.rst @@ -174,6 +174,7 @@ is applicable:: SCSI Appropriate SCSI support is enabled. A lot of drivers have their options described inside the Documentation/scsi/ sub-directory. + SDW SoundWire support is enabled. SECURITY Different security models are enabled. SELINUX SELinux support is enabled. SERIAL Serial support is enabled. 
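The vpa_pmu ABI text above documents the whole event ID in perf_event_attr.config bits 0-31, with l1_to_l2_lat encoded as event=0x1. A minimal userspace sketch of counting that event through the generic perf_event_open() interface, assuming the PMU exposes its dynamic type id via the standard /sys/bus/event_source/devices/vpa_pmu/type file, could look like::

  #include <linux/perf_event.h>
  #include <stdio.h>
  #include <string.h>
  #include <sys/syscall.h>
  #include <unistd.h>

  int main(void)
  {
          struct perf_event_attr attr;
          unsigned long long count;
          FILE *f;
          int fd, type;

          /* Dynamic PMU type id registered for vpa_pmu (assumed standard sysfs layout). */
          f = fopen("/sys/bus/event_source/devices/vpa_pmu/type", "r");
          if (!f || fscanf(f, "%d", &type) != 1) {
                  fprintf(stderr, "cannot read vpa_pmu type\n");
                  return 1;
          }
          fclose(f);

          memset(&attr, 0, sizeof(attr));
          attr.size = sizeof(attr);
          attr.type = type;       /* dynamic PMU type from sysfs */
          attr.config = 0x1;      /* event ID in config:0-31 (l1_to_l2_lat per the ABI text) */

          /* Count for the calling thread on any CPU. */
          fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
          if (fd < 0) {
                  perror("perf_event_open");
                  return 1;
          }

          sleep(1);
          if (read(fd, &count, sizeof(count)) == sizeof(count))
                  printf("l1_to_l2_lat: %llu\n", count);
          close(fd);
          return 0;
  }
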
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 14ebd1f4b45e..dc663c0ca670 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -6075,6 +6075,10 @@ non-zero "wait" parameter. See weight_single and weight_many. + sdw_mclk_divider=[SDW] + Specify the MCLK divider for Intel SoundWire buses in + case the BIOS does not provide the clock rate properly. + skew_tick= [KNL,EARLY] Offset the periodic timer tick per cpu to mitigate xtime_lock contention on larger systems, and/or RCU lock contention on all systems with CONFIG_MAXSMP set. diff --git a/Documentation/arch/riscv/hwprobe.rst b/Documentation/arch/riscv/hwprobe.rst index 85b709257918..955fbcd19ce9 100644 --- a/Documentation/arch/riscv/hwprobe.rst +++ b/Documentation/arch/riscv/hwprobe.rst @@ -239,6 +239,9 @@ The following keys are defined: ratified in commit 98918c844281 ("Merge pull request #1217 from riscv/zawrs") of riscv-isa-manual. + * :c:macro:`RISCV_HWPROBE_EXT_SUPM`: The Supm extension is supported as + defined in version 1.0 of the RISC-V Pointer Masking extensions. + * :c:macro:`RISCV_HWPROBE_KEY_CPUPERF_0`: Deprecated. Returns similar values to :c:macro:`RISCV_HWPROBE_KEY_MISALIGNED_SCALAR_PERF`, but the key was mistakenly classified as a bitmask rather than a value. @@ -274,3 +277,19 @@ The following keys are defined: represent the highest userspace virtual address usable. * :c:macro:`RISCV_HWPROBE_KEY_TIME_CSR_FREQ`: Frequency (in Hz) of `time CSR`. + +* :c:macro:`RISCV_HWPROBE_KEY_MISALIGNED_VECTOR_PERF`: An enum value describing the + performance of misaligned vector accesses on the selected set of processors. + + * :c:macro:`RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN`: The performance of misaligned + vector accesses is unknown. + + * :c:macro:`RISCV_HWPROBE_MISALIGNED_VECTOR_SLOW`: 32-bit misaligned accesses using vector + registers are slower than the equivalent quantity of byte accesses via vector registers. + Misaligned accesses may be supported directly in hardware, or trapped and emulated by software. + + * :c:macro:`RISCV_HWPROBE_MISALIGNED_VECTOR_FAST`: 32-bit misaligned accesses using vector + registers are faster than the equivalent quantity of byte accesses via vector registers. + + * :c:macro:`RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED`: Misaligned vector accesses are + not supported at all and will generate a misaligned address fault. diff --git a/Documentation/arch/riscv/uabi.rst b/Documentation/arch/riscv/uabi.rst index 2b420bab0527..243e40062e34 100644 --- a/Documentation/arch/riscv/uabi.rst +++ b/Documentation/arch/riscv/uabi.rst @@ -68,3 +68,19 @@ Misaligned accesses Misaligned scalar accesses are supported in userspace, but they may perform poorly. Misaligned vector accesses are only supported if the Zicclsm extension is supported. + +Pointer masking +--------------- + +Support for pointer masking in userspace (the Supm extension) is provided via +the ``PR_SET_TAGGED_ADDR_CTRL`` and ``PR_GET_TAGGED_ADDR_CTRL`` ``prctl()`` +operations. Pointer masking is disabled by default. To enable it, userspace +must call ``PR_SET_TAGGED_ADDR_CTRL`` with the ``PR_PMLEN`` field set to the +number of mask/tag bits needed by the application. ``PR_PMLEN`` is interpreted +as a lower bound; if the kernel is unable to satisfy the request, the +``PR_SET_TAGGED_ADDR_CTRL`` operation will fail. The actual number of tag bits +is returned in ``PR_PMLEN`` by the ``PR_GET_TAGGED_ADDR_CTRL`` operation. 
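A minimal userspace sketch of the prctl() interface described above, requesting at least 7 mask/tag bits and reading back the granted width, might look as follows; the PR_PMLEN_SHIFT/PR_PMLEN_MASK fallback values are an assumption taken from this series and may not match the final <linux/prctl.h>::

  #include <linux/prctl.h>
  #include <stdio.h>
  #include <sys/prctl.h>

  /*
   * Fallback definitions in case the installed <linux/prctl.h> predates this
   * series; the values follow the pointer-masking patches and are an
   * assumption, not a guarantee.
   */
  #ifndef PR_PMLEN_SHIFT
  #define PR_PMLEN_SHIFT  24
  #define PR_PMLEN_MASK   (0x7fUL << PR_PMLEN_SHIFT)
  #endif

  int main(void)
  {
          unsigned long ctrl;
          long ret;

          /* PR_PMLEN is a lower bound: ask for at least 7 mask/tag bits. */
          ctrl = 7UL << PR_PMLEN_SHIFT;
          if (prctl(PR_SET_TAGGED_ADDR_CTRL, ctrl, 0, 0, 0)) {
                  perror("PR_SET_TAGGED_ADDR_CTRL");
                  return 1;
          }

          /* The kernel reports the actual number of tag bits it granted. */
          ret = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);
          if (ret < 0) {
                  perror("PR_GET_TAGGED_ADDR_CTRL");
                  return 1;
          }
          printf("granted PMLEN: %lu\n",
                 ((unsigned long)ret & PR_PMLEN_MASK) >> PR_PMLEN_SHIFT);
          return 0;
  }
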
+ +Additionally, when pointer masking is enabled (``PR_PMLEN`` is greater than 0), +a tagged address ABI is supported, with the same interface and behavior as +documented for AArch64 (Documentation/arch/arm64/tagged-address-abi.rst). diff --git a/Documentation/devicetree/bindings/dma/qcom,gpi.yaml b/Documentation/devicetree/bindings/dma/qcom,gpi.yaml index 4df4e61895d2..4ad56a409b9c 100644 --- a/Documentation/devicetree/bindings/dma/qcom,gpi.yaml +++ b/Documentation/devicetree/bindings/dma/qcom,gpi.yaml @@ -26,6 +26,7 @@ properties: - enum: - qcom,qcm2290-gpi-dma - qcom,qdu1000-gpi-dma + - qcom,sar2130p-gpi-dma - qcom,sc7280-gpi-dma - qcom,sdx75-gpi-dma - qcom,sm6115-gpi-dma diff --git a/Documentation/devicetree/bindings/dma/renesas,rz-dmac.yaml b/Documentation/devicetree/bindings/dma/renesas,rz-dmac.yaml index ca24cf48769f..b356251de5a8 100644 --- a/Documentation/devicetree/bindings/dma/renesas,rz-dmac.yaml +++ b/Documentation/devicetree/bindings/dma/renesas,rz-dmac.yaml @@ -4,18 +4,16 @@ $id: http://devicetree.org/schemas/dma/renesas,rz-dmac.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Renesas RZ/{G2L,G2UL,V2L} DMA Controller +title: Renesas RZ DMA Controller maintainers: - Biju Das <biju.das.jz@bp.renesas.com> -allOf: - - $ref: dma-controller.yaml# - properties: compatible: items: - enum: + - renesas,r7s72100-dmac # RZ/A1H - renesas,r9a07g043-dmac # RZ/G2UL and RZ/Five - renesas,r9a07g044-dmac # RZ/G2{L,LC} - renesas,r9a07g054-dmac # RZ/V2L @@ -93,13 +91,26 @@ required: - reg - interrupts - interrupt-names - - clocks - - clock-names - '#dma-cells' - dma-channels - - power-domains - - resets - - reset-names + +allOf: + - $ref: dma-controller.yaml# + + - if: + not: + properties: + compatible: + contains: + enum: + - renesas,r7s72100-dmac + then: + required: + - clocks + - clock-names + - power-domains + - resets + - reset-names additionalProperties: false diff --git a/Documentation/devicetree/bindings/dma/sifive,fu540-c000-pdma.yaml b/Documentation/devicetree/bindings/dma/sifive,fu540-c000-pdma.yaml index 3b22183a1a37..609e38901434 100644 --- a/Documentation/devicetree/bindings/dma/sifive,fu540-c000-pdma.yaml +++ b/Documentation/devicetree/bindings/dma/sifive,fu540-c000-pdma.yaml @@ -27,11 +27,16 @@ allOf: properties: compatible: - items: - - enum: - - microchip,mpfs-pdma - - sifive,fu540-c000-pdma - - const: sifive,pdma0 + oneOf: + - items: + - const: microchip,pic64gx-pdma + - const: microchip,mpfs-pdma + - const: sifive,pdma0 + - items: + - enum: + - microchip,mpfs-pdma + - sifive,fu540-c000-pdma + - const: sifive,pdma0 description: Should be "sifive,<chip>-pdma" and "sifive,pdma<version>". 
Supported compatible strings are - diff --git a/Documentation/devicetree/bindings/dma/stm32/st,stm32-dma3.yaml b/Documentation/devicetree/bindings/dma/stm32/st,stm32-dma3.yaml index 7fdc44b2e646..36f9fe860eb9 100644 --- a/Documentation/devicetree/bindings/dma/stm32/st,stm32-dma3.yaml +++ b/Documentation/devicetree/bindings/dma/stm32/st,stm32-dma3.yaml @@ -96,6 +96,12 @@ properties: including the update of the LLI if any 0x3: at channel level, the transfer complete event is generated at the end of the last LLI + -bit 16: Prevent packing/unpacking mode + 0x0: pack/unpack enabled when source data width/burst != destination data width/burst + 0x1: memory data width/burst forced to peripheral data width/burst to prevent pack/unpack + -bit 17: Prevent additional transfers due to linked-list refactoring + 0x0: don't prevent additional transfers for optimal performance + 0x1: prevent additional transfer to accommodate user constraints such as single transfer required: - compatible diff --git a/Documentation/devicetree/bindings/opp/operating-points-v2-ti-cpu.yaml b/Documentation/devicetree/bindings/opp/operating-points-v2-ti-cpu.yaml index fd0c8d5c5f3e..624d1f3f1382 100644 --- a/Documentation/devicetree/bindings/opp/operating-points-v2-ti-cpu.yaml +++ b/Documentation/devicetree/bindings/opp/operating-points-v2-ti-cpu.yaml @@ -45,7 +45,25 @@ patternProperties: clock-latency-ns: true opp-hz: true opp-microvolt: true - opp-supported-hw: true + opp-supported-hw: + items: + items: + - description: + The revision of the SoC the OPP is supported by. + This can be easily obtained from the datasheet of the + part being ordered/used. For example, it will be 0x01 for SR1.0 + + - description: + The eFuse bits that indicate the particular OPP is available. + The device datasheet has a table talking about Device Speed Grades. + This table is to be sorted with only the unique elements of the + MAXIMUM OPERATING FREQUENCY starting from the first row which + tells the lowest OPP, to the highest. The corresponding bits + need to be set based on N elements of speed grade the device supports. + So, if there are 3 possible unique MAXIMUM OPERATING FREQUENCY + in the table, then BIT(0) | (1) | (2) will be set, which means + the value shall be 0x7. 
+ opp-suspend: true turbo-mode: true diff --git a/Documentation/devicetree/bindings/phy/bcm-ns-usb2-phy.yaml b/Documentation/devicetree/bindings/phy/bcm-ns-usb2-phy.yaml index 426101530a21..d72c02ab55ae 100644 --- a/Documentation/devicetree/bindings/phy/bcm-ns-usb2-phy.yaml +++ b/Documentation/devicetree/bindings/phy/bcm-ns-usb2-phy.yaml @@ -18,16 +18,8 @@ properties: const: brcm,ns-usb2-phy reg: - anyOf: - - maxItems: 1 - description: PHY control register - - maxItems: 1 - description: iomem address range of DMU (Device Management Unit) - deprecated: true - - reg-names: - items: - - const: dmu + maxItems: 1 + description: PHY control register brcm,syscon-clkset: description: phandle to syscon for clkset register @@ -50,12 +42,7 @@ required: - clocks - clock-names - "#phy-cells" - -oneOf: - - required: - - brcm,syscon-clkset - - required: - - reg-names + - brcm,syscon-clkset additionalProperties: false diff --git a/Documentation/devicetree/bindings/phy/fsl,mxs-usbphy.yaml b/Documentation/devicetree/bindings/phy/fsl,mxs-usbphy.yaml index ce665a2779b7..d01b7d187040 100644 --- a/Documentation/devicetree/bindings/phy/fsl,mxs-usbphy.yaml +++ b/Documentation/devicetree/bindings/phy/fsl,mxs-usbphy.yaml @@ -32,6 +32,7 @@ properties: - enum: - fsl,imx8dxl-usbphy - fsl,imx8qm-usbphy + - fsl,imx8qxp-usbphy - fsl,imx8ulp-usbphy - const: fsl,imx7ulp-usbphy diff --git a/Documentation/devicetree/bindings/phy/mediatek,tphy.yaml b/Documentation/devicetree/bindings/phy/mediatek,tphy.yaml index 423b7c4e62f2..6be3aa4557e5 100644 --- a/Documentation/devicetree/bindings/phy/mediatek,tphy.yaml +++ b/Documentation/devicetree/bindings/phy/mediatek,tphy.yaml @@ -125,6 +125,16 @@ properties: $ref: /schemas/types.yaml#/definitions/uint32 default: 28 + power-domains: + description: + The TPHY of MediaTek should exist within a power domain. The + developer should be aware that the hardware design of MediaTek TPHY + does not require the addition of MTCMOS. If the power to the TPHY + is turned off, it will impact other functions. From the current + perspective of USB hardware design, even if MTCMOS is added to the + TPHY, it should remain always on. + maxItems: 1 + # Required child node: patternProperties: "^(usb|pcie|sata)-phy@[0-9a-f]+$": diff --git a/Documentation/devicetree/bindings/phy/microchip,sparx5-serdes.yaml b/Documentation/devicetree/bindings/phy/microchip,sparx5-serdes.yaml index bdbdb3bbddbe..fa0b02916dac 100644 --- a/Documentation/devicetree/bindings/phy/microchip,sparx5-serdes.yaml +++ b/Documentation/devicetree/bindings/phy/microchip,sparx5-serdes.yaml @@ -8,6 +8,7 @@ title: Microchip Sparx5 Serdes controller maintainers: - Steen Hegelund <steen.hegelund@microchip.com> + - Daniel Machon <daniel.machon@microchip.com> description: | The Sparx5 SERDES interfaces share the same basic functionality, but @@ -62,12 +63,26 @@ description: | * 10.3125 Gbps (10GBASE-R/10GBASE-KR/USXGMII) * 25.78125 Gbps (25GBASE-KR/25GBASE-CR/25GBASE-SR/25GBASE-LR/25GBASE-ER) + lan969x has ten SERDES10G interfaces that share the same features, operating + modes and data rates as the equivalent Sparx5 SERDES10G interfaces. 
+ properties: $nodename: pattern: "^serdes@[0-9a-f]+$" compatible: - const: microchip,sparx5-serdes + oneOf: + - enum: + - microchip,sparx5-serdes + - microchip,lan9691-serdes + - items: + - enum: + - microchip,lan9698-serdes + - microchip,lan9696-serdes + - microchip,lan9694-serdes + - microchip,lan9693-serdes + - microchip,lan9692-serdes + - const: microchip,lan9691-serdes reg: minItems: 1 diff --git a/Documentation/devicetree/bindings/phy/nxp,ptn3222.yaml b/Documentation/devicetree/bindings/phy/nxp,ptn3222.yaml new file mode 100644 index 000000000000..acec5bb2391d --- /dev/null +++ b/Documentation/devicetree/bindings/phy/nxp,ptn3222.yaml @@ -0,0 +1,55 @@ +# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/phy/nxp,ptn3222.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: NXP PTN3222 1-port eUSB2 to USB2 redriver + +maintainers: + - Dmitry Baryshkov <dmitry.baryshkov@linaro.org> + +properties: + compatible: + enum: + - nxp,ptn3222 + + reg: + maxItems: 1 + + "#phy-cells": + const: 0 + + vdd1v8-supply: + description: power supply (1.8V) + + vdd3v3-supply: + description: power supply (3.3V) + + reset-gpios: true + +required: + - compatible + - reg + - '#phy-cells' + +additionalProperties: false + +examples: + - | + #include <dt-bindings/gpio/gpio.h> + + i2c { + #address-cells = <1>; + #size-cells = <0>; + + redriver@4f { + compatible = "nxp,ptn3222"; + reg = <0x4f>; + #phy-cells = <0>; + vdd3v3-supply = <&vreg_3p3>; + vdd1v8-supply = <&vreg_1p8>; + reset-gpios = <&gpio_reset GPIO_ACTIVE_LOW>; + }; + }; +... diff --git a/Documentation/devicetree/bindings/phy/phy-cadence-sierra.yaml b/Documentation/devicetree/bindings/phy/phy-cadence-sierra.yaml index 37f028f7a095..137ac5703853 100644 --- a/Documentation/devicetree/bindings/phy/phy-cadence-sierra.yaml +++ b/Documentation/devicetree/bindings/phy/phy-cadence-sierra.yaml @@ -96,7 +96,7 @@ patternProperties: Specifies the type of PHY for which the group of PHY lanes is used. Refer include/dt-bindings/phy/phy.h. Constants from the header should be used. $ref: /schemas/types.yaml#/definitions/uint32 - enum: [2, 4] + enum: [2, 4, 8, 9] cdns,num-lanes: description: diff --git a/Documentation/devicetree/bindings/phy/phy-rockchip-inno-hdmi.txt b/Documentation/devicetree/bindings/phy/phy-rockchip-inno-hdmi.txt deleted file mode 100644 index 710cccd5ee56..000000000000 --- a/Documentation/devicetree/bindings/phy/phy-rockchip-inno-hdmi.txt +++ /dev/null @@ -1,43 +0,0 @@ -ROCKCHIP HDMI PHY WITH INNO IP BLOCK - -Required properties: - - compatible : should be one of the listed compatibles: - * "rockchip,rk3228-hdmi-phy", - * "rockchip,rk3328-hdmi-phy"; - - reg : Address and length of the hdmi phy control register set - - clocks : phandle + clock specifier for the phy clocks - - clock-names : string, clock name, must contain "sysclk" for system - control and register configuration, "refoclk" for crystal- - oscillator reference PLL clock input and "refpclk" for pclk- - based refeference PLL clock input. - - #clock-cells: should be 0. - - clock-output-names : shall be the name for the output clock. - - interrupts : phandle + interrupt specified for the hdmiphy interrupt - - #phy-cells : must be 0. See ./phy-bindings.txt for details. 
- -Optional properties for rk3328-hdmi-phy: - - nvmem-cells = phandle + nvmem specifier for the cpu-version efuse - - nvmem-cell-names : "cpu-version" to read the chip version, required - for adjustment to some frequency settings - -Example: - hdmi_phy: hdmi-phy@12030000 { - compatible = "rockchip,rk3228-hdmi-phy"; - reg = <0x12030000 0x10000>; - #phy-cells = <0>; - clocks = <&cru PCLK_HDMI_PHY>, <&xin24m>, <&cru DCLK_HDMIPHY>; - clock-names = "sysclk", "refoclk", "refpclk"; - #clock-cells = <0>; - clock-output-names = "hdmi_phy"; - status = "disabled"; - }; - -Then the PHY can be used in other nodes such as: - - hdmi: hdmi@200a0000 { - compatible = "rockchip,rk3228-dw-hdmi"; - ... - phys = <&hdmi_phy>; - phy-names = "hdmi"; - ... - }; diff --git a/Documentation/devicetree/bindings/phy/phy-rockchip-usbdp.yaml b/Documentation/devicetree/bindings/phy/phy-rockchip-usbdp.yaml index 1f1f8863b80d..b42f1272903d 100644 --- a/Documentation/devicetree/bindings/phy/phy-rockchip-usbdp.yaml +++ b/Documentation/devicetree/bindings/phy/phy-rockchip-usbdp.yaml @@ -13,6 +13,7 @@ maintainers: properties: compatible: enum: + - rockchip,rk3576-usbdp-phy - rockchip,rk3588-usbdp-phy reg: diff --git a/Documentation/devicetree/bindings/phy/qcom,edp-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,edp-phy.yaml index 4e15d90d08b0..293fb6a9b1c3 100644 --- a/Documentation/devicetree/bindings/phy/qcom,edp-phy.yaml +++ b/Documentation/devicetree/bindings/phy/qcom,edp-phy.yaml @@ -17,6 +17,7 @@ description: properties: compatible: enum: + - qcom,sa8775p-edp-phy - qcom,sc7280-edp-phy - qcom,sc8180x-edp-phy - qcom,sc8280xp-dp-phy diff --git a/Documentation/devicetree/bindings/phy/qcom,sa8775p-dwmac-sgmii-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,sa8775p-dwmac-sgmii-phy.yaml index b9107759b2a5..90fc8c039219 100644 --- a/Documentation/devicetree/bindings/phy/qcom,sa8775p-dwmac-sgmii-phy.yaml +++ b/Documentation/devicetree/bindings/phy/qcom,sa8775p-dwmac-sgmii-phy.yaml @@ -15,7 +15,12 @@ description: properties: compatible: - const: qcom,sa8775p-dwmac-sgmii-phy + oneOf: + - items: + - enum: + - qcom,qcs8300-dwmac-sgmii-phy + - const: qcom,sa8775p-dwmac-sgmii-phy + - const: qcom,sa8775p-dwmac-sgmii-phy reg: items: diff --git a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml index 380a9222a51d..13fdf5f1beba 100644 --- a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml +++ b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml @@ -41,6 +41,7 @@ properties: - qcom,x1e80100-qmp-gen3x2-pcie-phy - qcom,x1e80100-qmp-gen4x2-pcie-phy - qcom,x1e80100-qmp-gen4x4-pcie-phy + - qcom,x1e80100-qmp-gen4x8-pcie-phy reg: minItems: 1 @@ -172,6 +173,7 @@ allOf: - qcom,x1e80100-qmp-gen3x2-pcie-phy - qcom,x1e80100-qmp-gen4x2-pcie-phy - qcom,x1e80100-qmp-gen4x4-pcie-phy + - qcom,x1e80100-qmp-gen4x8-pcie-phy then: properties: clocks: @@ -202,6 +204,7 @@ allOf: - qcom,sm8650-qmp-gen4x2-pcie-phy - qcom,x1e80100-qmp-gen4x2-pcie-phy - qcom,x1e80100-qmp-gen4x4-pcie-phy + - qcom,x1e80100-qmp-gen4x8-pcie-phy then: properties: resets: diff --git a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-ufs-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-ufs-phy.yaml index f9cfbd0b2de6..72bed2933b03 100644 --- a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-ufs-phy.yaml +++ b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-ufs-phy.yaml @@ -15,26 +15,35 @@ description: 
properties: compatible: - enum: - - qcom,msm8996-qmp-ufs-phy - - qcom,msm8998-qmp-ufs-phy - - qcom,sa8775p-qmp-ufs-phy - - qcom,sc7180-qmp-ufs-phy - - qcom,sc7280-qmp-ufs-phy - - qcom,sc8180x-qmp-ufs-phy - - qcom,sc8280xp-qmp-ufs-phy - - qcom,sdm845-qmp-ufs-phy - - qcom,sm6115-qmp-ufs-phy - - qcom,sm6125-qmp-ufs-phy - - qcom,sm6350-qmp-ufs-phy - - qcom,sm7150-qmp-ufs-phy - - qcom,sm8150-qmp-ufs-phy - - qcom,sm8250-qmp-ufs-phy - - qcom,sm8350-qmp-ufs-phy - - qcom,sm8450-qmp-ufs-phy - - qcom,sm8475-qmp-ufs-phy - - qcom,sm8550-qmp-ufs-phy - - qcom,sm8650-qmp-ufs-phy + oneOf: + - items: + - enum: + - qcom,qcs615-qmp-ufs-phy + - const: qcom,sm6115-qmp-ufs-phy + - items: + - enum: + - qcom,qcs8300-qmp-ufs-phy + - const: qcom,sa8775p-qmp-ufs-phy + - enum: + - qcom,msm8996-qmp-ufs-phy + - qcom,msm8998-qmp-ufs-phy + - qcom,sa8775p-qmp-ufs-phy + - qcom,sc7180-qmp-ufs-phy + - qcom,sc7280-qmp-ufs-phy + - qcom,sc8180x-qmp-ufs-phy + - qcom,sc8280xp-qmp-ufs-phy + - qcom,sdm845-qmp-ufs-phy + - qcom,sm6115-qmp-ufs-phy + - qcom,sm6125-qmp-ufs-phy + - qcom,sm6350-qmp-ufs-phy + - qcom,sm7150-qmp-ufs-phy + - qcom,sm8150-qmp-ufs-phy + - qcom,sm8250-qmp-ufs-phy + - qcom,sm8350-qmp-ufs-phy + - qcom,sm8450-qmp-ufs-phy + - qcom,sm8475-qmp-ufs-phy + - qcom,sm8550-qmp-ufs-phy + - qcom,sm8650-qmp-ufs-phy reg: maxItems: 1 diff --git a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb3-uni-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb3-uni-phy.yaml index 0e0b6cae07bc..baf5134ea3d8 100644 --- a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb3-uni-phy.yaml +++ b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb3-uni-phy.yaml @@ -20,6 +20,7 @@ properties: - qcom,ipq8074-qmp-usb3-phy - qcom,ipq9574-qmp-usb3-phy - qcom,msm8996-qmp-usb3-phy + - qcom,qcs8300-qmp-usb3-uni-phy - qcom,qdu1000-qmp-usb3-uni-phy - qcom,sa8775p-qmp-usb3-uni-phy - qcom,sc8180x-qmp-usb3-uni-phy @@ -111,6 +112,7 @@ allOf: compatible: contains: enum: + - qcom,qcs8300-qmp-usb3-uni-phy - qcom,qdu1000-qmp-usb3-uni-phy - qcom,sa8775p-qmp-usb3-uni-phy - qcom,sc8180x-qmp-usb3-uni-phy diff --git a/Documentation/devicetree/bindings/phy/qcom,snps-eusb2-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,snps-eusb2-phy.yaml index b82f7f5731ed..142b3c8839d6 100644 --- a/Documentation/devicetree/bindings/phy/qcom,snps-eusb2-phy.yaml +++ b/Documentation/devicetree/bindings/phy/qcom,snps-eusb2-phy.yaml @@ -17,6 +17,7 @@ properties: oneOf: - items: - enum: + - qcom,sar2130p-snps-eusb2-phy - qcom,sdx75-snps-eusb2-phy - qcom,sm8650-snps-eusb2-phy - qcom,x1e80100-snps-eusb2-phy diff --git a/Documentation/devicetree/bindings/phy/qcom,usb-snps-femto-v2.yaml b/Documentation/devicetree/bindings/phy/qcom,usb-snps-femto-v2.yaml index 519c2b403f66..661759b25064 100644 --- a/Documentation/devicetree/bindings/phy/qcom,usb-snps-femto-v2.yaml +++ b/Documentation/devicetree/bindings/phy/qcom,usb-snps-femto-v2.yaml @@ -22,6 +22,7 @@ properties: - const: qcom,usb-snps-hs-5nm-phy - items: - enum: + - qcom,qcs8300-usb-hs-phy - qcom,qdu1000-usb-hs-phy - qcom,sc7280-usb-hs-phy - qcom,sc8180x-usb-hs-phy diff --git a/Documentation/devicetree/bindings/phy/rockchip,inno-usb2phy.yaml b/Documentation/devicetree/bindings/phy/rockchip,inno-usb2phy.yaml index 5254413137c6..6a7ef556414c 100644 --- a/Documentation/devicetree/bindings/phy/rockchip,inno-usb2phy.yaml +++ b/Documentation/devicetree/bindings/phy/rockchip,inno-usb2phy.yaml @@ -20,6 +20,7 @@ properties: - rockchip,rk3366-usb2phy - rockchip,rk3399-usb2phy - rockchip,rk3568-usb2phy + 
- rockchip,rk3576-usb2phy - rockchip,rk3588-usb2phy - rockchip,rv1108-usb2phy @@ -34,10 +35,15 @@ properties: const: 0 clocks: - maxItems: 1 + minItems: 1 + maxItems: 3 clock-names: - const: phyclk + minItems: 1 + items: + - const: phyclk + - const: aclk + - const: aclk_slv assigned-clocks: description: @@ -172,6 +178,41 @@ allOf: - interrupts - interrupt-names + - if: + properties: + compatible: + contains: + enum: + - rockchip,px30-usb2phy + - rockchip,rk3128-usb2phy + - rockchip,rk3228-usb2phy + - rockchip,rk3308-usb2phy + - rockchip,rk3328-usb2phy + - rockchip,rk3366-usb2phy + - rockchip,rk3399-usb2phy + - rockchip,rk3568-usb2phy + - rockchip,rk3588-usb2phy + - rockchip,rv1108-usb2phy + then: + properties: + clocks: + maxItems: 1 + clock-names: + maxItems: 1 + + - if: + properties: + compatible: + contains: + enum: + - rockchip,rk3576-usb2phy + then: + properties: + clocks: + minItems: 3 + clock-names: + minItems: 3 + additionalProperties: false examples: diff --git a/Documentation/devicetree/bindings/phy/rockchip,rk3228-hdmi-phy.yaml b/Documentation/devicetree/bindings/phy/rockchip,rk3228-hdmi-phy.yaml new file mode 100644 index 000000000000..ac15bf857ef9 --- /dev/null +++ b/Documentation/devicetree/bindings/phy/rockchip,rk3228-hdmi-phy.yaml @@ -0,0 +1,97 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/phy/rockchip,rk3228-hdmi-phy.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Rockchip HDMI PHY with Innosilicon IP block + +maintainers: + - Heiko Stuebner <heiko@sntech.de> + +properties: + compatible: + enum: + - rockchip,rk3228-hdmi-phy + - rockchip,rk3328-hdmi-phy + + reg: + maxItems: 1 + + clocks: + maxItems: 3 + + clock-names: + items: + - const: sysclk + - const: refoclk + - const: refpclk + + clock-output-names: + description: + The hdmiphy output clock name, that gets fed back to the CRU. 
+ + "#clock-cells": + const: 0 + + interrupts: + maxItems: 1 + + nvmem-cells: + maxItems: 1 + description: A phandle + nvmem specifier for the cpu-version efuse + for adjustment to some frequency settings, depending on cpu-version + + nvmem-cell-names: + items: + - const: cpu-version + + '#phy-cells': + const: 0 + +required: + - compatible + - reg + - clocks + - clock-names + - clock-output-names + - '#clock-cells' + - '#phy-cells' + +allOf: + - if: + properties: + compatible: + contains: + const: rockchip,rk3228-hdmi-phy + + then: + properties: + interrupts: false + + - if: + properties: + compatible: + contains: + const: rockchip,rk3328-hdmi-phy + + then: + required: + - interrupts + +additionalProperties: false + +examples: + - | + + #include <dt-bindings/clock/rk3228-cru.h> + hdmi_phy: phy@12030000 { + compatible = "rockchip,rk3228-hdmi-phy"; + reg = <0x12030000 0x10000>; + #phy-cells = <0>; + clocks = <&cru PCLK_HDMI_PHY>, <&xin24m>, <&cru DCLK_HDMI_PHY>; + clock-names = "sysclk", "refoclk", "refpclk"; + #clock-cells = <0>; + + clock-output-names = "hdmi_phy"; + }; diff --git a/Documentation/devicetree/bindings/phy/st,stm32mp25-combophy.yaml b/Documentation/devicetree/bindings/phy/st,stm32mp25-combophy.yaml new file mode 100644 index 000000000000..a2e82c0bb56b --- /dev/null +++ b/Documentation/devicetree/bindings/phy/st,stm32mp25-combophy.yaml @@ -0,0 +1,119 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/phy/st,stm32mp25-combophy.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: STMicroelectronics STM32MP25 USB3/PCIe COMBOPHY + +maintainers: + - Christian Bruel <christian.bruel@foss.st.com> + +description: + Single lane PHY shared (exclusive) between the USB3 and PCIe controllers. + Supports 5Gbit/s for USB3 and PCIe gen2 or 2.5Gbit/s for PCIe gen1. + +properties: + compatible: + const: st,stm32mp25-combophy + + reg: + maxItems: 1 + + "#phy-cells": + const: 1 + + clocks: + minItems: 2 + items: + - description: apb Bus clock mandatory to access registers. + - description: ker Internal RCC reference clock for USB3 or PCIe + - description: pad Optional on board clock input for PCIe only. Typically an + external 100Mhz oscillator wired on dedicated CLKIN pad. Used as reference + clock input instead of the ker + + clock-names: + minItems: 2 + items: + - const: apb + - const: ker + - const: pad + + resets: + maxItems: 1 + + reset-names: + const: phy + + power-domains: + maxItems: 1 + + wakeup-source: true + + interrupts: + maxItems: 1 + description: interrupt used for wakeup + + access-controllers: + maxItems: 1 + description: Phandle to the rifsc device to check access right. + + st,ssc-on: + $ref: /schemas/types.yaml#/definitions/flag + description: + A property whose presence indicates that the Spread Spectrum Clocking is active. + + st,rx-equalizer: + $ref: /schemas/types.yaml#/definitions/uint32 + minimum: 0 + maximum: 7 + default: 2 + description: + A 3 bit value to tune the RX fixed equalizer setting for optimal eye compliance + + st,output-micro-ohms: + minimum: 3999000 + maximum: 6090000 + default: 4968000 + description: + A value property to tune the Single Ended Output Impedance, simulations results + at 25C for a VDDP=0.8V. The hardware accepts discrete values in this range. + + st,output-vswing-microvolt: + minimum: 442000 + maximum: 803000 + default: 803000 + description: + A value property in microvolt to tune the Single Ended Output Voltage Swing to change the + Vlo, Vhi for a VDDP = 0.8V. 
The hardware accepts discrete values in this range. + +required: + - compatible + - reg + - "#phy-cells" + - clocks + - clock-names + - resets + - reset-names + +additionalProperties: false + +examples: + - | + #include <dt-bindings/clock/st,stm32mp25-rcc.h> + #include <dt-bindings/interrupt-controller/arm-gic.h> + #include <dt-bindings/reset/st,stm32mp25-rcc.h> + + phy@480c0000 { + compatible = "st,stm32mp25-combophy"; + reg = <0x480c0000 0x1000>; + #phy-cells = <1>; + clocks = <&rcc CK_BUS_USB3PCIEPHY>, <&rcc CK_KER_USB3PCIEPHY>; + clock-names = "apb", "ker"; + resets = <&rcc USB3PCIEPHY_R>; + reset-names = "phy"; + access-controllers = <&rifsc 67>; + power-domains = <&CLUSTER_PD>; + wakeup-source; + interrupts-extended = <&exti1 45 IRQ_TYPE_EDGE_FALLING>; + }; diff --git a/Documentation/devicetree/bindings/phy/ti,tcan104x-can.yaml b/Documentation/devicetree/bindings/phy/ti,tcan104x-can.yaml index 79dad3e89aa6..4a8c3829d85d 100644 --- a/Documentation/devicetree/bindings/phy/ti,tcan104x-can.yaml +++ b/Documentation/devicetree/bindings/phy/ti,tcan104x-can.yaml @@ -14,10 +14,15 @@ properties: pattern: "^can-phy" compatible: - enum: - - nxp,tjr1443 - - ti,tcan1042 - - ti,tcan1043 + oneOf: + - items: + - enum: + - microchip,ata6561 + - const: ti,tcan1042 + - enum: + - ti,tcan1042 + - ti,tcan1043 + - nxp,tjr1443 '#phy-cells': const: 0 diff --git a/Documentation/devicetree/bindings/power/reset/nvmem-reboot-mode.yaml b/Documentation/devicetree/bindings/power/reset/nvmem-reboot-mode.yaml index 627f8a6078c2..7f5f94673e9c 100644 --- a/Documentation/devicetree/bindings/power/reset/nvmem-reboot-mode.yaml +++ b/Documentation/devicetree/bindings/power/reset/nvmem-reboot-mode.yaml @@ -31,6 +31,10 @@ properties: allOf: - $ref: reboot-mode.yaml# +patternProperties: + "^mode-.*$": + maxItems: 1 + required: - compatible - nvmem-cells diff --git a/Documentation/devicetree/bindings/power/reset/qcom,pon.yaml b/Documentation/devicetree/bindings/power/reset/qcom,pon.yaml index fc8105a7b9b2..3da3d02a6690 100644 --- a/Documentation/devicetree/bindings/power/reset/qcom,pon.yaml +++ b/Documentation/devicetree/bindings/power/reset/qcom,pon.yaml @@ -54,6 +54,10 @@ required: - compatible - reg +patternProperties: + "^mode-.*$": + maxItems: 1 + unevaluatedProperties: false allOf: @@ -75,6 +79,9 @@ allOf: reg-names: items: - const: pon + else: + patternProperties: + "^mode-.*$": false # Special case for pm8941, which doesn't store reset mode - if: diff --git a/Documentation/devicetree/bindings/power/reset/reboot-mode.yaml b/Documentation/devicetree/bindings/power/reset/reboot-mode.yaml index ad0a0b95cec1..3ddac06cec72 100644 --- a/Documentation/devicetree/bindings/power/reset/reboot-mode.yaml +++ b/Documentation/devicetree/bindings/power/reset/reboot-mode.yaml @@ -28,13 +28,13 @@ description: | properties: mode-normal: - $ref: /schemas/types.yaml#/definitions/uint32 + $ref: /schemas/types.yaml#/definitions/uint32-array description: Default value to set on a reboot if no command was provided. 
patternProperties: "^mode-.*$": - $ref: /schemas/types.yaml#/definitions/uint32 + $ref: /schemas/types.yaml#/definitions/uint32-array additionalProperties: true diff --git a/Documentation/devicetree/bindings/power/reset/syscon-reboot-mode.yaml b/Documentation/devicetree/bindings/power/reset/syscon-reboot-mode.yaml index b6acff199cde..79ffc78b23ea 100644 --- a/Documentation/devicetree/bindings/power/reset/syscon-reboot-mode.yaml +++ b/Documentation/devicetree/bindings/power/reset/syscon-reboot-mode.yaml @@ -32,6 +32,10 @@ properties: allOf: - $ref: reboot-mode.yaml# +patternProperties: + "^mode-.*$": + maxItems: 1 + unevaluatedProperties: false required: diff --git a/Documentation/devicetree/bindings/power/reset/syscon-reboot.yaml b/Documentation/devicetree/bindings/power/reset/syscon-reboot.yaml index 75061124d9a8..19d3093e6cd2 100644 --- a/Documentation/devicetree/bindings/power/reset/syscon-reboot.yaml +++ b/Documentation/devicetree/bindings/power/reset/syscon-reboot.yaml @@ -31,6 +31,10 @@ properties: $ref: /schemas/types.yaml#/definitions/uint32 description: Offset in the register map for the reboot register (in bytes). + reg: + maxItems: 1 + description: Base address and size for the reboot register. + regmap: $ref: /schemas/types.yaml#/definitions/phandle deprecated: true @@ -45,9 +49,14 @@ properties: priority: default: 192 +oneOf: + - required: + - offset + - required: + - reg + required: - compatible - - offset additionalProperties: false diff --git a/Documentation/devicetree/bindings/power/supply/qcom,pmi8998-charger.yaml b/Documentation/devicetree/bindings/power/supply/qcom,pmi8998-charger.yaml index 277c47e048b6..a0f9d49ff8fb 100644 --- a/Documentation/devicetree/bindings/power/supply/qcom,pmi8998-charger.yaml +++ b/Documentation/devicetree/bindings/power/supply/qcom,pmi8998-charger.yaml @@ -60,7 +60,6 @@ examples: pmic { #address-cells = <1>; #size-cells = <0>; - #interrupt-cells = <4>; charger@1000 { compatible = "qcom,pmi8998-charger"; diff --git a/Documentation/devicetree/bindings/power/supply/sc27xx-fg.yaml b/Documentation/devicetree/bindings/power/supply/sc27xx-fg.yaml index 9495397c9269..c1de2c80291d 100644 --- a/Documentation/devicetree/bindings/power/supply/sc27xx-fg.yaml +++ b/Documentation/devicetree/bindings/power/supply/sc27xx-fg.yaml @@ -27,6 +27,11 @@ properties: battery-detect-gpios: maxItems: 1 + bat-detect-gpio: + maxItems: 1 + deprecated: true + description: use battery-detect-gpios instead + interrupts: maxItems: 1 diff --git a/Documentation/devicetree/bindings/power/supply/ti,twl6030-charger.yaml b/Documentation/devicetree/bindings/power/supply/ti,twl6030-charger.yaml new file mode 100644 index 000000000000..fc604d8a469d --- /dev/null +++ b/Documentation/devicetree/bindings/power/supply/ti,twl6030-charger.yaml @@ -0,0 +1,48 @@ +# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/power/supply/ti,twl6030-charger.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: TWL6030/32 BCI (Battery Charger Interface) + +description: + The battery charger needs to be configured to do any charging besides of + precharging. The GPADC in the PMIC has to be used to get the related + voltages. 
+ +maintainers: + - Andreas Kemnade <andreas@kemnade.info> + +allOf: + - $ref: power-supply.yaml# + +properties: + compatible: + oneOf: + - const: ti,twl6030-charger + - items: + - const: ti,twl6032-charger + - const: ti,twl6030-charger + + interrupts: + items: + - description: Charger Control Interrupt + - description: Charger Fault Interrupt + + io-channels: + items: + - description: VBUS Voltage Channel + + io-channel-names: + items: + - const: vusb + + monitored-battery: true + +required: + - compatible + - interrupts + - monitored-battery + +additionalProperties: false diff --git a/Documentation/devicetree/bindings/regulator/qcom-labibb-regulator.yaml b/Documentation/devicetree/bindings/regulator/qcom-labibb-regulator.yaml index e987c39b223e..83965076d6ab 100644 --- a/Documentation/devicetree/bindings/regulator/qcom-labibb-regulator.yaml +++ b/Documentation/devicetree/bindings/regulator/qcom-labibb-regulator.yaml @@ -16,7 +16,12 @@ description: properties: compatible: - const: qcom,pmi8998-lab-ibb + oneOf: + - const: qcom,pmi8998-lab-ibb + - items: + - enum: + - qcom,pmi8950-lab-ibb + - const: qcom,pmi8998-lab-ibb lab: type: object diff --git a/Documentation/devicetree/bindings/riscv/extensions.yaml b/Documentation/devicetree/bindings/riscv/extensions.yaml index 2cf2026cff57..af7e5237b2c0 100644 --- a/Documentation/devicetree/bindings/riscv/extensions.yaml +++ b/Documentation/devicetree/bindings/riscv/extensions.yaml @@ -128,6 +128,18 @@ properties: changes to interrupts as frozen at commit ccbddab ("Merge pull request #42 from riscv/jhauser-2023-RC4") of riscv-aia. + - const: smmpm + description: | + The standard Smmpm extension for M-mode pointer masking as + ratified at commit d70011dde6c2 ("Update to ratified state") + of riscv-j-extension. + + - const: smnpm + description: | + The standard Smnpm extension for next-mode pointer masking as + ratified at commit d70011dde6c2 ("Update to ratified state") + of riscv-j-extension. + - const: smstateen description: | The standard Smstateen extension for controlling access to CSRs @@ -147,6 +159,12 @@ properties: and mode-based filtering as ratified at commit 01d1df0 ("Add ability to manually trigger workflow. (#2)") of riscv-count-overflow. + - const: ssnpm + description: | + The standard Ssnpm extension for next-mode pointer masking as + ratified at commit d70011dde6c2 ("Update to ratified state") + of riscv-j-extension. + - const: sstc description: | The standard Sstc supervisor-level extension for time compare as @@ -178,6 +196,12 @@ properties: as ratified at commit 4a69197e5617 ("Update to ratified state") of riscv-svvptc. + - const: zabha + description: | + The Zabha extension for Byte and Halfword Atomic Memory Operations + as ratified at commit 49f49c842ff9 ("Update to Rafified state") of + riscv-zabha. + - const: zacas description: | The Zacas extension for Atomic Compare-and-Swap (CAS) instructions @@ -290,6 +314,12 @@ properties: in commit 64074bc ("Update version numbers for Zfh/Zfinx") of riscv-isa-manual. + - const: ziccrse + description: + The standard Ziccrse extension which provides forward progress + guarantee on LR/SC sequences, as ratified in commit b1d806605f87 + ("Updated to ratified state.") of the riscv profiles specification. 
+ - const: zk description: The standard Zk Standard Scalar cryptography extension as ratified diff --git a/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml b/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml index 18758efb8d29..f7be05641930 100644 --- a/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml +++ b/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml @@ -26,6 +26,7 @@ properties: - qcom,msm8998-dwc3 - qcom,qcm2290-dwc3 - qcom,qcs404-dwc3 + - qcom,qcs8300-dwc3 - qcom,qdu1000-dwc3 - qcom,sa8775p-dwc3 - qcom,sc7180-dwc3 @@ -201,6 +202,7 @@ allOf: - qcom,msm8953-dwc3 - qcom,msm8996-dwc3 - qcom,msm8998-dwc3 + - qcom,qcs8300-dwc3 - qcom,sa8775p-dwc3 - qcom,sc7180-dwc3 - qcom,sc7280-dwc3 @@ -465,6 +467,7 @@ allOf: - qcom,ipq4019-dwc3 - qcom,ipq8064-dwc3 - qcom,msm8994-dwc3 + - qcom,qcs8300-dwc3 - qcom,qdu1000-dwc3 - qcom,sa8775p-dwc3 - qcom,sc7180-dwc3 @@ -490,6 +493,7 @@ allOf: minItems: 4 maxItems: 5 interrupt-names: + minItems: 4 items: - const: pwr_event - const: hs_phy_irq diff --git a/Documentation/driver-api/driver-model/devres.rst b/Documentation/driver-api/driver-model/devres.rst index 568ed9714dbe..d594d0ea0e9d 100644 --- a/Documentation/driver-api/driver-model/devres.rst +++ b/Documentation/driver-api/driver-model/devres.rst @@ -458,7 +458,6 @@ SERDEV SLAVE DMA ENGINE devm_acpi_dma_controller_register() - devm_acpi_dma_controller_free() SPI devm_spi_alloc_host() diff --git a/Documentation/features/locking/queued-spinlocks/arch-support.txt b/Documentation/features/locking/queued-spinlocks/arch-support.txt index 22f2990392ff..cf26042480e2 100644 --- a/Documentation/features/locking/queued-spinlocks/arch-support.txt +++ b/Documentation/features/locking/queued-spinlocks/arch-support.txt @@ -20,7 +20,7 @@ | openrisc: | ok | | parisc: | TODO | | powerpc: | ok | - | riscv: | TODO | + | riscv: | ok | | s390: | TODO | | sh: | TODO | | sparc: | ok | diff --git a/Documentation/filesystems/mount_api.rst b/Documentation/filesystems/mount_api.rst index 317934c9e8fc..d92c276f1575 100644 --- a/Documentation/filesystems/mount_api.rst +++ b/Documentation/filesystems/mount_api.rst @@ -770,7 +770,8 @@ process the parameters it is given. * :: - bool fs_validate_description(const struct fs_parameter_description *desc); + bool fs_validate_description(const char *name, + const struct fs_parameter_description *desc); This performs some validation checks on a parameter description. It returns true if the description is good and false if it is not. It will diff --git a/Documentation/kbuild/llvm.rst b/Documentation/kbuild/llvm.rst index 6dc66b4f31a7..bc8a283bc44b 100644 --- a/Documentation/kbuild/llvm.rst +++ b/Documentation/kbuild/llvm.rst @@ -179,6 +179,9 @@ yet. Bug reports are always welcome at the issue tracker below! 
* - s390 - Maintained - ``LLVM=1`` (LLVM >= 18.1.0), ``CC=clang`` (LLVM < 18.1.0) + * - sparc (sparc64 only) + - Maintained + - ``CC=clang LLVM_IAS=0`` (LLVM >= 20) * - um (User Mode) - Maintained - ``LLVM=1`` diff --git a/Documentation/networking/cdc_mbim.rst b/Documentation/networking/cdc_mbim.rst index 37f968acc473..8404a3f794f3 100644 --- a/Documentation/networking/cdc_mbim.rst +++ b/Documentation/networking/cdc_mbim.rst @@ -51,7 +51,7 @@ Such userspace applications includes, but are not limited to: - mbimcli (included with the libmbim [3] library), and - ModemManager [4] -Establishing a MBIM IP session reequires at least these actions by the +Establishing a MBIM IP session requires at least these actions by the management application: - open the control channel diff --git a/Documentation/networking/tls-offload.rst b/Documentation/networking/tls-offload.rst index 5f0dea3d571e..7354d48cdf92 100644 --- a/Documentation/networking/tls-offload.rst +++ b/Documentation/networking/tls-offload.rst @@ -51,7 +51,7 @@ and send them to the device for encryption and transmission. RX -- -On the receive side if the device handled decryption and authentication +On the receive side, if the device handled decryption and authentication successfully, the driver will set the decrypted bit in the associated :c:type:`struct sk_buff <sk_buff>`. The packets reach the TCP stack and are handled normally. ``ktls`` is informed when data is queued to the socket @@ -120,8 +120,9 @@ before installing the connection state in the kernel. RX -- -In RX direction local networking stack has little control over the segmentation, -so the initial records' TCP sequence number may be anywhere inside the segment. +In the RX direction, the local networking stack has little control over +segmentation, so the initial records' TCP sequence number may be anywhere +inside the segment. Normal operation ================ @@ -138,8 +139,8 @@ There are no guarantees on record length or record segmentation. In particular segments may start at any point of a record and contain any number of records. Assuming segments are received in order, the device should be able to perform crypto operations and authentication regardless of segmentation. For this -to be possible device has to keep small amount of segment-to-segment state. -This includes at least: +to be possible, the device has to keep a small amount of segment-to-segment +state. This includes at least: * partial headers (if a segment carried only a part of the TLS header) * partial data block @@ -175,12 +176,12 @@ and packet transformation functions) the device validates the Layer 4 checksum and performs a 5-tuple lookup to find any TLS connection the packet may belong to (technically a 4-tuple lookup is sufficient - IP addresses and TCP port numbers, as the protocol -is always TCP). If connection is matched device confirms if the TCP sequence -number is the expected one and proceeds to TLS handling (record delineation, -decryption, authentication for each record in the packet). The device leaves -the record framing unmodified, the stack takes care of record decapsulation. -Device indicates successful handling of TLS offload in the per-packet context -(descriptor) passed to the host. +is always TCP). If the packet is matched to a connection, the device confirms +if the TCP sequence number is the expected one and proceeds to TLS handling +(record delineation, decryption, authentication for each record in the packet). 
+The device leaves the record framing unmodified, the stack takes care of record +decapsulation. Device indicates successful handling of TLS offload in the +per-packet context (descriptor) passed to the host. Upon reception of a TLS offloaded packet, the driver sets the :c:member:`decrypted` mark in :c:type:`struct sk_buff <sk_buff>` @@ -439,7 +440,7 @@ by the driver: * ``rx_tls_resync_req_end`` - number of times the TLS async resync request properly ended with providing the HW tracked tcp-seq. * ``rx_tls_resync_req_skip`` - number of times the TLS async resync request - procedure was started by not properly ended. + procedure was started but not properly ended. * ``rx_tls_resync_res_ok`` - number of times the TLS resync response call to the driver was successfully handled. * ``rx_tls_resync_res_skip`` - number of times the TLS resync response call to @@ -507,8 +508,8 @@ in packets as seen on the wire. Transport layer transparency ---------------------------- -The device should not modify any packet headers for the purpose -of the simplifying TLS offload. +For the purpose of simplifying TLS offload, the device should not modify any +packet headers. The device should not depend on any packet headers beyond what is strictly necessary for TLS offload. diff --git a/Documentation/trace/ftrace.rst b/Documentation/trace/ftrace.rst index 74d5bd801b1a..272464bb7c60 100644 --- a/Documentation/trace/ftrace.rst +++ b/Documentation/trace/ftrace.rst @@ -1033,9 +1033,13 @@ explains which is which. irqs-off: 'd' interrupts are disabled. '.' otherwise. need-resched: + - 'B' all, TIF_NEED_RESCHED, PREEMPT_NEED_RESCHED and TIF_RESCHED_LAZY is set, - 'N' both TIF_NEED_RESCHED and PREEMPT_NEED_RESCHED is set, - 'n' only TIF_NEED_RESCHED is set, - 'p' only PREEMPT_NEED_RESCHED is set, + - 'L' both PREEMPT_NEED_RESCHED and TIF_RESCHED_LAZY is set, + - 'b' both TIF_NEED_RESCHED and TIF_RESCHED_LAZY is set, + - 'l' only TIF_RESCHED_LAZY is set - '.' otherwise. 
hardirq/softirq: diff --git a/MAINTAINERS b/MAINTAINERS index 85cdc618a51c..0456a33ef657 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -13408,12 +13408,12 @@ S: Maintained F: Documentation/devicetree/bindings/gpio/loongson,ls-gpio.yaml F: drivers/gpio/gpio-loongson-64bit.c -LOONGSON LS2X APB DMA DRIVER +LOONGSON-2 APB DMA DRIVER M: Binbin Zhou <zhoubinbin@loongson.cn> L: dmaengine@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/dma/loongson,ls2x-apbdma.yaml -F: drivers/dma/ls2x-apb-dma.c +F: drivers/dma/loongson2-apb-dma.c LOONGSON LS2X I2C DRIVER M: Binbin Zhou <zhoubinbin@loongson.cn> @@ -22428,6 +22428,12 @@ F: drivers/*/stm32-*timer* F: drivers/pwm/pwm-stm32* F: include/linux/*/stm32-*tim* +STM32MP25 USB3/PCIE COMBOPHY DRIVER +M: Christian Bruel <christian.bruel@foss.st.com> +S: Maintained +F: Documentation/devicetree/bindings/phy/st,stm32mp25-combophy.yaml +F: drivers/phy/st/phy-stm32-combophy.c + STMMAC ETHERNET DRIVER M: Alexandre Torgue <alexandre.torgue@foss.st.com> M: Jose Abreu <joabreu@synopsys.com> @@ -1610,7 +1610,6 @@ help: @echo ' with a stack size larger than MINSTACKSIZE (default: 100)' @echo ' versioncheck - Sanity check on version.h usage' @echo ' includecheck - Check for duplicate included header files' - @echo ' export_report - List the usages of all exported symbols' @echo ' headerdep - Detect inclusion cycles in headers' @echo ' coccicheck - Check with Coccinelle' @echo ' clang-analyzer - Check with clang static analyzer' @@ -2023,7 +2022,7 @@ endif # Scripts to check various things for consistency # --------------------------------------------------------------------------- -PHONY += includecheck versioncheck coccicheck export_report +PHONY += includecheck versioncheck coccicheck includecheck: find $(srctree)/* $(RCS_FIND_IGNORE) \ @@ -2038,9 +2037,6 @@ versioncheck: coccicheck: $(Q)$(BASH) $(srctree)/scripts/$@ -export_report: - $(PERL) $(srctree)/scripts/export_report.pl - PHONY += checkstack kernelrelease kernelversion image_name # UML needs a little special treatment here. It wants to use the host diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index 1dfae1af8e31..ef6a657c8d13 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -25,6 +25,7 @@ #include <asm/tls.h> #include <asm/system_info.h> #include <asm/uaccess-asm.h> +#include <asm/kasan_def.h> #include "entry-header.S" #include <asm/probes.h> @@ -561,6 +562,13 @@ ENTRY(__switch_to) @ entries covering the vmalloc region. 
@ ldr r2, [ip] +#ifdef CONFIG_KASAN_VMALLOC + @ Also dummy read from the KASAN shadow memory for the new stack if we + @ are using KASAN + mov_l r2, KASAN_SHADOW_OFFSET + add r2, r2, ip, lsr #KASAN_SHADOW_SCALE_SHIFT + ldr r2, [r2] +#endif #endif @ When CONFIG_THREAD_INFO_IN_TASK=n, the update of SP itself is what diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c index 794cfea9f9d4..89f1c97f3079 100644 --- a/arch/arm/mm/ioremap.c +++ b/arch/arm/mm/ioremap.c @@ -23,6 +23,7 @@ */ #include <linux/module.h> #include <linux/errno.h> +#include <linux/kasan.h> #include <linux/mm.h> #include <linux/vmalloc.h> #include <linux/io.h> @@ -115,16 +116,40 @@ int ioremap_page(unsigned long virt, unsigned long phys, } EXPORT_SYMBOL(ioremap_page); +#ifdef CONFIG_KASAN +static unsigned long arm_kasan_mem_to_shadow(unsigned long addr) +{ + return (unsigned long)kasan_mem_to_shadow((void *)addr); +} +#else +static unsigned long arm_kasan_mem_to_shadow(unsigned long addr) +{ + return 0; +} +#endif + +static void memcpy_pgd(struct mm_struct *mm, unsigned long start, + unsigned long end) +{ + end = ALIGN(end, PGDIR_SIZE); + memcpy(pgd_offset(mm, start), pgd_offset_k(start), + sizeof(pgd_t) * (pgd_index(end) - pgd_index(start))); +} + void __check_vmalloc_seq(struct mm_struct *mm) { int seq; do { - seq = atomic_read(&init_mm.context.vmalloc_seq); - memcpy(pgd_offset(mm, VMALLOC_START), - pgd_offset_k(VMALLOC_START), - sizeof(pgd_t) * (pgd_index(VMALLOC_END) - - pgd_index(VMALLOC_START))); + seq = atomic_read_acquire(&init_mm.context.vmalloc_seq); + memcpy_pgd(mm, VMALLOC_START, VMALLOC_END); + if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) { + unsigned long start = + arm_kasan_mem_to_shadow(VMALLOC_START); + unsigned long end = + arm_kasan_mem_to_shadow(VMALLOC_END); + memcpy_pgd(mm, start, end); + } /* * Use a store-release so that other CPUs that observe the * counter's new value are guaranteed to see the results of the diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c index b68efe643a12..d44867fc0c5e 100644 --- a/arch/arm/vfp/vfpmodule.c +++ b/arch/arm/vfp/vfpmodule.c @@ -56,6 +56,34 @@ extern unsigned int VFP_arch_feroceon __alias(VFP_arch); union vfp_state *vfp_current_hw_state[NR_CPUS]; /* + * Claim ownership of the VFP unit. + * + * The caller may change VFP registers until vfp_state_release() is called. + * + * local_bh_disable() is used to disable preemption and to disable VFP + * processing in softirq context. On PREEMPT_RT kernels local_bh_disable() is + * not sufficient because it only serializes soft interrupt related sections + * via a local lock, but stays preemptible. Disabling preemption is the right + * choice here as bottom half processing is always in thread context on RT + * kernels so it implicitly prevents bottom half processing as well. + */ +static void vfp_state_hold(void) +{ + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) + local_bh_disable(); + else + preempt_disable(); +} + +static void vfp_state_release(void) +{ + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) + local_bh_enable(); + else + preempt_enable(); +} + +/* * Is 'thread's most up to date state stored in this CPUs hardware? * Must be called from non-preemptible context. */ @@ -240,7 +268,7 @@ static void vfp_panic(char *reason, u32 inst) /* * Process bitmask of exception conditions. 
*/ -static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_regs *regs) +static int vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr) { int si_code = 0; @@ -248,8 +276,7 @@ static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_ if (exceptions == VFP_EXCEPTION_ERROR) { vfp_panic("unhandled bounce", inst); - vfp_raise_sigfpe(FPE_FLTINV, regs); - return; + return FPE_FLTINV; } /* @@ -277,8 +304,7 @@ static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_ RAISE(FPSCR_OFC, FPSCR_OFE, FPE_FLTOVF); RAISE(FPSCR_IOC, FPSCR_IOE, FPE_FLTINV); - if (si_code) - vfp_raise_sigfpe(si_code, regs); + return si_code; } /* @@ -324,6 +350,8 @@ static u32 vfp_emulate_instruction(u32 inst, u32 fpscr, struct pt_regs *regs) static void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs) { u32 fpscr, orig_fpscr, fpsid, exceptions; + int si_code2 = 0; + int si_code = 0; pr_debug("VFP: bounce: trigger %08x fpexc %08x\n", trigger, fpexc); @@ -369,8 +397,8 @@ static void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs) * unallocated VFP instruction but with FPSCR.IXE set and not * on VFP subarch 1. */ - vfp_raise_exceptions(VFP_EXCEPTION_ERROR, trigger, fpscr, regs); - return; + si_code = vfp_raise_exceptions(VFP_EXCEPTION_ERROR, trigger, fpscr); + goto exit; } /* @@ -394,14 +422,14 @@ static void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs) */ exceptions = vfp_emulate_instruction(trigger, fpscr, regs); if (exceptions) - vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs); + si_code2 = vfp_raise_exceptions(exceptions, trigger, orig_fpscr); /* * If there isn't a second FP instruction, exit now. Note that * the FPEXC.FP2V bit is valid only if FPEXC.EX is 1. */ if ((fpexc & (FPEXC_EX | FPEXC_FP2V)) != (FPEXC_EX | FPEXC_FP2V)) - return; + goto exit; /* * The barrier() here prevents fpinst2 being read @@ -413,7 +441,13 @@ static void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs) emulate: exceptions = vfp_emulate_instruction(trigger, orig_fpscr, regs); if (exceptions) - vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs); + si_code = vfp_raise_exceptions(exceptions, trigger, orig_fpscr); +exit: + vfp_state_release(); + if (si_code2) + vfp_raise_sigfpe(si_code2, regs); + if (si_code) + vfp_raise_sigfpe(si_code, regs); } static void vfp_enable(void *unused) @@ -512,11 +546,9 @@ static inline void vfp_pm_init(void) { } */ void vfp_sync_hwstate(struct thread_info *thread) { - unsigned int cpu = get_cpu(); + vfp_state_hold(); - local_bh_disable(); - - if (vfp_state_in_hw(cpu, thread)) { + if (vfp_state_in_hw(raw_smp_processor_id(), thread)) { u32 fpexc = fmrx(FPEXC); /* @@ -527,8 +559,7 @@ void vfp_sync_hwstate(struct thread_info *thread) fmxr(FPEXC, fpexc); } - local_bh_enable(); - put_cpu(); + vfp_state_release(); } /* Ensure that the thread reloads the hardware VFP state on the next use. */ @@ -683,7 +714,7 @@ static int vfp_support_entry(struct pt_regs *regs, u32 trigger) if (!user_mode(regs)) return vfp_kmode_exception(regs, trigger); - local_bh_disable(); + vfp_state_hold(); fpexc = fmrx(FPEXC); /* @@ -748,6 +779,7 @@ static int vfp_support_entry(struct pt_regs *regs, u32 trigger) * replay the instruction that trapped. 
*/ fmxr(FPEXC, fpexc); + vfp_state_release(); } else { /* Check for synchronous or asynchronous exceptions */ if (!(fpexc & (FPEXC_EX | FPEXC_DEX))) { @@ -762,17 +794,17 @@ static int vfp_support_entry(struct pt_regs *regs, u32 trigger) if (!(fpscr & FPSCR_IXE)) { if (!(fpscr & FPSCR_LENGTH_MASK)) { pr_debug("not VFP\n"); - local_bh_enable(); + vfp_state_release(); return -ENOEXEC; } fpexc |= FPEXC_DEX; } } bounce: regs->ARM_pc += 4; + /* VFP_bounce() will invoke vfp_state_release() */ VFP_bounce(trigger, fpexc, regs); } - local_bh_enable(); return 0; } @@ -837,7 +869,7 @@ void kernel_neon_begin(void) unsigned int cpu; u32 fpexc; - local_bh_disable(); + vfp_state_hold(); /* * Kernel mode NEON is only allowed outside of hardirq context with @@ -868,7 +900,7 @@ void kernel_neon_end(void) { /* Disable the NEON/VFP unit. */ fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN); - local_bh_enable(); + vfp_state_release(); } EXPORT_SYMBOL(kernel_neon_end); diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig index d9fce0fd475a..dae3a9104ca6 100644 --- a/arch/loongarch/Kconfig +++ b/arch/loongarch/Kconfig @@ -23,6 +23,7 @@ config LOONGARCH select ARCH_HAS_KERNEL_FPU_SUPPORT if CPU_HAS_FPU select ARCH_HAS_NMI_SAFE_THIS_CPU_OPS select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE + select ARCH_HAS_PREEMPT_LAZY select ARCH_HAS_PTE_DEVMAP select ARCH_HAS_PTE_SPECIAL select ARCH_HAS_SET_MEMORY @@ -66,6 +67,7 @@ config LOONGARCH select ARCH_SUPPORTS_LTO_CLANG select ARCH_SUPPORTS_LTO_CLANG_THIN select ARCH_SUPPORTS_NUMA_BALANCING + select ARCH_SUPPORTS_RT select ARCH_USE_BUILTIN_BSWAP select ARCH_USE_CMPXCHG_LOCKREF select ARCH_USE_QUEUED_RWLOCKS @@ -155,6 +157,7 @@ config LOONGARCH select HAVE_PERF_EVENTS select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP + select HAVE_POSIX_CPU_TIMERS_TASK_WORK select HAVE_PREEMPT_DYNAMIC_KEY select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_RELIABLE_STACKTRACE if UNWINDER_ORC diff --git a/arch/loongarch/Makefile b/arch/loongarch/Makefile index ae3f80622f4c..567bd122a9ee 100644 --- a/arch/loongarch/Makefile +++ b/arch/loongarch/Makefile @@ -59,7 +59,7 @@ endif ifdef CONFIG_64BIT ld-emul = $(64bit-emul) -cflags-y += -mabi=lp64s +cflags-y += -mabi=lp64s -mcmodel=normal endif cflags-y += -pipe $(CC_FLAGS_NO_FPU) @@ -104,7 +104,7 @@ ifdef CONFIG_OBJTOOL KBUILD_CFLAGS += -fno-jump-tables endif -KBUILD_RUSTFLAGS += --target=loongarch64-unknown-none-softfloat +KBUILD_RUSTFLAGS += --target=loongarch64-unknown-none-softfloat -Ccode-model=small KBUILD_RUSTFLAGS_KERNEL += -Zdirect-access-external-data=yes KBUILD_RUSTFLAGS_MODULE += -Zdirect-access-external-data=no diff --git a/arch/loongarch/boot/dts/loongson-2k1000.dtsi b/arch/loongarch/boot/dts/loongson-2k1000.dtsi index 92180140eb56..8dff2aa52417 100644 --- a/arch/loongarch/boot/dts/loongson-2k1000.dtsi +++ b/arch/loongarch/boot/dts/loongson-2k1000.dtsi @@ -266,7 +266,7 @@ status = "disabled"; }; - dma-controller@1fe00c20 { + apbdma2: dma-controller@1fe00c20 { compatible = "loongson,ls2k1000-apbdma"; reg = <0x0 0x1fe00c20 0x0 0x8>; interrupt-parent = <&liointc1>; @@ -276,7 +276,7 @@ status = "disabled"; }; - dma-controller@1fe00c30 { + apbdma3: dma-controller@1fe00c30 { compatible = "loongson,ls2k1000-apbdma"; reg = <0x0 0x1fe00c30 0x0 0x8>; interrupt-parent = <&liointc1>; @@ -352,6 +352,19 @@ status = "disabled"; }; + i2s: i2s@1fe2d000 { + compatible = "loongson,ls2k1000-i2s"; + reg = <0 0x1fe2d000 0 0x14>, + <0 0x1fe00438 0 0x8>; + interrupt-parent = <&liointc0>; + interrupts = <5 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk 
LOONGSON2_APB_CLK>; + dmas = <&apbdma2 0>, <&apbdma3 0>; + dma-names = "tx", "rx"; + #sound-dai-cells = <0>; + status = "disabled"; + }; + spi0: spi@1fff0220 { compatible = "loongson,ls2k1000-spi"; reg = <0x0 0x1fff0220 0x0 0x10>; diff --git a/arch/loongarch/boot/dts/loongson-2k2000.dtsi b/arch/loongarch/boot/dts/loongson-2k2000.dtsi index 0953c5707825..b4ff55a33e90 100644 --- a/arch/loongarch/boot/dts/loongson-2k2000.dtsi +++ b/arch/loongarch/boot/dts/loongson-2k2000.dtsi @@ -173,6 +173,22 @@ status = "disabled"; }; + i2c@1fe00120 { + compatible = "loongson,ls2k-i2c"; + reg = <0x0 0x1fe00120 0x0 0x8>; + interrupt-parent = <&liointc>; + interrupts = <8 IRQ_TYPE_LEVEL_HIGH>; + status = "disabled"; + }; + + i2c@1fe00130 { + compatible = "loongson,ls2k-i2c"; + reg = <0x0 0x1fe00130 0x0 0x8>; + interrupt-parent = <&liointc>; + interrupts = <9 IRQ_TYPE_LEVEL_HIGH>; + status = "disabled"; + }; + uart0: serial@1fe001e0 { compatible = "ns16550a"; reg = <0x0 0x1fe001e0 0x0 0x10>; @@ -243,9 +259,11 @@ status = "disabled"; }; - hda@7,0 { + i2s@7,0 { reg = <0x3800 0x0 0x0 0x0 0x0>; - interrupts = <58 IRQ_TYPE_LEVEL_HIGH>; + interrupts = <78 IRQ_TYPE_LEVEL_HIGH>, + <79 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "tx", "rx"; interrupt-parent = <&pic>; status = "disabled"; }; diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson3_defconfig index 75b366407a60..4dffc90192f7 100644 --- a/arch/loongarch/configs/loongson3_defconfig +++ b/arch/loongarch/configs/loongson3_defconfig @@ -1,4 +1,5 @@ # CONFIG_LOCALVERSION_AUTO is not set +CONFIG_KERNEL_ZSTD=y CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y CONFIG_NO_HZ=y @@ -70,6 +71,14 @@ CONFIG_ACPI_IPMI=m CONFIG_ACPI_HOTPLUG_CPU=y CONFIG_ACPI_PCI_SLOT=y CONFIG_ACPI_HOTPLUG_MEMORY=y +CONFIG_ACPI_BGRT=y +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y +CONFIG_LOONGSON3_CPUFREQ=m CONFIG_VIRTUALIZATION=y CONFIG_KVM=m CONFIG_JUMP_LABEL=y @@ -78,6 +87,9 @@ CONFIG_MODULE_FORCE_LOAD=y CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y CONFIG_MODVERSIONS=y +CONFIG_MODULE_COMPRESS=y +CONFIG_MODULE_COMPRESS_ZSTD=y +CONFIG_MODULE_DECOMPRESS=y CONFIG_BLK_DEV_ZONED=y CONFIG_BLK_DEV_THROTTLING=y CONFIG_BLK_WBT=y @@ -85,6 +97,8 @@ CONFIG_BLK_CGROUP_IOLATENCY=y CONFIG_BLK_CGROUP_FC_APPID=y CONFIG_BLK_CGROUP_IOCOST=y CONFIG_BLK_CGROUP_IOPRIO=y +CONFIG_BLK_INLINE_ENCRYPTION=y +CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y CONFIG_PARTITION_ADVANCED=y CONFIG_BSD_DISKLABEL=y CONFIG_UNIXWARE_DISKLABEL=y @@ -413,7 +427,16 @@ CONFIG_PARPORT_PC=y CONFIG_PARPORT_SERIAL=y CONFIG_PARPORT_PC_FIFO=y CONFIG_ZRAM=m +CONFIG_ZRAM_BACKEND_LZ4=y +CONFIG_ZRAM_BACKEND_LZ4HC=y +CONFIG_ZRAM_BACKEND_ZSTD=y +CONFIG_ZRAM_BACKEND_DEFLATE=y +CONFIG_ZRAM_BACKEND_842=y +CONFIG_ZRAM_BACKEND_LZO=y CONFIG_ZRAM_DEF_COMP_ZSTD=y +CONFIG_ZRAM_WRITEBACK=y +CONFIG_ZRAM_MEMORY_TRACKING=y +CONFIG_ZRAM_MULTI_COMP=y CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_DRBD=m CONFIG_BLK_DEV_NBD=m @@ -433,6 +456,9 @@ CONFIG_NVME_TARGET_RDMA=m CONFIG_NVME_TARGET_FC=m CONFIG_NVME_TARGET_TCP=m CONFIG_EEPROM_AT24=m +CONFIG_PVPANIC=y +CONFIG_PVPANIC_MMIO=m +CONFIG_PVPANIC_PCI=m CONFIG_BLK_DEV_SD=y CONFIG_BLK_DEV_SR=y CONFIG_CHR_DEV_SG=y @@ -470,12 +496,10 @@ CONFIG_PATA_ATIIXP=y CONFIG_PATA_PCMCIA=m CONFIG_MD=y CONFIG_BLK_DEV_MD=m -CONFIG_MD_LINEAR=m CONFIG_MD_RAID0=m CONFIG_MD_RAID1=m CONFIG_MD_RAID10=m CONFIG_MD_RAID456=m -CONFIG_MD_MULTIPATH=m CONFIG_BCACHE=m CONFIG_BLK_DEV_DM=y 
CONFIG_DM_CRYPT=m @@ -489,6 +513,16 @@ CONFIG_DM_ZERO=m CONFIG_DM_MULTIPATH=m CONFIG_DM_MULTIPATH_QL=m CONFIG_DM_MULTIPATH_ST=m +CONFIG_DM_MULTIPATH_HST=m +CONFIG_DM_MULTIPATH_IOA=m +CONFIG_DM_INIT=y +CONFIG_DM_UEVENT=y +CONFIG_DM_VERITY=m +CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG=y +CONFIG_DM_VERITY_FEC=y +CONFIG_DM_INTEGRITY=m +CONFIG_DM_ZONED=m +CONFIG_DM_VDO=m CONFIG_TARGET_CORE=m CONFIG_TCM_IBLOCK=m CONFIG_TCM_FILEIO=m @@ -500,6 +534,13 @@ CONFIG_NETDEVICES=y CONFIG_BONDING=m CONFIG_DUMMY=y CONFIG_WIREGUARD=m +CONFIG_IFB=m +CONFIG_NET_TEAM=m +CONFIG_NET_TEAM_MODE_BROADCAST=m +CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m +CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m +CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_MACVLAN=m CONFIG_MACVTAP=m CONFIG_IPVLAN=m @@ -580,12 +621,14 @@ CONFIG_PPP_ASYNC=m CONFIG_PPP_SYNC_TTY=m CONFIG_USB_RTL8150=m CONFIG_USB_RTL8152=m +CONFIG_USB_USBNET=m # CONFIG_USB_NET_AX8817X is not set # CONFIG_USB_NET_AX88179_178A is not set CONFIG_USB_NET_CDC_EEM=m CONFIG_USB_NET_HUAWEI_CDC_NCM=m CONFIG_USB_NET_CDC_MBIM=m # CONFIG_USB_NET_NET1080 is not set +CONFIG_USB_NET_RNDIS_HOST=m # CONFIG_USB_BELKIN is not set # CONFIG_USB_ARMLINUX is not set # CONFIG_USB_NET_ZAURUS is not set @@ -594,10 +637,11 @@ CONFIG_ATH9K_HTC=m CONFIG_IWLWIFI=m CONFIG_IWLDVM=m CONFIG_IWLMVM=m -CONFIG_HOSTAP=m CONFIG_MT7601U=m CONFIG_RT2X00=m CONFIG_RT2800USB=m +CONFIG_RTL8180=m +CONFIG_RTL8187=m CONFIG_RTL8192CE=m CONFIG_RTL8192SE=m CONFIG_RTL8192DE=m @@ -607,18 +651,26 @@ CONFIG_RTL8188EE=m CONFIG_RTL8192EE=m CONFIG_RTL8821AE=m CONFIG_RTL8192CU=m +CONFIG_RTL8192DU=m # CONFIG_RTLWIFI_DEBUG is not set CONFIG_RTL8XXXU=m CONFIG_RTW88=m CONFIG_RTW88_8822BE=m +CONFIG_RTW88_8822BU=m CONFIG_RTW88_8822CE=m +CONFIG_RTW88_8822CU=m CONFIG_RTW88_8723DE=m +CONFIG_RTW88_8723DU=m CONFIG_RTW88_8821CE=m +CONFIG_RTW88_8821CU=m CONFIG_RTW89=m +CONFIG_RTW89_8851BE=m CONFIG_RTW89_8852AE=m +CONFIG_RTW89_8852BE=m +CONFIG_RTW89_8852BTE=m CONFIG_RTW89_8852CE=m +CONFIG_RTW89_8922AE=m CONFIG_ZD1211RW=m -CONFIG_USB_NET_RNDIS_WLAN=m CONFIG_USB4_NET=m CONFIG_INPUT_MOUSEDEV=y CONFIG_INPUT_MOUSEDEV_PSAUX=y @@ -651,6 +703,9 @@ CONFIG_HW_RANDOM=y CONFIG_HW_RANDOM_VIRTIO=m CONFIG_I2C_CHARDEV=y CONFIG_I2C_PIIX4=y +CONFIG_I2C_DESIGNWARE_CORE=y +CONFIG_I2C_DESIGNWARE_SLAVE=y +CONFIG_I2C_DESIGNWARE_PCI=y CONFIG_I2C_GPIO=y CONFIG_I2C_LS2X=y CONFIG_SPI=y @@ -727,11 +782,22 @@ CONFIG_SND_HDA_CODEC_CONEXANT=y CONFIG_SND_USB_AUDIO=m CONFIG_SND_SOC=m CONFIG_SND_SOC_LOONGSON_CARD=m +CONFIG_SND_SOC_ES7134=m +CONFIG_SND_SOC_ES7241=m +CONFIG_SND_SOC_ES8311=m +CONFIG_SND_SOC_ES8316=m +CONFIG_SND_SOC_ES8323=m +CONFIG_SND_SOC_ES8326=m +CONFIG_SND_SOC_ES8328_I2C=m +CONFIG_SND_SOC_ES8328_SPI=m +CONFIG_SND_SOC_UDA1334=m +CONFIG_SND_SOC_UDA1342=m CONFIG_SND_VIRTIO=m CONFIG_HIDRAW=y CONFIG_UHID=m CONFIG_HID_A4TECH=m CONFIG_HID_CHERRY=m +CONFIG_HID_ELAN=m CONFIG_HID_LOGITECH=m CONFIG_HID_LOGITECH_DJ=m CONFIG_LOGITECH_FF=y @@ -740,7 +806,11 @@ CONFIG_LOGIG940_FF=y CONFIG_HID_MICROSOFT=m CONFIG_HID_MULTITOUCH=m CONFIG_HID_SUNPLUS=m +CONFIG_HID_WACOM=m CONFIG_USB_HIDDEV=y +CONFIG_I2C_HID_ACPI=m +CONFIG_I2C_HID_OF=m +CONFIG_I2C_HID_OF_ELAN=m CONFIG_USB=y CONFIG_USB_OTG=y CONFIG_USB_MON=y @@ -775,7 +845,7 @@ CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_EFI=y CONFIG_RTC_DRV_LOONGSON=y CONFIG_DMADEVICES=y -CONFIG_LS2X_APB_DMA=y +CONFIG_LOONGSON2_APB_DMA=y CONFIG_UDMABUF=y CONFIG_DMABUF_HEAPS=y CONFIG_DMABUF_HEAPS_SYSTEM=y @@ -852,6 +922,9 @@ CONFIG_F2FS_FS=m CONFIG_F2FS_FS_SECURITY=y CONFIG_F2FS_CHECK_FS=y CONFIG_F2FS_FS_COMPRESSION=y +CONFIG_FS_ENCRYPTION=y 
+CONFIG_FS_ENCRYPTION_INLINE_CRYPT=y +CONFIG_FS_VERITY=y CONFIG_FANOTIFY=y CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y CONFIG_QUOTA=y @@ -904,16 +977,14 @@ CONFIG_SQUASHFS_ZSTD=y CONFIG_MINIX_FS=m CONFIG_ROMFS_FS=m CONFIG_PSTORE=m -CONFIG_PSTORE_LZO_COMPRESS=m -CONFIG_PSTORE_LZ4_COMPRESS=m -CONFIG_PSTORE_LZ4HC_COMPRESS=m -CONFIG_PSTORE_842_COMPRESS=y -CONFIG_PSTORE_ZSTD_COMPRESS=y -CONFIG_PSTORE_ZSTD_COMPRESS_DEFAULT=y +CONFIG_PSTORE_COMPRESS=y CONFIG_SYSV_FS=m CONFIG_UFS_FS=m CONFIG_EROFS_FS=m CONFIG_EROFS_FS_ZIP_LZMA=y +CONFIG_EROFS_FS_ZIP_DEFLATE=y +CONFIG_EROFS_FS_ZIP_ZSTD=y +CONFIG_EROFS_FS_ONDEMAND=y CONFIG_EROFS_FS_PCPU_KTHREAD=y CONFIG_NFS_FS=y CONFIG_NFS_V3_ACL=y diff --git a/arch/loongarch/include/asm/thread_info.h b/arch/loongarch/include/asm/thread_info.h index 8bf0e6f51546..4f5a9441754e 100644 --- a/arch/loongarch/include/asm/thread_info.h +++ b/arch/loongarch/include/asm/thread_info.h @@ -66,8 +66,9 @@ register unsigned long current_stack_pointer __asm__("$sp"); * - pending work-to-be-done flags are in LSW * - other flags in MSW */ -#define TIF_SIGPENDING 1 /* signal pending */ -#define TIF_NEED_RESCHED 2 /* rescheduling necessary */ +#define TIF_NEED_RESCHED 0 /* rescheduling necessary */ +#define TIF_NEED_RESCHED_LAZY 1 /* lazy rescheduling necessary */ +#define TIF_SIGPENDING 2 /* signal pending */ #define TIF_NOTIFY_RESUME 3 /* callback before returning to user */ #define TIF_NOTIFY_SIGNAL 4 /* signal notifications exist */ #define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */ @@ -88,8 +89,9 @@ register unsigned long current_stack_pointer __asm__("$sp"); #define TIF_LBT_CTX_LIVE 20 /* LBT context must be preserved */ #define TIF_PATCH_PENDING 21 /* pending live patching update */ -#define _TIF_SIGPENDING (1<<TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) +#define _TIF_NEED_RESCHED_LAZY (1<<TIF_NEED_RESCHED_LAZY) +#define _TIF_SIGPENDING (1<<TIF_SIGPENDING) #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) #define _TIF_NOTIFY_SIGNAL (1<<TIF_NOTIFY_SIGNAL) #define _TIF_NOHZ (1<<TIF_NOHZ) diff --git a/arch/loongarch/kernel/time.c b/arch/loongarch/kernel/time.c index 46d7d40c87e3..a07d7eff4dc5 100644 --- a/arch/loongarch/kernel/time.c +++ b/arch/loongarch/kernel/time.c @@ -127,7 +127,11 @@ void sync_counter(void) int constant_clockevent_init(void) { unsigned int cpu = smp_processor_id(); - unsigned long min_delta = 0x600; +#ifdef CONFIG_PREEMPT_RT + unsigned long min_delta = 100; +#else + unsigned long min_delta = 1000; +#endif unsigned long max_delta = (1UL << 48) - 1; struct clock_event_device *cd; static int irq = 0, timer_irq_installed = 0; diff --git a/arch/loongarch/mm/tlb.c b/arch/loongarch/mm/tlb.c index 5ac9beb5f093..3b427b319db2 100644 --- a/arch/loongarch/mm/tlb.c +++ b/arch/loongarch/mm/tlb.c @@ -289,7 +289,7 @@ static void setup_tlb_handler(int cpu) /* Avoid lockdep warning */ rcutree_report_cpu_starting(cpu); -#ifdef CONFIG_NUMA +#if defined(CONFIG_NUMA) && !defined(CONFIG_PREEMPT_RT) vec_sz = sizeof(exception_handlers); if (pcpu_handlers[cpu]) diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c index 7dbefd4ba210..dd350cba1252 100644 --- a/arch/loongarch/net/bpf_jit.c +++ b/arch/loongarch/net/bpf_jit.c @@ -179,7 +179,7 @@ static void __build_epilogue(struct jit_ctx *ctx, bool is_tail_call) if (!is_tail_call) { /* Set return value */ - move_reg(ctx, LOONGARCH_GPR_A0, regmap[BPF_REG_0]); + emit_insn(ctx, addiw, LOONGARCH_GPR_A0, regmap[BPF_REG_0], 0); /* Return to the caller */ emit_insn(ctx, jirl, LOONGARCH_GPR_RA, 
LOONGARCH_GPR_ZERO, 0); } else { diff --git a/arch/loongarch/vdso/Makefile b/arch/loongarch/vdso/Makefile index 40c1175823d6..fdde1bcd4e26 100644 --- a/arch/loongarch/vdso/Makefile +++ b/arch/loongarch/vdso/Makefile @@ -19,7 +19,7 @@ ccflags-vdso := \ cflags-vdso := $(ccflags-vdso) \ -isystem $(shell $(CC) -print-file-name=include) \ $(filter -W%,$(filter-out -Wa$(comma)%,$(KBUILD_CFLAGS))) \ - -O2 -g -fno-strict-aliasing -fno-common -fno-builtin \ + -std=gnu11 -O2 -g -fno-strict-aliasing -fno-common -fno-builtin \ -fno-stack-protector -fno-jump-tables -DDISABLE_BRANCH_PROFILING \ $(call cc-option, -fno-asynchronous-unwind-tables) \ $(call cc-option, -fno-stack-protector) diff --git a/arch/mips/boot/dts/loongson/ls7a-pch.dtsi b/arch/mips/boot/dts/loongson/ls7a-pch.dtsi index cce9428afc41..ee71045883e7 100644 --- a/arch/mips/boot/dts/loongson/ls7a-pch.dtsi +++ b/arch/mips/boot/dts/loongson/ls7a-pch.dtsi @@ -70,7 +70,6 @@ device_type = "pci"; #address-cells = <3>; #size-cells = <2>; - #interrupt-cells = <2>; msi-parent = <&msi>; reg = <0 0x1a000000 0 0x02000000>, @@ -234,7 +233,7 @@ }; }; - pci_bridge@9,0 { + pcie@9,0 { compatible = "pci0014,7a19.1", "pci0014,7a19", "pciclass060400", @@ -244,12 +243,16 @@ interrupts = <32 IRQ_TYPE_LEVEL_HIGH>; interrupt-parent = <&pic>; + #address-cells = <3>; + #size-cells = <2>; + device_type = "pci"; #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &pic 32 IRQ_TYPE_LEVEL_HIGH>; + ranges; }; - pci_bridge@a,0 { + pcie@a,0 { compatible = "pci0014,7a09.1", "pci0014,7a09", "pciclass060400", @@ -259,12 +262,16 @@ interrupts = <33 IRQ_TYPE_LEVEL_HIGH>; interrupt-parent = <&pic>; + #address-cells = <3>; + #size-cells = <2>; + device_type = "pci"; #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &pic 33 IRQ_TYPE_LEVEL_HIGH>; + ranges; }; - pci_bridge@b,0 { + pcie@b,0 { compatible = "pci0014,7a09.1", "pci0014,7a09", "pciclass060400", @@ -274,12 +281,16 @@ interrupts = <34 IRQ_TYPE_LEVEL_HIGH>; interrupt-parent = <&pic>; + #address-cells = <3>; + #size-cells = <2>; + device_type = "pci"; #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &pic 34 IRQ_TYPE_LEVEL_HIGH>; + ranges; }; - pci_bridge@c,0 { + pcie@c,0 { compatible = "pci0014,7a09.1", "pci0014,7a09", "pciclass060400", @@ -289,12 +300,16 @@ interrupts = <35 IRQ_TYPE_LEVEL_HIGH>; interrupt-parent = <&pic>; + #address-cells = <3>; + #size-cells = <2>; + device_type = "pci"; #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &pic 35 IRQ_TYPE_LEVEL_HIGH>; + ranges; }; - pci_bridge@d,0 { + pcie@d,0 { compatible = "pci0014,7a19.1", "pci0014,7a19", "pciclass060400", @@ -304,12 +319,16 @@ interrupts = <36 IRQ_TYPE_LEVEL_HIGH>; interrupt-parent = <&pic>; + #address-cells = <3>; + #size-cells = <2>; + device_type = "pci"; #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &pic 36 IRQ_TYPE_LEVEL_HIGH>; + ranges; }; - pci_bridge@e,0 { + pcie@e,0 { compatible = "pci0014,7a09.1", "pci0014,7a09", "pciclass060400", @@ -319,12 +338,16 @@ interrupts = <37 IRQ_TYPE_LEVEL_HIGH>; interrupt-parent = <&pic>; + #address-cells = <3>; + #size-cells = <2>; + device_type = "pci"; #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &pic 37 IRQ_TYPE_LEVEL_HIGH>; + ranges; }; - pci_bridge@f,0 { + pcie@f,0 { compatible = "pci0014,7a29.1", "pci0014,7a29", "pciclass060400", @@ -334,12 +357,16 @@ interrupts = <40 IRQ_TYPE_LEVEL_HIGH>; interrupt-parent = <&pic>; + 
#address-cells = <3>; + #size-cells = <2>; + device_type = "pci"; #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &pic 40 IRQ_TYPE_LEVEL_HIGH>; + ranges; }; - pci_bridge@10,0 { + pcie@10,0 { compatible = "pci0014,7a19.1", "pci0014,7a19", "pciclass060400", @@ -349,12 +376,16 @@ interrupts = <41 IRQ_TYPE_LEVEL_HIGH>; interrupt-parent = <&pic>; + #address-cells = <3>; + #size-cells = <2>; + device_type = "pci"; #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &pic 41 IRQ_TYPE_LEVEL_HIGH>; + ranges; }; - pci_bridge@11,0 { + pcie@11,0 { compatible = "pci0014,7a29.1", "pci0014,7a29", "pciclass060400", @@ -364,12 +395,16 @@ interrupts = <42 IRQ_TYPE_LEVEL_HIGH>; interrupt-parent = <&pic>; + #address-cells = <3>; + #size-cells = <2>; + device_type = "pci"; #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &pic 42 IRQ_TYPE_LEVEL_HIGH>; + ranges; }; - pci_bridge@12,0 { + pcie@12,0 { compatible = "pci0014,7a19.1", "pci0014,7a19", "pciclass060400", @@ -379,12 +414,16 @@ interrupts = <43 IRQ_TYPE_LEVEL_HIGH>; interrupt-parent = <&pic>; + #address-cells = <3>; + #size-cells = <2>; + device_type = "pci"; #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &pic 43 IRQ_TYPE_LEVEL_HIGH>; + ranges; }; - pci_bridge@13,0 { + pcie@13,0 { compatible = "pci0014,7a29.1", "pci0014,7a29", "pciclass060400", @@ -394,12 +433,16 @@ interrupts = <38 IRQ_TYPE_LEVEL_HIGH>; interrupt-parent = <&pic>; + #address-cells = <3>; + #size-cells = <2>; + device_type = "pci"; #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &pic 38 IRQ_TYPE_LEVEL_HIGH>; + ranges; }; - pci_bridge@14,0 { + pcie@14,0 { compatible = "pci0014,7a19.1", "pci0014,7a19", "pciclass060400", @@ -409,9 +452,13 @@ interrupts = <39 IRQ_TYPE_LEVEL_HIGH>; interrupt-parent = <&pic>; + #address-cells = <3>; + #size-cells = <2>; + device_type = "pci"; #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &pic 39 IRQ_TYPE_LEVEL_HIGH>; + ranges; }; }; diff --git a/arch/mips/boot/dts/mobileye/eyeq5-clocks.dtsi b/arch/mips/boot/dts/mobileye/eyeq5-clocks.dtsi deleted file mode 100644 index 17a342cc744e..000000000000 --- a/arch/mips/boot/dts/mobileye/eyeq5-clocks.dtsi +++ /dev/null @@ -1,270 +0,0 @@ -// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) -/* - * Copyright 2023 Mobileye Vision Technologies Ltd. 
- */ - -#include <dt-bindings/clock/mobileye,eyeq5-clk.h> - -/ { - /* Fixed clock */ - xtal: xtal { - compatible = "fixed-clock"; - #clock-cells = <0>; - clock-frequency = <30000000>; - }; - -/* PLL_CPU derivatives */ - occ_cpu: occ-cpu { - compatible = "fixed-factor-clock"; - clocks = <&olb EQ5C_PLL_CPU>; - #clock-cells = <0>; - clock-div = <1>; - clock-mult = <1>; - }; - si_css0_ref_clk: si-css0-ref-clk { /* gate ClkRstGen_si_css0_ref */ - compatible = "fixed-factor-clock"; - clocks = <&occ_cpu>; - #clock-cells = <0>; - clock-div = <1>; - clock-mult = <1>; - }; - cpc_clk: cpc-clk { - compatible = "fixed-factor-clock"; - clocks = <&si_css0_ref_clk>; - #clock-cells = <0>; - clock-div = <1>; - clock-mult = <1>; - }; - core0_clk: core0-clk { - compatible = "fixed-factor-clock"; - clocks = <&si_css0_ref_clk>; - #clock-cells = <0>; - clock-div = <1>; - clock-mult = <1>; - }; - core1_clk: core1-clk { - compatible = "fixed-factor-clock"; - clocks = <&si_css0_ref_clk>; - #clock-cells = <0>; - clock-div = <1>; - clock-mult = <1>; - }; - core2_clk: core2-clk { - compatible = "fixed-factor-clock"; - clocks = <&si_css0_ref_clk>; - #clock-cells = <0>; - clock-div = <1>; - clock-mult = <1>; - }; - core3_clk: core3-clk { - compatible = "fixed-factor-clock"; - clocks = <&si_css0_ref_clk>; - #clock-cells = <0>; - clock-div = <1>; - clock-mult = <1>; - }; - cm_clk: cm-clk { - compatible = "fixed-factor-clock"; - clocks = <&si_css0_ref_clk>; - #clock-cells = <0>; - clock-div = <1>; - clock-mult = <1>; - }; - mem_clk: mem-clk { - compatible = "fixed-factor-clock"; - clocks = <&si_css0_ref_clk>; - #clock-cells = <0>; - clock-div = <1>; - clock-mult = <1>; - }; - occ_isram: occ-isram { - compatible = "fixed-factor-clock"; - clocks = <&olb EQ5C_PLL_CPU>; - #clock-cells = <0>; - clock-div = <2>; - clock-mult = <1>; - }; - isram_clk: isram-clk { /* gate ClkRstGen_isram */ - compatible = "fixed-factor-clock"; - clocks = <&occ_isram>; - #clock-cells = <0>; - clock-div = <1>; - clock-mult = <1>; - }; - occ_dbu: occ-dbu { - compatible = "fixed-factor-clock"; - clocks = <&olb EQ5C_PLL_CPU>; - #clock-cells = <0>; - clock-div = <10>; - clock-mult = <1>; - }; - si_dbu_tp_pclk: si-dbu-tp-pclk { /* gate ClkRstGen_dbu */ - compatible = "fixed-factor-clock"; - clocks = <&occ_dbu>; - #clock-cells = <0>; - clock-div = <1>; - clock-mult = <1>; - }; -/* PLL_VDI derivatives */ - occ_vdi: occ-vdi { - compatible = "fixed-factor-clock"; - clocks = <&olb EQ5C_PLL_VDI>; - #clock-cells = <0>; - clock-div = <2>; - clock-mult = <1>; - }; - vdi_clk: vdi-clk { /* gate ClkRstGen_vdi */ - compatible = "fixed-factor-clock"; - clocks = <&occ_vdi>; - #clock-cells = <0>; - clock-div = <1>; - clock-mult = <1>; - }; - occ_can_ser: occ-can-ser { - compatible = "fixed-factor-clock"; - clocks = <&olb EQ5C_PLL_VDI>; - #clock-cells = <0>; - clock-div = <16>; - clock-mult = <1>; - }; - can_ser_clk: can-ser-clk { /* gate ClkRstGen_can_ser */ - compatible = "fixed-factor-clock"; - clocks = <&occ_can_ser>; - #clock-cells = <0>; - clock-div = <1>; - clock-mult = <1>; - }; - i2c_ser_clk: i2c-ser-clk { - compatible = "fixed-factor-clock"; - clocks = <&olb EQ5C_PLL_VDI>; - #clock-cells = <0>; - clock-div = <20>; - clock-mult = <1>; - }; -/* PLL_PER derivatives */ - occ_periph: occ-periph { - compatible = "fixed-factor-clock"; - clocks = <&olb EQ5C_PLL_PER>; - #clock-cells = <0>; - clock-div = <16>; - clock-mult = <1>; - }; - periph_clk: periph-clk { - compatible = "fixed-factor-clock"; - clocks = <&occ_periph>; - #clock-cells = <0>; - clock-div = <1>; - 
clock-mult = <1>; - }; - can_clk: can-clk { - compatible = "fixed-factor-clock"; - clocks = <&occ_periph>; - #clock-cells = <0>; - clock-div = <1>; - clock-mult = <1>; - }; - spi_clk: spi-clk { - compatible = "fixed-factor-clock"; - clocks = <&occ_periph>; - #clock-cells = <0>; - clock-div = <1>; - clock-mult = <1>; - }; - uart_clk: uart-clk { - compatible = "fixed-factor-clock"; - clocks = <&occ_periph>; - #clock-cells = <0>; - clock-div = <1>; - clock-mult = <1>; - }; - i2c_clk: i2c-clk { - compatible = "fixed-factor-clock"; - clocks = <&occ_periph>; - #clock-cells = <0>; - clock-div = <1>; - clock-mult = <1>; - clock-output-names = "i2c_clk"; - }; - timer_clk: timer-clk { - compatible = "fixed-factor-clock"; - clocks = <&occ_periph>; - #clock-cells = <0>; - clock-div = <1>; - clock-mult = <1>; - clock-output-names = "timer_clk"; - }; - gpio_clk: gpio-clk { - compatible = "fixed-factor-clock"; - clocks = <&occ_periph>; - #clock-cells = <0>; - clock-div = <1>; - clock-mult = <1>; - clock-output-names = "gpio_clk"; - }; - emmc_sys_clk: emmc-sys-clk { - compatible = "fixed-factor-clock"; - clocks = <&olb EQ5C_PLL_PER>; - #clock-cells = <0>; - clock-div = <10>; - clock-mult = <1>; - clock-output-names = "emmc_sys_clk"; - }; - ccf_ctrl_clk: ccf-ctrl-clk { - compatible = "fixed-factor-clock"; - clocks = <&olb EQ5C_PLL_PER>; - #clock-cells = <0>; - clock-div = <4>; - clock-mult = <1>; - clock-output-names = "ccf_ctrl_clk"; - }; - occ_mjpeg_core: occ-mjpeg-core { - compatible = "fixed-factor-clock"; - clocks = <&olb EQ5C_PLL_PER>; - #clock-cells = <0>; - clock-div = <2>; - clock-mult = <1>; - clock-output-names = "occ_mjpeg_core"; - }; - hsm_clk: hsm-clk { /* gate ClkRstGen_hsm */ - compatible = "fixed-factor-clock"; - clocks = <&occ_mjpeg_core>; - #clock-cells = <0>; - clock-div = <1>; - clock-mult = <1>; - clock-output-names = "hsm_clk"; - }; - mjpeg_core_clk: mjpeg-core-clk { /* gate ClkRstGen_mjpeg_gen */ - compatible = "fixed-factor-clock"; - clocks = <&occ_mjpeg_core>; - #clock-cells = <0>; - clock-div = <1>; - clock-mult = <1>; - clock-output-names = "mjpeg_core_clk"; - }; - fcmu_a_clk: fcmu-a-clk { - compatible = "fixed-factor-clock"; - clocks = <&olb EQ5C_PLL_PER>; - #clock-cells = <0>; - clock-div = <20>; - clock-mult = <1>; - clock-output-names = "fcmu_a_clk"; - }; - occ_pci_sys: occ-pci-sys { - compatible = "fixed-factor-clock"; - clocks = <&olb EQ5C_PLL_PER>; - #clock-cells = <0>; - clock-div = <8>; - clock-mult = <1>; - clock-output-names = "occ_pci_sys"; - }; - pclk: pclk { - compatible = "fixed-clock"; - #clock-cells = <0>; - clock-frequency = <250000000>; /* 250MHz */ - }; - tsu_clk: tsu-clk { - compatible = "fixed-clock"; - #clock-cells = <0>; - clock-frequency = <125000000>; /* 125MHz */ - }; -}; diff --git a/arch/mips/boot/dts/mobileye/eyeq5.dtsi b/arch/mips/boot/dts/mobileye/eyeq5.dtsi index 0708771c193d..5d73e8320b8e 100644 --- a/arch/mips/boot/dts/mobileye/eyeq5.dtsi +++ b/arch/mips/boot/dts/mobileye/eyeq5.dtsi @@ -5,7 +5,7 @@ #include <dt-bindings/interrupt-controller/mips-gic.h> -#include "eyeq5-clocks.dtsi" +#include <dt-bindings/clock/mobileye,eyeq5-clk.h> / { #address-cells = <2>; @@ -17,7 +17,7 @@ device_type = "cpu"; compatible = "img,i6500"; reg = <0>; - clocks = <&core0_clk>; + clocks = <&olb EQ5C_CPU_CORE0>; }; }; @@ -64,6 +64,24 @@ #interrupt-cells = <1>; }; + xtal: xtal { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <30000000>; + }; + + pclk: pclk { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <250000000>; 
/* 250MHz */ + }; + + tsu_clk: tsu-clk { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <125000000>; /* 125MHz */ + }; + soc: soc { #address-cells = <2>; #size-cells = <2>; @@ -76,7 +94,7 @@ reg-io-width = <4>; interrupt-parent = <&gic>; interrupts = <GIC_SHARED 6 IRQ_TYPE_LEVEL_HIGH>; - clocks = <&uart_clk>, <&occ_periph>; + clocks = <&olb EQ5C_PER_UART>, <&olb EQ5C_PER_OCC>; clock-names = "uartclk", "apb_pclk"; resets = <&olb 0 10>; pinctrl-names = "default"; @@ -89,7 +107,7 @@ reg-io-width = <4>; interrupt-parent = <&gic>; interrupts = <GIC_SHARED 6 IRQ_TYPE_LEVEL_HIGH>; - clocks = <&uart_clk>, <&occ_periph>; + clocks = <&olb EQ5C_PER_UART>, <&olb EQ5C_PER_OCC>; clock-names = "uartclk", "apb_pclk"; resets = <&olb 0 11>; pinctrl-names = "default"; @@ -102,7 +120,7 @@ reg-io-width = <4>; interrupt-parent = <&gic>; interrupts = <GIC_SHARED 6 IRQ_TYPE_LEVEL_HIGH>; - clocks = <&uart_clk>, <&occ_periph>; + clocks = <&olb EQ5C_PER_UART>, <&olb EQ5C_PER_OCC>; clock-names = "uartclk", "apb_pclk"; resets = <&olb 0 12>; pinctrl-names = "default"; @@ -135,7 +153,7 @@ timer { compatible = "mti,gic-timer"; interrupts = <GIC_LOCAL 1 IRQ_TYPE_NONE>; - clocks = <&core0_clk>; + clocks = <&olb EQ5C_CPU_CORE0>; }; }; }; diff --git a/arch/mips/boot/dts/mobileye/eyeq6h-fixed-clocks.dtsi b/arch/mips/boot/dts/mobileye/eyeq6h-fixed-clocks.dtsi deleted file mode 100644 index 5fa99e06fde7..000000000000 --- a/arch/mips/boot/dts/mobileye/eyeq6h-fixed-clocks.dtsi +++ /dev/null @@ -1,52 +0,0 @@ -// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) -/* - * Copyright 2023 Mobileye Vision Technologies Ltd. - */ - -#include <dt-bindings/clock/mobileye,eyeq5-clk.h> - -/ { - xtal: clock-30000000 { - compatible = "fixed-clock"; - #clock-cells = <0>; - clock-frequency = <30000000>; - }; - - pll_west: clock-2000000000-west { - compatible = "fixed-clock"; - #clock-cells = <0>; - clock-frequency = <2000000000>; - }; - - pll_cpu: clock-2000000000-cpu { - compatible = "fixed-clock"; - #clock-cells = <0>; - clock-frequency = <2000000000>; - }; - - /* pll-cpu derivatives */ - occ_cpu: clock-2000000000-occ-cpu { - compatible = "fixed-factor-clock"; - clocks = <&pll_cpu>; - #clock-cells = <0>; - clock-div = <1>; - clock-mult = <1>; - }; - - /* pll-west derivatives */ - occ_periph_w: clock-200000000 { - compatible = "fixed-factor-clock"; - clocks = <&pll_west>; - #clock-cells = <0>; - clock-div = <10>; - clock-mult = <1>; - }; - uart_clk: clock-200000000-uart { - compatible = "fixed-factor-clock"; - clocks = <&occ_periph_w>; - #clock-cells = <0>; - clock-div = <1>; - clock-mult = <1>; - }; - -}; diff --git a/arch/mips/boot/dts/mobileye/eyeq6h.dtsi b/arch/mips/boot/dts/mobileye/eyeq6h.dtsi index 1db3c3cda2e3..4a1a43f351d3 100644 --- a/arch/mips/boot/dts/mobileye/eyeq6h.dtsi +++ b/arch/mips/boot/dts/mobileye/eyeq6h.dtsi @@ -5,7 +5,7 @@ #include <dt-bindings/interrupt-controller/mips-gic.h> -#include "eyeq6h-fixed-clocks.dtsi" +#include <dt-bindings/clock/mobileye,eyeq5-clk.h> / { #address-cells = <2>; @@ -17,7 +17,7 @@ device_type = "cpu"; compatible = "img,i6500"; reg = <0>; - clocks = <&occ_cpu>; + clocks = <&olb_central EQ6HC_CENTRAL_CPU_OCC>; }; }; @@ -32,19 +32,42 @@ #interrupt-cells = <1>; }; + xtal: clock-30000000 { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <30000000>; + }; + soc: soc { compatible = "simple-bus"; #address-cells = <2>; #size-cells = <2>; ranges; + olb_acc: system-controller@d2003000 { + compatible = "mobileye,eyeq6h-acc-olb", "syscon"; + reg = <0x0 
0xd2003000 0x0 0x1000>; + #reset-cells = <1>; + #clock-cells = <1>; + clocks = <&xtal>; + clock-names = "ref"; + }; + + olb_central: system-controller@d3100000 { + compatible = "mobileye,eyeq6h-central-olb", "syscon"; + reg = <0x0 0xd3100000 0x0 0x1000>; + #clock-cells = <1>; + clocks = <&xtal>; + clock-names = "ref"; + }; + uart0: serial@d3331000 { compatible = "arm,pl011", "arm,primecell"; reg = <0 0xd3331000 0x0 0x1000>; reg-io-width = <4>; interrupt-parent = <&gic>; interrupts = <GIC_SHARED 43 IRQ_TYPE_LEVEL_HIGH>; - clocks = <&occ_periph_w>, <&occ_periph_w>; + clocks = <&olb_west EQ6HC_WEST_PER_UART>, <&olb_west EQ6HC_WEST_PER_OCC>; clock-names = "uartclk", "apb_pclk"; }; @@ -56,6 +79,15 @@ pinctrl-single,function-mask = <0xffff>; }; + olb_west: system-controller@d3338000 { + compatible = "mobileye,eyeq6h-west-olb", "syscon"; + reg = <0x0 0xd3338000 0x0 0x1000>; + #reset-cells = <1>; + #clock-cells = <1>; + clocks = <&xtal>; + clock-names = "ref"; + }; + pinctrl_east: pinctrl@d3357000 { compatible = "pinctrl-single"; reg = <0x0 0xd3357000 0x0 0xb0>; @@ -64,6 +96,23 @@ pinctrl-single,function-mask = <0xffff>; }; + olb_east: system-controller@d3358000 { + compatible = "mobileye,eyeq6h-east-olb", "syscon"; + reg = <0x0 0xd3358000 0x0 0x1000>; + #reset-cells = <1>; + #clock-cells = <1>; + clocks = <&xtal>; + clock-names = "ref"; + }; + + olb_south: system-controller@d8013000 { + compatible = "mobileye,eyeq6h-south-olb", "syscon"; + reg = <0x0 0xd8013000 0x0 0x1000>; + #clock-cells = <1>; + clocks = <&xtal>; + clock-names = "ref"; + }; + pinctrl_south: pinctrl@d8014000 { compatible = "pinctrl-single"; reg = <0x0 0xd8014000 0x0 0xf8>; @@ -72,6 +121,22 @@ pinctrl-single,function-mask = <0xffff>; }; + olb_ddr0: system-controller@e4080000 { + compatible = "mobileye,eyeq6h-ddr0-olb", "syscon"; + reg = <0x0 0xe4080000 0x0 0x1000>; + #clock-cells = <1>; + clocks = <&xtal>; + clock-names = "ref"; + }; + + olb_ddr1: system-controller@e4081000 { + compatible = "mobileye,eyeq6h-ddr1-olb", "syscon"; + reg = <0x0 0xe4081000 0x0 0x1000>; + #clock-cells = <1>; + clocks = <&xtal>; + clock-names = "ref"; + }; + gic: interrupt-controller@f0920000 { compatible = "mti,gic"; reg = <0x0 0xf0920000 0x0 0x20000>; @@ -89,7 +154,7 @@ timer { compatible = "mti,gic-timer"; interrupts = <GIC_LOCAL 1 IRQ_TYPE_NONE>; - clocks = <&occ_cpu>; + clocks = <&olb_central EQ6HC_CENTRAL_CPU_OCC>; }; }; }; diff --git a/arch/mips/boot/dts/realtek/rtl930x.dtsi b/arch/mips/boot/dts/realtek/rtl930x.dtsi index 6a6f3f3fe389..17577457d159 100644 --- a/arch/mips/boot/dts/realtek/rtl930x.dtsi +++ b/arch/mips/boot/dts/realtek/rtl930x.dtsi @@ -61,6 +61,8 @@ }; &soc { + ranges = <0x0 0x18000000 0x20000>; + intc: interrupt-controller@3000 { compatible = "realtek,rtl9300-intc", "realtek,rtl-intc"; reg = <0x3000 0x18>, <0x3018 0x18>; @@ -88,6 +90,17 @@ interrupts = <7>, <8>, <9>, <10>, <11>; clocks = <&lx_clk>; }; + + snand: spi@1a400 { + compatible = "realtek,rtl9301-snand"; + reg = <0x1a400 0x44>; + interrupt-parent = <&intc>; + interrupts = <19>; + clocks = <&lx_clk>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; }; &uart0 { diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index 73210e5bcfa7..8e776ba39497 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -2848,7 +2848,7 @@ static void __init fixup_device_tree_chrp(void) #endif #if defined(CONFIG_PPC64) && defined(CONFIG_PPC_PMAC) -static void __init fixup_device_tree_pmac(void) +static void __init 
fixup_device_tree_pmac64(void) { phandle u3, i2c, mpic; u32 u3_rev; @@ -2888,7 +2888,31 @@ static void __init fixup_device_tree_pmac(void) &parent, sizeof(parent)); } #else -#define fixup_device_tree_pmac() +#define fixup_device_tree_pmac64() +#endif + +#ifdef CONFIG_PPC_PMAC +static void __init fixup_device_tree_pmac(void) +{ + __be32 val = 1; + char type[8]; + phandle node; + + // Some pmacs are missing #size-cells on escc nodes + for (node = 0; prom_next_node(&node); ) { + type[0] = '\0'; + prom_getprop(node, "device_type", type, sizeof(type)); + if (prom_strcmp(type, "escc")) + continue; + + if (prom_getproplen(node, "#size-cells") != PROM_ERROR) + continue; + + prom_setprop(node, NULL, "#size-cells", &val, sizeof(val)); + } +} +#else +static inline void fixup_device_tree_pmac(void) { } #endif #ifdef CONFIG_PPC_EFIKA @@ -3111,6 +3135,7 @@ static void __init fixup_device_tree(void) { fixup_device_tree_chrp(); fixup_device_tree_pmac(); + fixup_device_tree_pmac64(); fixup_device_tree_efika(); fixup_device_tree_pasemi(); } diff --git a/arch/powerpc/platforms/pseries/svm.c b/arch/powerpc/platforms/pseries/svm.c index c5d0f92c7969..384c9dc1899a 100644 --- a/arch/powerpc/platforms/pseries/svm.c +++ b/arch/powerpc/platforms/pseries/svm.c @@ -10,7 +10,6 @@ #include <linux/memblock.h> #include <linux/mem_encrypt.h> #include <linux/cc_platform.h> -#include <linux/mem_encrypt.h> #include <asm/machdep.h> #include <asm/svm.h> #include <asm/swiotlb.h> diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index ff1e353b0d6f..cc63aef41e94 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -83,6 +83,7 @@ config RISCV select ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP select ARCH_WANTS_NO_INSTR select ARCH_WANTS_THP_SWAP if HAVE_ARCH_TRANSPARENT_HUGEPAGE + select ARCH_WEAK_RELEASE_ACQUIRE if ARCH_USE_QUEUED_SPINLOCKS select BINFMT_FLAT_NO_DATA_START_OFFSET if !MMU select BUILDTIME_TABLE_SORT if MMU select CLINT_TIMER if RISCV_M_MODE @@ -116,6 +117,7 @@ config RISCV select GENERIC_VDSO_TIME_NS if HAVE_GENERIC_VDSO select HARDIRQS_SW_RESEND select HAS_IOPORT if MMU + select HAVE_ALIGNED_STRUCT_PAGE select HAVE_ARCH_AUDITSYSCALL select HAVE_ARCH_HUGE_VMALLOC if HAVE_ARCH_HUGE_VMAP select HAVE_ARCH_HUGE_VMAP if MMU && 64BIT @@ -507,6 +509,39 @@ config NODES_SHIFT Specify the maximum number of NUMA Nodes available on the target system. Increases memory reserved to accommodate various tables. +choice + prompt "RISC-V spinlock type" + default RISCV_COMBO_SPINLOCKS + +config RISCV_TICKET_SPINLOCKS + bool "Using ticket spinlock" + +config RISCV_QUEUED_SPINLOCKS + bool "Using queued spinlock" + depends on SMP && MMU && NONPORTABLE + select ARCH_USE_QUEUED_SPINLOCKS + help + The queued spinlock implementation requires the forward progress + guarantee of cmpxchg()/xchg() atomic operations: CAS with Zabha or + LR/SC with Ziccrse provide such guarantee. + + Select this if and only if Zabha or Ziccrse is available on your + platform, RISCV_QUEUED_SPINLOCKS must not be selected for platforms + without one of those extensions. + + If unsure, select RISCV_COMBO_SPINLOCKS, which will use qspinlocks + when supported and otherwise ticket spinlocks. + +config RISCV_COMBO_SPINLOCKS + bool "Using combo spinlock" + depends on SMP && MMU + select ARCH_USE_QUEUED_SPINLOCKS + help + Embed both queued spinlock and ticket lock so that the spinlock + implementation can be chosen at runtime. 
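The combo option embeds both lock implementations and picks one at boot, typically by keying every lock operation off a static branch once the required extensions (Zabha or Ziccrse) have been probed; both variants use a 32-bit lock word, which is what makes the runtime switch possible. A plausible sketch of that dispatch follows; the key and the two helpers are illustrative names, not necessarily the kernel's:

    DECLARE_STATIC_KEY_TRUE(use_qspinlock_key);     /* illustrative; decided once during boot */

    static __always_inline void combo_spin_lock(arch_spinlock_t *lock)
    {
            if (static_branch_likely(&use_qspinlock_key))
                    queued_variant_lock(lock);      /* qspinlock path, needs cmpxchg forward progress */
            else
                    ticket_variant_lock(lock);      /* ticket path, safe on any platform */
    }
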
+ +endchoice + config RISCV_ALTERNATIVE bool depends on !XIP_KERNEL @@ -532,6 +567,17 @@ config RISCV_ISA_C If you don't know what to do here, say Y. +config RISCV_ISA_SUPM + bool "Supm extension for userspace pointer masking" + depends on 64BIT + default y + help + Add support for pointer masking in userspace (Supm) when the + underlying hardware extension (Smnpm or Ssnpm) is detected at boot. + + If this option is disabled, userspace will be unable to use + the prctl(PR_{SET,GET}_TAGGED_ADDR_CTRL) API. + config RISCV_ISA_SVNAPOT bool "Svnapot extension support for supervisor mode NAPOT pages" depends on 64BIT && MMU @@ -633,6 +679,40 @@ config RISCV_ISA_ZAWRS use of these instructions in the kernel when the Zawrs extension is detected at boot. +config TOOLCHAIN_HAS_ZABHA + bool + default y + depends on !64BIT || $(cc-option,-mabi=lp64 -march=rv64ima_zabha) + depends on !32BIT || $(cc-option,-mabi=ilp32 -march=rv32ima_zabha) + depends on AS_HAS_OPTION_ARCH + +config RISCV_ISA_ZABHA + bool "Zabha extension support for atomic byte/halfword operations" + depends on TOOLCHAIN_HAS_ZABHA + depends on RISCV_ALTERNATIVE + default y + help + Enable the use of the Zabha ISA-extension to implement kernel + byte/halfword atomic memory operations when it is detected at boot. + + If you don't know what to do here, say Y. + +config TOOLCHAIN_HAS_ZACAS + bool + default y + depends on !64BIT || $(cc-option,-mabi=lp64 -march=rv64ima_zacas) + depends on !32BIT || $(cc-option,-mabi=ilp32 -march=rv32ima_zacas) + depends on AS_HAS_OPTION_ARCH + +config RISCV_ISA_ZACAS + bool "Zacas extension support for atomic CAS" + depends on TOOLCHAIN_HAS_ZACAS + depends on RISCV_ALTERNATIVE + default y + help + Enable the use of the Zacas ISA-extension to implement kernel atomic + cmpxchg operations when it is detected at boot. + If you don't know what to do here, say Y. config TOOLCHAIN_HAS_ZBB @@ -786,10 +866,24 @@ config THREAD_SIZE_ORDER config RISCV_MISALIGNED bool + help + Embed support for detecting and emulating misaligned + scalar or vector loads and stores. + +config RISCV_SCALAR_MISALIGNED + bool + select RISCV_MISALIGNED select SYSCTL_ARCH_UNALIGN_ALLOW help Embed support for emulating misaligned loads and stores. +config RISCV_VECTOR_MISALIGNED + bool + select RISCV_MISALIGNED + depends on RISCV_ISA_V + help + Enable detecting support for vector misaligned loads and stores. + choice prompt "Unaligned Accesses Support" default RISCV_PROBE_UNALIGNED_ACCESS @@ -801,7 +895,7 @@ choice config RISCV_PROBE_UNALIGNED_ACCESS bool "Probe for hardware unaligned access support" - select RISCV_MISALIGNED + select RISCV_SCALAR_MISALIGNED help During boot, the kernel will run a series of tests to determine the speed of unaligned accesses. This probing will dynamically determine @@ -812,7 +906,7 @@ config RISCV_PROBE_UNALIGNED_ACCESS config RISCV_EMULATED_UNALIGNED_ACCESS bool "Emulate unaligned access where system support is missing" - select RISCV_MISALIGNED + select RISCV_SCALAR_MISALIGNED help If unaligned memory accesses trap into the kernel as they are not supported by the system, the kernel will emulate the unaligned @@ -841,6 +935,46 @@ config RISCV_EFFICIENT_UNALIGNED_ACCESS endchoice +choice + prompt "Vector unaligned Accesses Support" + depends on RISCV_ISA_V + default RISCV_PROBE_VECTOR_UNALIGNED_ACCESS + help + This determines the level of support for vector unaligned accesses. This + information is used by the kernel to perform optimizations. It is also + exposed to user space via the hwprobe syscall. 
The hardware will be + probed at boot by default. + +config RISCV_PROBE_VECTOR_UNALIGNED_ACCESS + bool "Probe speed of vector unaligned accesses" + select RISCV_VECTOR_MISALIGNED + depends on RISCV_ISA_V + help + During boot, the kernel will run a series of tests to determine the + speed of vector unaligned accesses if they are supported. This probing + will dynamically determine the speed of vector unaligned accesses on + the underlying system if they are supported. + +config RISCV_SLOW_VECTOR_UNALIGNED_ACCESS + bool "Assume the system supports slow vector unaligned memory accesses" + depends on NONPORTABLE + help + Assume that the system supports slow vector unaligned memory accesses. The + kernel and userspace programs may not be able to run at all on systems + that do not support unaligned memory accesses. + +config RISCV_EFFICIENT_VECTOR_UNALIGNED_ACCESS + bool "Assume the system supports fast vector unaligned memory accesses" + depends on NONPORTABLE + help + Assume that the system supports fast vector unaligned memory accesses. When + enabled, this option improves the performance of the kernel on such + systems. However, the kernel and userspace programs will run much more + slowly, or will not be able to run at all, on systems that do not + support efficient unaligned memory accesses. + +endchoice + source "arch/riscv/Kconfig.vendor" endmenu # "Platform type" diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile index d469db9f46f4..9fe1ee740dda 100644 --- a/arch/riscv/Makefile +++ b/arch/riscv/Makefile @@ -82,6 +82,12 @@ else riscv-march-$(CONFIG_TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI) := $(riscv-march-y)_zicsr_zifencei endif +# Check if the toolchain supports Zacas +riscv-march-$(CONFIG_TOOLCHAIN_HAS_ZACAS) := $(riscv-march-y)_zacas + +# Check if the toolchain supports Zabha +riscv-march-$(CONFIG_TOOLCHAIN_HAS_ZABHA) := $(riscv-march-y)_zabha + # Remove F,D,V from isa string for all. 
Keep extensions between "fd" and "v" by # matching non-v and non-multi-letter extensions out with the filter ([^v_]*) KBUILD_CFLAGS += -march=$(shell echo $(riscv-march-y) | sed -E 's/(rv32ima|rv64ima)fd([^v_]*)v?/\1\2/') diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig index 1d5e13b148f2..b4a37345703e 100644 --- a/arch/riscv/configs/defconfig +++ b/arch/riscv/configs/defconfig @@ -167,6 +167,7 @@ CONFIG_PINCTRL_SOPHGO_CV1800B=y CONFIG_PINCTRL_SOPHGO_CV1812H=y CONFIG_PINCTRL_SOPHGO_SG2000=y CONFIG_PINCTRL_SOPHGO_SG2002=y +CONFIG_GPIO_DWAPB=y CONFIG_GPIO_SIFIVE=y CONFIG_POWER_RESET_GPIO_RESTART=y CONFIG_SENSORS_SFCTEMP=m diff --git a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild index 1461af12da6e..de13d5a234f8 100644 --- a/arch/riscv/include/asm/Kbuild +++ b/arch/riscv/include/asm/Kbuild @@ -6,10 +6,12 @@ generic-y += early_ioremap.h generic-y += flat.h generic-y += kvm_para.h generic-y += mmzone.h +generic-y += mcs_spinlock.h generic-y += parport.h -generic-y += spinlock.h generic-y += spinlock_types.h +generic-y += ticket_spinlock.h generic-y += qrwlock.h generic-y += qrwlock_types.h +generic-y += qspinlock.h generic-y += user.h generic-y += vmlinux.lds.h diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h index ebbce134917c..4cadc56220fe 100644 --- a/arch/riscv/include/asm/cmpxchg.h +++ b/arch/riscv/include/asm/cmpxchg.h @@ -12,30 +12,43 @@ #include <asm/fence.h> #include <asm/hwcap.h> #include <asm/insn-def.h> - -#define __arch_xchg_masked(sc_sfx, prepend, append, r, p, n) \ -({ \ - u32 *__ptr32b = (u32 *)((ulong)(p) & ~0x3); \ - ulong __s = ((ulong)(p) & (0x4 - sizeof(*p))) * BITS_PER_BYTE; \ - ulong __mask = GENMASK(((sizeof(*p)) * BITS_PER_BYTE) - 1, 0) \ - << __s; \ - ulong __newx = (ulong)(n) << __s; \ - ulong __retx; \ - ulong __rc; \ - \ - __asm__ __volatile__ ( \ - prepend \ - "0: lr.w %0, %2\n" \ - " and %1, %0, %z4\n" \ - " or %1, %1, %z3\n" \ - " sc.w" sc_sfx " %1, %1, %2\n" \ - " bnez %1, 0b\n" \ - append \ - : "=&r" (__retx), "=&r" (__rc), "+A" (*(__ptr32b)) \ - : "rJ" (__newx), "rJ" (~__mask) \ - : "memory"); \ - \ - r = (__typeof__(*(p)))((__retx & __mask) >> __s); \ +#include <asm/cpufeature-macros.h> + +#define __arch_xchg_masked(sc_sfx, swap_sfx, prepend, sc_append, \ + swap_append, r, p, n) \ +({ \ + if (IS_ENABLED(CONFIG_RISCV_ISA_ZABHA) && \ + riscv_has_extension_unlikely(RISCV_ISA_EXT_ZABHA)) { \ + __asm__ __volatile__ ( \ + prepend \ + " amoswap" swap_sfx " %0, %z2, %1\n" \ + swap_append \ + : "=&r" (r), "+A" (*(p)) \ + : "rJ" (n) \ + : "memory"); \ + } else { \ + u32 *__ptr32b = (u32 *)((ulong)(p) & ~0x3); \ + ulong __s = ((ulong)(p) & (0x4 - sizeof(*p))) * BITS_PER_BYTE; \ + ulong __mask = GENMASK(((sizeof(*p)) * BITS_PER_BYTE) - 1, 0) \ + << __s; \ + ulong __newx = (ulong)(n) << __s; \ + ulong __retx; \ + ulong __rc; \ + \ + __asm__ __volatile__ ( \ + prepend \ + "0: lr.w %0, %2\n" \ + " and %1, %0, %z4\n" \ + " or %1, %1, %z3\n" \ + " sc.w" sc_sfx " %1, %1, %2\n" \ + " bnez %1, 0b\n" \ + sc_append \ + : "=&r" (__retx), "=&r" (__rc), "+A" (*(__ptr32b)) \ + : "rJ" (__newx), "rJ" (~__mask) \ + : "memory"); \ + \ + r = (__typeof__(*(p)))((__retx & __mask) >> __s); \ + } \ }) #define __arch_xchg(sfx, prepend, append, r, p, n) \ @@ -58,8 +71,13 @@ \ switch (sizeof(*__ptr)) { \ case 1: \ + __arch_xchg_masked(sc_sfx, ".b" swap_sfx, \ + prepend, sc_append, swap_append, \ + __ret, __ptr, __new); \ + break; \ case 2: \ - __arch_xchg_masked(sc_sfx, prepend, sc_append, \ + __arch_xchg_masked(sc_sfx, ".h" 
swap_sfx, \ + prepend, sc_append, swap_append, \ __ret, __ptr, __new); \ break; \ case 4: \ @@ -106,55 +124,90 @@ * store NEW in MEM. Return the initial value in MEM. Success is * indicated by comparing RETURN with OLD. */ - -#define __arch_cmpxchg_masked(sc_sfx, prepend, append, r, p, o, n) \ -({ \ - u32 *__ptr32b = (u32 *)((ulong)(p) & ~0x3); \ - ulong __s = ((ulong)(p) & (0x4 - sizeof(*p))) * BITS_PER_BYTE; \ - ulong __mask = GENMASK(((sizeof(*p)) * BITS_PER_BYTE) - 1, 0) \ - << __s; \ - ulong __newx = (ulong)(n) << __s; \ - ulong __oldx = (ulong)(o) << __s; \ - ulong __retx; \ - ulong __rc; \ - \ - __asm__ __volatile__ ( \ - prepend \ - "0: lr.w %0, %2\n" \ - " and %1, %0, %z5\n" \ - " bne %1, %z3, 1f\n" \ - " and %1, %0, %z6\n" \ - " or %1, %1, %z4\n" \ - " sc.w" sc_sfx " %1, %1, %2\n" \ - " bnez %1, 0b\n" \ - append \ - "1:\n" \ - : "=&r" (__retx), "=&r" (__rc), "+A" (*(__ptr32b)) \ - : "rJ" ((long)__oldx), "rJ" (__newx), \ - "rJ" (__mask), "rJ" (~__mask) \ - : "memory"); \ - \ - r = (__typeof__(*(p)))((__retx & __mask) >> __s); \ +#define __arch_cmpxchg_masked(sc_sfx, cas_sfx, \ + sc_prepend, sc_append, \ + cas_prepend, cas_append, \ + r, p, o, n) \ +({ \ + if (IS_ENABLED(CONFIG_RISCV_ISA_ZABHA) && \ + IS_ENABLED(CONFIG_RISCV_ISA_ZACAS) && \ + riscv_has_extension_unlikely(RISCV_ISA_EXT_ZABHA) && \ + riscv_has_extension_unlikely(RISCV_ISA_EXT_ZACAS)) { \ + r = o; \ + \ + __asm__ __volatile__ ( \ + cas_prepend \ + " amocas" cas_sfx " %0, %z2, %1\n" \ + cas_append \ + : "+&r" (r), "+A" (*(p)) \ + : "rJ" (n) \ + : "memory"); \ + } else { \ + u32 *__ptr32b = (u32 *)((ulong)(p) & ~0x3); \ + ulong __s = ((ulong)(p) & (0x4 - sizeof(*p))) * BITS_PER_BYTE; \ + ulong __mask = GENMASK(((sizeof(*p)) * BITS_PER_BYTE) - 1, 0) \ + << __s; \ + ulong __newx = (ulong)(n) << __s; \ + ulong __oldx = (ulong)(o) << __s; \ + ulong __retx; \ + ulong __rc; \ + \ + __asm__ __volatile__ ( \ + sc_prepend \ + "0: lr.w %0, %2\n" \ + " and %1, %0, %z5\n" \ + " bne %1, %z3, 1f\n" \ + " and %1, %0, %z6\n" \ + " or %1, %1, %z4\n" \ + " sc.w" sc_sfx " %1, %1, %2\n" \ + " bnez %1, 0b\n" \ + sc_append \ + "1:\n" \ + : "=&r" (__retx), "=&r" (__rc), "+A" (*(__ptr32b)) \ + : "rJ" ((long)__oldx), "rJ" (__newx), \ + "rJ" (__mask), "rJ" (~__mask) \ + : "memory"); \ + \ + r = (__typeof__(*(p)))((__retx & __mask) >> __s); \ + } \ }) -#define __arch_cmpxchg(lr_sfx, sc_sfx, prepend, append, r, p, co, o, n) \ +#define __arch_cmpxchg(lr_sfx, sc_sfx, cas_sfx, \ + sc_prepend, sc_append, \ + cas_prepend, cas_append, \ + r, p, co, o, n) \ ({ \ - register unsigned int __rc; \ + if (IS_ENABLED(CONFIG_RISCV_ISA_ZACAS) && \ + riscv_has_extension_unlikely(RISCV_ISA_EXT_ZACAS)) { \ + r = o; \ \ - __asm__ __volatile__ ( \ - prepend \ - "0: lr" lr_sfx " %0, %2\n" \ - " bne %0, %z3, 1f\n" \ - " sc" sc_sfx " %1, %z4, %2\n" \ - " bnez %1, 0b\n" \ - append \ - "1:\n" \ - : "=&r" (r), "=&r" (__rc), "+A" (*(p)) \ - : "rJ" (co o), "rJ" (n) \ - : "memory"); \ + __asm__ __volatile__ ( \ + cas_prepend \ + " amocas" cas_sfx " %0, %z2, %1\n" \ + cas_append \ + : "+&r" (r), "+A" (*(p)) \ + : "rJ" (n) \ + : "memory"); \ + } else { \ + register unsigned int __rc; \ + \ + __asm__ __volatile__ ( \ + sc_prepend \ + "0: lr" lr_sfx " %0, %2\n" \ + " bne %0, %z3, 1f\n" \ + " sc" sc_sfx " %1, %z4, %2\n" \ + " bnez %1, 0b\n" \ + sc_append \ + "1:\n" \ + : "=&r" (r), "=&r" (__rc), "+A" (*(p)) \ + : "rJ" (co o), "rJ" (n) \ + : "memory"); \ + } \ }) -#define _arch_cmpxchg(ptr, old, new, sc_sfx, prepend, append) \ +#define _arch_cmpxchg(ptr, old, new, sc_sfx, cas_sfx, \ 
+ sc_prepend, sc_append, \ + cas_prepend, cas_append) \ ({ \ __typeof__(ptr) __ptr = (ptr); \ __typeof__(*(__ptr)) __old = (old); \ @@ -163,17 +216,28 @@ \ switch (sizeof(*__ptr)) { \ case 1: \ + __arch_cmpxchg_masked(sc_sfx, ".b" cas_sfx, \ + sc_prepend, sc_append, \ + cas_prepend, cas_append, \ + __ret, __ptr, __old, __new); \ + break; \ case 2: \ - __arch_cmpxchg_masked(sc_sfx, prepend, append, \ - __ret, __ptr, __old, __new); \ + __arch_cmpxchg_masked(sc_sfx, ".h" cas_sfx, \ + sc_prepend, sc_append, \ + cas_prepend, cas_append, \ + __ret, __ptr, __old, __new); \ break; \ case 4: \ - __arch_cmpxchg(".w", ".w" sc_sfx, prepend, append, \ - __ret, __ptr, (long), __old, __new); \ + __arch_cmpxchg(".w", ".w" sc_sfx, ".w" cas_sfx, \ + sc_prepend, sc_append, \ + cas_prepend, cas_append, \ + __ret, __ptr, (long), __old, __new); \ break; \ case 8: \ - __arch_cmpxchg(".d", ".d" sc_sfx, prepend, append, \ - __ret, __ptr, /**/, __old, __new); \ + __arch_cmpxchg(".d", ".d" sc_sfx, ".d" cas_sfx, \ + sc_prepend, sc_append, \ + cas_prepend, cas_append, \ + __ret, __ptr, /**/, __old, __new); \ break; \ default: \ BUILD_BUG(); \ @@ -181,17 +245,40 @@ (__typeof__(*(__ptr)))__ret; \ }) +/* + * These macros are here to improve the readability of the arch_cmpxchg_XXX() + * macros. + */ +#define SC_SFX(x) x +#define CAS_SFX(x) x +#define SC_PREPEND(x) x +#define SC_APPEND(x) x +#define CAS_PREPEND(x) x +#define CAS_APPEND(x) x + #define arch_cmpxchg_relaxed(ptr, o, n) \ - _arch_cmpxchg((ptr), (o), (n), "", "", "") + _arch_cmpxchg((ptr), (o), (n), \ + SC_SFX(""), CAS_SFX(""), \ + SC_PREPEND(""), SC_APPEND(""), \ + CAS_PREPEND(""), CAS_APPEND("")) #define arch_cmpxchg_acquire(ptr, o, n) \ - _arch_cmpxchg((ptr), (o), (n), "", "", RISCV_ACQUIRE_BARRIER) + _arch_cmpxchg((ptr), (o), (n), \ + SC_SFX(""), CAS_SFX(""), \ + SC_PREPEND(""), SC_APPEND(RISCV_ACQUIRE_BARRIER), \ + CAS_PREPEND(""), CAS_APPEND(RISCV_ACQUIRE_BARRIER)) #define arch_cmpxchg_release(ptr, o, n) \ - _arch_cmpxchg((ptr), (o), (n), "", RISCV_RELEASE_BARRIER, "") + _arch_cmpxchg((ptr), (o), (n), \ + SC_SFX(""), CAS_SFX(""), \ + SC_PREPEND(RISCV_RELEASE_BARRIER), SC_APPEND(""), \ + CAS_PREPEND(RISCV_RELEASE_BARRIER), CAS_APPEND("")) #define arch_cmpxchg(ptr, o, n) \ - _arch_cmpxchg((ptr), (o), (n), ".rl", "", " fence rw, rw\n") + _arch_cmpxchg((ptr), (o), (n), \ + SC_SFX(".rl"), CAS_SFX(".aqrl"), \ + SC_PREPEND(""), SC_APPEND(RISCV_FULL_BARRIER), \ + CAS_PREPEND(""), CAS_APPEND("")) #define arch_cmpxchg_local(ptr, o, n) \ arch_cmpxchg_relaxed((ptr), (o), (n)) @@ -226,6 +313,44 @@ arch_cmpxchg_release((ptr), (o), (n)); \ }) +#if defined(CONFIG_64BIT) && defined(CONFIG_RISCV_ISA_ZACAS) + +#define system_has_cmpxchg128() riscv_has_extension_unlikely(RISCV_ISA_EXT_ZACAS) + +union __u128_halves { + u128 full; + struct { + u64 low, high; + }; +}; + +#define __arch_cmpxchg128(p, o, n, cas_sfx) \ +({ \ + __typeof__(*(p)) __o = (o); \ + union __u128_halves __hn = { .full = (n) }; \ + union __u128_halves __ho = { .full = (__o) }; \ + register unsigned long t1 asm ("t1") = __hn.low; \ + register unsigned long t2 asm ("t2") = __hn.high; \ + register unsigned long t3 asm ("t3") = __ho.low; \ + register unsigned long t4 asm ("t4") = __ho.high; \ + \ + __asm__ __volatile__ ( \ + " amocas.q" cas_sfx " %0, %z3, %2" \ + : "+&r" (t3), "+&r" (t4), "+A" (*(p)) \ + : "rJ" (t1), "rJ" (t2) \ + : "memory"); \ + \ + ((u128)t4 << 64) | t3; \ +}) + +#define arch_cmpxchg128(ptr, o, n) \ + __arch_cmpxchg128((ptr), (o), (n), ".aqrl") + +#define arch_cmpxchg128_local(ptr, o, n) \ + 
__arch_cmpxchg128((ptr), (o), (n), "") + +#endif /* CONFIG_64BIT && CONFIG_RISCV_ISA_ZACAS */ + #ifdef CONFIG_RISCV_ISA_ZAWRS /* * Despite wrs.nto being "WRS-with-no-timeout", in the absence of changes to @@ -245,6 +370,11 @@ static __always_inline void __cmpwait(volatile void *ptr, : : : : no_zawrs); switch (size) { + case 1: + fallthrough; + case 2: + /* RISC-V doesn't have lr instructions on byte and half-word. */ + goto no_zawrs; case 4: asm volatile( " lr.w %0, %1\n" diff --git a/arch/riscv/include/asm/compat.h b/arch/riscv/include/asm/compat.h index aa103530a5c8..6081327e55f5 100644 --- a/arch/riscv/include/asm/compat.h +++ b/arch/riscv/include/asm/compat.h @@ -9,7 +9,6 @@ */ #include <linux/types.h> #include <linux/sched.h> -#include <linux/sched/task_stack.h> #include <asm-generic/compat.h> static inline int is_compat_task(void) diff --git a/arch/riscv/include/asm/cpufeature-macros.h b/arch/riscv/include/asm/cpufeature-macros.h new file mode 100644 index 000000000000..a8103edbf51f --- /dev/null +++ b/arch/riscv/include/asm/cpufeature-macros.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2022-2024 Rivos, Inc + */ + +#ifndef _ASM_CPUFEATURE_MACROS_H +#define _ASM_CPUFEATURE_MACROS_H + +#include <asm/hwcap.h> +#include <asm/alternative-macros.h> + +#define STANDARD_EXT 0 + +bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, unsigned int bit); +#define riscv_isa_extension_available(isa_bitmap, ext) \ + __riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_##ext) + +static __always_inline bool __riscv_has_extension_likely(const unsigned long vendor, + const unsigned long ext) +{ + asm goto(ALTERNATIVE("j %l[l_no]", "nop", %[vendor], %[ext], 1) + : + : [vendor] "i" (vendor), [ext] "i" (ext) + : + : l_no); + + return true; +l_no: + return false; +} + +static __always_inline bool __riscv_has_extension_unlikely(const unsigned long vendor, + const unsigned long ext) +{ + asm goto(ALTERNATIVE("nop", "j %l[l_yes]", %[vendor], %[ext], 1) + : + : [vendor] "i" (vendor), [ext] "i" (ext) + : + : l_yes); + + return false; +l_yes: + return true; +} + +static __always_inline bool riscv_has_extension_unlikely(const unsigned long ext) +{ + compiletime_assert(ext < RISCV_ISA_EXT_MAX, "ext must be < RISCV_ISA_EXT_MAX"); + + if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) + return __riscv_has_extension_unlikely(STANDARD_EXT, ext); + + return __riscv_isa_extension_available(NULL, ext); +} + +static __always_inline bool riscv_has_extension_likely(const unsigned long ext) +{ + compiletime_assert(ext < RISCV_ISA_EXT_MAX, "ext must be < RISCV_ISA_EXT_MAX"); + + if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) + return __riscv_has_extension_likely(STANDARD_EXT, ext); + + return __riscv_isa_extension_available(NULL, ext); +} + +#endif /* _ASM_CPUFEATURE_MACROS_H */ diff --git a/arch/riscv/include/asm/cpufeature.h b/arch/riscv/include/asm/cpufeature.h index 45f9c1171a48..4bd054c54c21 100644 --- a/arch/riscv/include/asm/cpufeature.h +++ b/arch/riscv/include/asm/cpufeature.h @@ -8,9 +8,12 @@ #include <linux/bitmap.h> #include <linux/jump_label.h> +#include <linux/workqueue.h> +#include <linux/kconfig.h> +#include <linux/percpu-defs.h> +#include <linux/threads.h> #include <asm/hwcap.h> -#include <asm/alternative-macros.h> -#include <asm/errno.h> +#include <asm/cpufeature-macros.h> /* * These are probed via a device_initcall(), via either the SBI or directly @@ -31,7 +34,7 @@ DECLARE_PER_CPU(struct riscv_cpuinfo, riscv_cpuinfo); /* Per-cpu ISA extensions. 
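The riscv_has_extension_likely()/_unlikely() helpers, now split out into cpufeature-macros.h so low-level headers such as cmpxchg.h can include them without pulling in all of cpufeature.h, compile down to a single branch that the alternatives mechanism patches at boot. A small illustrative call site; zbb_popcount() is a made-up helper:

    static inline unsigned long count_set_bits(unsigned long x)
    {
            if (riscv_has_extension_likely(RISCV_ISA_EXT_ZBB)) {
                    /* Branch is patched to fall straight through when Zbb was detected at boot. */
                    return zbb_popcount(x);         /* illustrative fast path */
            }
            return hweight_long(x);                 /* generic fallback */
    }
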
*/ extern struct riscv_isainfo hart_isa[NR_CPUS]; -void riscv_user_isa_enable(void); +void __init riscv_user_isa_enable(void); #define _RISCV_ISA_EXT_DATA(_name, _id, _subset_exts, _subset_exts_size, _validate) { \ .name = #_name, \ @@ -58,8 +61,9 @@ void riscv_user_isa_enable(void); #define __RISCV_ISA_EXT_SUPERSET_VALIDATE(_name, _id, _sub_exts, _validate) \ _RISCV_ISA_EXT_DATA(_name, _id, _sub_exts, ARRAY_SIZE(_sub_exts), _validate) -#if defined(CONFIG_RISCV_MISALIGNED) bool check_unaligned_access_emulated_all_cpus(void); +#if defined(CONFIG_RISCV_SCALAR_MISALIGNED) +void check_unaligned_access_emulated(struct work_struct *work __always_unused); void unaligned_emulation_finish(void); bool unaligned_ctl_available(void); DECLARE_PER_CPU(long, misaligned_access_speed); @@ -70,6 +74,12 @@ static inline bool unaligned_ctl_available(void) } #endif +bool check_vector_unaligned_access_emulated_all_cpus(void); +#if defined(CONFIG_RISCV_VECTOR_MISALIGNED) +void check_vector_unaligned_access_emulated(struct work_struct *work __always_unused); +DECLARE_PER_CPU(long, vector_misaligned_access); +#endif + #if defined(CONFIG_RISCV_PROBE_UNALIGNED_ACCESS) DECLARE_STATIC_KEY_FALSE(fast_unaligned_access_speed_key); @@ -103,61 +113,6 @@ extern const size_t riscv_isa_ext_count; extern bool riscv_isa_fallback; unsigned long riscv_isa_extension_base(const unsigned long *isa_bitmap); - -#define STANDARD_EXT 0 - -bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, unsigned int bit); -#define riscv_isa_extension_available(isa_bitmap, ext) \ - __riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_##ext) - -static __always_inline bool __riscv_has_extension_likely(const unsigned long vendor, - const unsigned long ext) -{ - asm goto(ALTERNATIVE("j %l[l_no]", "nop", %[vendor], %[ext], 1) - : - : [vendor] "i" (vendor), [ext] "i" (ext) - : - : l_no); - - return true; -l_no: - return false; -} - -static __always_inline bool __riscv_has_extension_unlikely(const unsigned long vendor, - const unsigned long ext) -{ - asm goto(ALTERNATIVE("nop", "j %l[l_yes]", %[vendor], %[ext], 1) - : - : [vendor] "i" (vendor), [ext] "i" (ext) - : - : l_yes); - - return false; -l_yes: - return true; -} - -static __always_inline bool riscv_has_extension_unlikely(const unsigned long ext) -{ - compiletime_assert(ext < RISCV_ISA_EXT_MAX, "ext must be < RISCV_ISA_EXT_MAX"); - - if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) - return __riscv_has_extension_unlikely(STANDARD_EXT, ext); - - return __riscv_isa_extension_available(NULL, ext); -} - -static __always_inline bool riscv_has_extension_likely(const unsigned long ext) -{ - compiletime_assert(ext < RISCV_ISA_EXT_MAX, "ext must be < RISCV_ISA_EXT_MAX"); - - if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) - return __riscv_has_extension_likely(STANDARD_EXT, ext); - - return __riscv_isa_extension_available(NULL, ext); -} - static __always_inline bool riscv_cpu_has_extension_likely(int cpu, const unsigned long ext) { compiletime_assert(ext < RISCV_ISA_EXT_MAX, "ext must be < RISCV_ISA_EXT_MAX"); diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h index 25966995da04..fe5d4eb9adea 100644 --- a/arch/riscv/include/asm/csr.h +++ b/arch/riscv/include/asm/csr.h @@ -119,6 +119,10 @@ /* HSTATUS flags */ #ifdef CONFIG_64BIT +#define HSTATUS_HUPMM _AC(0x3000000000000, UL) +#define HSTATUS_HUPMM_PMLEN_0 _AC(0x0000000000000, UL) +#define HSTATUS_HUPMM_PMLEN_7 _AC(0x2000000000000, UL) +#define HSTATUS_HUPMM_PMLEN_16 _AC(0x3000000000000, UL) #define HSTATUS_VSXL _AC(0x300000000, 
UL) #define HSTATUS_VSXL_SHIFT 32 #endif @@ -195,6 +199,10 @@ /* xENVCFG flags */ #define ENVCFG_STCE (_AC(1, ULL) << 63) #define ENVCFG_PBMTE (_AC(1, ULL) << 62) +#define ENVCFG_PMM (_AC(0x3, ULL) << 32) +#define ENVCFG_PMM_PMLEN_0 (_AC(0x0, ULL) << 32) +#define ENVCFG_PMM_PMLEN_7 (_AC(0x2, ULL) << 32) +#define ENVCFG_PMM_PMLEN_16 (_AC(0x3, ULL) << 32) #define ENVCFG_CBZE (_AC(1, UL) << 7) #define ENVCFG_CBCFE (_AC(1, UL) << 6) #define ENVCFG_CBIE_SHIFT 4 @@ -216,6 +224,12 @@ #define SMSTATEEN0_SSTATEEN0_SHIFT 63 #define SMSTATEEN0_SSTATEEN0 (_ULL(1) << SMSTATEEN0_SSTATEEN0_SHIFT) +/* mseccfg bits */ +#define MSECCFG_PMM ENVCFG_PMM +#define MSECCFG_PMM_PMLEN_0 ENVCFG_PMM_PMLEN_0 +#define MSECCFG_PMM_PMLEN_7 ENVCFG_PMM_PMLEN_7 +#define MSECCFG_PMM_PMLEN_16 ENVCFG_PMM_PMLEN_16 + /* symbolic CSR names: */ #define CSR_CYCLE 0xc00 #define CSR_TIME 0xc01 @@ -382,6 +396,8 @@ #define CSR_MIP 0x344 #define CSR_PMPCFG0 0x3a0 #define CSR_PMPADDR0 0x3b0 +#define CSR_MSECCFG 0x747 +#define CSR_MSECCFGH 0x757 #define CSR_MVENDORID 0xf11 #define CSR_MARCHID 0xf12 #define CSR_MIMPID 0xf13 diff --git a/arch/riscv/include/asm/entry-common.h b/arch/riscv/include/asm/entry-common.h index 2293e535f865..b28ccc6cdeea 100644 --- a/arch/riscv/include/asm/entry-common.h +++ b/arch/riscv/include/asm/entry-common.h @@ -33,6 +33,7 @@ static inline int handle_misaligned_load(struct pt_regs *regs) { return -1; } + static inline int handle_misaligned_store(struct pt_regs *regs) { return -1; diff --git a/arch/riscv/include/asm/hwcap.h b/arch/riscv/include/asm/hwcap.h index 46d9de54179e..08d2a5697466 100644 --- a/arch/riscv/include/asm/hwcap.h +++ b/arch/riscv/include/asm/hwcap.h @@ -93,6 +93,11 @@ #define RISCV_ISA_EXT_ZCMOP 84 #define RISCV_ISA_EXT_ZAWRS 85 #define RISCV_ISA_EXT_SVVPTC 86 +#define RISCV_ISA_EXT_SMMPM 87 +#define RISCV_ISA_EXT_SMNPM 88 +#define RISCV_ISA_EXT_SSNPM 89 +#define RISCV_ISA_EXT_ZABHA 90 +#define RISCV_ISA_EXT_ZICCRSE 91 #define RISCV_ISA_EXT_XLINUXENVCFG 127 @@ -101,8 +106,10 @@ #ifdef CONFIG_RISCV_M_MODE #define RISCV_ISA_EXT_SxAIA RISCV_ISA_EXT_SMAIA +#define RISCV_ISA_EXT_SUPM RISCV_ISA_EXT_SMNPM #else #define RISCV_ISA_EXT_SxAIA RISCV_ISA_EXT_SSAIA +#define RISCV_ISA_EXT_SUPM RISCV_ISA_EXT_SSNPM #endif #endif /* _ASM_RISCV_HWCAP_H */ diff --git a/arch/riscv/include/asm/hwprobe.h b/arch/riscv/include/asm/hwprobe.h index ffb9484531af..1ce1df6d0ff3 100644 --- a/arch/riscv/include/asm/hwprobe.h +++ b/arch/riscv/include/asm/hwprobe.h @@ -8,7 +8,7 @@ #include <uapi/asm/hwprobe.h> -#define RISCV_HWPROBE_MAX_KEY 9 +#define RISCV_HWPROBE_MAX_KEY 10 static inline bool riscv_hwprobe_key_is_valid(__s64 key) { diff --git a/arch/riscv/include/asm/mmu.h b/arch/riscv/include/asm/mmu.h index c9e03e9da3dc..1cc90465d75b 100644 --- a/arch/riscv/include/asm/mmu.h +++ b/arch/riscv/include/asm/mmu.h @@ -26,8 +26,15 @@ typedef struct { unsigned long exec_fdpic_loadmap; unsigned long interp_fdpic_loadmap; #endif + unsigned long flags; +#ifdef CONFIG_RISCV_ISA_SUPM + u8 pmlen; +#endif } mm_context_t; +/* Lock the pointer masking mode because this mm is multithreaded */ +#define MM_CONTEXT_LOCK_PMLEN 0 + #define cntx2asid(cntx) ((cntx) & SATP_ASID_MASK) #define cntx2version(cntx) ((cntx) & ~SATP_ASID_MASK) diff --git a/arch/riscv/include/asm/mmu_context.h b/arch/riscv/include/asm/mmu_context.h index 7030837adc1a..8c4bc49a3a0f 100644 --- a/arch/riscv/include/asm/mmu_context.h +++ b/arch/riscv/include/asm/mmu_context.h @@ -20,6 +20,9 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next, static inline void 
activate_mm(struct mm_struct *prev, struct mm_struct *next) { +#ifdef CONFIG_RISCV_ISA_SUPM + next->context.pmlen = 0; +#endif switch_mm(prev, next, NULL); } @@ -30,11 +33,21 @@ static inline int init_new_context(struct task_struct *tsk, #ifdef CONFIG_MMU atomic_long_set(&mm->context.id, 0); #endif + if (IS_ENABLED(CONFIG_RISCV_ISA_SUPM)) + clear_bit(MM_CONTEXT_LOCK_PMLEN, &mm->context.flags); return 0; } DECLARE_STATIC_KEY_FALSE(use_asid_allocator); +#ifdef CONFIG_RISCV_ISA_SUPM +#define mm_untag_mask mm_untag_mask +static inline unsigned long mm_untag_mask(struct mm_struct *mm) +{ + return -1UL >> mm->context.pmlen; +} +#endif + #include <asm-generic/mmu_context.h> #endif /* _ASM_RISCV_MMU_CONTEXT_H */ diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h index efa1b3519b23..5f56eb9d114a 100644 --- a/arch/riscv/include/asm/processor.h +++ b/arch/riscv/include/asm/processor.h @@ -102,6 +102,7 @@ struct thread_struct { unsigned long s[12]; /* s[0]: frame pointer */ struct __riscv_d_ext_state fstate; unsigned long bad_cause; + unsigned long envcfg; u32 riscv_v_flags; u32 vstate_ctrl; struct __riscv_v_ext_state vstate; @@ -177,6 +178,14 @@ extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val); #define RISCV_SET_ICACHE_FLUSH_CTX(arg1, arg2) riscv_set_icache_flush_ctx(arg1, arg2) extern int riscv_set_icache_flush_ctx(unsigned long ctx, unsigned long per_thread); +#ifdef CONFIG_RISCV_ISA_SUPM +/* PR_{SET,GET}_TAGGED_ADDR_CTRL prctl */ +long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg); +long get_tagged_addr_ctrl(struct task_struct *task); +#define SET_TAGGED_ADDR_CTRL(arg) set_tagged_addr_ctrl(current, arg) +#define GET_TAGGED_ADDR_CTRL() get_tagged_addr_ctrl(current) +#endif + #endif /* __ASSEMBLY__ */ #endif /* _ASM_RISCV_PROCESSOR_H */ diff --git a/arch/riscv/include/asm/spinlock.h b/arch/riscv/include/asm/spinlock.h new file mode 100644 index 000000000000..e5121b89acea --- /dev/null +++ b/arch/riscv/include/asm/spinlock.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_RISCV_SPINLOCK_H +#define __ASM_RISCV_SPINLOCK_H + +#ifdef CONFIG_RISCV_COMBO_SPINLOCKS +#define _Q_PENDING_LOOPS (1 << 9) + +#define __no_arch_spinlock_redefine +#include <asm/ticket_spinlock.h> +#include <asm/qspinlock.h> +#include <asm/jump_label.h> + +/* + * TODO: Use an alternative instead of a static key when we are able to parse + * the extensions string earlier in the boot process. 
+ */ +DECLARE_STATIC_KEY_TRUE(qspinlock_key); + +#define SPINLOCK_BASE_DECLARE(op, type, type_lock) \ +static __always_inline type arch_spin_##op(type_lock lock) \ +{ \ + if (static_branch_unlikely(&qspinlock_key)) \ + return queued_spin_##op(lock); \ + return ticket_spin_##op(lock); \ +} + +SPINLOCK_BASE_DECLARE(lock, void, arch_spinlock_t *) +SPINLOCK_BASE_DECLARE(unlock, void, arch_spinlock_t *) +SPINLOCK_BASE_DECLARE(is_locked, int, arch_spinlock_t *) +SPINLOCK_BASE_DECLARE(is_contended, int, arch_spinlock_t *) +SPINLOCK_BASE_DECLARE(trylock, bool, arch_spinlock_t *) +SPINLOCK_BASE_DECLARE(value_unlocked, int, arch_spinlock_t) + +#elif defined(CONFIG_RISCV_QUEUED_SPINLOCKS) + +#include <asm/qspinlock.h> + +#else + +#include <asm/ticket_spinlock.h> + +#endif + +#include <asm/qrwlock.h> + +#endif /* __ASM_RISCV_SPINLOCK_H */ diff --git a/arch/riscv/include/asm/switch_to.h b/arch/riscv/include/asm/switch_to.h index 7594df37cc9f..94e33216b2d9 100644 --- a/arch/riscv/include/asm/switch_to.h +++ b/arch/riscv/include/asm/switch_to.h @@ -70,6 +70,24 @@ static __always_inline bool has_fpu(void) { return false; } #define __switch_to_fpu(__prev, __next) do { } while (0) #endif +static inline void envcfg_update_bits(struct task_struct *task, + unsigned long mask, unsigned long val) +{ + unsigned long envcfg; + + envcfg = (task->thread.envcfg & ~mask) | val; + task->thread.envcfg = envcfg; + if (task == current) + csr_write(CSR_ENVCFG, envcfg); +} + +static inline void __switch_to_envcfg(struct task_struct *next) +{ + asm volatile (ALTERNATIVE("nop", "csrw " __stringify(CSR_ENVCFG) ", %0", + 0, RISCV_ISA_EXT_XLINUXENVCFG, 1) + :: "r" (next->thread.envcfg) : "memory"); +} + extern struct task_struct *__switch_to(struct task_struct *, struct task_struct *); @@ -103,6 +121,7 @@ do { \ __switch_to_vector(__prev, __next); \ if (switch_to_should_flush_icache(__next)) \ local_flush_icache_all(); \ + __switch_to_envcfg(__next); \ ((last) = __switch_to(__prev, __next)); \ } while (0) diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h index 72ec1d9bd3f3..fee56b0c8058 100644 --- a/arch/riscv/include/asm/uaccess.h +++ b/arch/riscv/include/asm/uaccess.h @@ -9,8 +9,41 @@ #define _ASM_RISCV_UACCESS_H #include <asm/asm-extable.h> +#include <asm/cpufeature.h> #include <asm/pgtable.h> /* for TASK_SIZE */ +#ifdef CONFIG_RISCV_ISA_SUPM +static inline unsigned long __untagged_addr_remote(struct mm_struct *mm, unsigned long addr) +{ + if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SUPM)) { + u8 pmlen = mm->context.pmlen; + + /* Virtual addresses are sign-extended; physical addresses are zero-extended. 
*/ + if (IS_ENABLED(CONFIG_MMU)) + return (long)(addr << pmlen) >> pmlen; + else + return (addr << pmlen) >> pmlen; + } + + return addr; +} + +#define untagged_addr(addr) ({ \ + unsigned long __addr = (__force unsigned long)(addr); \ + (__force __typeof__(addr))__untagged_addr_remote(current->mm, __addr); \ +}) + +#define untagged_addr_remote(mm, addr) ({ \ + unsigned long __addr = (__force unsigned long)(addr); \ + mmap_assert_locked(mm); \ + (__force __typeof__(addr))__untagged_addr_remote(mm, __addr); \ +}) + +#define access_ok(addr, size) likely(__access_ok(untagged_addr(addr), size)) +#else +#define untagged_addr(addr) (addr) +#endif + /* * User space memory access functions */ @@ -130,7 +163,7 @@ do { \ */ #define __get_user(x, ptr) \ ({ \ - const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \ + const __typeof__(*(ptr)) __user *__gu_ptr = untagged_addr(ptr); \ long __gu_err = 0; \ \ __chk_user_ptr(__gu_ptr); \ @@ -246,7 +279,7 @@ do { \ */ #define __put_user(x, ptr) \ ({ \ - __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \ + __typeof__(*(ptr)) __user *__gu_ptr = untagged_addr(ptr); \ __typeof__(*__gu_ptr) __val = (x); \ long __pu_err = 0; \ \ @@ -293,13 +326,13 @@ unsigned long __must_check __asm_copy_from_user(void *to, static inline unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n) { - return __asm_copy_from_user(to, from, n); + return __asm_copy_from_user(to, untagged_addr(from), n); } static inline unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n) { - return __asm_copy_to_user(to, from, n); + return __asm_copy_to_user(untagged_addr(to), from, n); } extern long strncpy_from_user(char *dest, const char __user *src, long count); @@ -314,7 +347,7 @@ unsigned long __must_check clear_user(void __user *to, unsigned long n) { might_fault(); return access_ok(to, n) ? 
- __clear_user(to, n) : n; + __clear_user(untagged_addr(to), n) : n; } #define __get_kernel_nofault(dst, src, type, err_label) \ diff --git a/arch/riscv/include/asm/vector.h b/arch/riscv/include/asm/vector.h index be7d309cca8a..c7c023afbacd 100644 --- a/arch/riscv/include/asm/vector.h +++ b/arch/riscv/include/asm/vector.h @@ -21,6 +21,7 @@ extern unsigned long riscv_v_vsize; int riscv_v_setup_vsize(void); +bool insn_is_vector(u32 insn_buf); bool riscv_v_first_use_handler(struct pt_regs *regs); void kernel_vector_begin(void); void kernel_vector_end(void); @@ -268,6 +269,7 @@ struct pt_regs; static inline int riscv_v_setup_vsize(void) { return -EOPNOTSUPP; } static __always_inline bool has_vector(void) { return false; } +static __always_inline bool insn_is_vector(u32 insn_buf) { return false; } static inline bool riscv_v_first_use_handler(struct pt_regs *regs) { return false; } static inline bool riscv_v_vstate_query(struct pt_regs *regs) { return false; } static inline bool riscv_v_vstate_ctrl_user_allowed(void) { return false; } diff --git a/arch/riscv/include/uapi/asm/hwprobe.h b/arch/riscv/include/uapi/asm/hwprobe.h index 1e153cda57db..3af142b99f77 100644 --- a/arch/riscv/include/uapi/asm/hwprobe.h +++ b/arch/riscv/include/uapi/asm/hwprobe.h @@ -72,6 +72,7 @@ struct riscv_hwprobe { #define RISCV_HWPROBE_EXT_ZCF (1ULL << 46) #define RISCV_HWPROBE_EXT_ZCMOP (1ULL << 47) #define RISCV_HWPROBE_EXT_ZAWRS (1ULL << 48) +#define RISCV_HWPROBE_EXT_SUPM (1ULL << 49) #define RISCV_HWPROBE_KEY_CPUPERF_0 5 #define RISCV_HWPROBE_MISALIGNED_UNKNOWN (0 << 0) #define RISCV_HWPROBE_MISALIGNED_EMULATED (1 << 0) @@ -88,6 +89,11 @@ struct riscv_hwprobe { #define RISCV_HWPROBE_MISALIGNED_SCALAR_SLOW 2 #define RISCV_HWPROBE_MISALIGNED_SCALAR_FAST 3 #define RISCV_HWPROBE_MISALIGNED_SCALAR_UNSUPPORTED 4 +#define RISCV_HWPROBE_KEY_MISALIGNED_VECTOR_PERF 10 +#define RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN 0 +#define RISCV_HWPROBE_MISALIGNED_VECTOR_SLOW 2 +#define RISCV_HWPROBE_MISALIGNED_VECTOR_FAST 3 +#define RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED 4 /* Increase RISCV_HWPROBE_MAX_KEY when adding items. 
*/ /* Flags */ diff --git a/arch/riscv/include/uapi/asm/kvm.h b/arch/riscv/include/uapi/asm/kvm.h index e97db3296456..4f24201376b1 100644 --- a/arch/riscv/include/uapi/asm/kvm.h +++ b/arch/riscv/include/uapi/asm/kvm.h @@ -175,6 +175,8 @@ enum KVM_RISCV_ISA_EXT_ID { KVM_RISCV_ISA_EXT_ZCF, KVM_RISCV_ISA_EXT_ZCMOP, KVM_RISCV_ISA_EXT_ZAWRS, + KVM_RISCV_ISA_EXT_SMNPM, + KVM_RISCV_ISA_EXT_SSNPM, KVM_RISCV_ISA_EXT_MAX, }; diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile index 69dc8aaab3fb..063d1faf5a53 100644 --- a/arch/riscv/kernel/Makefile +++ b/arch/riscv/kernel/Makefile @@ -75,7 +75,8 @@ obj-$(CONFIG_MMU) += vdso.o vdso/ obj-$(CONFIG_RISCV_MISALIGNED) += traps_misaligned.o obj-$(CONFIG_RISCV_MISALIGNED) += unaligned_access_speed.o -obj-$(CONFIG_RISCV_PROBE_UNALIGNED_ACCESS) += copy-unaligned.o +obj-$(CONFIG_RISCV_PROBE_UNALIGNED_ACCESS) += copy-unaligned.o +obj-$(CONFIG_RISCV_PROBE_VECTOR_UNALIGNED_ACCESS) += vec-copy-unaligned.o obj-$(CONFIG_FPU) += fpu.o obj-$(CONFIG_FPU) += kernel_mode_fpu.o diff --git a/arch/riscv/kernel/copy-unaligned.h b/arch/riscv/kernel/copy-unaligned.h index e3d70d35b708..85d4d11450cb 100644 --- a/arch/riscv/kernel/copy-unaligned.h +++ b/arch/riscv/kernel/copy-unaligned.h @@ -10,4 +10,9 @@ void __riscv_copy_words_unaligned(void *dst, const void *src, size_t size); void __riscv_copy_bytes_unaligned(void *dst, const void *src, size_t size); +#ifdef CONFIG_RISCV_PROBE_VECTOR_UNALIGNED_ACCESS +void __riscv_copy_vec_words_unaligned(void *dst, const void *src, size_t size); +void __riscv_copy_vec_bytes_unaligned(void *dst, const void *src, size_t size); +#endif + #endif /* __RISCV_KERNEL_COPY_UNALIGNED_H */ diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c index 826f46b21f2e..467c5c735bf5 100644 --- a/arch/riscv/kernel/cpufeature.c +++ b/arch/riscv/kernel/cpufeature.c @@ -29,6 +29,8 @@ #define NUM_ALPHA_EXTS ('z' - 'a' + 1) +static bool any_cpu_has_zicboz; + unsigned long elf_hwcap __read_mostly; /* Host ISA bitmap */ @@ -99,6 +101,7 @@ static int riscv_ext_zicboz_validate(const struct riscv_isa_ext_data *data, pr_err("Zicboz disabled as cboz-block-size present, but is not a power-of-2\n"); return -EINVAL; } + any_cpu_has_zicboz = true; return 0; } @@ -315,6 +318,7 @@ const struct riscv_isa_ext_data riscv_isa_ext[] = { riscv_ext_zicbom_validate), __RISCV_ISA_EXT_SUPERSET_VALIDATE(zicboz, RISCV_ISA_EXT_ZICBOZ, riscv_xlinuxenvcfg_exts, riscv_ext_zicboz_validate), + __RISCV_ISA_EXT_DATA(ziccrse, RISCV_ISA_EXT_ZICCRSE), __RISCV_ISA_EXT_DATA(zicntr, RISCV_ISA_EXT_ZICNTR), __RISCV_ISA_EXT_DATA(zicond, RISCV_ISA_EXT_ZICOND), __RISCV_ISA_EXT_DATA(zicsr, RISCV_ISA_EXT_ZICSR), @@ -323,6 +327,7 @@ const struct riscv_isa_ext_data riscv_isa_ext[] = { __RISCV_ISA_EXT_DATA(zihintpause, RISCV_ISA_EXT_ZIHINTPAUSE), __RISCV_ISA_EXT_DATA(zihpm, RISCV_ISA_EXT_ZIHPM), __RISCV_ISA_EXT_DATA(zimop, RISCV_ISA_EXT_ZIMOP), + __RISCV_ISA_EXT_DATA(zabha, RISCV_ISA_EXT_ZABHA), __RISCV_ISA_EXT_DATA(zacas, RISCV_ISA_EXT_ZACAS), __RISCV_ISA_EXT_DATA(zawrs, RISCV_ISA_EXT_ZAWRS), __RISCV_ISA_EXT_DATA(zfa, RISCV_ISA_EXT_ZFA), @@ -375,9 +380,12 @@ const struct riscv_isa_ext_data riscv_isa_ext[] = { __RISCV_ISA_EXT_BUNDLE(zvksg, riscv_zvksg_bundled_exts), __RISCV_ISA_EXT_DATA(zvkt, RISCV_ISA_EXT_ZVKT), __RISCV_ISA_EXT_DATA(smaia, RISCV_ISA_EXT_SMAIA), + __RISCV_ISA_EXT_DATA(smmpm, RISCV_ISA_EXT_SMMPM), + __RISCV_ISA_EXT_SUPERSET(smnpm, RISCV_ISA_EXT_SMNPM, riscv_xlinuxenvcfg_exts), __RISCV_ISA_EXT_DATA(smstateen, RISCV_ISA_EXT_SMSTATEEN), 
__RISCV_ISA_EXT_DATA(ssaia, RISCV_ISA_EXT_SSAIA), __RISCV_ISA_EXT_DATA(sscofpmf, RISCV_ISA_EXT_SSCOFPMF), + __RISCV_ISA_EXT_SUPERSET(ssnpm, RISCV_ISA_EXT_SSNPM, riscv_xlinuxenvcfg_exts), __RISCV_ISA_EXT_DATA(sstc, RISCV_ISA_EXT_SSTC), __RISCV_ISA_EXT_DATA(svinval, RISCV_ISA_EXT_SVINVAL), __RISCV_ISA_EXT_DATA(svnapot, RISCV_ISA_EXT_SVNAPOT), @@ -918,10 +926,12 @@ unsigned long riscv_get_elf_hwcap(void) return hwcap; } -void riscv_user_isa_enable(void) +void __init riscv_user_isa_enable(void) { - if (riscv_cpu_has_extension_unlikely(smp_processor_id(), RISCV_ISA_EXT_ZICBOZ)) - csr_set(CSR_ENVCFG, ENVCFG_CBZE); + if (riscv_has_extension_unlikely(RISCV_ISA_EXT_ZICBOZ)) + current->thread.envcfg |= ENVCFG_CBZE; + else if (any_cpu_has_zicboz) + pr_warn("Zicboz disabled as it is unavailable on some harts\n"); } #ifdef CONFIG_RISCV_ALTERNATIVE diff --git a/arch/riscv/kernel/fpu.S b/arch/riscv/kernel/fpu.S index 327cf527dd7e..f74f6b60e347 100644 --- a/arch/riscv/kernel/fpu.S +++ b/arch/riscv/kernel/fpu.S @@ -170,7 +170,7 @@ SYM_FUNC_END(__fstate_restore) __access_func(f31) -#ifdef CONFIG_RISCV_MISALIGNED +#ifdef CONFIG_RISCV_SCALAR_MISALIGNED /* * Disable compressed instructions set to keep a constant offset between FP @@ -224,4 +224,4 @@ SYM_FUNC_START(get_f64_reg) fp_access_epilogue SYM_FUNC_END(get_f64_reg) -#endif /* CONFIG_RISCV_MISALIGNED */ +#endif /* CONFIG_RISCV_SCALAR_MISALIGNED */ diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c index e3142d8a6e28..58b6482c2bf6 100644 --- a/arch/riscv/kernel/process.c +++ b/arch/riscv/kernel/process.c @@ -7,6 +7,7 @@ * Copyright (C) 2017 SiFive */ +#include <linux/bitfield.h> #include <linux/cpu.h> #include <linux/kernel.h> #include <linux/sched.h> @@ -180,6 +181,10 @@ void flush_thread(void) memset(¤t->thread.vstate, 0, sizeof(struct __riscv_v_ext_state)); clear_tsk_thread_flag(current, TIF_RISCV_V_DEFER_RESTORE); #endif +#ifdef CONFIG_RISCV_ISA_SUPM + if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SUPM)) + envcfg_update_bits(current, ENVCFG_PMM, ENVCFG_PMM_PMLEN_0); +#endif } void arch_release_task_struct(struct task_struct *tsk) @@ -208,6 +213,10 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args) unsigned long tls = args->tls; struct pt_regs *childregs = task_pt_regs(p); + /* Ensure all threads in this mm have the same pointer masking mode. */ + if (IS_ENABLED(CONFIG_RISCV_ISA_SUPM) && p->mm && (clone_flags & CLONE_VM)) + set_bit(MM_CONTEXT_LOCK_PMLEN, &p->mm->context.flags); + memset(&p->thread.s, 0, sizeof(p->thread.s)); /* p->thread holds context to be restored by __switch_to() */ @@ -242,3 +251,148 @@ void __init arch_task_cache_init(void) { riscv_v_setup_ctx_cache(); } + +#ifdef CONFIG_RISCV_ISA_SUPM +enum { + PMLEN_0 = 0, + PMLEN_7 = 7, + PMLEN_16 = 16, +}; + +static bool have_user_pmlen_7; +static bool have_user_pmlen_16; + +/* + * Control the relaxed ABI allowing tagged user addresses into the kernel. + */ +static unsigned int tagged_addr_disabled; + +long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg) +{ + unsigned long valid_mask = PR_PMLEN_MASK | PR_TAGGED_ADDR_ENABLE; + struct thread_info *ti = task_thread_info(task); + struct mm_struct *mm = task->mm; + unsigned long pmm; + u8 pmlen; + + if (is_compat_thread(ti)) + return -EINVAL; + + if (arg & ~valid_mask) + return -EINVAL; + + /* + * Prefer the smallest PMLEN that satisfies the user's request, + * in case choosing a larger PMLEN has a performance impact. 
+ */ + pmlen = FIELD_GET(PR_PMLEN_MASK, arg); + if (pmlen == PMLEN_0) { + pmm = ENVCFG_PMM_PMLEN_0; + } else if (pmlen <= PMLEN_7 && have_user_pmlen_7) { + pmlen = PMLEN_7; + pmm = ENVCFG_PMM_PMLEN_7; + } else if (pmlen <= PMLEN_16 && have_user_pmlen_16) { + pmlen = PMLEN_16; + pmm = ENVCFG_PMM_PMLEN_16; + } else { + return -EINVAL; + } + + /* + * Do not allow the enabling of the tagged address ABI if globally + * disabled via sysctl abi.tagged_addr_disabled, if pointer masking + * is disabled for userspace. + */ + if (arg & PR_TAGGED_ADDR_ENABLE && (tagged_addr_disabled || !pmlen)) + return -EINVAL; + + if (!(arg & PR_TAGGED_ADDR_ENABLE)) + pmlen = PMLEN_0; + + if (mmap_write_lock_killable(mm)) + return -EINTR; + + if (test_bit(MM_CONTEXT_LOCK_PMLEN, &mm->context.flags) && mm->context.pmlen != pmlen) { + mmap_write_unlock(mm); + return -EBUSY; + } + + envcfg_update_bits(task, ENVCFG_PMM, pmm); + mm->context.pmlen = pmlen; + + mmap_write_unlock(mm); + + return 0; +} + +long get_tagged_addr_ctrl(struct task_struct *task) +{ + struct thread_info *ti = task_thread_info(task); + long ret = 0; + + if (is_compat_thread(ti)) + return -EINVAL; + + /* + * The mm context's pmlen is set only when the tagged address ABI is + * enabled, so the effective PMLEN must be extracted from envcfg.PMM. + */ + switch (task->thread.envcfg & ENVCFG_PMM) { + case ENVCFG_PMM_PMLEN_7: + ret = FIELD_PREP(PR_PMLEN_MASK, PMLEN_7); + break; + case ENVCFG_PMM_PMLEN_16: + ret = FIELD_PREP(PR_PMLEN_MASK, PMLEN_16); + break; + } + + if (task->mm->context.pmlen) + ret |= PR_TAGGED_ADDR_ENABLE; + + return ret; +} + +static bool try_to_set_pmm(unsigned long value) +{ + csr_set(CSR_ENVCFG, value); + return (csr_read_clear(CSR_ENVCFG, ENVCFG_PMM) & ENVCFG_PMM) == value; +} + +/* + * Global sysctl to disable the tagged user addresses support. This control + * only prevents the tagged address ABI enabling via prctl() and does not + * disable it for tasks that already opted in to the relaxed ABI. + */ + +static struct ctl_table tagged_addr_sysctl_table[] = { + { + .procname = "tagged_addr_disabled", + .mode = 0644, + .data = &tagged_addr_disabled, + .maxlen = sizeof(int), + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, + }, +}; + +static int __init tagged_addr_init(void) +{ + if (!riscv_has_extension_unlikely(RISCV_ISA_EXT_SUPM)) + return 0; + + /* + * envcfg.PMM is a WARL field. Detect which values are supported. + * Assume the supported PMLEN values are the same on all harts. 
+ */ + csr_clear(CSR_ENVCFG, ENVCFG_PMM); + have_user_pmlen_7 = try_to_set_pmm(ENVCFG_PMM_PMLEN_7); + have_user_pmlen_16 = try_to_set_pmm(ENVCFG_PMM_PMLEN_16); + + if (!register_sysctl("abi", tagged_addr_sysctl_table)) + return -EINVAL; + + return 0; +} +core_initcall(tagged_addr_init); +#endif /* CONFIG_RISCV_ISA_SUPM */ diff --git a/arch/riscv/kernel/ptrace.c b/arch/riscv/kernel/ptrace.c index 92731ff8c79a..ea67e9fb7a58 100644 --- a/arch/riscv/kernel/ptrace.c +++ b/arch/riscv/kernel/ptrace.c @@ -28,6 +28,9 @@ enum riscv_regset { #ifdef CONFIG_RISCV_ISA_V REGSET_V, #endif +#ifdef CONFIG_RISCV_ISA_SUPM + REGSET_TAGGED_ADDR_CTRL, +#endif }; static int riscv_gpr_get(struct task_struct *target, @@ -152,6 +155,35 @@ static int riscv_vr_set(struct task_struct *target, } #endif +#ifdef CONFIG_RISCV_ISA_SUPM +static int tagged_addr_ctrl_get(struct task_struct *target, + const struct user_regset *regset, + struct membuf to) +{ + long ctrl = get_tagged_addr_ctrl(target); + + if (IS_ERR_VALUE(ctrl)) + return ctrl; + + return membuf_write(&to, &ctrl, sizeof(ctrl)); +} + +static int tagged_addr_ctrl_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + int ret; + long ctrl; + + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl, 0, -1); + if (ret) + return ret; + + return set_tagged_addr_ctrl(target, ctrl); +} +#endif + static const struct user_regset riscv_user_regset[] = { [REGSET_X] = { .core_note_type = NT_PRSTATUS, @@ -182,6 +214,16 @@ static const struct user_regset riscv_user_regset[] = { .set = riscv_vr_set, }, #endif +#ifdef CONFIG_RISCV_ISA_SUPM + [REGSET_TAGGED_ADDR_CTRL] = { + .core_note_type = NT_RISCV_TAGGED_ADDR_CTRL, + .n = 1, + .size = sizeof(long), + .align = sizeof(long), + .regset_get = tagged_addr_ctrl_get, + .set = tagged_addr_ctrl_set, + }, +#endif }; static const struct user_regset_view riscv_user_native_view = { diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c index 26c886db4fb3..016b48fcd6f2 100644 --- a/arch/riscv/kernel/setup.c +++ b/arch/riscv/kernel/setup.c @@ -244,6 +244,42 @@ static void __init parse_dtb(void) #endif } +#if defined(CONFIG_RISCV_COMBO_SPINLOCKS) +DEFINE_STATIC_KEY_TRUE(qspinlock_key); +EXPORT_SYMBOL(qspinlock_key); +#endif + +static void __init riscv_spinlock_init(void) +{ + char *using_ext = NULL; + + if (IS_ENABLED(CONFIG_RISCV_TICKET_SPINLOCKS)) { + pr_info("Ticket spinlock: enabled\n"); + return; + } + + if (IS_ENABLED(CONFIG_RISCV_ISA_ZABHA) && + IS_ENABLED(CONFIG_RISCV_ISA_ZACAS) && + riscv_isa_extension_available(NULL, ZABHA) && + riscv_isa_extension_available(NULL, ZACAS)) { + using_ext = "using Zabha"; + } else if (riscv_isa_extension_available(NULL, ZICCRSE)) { + using_ext = "using Ziccrse"; + } +#if defined(CONFIG_RISCV_COMBO_SPINLOCKS) + else { + static_branch_disable(&qspinlock_key); + pr_info("Ticket spinlock: enabled\n"); + return; + } +#endif + + if (!using_ext) + pr_err("Queued spinlock without Zabha or Ziccrse"); + else + pr_info("Queued spinlock %s: enabled\n", using_ext); +} + extern void __init init_rt_signal_env(void); void __init setup_arch(char **cmdline_p) @@ -297,6 +333,7 @@ void __init setup_arch(char **cmdline_p) riscv_set_dma_cache_alignment(); riscv_user_isa_enable(); + riscv_spinlock_init(); } bool arch_cpu_is_hotpluggable(int cpu) diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c index 0f8f1c95ac38..e36d20205bd7 100644 --- a/arch/riscv/kernel/smpboot.c +++ 
b/arch/riscv/kernel/smpboot.c @@ -233,8 +233,6 @@ asmlinkage __visible void smp_callin(void) numa_add_cpu(curr_cpuid); set_cpu_online(curr_cpuid, true); - riscv_user_isa_enable(); - /* * Remote cache and TLB flushes are ignored while the CPU is offline, * so flush them both right now just in case. diff --git a/arch/riscv/kernel/suspend.c b/arch/riscv/kernel/suspend.c index c8cec0cc5833..9a8a0dc035b2 100644 --- a/arch/riscv/kernel/suspend.c +++ b/arch/riscv/kernel/suspend.c @@ -14,7 +14,7 @@ void suspend_save_csrs(struct suspend_context *context) { - if (riscv_cpu_has_extension_unlikely(smp_processor_id(), RISCV_ISA_EXT_XLINUXENVCFG)) + if (riscv_has_extension_unlikely(RISCV_ISA_EXT_XLINUXENVCFG)) context->envcfg = csr_read(CSR_ENVCFG); context->tvec = csr_read(CSR_TVEC); context->ie = csr_read(CSR_IE); @@ -37,7 +37,7 @@ void suspend_save_csrs(struct suspend_context *context) void suspend_restore_csrs(struct suspend_context *context) { csr_write(CSR_SCRATCH, 0); - if (riscv_cpu_has_extension_unlikely(smp_processor_id(), RISCV_ISA_EXT_XLINUXENVCFG)) + if (riscv_has_extension_unlikely(RISCV_ISA_EXT_XLINUXENVCFG)) csr_write(CSR_ENVCFG, context->envcfg); csr_write(CSR_TVEC, context->tvec); csr_write(CSR_IE, context->ie); diff --git a/arch/riscv/kernel/sys_hwprobe.c b/arch/riscv/kernel/sys_hwprobe.c index 711a31f27c3d..cb93adfffc48 100644 --- a/arch/riscv/kernel/sys_hwprobe.c +++ b/arch/riscv/kernel/sys_hwprobe.c @@ -150,6 +150,9 @@ static void hwprobe_isa_ext0(struct riscv_hwprobe *pair, EXT_KEY(ZFH); EXT_KEY(ZFHMIN); } + + if (IS_ENABLED(CONFIG_RISCV_ISA_SUPM)) + EXT_KEY(SUPM); #undef EXT_KEY } @@ -201,6 +204,43 @@ static u64 hwprobe_misaligned(const struct cpumask *cpus) } #endif +#ifdef CONFIG_RISCV_VECTOR_MISALIGNED +static u64 hwprobe_vec_misaligned(const struct cpumask *cpus) +{ + int cpu; + u64 perf = -1ULL; + + /* Return if supported or not even if speed wasn't probed */ + for_each_cpu(cpu, cpus) { + int this_perf = per_cpu(vector_misaligned_access, cpu); + + if (perf == -1ULL) + perf = this_perf; + + if (perf != this_perf) { + perf = RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN; + break; + } + } + + if (perf == -1ULL) + return RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN; + + return perf; +} +#else +static u64 hwprobe_vec_misaligned(const struct cpumask *cpus) +{ + if (IS_ENABLED(CONFIG_RISCV_EFFICIENT_VECTOR_UNALIGNED_ACCESS)) + return RISCV_HWPROBE_MISALIGNED_VECTOR_FAST; + + if (IS_ENABLED(CONFIG_RISCV_SLOW_VECTOR_UNALIGNED_ACCESS)) + return RISCV_HWPROBE_MISALIGNED_VECTOR_SLOW; + + return RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN; +} +#endif + static void hwprobe_one_pair(struct riscv_hwprobe *pair, const struct cpumask *cpus) { @@ -229,6 +269,10 @@ static void hwprobe_one_pair(struct riscv_hwprobe *pair, pair->value = hwprobe_misaligned(cpus); break; + case RISCV_HWPROBE_KEY_MISALIGNED_VECTOR_PERF: + pair->value = hwprobe_vec_misaligned(cpus); + break; + case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE: pair->value = 0; if (hwprobe_ext0_has(cpus, RISCV_HWPROBE_EXT_ZICBOZ)) diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c index 1b9867136b61..7cc108aed74e 100644 --- a/arch/riscv/kernel/traps_misaligned.c +++ b/arch/riscv/kernel/traps_misaligned.c @@ -16,6 +16,7 @@ #include <asm/entry-common.h> #include <asm/hwprobe.h> #include <asm/cpufeature.h> +#include <asm/vector.h> #define INSN_MATCH_LB 0x3 #define INSN_MASK_LB 0x707f @@ -320,12 +321,37 @@ union reg_data { u64 data_u64; }; -static bool unaligned_ctl __read_mostly; - /* sysctl hooks */ int unaligned_enabled 
__read_mostly = 1; /* Enabled by default */ -int handle_misaligned_load(struct pt_regs *regs) +#ifdef CONFIG_RISCV_VECTOR_MISALIGNED +static int handle_vector_misaligned_load(struct pt_regs *regs) +{ + unsigned long epc = regs->epc; + unsigned long insn; + + if (get_insn(regs, epc, &insn)) + return -1; + + /* Only return 0 when in check_vector_unaligned_access_emulated */ + if (*this_cpu_ptr(&vector_misaligned_access) == RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN) { + *this_cpu_ptr(&vector_misaligned_access) = RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED; + regs->epc = epc + INSN_LEN(insn); + return 0; + } + + /* If vector instruction we don't emulate it yet */ + regs->epc = epc; + return -1; +} +#else +static int handle_vector_misaligned_load(struct pt_regs *regs) +{ + return -1; +} +#endif + +static int handle_scalar_misaligned_load(struct pt_regs *regs) { union reg_data val; unsigned long epc = regs->epc; @@ -433,7 +459,7 @@ int handle_misaligned_load(struct pt_regs *regs) return 0; } -int handle_misaligned_store(struct pt_regs *regs) +static int handle_scalar_misaligned_store(struct pt_regs *regs) { union reg_data val; unsigned long epc = regs->epc; @@ -524,11 +550,96 @@ int handle_misaligned_store(struct pt_regs *regs) return 0; } -static bool check_unaligned_access_emulated(int cpu) +int handle_misaligned_load(struct pt_regs *regs) +{ + unsigned long epc = regs->epc; + unsigned long insn; + + if (IS_ENABLED(CONFIG_RISCV_VECTOR_MISALIGNED)) { + if (get_insn(regs, epc, &insn)) + return -1; + + if (insn_is_vector(insn)) + return handle_vector_misaligned_load(regs); + } + + if (IS_ENABLED(CONFIG_RISCV_SCALAR_MISALIGNED)) + return handle_scalar_misaligned_load(regs); + + return -1; +} + +int handle_misaligned_store(struct pt_regs *regs) { + if (IS_ENABLED(CONFIG_RISCV_SCALAR_MISALIGNED)) + return handle_scalar_misaligned_store(regs); + + return -1; +} + +#ifdef CONFIG_RISCV_VECTOR_MISALIGNED +void check_vector_unaligned_access_emulated(struct work_struct *work __always_unused) +{ + long *mas_ptr = this_cpu_ptr(&vector_misaligned_access); + unsigned long tmp_var; + + *mas_ptr = RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN; + + kernel_vector_begin(); + /* + * In pre-13.0.0 versions of GCC, vector registers cannot appear in + * the clobber list. This inline asm clobbers v0, but since we do not + * currently build the kernel with V enabled, the v0 clobber arg is not + * needed (as the compiler will not emit vector code itself). If the kernel + * is changed to build with V enabled, the clobber arg will need to be + * added here. 
+ */ + __asm__ __volatile__ ( + ".balign 4\n\t" + ".option push\n\t" + ".option arch, +zve32x\n\t" + " vsetivli zero, 1, e16, m1, ta, ma\n\t" // Vectors of 16b + " vle16.v v0, (%[ptr])\n\t" // Load bytes + ".option pop\n\t" + : : [ptr] "r" ((u8 *)&tmp_var + 1)); + kernel_vector_end(); +} + +bool check_vector_unaligned_access_emulated_all_cpus(void) +{ + int cpu; + + if (!has_vector()) { + for_each_online_cpu(cpu) + per_cpu(vector_misaligned_access, cpu) = RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED; + return false; + } + + schedule_on_each_cpu(check_vector_unaligned_access_emulated); + + for_each_online_cpu(cpu) + if (per_cpu(vector_misaligned_access, cpu) + == RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN) + return false; + + return true; +} +#else +bool check_vector_unaligned_access_emulated_all_cpus(void) +{ + return false; +} +#endif + +#ifdef CONFIG_RISCV_SCALAR_MISALIGNED + +static bool unaligned_ctl __read_mostly; + +void check_unaligned_access_emulated(struct work_struct *work __always_unused) +{ + int cpu = smp_processor_id(); long *mas_ptr = per_cpu_ptr(&misaligned_access_speed, cpu); unsigned long tmp_var, tmp_val; - bool misaligned_emu_detected; *mas_ptr = RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN; @@ -536,19 +647,16 @@ static bool check_unaligned_access_emulated(int cpu) " "REG_L" %[tmp], 1(%[ptr])\n" : [tmp] "=r" (tmp_val) : [ptr] "r" (&tmp_var) : "memory"); - misaligned_emu_detected = (*mas_ptr == RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED); /* * If unaligned_ctl is already set, this means that we detected that all * CPUS uses emulated misaligned access at boot time. If that changed * when hotplugging the new cpu, this is something we don't handle. */ - if (unlikely(unaligned_ctl && !misaligned_emu_detected)) { + if (unlikely(unaligned_ctl && (*mas_ptr != RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED))) { pr_crit("CPU misaligned accesses non homogeneous (expected all emulated)\n"); while (true) cpu_relax(); } - - return misaligned_emu_detected; } bool check_unaligned_access_emulated_all_cpus(void) @@ -560,8 +668,11 @@ bool check_unaligned_access_emulated_all_cpus(void) * accesses emulated since tasks requesting such control can run on any * CPU. 
*/ + schedule_on_each_cpu(check_unaligned_access_emulated); + for_each_online_cpu(cpu) - if (!check_unaligned_access_emulated(cpu)) + if (per_cpu(misaligned_access_speed, cpu) + != RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED) return false; unaligned_ctl = true; @@ -572,3 +683,9 @@ bool unaligned_ctl_available(void) { return unaligned_ctl; } +#else +bool check_unaligned_access_emulated_all_cpus(void) +{ + return false; +} +#endif diff --git a/arch/riscv/kernel/unaligned_access_speed.c b/arch/riscv/kernel/unaligned_access_speed.c index 160628a2116d..91f189cf1611 100644 --- a/arch/riscv/kernel/unaligned_access_speed.c +++ b/arch/riscv/kernel/unaligned_access_speed.c @@ -6,11 +6,13 @@ #include <linux/cpu.h> #include <linux/cpumask.h> #include <linux/jump_label.h> +#include <linux/kthread.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/types.h> #include <asm/cpufeature.h> #include <asm/hwprobe.h> +#include <asm/vector.h> #include "copy-unaligned.h" @@ -19,7 +21,8 @@ #define MISALIGNED_BUFFER_ORDER get_order(MISALIGNED_BUFFER_SIZE) #define MISALIGNED_COPY_SIZE ((MISALIGNED_BUFFER_SIZE / 2) - 0x80) -DEFINE_PER_CPU(long, misaligned_access_speed); +DEFINE_PER_CPU(long, misaligned_access_speed) = RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN; +DEFINE_PER_CPU(long, vector_misaligned_access) = RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED; #ifdef CONFIG_RISCV_PROBE_UNALIGNED_ACCESS static cpumask_t fast_misaligned_access; @@ -191,6 +194,7 @@ static int riscv_online_cpu(unsigned int cpu) if (per_cpu(misaligned_access_speed, cpu) != RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN) goto exit; + check_unaligned_access_emulated(NULL); buf = alloc_pages(GFP_KERNEL, MISALIGNED_BUFFER_ORDER); if (!buf) { pr_warn("Allocation failure, not measuring misaligned performance\n"); @@ -259,23 +263,159 @@ out: kfree(bufs); return 0; } +#else /* CONFIG_RISCV_PROBE_UNALIGNED_ACCESS */ +static int check_unaligned_access_speed_all_cpus(void) +{ + return 0; +} +#endif -static int check_unaligned_access_all_cpus(void) +#ifdef CONFIG_RISCV_PROBE_VECTOR_UNALIGNED_ACCESS +static void check_vector_unaligned_access(struct work_struct *work __always_unused) { - bool all_cpus_emulated = check_unaligned_access_emulated_all_cpus(); + int cpu = smp_processor_id(); + u64 start_cycles, end_cycles; + u64 word_cycles; + u64 byte_cycles; + int ratio; + unsigned long start_jiffies, now; + struct page *page; + void *dst; + void *src; + long speed = RISCV_HWPROBE_MISALIGNED_VECTOR_SLOW; - if (!all_cpus_emulated) - return check_unaligned_access_speed_all_cpus(); + if (per_cpu(vector_misaligned_access, cpu) != RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN) + return; + + page = alloc_pages(GFP_KERNEL, MISALIGNED_BUFFER_ORDER); + if (!page) { + pr_warn("Allocation failure, not measuring vector misaligned performance\n"); + return; + } + + /* Make an unaligned destination buffer. */ + dst = (void *)((unsigned long)page_address(page) | 0x1); + /* Unalign src as well, but differently (off by 1 + 2 = 3). */ + src = dst + (MISALIGNED_BUFFER_SIZE / 2); + src += 2; + word_cycles = -1ULL; + + /* Do a warmup. */ + kernel_vector_begin(); + __riscv_copy_vec_words_unaligned(dst, src, MISALIGNED_COPY_SIZE); + start_jiffies = jiffies; + while ((now = jiffies) == start_jiffies) + cpu_relax(); + + /* + * For a fixed amount of time, repeatedly try the function, and take + * the best time in cycles as the measurement. 
+ */ + while (time_before(jiffies, now + (1 << MISALIGNED_ACCESS_JIFFIES_LG2))) { + start_cycles = get_cycles64(); + /* Ensure the CSR read can't reorder WRT to the copy. */ + mb(); + __riscv_copy_vec_words_unaligned(dst, src, MISALIGNED_COPY_SIZE); + /* Ensure the copy ends before the end time is snapped. */ + mb(); + end_cycles = get_cycles64(); + if ((end_cycles - start_cycles) < word_cycles) + word_cycles = end_cycles - start_cycles; + } + + byte_cycles = -1ULL; + __riscv_copy_vec_bytes_unaligned(dst, src, MISALIGNED_COPY_SIZE); + start_jiffies = jiffies; + while ((now = jiffies) == start_jiffies) + cpu_relax(); + + while (time_before(jiffies, now + (1 << MISALIGNED_ACCESS_JIFFIES_LG2))) { + start_cycles = get_cycles64(); + /* Ensure the CSR read can't reorder WRT to the copy. */ + mb(); + __riscv_copy_vec_bytes_unaligned(dst, src, MISALIGNED_COPY_SIZE); + /* Ensure the copy ends before the end time is snapped. */ + mb(); + end_cycles = get_cycles64(); + if ((end_cycles - start_cycles) < byte_cycles) + byte_cycles = end_cycles - start_cycles; + } + + kernel_vector_end(); + + /* Don't divide by zero. */ + if (!word_cycles || !byte_cycles) { + pr_warn("cpu%d: rdtime lacks granularity needed to measure unaligned vector access speed\n", + cpu); + + return; + } + + if (word_cycles < byte_cycles) + speed = RISCV_HWPROBE_MISALIGNED_VECTOR_FAST; + + ratio = div_u64((byte_cycles * 100), word_cycles); + pr_info("cpu%d: Ratio of vector byte access time to vector unaligned word access is %d.%02d, unaligned accesses are %s\n", + cpu, + ratio / 100, + ratio % 100, + (speed == RISCV_HWPROBE_MISALIGNED_VECTOR_FAST) ? "fast" : "slow"); + + per_cpu(vector_misaligned_access, cpu) = speed; +} + +static int riscv_online_cpu_vec(unsigned int cpu) +{ + if (!has_vector()) + return 0; + + if (per_cpu(vector_misaligned_access, cpu) != RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED) + return 0; + + check_vector_unaligned_access_emulated(NULL); + check_vector_unaligned_access(NULL); return 0; } -#else /* CONFIG_RISCV_PROBE_UNALIGNED_ACCESS */ -static int check_unaligned_access_all_cpus(void) + +/* Measure unaligned access speed on all CPUs present at boot in parallel. */ +static int vec_check_unaligned_access_speed_all_cpus(void *unused __always_unused) { - check_unaligned_access_emulated_all_cpus(); + schedule_on_each_cpu(check_vector_unaligned_access); + + /* + * Setup hotplug callbacks for any new CPUs that come online or go + * offline. 
+ */ + cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "riscv:online", + riscv_online_cpu_vec, NULL); return 0; } +#else /* CONFIG_RISCV_PROBE_VECTOR_UNALIGNED_ACCESS */ +static int vec_check_unaligned_access_speed_all_cpus(void *unused __always_unused) +{ + return 0; +} #endif +static int check_unaligned_access_all_cpus(void) +{ + bool all_cpus_emulated, all_cpus_vec_unsupported; + + all_cpus_emulated = check_unaligned_access_emulated_all_cpus(); + all_cpus_vec_unsupported = check_vector_unaligned_access_emulated_all_cpus(); + + if (!all_cpus_vec_unsupported && + IS_ENABLED(CONFIG_RISCV_PROBE_VECTOR_UNALIGNED_ACCESS)) { + kthread_run(vec_check_unaligned_access_speed_all_cpus, + NULL, "vec_check_unaligned_access_speed_all_cpus"); + } + + if (!all_cpus_emulated) + return check_unaligned_access_speed_all_cpus(); + + return 0; +} + arch_initcall(check_unaligned_access_all_cpus); diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile index 3f1c4b2d0b06..9a1b555e8733 100644 --- a/arch/riscv/kernel/vdso/Makefile +++ b/arch/riscv/kernel/vdso/Makefile @@ -45,7 +45,7 @@ $(obj)/vdso.o: $(obj)/vdso.so # link rule for the .so file, .lds has to be first $(obj)/vdso.so.dbg: $(obj)/vdso.lds $(obj-vdso) FORCE - $(call if_changed,vdsold) + $(call if_changed,vdsold_and_check) LDFLAGS_vdso.so.dbg = -shared -soname=linux-vdso.so.1 \ --build-id=sha1 --hash-style=both --eh-frame-hdr @@ -65,7 +65,8 @@ include/generated/vdso-offsets.h: $(obj)/vdso.so.dbg FORCE # actual build commands # The DSO images are built using a special linker script # Make sure only to export the intended __vdso_xxx symbol offsets. -quiet_cmd_vdsold = VDSOLD $@ - cmd_vdsold = $(LD) $(ld_flags) -T $(filter-out FORCE,$^) -o $@.tmp && \ +quiet_cmd_vdsold_and_check = VDSOLD $@ + cmd_vdsold_and_check = $(LD) $(ld_flags) -T $(filter-out FORCE,$^) -o $@.tmp && \ $(OBJCOPY) $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@ && \ - rm $@.tmp + rm $@.tmp && \ + $(cmd_vdso_check) diff --git a/arch/riscv/kernel/vec-copy-unaligned.S b/arch/riscv/kernel/vec-copy-unaligned.S new file mode 100644 index 000000000000..d16f19f1b3b6 --- /dev/null +++ b/arch/riscv/kernel/vec-copy-unaligned.S @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2024 Rivos Inc. */ + +#include <linux/args.h> +#include <linux/linkage.h> +#include <asm/asm.h> + + .text + +#define WORD_EEW 32 + +#define WORD_SEW CONCATENATE(e, WORD_EEW) +#define VEC_L CONCATENATE(vle, WORD_EEW).v +#define VEC_S CONCATENATE(vle, WORD_EEW).v + +/* void __riscv_copy_vec_words_unaligned(void *, const void *, size_t) */ +/* Performs a memcpy without aligning buffers, using word loads and stores. */ +/* Note: The size is truncated to a multiple of WORD_EEW */ +SYM_FUNC_START(__riscv_copy_vec_words_unaligned) + andi a4, a2, ~(WORD_EEW-1) + beqz a4, 2f + add a3, a1, a4 + .option push + .option arch, +zve32x +1: + vsetivli t0, 8, WORD_SEW, m8, ta, ma + VEC_L v0, (a1) + VEC_S v0, (a0) + addi a0, a0, WORD_EEW + addi a1, a1, WORD_EEW + bltu a1, a3, 1b + +2: + .option pop + ret +SYM_FUNC_END(__riscv_copy_vec_words_unaligned) + +/* void __riscv_copy_vec_bytes_unaligned(void *, const void *, size_t) */ +/* Performs a memcpy without aligning buffers, using only byte accesses. 
*/ +/* Note: The size is truncated to a multiple of 8 */ +SYM_FUNC_START(__riscv_copy_vec_bytes_unaligned) + andi a4, a2, ~(8-1) + beqz a4, 2f + add a3, a1, a4 + .option push + .option arch, +zve32x +1: + vsetivli t0, 8, e8, m8, ta, ma + vle8.v v0, (a1) + vse8.v v0, (a0) + addi a0, a0, 8 + addi a1, a1, 8 + bltu a1, a3, 1b + +2: + .option pop + ret +SYM_FUNC_END(__riscv_copy_vec_bytes_unaligned) diff --git a/arch/riscv/kernel/vector.c b/arch/riscv/kernel/vector.c index 682b3feee451..821818886fab 100644 --- a/arch/riscv/kernel/vector.c +++ b/arch/riscv/kernel/vector.c @@ -66,7 +66,7 @@ void __init riscv_v_setup_ctx_cache(void) #endif } -static bool insn_is_vector(u32 insn_buf) +bool insn_is_vector(u32 insn_buf) { u32 opcode = insn_buf & __INSN_OPCODE_MASK; u32 width, csr; diff --git a/arch/riscv/kvm/vcpu_onereg.c b/arch/riscv/kvm/vcpu_onereg.c index b319c4c13c54..5b68490ad9b7 100644 --- a/arch/riscv/kvm/vcpu_onereg.c +++ b/arch/riscv/kvm/vcpu_onereg.c @@ -34,9 +34,11 @@ static const unsigned long kvm_isa_ext_arr[] = { [KVM_RISCV_ISA_EXT_M] = RISCV_ISA_EXT_m, [KVM_RISCV_ISA_EXT_V] = RISCV_ISA_EXT_v, /* Multi letter extensions (alphabetically sorted) */ + [KVM_RISCV_ISA_EXT_SMNPM] = RISCV_ISA_EXT_SSNPM, KVM_ISA_EXT_ARR(SMSTATEEN), KVM_ISA_EXT_ARR(SSAIA), KVM_ISA_EXT_ARR(SSCOFPMF), + KVM_ISA_EXT_ARR(SSNPM), KVM_ISA_EXT_ARR(SSTC), KVM_ISA_EXT_ARR(SVINVAL), KVM_ISA_EXT_ARR(SVNAPOT), @@ -127,8 +129,10 @@ static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext) case KVM_RISCV_ISA_EXT_C: case KVM_RISCV_ISA_EXT_I: case KVM_RISCV_ISA_EXT_M: + case KVM_RISCV_ISA_EXT_SMNPM: /* There is not architectural config bit to disable sscofpmf completely */ case KVM_RISCV_ISA_EXT_SSCOFPMF: + case KVM_RISCV_ISA_EXT_SSNPM: case KVM_RISCV_ISA_EXT_SSTC: case KVM_RISCV_ISA_EXT_SVINVAL: case KVM_RISCV_ISA_EXT_SVNAPOT: diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index c64b2987d108..0077969170e8 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -87,6 +87,7 @@ config S390 select ARCH_HAS_MEMBARRIER_SYNC_CORE select ARCH_HAS_MEM_ENCRYPT select ARCH_HAS_NMI_SAFE_THIS_CPU_OPS + select ARCH_HAS_PREEMPT_LAZY select ARCH_HAS_PTE_SPECIAL select ARCH_HAS_SCALED_CPUTIME select ARCH_HAS_SET_DIRECT_MAP @@ -218,6 +219,7 @@ config S390 select HAVE_PERF_EVENTS select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP + select HAVE_PREEMPT_DYNAMIC_KEY select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_RELIABLE_STACKTRACE select HAVE_RETHOOK diff --git a/arch/s390/include/asm/debug.h b/arch/s390/include/asm/debug.h index ccd4e148b5ed..a7f7bdc9e19c 100644 --- a/arch/s390/include/asm/debug.h +++ b/arch/s390/include/asm/debug.h @@ -66,14 +66,15 @@ typedef int (debug_header_proc_t) (debug_info_t *id, struct debug_view *view, int area, debug_entry_t *entry, - char *out_buf); + char *out_buf, size_t out_buf_size); typedef int (debug_format_proc_t) (debug_info_t *id, struct debug_view *view, char *out_buf, + size_t out_buf_size, const char *in_buf); typedef int (debug_prolog_proc_t) (debug_info_t *id, struct debug_view *view, - char *out_buf); + char *out_buf, size_t out_buf_size); typedef int (debug_input_proc_t) (debug_info_t *id, struct debug_view *view, struct file *file, @@ -81,7 +82,8 @@ typedef int (debug_input_proc_t) (debug_info_t *id, size_t in_buf_size, loff_t *offset); int debug_dflt_header_fn(debug_info_t *id, struct debug_view *view, - int area, debug_entry_t *entry, char *out_buf); + int area, debug_entry_t *entry, + char *out_buf, size_t out_buf_size); struct debug_view { char name[DEBUG_MAX_NAME_LEN]; diff 
--git a/arch/s390/include/asm/gmap.h b/arch/s390/include/asm/gmap.h index 64761c78f774..13f51a6a5bb1 100644 --- a/arch/s390/include/asm/gmap.h +++ b/arch/s390/include/asm/gmap.h @@ -17,8 +17,8 @@ #define GMAP_NOTIFY_MPROT 0x1 /* Status bits only for huge segment entries */ -#define _SEGMENT_ENTRY_GMAP_IN 0x8000 /* invalidation notify bit */ -#define _SEGMENT_ENTRY_GMAP_UC 0x4000 /* dirty (migration) */ +#define _SEGMENT_ENTRY_GMAP_IN 0x0800 /* invalidation notify bit */ +#define _SEGMENT_ENTRY_GMAP_UC 0x0002 /* dirty (migration) */ /** * struct gmap_struct - guest address space diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h index 6f815d4ba0ca..a40664b236e9 100644 --- a/arch/s390/include/asm/hugetlb.h +++ b/arch/s390/include/asm/hugetlb.h @@ -10,6 +10,8 @@ #define _ASM_S390_HUGETLB_H #include <linux/pgtable.h> +#include <linux/swap.h> +#include <linux/swapops.h> #include <asm/page.h> #define hugepages_supported() (MACHINE_HAS_EDAT1) @@ -78,7 +80,7 @@ static inline int huge_pte_none(pte_t pte) #define __HAVE_ARCH_HUGE_PTE_NONE_MOSTLY static inline int huge_pte_none_mostly(pte_t pte) { - return huge_pte_none(pte); + return huge_pte_none(pte) || is_pte_marker(pte); } #define __HAVE_ARCH_HUGE_PTE_MKUFFD_WP diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index 1cd8eaebd3c0..97c7c8127543 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -94,11 +94,16 @@ union ipte_control { }; }; +/* + * Utility is defined as two bytes but having it four bytes wide + * generates more efficient code. Since the following bytes are + * reserved this makes no functional difference. + */ union sca_utility { - __u16 val; + __u32 val; struct { - __u16 mtcr : 1; - __u16 reserved : 15; + __u32 mtcr : 1; + __u32 : 31; }; }; @@ -107,7 +112,7 @@ struct bsca_block { __u64 reserved[5]; __u64 mcn; union sca_utility utility; - __u8 reserved2[6]; + __u8 reserved2[4]; struct bsca_entry cpu[KVM_S390_BSCA_CPU_SLOTS]; }; @@ -115,7 +120,7 @@ struct esca_block { union ipte_control ipte_control; __u64 reserved1[6]; union sca_utility utility; - __u8 reserved2[6]; + __u8 reserved2[4]; __u64 mcn[4]; __u64 reserved3[20]; struct esca_entry cpu[KVM_S390_ESCA_CPU_SLOTS]; diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 8b67036edb69..48268095b0a3 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -277,7 +277,8 @@ static inline int is_module_addr(void *addr) #define _REGION1_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID) #define _REGION2_ENTRY (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH) #define _REGION2_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID) -#define _REGION3_ENTRY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH) +#define _REGION3_ENTRY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH | \ + _REGION3_ENTRY_PRESENT) #define _REGION3_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID) #define _REGION3_ENTRY_HARDWARE_BITS 0xfffffffffffff6ffUL @@ -285,18 +286,27 @@ static inline int is_module_addr(void *addr) #define _REGION3_ENTRY_ORIGIN_LARGE ~0x7fffffffUL /* large page address */ #define _REGION3_ENTRY_DIRTY 0x2000 /* SW region dirty bit */ #define _REGION3_ENTRY_YOUNG 0x1000 /* SW region young bit */ +#define _REGION3_ENTRY_COMM 0x0010 /* Common-Region, marks swap entry */ #define _REGION3_ENTRY_LARGE 0x0400 /* RTTE-format control, large page */ -#define _REGION3_ENTRY_WRITE 0x0002 /* SW region write bit */ -#define _REGION3_ENTRY_READ 
0x0001 /* SW region read bit */ +#define _REGION3_ENTRY_WRITE 0x8000 /* SW region write bit */ +#define _REGION3_ENTRY_READ 0x4000 /* SW region read bit */ #ifdef CONFIG_MEM_SOFT_DIRTY -#define _REGION3_ENTRY_SOFT_DIRTY 0x4000 /* SW region soft dirty bit */ +#define _REGION3_ENTRY_SOFT_DIRTY 0x0002 /* SW region soft dirty bit */ #else #define _REGION3_ENTRY_SOFT_DIRTY 0x0000 /* SW region soft dirty bit */ #endif #define _REGION_ENTRY_BITS 0xfffffffffffff22fUL +/* + * SW region present bit. For non-leaf region-third-table entries, bits 62-63 + * indicate the TABLE LENGTH and both must be set to 1. But such entries + * would always be considered as present, so it is safe to use bit 63 as + * PRESENT bit for PUD. + */ +#define _REGION3_ENTRY_PRESENT 0x0001 + /* Bits in the segment table entry */ #define _SEGMENT_ENTRY_BITS 0xfffffffffffffe3fUL #define _SEGMENT_ENTRY_HARDWARE_BITS 0xfffffffffffffe3cUL @@ -308,21 +318,29 @@ static inline int is_module_addr(void *addr) #define _SEGMENT_ENTRY_INVALID 0x20 /* invalid segment table entry */ #define _SEGMENT_ENTRY_TYPE_MASK 0x0c /* segment table type mask */ -#define _SEGMENT_ENTRY (0) +#define _SEGMENT_ENTRY (_SEGMENT_ENTRY_PRESENT) #define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INVALID) #define _SEGMENT_ENTRY_DIRTY 0x2000 /* SW segment dirty bit */ #define _SEGMENT_ENTRY_YOUNG 0x1000 /* SW segment young bit */ + +#define _SEGMENT_ENTRY_COMM 0x0010 /* Common-Segment, marks swap entry */ #define _SEGMENT_ENTRY_LARGE 0x0400 /* STE-format control, large page */ -#define _SEGMENT_ENTRY_WRITE 0x0002 /* SW segment write bit */ -#define _SEGMENT_ENTRY_READ 0x0001 /* SW segment read bit */ +#define _SEGMENT_ENTRY_WRITE 0x8000 /* SW segment write bit */ +#define _SEGMENT_ENTRY_READ 0x4000 /* SW segment read bit */ #ifdef CONFIG_MEM_SOFT_DIRTY -#define _SEGMENT_ENTRY_SOFT_DIRTY 0x4000 /* SW segment soft dirty bit */ +#define _SEGMENT_ENTRY_SOFT_DIRTY 0x0002 /* SW segment soft dirty bit */ #else #define _SEGMENT_ENTRY_SOFT_DIRTY 0x0000 /* SW segment soft dirty bit */ #endif +#define _SEGMENT_ENTRY_PRESENT 0x0001 /* SW segment present bit */ + +/* Common bits in region and segment table entries, for swap entries */ +#define _RST_ENTRY_COMM 0x0010 /* Common-Region/Segment, marks swap entry */ +#define _RST_ENTRY_INVALID 0x0020 /* invalid region/segment table entry */ + #define _CRST_ENTRIES 2048 /* number of region/segment table entries */ #define _PAGE_ENTRIES 256 /* number of page table entries */ @@ -454,17 +472,22 @@ static inline int is_module_addr(void *addr) /* * Segment entry (large page) protection definitions. 
*/ -#define SEGMENT_NONE __pgprot(_SEGMENT_ENTRY_INVALID | \ +#define SEGMENT_NONE __pgprot(_SEGMENT_ENTRY_PRESENT | \ + _SEGMENT_ENTRY_INVALID | \ _SEGMENT_ENTRY_PROTECT) -#define SEGMENT_RO __pgprot(_SEGMENT_ENTRY_PROTECT | \ +#define SEGMENT_RO __pgprot(_SEGMENT_ENTRY_PRESENT | \ + _SEGMENT_ENTRY_PROTECT | \ _SEGMENT_ENTRY_READ | \ _SEGMENT_ENTRY_NOEXEC) -#define SEGMENT_RX __pgprot(_SEGMENT_ENTRY_PROTECT | \ +#define SEGMENT_RX __pgprot(_SEGMENT_ENTRY_PRESENT | \ + _SEGMENT_ENTRY_PROTECT | \ _SEGMENT_ENTRY_READ) -#define SEGMENT_RW __pgprot(_SEGMENT_ENTRY_READ | \ +#define SEGMENT_RW __pgprot(_SEGMENT_ENTRY_PRESENT | \ + _SEGMENT_ENTRY_READ | \ _SEGMENT_ENTRY_WRITE | \ _SEGMENT_ENTRY_NOEXEC) -#define SEGMENT_RWX __pgprot(_SEGMENT_ENTRY_READ | \ +#define SEGMENT_RWX __pgprot(_SEGMENT_ENTRY_PRESENT | \ + _SEGMENT_ENTRY_READ | \ _SEGMENT_ENTRY_WRITE) #define SEGMENT_KERNEL __pgprot(_SEGMENT_ENTRY | \ _SEGMENT_ENTRY_LARGE | \ @@ -491,6 +514,7 @@ static inline int is_module_addr(void *addr) */ #define REGION3_KERNEL __pgprot(_REGION_ENTRY_TYPE_R3 | \ + _REGION3_ENTRY_PRESENT | \ _REGION3_ENTRY_LARGE | \ _REGION3_ENTRY_READ | \ _REGION3_ENTRY_WRITE | \ @@ -498,12 +522,14 @@ static inline int is_module_addr(void *addr) _REGION3_ENTRY_DIRTY | \ _REGION_ENTRY_NOEXEC) #define REGION3_KERNEL_RO __pgprot(_REGION_ENTRY_TYPE_R3 | \ + _REGION3_ENTRY_PRESENT | \ _REGION3_ENTRY_LARGE | \ _REGION3_ENTRY_READ | \ _REGION3_ENTRY_YOUNG | \ _REGION_ENTRY_PROTECT | \ _REGION_ENTRY_NOEXEC) #define REGION3_KERNEL_EXEC __pgprot(_REGION_ENTRY_TYPE_R3 | \ + _REGION3_ENTRY_PRESENT | \ _REGION3_ENTRY_LARGE | \ _REGION3_ENTRY_READ | \ _REGION3_ENTRY_WRITE | \ @@ -746,7 +772,7 @@ static inline int pud_present(pud_t pud) { if (pud_folded(pud)) return 1; - return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL; + return (pud_val(pud) & _REGION3_ENTRY_PRESENT) != 0; } static inline int pud_none(pud_t pud) @@ -761,13 +787,18 @@ static inline bool pud_leaf(pud_t pud) { if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3) return 0; - return !!(pud_val(pud) & _REGION3_ENTRY_LARGE); + return (pud_present(pud) && (pud_val(pud) & _REGION3_ENTRY_LARGE) != 0); +} + +static inline int pmd_present(pmd_t pmd) +{ + return (pmd_val(pmd) & _SEGMENT_ENTRY_PRESENT) != 0; } #define pmd_leaf pmd_leaf static inline bool pmd_leaf(pmd_t pmd) { - return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0; + return (pmd_present(pmd) && (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0); } static inline int pmd_bad(pmd_t pmd) @@ -799,11 +830,6 @@ static inline int p4d_bad(p4d_t p4d) return (p4d_val(p4d) & ~_REGION_ENTRY_BITS) != 0; } -static inline int pmd_present(pmd_t pmd) -{ - return pmd_val(pmd) != _SEGMENT_ENTRY_EMPTY; -} - static inline int pmd_none(pmd_t pmd) { return pmd_val(pmd) == _SEGMENT_ENTRY_EMPTY; @@ -1851,7 +1877,7 @@ static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, static inline int pmd_trans_huge(pmd_t pmd) { - return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE; + return pmd_leaf(pmd); } #define has_transparent_hugepage has_transparent_hugepage @@ -1911,6 +1937,53 @@ static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset) #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) +/* + * 64 bit swap entry format for REGION3 and SEGMENT table entries (RSTE) + * Bits 59 and 63 are used to indicate the swap entry. Bit 58 marks the rste + * as invalid. 
+ * A swap entry is indicated by bit pattern (rste & 0x011) == 0x010 + * | offset |Xtype |11TT|S0| + * |0000000000111111111122222222223333333333444444444455|555555|5566|66| + * |0123456789012345678901234567890123456789012345678901|234567|8901|23| + * + * Bits 0-51 store the offset. + * Bits 53-57 store the type. + * Bit 62 (S) is used for softdirty tracking. + * Bits 60-61 (TT) indicate the table type: 0x01 for REGION3 and 0x00 for SEGMENT. + * Bit 52 (X) is unused. + */ + +#define __SWP_OFFSET_MASK_RSTE ((1UL << 52) - 1) +#define __SWP_OFFSET_SHIFT_RSTE 12 +#define __SWP_TYPE_MASK_RSTE ((1UL << 5) - 1) +#define __SWP_TYPE_SHIFT_RSTE 6 + +/* + * TT bits set to 0x00 == SEGMENT. For REGION3 entries, caller must add R3 + * bits 0x01. See also __set_huge_pte_at(). + */ +static inline unsigned long mk_swap_rste(unsigned long type, unsigned long offset) +{ + unsigned long rste; + + rste = _RST_ENTRY_INVALID | _RST_ENTRY_COMM; + rste |= (offset & __SWP_OFFSET_MASK_RSTE) << __SWP_OFFSET_SHIFT_RSTE; + rste |= (type & __SWP_TYPE_MASK_RSTE) << __SWP_TYPE_SHIFT_RSTE; + return rste; +} + +static inline unsigned long __swp_type_rste(swp_entry_t entry) +{ + return (entry.val >> __SWP_TYPE_SHIFT_RSTE) & __SWP_TYPE_MASK_RSTE; +} + +static inline unsigned long __swp_offset_rste(swp_entry_t entry) +{ + return (entry.val >> __SWP_OFFSET_SHIFT_RSTE) & __SWP_OFFSET_MASK_RSTE; +} + +#define __rste_to_swp_entry(rste) ((swp_entry_t) { rste }) + extern int vmem_add_mapping(unsigned long start, unsigned long size); extern void vmem_remove_mapping(unsigned long start, unsigned long size); extern int __vmem_map_4k_page(unsigned long addr, unsigned long phys, pgprot_t prot, bool alloc); diff --git a/arch/s390/include/asm/preempt.h b/arch/s390/include/asm/preempt.h index 0cde7e240373..2c29bdf12127 100644 --- a/arch/s390/include/asm/preempt.h +++ b/arch/s390/include/asm/preempt.h @@ -130,10 +130,24 @@ static __always_inline bool should_resched(int preempt_offset) #define init_idle_preempt_count(p, cpu) do { } while (0) #ifdef CONFIG_PREEMPTION -extern void preempt_schedule(void); -#define __preempt_schedule() preempt_schedule() -extern void preempt_schedule_notrace(void); -#define __preempt_schedule_notrace() preempt_schedule_notrace() + +void preempt_schedule(void); +void preempt_schedule_notrace(void); + +#ifdef CONFIG_PREEMPT_DYNAMIC + +void dynamic_preempt_schedule(void); +void dynamic_preempt_schedule_notrace(void); +#define __preempt_schedule() dynamic_preempt_schedule() +#define __preempt_schedule_notrace() dynamic_preempt_schedule_notrace() + +#else /* CONFIG_PREEMPT_DYNAMIC */ + +#define __preempt_schedule() preempt_schedule() +#define __preempt_schedule_notrace() preempt_schedule_notrace() + +#endif /* CONFIG_PREEMPT_DYNAMIC */ + #endif /* CONFIG_PREEMPTION */ #endif /* __ASM_PREEMPT_H */ diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h index ac868a9bb0d1..f87dd0a84855 100644 --- a/arch/s390/include/asm/spinlock.h +++ b/arch/s390/include/asm/spinlock.h @@ -82,9 +82,10 @@ static inline void arch_spin_unlock(arch_spinlock_t *lp) kcsan_release(); asm_inline volatile( ALTERNATIVE("nop", ".insn rre,0xb2fa0000,7,0", ALT_FACILITY(49)) /* NIAI 7 */ - " sth %1,%0\n" - : "=R" (((unsigned short *) &lp->lock)[1]) - : "d" (0) : "cc", "memory"); + " mvhhi %[lock],0\n" + : [lock] "=Q" (((unsigned short *)&lp->lock)[1]) + : + : "memory"); } /* diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h index 00ac01874a12..c33f7144d1b9 100644 --- 
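
/*
 * Editor's illustrative sketch (not part of the patch): round-tripping a
 * swap entry through the RSTE encoding documented above. The shift and mask
 * values are taken verbatim from the hunk; this is a stand-alone user-space
 * demonstration, not kernel code.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define _RST_ENTRY_COMM         0x0010UL
#define _RST_ENTRY_INVALID      0x0020UL
#define __SWP_OFFSET_MASK_RSTE  ((1UL << 52) - 1)
#define __SWP_OFFSET_SHIFT_RSTE 12
#define __SWP_TYPE_MASK_RSTE    ((1UL << 5) - 1)
#define __SWP_TYPE_SHIFT_RSTE   6

static uint64_t mk_swap_rste_sketch(uint64_t type, uint64_t offset)
{
        uint64_t rste = _RST_ENTRY_INVALID | _RST_ENTRY_COMM;

        rste |= (offset & __SWP_OFFSET_MASK_RSTE) << __SWP_OFFSET_SHIFT_RSTE;
        rste |= (type & __SWP_TYPE_MASK_RSTE) << __SWP_TYPE_SHIFT_RSTE;
        return rste;
}

int main(void)
{
        uint64_t rste = mk_swap_rste_sketch(3, 0x1234);

        /* A swap RSTE has the common and invalid bits set, present bit clear. */
        assert((rste & 0x31UL) == 0x30UL);
        assert(((rste >> __SWP_TYPE_SHIFT_RSTE) & __SWP_TYPE_MASK_RSTE) == 3);
        assert(((rste >> __SWP_OFFSET_SHIFT_RSTE) & __SWP_OFFSET_MASK_RSTE) == 0x1234);
        printf("rste = 0x%jx\n", (uintmax_t)rste);
        return 0;
}
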
a/arch/s390/include/asm/thread_info.h +++ b/arch/s390/include/asm/thread_info.h @@ -61,44 +61,45 @@ void arch_setup_new_exec(void); /* * thread information flags bit numbers */ -/* _TIF_WORK bits */ #define TIF_NOTIFY_RESUME 0 /* callback before returning to user */ #define TIF_SIGPENDING 1 /* signal pending */ #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ -#define TIF_UPROBE 3 /* breakpointed or single-stepping */ -#define TIF_GUARDED_STORAGE 4 /* load guarded storage control block */ +#define TIF_NEED_RESCHED_LAZY 3 /* lazy rescheduling needed */ +#define TIF_UPROBE 4 /* breakpointed or single-stepping */ #define TIF_PATCH_PENDING 5 /* pending live patching update */ #define TIF_PGSTE 6 /* New mm's will use 4K page tables */ #define TIF_NOTIFY_SIGNAL 7 /* signal notifications exist */ +#define TIF_GUARDED_STORAGE 8 /* load guarded storage control block */ #define TIF_ISOLATE_BP_GUEST 9 /* Run KVM guests with isolated BP */ #define TIF_PER_TRAP 10 /* Need to handle PER trap on exit to usermode */ - #define TIF_31BIT 16 /* 32bit process */ #define TIF_MEMDIE 17 /* is terminating due to OOM killer */ #define TIF_RESTORE_SIGMASK 18 /* restore signal mask in do_signal() */ #define TIF_SINGLE_STEP 19 /* This task is single stepped */ #define TIF_BLOCK_STEP 20 /* This task is block stepped */ #define TIF_UPROBE_SINGLESTEP 21 /* This task is uprobe single stepped */ - -/* _TIF_TRACE bits */ #define TIF_SYSCALL_TRACE 24 /* syscall trace active */ #define TIF_SYSCALL_AUDIT 25 /* syscall auditing active */ #define TIF_SECCOMP 26 /* secure computing */ #define TIF_SYSCALL_TRACEPOINT 27 /* syscall tracepoint instrumentation */ #define _TIF_NOTIFY_RESUME BIT(TIF_NOTIFY_RESUME) -#define _TIF_NOTIFY_SIGNAL BIT(TIF_NOTIFY_SIGNAL) #define _TIF_SIGPENDING BIT(TIF_SIGPENDING) #define _TIF_NEED_RESCHED BIT(TIF_NEED_RESCHED) +#define _TIF_NEED_RESCHED_LAZY BIT(TIF_NEED_RESCHED_LAZY) #define _TIF_UPROBE BIT(TIF_UPROBE) -#define _TIF_GUARDED_STORAGE BIT(TIF_GUARDED_STORAGE) #define _TIF_PATCH_PENDING BIT(TIF_PATCH_PENDING) +#define _TIF_PGSTE BIT(TIF_PGSTE) +#define _TIF_NOTIFY_SIGNAL BIT(TIF_NOTIFY_SIGNAL) +#define _TIF_GUARDED_STORAGE BIT(TIF_GUARDED_STORAGE) #define _TIF_ISOLATE_BP_GUEST BIT(TIF_ISOLATE_BP_GUEST) #define _TIF_PER_TRAP BIT(TIF_PER_TRAP) - #define _TIF_31BIT BIT(TIF_31BIT) +#define _TIF_MEMDIE BIT(TIF_MEMDIE) +#define _TIF_RESTORE_SIGMASK BIT(TIF_RESTORE_SIGMASK) #define _TIF_SINGLE_STEP BIT(TIF_SINGLE_STEP) - +#define _TIF_BLOCK_STEP BIT(TIF_BLOCK_STEP) +#define _TIF_UPROBE_SINGLESTEP BIT(TIF_UPROBE_SINGLESTEP) #define _TIF_SYSCALL_TRACE BIT(TIF_SYSCALL_TRACE) #define _TIF_SYSCALL_AUDIT BIT(TIF_SYSCALL_AUDIT) #define _TIF_SECCOMP BIT(TIF_SECCOMP) diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h index a6e2cd89b609..9dfd46dd03c6 100644 --- a/arch/s390/include/asm/tlbflush.h +++ b/arch/s390/include/asm/tlbflush.h @@ -46,11 +46,6 @@ static inline void __tlb_flush_mm(struct mm_struct *mm) { unsigned long gmap_asce; - /* - * If the machine has IDTE we prefer to do a per mm flush - * on all cpus instead of doing a local flush if the mm - * only ran on the local cpu. 
- */ preempt_disable(); atomic_inc(&mm->context.flush_count); /* Reset TLB flush mask */ diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c index b3f2103694e4..de19fd8a6a95 100644 --- a/arch/s390/kernel/debug.c +++ b/arch/s390/kernel/debug.c @@ -77,12 +77,14 @@ static debug_info_t *debug_info_create(const char *name, int pages_per_area, static void debug_info_get(debug_info_t *); static void debug_info_put(debug_info_t *); static int debug_prolog_level_fn(debug_info_t *id, - struct debug_view *view, char *out_buf); + struct debug_view *view, char *out_buf, + size_t out_buf_size); static int debug_input_level_fn(debug_info_t *id, struct debug_view *view, struct file *file, const char __user *user_buf, size_t user_buf_size, loff_t *offset); static int debug_prolog_pages_fn(debug_info_t *id, - struct debug_view *view, char *out_buf); + struct debug_view *view, char *out_buf, + size_t out_buf_size); static int debug_input_pages_fn(debug_info_t *id, struct debug_view *view, struct file *file, const char __user *user_buf, size_t user_buf_size, loff_t *offset); @@ -90,9 +92,11 @@ static int debug_input_flush_fn(debug_info_t *id, struct debug_view *view, struct file *file, const char __user *user_buf, size_t user_buf_size, loff_t *offset); static int debug_hex_ascii_format_fn(debug_info_t *id, struct debug_view *view, - char *out_buf, const char *in_buf); + char *out_buf, size_t out_buf_size, + const char *in_buf); static int debug_sprintf_format_fn(debug_info_t *id, struct debug_view *view, - char *out_buf, const char *inbuf); + char *out_buf, size_t out_buf_size, + const char *inbuf); static void debug_areas_swap(debug_info_t *a, debug_info_t *b); static void debug_events_append(debug_info_t *dest, debug_info_t *src); @@ -391,8 +395,10 @@ static int debug_format_entry(file_private_info_t *p_info) if (p_info->act_entry == DEBUG_PROLOG_ENTRY) { /* print prolog */ - if (view->prolog_proc) - len += view->prolog_proc(id_snap, view, p_info->temp_buf); + if (view->prolog_proc) { + len += view->prolog_proc(id_snap, view, p_info->temp_buf, + sizeof(p_info->temp_buf)); + } goto out; } if (!id_snap->areas) /* this is true, if we have a prolog only view */ @@ -402,12 +408,16 @@ static int debug_format_entry(file_private_info_t *p_info) if (act_entry->clock == 0LL) goto out; /* empty entry */ - if (view->header_proc) + if (view->header_proc) { len += view->header_proc(id_snap, view, p_info->act_area, - act_entry, p_info->temp_buf + len); - if (view->format_proc) + act_entry, p_info->temp_buf + len, + sizeof(p_info->temp_buf) - len); + } + if (view->format_proc) { len += view->format_proc(id_snap, view, p_info->temp_buf + len, + sizeof(p_info->temp_buf) - len, DEBUG_DATA(act_entry)); + } out: return len; } @@ -1292,9 +1302,9 @@ static inline int debug_get_uint(char *buf) */ static int debug_prolog_pages_fn(debug_info_t *id, struct debug_view *view, - char *out_buf) + char *out_buf, size_t out_buf_size) { - return sprintf(out_buf, "%i\n", id->pages_per_area); + return scnprintf(out_buf, out_buf_size, "%i\n", id->pages_per_area); } /* @@ -1341,14 +1351,14 @@ out: * prints out actual debug level */ static int debug_prolog_level_fn(debug_info_t *id, struct debug_view *view, - char *out_buf) + char *out_buf, size_t out_buf_size) { int rc = 0; if (id->level == DEBUG_OFF_LEVEL) - rc = sprintf(out_buf, "-\n"); + rc = scnprintf(out_buf, out_buf_size, "-\n"); else - rc = sprintf(out_buf, "%i\n", id->level); + rc = scnprintf(out_buf, out_buf_size, "%i\n", id->level); return rc; } @@ -1465,22 +1475,24 @@ 
out: * prints debug data in hex/ascii format */ static int debug_hex_ascii_format_fn(debug_info_t *id, struct debug_view *view, - char *out_buf, const char *in_buf) + char *out_buf, size_t out_buf_size, const char *in_buf) { int i, rc = 0; - for (i = 0; i < id->buf_size; i++) - rc += sprintf(out_buf + rc, "%02x ", ((unsigned char *) in_buf)[i]); - rc += sprintf(out_buf + rc, "| "); + for (i = 0; i < id->buf_size; i++) { + rc += scnprintf(out_buf + rc, out_buf_size - rc, + "%02x ", ((unsigned char *)in_buf)[i]); + } + rc += scnprintf(out_buf + rc, out_buf_size - rc, "| "); for (i = 0; i < id->buf_size; i++) { unsigned char c = in_buf[i]; if (isascii(c) && isprint(c)) - rc += sprintf(out_buf + rc, "%c", c); + rc += scnprintf(out_buf + rc, out_buf_size - rc, "%c", c); else - rc += sprintf(out_buf + rc, "."); + rc += scnprintf(out_buf + rc, out_buf_size - rc, "."); } - rc += sprintf(out_buf + rc, "\n"); + rc += scnprintf(out_buf + rc, out_buf_size - rc, "\n"); return rc; } @@ -1488,7 +1500,8 @@ static int debug_hex_ascii_format_fn(debug_info_t *id, struct debug_view *view, * prints header for debug entry */ int debug_dflt_header_fn(debug_info_t *id, struct debug_view *view, - int area, debug_entry_t *entry, char *out_buf) + int area, debug_entry_t *entry, char *out_buf, + size_t out_buf_size) { unsigned long sec, usec; unsigned long caller; @@ -1505,9 +1518,9 @@ int debug_dflt_header_fn(debug_info_t *id, struct debug_view *view, else except_str = "-"; caller = (unsigned long) entry->caller; - rc += sprintf(out_buf, "%02i %011ld:%06lu %1u %1s %04u %px ", - area, sec, usec, level, except_str, - entry->cpu, (void *)caller); + rc += scnprintf(out_buf, out_buf_size, "%02i %011ld:%06lu %1u %1s %04u %px ", + area, sec, usec, level, except_str, + entry->cpu, (void *)caller); return rc; } EXPORT_SYMBOL(debug_dflt_header_fn); @@ -1520,7 +1533,7 @@ EXPORT_SYMBOL(debug_dflt_header_fn); #define DEBUG_SPRINTF_MAX_ARGS 10 static int debug_sprintf_format_fn(debug_info_t *id, struct debug_view *view, - char *out_buf, const char *inbuf) + char *out_buf, size_t out_buf_size, const char *inbuf) { debug_sprintf_entry_t *curr_event = (debug_sprintf_entry_t *)inbuf; int num_longs, num_used_args = 0, i, rc = 0; @@ -1533,8 +1546,9 @@ static int debug_sprintf_format_fn(debug_info_t *id, struct debug_view *view, goto out; /* bufsize of entry too small */ if (num_longs == 1) { /* no args, we use only the string */ - strcpy(out_buf, curr_event->string); - rc = strlen(curr_event->string); + rc = strscpy(out_buf, curr_event->string, out_buf_size); + if (rc == -E2BIG) + rc = out_buf_size; goto out; } @@ -1546,12 +1560,13 @@ static int debug_sprintf_format_fn(debug_info_t *id, struct debug_view *view, for (i = 0; i < num_used_args; i++) index[i] = i; - rc = sprintf(out_buf, curr_event->string, curr_event->args[index[0]], - curr_event->args[index[1]], curr_event->args[index[2]], - curr_event->args[index[3]], curr_event->args[index[4]], - curr_event->args[index[5]], curr_event->args[index[6]], - curr_event->args[index[7]], curr_event->args[index[8]], - curr_event->args[index[9]]); + rc = scnprintf(out_buf, out_buf_size, + curr_event->string, curr_event->args[index[0]], + curr_event->args[index[1]], curr_event->args[index[2]], + curr_event->args[index[3]], curr_event->args[index[4]], + curr_event->args[index[5]], curr_event->args[index[6]], + curr_event->args[index[7]], curr_event->args[index[8]], + curr_event->args[index[9]]); out: return rc; } diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 
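
/*
 * Editor's illustrative sketch (not part of the patch): the bounded
 * formatting pattern the debug.c conversion above relies on. scnprintf()
 * returns the number of characters actually written (excluding the NUL), so
 * the "buf + len, size - len" accumulation can never overrun the buffer.
 * The scnprintf() below is a user-space stand-in for the kernel helper.
 */
#include <stdarg.h>
#include <stdio.h>

static int scnprintf(char *buf, size_t size, const char *fmt, ...)
{
        va_list args;
        int i;

        if (size == 0)
                return 0;
        va_start(args, fmt);
        i = vsnprintf(buf, size, fmt, args);
        va_end(args);
        if (i < 0)
                return 0;
        return (size_t)i < size ? i : (int)(size - 1);
}

int main(void)
{
        char out[16];
        int len = 0;

        for (int i = 0; i < 32; i++)    /* deliberately more data than fits */
                len += scnprintf(out + len, sizeof(out) - len, "%02x ", i);
        printf("len=%d buf=\"%s\"\n", len, out);        /* len never exceeds 15 */
        return 0;
}
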
1ff13239d4e5..960c08700cf6 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -430,9 +430,13 @@ SYM_CODE_START(\name) SYM_CODE_END(\name) .endm + .section .irqentry.text, "ax" + INT_HANDLER ext_int_handler,__LC_EXT_OLD_PSW,do_ext_irq INT_HANDLER io_int_handler,__LC_IO_OLD_PSW,do_io_irq + .section .kprobes.text, "ax" + /* * Machine check handler routines */ diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c index 6295faf0987d..8b80ea57125f 100644 --- a/arch/s390/kernel/kprobes.c +++ b/arch/s390/kernel/kprobes.c @@ -489,6 +489,12 @@ int __init arch_init_kprobes(void) return 0; } +int __init arch_populate_kprobe_blacklist(void) +{ + return kprobe_add_area_blacklist((unsigned long)__irqentry_text_start, + (unsigned long)__irqentry_text_end); +} + int arch_trampoline_kprobe(struct kprobe *p) { return 0; diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c index 0cde42f8af6e..1e99514fb7ae 100644 --- a/arch/s390/kernel/perf_cpum_sf.c +++ b/arch/s390/kernel/perf_cpum_sf.c @@ -180,39 +180,27 @@ static int sf_buffer_available(struct cpu_hw_sf *cpuhw) */ static void free_sampling_buffer(struct sf_buffer *sfb) { - unsigned long *sdbt, *curr; - - if (!sfb->sdbt) - return; + unsigned long *sdbt, *curr, *head; sdbt = sfb->sdbt; - curr = sdbt; - + if (!sdbt) + return; + sfb->sdbt = NULL; /* Free the SDBT after all SDBs are processed... */ - while (1) { - if (!*curr || !sdbt) - break; - - /* Process table-link entries */ + head = sdbt; + curr = sdbt; + do { if (is_link_entry(curr)) { + /* Process table-link entries */ curr = get_next_sdbt(curr); - if (sdbt) - free_page((unsigned long)sdbt); - - /* If the origin is reached, sampling buffer is freed */ - if (curr == sfb->sdbt) - break; - else - sdbt = curr; + free_page((unsigned long)sdbt); + sdbt = curr; } else { /* Process SDB pointer */ - if (*curr) { - free_page((unsigned long)phys_to_virt(*curr)); - curr++; - } + free_page((unsigned long)phys_to_virt(*curr)); + curr++; } - } - + } while (curr != head); memset(sfb, 0, sizeof(*sfb)); } diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c index 9f59837d159e..40edfde25f5b 100644 --- a/arch/s390/kernel/stacktrace.c +++ b/arch/s390/kernel/stacktrace.c @@ -151,7 +151,7 @@ void arch_stack_walk_user_common(stack_trace_consume_fn consume_entry, void *coo break; } if (!store_ip(consume_entry, cookie, entry, perf, ip)) - return; + break; first = false; } pagefault_enable(); diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c index a688351f4ab5..9816b0060fbe 100644 --- a/arch/s390/kvm/gaccess.c +++ b/arch/s390/kvm/gaccess.c @@ -129,8 +129,8 @@ static void ipte_lock_simple(struct kvm *kvm) retry: read_lock(&kvm->arch.sca_lock); ic = kvm_s390_get_ipte_control(kvm); + old = READ_ONCE(*ic); do { - old = READ_ONCE(*ic); if (old.k) { read_unlock(&kvm->arch.sca_lock); cond_resched(); @@ -138,7 +138,7 @@ retry: } new = old; new.k = 1; - } while (cmpxchg(&ic->val, old.val, new.val) != old.val); + } while (!try_cmpxchg(&ic->val, &old.val, new.val)); read_unlock(&kvm->arch.sca_lock); out: mutex_unlock(&kvm->arch.ipte_mutex); @@ -154,11 +154,11 @@ static void ipte_unlock_simple(struct kvm *kvm) goto out; read_lock(&kvm->arch.sca_lock); ic = kvm_s390_get_ipte_control(kvm); + old = READ_ONCE(*ic); do { - old = READ_ONCE(*ic); new = old; new.k = 0; - } while (cmpxchg(&ic->val, old.val, new.val) != old.val); + } while (!try_cmpxchg(&ic->val, &old.val, new.val)); read_unlock(&kvm->arch.sca_lock); wake_up(&kvm->arch.ipte_wq); out: @@ 
-172,8 +172,8 @@ static void ipte_lock_siif(struct kvm *kvm) retry: read_lock(&kvm->arch.sca_lock); ic = kvm_s390_get_ipte_control(kvm); + old = READ_ONCE(*ic); do { - old = READ_ONCE(*ic); if (old.kg) { read_unlock(&kvm->arch.sca_lock); cond_resched(); @@ -182,7 +182,7 @@ retry: new = old; new.k = 1; new.kh++; - } while (cmpxchg(&ic->val, old.val, new.val) != old.val); + } while (!try_cmpxchg(&ic->val, &old.val, new.val)); read_unlock(&kvm->arch.sca_lock); } @@ -192,13 +192,13 @@ static void ipte_unlock_siif(struct kvm *kvm) read_lock(&kvm->arch.sca_lock); ic = kvm_s390_get_ipte_control(kvm); + old = READ_ONCE(*ic); do { - old = READ_ONCE(*ic); new = old; new.kh--; if (!new.kh) new.k = 0; - } while (cmpxchg(&ic->val, old.val, new.val) != old.val); + } while (!try_cmpxchg(&ic->val, &old.val, new.val)); read_unlock(&kvm->arch.sca_lock); if (!new.kh) wake_up(&kvm->arch.ipte_wq); diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 4f0e7f61edf7..ea8dce299954 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -118,8 +118,6 @@ static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id) static void sca_clear_ext_call(struct kvm_vcpu *vcpu) { - int rc, expect; - if (!kvm_s390_use_sca_entries()) return; kvm_s390_clear_cpuflags(vcpu, CPUSTAT_ECALL_PEND); @@ -128,23 +126,16 @@ static void sca_clear_ext_call(struct kvm_vcpu *vcpu) struct esca_block *sca = vcpu->kvm->arch.sca; union esca_sigp_ctrl *sigp_ctrl = &(sca->cpu[vcpu->vcpu_id].sigp_ctrl); - union esca_sigp_ctrl old; - old = READ_ONCE(*sigp_ctrl); - expect = old.value; - rc = cmpxchg(&sigp_ctrl->value, old.value, 0); + WRITE_ONCE(sigp_ctrl->value, 0); } else { struct bsca_block *sca = vcpu->kvm->arch.sca; union bsca_sigp_ctrl *sigp_ctrl = &(sca->cpu[vcpu->vcpu_id].sigp_ctrl); - union bsca_sigp_ctrl old; - old = READ_ONCE(*sigp_ctrl); - expect = old.value; - rc = cmpxchg(&sigp_ctrl->value, old.value, 0); + WRITE_ONCE(sigp_ctrl->value, 0); } read_unlock(&vcpu->kvm->arch.sca_lock); - WARN_ON(rc != expect); /* cannot clear? 
*/ } int psw_extint_disabled(struct kvm_vcpu *vcpu) @@ -247,12 +238,12 @@ static inline int gisa_set_iam(struct kvm_s390_gisa *gisa, u8 iam) { u64 word, _word; + word = READ_ONCE(gisa->u64.word[0]); do { - word = READ_ONCE(gisa->u64.word[0]); if ((u64)gisa != word >> 32) return -EBUSY; _word = (word & ~0xffUL) | iam; - } while (cmpxchg(&gisa->u64.word[0], word, _word) != word); + } while (!try_cmpxchg(&gisa->u64.word[0], &word, _word)); return 0; } @@ -270,10 +261,10 @@ static inline void gisa_clear_ipm(struct kvm_s390_gisa *gisa) { u64 word, _word; + word = READ_ONCE(gisa->u64.word[0]); do { - word = READ_ONCE(gisa->u64.word[0]); _word = word & ~(0xffUL << 24); - } while (cmpxchg(&gisa->u64.word[0], word, _word) != word); + } while (!try_cmpxchg(&gisa->u64.word[0], &word, _word)); } /** @@ -291,14 +282,14 @@ static inline u8 gisa_get_ipm_or_restore_iam(struct kvm_s390_gisa_interrupt *gi) u8 pending_mask, alert_mask; u64 word, _word; + word = READ_ONCE(gi->origin->u64.word[0]); do { - word = READ_ONCE(gi->origin->u64.word[0]); alert_mask = READ_ONCE(gi->alert.mask); pending_mask = (u8)(word >> 24) & alert_mask; if (pending_mask) return pending_mask; _word = (word & ~0xffUL) | alert_mask; - } while (cmpxchg(&gi->origin->u64.word[0], word, _word) != word); + } while (!try_cmpxchg(&gi->origin->u64.word[0], &word, _word)); return 0; } diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 442d4a227c0e..d8080c27d45b 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -1937,11 +1937,11 @@ static void kvm_s390_update_topology_change_report(struct kvm *kvm, bool val) read_lock(&kvm->arch.sca_lock); sca = kvm->arch.sca; + old = READ_ONCE(sca->utility); do { - old = READ_ONCE(sca->utility); new = old; new.mtcr = val; - } while (cmpxchg(&sca->utility.val, old.val, new.val) != old.val); + } while (!try_cmpxchg(&sca->utility.val, &old.val, new.val)); read_unlock(&kvm->arch.sca_lock); } diff --git a/arch/s390/kvm/pci.c b/arch/s390/kvm/pci.c index a61518b549f0..9b9e7fdd5380 100644 --- a/arch/s390/kvm/pci.c +++ b/arch/s390/kvm/pci.c @@ -208,13 +208,12 @@ static inline int account_mem(unsigned long nr_pages) page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; + cur_pages = atomic_long_read(&user->locked_vm); do { - cur_pages = atomic_long_read(&user->locked_vm); new_pages = cur_pages + nr_pages; if (new_pages > page_limit) return -ENOMEM; - } while (atomic_long_cmpxchg(&user->locked_vm, cur_pages, - new_pages) != cur_pages); + } while (!atomic_long_try_cmpxchg(&user->locked_vm, &cur_pages, new_pages)); atomic64_add(nr_pages, ¤t->mm->pinned_vm); diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c index 09d735010ee1..a81a01c44927 100644 --- a/arch/s390/lib/spinlock.c +++ b/arch/s390/lib/spinlock.c @@ -15,6 +15,7 @@ #include <linux/percpu.h> #include <linux/io.h> #include <asm/alternative.h> +#include <asm/asm.h> int spin_retry = -1; @@ -76,24 +77,43 @@ static inline int arch_load_niai4(int *lock) asm_inline volatile( ALTERNATIVE("nop", ".insn rre,0xb2fa0000,4,0", ALT_FACILITY(49)) /* NIAI 4 */ - " l %0,%1\n" - : "=d" (owner) : "Q" (*lock) : "memory"); + " l %[owner],%[lock]\n" + : [owner] "=d" (owner) : [lock] "R" (*lock) : "memory"); return owner; } -static inline int arch_cmpxchg_niai8(int *lock, int old, int new) +#ifdef __HAVE_ASM_FLAG_OUTPUTS__ + +static inline int arch_try_cmpxchg_niai8(int *lock, int old, int new) +{ + int cc; + + asm_inline volatile( + ALTERNATIVE("nop", ".insn rre,0xb2fa0000,8,0", ALT_FACILITY(49)) /* NIAI 8 */ + " cs 
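
/*
 * Editor's illustrative sketch (not part of the patch): the cmpxchg() ->
 * try_cmpxchg() conversion pattern used in the KVM hunks above. try_cmpxchg()
 * updates "old" with the current value on failure, so the initial read can
 * move out of the loop. The C11 compare-exchange below has the same
 * update-on-failure semantics; this is a user-space demonstration only.
 */
#include <stdatomic.h>
#include <stdio.h>

static unsigned long set_iam_sketch(_Atomic unsigned long *word, unsigned long iam)
{
        unsigned long old, new;

        old = atomic_load(word);                /* read once, before the loop */
        do {
                new = (old & ~0xffUL) | iam;    /* recompute from the latest "old" */
        } while (!atomic_compare_exchange_weak(word, &old, new));
        return new;
}

int main(void)
{
        _Atomic unsigned long word = 0x12345678UL;

        printf("0x%lx\n", set_iam_sketch(&word, 0xaa)); /* prints 0x123456aa */
        return 0;
}
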
%[old],%[new],%[lock]\n" + : [old] "+d" (old), [lock] "+Q" (*lock), "=@cc" (cc) + : [new] "d" (new) + : "memory"); + return cc == 0; +} + +#else /* __HAVE_ASM_FLAG_OUTPUTS__ */ + +static inline int arch_try_cmpxchg_niai8(int *lock, int old, int new) { int expected = old; asm_inline volatile( ALTERNATIVE("nop", ".insn rre,0xb2fa0000,8,0", ALT_FACILITY(49)) /* NIAI 8 */ - " cs %0,%3,%1\n" - : "=d" (old), "=Q" (*lock) - : "0" (old), "d" (new), "Q" (*lock) + " cs %[old],%[new],%[lock]\n" + : [old] "+d" (old), [lock] "+Q" (*lock) + : [new] "d" (new) : "cc", "memory"); return expected == old; } +#endif /* __HAVE_ASM_FLAG_OUTPUTS__ */ + static inline struct spin_wait *arch_spin_decode_tail(int lock) { int ix, cpu; @@ -226,7 +246,7 @@ static inline void arch_spin_lock_classic(arch_spinlock_t *lp) /* Try to get the lock if it is free. */ if (!owner) { new = (old & _Q_TAIL_MASK) | lockval; - if (arch_cmpxchg_niai8(&lp->lock, old, new)) { + if (arch_try_cmpxchg_niai8(&lp->lock, old, new)) { /* Got the lock */ return; } diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 646326fa0fad..9b681f74dccc 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -338,7 +338,8 @@ done: handle_fault_error_nolock(regs, 0); else do_sigsegv(regs, SEGV_MAPERR); - } else if (fault & (VM_FAULT_SIGBUS | VM_FAULT_HWPOISON)) { + } else if (fault & (VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | + VM_FAULT_HWPOISON_LARGE)) { if (!user_mode(regs)) handle_fault_error_nolock(regs, 0); else diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c index 643e47bfaddc..16b8a36c56de 100644 --- a/arch/s390/mm/gmap.c +++ b/arch/s390/mm/gmap.c @@ -587,7 +587,8 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr) if (pmd_leaf(*pmd)) { *table = (pmd_val(*pmd) & _SEGMENT_ENTRY_HARDWARE_BITS_LARGE) - | _SEGMENT_ENTRY_GMAP_UC; + | _SEGMENT_ENTRY_GMAP_UC + | _SEGMENT_ENTRY; } else *table = pmd_val(*pmd) & _SEGMENT_ENTRY_HARDWARE_BITS; @@ -2396,7 +2397,8 @@ static void gmap_pmdp_clear(struct mm_struct *mm, unsigned long vmaddr, gaddr = __gmap_segment_gaddr((unsigned long *)pmdp); pmdp_notify_gmap(gmap, pmdp, gaddr); WARN_ON(pmd_val(*pmdp) & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE | - _SEGMENT_ENTRY_GMAP_UC)); + _SEGMENT_ENTRY_GMAP_UC | + _SEGMENT_ENTRY)); if (purge) __pmdp_csp(pmdp); set_pmd(pmdp, __pmd(_SEGMENT_ENTRY_EMPTY)); @@ -2450,7 +2452,8 @@ void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr) gaddr = __gmap_segment_gaddr(entry); pmdp_notify_gmap(gmap, pmdp, gaddr); WARN_ON(*entry & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE | - _SEGMENT_ENTRY_GMAP_UC)); + _SEGMENT_ENTRY_GMAP_UC | + _SEGMENT_ENTRY)); if (MACHINE_HAS_TLB_GUEST) __pmdp_idte(gaddr, pmdp, IDTE_GUEST_ASCE, gmap->asce, IDTE_LOCAL); @@ -2485,7 +2488,8 @@ void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr) gaddr = __gmap_segment_gaddr(entry); pmdp_notify_gmap(gmap, pmdp, gaddr); WARN_ON(*entry & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE | - _SEGMENT_ENTRY_GMAP_UC)); + _SEGMENT_ENTRY_GMAP_UC | + _SEGMENT_ENTRY)); if (MACHINE_HAS_TLB_GUEST) __pmdp_idte(gaddr, pmdp, IDTE_GUEST_ASCE, gmap->asce, IDTE_GLOBAL); diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c index 7c79cf1bc7d7..d9ce199953de 100644 --- a/arch/s390/mm/hugetlbpage.c +++ b/arch/s390/mm/hugetlbpage.c @@ -24,6 +24,7 @@ static inline unsigned long __pte_to_rste(pte_t pte) { + swp_entry_t arch_entry; unsigned long rste; /* @@ -48,6 +49,7 @@ static inline unsigned long __pte_to_rste(pte_t pte) */ if (pte_present(pte)) { rste = pte_val(pte) & 
PAGE_MASK; + rste |= _SEGMENT_ENTRY_PRESENT; rste |= move_set_bit(pte_val(pte), _PAGE_READ, _SEGMENT_ENTRY_READ); rste |= move_set_bit(pte_val(pte), _PAGE_WRITE, @@ -66,6 +68,10 @@ static inline unsigned long __pte_to_rste(pte_t pte) #endif rste |= move_set_bit(pte_val(pte), _PAGE_NOEXEC, _SEGMENT_ENTRY_NOEXEC); + } else if (!pte_none(pte)) { + /* swap pte */ + arch_entry = __pte_to_swp_entry(pte); + rste = mk_swap_rste(__swp_type(arch_entry), __swp_offset(arch_entry)); } else rste = _SEGMENT_ENTRY_EMPTY; return rste; @@ -73,13 +79,18 @@ static inline unsigned long __pte_to_rste(pte_t pte) static inline pte_t __rste_to_pte(unsigned long rste) { + swp_entry_t arch_entry; unsigned long pteval; - int present; + int present, none; + pte_t pte; - if ((rste & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) + if ((rste & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) { present = pud_present(__pud(rste)); - else + none = pud_none(__pud(rste)); + } else { present = pmd_present(__pmd(rste)); + none = pmd_none(__pmd(rste)); + } /* * Convert encoding pmd / pud bits pte bits @@ -114,6 +125,11 @@ static inline pte_t __rste_to_pte(unsigned long rste) pteval |= move_set_bit(rste, _SEGMENT_ENTRY_SOFT_DIRTY, _PAGE_SOFT_DIRTY); #endif pteval |= move_set_bit(rste, _SEGMENT_ENTRY_NOEXEC, _PAGE_NOEXEC); + } else if (!none) { + /* swap rste */ + arch_entry = __rste_to_swp_entry(rste); + pte = mk_swap_pte(__swp_type_rste(arch_entry), __swp_offset_rste(arch_entry)); + pteval = pte_val(pte); } else pteval = _PAGE_INVALID; return __pte(pteval); @@ -148,8 +164,6 @@ void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr, unsigned long rste; rste = __pte_to_rste(pte); - if (!MACHINE_HAS_NX) - rste &= ~_SEGMENT_ENTRY_NOEXEC; /* Set correct table type for 2G hugepages */ if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) { @@ -223,11 +237,10 @@ pte_t *huge_pte_offset(struct mm_struct *mm, p4dp = p4d_offset(pgdp, addr); if (p4d_present(*p4dp)) { pudp = pud_offset(p4dp, addr); - if (pud_present(*pudp)) { - if (pud_leaf(*pudp)) - return (pte_t *) pudp; + if (sz == PUD_SIZE) + return (pte_t *)pudp; + if (pud_present(*pudp)) pmdp = pmd_offset(pudp, addr); - } } } return (pte_t *) pmdp; diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index cbff587dc4e3..88f72745fa59 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c @@ -779,8 +779,9 @@ int zpci_hot_reset_device(struct zpci_dev *zdev) * @fh: Current Function Handle of the device to be created * @state: Initial state after creation either Standby or Configured * - * Creates a new zpci device and adds it to its, possibly newly created, zbus - * as well as zpci_list. + * Allocates a new struct zpci_dev and queries the platform for its details. + * If successful the device can subsequently be added to the zPCI subsystem + * using zpci_add_device(). * * Returns: the zdev on success or an error pointer otherwise */ @@ -803,7 +804,6 @@ struct zpci_dev *zpci_create_device(u32 fid, u32 fh, enum zpci_state state) goto error; zdev->state = state; - kref_init(&zdev->kref); mutex_init(&zdev->state_lock); mutex_init(&zdev->fmb_lock); mutex_init(&zdev->kzdev_lock); @@ -816,6 +816,17 @@ error: return ERR_PTR(rc); } +/** + * zpci_add_device() - Add a previously created zPCI device to the zPCI subsystem + * @zdev: The zPCI device to be added + * + * A struct zpci_dev is added to the zPCI subsystem and to a virtual PCI bus creating + * a new one as necessary. A hotplug slot is created and events start to be handled. 
+ * If successful from this point on zpci_zdev_get() and zpci_zdev_put() must be used. + * If adding the struct zpci_dev fails the device was not added and should be freed. + * + * Return: 0 on success, or an error code otherwise + */ int zpci_add_device(struct zpci_dev *zdev) { int rc; @@ -829,6 +840,7 @@ int zpci_add_device(struct zpci_dev *zdev) if (rc) goto error_destroy_iommu; + kref_init(&zdev->kref); spin_lock(&zpci_list_lock); list_add_tail(&zdev->entry, &zpci_list); spin_unlock(&zpci_list_lock); @@ -928,10 +940,8 @@ void zpci_device_reserved(struct zpci_dev *zdev) void zpci_release_device(struct kref *kref) { struct zpci_dev *zdev = container_of(kref, struct zpci_dev, kref); - int ret; - if (zdev->has_hp_slot) - zpci_exit_slot(zdev); + WARN_ON(zdev->state != ZPCI_FN_STATE_RESERVED); if (zdev->zbus->bus) zpci_bus_remove_device(zdev, false); @@ -939,28 +949,14 @@ void zpci_release_device(struct kref *kref) if (zdev_enabled(zdev)) zpci_disable_device(zdev); - switch (zdev->state) { - case ZPCI_FN_STATE_CONFIGURED: - ret = sclp_pci_deconfigure(zdev->fid); - zpci_dbg(3, "deconf fid:%x, rc:%d\n", zdev->fid, ret); - fallthrough; - case ZPCI_FN_STATE_STANDBY: - if (zdev->has_hp_slot) - zpci_exit_slot(zdev); - spin_lock(&zpci_list_lock); - list_del(&zdev->entry); - spin_unlock(&zpci_list_lock); - zpci_dbg(3, "rsv fid:%x\n", zdev->fid); - fallthrough; - case ZPCI_FN_STATE_RESERVED: - if (zdev->has_resources) - zpci_cleanup_bus_resources(zdev); - zpci_bus_device_unregister(zdev); - zpci_destroy_iommu(zdev); - fallthrough; - default: - break; - } + if (zdev->has_hp_slot) + zpci_exit_slot(zdev); + + if (zdev->has_resources) + zpci_cleanup_bus_resources(zdev); + + zpci_bus_device_unregister(zdev); + zpci_destroy_iommu(zdev); zpci_dbg(3, "rem fid:%x\n", zdev->fid); kfree_rcu(zdev, rcu); } @@ -1121,7 +1117,8 @@ static void zpci_add_devices(struct list_head *scan_list) list_sort(NULL, scan_list, &zpci_cmp_rid); list_for_each_entry_safe(zdev, tmp, scan_list, entry) { list_del_init(&zdev->entry); - zpci_add_device(zdev); + if (zpci_add_device(zdev)) + kfree(zdev); } } diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c index 47f934f4e828..7f7b732b3f3e 100644 --- a/arch/s390/pci/pci_event.c +++ b/arch/s390/pci/pci_event.c @@ -340,7 +340,10 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf) zdev = zpci_create_device(ccdf->fid, ccdf->fh, ZPCI_FN_STATE_CONFIGURED); if (IS_ERR(zdev)) break; - zpci_add_device(zdev); + if (zpci_add_device(zdev)) { + kfree(zdev); + break; + } } else { /* the configuration request may be stale */ if (zdev->state != ZPCI_FN_STATE_STANDBY) @@ -354,7 +357,10 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf) zdev = zpci_create_device(ccdf->fid, ccdf->fh, ZPCI_FN_STATE_STANDBY); if (IS_ERR(zdev)) break; - zpci_add_device(zdev); + if (zpci_add_device(zdev)) { + kfree(zdev); + break; + } } else { zpci_update_fh(zdev, ccdf->fh); } diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile index 757451c3ea1d..0400078076e5 100644 --- a/arch/sparc/Makefile +++ b/arch/sparc/Makefile @@ -29,7 +29,7 @@ UTS_MACHINE := sparc # versions of gcc. Some gcc versions won't pass -Av8 to binutils when you # give -mcpu=v8. This silently worked with older bintutils versions but # does not any more. 
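
/*
 * Editor's illustrative sketch (not part of the patch): the intended
 * create/add split described by the zpci_create_device()/zpci_add_device()
 * kernel-doc above, mirroring the updated callers in pci.c and pci_event.c.
 * Kernel-context pseudocode, not compilable on its own; error handling is
 * abbreviated and the function name is a placeholder.
 */
static int example_bring_up_function(u32 fid, u32 fh)
{
        struct zpci_dev *zdev;
        int rc;

        zdev = zpci_create_device(fid, fh, ZPCI_FN_STATE_CONFIGURED);
        if (IS_ERR(zdev))
                return PTR_ERR(zdev);

        /*
         * Only zpci_add_device() takes the initial reference (kref_init()
         * moved there); if it fails, the device was never added and must be
         * freed directly rather than via zpci_zdev_put().
         */
        rc = zpci_add_device(zdev);
        if (rc) {
                kfree(zdev);
                return rc;
        }
        return 0;
}
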
-KBUILD_CFLAGS += -m32 -mcpu=v8 -pipe -mno-fpu -fcall-used-g5 -fcall-used-g7 +KBUILD_CFLAGS += -m32 -mcpu=v8 -pipe -mno-fpu $(call cc-option,-fcall-used-g5) $(call cc-option,-fcall-used-g7) KBUILD_CFLAGS += -Wa,-Av8 KBUILD_AFLAGS += -m32 -Wa,-Av8 @@ -45,7 +45,7 @@ export BITS := 64 UTS_MACHINE := sparc64 KBUILD_CFLAGS += -m64 -pipe -mno-fpu -mcpu=ultrasparc -mcmodel=medlow -KBUILD_CFLAGS += -ffixed-g4 -ffixed-g5 -fcall-used-g7 -Wno-sign-compare +KBUILD_CFLAGS += -ffixed-g4 -ffixed-g5 $(call cc-option,-fcall-used-g7) -Wno-sign-compare KBUILD_CFLAGS += -Wa,--undeclared-regs KBUILD_CFLAGS += $(call cc-option,-mtune=ultrasparc3) KBUILD_AFLAGS += -m64 -mcpu=ultrasparc -Wa,--undeclared-regs diff --git a/arch/sparc/include/asm/hvtramp.h b/arch/sparc/include/asm/hvtramp.h index 688ea43af0f5..ce2453ea4f2b 100644 --- a/arch/sparc/include/asm/hvtramp.h +++ b/arch/sparc/include/asm/hvtramp.h @@ -17,7 +17,7 @@ struct hvtramp_descr { __u64 fault_info_va; __u64 fault_info_pa; __u64 thread_reg; - struct hvtramp_mapping maps[1]; + struct hvtramp_mapping maps[]; }; void hv_cpu_startup(unsigned long hvdescr_pa); diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index e40c395db202..5cbd6ed5ef6f 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c @@ -297,9 +297,7 @@ static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg, unsigned long hv_err; int i; - hdesc = kzalloc(sizeof(*hdesc) + - (sizeof(struct hvtramp_mapping) * - num_kernel_image_mappings - 1), + hdesc = kzalloc(struct_size(hdesc, maps, num_kernel_image_mappings), GFP_KERNEL); if (!hdesc) { printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate " diff --git a/arch/sparc/vdso/Makefile b/arch/sparc/vdso/Makefile index 243dbfc4609d..50ec2978cda5 100644 --- a/arch/sparc/vdso/Makefile +++ b/arch/sparc/vdso/Makefile @@ -46,7 +46,7 @@ CFL := $(PROFILING) -mcmodel=medlow -fPIC -O2 -fasynchronous-unwind-tables -m64 -fno-omit-frame-pointer -foptimize-sibling-calls \ -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO -SPARC_REG_CFLAGS = -ffixed-g4 -ffixed-g5 -fcall-used-g5 -fcall-used-g7 +SPARC_REG_CFLAGS = -ffixed-g4 -ffixed-g5 $(call cc-option,-fcall-used-g5) $(call cc-option,-fcall-used-g7) $(vobjs): KBUILD_CFLAGS := $(filter-out $(RANDSTRUCT_CFLAGS) $(GCC_PLUGINS_CFLAGS) $(SPARC_REG_CFLAGS),$(KBUILD_CFLAGS)) $(CFL) diff --git a/arch/sparc/vdso/vclock_gettime.c b/arch/sparc/vdso/vclock_gettime.c index e794edde6755..79607804ea1b 100644 --- a/arch/sparc/vdso/vclock_gettime.c +++ b/arch/sparc/vdso/vclock_gettime.c @@ -86,6 +86,11 @@ notrace static long vdso_fallback_gettimeofday(struct __kernel_old_timeval *tv, } #ifdef CONFIG_SPARC64 +notrace static __always_inline u64 __shr64(u64 val, int amt) +{ + return val >> amt; +} + notrace static __always_inline u64 vread_tick(void) { u64 ret; @@ -102,6 +107,21 @@ notrace static __always_inline u64 vread_tick_stick(void) return ret; } #else +notrace static __always_inline u64 __shr64(u64 val, int amt) +{ + u64 ret; + + __asm__ __volatile__("sllx %H1, 32, %%g1\n\t" + "srl %L1, 0, %L1\n\t" + "or %%g1, %L1, %%g1\n\t" + "srlx %%g1, %2, %L0\n\t" + "srlx %L0, 32, %H0" + : "=r" (ret) + : "r" (val), "r" (amt) + : "g1"); + return ret; +} + notrace static __always_inline u64 vread_tick(void) { register unsigned long long ret asm("o4"); @@ -154,7 +174,7 @@ notrace static __always_inline int do_realtime(struct vvar_data *vvar, ts->tv_sec = vvar->wall_time_sec; ns = vvar->wall_time_snsec; ns += vgetsns(vvar); - ns >>= vvar->clock.shift; + ns = __shr64(ns, vvar->clock.shift); } while 
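
/*
 * Editor's illustrative sketch (not part of the patch): allocating a struct
 * that ends in a flexible array member, as the hvtramp_descr change above
 * does. struct_size(hdesc, maps, n) is the kernel's overflow-checked form of
 * the size computed by hand below; this is a user-space stand-in with
 * placeholder type names.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct mapping {
        uint64_t vaddr;
        uint64_t tte;
};

struct descr {
        uint64_t cpu;
        struct mapping maps[];  /* flexible array member, replaces "maps[1]" */
};

int main(void)
{
        size_t n = 4;
        struct descr *d = calloc(1, sizeof(*d) + n * sizeof(d->maps[0]));

        if (!d)
                return 1;
        for (size_t i = 0; i < n; i++)
                d->maps[i].vaddr = 0x1000 * i;
        printf("allocated %zu bytes for %zu mappings\n",
               sizeof(*d) + n * sizeof(d->maps[0]), n);
        free(d);
        return 0;
}
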
(unlikely(vvar_read_retry(vvar, seq))); ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns); @@ -174,7 +194,7 @@ notrace static __always_inline int do_realtime_stick(struct vvar_data *vvar, ts->tv_sec = vvar->wall_time_sec; ns = vvar->wall_time_snsec; ns += vgetsns_stick(vvar); - ns >>= vvar->clock.shift; + ns = __shr64(ns, vvar->clock.shift); } while (unlikely(vvar_read_retry(vvar, seq))); ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns); @@ -194,7 +214,7 @@ notrace static __always_inline int do_monotonic(struct vvar_data *vvar, ts->tv_sec = vvar->monotonic_time_sec; ns = vvar->monotonic_time_snsec; ns += vgetsns(vvar); - ns >>= vvar->clock.shift; + ns = __shr64(ns, vvar->clock.shift); } while (unlikely(vvar_read_retry(vvar, seq))); ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns); @@ -214,7 +234,7 @@ notrace static __always_inline int do_monotonic_stick(struct vvar_data *vvar, ts->tv_sec = vvar->monotonic_time_sec; ns = vvar->monotonic_time_snsec; ns += vgetsns_stick(vvar); - ns >>= vvar->clock.shift; + ns = __shr64(ns, vvar->clock.shift); } while (unlikely(vvar_read_retry(vvar, seq))); ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns); diff --git a/drivers/acpi/arm64/init.c b/drivers/acpi/arm64/init.c index d0c8aed90fd1..7a47d8095a7d 100644 --- a/drivers/acpi/arm64/init.c +++ b/drivers/acpi/arm64/init.c @@ -2,7 +2,7 @@ #include <linux/acpi.h> #include "init.h" -void __init acpi_arm_init(void) +void __init acpi_arch_init(void) { if (IS_ENABLED(CONFIG_ACPI_AGDI)) acpi_agdi_init(); diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index aed4a37da03e..3d5342f8d7b3 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c @@ -853,6 +853,7 @@ static int sysfs_add_battery(struct acpi_battery *battery) struct power_supply_config psy_cfg = { .drv_data = battery, .attr_grp = acpi_battery_groups, + .no_wakeup_source = true, }; bool full_cap_broken = false; @@ -888,7 +889,7 @@ static int sysfs_add_battery(struct acpi_battery *battery) battery->bat_desc.type = POWER_SUPPLY_TYPE_BATTERY; battery->bat_desc.get_property = acpi_battery_get_property; - battery->bat = power_supply_register_no_ws(&battery->device->dev, + battery->bat = power_supply_register(&battery->device->dev, &battery->bat_desc, &psy_cfg); if (IS_ERR(battery->bat)) { diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 16917dc3ad60..058910af82bc 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -1434,6 +1434,8 @@ static int __init acpi_bus_init(void) struct kobject *acpi_kobj; EXPORT_SYMBOL_GPL(acpi_kobj); +void __weak __init acpi_arch_init(void) { } + static int __init acpi_init(void) { int result; @@ -1461,8 +1463,7 @@ static int __init acpi_init(void) acpi_viot_early_init(); acpi_hest_init(); acpi_ghes_init(); - acpi_arm_init(); - acpi_riscv_init(); + acpi_arch_init(); acpi_scan_init(); acpi_ec_init(); acpi_debugfs_init(); diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index ce728cf7e301..698897b29de2 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -800,12 +800,12 @@ static int acpi_processor_setup_cstates(struct acpi_processor *pr) state->enter = acpi_idle_enter; state->flags = 0; - if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2 || - cx->type == ACPI_STATE_C3) { - state->enter_dead = acpi_idle_play_dead; - if (cx->type != ACPI_STATE_C3) - drv->safe_state_index = count; - } + + state->enter_dead = acpi_idle_play_dead; + + if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2) + 
drv->safe_state_index = count; + /* * Halt-induced C1 is not good for ->enter_s2idle, because it * re-enables interrupts on exit. Moreover, C1 is generally not diff --git a/drivers/acpi/riscv/init.c b/drivers/acpi/riscv/init.c index 5ef97905a727..673e4d5dd752 100644 --- a/drivers/acpi/riscv/init.c +++ b/drivers/acpi/riscv/init.c @@ -7,7 +7,7 @@ #include <linux/acpi.h> #include "init.h" -void __init acpi_riscv_init(void) +void __init acpi_arch_init(void) { riscv_acpi_init_gsi_mapping(); } diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c index 423565c31d5e..cb45ef5240da 100644 --- a/drivers/acpi/x86/utils.c +++ b/drivers/acpi/x86/utils.c @@ -296,6 +296,7 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = { /* * 2. Devices which also have the skip i2c/serdev quirks and which * need the x86-android-tablets module to properly work. + * Sorted alphabetically. */ #if IS_ENABLED(CONFIG_X86_ANDROID_TABLETS) { @@ -309,6 +310,19 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = { ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS), }, { + /* Acer Iconia One 8 A1-840 (non FHD version) */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Insyde"), + DMI_MATCH(DMI_PRODUCT_NAME, "BayTrail"), + /* Above strings are too generic also match BIOS date */ + DMI_MATCH(DMI_BIOS_DATE, "04/01/2014"), + }, + .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS | + ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY | + ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS), + }, + { + /* Asus ME176C tablet */ .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ME176C"), @@ -319,23 +333,24 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = { ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS), }, { - /* Lenovo Yoga Book X90F/L */ + /* Asus TF103C transformer 2-in-1 */ .matches = { - DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel Corporation"), - DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"), - DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "YETI-11"), + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "TF103C"), }, .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS | - ACPI_QUIRK_UART1_SKIP | ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY | ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS), }, { + /* Lenovo Yoga Book X90F/L */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "TF103C"), + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel Corporation"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"), + DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "YETI-11"), }, .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS | + ACPI_QUIRK_UART1_SKIP | ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY | ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS), }, @@ -425,6 +440,7 @@ static const struct acpi_device_id i2c_acpi_known_good_ids[] = { { "10EC5640", 0 }, /* RealTek ALC5640 audio codec */ { "10EC5651", 0 }, /* RealTek ALC5651 audio codec */ { "INT33F4", 0 }, /* X-Powers AXP288 PMIC */ + { "INT33F5", 0 }, /* TI Dollar Cove PMIC */ { "INT33FD", 0 }, /* Intel Crystal Cove PMIC */ { "INT34D3", 0 }, /* Intel Whiskey Cove PMIC */ { "NPCE69A", 0 }, /* Asus Transformer keyboard dock */ diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c index 0230c43377c1..8ef259b4d037 100644 --- a/drivers/amba/bus.c +++ b/drivers/amba/bus.c @@ -449,6 +449,12 @@ const struct bus_type amba_bustype = { }; EXPORT_SYMBOL_GPL(amba_bustype); +bool dev_is_amba(const struct device *dev) +{ + return dev->bus == &amba_bustype; +} +EXPORT_SYMBOL_GPL(dev_is_amba); + static int __init amba_init(void) 
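
/*
 * Editor's illustrative sketch (not part of the patch): the weak-default /
 * strong-override pattern that replaces the explicit acpi_arm_init() and
 * acpi_riscv_init() calls above with a single acpi_arch_init() hook. The two
 * "files" below would be separate translation units; the linker picks the
 * strong definition when one is present. Names are placeholders.
 */

/* core.c - generic code provides an empty weak default */
void __attribute__((weak)) arch_hook(void)
{
        /* nothing to do on architectures that do not override this */
}

void core_init(void)
{
        arch_hook();    /* resolves to the arch override if one is linked in */
}

/* arch_foo.c - an architecture that needs the hook supplies a strong definition */
void arch_hook(void)
{
        /* arch-specific initialization would go here */
}
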
{ return bus_register(&amba_bustype); diff --git a/drivers/cpuidle/cpuidle-riscv-sbi.c b/drivers/cpuidle/cpuidle-riscv-sbi.c index d228b4d18d56..14462c092039 100644 --- a/drivers/cpuidle/cpuidle-riscv-sbi.c +++ b/drivers/cpuidle/cpuidle-riscv-sbi.c @@ -26,6 +26,7 @@ #include <asm/smp.h> #include <asm/suspend.h> +#include "cpuidle.h" #include "dt_idle_states.h" #include "dt_idle_genpd.h" @@ -329,6 +330,9 @@ static int sbi_cpuidle_init_cpu(struct device *dev, int cpu) return ret; } + if (cpuidle_disabled()) + return 0; + ret = cpuidle_register(drv, NULL); if (ret) goto deinit; @@ -538,7 +542,10 @@ static int sbi_cpuidle_probe(struct platform_device *pdev) /* Setup CPU hotplut notifiers */ sbi_idle_init_cpuhp(); - pr_info("idle driver registered for all CPUs\n"); + if (cpuidle_disabled()) + pr_info("cpuidle is disabled\n"); + else + pr_info("idle driver registered for all CPUs\n"); return 0; @@ -582,4 +589,4 @@ static int __init sbi_cpuidle_init(void) return 0; } -device_initcall(sbi_cpuidle_init); +arch_initcall(sbi_cpuidle_init); diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index d9ec1e69e428..e994d6e0779e 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -378,6 +378,20 @@ config LOONGSON1_APB_DMA This selects support for the APB DMA controller in Loongson1 SoCs, which is required by Loongson1 NAND and audio support. +config LOONGSON2_APB_DMA + tristate "Loongson2 APB DMA support" + depends on LOONGARCH || COMPILE_TEST + select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS + help + Support for the Loongson2 APB DMA controller driver. The + DMA controller is having single DMA channel which can be + configured for different peripherals like audio, nand, sdio + etc which is in APB bus. + + This DMA controller transfers data from memory to peripheral fifo. + It does not support memory to memory data transfer. + config LPC18XX_DMAMUX bool "NXP LPC18xx/43xx DMA MUX for PL080" depends on ARCH_LPC18XX || COMPILE_TEST @@ -396,20 +410,6 @@ config LPC32XX_DMAMUX Support for PL080 multiplexed DMA request lines on LPC32XX platrofm. -config LS2X_APB_DMA - tristate "Loongson LS2X APB DMA support" - depends on LOONGARCH || COMPILE_TEST - select DMA_ENGINE - select DMA_VIRTUAL_CHANNELS - help - Support for the Loongson LS2X APB DMA controller driver. The - DMA controller is having single DMA channel which can be - configured for different peripherals like audio, nand, sdio - etc which is in APB bus. - - This DMA controller transfers data from memory to peripheral fifo. - It does not support memory to memory data transfer. 
- config MCF_EDMA tristate "Freescale eDMA engine support, ColdFire mcf5441x SoCs" depends on M5441x || (COMPILE_TEST && FSL_EDMA=n) diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index ad6a03c052ec..5b2a52f4f2ee 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -50,9 +50,9 @@ obj-$(CONFIG_INTEL_IOATDMA) += ioat/ obj-y += idxd/ obj-$(CONFIG_K3_DMA) += k3dma.o obj-$(CONFIG_LOONGSON1_APB_DMA) += loongson1-apb-dma.o +obj-$(CONFIG_LOONGSON2_APB_DMA) += loongson2-apb-dma.o obj-$(CONFIG_LPC18XX_DMAMUX) += lpc18xx-dmamux.o obj-$(CONFIG_LPC32XX_DMAMUX) += lpc32xx-dmamux.o -obj-$(CONFIG_LS2X_APB_DMA) += ls2x-apb-dma.o obj-$(CONFIG_MILBEAUT_HDMAC) += milbeaut-hdmac.o obj-$(CONFIG_MILBEAUT_XDMAC) += milbeaut-xdmac.o obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c index a58a1600dd65..2abbe11e797e 100644 --- a/drivers/dma/acpi-dma.c +++ b/drivers/dma/acpi-dma.c @@ -9,18 +9,21 @@ * Mika Westerberg <mika.westerberg@linux.intel.com> */ +#include <linux/acpi.h> +#include <linux/acpi_dma.h> #include <linux/device.h> #include <linux/dma-mapping.h> #include <linux/err.h> -#include <linux/module.h> +#include <linux/errno.h> +#include <linux/export.h> +#include <linux/ioport.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/mutex.h> -#include <linux/slab.h> -#include <linux/ioport.h> -#include <linux/acpi.h> -#include <linux/acpi_dma.h> #include <linux/property.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/types.h> static LIST_HEAD(acpi_dma_list); static DEFINE_MUTEX(acpi_dma_lock); @@ -236,7 +239,7 @@ int acpi_dma_controller_free(struct device *dev) } EXPORT_SYMBOL_GPL(acpi_dma_controller_free); -static void devm_acpi_dma_release(struct device *dev, void *res) +static void devm_acpi_dma_free(void *dev) { acpi_dma_controller_free(dev); } @@ -259,37 +262,15 @@ int devm_acpi_dma_controller_register(struct device *dev, (struct acpi_dma_spec *, struct acpi_dma *), void *data) { - void *res; int ret; - res = devres_alloc(devm_acpi_dma_release, 0, GFP_KERNEL); - if (!res) - return -ENOMEM; - ret = acpi_dma_controller_register(dev, acpi_dma_xlate, data); - if (ret) { - devres_free(res); + if (ret) return ret; - } - devres_add(dev, res); - return 0; -} -EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_register); -/** - * devm_acpi_dma_controller_free - resource managed acpi_dma_controller_free() - * @dev: device that is unregistering as DMA controller - * - * Unregister a DMA controller registered with - * devm_acpi_dma_controller_register(). Normally this function will not need to - * be called and the resource management code will ensure that the resource is - * freed. 
- */ -void devm_acpi_dma_controller_free(struct device *dev) -{ - WARN_ON(devres_release(dev, devm_acpi_dma_release, NULL, NULL)); + return devm_add_action_or_reset(dev, devm_acpi_dma_free, dev); } -EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_free); +EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_register); /** * acpi_dma_update_dma_spec - prepare dma specifier to pass to translation function diff --git a/drivers/dma/altera-msgdma.c b/drivers/dma/altera-msgdma.c index e6a6566b309e..a203fdd84950 100644 --- a/drivers/dma/altera-msgdma.c +++ b/drivers/dma/altera-msgdma.c @@ -954,7 +954,7 @@ static struct platform_driver msgdma_driver = { .of_match_table = of_match_ptr(msgdma_match), }, .probe = msgdma_probe, - .remove_new = msgdma_remove, + .remove = msgdma_remove, }; module_platform_driver(msgdma_driver); diff --git a/drivers/dma/amd/qdma/qdma.c b/drivers/dma/amd/qdma/qdma.c index b0a1f3ad851b..6d9079458fe9 100644 --- a/drivers/dma/amd/qdma/qdma.c +++ b/drivers/dma/amd/qdma/qdma.c @@ -1133,7 +1133,7 @@ static struct platform_driver amd_qdma_driver = { .name = "amd-qdma", }, .probe = amd_qdma_probe, - .remove_new = amd_qdma_remove, + .remove = amd_qdma_remove, }; module_platform_driver(amd_qdma_driver); diff --git a/drivers/dma/apple-admac.c b/drivers/dma/apple-admac.c index 9588773dd2eb..c499173d80b2 100644 --- a/drivers/dma/apple-admac.c +++ b/drivers/dma/apple-admac.c @@ -950,7 +950,7 @@ static struct platform_driver apple_admac_driver = { .of_match_table = admac_of_match, }, .probe = admac_probe, - .remove_new = admac_remove, + .remove = admac_remove, }; module_platform_driver(apple_admac_driver); diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index baebddc740b0..2d147712cbc6 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -2250,7 +2250,7 @@ static const struct dev_pm_ops __maybe_unused at_dma_dev_pm_ops = { }; static struct platform_driver at_dma_driver = { - .remove_new = at_dma_remove, + .remove = at_dma_remove, .shutdown = at_dma_shutdown, .id_table = atdma_devtypes, .driver = { diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index 299396121e6d..9c7b40220004 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -2476,7 +2476,7 @@ MODULE_DEVICE_TABLE(of, atmel_xdmac_dt_ids); static struct platform_driver at_xdmac_driver = { .probe = at_xdmac_probe, - .remove_new = at_xdmac_remove, + .remove = at_xdmac_remove, .driver = { .name = "at_xdmac", .of_match_table = of_match_ptr(atmel_xdmac_dt_ids), diff --git a/drivers/dma/bcm-sba-raid.c b/drivers/dma/bcm-sba-raid.c index cfa6e1167a1f..7f0e76439ce5 100644 --- a/drivers/dma/bcm-sba-raid.c +++ b/drivers/dma/bcm-sba-raid.c @@ -1756,7 +1756,7 @@ MODULE_DEVICE_TABLE(of, sba_of_match); static struct platform_driver sba_driver = { .probe = sba_probe, - .remove_new = sba_remove, + .remove = sba_remove, .driver = { .name = "bcm-sba-raid", .of_match_table = sba_of_match, diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c index e1b92b4d7b05..7ba52dee40a9 100644 --- a/drivers/dma/bcm2835-dma.c +++ b/drivers/dma/bcm2835-dma.c @@ -1029,7 +1029,7 @@ static void bcm2835_dma_remove(struct platform_device *pdev) static struct platform_driver bcm2835_dma_driver = { .probe = bcm2835_dma_probe, - .remove_new = bcm2835_dma_remove, + .remove = bcm2835_dma_remove, .driver = { .name = "bcm2835-dma", .of_match_table = of_match_ptr(bcm2835_dma_of_match), diff --git a/drivers/dma/bestcomm/bestcomm.c b/drivers/dma/bestcomm/bestcomm.c index 0bbaa7620bdd..6c4d655ffe77 100644 --- 
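
/*
 * Editor's illustrative sketch (not part of the patch): the general
 * devm_add_action_or_reset() idiom the acpi-dma conversion above switches
 * to. If registering the cleanup action fails, the helper runs it
 * immediately, so no manual devres_alloc()/devres_add()/devres_free()
 * bookkeeping is needed. Kernel-context pseudocode; the "example_*" helpers
 * are hypothetical placeholders, not real kernel APIs.
 */
static void example_teardown(void *data)
{
        example_controller_unregister(data);    /* hypothetical cleanup helper */
}

static int example_setup(struct device *dev, void *data)
{
        int ret;

        ret = example_controller_register(dev, data);  /* hypothetical */
        if (ret)
                return ret;

        /* Runs example_teardown(data) on driver detach, or right away on failure. */
        return devm_add_action_or_reset(dev, example_teardown, data);
}
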
a/drivers/dma/bestcomm/bestcomm.c +++ b/drivers/dma/bestcomm/bestcomm.c @@ -486,7 +486,7 @@ MODULE_DEVICE_TABLE(of, mpc52xx_bcom_of_match); static struct platform_driver mpc52xx_bcom_of_platform_driver = { .probe = mpc52xx_bcom_probe, - .remove_new = mpc52xx_bcom_remove, + .remove = mpc52xx_bcom_remove, .driver = { .name = DRIVER_NAME, .of_match_table = mpc52xx_bcom_of_match, diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c index c9cfa341db51..100057603fd4 100644 --- a/drivers/dma/dma-jz4780.c +++ b/drivers/dma/dma-jz4780.c @@ -1122,7 +1122,7 @@ MODULE_DEVICE_TABLE(of, jz4780_dma_dt_match); static struct platform_driver jz4780_dma_driver = { .probe = jz4780_dma_probe, - .remove_new = jz4780_dma_remove, + .remove = jz4780_dma_remove, .driver = { .name = "jz4780-dma", .of_match_table = jz4780_dma_dt_match, diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c index fffafa86d964..b23536645ff7 100644 --- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c +++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c @@ -1676,7 +1676,7 @@ MODULE_DEVICE_TABLE(of, dw_dma_of_id_table); static struct platform_driver dw_driver = { .probe = dw_probe, - .remove_new = dw_remove, + .remove = dw_remove, .driver = { .name = KBUILD_MODNAME, .of_match_table = dw_dma_of_id_table, diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c index 47c58ad468cb..2606cf9cd429 100644 --- a/drivers/dma/dw/platform.c +++ b/drivers/dma/dw/platform.c @@ -191,7 +191,7 @@ static const struct dev_pm_ops dw_dev_pm_ops = { static struct platform_driver dw_driver = { .probe = dw_probe, - .remove_new = dw_remove, + .remove = dw_remove, .shutdown = dw_shutdown, .driver = { .name = DRV_NAME, diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c index 6b98a23e3332..e424bb5c40e7 100644 --- a/drivers/dma/ep93xx_dma.c +++ b/drivers/dma/ep93xx_dma.c @@ -929,8 +929,7 @@ static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan) /* Sanity check the channel parameters */ if (!edmac->edma->m2m) { - if (edmac->dma_cfg.port < EP93XX_DMA_I2S1 || - edmac->dma_cfg.port > EP93XX_DMA_IRDA) + if (edmac->dma_cfg.port > EP93XX_DMA_IRDA) return -EINVAL; if (edmac->dma_cfg.dir != ep93xx_dma_chan_direction(chan)) return -EINVAL; diff --git a/drivers/dma/fsl-edma-main.c b/drivers/dma/fsl-edma-main.c index f9f1eda79254..60de1003193a 100644 --- a/drivers/dma/fsl-edma-main.c +++ b/drivers/dma/fsl-edma-main.c @@ -740,7 +740,7 @@ static struct platform_driver fsl_edma_driver = { .pm = &fsl_edma_pm_ops, }, .probe = fsl_edma_probe, - .remove_new = fsl_edma_remove, + .remove = fsl_edma_remove, }; static int __init fsl_edma_init(void) diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c index 5005e138fc23..823f5c6bc2e1 100644 --- a/drivers/dma/fsl-qdma.c +++ b/drivers/dma/fsl-qdma.c @@ -1288,7 +1288,7 @@ static struct platform_driver fsl_qdma_driver = { .of_match_table = fsl_qdma_dt_ids, }, .probe = fsl_qdma_probe, - .remove_new = fsl_qdma_remove, + .remove = fsl_qdma_remove, }; module_platform_driver(fsl_qdma_driver); diff --git a/drivers/dma/fsl_raid.c b/drivers/dma/fsl_raid.c index 014ff523d5ec..6aa97e258a55 100644 --- a/drivers/dma/fsl_raid.c +++ b/drivers/dma/fsl_raid.c @@ -886,7 +886,7 @@ static struct platform_driver fsl_re_driver = { .of_match_table = fsl_re_ids, }, .probe = fsl_re_probe, - .remove_new = fsl_re_remove, + .remove = fsl_re_remove, }; module_platform_driver(fsl_re_driver); diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 
18a6c4bf6275..b5e7d18b9766 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -1404,7 +1404,7 @@ static struct platform_driver fsldma_of_driver = { #endif }, .probe = fsldma_of_probe, - .remove_new = fsldma_of_remove, + .remove = fsldma_of_remove, }; /*----------------------------------------------------------------------------*/ diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c index 3c648308a54a..d147353d47ab 100644 --- a/drivers/dma/idma64.c +++ b/drivers/dma/idma64.c @@ -693,7 +693,7 @@ static const struct dev_pm_ops idma64_dev_pm_ops = { static struct platform_driver idma64_platform_driver = { .probe = idma64_platform_probe, - .remove_new = idma64_platform_remove, + .remove = idma64_platform_remove, .driver = { .name = LPSS_IDMA64_DRIVER_NAME, .pm = &idma64_dev_pm_ops, diff --git a/drivers/dma/idxd/registers.h b/drivers/dma/idxd/registers.h index e16dbf9ab324..c426511f2104 100644 --- a/drivers/dma/idxd/registers.h +++ b/drivers/dma/idxd/registers.h @@ -6,6 +6,10 @@ #include <uapi/linux/idxd.h> /* PCI Config */ +#define PCI_DEVICE_ID_INTEL_DSA_GNRD 0x11fb +#define PCI_DEVICE_ID_INTEL_DSA_DMR 0x1212 +#define PCI_DEVICE_ID_INTEL_IAA_DMR 0x1216 + #define DEVICE_VERSION_1 0x100 #define DEVICE_VERSION_2 0x200 diff --git a/drivers/dma/img-mdc-dma.c b/drivers/dma/img-mdc-dma.c index 0532dd2640dc..4127c1bdcca7 100644 --- a/drivers/dma/img-mdc-dma.c +++ b/drivers/dma/img-mdc-dma.c @@ -1076,7 +1076,7 @@ static struct platform_driver mdc_dma_driver = { .of_match_table = of_match_ptr(mdc_dma_of_match), }, .probe = mdc_dma_probe, - .remove_new = mdc_dma_remove, + .remove = mdc_dma_remove, }; module_platform_driver(mdc_dma_driver); diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index e913f0db99da..a651e0995ce8 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c @@ -1233,7 +1233,7 @@ static struct platform_driver imxdma_driver = { .name = "imx-dma", .of_match_table = imx_dma_of_dev_id, }, - .remove_new = imxdma_remove, + .remove = imxdma_remove, }; static int __init imxdma_module_init(void) diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 72299a08af44..3449006cd14b 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -2440,7 +2440,7 @@ static struct platform_driver sdma_driver = { .name = "imx-sdma", .of_match_table = sdma_dt_ids, }, - .remove_new = sdma_remove, + .remove = sdma_remove, .probe = sdma_probe, }; diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c index 5de8c21d41e7..acc2983e28e0 100644 --- a/drivers/dma/k3dma.c +++ b/drivers/dma/k3dma.c @@ -1028,7 +1028,7 @@ static struct platform_driver k3_pdma_driver = { .of_match_table = k3_pdma_dt_ids, }, .probe = k3_dma_probe, - .remove_new = k3_dma_remove, + .remove = k3_dma_remove, }; module_platform_driver(k3_pdma_driver); diff --git a/drivers/dma/ls2x-apb-dma.c b/drivers/dma/loongson2-apb-dma.c index 9652e8666722..367ed34ce4da 100644 --- a/drivers/dma/ls2x-apb-dma.c +++ b/drivers/dma/loongson2-apb-dma.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* - * Driver for the Loongson LS2X APB DMA Controller + * Driver for the Loongson-2 APB DMA Controller * * Copyright (C) 2017-2023 Loongson Corporation */ @@ -692,7 +692,7 @@ MODULE_DEVICE_TABLE(of, ls2x_dma_of_match_table); static struct platform_driver ls2x_dmac_driver = { .probe = ls2x_dma_probe, - .remove_new = ls2x_dma_remove, + .remove = ls2x_dma_remove, .driver = { .name = "ls2x-apbdma", .of_match_table = ls2x_dma_of_match_table, @@ -700,6 +700,6 @@ static struct platform_driver ls2x_dmac_driver = { 
}; module_platform_driver(ls2x_dmac_driver); -MODULE_DESCRIPTION("Loongson LS2X APB DMA Controller driver"); +MODULE_DESCRIPTION("Loongson-2 APB DMA Controller driver"); MODULE_AUTHOR("Loongson Technology Corporation Limited"); MODULE_LICENSE("GPL"); diff --git a/drivers/dma/mcf-edma-main.c b/drivers/dma/mcf-edma-main.c index 0c5862bf26f8..9e1c6400c77b 100644 --- a/drivers/dma/mcf-edma-main.c +++ b/drivers/dma/mcf-edma-main.c @@ -267,7 +267,7 @@ static struct platform_driver mcf_edma_driver = { .name = "mcf-edma", }, .probe = mcf_edma_probe, - .remove_new = mcf_edma_remove, + .remove = mcf_edma_remove, }; bool mcf_edma_filter_fn(struct dma_chan *chan, void *param) diff --git a/drivers/dma/mediatek/mtk-cqdma.c b/drivers/dma/mediatek/mtk-cqdma.c index b69eabf12a24..d5ddb4e30e71 100644 --- a/drivers/dma/mediatek/mtk-cqdma.c +++ b/drivers/dma/mediatek/mtk-cqdma.c @@ -922,7 +922,7 @@ static void mtk_cqdma_remove(struct platform_device *pdev) static struct platform_driver mtk_cqdma_driver = { .probe = mtk_cqdma_probe, - .remove_new = mtk_cqdma_remove, + .remove = mtk_cqdma_remove, .driver = { .name = KBUILD_MODNAME, .of_match_table = mtk_cqdma_match, diff --git a/drivers/dma/mediatek/mtk-hsdma.c b/drivers/dma/mediatek/mtk-hsdma.c index 58c7961ab9ad..fa77bb24a430 100644 --- a/drivers/dma/mediatek/mtk-hsdma.c +++ b/drivers/dma/mediatek/mtk-hsdma.c @@ -1038,7 +1038,7 @@ static void mtk_hsdma_remove(struct platform_device *pdev) static struct platform_driver mtk_hsdma_driver = { .probe = mtk_hsdma_probe, - .remove_new = mtk_hsdma_remove, + .remove = mtk_hsdma_remove, .driver = { .name = KBUILD_MODNAME, .of_match_table = mtk_hsdma_match, diff --git a/drivers/dma/mediatek/mtk-uart-apdma.c b/drivers/dma/mediatek/mtk-uart-apdma.c index 1bdc1500be40..08e15177427b 100644 --- a/drivers/dma/mediatek/mtk-uart-apdma.c +++ b/drivers/dma/mediatek/mtk-uart-apdma.c @@ -637,7 +637,7 @@ static const struct dev_pm_ops mtk_uart_apdma_pm_ops = { static struct platform_driver mtk_uart_apdma_driver = { .probe = mtk_uart_apdma_probe, - .remove_new = mtk_uart_apdma_remove, + .remove = mtk_uart_apdma_remove, .driver = { .name = KBUILD_MODNAME, .pm = &mtk_uart_apdma_pm_ops, diff --git a/drivers/dma/milbeaut-hdmac.c b/drivers/dma/milbeaut-hdmac.c index 7b41c670970a..9a5ec247ed6d 100644 --- a/drivers/dma/milbeaut-hdmac.c +++ b/drivers/dma/milbeaut-hdmac.c @@ -571,7 +571,7 @@ MODULE_DEVICE_TABLE(of, milbeaut_hdmac_match); static struct platform_driver milbeaut_hdmac_driver = { .probe = milbeaut_hdmac_probe, - .remove_new = milbeaut_hdmac_remove, + .remove = milbeaut_hdmac_remove, .driver = { .name = "milbeaut-m10v-hdmac", .of_match_table = milbeaut_hdmac_match, diff --git a/drivers/dma/milbeaut-xdmac.c b/drivers/dma/milbeaut-xdmac.c index 2cce529b448e..58d4fd6df0bf 100644 --- a/drivers/dma/milbeaut-xdmac.c +++ b/drivers/dma/milbeaut-xdmac.c @@ -409,7 +409,7 @@ MODULE_DEVICE_TABLE(of, milbeaut_xdmac_match); static struct platform_driver milbeaut_xdmac_driver = { .probe = milbeaut_xdmac_probe, - .remove_new = milbeaut_xdmac_remove, + .remove = milbeaut_xdmac_remove, .driver = { .name = "milbeaut-m10v-xdmac", .of_match_table = milbeaut_xdmac_match, diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c index 136fcaeff8dd..a95d31103d30 100644 --- a/drivers/dma/mmp_pdma.c +++ b/drivers/dma/mmp_pdma.c @@ -1137,7 +1137,7 @@ static struct platform_driver mmp_pdma_driver = { }, .id_table = mmp_pdma_id_table, .probe = mmp_pdma_probe, - .remove_new = mmp_pdma_remove, + .remove = mmp_pdma_remove, }; 
module_platform_driver(mmp_pdma_driver); diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c index b76fe99e1151..c8dc504510f1 100644 --- a/drivers/dma/mmp_tdma.c +++ b/drivers/dma/mmp_tdma.c @@ -736,7 +736,7 @@ static struct platform_driver mmp_tdma_driver = { .of_match_table = mmp_tdma_dt_ids, }, .probe = mmp_tdma_probe, - .remove_new = mmp_tdma_remove, + .remove = mmp_tdma_remove, }; module_platform_driver(mmp_tdma_driver); diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c index 66dc6d31b603..de09e1ab7767 100644 --- a/drivers/dma/moxart-dma.c +++ b/drivers/dma/moxart-dma.c @@ -644,7 +644,7 @@ MODULE_DEVICE_TABLE(of, moxart_dma_match); static struct platform_driver moxart_driver = { .probe = moxart_probe, - .remove_new = moxart_remove, + .remove = moxart_remove, .driver = { .name = "moxart-dma-engine", .of_match_table = moxart_dma_match, diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c index 68c247a46321..bf131cb5db66 100644 --- a/drivers/dma/mpc512x_dma.c +++ b/drivers/dma/mpc512x_dma.c @@ -1110,7 +1110,7 @@ MODULE_DEVICE_TABLE(of, mpc_dma_match); static struct platform_driver mpc_dma_driver = { .probe = mpc_dma_probe, - .remove_new = mpc_dma_remove, + .remove = mpc_dma_remove, .driver = { .name = DRV_NAME, .of_match_table = mpc_dma_match, diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c index c8c67f4d982c..cad4d4fb51ac 100644 --- a/drivers/dma/mv_xor_v2.c +++ b/drivers/dma/mv_xor_v2.c @@ -635,7 +635,7 @@ static int mv_xor_v2_descq_init(struct mv_xor_v2_device *xor_dev) writel(MV_XOR_V2_DESC_NUM, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_SIZE_OFF); - /* write the DESQ address to the DMA enngine*/ + /* write the DESQ address to the DMA engine*/ writel(lower_32_bits(xor_dev->hw_desq), xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BALR_OFF); writel(upper_32_bits(xor_dev->hw_desq), @@ -884,7 +884,7 @@ static struct platform_driver mv_xor_v2_driver = { .probe = mv_xor_v2_probe, .suspend = mv_xor_v2_suspend, .resume = mv_xor_v2_resume, - .remove_new = mv_xor_v2_remove, + .remove = mv_xor_v2_remove, .driver = { .name = "mv_xor_v2", .of_match_table = of_match_ptr(mv_xor_v2_dt_ids), diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c index 3b011a91d48e..0d6324c4e2be 100644 --- a/drivers/dma/nbpfaxi.c +++ b/drivers/dma/nbpfaxi.c @@ -1515,7 +1515,7 @@ static struct platform_driver nbpf_driver = { }, .id_table = nbpf_ids, .probe = nbpf_probe, - .remove_new = nbpf_remove, + .remove = nbpf_remove, }; module_platform_driver(nbpf_driver); diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c index aa436f9e3571..57cec757d8f5 100644 --- a/drivers/dma/owl-dma.c +++ b/drivers/dma/owl-dma.c @@ -1252,7 +1252,7 @@ static void owl_dma_remove(struct platform_device *pdev) static struct platform_driver owl_dma_driver = { .probe = owl_dma_probe, - .remove_new = owl_dma_remove, + .remove = owl_dma_remove, .driver = { .name = "dma-owl", .of_match_table = of_match_ptr(owl_dma_match), diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c index 7b78759ac734..9d2a5a967a99 100644 --- a/drivers/dma/ppc4xx/adma.c +++ b/drivers/dma/ppc4xx/adma.c @@ -4549,7 +4549,7 @@ MODULE_DEVICE_TABLE(of, ppc440spe_adma_of_match); static struct platform_driver ppc440spe_adma_driver = { .probe = ppc440spe_adma_probe, - .remove_new = ppc440spe_adma_remove, + .remove = ppc440spe_adma_remove, .driver = { .name = "PPC440SP(E)-ADMA", .of_match_table = ppc440spe_adma_of_match, diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c index 31f8da810c05..e50cf3357e5e 100644 
--- a/drivers/dma/pxa_dma.c +++ b/drivers/dma/pxa_dma.c @@ -1442,7 +1442,7 @@ static struct platform_driver pxad_driver = { }, .id_table = pxad_id_table, .probe = pxad_probe, - .remove_new = pxad_remove, + .remove = pxad_remove, }; static bool pxad_filter_fn(struct dma_chan *chan, void *param) diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c index d43a881e43b9..bbc3276992bb 100644 --- a/drivers/dma/qcom/bam_dma.c +++ b/drivers/dma/qcom/bam_dma.c @@ -1469,7 +1469,7 @@ static const struct dev_pm_ops bam_dma_pm_ops = { static struct platform_driver bam_dma_driver = { .probe = bam_dma_probe, - .remove_new = bam_dma_remove, + .remove = bam_dma_remove, .driver = { .name = "bam-dma-engine", .pm = &bam_dma_pm_ops, diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c index 4d2cd8d9ec74..c2b3e4452e71 100644 --- a/drivers/dma/qcom/hidma.c +++ b/drivers/dma/qcom/hidma.c @@ -948,7 +948,7 @@ MODULE_DEVICE_TABLE(acpi, hidma_acpi_ids); static struct platform_driver hidma_driver = { .probe = hidma_probe, - .remove_new = hidma_remove, + .remove = hidma_remove, .shutdown = hidma_shutdown, .driver = { .name = "hidma", diff --git a/drivers/dma/qcom/qcom_adm.c b/drivers/dma/qcom/qcom_adm.c index c1db398adc84..6be54fddcee1 100644 --- a/drivers/dma/qcom/qcom_adm.c +++ b/drivers/dma/qcom/qcom_adm.c @@ -937,7 +937,7 @@ MODULE_DEVICE_TABLE(of, adm_of_match); static struct platform_driver adm_dma_driver = { .probe = adm_dma_probe, - .remove_new = adm_dma_remove, + .remove = adm_dma_remove, .driver = { .name = "adm-dma-engine", .of_match_table = adm_of_match, diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c index 01e656c69e6c..dc1a9a05252e 100644 --- a/drivers/dma/sa11x0-dma.c +++ b/drivers/dma/sa11x0-dma.c @@ -1079,7 +1079,7 @@ static struct platform_driver sa11x0_dma_driver = { .pm = &sa11x0_dma_pm_ops, }, .probe = sa11x0_dma_probe, - .remove_new = sa11x0_dma_remove, + .remove = sa11x0_dma_remove, }; static int __init sa11x0_dma_init(void) diff --git a/drivers/dma/sf-pdma/sf-pdma.c b/drivers/dma/sf-pdma/sf-pdma.c index 428473611115..7ad3c29be146 100644 --- a/drivers/dma/sf-pdma/sf-pdma.c +++ b/drivers/dma/sf-pdma/sf-pdma.c @@ -354,7 +354,7 @@ static irqreturn_t sf_pdma_done_isr(int irq, void *dev_id) if (!residue) { tasklet_hi_schedule(&chan->done_tasklet); } else { - /* submit next trascatioin if possible */ + /* submit next transaction if possible */ struct sf_pdma_desc *desc = chan->desc; desc->src_addr += desc->xfer_size - residue; @@ -633,7 +633,7 @@ MODULE_DEVICE_TABLE(of, sf_pdma_dt_ids); static struct platform_driver sf_pdma_driver = { .probe = sf_pdma_probe, - .remove_new = sf_pdma_remove, + .remove = sf_pdma_remove, .driver = { .name = "sf-pdma", .of_match_table = sf_pdma_dt_ids, diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig index c0b2997ab7fd..6ea5a880b433 100644 --- a/drivers/dma/sh/Kconfig +++ b/drivers/dma/sh/Kconfig @@ -49,10 +49,10 @@ config RENESAS_USB_DMAC SoCs. config RZ_DMAC - tristate "Renesas RZ/{G2L,V2L} DMA Controller" - depends on ARCH_RZG2L || COMPILE_TEST + tristate "Renesas RZ DMA Controller" + depends on ARCH_R7S72100 || ARCH_RZG2L || COMPILE_TEST select RENESAS_DMA select DMA_VIRTUAL_CHANNELS help - This driver supports the general purpose DMA controller found in the - Renesas RZ/{G2L,V2L} SoC variants. + This driver supports the general purpose DMA controller typically + found in the Renesas RZ SoC variants. 
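The .remove_new to .remove changes collected above and below all apply the same treewide dmaengine conversion: the platform_driver remove callback now returns void, so the transitional .remove_new member is dropped and the same void-returning callback is wired to .remove. A minimal sketch of the resulting driver shape follows; it uses a hypothetical foo_dma driver whose names, struct, and helpers are illustrative only and do not come from any patch in this series.

#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

/* Hypothetical driver data; names are illustrative only. */
struct foo_dma_dev {
	struct dma_device ddev;
};

static int foo_dma_probe(struct platform_device *pdev)
{
	struct foo_dma_dev *fdev;

	fdev = devm_kzalloc(&pdev->dev, sizeof(*fdev), GFP_KERNEL);
	if (!fdev)
		return -ENOMEM;

	platform_set_drvdata(pdev, fdev);
	/* a real driver would set up its channels and call
	 * dma_async_device_register(&fdev->ddev) here
	 */
	return 0;
}

/* The remove callback returns void, so teardown is unconditional. */
static void foo_dma_remove(struct platform_device *pdev)
{
	struct foo_dma_dev *fdev = platform_get_drvdata(pdev);

	/* a real driver would call dma_async_device_unregister(&fdev->ddev)
	 * and release its channels/IRQs here
	 */
	(void)fdev;
}

static struct platform_driver foo_dma_driver = {
	.probe	= foo_dma_probe,
	.remove	= foo_dma_remove,	/* previously .remove_new */
	.driver	= {
		.name = "foo-dma",
	},
};
module_platform_driver(foo_dma_driver);

MODULE_DESCRIPTION("Illustrative platform driver skeleton");
MODULE_LICENSE("GPL");

Only the struct member name changes in these hunks; the callback bodies and their void return type are exactly what the drivers already had bound to .remove_new.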
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c index 1094a2f82164..2679c1f09faf 100644 --- a/drivers/dma/sh/rcar-dmac.c +++ b/drivers/dma/sh/rcar-dmac.c @@ -2037,7 +2037,7 @@ static struct platform_driver rcar_dmac_driver = { .of_match_table = rcar_dmac_of_ids, }, .probe = rcar_dmac_probe, - .remove_new = rcar_dmac_remove, + .remove = rcar_dmac_remove, .shutdown = rcar_dmac_shutdown, }; diff --git a/drivers/dma/sh/rz-dmac.c b/drivers/dma/sh/rz-dmac.c index 811389fc9cb8..9235db551026 100644 --- a/drivers/dma/sh/rz-dmac.c +++ b/drivers/dma/sh/rz-dmac.c @@ -893,7 +893,7 @@ static int rz_dmac_probe(struct platform_device *pdev) /* Initialize the channels. */ INIT_LIST_HEAD(&dmac->engine.channels); - dmac->rstc = devm_reset_control_array_get_exclusive(&pdev->dev); + dmac->rstc = devm_reset_control_array_get_optional_exclusive(&pdev->dev); if (IS_ERR(dmac->rstc)) return dev_err_probe(&pdev->dev, PTR_ERR(dmac->rstc), "failed to get resets\n"); @@ -1004,7 +1004,7 @@ static struct platform_driver rz_dmac_driver = { .of_match_table = of_rz_dmac_match, }, .probe = rz_dmac_probe, - .remove_new = rz_dmac_remove, + .remove = rz_dmac_remove, }; module_platform_driver(rz_dmac_driver); diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c index 588c5f409a80..fdd41e1c2263 100644 --- a/drivers/dma/sh/shdma-base.c +++ b/drivers/dma/sh/shdma-base.c @@ -961,7 +961,7 @@ void shdma_chan_probe(struct shdma_dev *sdev, spin_lock_init(&schan->chan_lock); - /* Init descripter manage list */ + /* Init descriptor manage list */ INIT_LIST_HEAD(&schan->ld_queue); INIT_LIST_HEAD(&schan->ld_free); diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c index 8ead0a1fd237..093e449e19ee 100644 --- a/drivers/dma/sh/shdmac.c +++ b/drivers/dma/sh/shdmac.c @@ -906,7 +906,7 @@ static struct platform_driver sh_dmae_driver = { .pm = &sh_dmae_pm, .name = SH_DMAE_DRV_NAME, }, - .remove_new = sh_dmae_remove, + .remove = sh_dmae_remove, }; static int __init sh_dmae_init(void) diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c index f7cd0cad056c..7e2b6c97fa2f 100644 --- a/drivers/dma/sh/usb-dmac.c +++ b/drivers/dma/sh/usb-dmac.c @@ -301,7 +301,7 @@ static struct usb_dmac_desc *usb_dmac_desc_get(struct usb_dmac_chan *chan, struct usb_dmac_desc *desc = NULL; unsigned long flags; - /* Get a freed descritpor */ + /* Get a freed descriptor */ spin_lock_irqsave(&chan->vc.lock, flags); list_for_each_entry(desc, &chan->desc_freed, node) { if (sg_len <= desc->sg_allocated_len) { @@ -899,7 +899,7 @@ static struct platform_driver usb_dmac_driver = { .of_match_table = usb_dmac_of_ids, }, .probe = usb_dmac_probe, - .remove_new = usb_dmac_remove, + .remove = usb_dmac_remove, .shutdown = usb_dmac_shutdown, }; diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c index 3f54ff37c5e0..187a090463ce 100644 --- a/drivers/dma/sprd-dma.c +++ b/drivers/dma/sprd-dma.c @@ -1298,7 +1298,7 @@ static const struct dev_pm_ops sprd_dma_pm_ops = { static struct platform_driver sprd_dma_driver = { .probe = sprd_dma_probe, - .remove_new = sprd_dma_remove, + .remove = sprd_dma_remove, .driver = { .name = "sprd-dma", .of_match_table = sprd_dma_match, diff --git a/drivers/dma/st_fdma.c b/drivers/dma/st_fdma.c index 8880b5e336f8..c65ee0c7bfbd 100644 --- a/drivers/dma/st_fdma.c +++ b/drivers/dma/st_fdma.c @@ -858,7 +858,7 @@ static struct platform_driver st_fdma_platform_driver = { .of_match_table = st_fdma_match, }, .probe = st_fdma_probe, - .remove_new = st_fdma_remove, + .remove = st_fdma_remove, }; 
module_platform_driver(st_fdma_platform_driver); diff --git a/drivers/dma/stm32/stm32-dma3.c b/drivers/dma/stm32/stm32-dma3.c index 0be6e944df6f..0c6c4258b195 100644 --- a/drivers/dma/stm32/stm32-dma3.c +++ b/drivers/dma/stm32/stm32-dma3.c @@ -221,6 +221,8 @@ enum stm32_dma3_port_data_width { #define STM32_DMA3_DT_BREQ BIT(8) /* CTR2_BREQ */ #define STM32_DMA3_DT_PFREQ BIT(9) /* CTR2_PFREQ */ #define STM32_DMA3_DT_TCEM GENMASK(13, 12) /* CTR2_TCEM */ +#define STM32_DMA3_DT_NOPACK BIT(16) /* CTR1_PAM */ +#define STM32_DMA3_DT_NOREFACT BIT(17) /* struct stm32_dma3_chan .config_set bitfield */ #define STM32_DMA3_CFG_SET_DT BIT(0) @@ -228,6 +230,8 @@ enum stm32_dma3_port_data_width { #define STM32_DMA3_CFG_SET_BOTH (STM32_DMA3_CFG_SET_DT | STM32_DMA3_CFG_SET_DMA) #define STM32_DMA3_MAX_BLOCK_SIZE ALIGN_DOWN(CBR1_BNDT, 64) +#define STM32_DMA3_MAX_BURST_LEN (1 + min_t(u32, FIELD_MAX(CTR1_SBL_1), \ + FIELD_MAX(CTR1_DBL_1))) #define port_is_ahb(maxdw) ({ typeof(maxdw) (_maxdw) = (maxdw); \ ((_maxdw) != DW_INVALID) && ((_maxdw) == DW_32); }) #define port_is_axi(maxdw) ({ typeof(maxdw) (_maxdw) = (maxdw); \ @@ -293,6 +297,10 @@ struct stm32_dma3_chan { u32 dma_status; }; +struct stm32_dma3_pdata { + u32 axi_max_burst_len; +}; + struct stm32_dma3_ddata { struct dma_device dma_dev; void __iomem *base; @@ -301,6 +309,7 @@ struct stm32_dma3_ddata { u32 dma_channels; u32 dma_requests; enum stm32_dma3_port_data_width ports_max_dw[2]; + u32 axi_max_burst_len; }; static inline struct stm32_dma3_ddata *to_stm32_dma3_ddata(struct stm32_dma3_chan *chan) @@ -533,7 +542,8 @@ static enum dma_slave_buswidth stm32_dma3_get_max_dw(u32 chan_max_burst, return 1 << __ffs(len | addr | max_dw); } -static u32 stm32_dma3_get_max_burst(u32 len, enum dma_slave_buswidth dw, u32 chan_max_burst) +static u32 stm32_dma3_get_max_burst(u32 len, enum dma_slave_buswidth dw, + u32 chan_max_burst, u32 bus_max_burst) { u32 max_burst = chan_max_burst ? chan_max_burst / dw : 1; @@ -544,8 +554,9 @@ static u32 stm32_dma3_get_max_burst(u32 len, enum dma_slave_buswidth dw, u32 cha /* * HW doesn't modify the burst if burst size <= half of the fifo size. * If len is not a multiple of burst size, last burst is shortened by HW. + * Take care of maximum burst supported on interconnect bus. 
*/ - return max_burst; + return min_t(u32, max_burst, bus_max_burst); } static int stm32_dma3_chan_prep_hw(struct stm32_dma3_chan *chan, enum dma_transfer_direction dir, @@ -554,6 +565,7 @@ static int stm32_dma3_chan_prep_hw(struct stm32_dma3_chan *chan, enum dma_transf { struct stm32_dma3_ddata *ddata = to_stm32_dma3_ddata(chan); struct dma_device dma_device = ddata->dma_dev; + u32 src_max_burst = STM32_DMA3_MAX_BURST_LEN, dst_max_burst = STM32_DMA3_MAX_BURST_LEN; u32 sdw, ddw, sbl_max, dbl_max, tcem, init_dw, init_bl_max; u32 _ctr1 = 0, _ctr2 = 0; u32 ch_conf = chan->dt_config.ch_conf; @@ -594,10 +606,14 @@ static int stm32_dma3_chan_prep_hw(struct stm32_dma3_chan *chan, enum dma_transf _ctr1 |= CTR1_SINC; if (sap) _ctr1 |= CTR1_SAP; + if (port_is_axi(sap_max_dw)) /* AXI - apply axi maximum burst limitation */ + src_max_burst = ddata->axi_max_burst_len; if (FIELD_GET(STM32_DMA3_DT_DINC, tr_conf)) _ctr1 |= CTR1_DINC; if (dap) _ctr1 |= CTR1_DAP; + if (port_is_axi(dap_max_dw)) /* AXI - apply axi maximum burst limitation */ + dst_max_burst = ddata->axi_max_burst_len; _ctr2 |= FIELD_PREP(CTR2_REQSEL, chan->dt_config.req_line) & ~CTR2_SWREQ; if (FIELD_GET(STM32_DMA3_DT_BREQ, tr_conf)) @@ -617,11 +633,16 @@ static int stm32_dma3_chan_prep_hw(struct stm32_dma3_chan *chan, enum dma_transf /* Set destination (device) data width and burst */ ddw = min_t(u32, ddw, stm32_dma3_get_max_dw(chan->max_burst, dap_max_dw, len, dst_addr)); - dbl_max = min_t(u32, dbl_max, stm32_dma3_get_max_burst(len, ddw, chan->max_burst)); + dbl_max = min_t(u32, dbl_max, stm32_dma3_get_max_burst(len, ddw, chan->max_burst, + dst_max_burst)); /* Set source (memory) data width and burst */ sdw = stm32_dma3_get_max_dw(chan->max_burst, sap_max_dw, len, src_addr); - sbl_max = stm32_dma3_get_max_burst(len, sdw, chan->max_burst); + sbl_max = stm32_dma3_get_max_burst(len, sdw, chan->max_burst, src_max_burst); + if (!!FIELD_GET(STM32_DMA3_DT_NOPACK, tr_conf)) { + sdw = ddw; + sbl_max = dbl_max; + } _ctr1 |= FIELD_PREP(CTR1_SDW_LOG2, ilog2(sdw)); _ctr1 |= FIELD_PREP(CTR1_SBL_1, sbl_max - 1); @@ -647,11 +668,17 @@ static int stm32_dma3_chan_prep_hw(struct stm32_dma3_chan *chan, enum dma_transf /* Set source (device) data width and burst */ sdw = min_t(u32, sdw, stm32_dma3_get_max_dw(chan->max_burst, sap_max_dw, len, src_addr)); - sbl_max = min_t(u32, sbl_max, stm32_dma3_get_max_burst(len, sdw, chan->max_burst)); + sbl_max = min_t(u32, sbl_max, stm32_dma3_get_max_burst(len, sdw, chan->max_burst, + src_max_burst)); /* Set destination (memory) data width and burst */ ddw = stm32_dma3_get_max_dw(chan->max_burst, dap_max_dw, len, dst_addr); - dbl_max = stm32_dma3_get_max_burst(len, ddw, chan->max_burst); + dbl_max = stm32_dma3_get_max_burst(len, ddw, chan->max_burst, dst_max_burst); + if (!!FIELD_GET(STM32_DMA3_DT_NOPACK, tr_conf) || + ((_ctr2 & CTR2_PFREQ) && ddw > sdw)) { /* Packing to wider ddw not supported */ + ddw = sdw; + dbl_max = sbl_max; + } _ctr1 |= FIELD_PREP(CTR1_SDW_LOG2, ilog2(sdw)); _ctr1 |= FIELD_PREP(CTR1_SBL_1, sbl_max - 1); @@ -678,22 +705,24 @@ static int stm32_dma3_chan_prep_hw(struct stm32_dma3_chan *chan, enum dma_transf init_dw = sdw; init_bl_max = sbl_max; sdw = stm32_dma3_get_max_dw(chan->max_burst, sap_max_dw, len, src_addr); - sbl_max = stm32_dma3_get_max_burst(len, sdw, chan->max_burst); + sbl_max = stm32_dma3_get_max_burst(len, sdw, chan->max_burst, src_max_burst); if (chan->config_set & STM32_DMA3_CFG_SET_DMA) { sdw = min_t(u32, init_dw, sdw); - sbl_max = min_t(u32, init_bl_max, - 
stm32_dma3_get_max_burst(len, sdw, chan->max_burst)); + sbl_max = min_t(u32, init_bl_max, stm32_dma3_get_max_burst(len, sdw, + chan->max_burst, + src_max_burst)); } /* Set destination (memory) data width and burst */ init_dw = ddw; init_bl_max = dbl_max; ddw = stm32_dma3_get_max_dw(chan->max_burst, dap_max_dw, len, dst_addr); - dbl_max = stm32_dma3_get_max_burst(len, ddw, chan->max_burst); + dbl_max = stm32_dma3_get_max_burst(len, ddw, chan->max_burst, dst_max_burst); if (chan->config_set & STM32_DMA3_CFG_SET_DMA) { ddw = min_t(u32, init_dw, ddw); - dbl_max = min_t(u32, init_bl_max, - stm32_dma3_get_max_burst(len, ddw, chan->max_burst)); + dbl_max = min_t(u32, init_bl_max, stm32_dma3_get_max_burst(len, ddw, + chan->max_burst, + dst_max_burst)); } _ctr1 |= FIELD_PREP(CTR1_SDW_LOG2, ilog2(sdw)); @@ -1116,6 +1145,28 @@ static void stm32_dma3_free_chan_resources(struct dma_chan *c) chan->config_set = 0; } +static u32 stm32_dma3_get_ll_count(struct stm32_dma3_chan *chan, size_t len, bool prevent_refactor) +{ + u32 count; + + if (prevent_refactor) + return DIV_ROUND_UP(len, STM32_DMA3_MAX_BLOCK_SIZE); + + count = len / STM32_DMA3_MAX_BLOCK_SIZE; + len -= (len / STM32_DMA3_MAX_BLOCK_SIZE) * STM32_DMA3_MAX_BLOCK_SIZE; + + if (len >= chan->max_burst) { + count += 1; /* len < STM32_DMA3_MAX_BLOCK_SIZE here, so it fits in one item */ + len -= (len / chan->max_burst) * chan->max_burst; + } + + /* Unaligned remainder fits in one extra item */ + if (len > 0) + count += 1; + + return count; +} + static void stm32_dma3_init_chan_config_for_memcpy(struct stm32_dma3_chan *chan, dma_addr_t dst, dma_addr_t src) { @@ -1150,8 +1201,10 @@ static struct dma_async_tx_descriptor *stm32_dma3_prep_dma_memcpy(struct dma_cha struct stm32_dma3_swdesc *swdesc; size_t next_size, offset; u32 count, i, ctr1, ctr2; + bool prevent_refactor = !!FIELD_GET(STM32_DMA3_DT_NOPACK, chan->dt_config.tr_conf) || + !!FIELD_GET(STM32_DMA3_DT_NOREFACT, chan->dt_config.tr_conf); - count = DIV_ROUND_UP(len, STM32_DMA3_MAX_BLOCK_SIZE); + count = stm32_dma3_get_ll_count(chan, len, prevent_refactor); swdesc = stm32_dma3_chan_desc_alloc(chan, count); if (!swdesc) @@ -1167,6 +1220,10 @@ static struct dma_async_tx_descriptor *stm32_dma3_prep_dma_memcpy(struct dma_cha remaining = len - offset; next_size = min_t(size_t, remaining, STM32_DMA3_MAX_BLOCK_SIZE); + if (!prevent_refactor && + (next_size < STM32_DMA3_MAX_BLOCK_SIZE && next_size >= chan->max_burst)) + next_size = chan->max_burst * (remaining / chan->max_burst); + ret = stm32_dma3_chan_prep_hw(chan, DMA_MEM_TO_MEM, &swdesc->ccr, &ctr1, &ctr2, src + offset, dst + offset, next_size); if (ret) @@ -1203,14 +1260,13 @@ static struct dma_async_tx_descriptor *stm32_dma3_prep_slave_sg(struct dma_chan size_t len; dma_addr_t sg_addr, dev_addr, src, dst; u32 i, j, count, ctr1, ctr2; + bool prevent_refactor = !!FIELD_GET(STM32_DMA3_DT_NOPACK, chan->dt_config.tr_conf) || + !!FIELD_GET(STM32_DMA3_DT_NOREFACT, chan->dt_config.tr_conf); int ret; - count = sg_len; - for_each_sg(sgl, sg, sg_len, i) { - len = sg_dma_len(sg); - if (len > STM32_DMA3_MAX_BLOCK_SIZE) - count += DIV_ROUND_UP(len, STM32_DMA3_MAX_BLOCK_SIZE) - 1; - } + count = 0; + for_each_sg(sgl, sg, sg_len, i) + count += stm32_dma3_get_ll_count(chan, sg_dma_len(sg), prevent_refactor); swdesc = stm32_dma3_chan_desc_alloc(chan, count); if (!swdesc) @@ -1227,6 +1283,10 @@ static struct dma_async_tx_descriptor *stm32_dma3_prep_slave_sg(struct dma_chan do { size_t chunk = min_t(size_t, len, STM32_DMA3_MAX_BLOCK_SIZE); + if (!prevent_refactor && + 
(chunk < STM32_DMA3_MAX_BLOCK_SIZE && chunk >= chan->max_burst)) + chunk = chan->max_burst * (len / chan->max_burst); + if (dir == DMA_MEM_TO_DEV) { src = sg_addr; dst = dev_addr; @@ -1259,6 +1319,10 @@ static struct dma_async_tx_descriptor *stm32_dma3_prep_slave_sg(struct dma_chan } while (len); } + if (count != sg_len && chan->tcem != CTR2_TCEM_CHANNEL) + dev_warn(chan2dev(chan), "Linked-list refactored, %d items instead of %d\n", + count, sg_len); + /* Enable Error interrupts */ swdesc->ccr |= CCR_USEIE | CCR_ULEIE | CCR_DTEIE; /* Enable Transfer state interrupts */ @@ -1601,8 +1665,12 @@ static u32 stm32_dma3_check_rif(struct stm32_dma3_ddata *ddata) return chan_reserved; } +static struct stm32_dma3_pdata stm32mp25_pdata = { + .axi_max_burst_len = 16, +}; + static const struct of_device_id stm32_dma3_of_match[] = { - { .compatible = "st,stm32mp25-dma3", }, + { .compatible = "st,stm32mp25-dma3", .data = &stm32mp25_pdata, }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, stm32_dma3_of_match); @@ -1610,6 +1678,7 @@ MODULE_DEVICE_TABLE(of, stm32_dma3_of_match); static int stm32_dma3_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; + const struct stm32_dma3_pdata *pdata; struct stm32_dma3_ddata *ddata; struct reset_control *reset; struct stm32_dma3_chan *chan; @@ -1704,6 +1773,16 @@ static int stm32_dma3_probe(struct platform_device *pdev) else /* Dual master ports */ ddata->ports_max_dw[1] = FIELD_GET(G_M1_DATA_WIDTH_ENC, hwcfgr); + /* axi_max_burst_len is optional, if not defined, use STM32_DMA3_MAX_BURST_LEN */ + ddata->axi_max_burst_len = STM32_DMA3_MAX_BURST_LEN; + pdata = device_get_match_data(&pdev->dev); + if (pdata && pdata->axi_max_burst_len) { + ddata->axi_max_burst_len = min_t(u32, pdata->axi_max_burst_len, + STM32_DMA3_MAX_BURST_LEN); + dev_dbg(&pdev->dev, "Burst is limited to %u beats through AXI port\n", + ddata->axi_max_burst_len); + } + ddata->chans = devm_kcalloc(&pdev->dev, ddata->dma_channels, sizeof(*ddata->chans), GFP_KERNEL); if (!ddata->chans) { @@ -1827,7 +1906,7 @@ static const struct dev_pm_ops stm32_dma3_pm_ops = { static struct platform_driver stm32_dma3_driver = { .probe = stm32_dma3_probe, - .remove_new = stm32_dma3_remove, + .remove = stm32_dma3_remove, .driver = { .name = "stm32-dma3", .of_match_table = stm32_dma3_of_match, diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c index 2e7f9b07fdd2..f37cdf6f2179 100644 --- a/drivers/dma/sun4i-dma.c +++ b/drivers/dma/sun4i-dma.c @@ -1292,7 +1292,7 @@ MODULE_DEVICE_TABLE(of, sun4i_dma_match); static struct platform_driver sun4i_dma_driver = { .probe = sun4i_dma_probe, - .remove_new = sun4i_dma_remove, + .remove = sun4i_dma_remove, .driver = { .name = "sun4i-dma", .of_match_table = sun4i_dma_match, diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c index 583bf49031cf..95ecb12caaa5 100644 --- a/drivers/dma/sun6i-dma.c +++ b/drivers/dma/sun6i-dma.c @@ -1488,7 +1488,7 @@ static void sun6i_dma_remove(struct platform_device *pdev) static struct platform_driver sun6i_dma_driver = { .probe = sun6i_dma_probe, - .remove_new = sun6i_dma_remove, + .remove = sun6i_dma_remove, .driver = { .name = "sun6i-dma", .of_match_table = sun6i_dma_match, diff --git a/drivers/dma/tegra186-gpc-dma.c b/drivers/dma/tegra186-gpc-dma.c index 3642508e88bb..cacf3757adc2 100644 --- a/drivers/dma/tegra186-gpc-dma.c +++ b/drivers/dma/tegra186-gpc-dma.c @@ -1532,7 +1532,7 @@ static struct platform_driver tegra_dma_driver = { .of_match_table = tegra_dma_of_match, }, .probe = tegra_dma_probe, - 
.remove_new = tegra_dma_remove, + .remove = tegra_dma_remove, }; module_platform_driver(tegra_dma_driver); diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c index 7d1acda2d72b..14a61e53a41b 100644 --- a/drivers/dma/tegra20-apb-dma.c +++ b/drivers/dma/tegra20-apb-dma.c @@ -1675,7 +1675,7 @@ static struct platform_driver tegra_dmac_driver = { .of_match_table = tegra_dma_of_match, }, .probe = tegra_dma_probe, - .remove_new = tegra_dma_remove, + .remove = tegra_dma_remove, }; module_platform_driver(tegra_dmac_driver); diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c index 24ad7077c53b..2953008d42ef 100644 --- a/drivers/dma/tegra210-adma.c +++ b/drivers/dma/tegra210-adma.c @@ -1008,7 +1008,7 @@ static struct platform_driver tegra_admac_driver = { .of_match_table = tegra_adma_of_match, }, .probe = tegra_adma_probe, - .remove_new = tegra_adma_remove, + .remove = tegra_adma_remove, }; module_platform_driver(tegra_admac_driver); diff --git a/drivers/dma/ti/cppi41.c b/drivers/dma/ti/cppi41.c index a8bb70c2d109..8d8c3d6038fc 100644 --- a/drivers/dma/ti/cppi41.c +++ b/drivers/dma/ti/cppi41.c @@ -1243,7 +1243,7 @@ static const struct dev_pm_ops cppi41_pm_ops = { static struct platform_driver cpp41_dma_driver = { .probe = cppi41_dma_probe, - .remove_new = cppi41_dma_remove, + .remove = cppi41_dma_remove, .driver = { .name = "cppi41-dma-engine", .pm = &cppi41_pm_ops, diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c index 5f8d2e93ff3f..343e986e66e7 100644 --- a/drivers/dma/ti/edma.c +++ b/drivers/dma/ti/edma.c @@ -2636,7 +2636,7 @@ static const struct dev_pm_ops edma_pm_ops = { static struct platform_driver edma_driver = { .probe = edma_probe, - .remove_new = edma_remove, + .remove = edma_remove, .driver = { .name = "edma", .pm = &edma_pm_ops, diff --git a/drivers/dma/ti/omap-dma.c b/drivers/dma/ti/omap-dma.c index 6ab9bfbdc480..8c023c6e623a 100644 --- a/drivers/dma/ti/omap-dma.c +++ b/drivers/dma/ti/omap-dma.c @@ -1915,7 +1915,7 @@ MODULE_DEVICE_TABLE(of, omap_dma_match); static struct platform_driver omap_dma_driver = { .probe = omap_dma_probe, - .remove_new = omap_dma_remove, + .remove = omap_dma_remove, .driver = { .name = "omap-dma-engine", .of_match_table = omap_dma_match, diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c index 7410025605e0..ecaf002558af 100644 --- a/drivers/dma/timb_dma.c +++ b/drivers/dma/timb_dma.c @@ -761,7 +761,7 @@ static struct platform_driver td_driver = { .name = DRIVER_NAME, }, .probe = td_probe, - .remove_new = td_remove, + .remove = td_remove, }; module_platform_driver(td_driver); diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c index 44ba377b4b5a..35d5221683b2 100644 --- a/drivers/dma/txx9dmac.c +++ b/drivers/dma/txx9dmac.c @@ -1260,14 +1260,14 @@ static const struct dev_pm_ops txx9dmac_dev_pm_ops = { }; static struct platform_driver txx9dmac_chan_driver = { - .remove_new = txx9dmac_chan_remove, + .remove = txx9dmac_chan_remove, .driver = { .name = "txx9dmac-chan", }, }; static struct platform_driver txx9dmac_driver = { - .remove_new = txx9dmac_remove, + .remove = txx9dmac_remove, .shutdown = txx9dmac_shutdown, .driver = { .name = "txx9dmac", diff --git a/drivers/dma/uniphier-mdmac.c b/drivers/dma/uniphier-mdmac.c index ad7125f6e2ca..7a99f86ecb5a 100644 --- a/drivers/dma/uniphier-mdmac.c +++ b/drivers/dma/uniphier-mdmac.c @@ -493,7 +493,7 @@ MODULE_DEVICE_TABLE(of, uniphier_mdmac_match); static struct platform_driver uniphier_mdmac_driver = { .probe = uniphier_mdmac_probe, - .remove_new = 
uniphier_mdmac_remove, + .remove = uniphier_mdmac_remove, .driver = { .name = "uniphier-mio-dmac", .of_match_table = uniphier_mdmac_match, diff --git a/drivers/dma/uniphier-xdmac.c b/drivers/dma/uniphier-xdmac.c index 3ce2dc2ad9de..ceeb6171c9d1 100644 --- a/drivers/dma/uniphier-xdmac.c +++ b/drivers/dma/uniphier-xdmac.c @@ -603,7 +603,7 @@ MODULE_DEVICE_TABLE(of, uniphier_xdmac_match); static struct platform_driver uniphier_xdmac_driver = { .probe = uniphier_xdmac_probe, - .remove_new = uniphier_xdmac_remove, + .remove = uniphier_xdmac_remove, .driver = { .name = "uniphier-xdmac", .of_match_table = uniphier_xdmac_match, diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c index 275848a9c450..f64624ea44ad 100644 --- a/drivers/dma/xgene-dma.c +++ b/drivers/dma/xgene-dma.c @@ -1815,7 +1815,7 @@ MODULE_DEVICE_TABLE(of, xgene_dma_of_match_ptr); static struct platform_driver xgene_dma_driver = { .probe = xgene_dma_probe, - .remove_new = xgene_dma_remove, + .remove = xgene_dma_remove, .driver = { .name = "X-Gene-DMA", .of_match_table = xgene_dma_of_match_ptr, diff --git a/drivers/dma/xilinx/xdma.c b/drivers/dma/xilinx/xdma.c index 718842fdaf98..93772abc3b49 100644 --- a/drivers/dma/xilinx/xdma.c +++ b/drivers/dma/xilinx/xdma.c @@ -1315,7 +1315,7 @@ static struct platform_driver xdma_driver = { }, .id_table = xdma_id_table, .probe = xdma_probe, - .remove_new = xdma_remove, + .remove = xdma_remove, }; module_platform_driver(xdma_driver); diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c index 5eb51ae93e89..1bdd57de87a6 100644 --- a/drivers/dma/xilinx/xilinx_dma.c +++ b/drivers/dma/xilinx/xilinx_dma.c @@ -3271,7 +3271,7 @@ static struct platform_driver xilinx_vdma_driver = { .of_match_table = xilinx_dma_of_ids, }, .probe = xilinx_dma_probe, - .remove_new = xilinx_dma_remove, + .remove = xilinx_dma_remove, }; module_platform_driver(xilinx_vdma_driver); diff --git a/drivers/dma/xilinx/xilinx_dpdma.c b/drivers/dma/xilinx/xilinx_dpdma.c index be87764af9e8..ee5d9fdbfd7f 100644 --- a/drivers/dma/xilinx/xilinx_dpdma.c +++ b/drivers/dma/xilinx/xilinx_dpdma.c @@ -1863,7 +1863,7 @@ MODULE_DEVICE_TABLE(of, xilinx_dpdma_of_match); static struct platform_driver xilinx_dpdma_driver = { .probe = xilinx_dpdma_probe, - .remove_new = xilinx_dpdma_remove, + .remove = xilinx_dpdma_remove, .driver = { .name = "xilinx-zynqmp-dpdma", .of_match_table = xilinx_dpdma_of_match, diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c index 9ae46f1198fe..d05fc5fcc77d 100644 --- a/drivers/dma/xilinx/zynqmp_dma.c +++ b/drivers/dma/xilinx/zynqmp_dma.c @@ -366,7 +366,7 @@ static void zynqmp_dma_init(struct zynqmp_dma_chan *chan) } writel(val, chan->regs + ZYNQMP_DMA_DATA_ATTR); - /* Clearing the interrupt account rgisters */ + /* Clearing the interrupt account registers */ val = readl(chan->regs + ZYNQMP_DMA_IRQ_SRC_ACCT); val = readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT); @@ -1192,7 +1192,7 @@ static struct platform_driver zynqmp_dma_driver = { .pm = &zynqmp_dma_dev_pm_ops, }, .probe = zynqmp_dma_probe, - .remove_new = zynqmp_dma_remove, + .remove = zynqmp_dma_remove, }; module_platform_driver(zynqmp_dma_driver); diff --git a/drivers/gpio/gpio-altera.c b/drivers/gpio/gpio-altera.c index 039fef26546e..73e660c5e38a 100644 --- a/drivers/gpio/gpio-altera.c +++ b/drivers/gpio/gpio-altera.c @@ -261,6 +261,11 @@ static int altera_gpio_probe(struct platform_device *pdev) altera_gc->gc.set = altera_gpio_set; altera_gc->gc.owner = THIS_MODULE; altera_gc->gc.parent = &pdev->dev; + 
altera_gc->gc.base = -1; + + altera_gc->gc.label = devm_kasprintf(dev, GFP_KERNEL, "%pfw", dev_fwnode(dev)); + if (!altera_gc->gc.label) + return -ENOMEM; altera_gc->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(altera_gc->regs)) diff --git a/drivers/gpio/gpio-exar.c b/drivers/gpio/gpio-exar.c index 5170fe7599cd..d5909a4f0433 100644 --- a/drivers/gpio/gpio-exar.c +++ b/drivers/gpio/gpio-exar.c @@ -99,11 +99,13 @@ static void exar_set_value(struct gpio_chip *chip, unsigned int offset, struct exar_gpio_chip *exar_gpio = gpiochip_get_data(chip); unsigned int addr = exar_offset_to_lvl_addr(exar_gpio, offset); unsigned int bit = exar_offset_to_bit(exar_gpio, offset); + unsigned int bit_value = value ? BIT(bit) : 0; - if (value) - regmap_set_bits(exar_gpio->regmap, addr, BIT(bit)); - else - regmap_clear_bits(exar_gpio->regmap, addr, BIT(bit)); + /* + * regmap_write_bits() forces value to be written when an external + * pull up/down might otherwise indicate value was already set. + */ + regmap_write_bits(exar_gpio->regmap, addr, BIT(bit), bit_value); } static int exar_direction_output(struct gpio_chip *chip, unsigned int offset, diff --git a/drivers/gpio/gpio-mpsse.c b/drivers/gpio/gpio-mpsse.c index 9ef24449126a..3ea32c5e33d1 100644 --- a/drivers/gpio/gpio-mpsse.c +++ b/drivers/gpio/gpio-mpsse.c @@ -403,7 +403,7 @@ static void gpio_mpsse_ida_remove(void *data) { struct mpsse_priv *priv = data; - ida_simple_remove(&gpio_mpsse_ida, priv->id); + ida_free(&gpio_mpsse_ida, priv->id); } static int gpio_mpsse_probe(struct usb_interface *interface, @@ -422,7 +422,7 @@ static int gpio_mpsse_probe(struct usb_interface *interface, priv->intf = interface; priv->intf_id = interface->cur_altsetting->desc.bInterfaceNumber; - priv->id = ida_simple_get(&gpio_mpsse_ida, 0, 0, GFP_KERNEL); + priv->id = ida_alloc(&gpio_mpsse_ida, GFP_KERNEL); if (priv->id < 0) return priv->id; diff --git a/drivers/gpio/gpio-zevio.c b/drivers/gpio/gpio-zevio.c index 2de61337ad3b..d7230fd83f5d 100644 --- a/drivers/gpio/gpio-zevio.c +++ b/drivers/gpio/gpio-zevio.c @@ -11,6 +11,7 @@ #include <linux/io.h> #include <linux/mod_devicetable.h> #include <linux/platform_device.h> +#include <linux/property.h> #include <linux/slab.h> #include <linux/spinlock.h> @@ -169,6 +170,7 @@ static const struct gpio_chip zevio_gpio_chip = { /* Initialization */ static int zevio_gpio_probe(struct platform_device *pdev) { + struct device *dev = &pdev->dev; struct zevio_gpio *controller; int status, i; @@ -180,6 +182,10 @@ static int zevio_gpio_probe(struct platform_device *pdev) controller->chip = zevio_gpio_chip; controller->chip.parent = &pdev->dev; + controller->chip.label = devm_kasprintf(dev, GFP_KERNEL, "%pfw", dev_fwnode(dev)); + if (!controller->chip.label) + return -ENOMEM; + controller->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(controller->regs)) return dev_err_probe(&pdev->dev, PTR_ERR(controller->regs), diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index 5ce8e6504ba7..3f691e1fd22c 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -2407,9 +2407,8 @@ static struct iommu_domain *amd_iommu_domain_alloc(unsigned int type) } static struct iommu_domain * -amd_iommu_domain_alloc_user(struct device *dev, u32 flags, - struct iommu_domain *parent, - const struct iommu_user_data *user_data) +amd_iommu_domain_alloc_paging_flags(struct device *dev, u32 flags, + const struct iommu_user_data *user_data) { unsigned int type = IOMMU_DOMAIN_UNMANAGED; @@ -2420,7 +2419,7 @@ 
amd_iommu_domain_alloc_user(struct device *dev, u32 flags, if (dev) iommu = get_amd_iommu_from_dev(dev); - if ((flags & ~supported_flags) || parent || user_data) + if ((flags & ~supported_flags) || user_data) return ERR_PTR(-EOPNOTSUPP); /* Allocate domain with v2 page table if IOMMU supports PASID. */ @@ -2884,7 +2883,7 @@ const struct iommu_ops amd_iommu_ops = { .release_domain = &release_domain, .identity_domain = &identity_domain.domain, .domain_alloc = amd_iommu_domain_alloc, - .domain_alloc_user = amd_iommu_domain_alloc_user, + .domain_alloc_paging_flags = amd_iommu_domain_alloc_paging_flags, .domain_alloc_sva = amd_iommu_domain_alloc_sva, .probe_device = amd_iommu_probe_device, .release_device = amd_iommu_release_device, diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c index 04630dbfedd9..e4ebd9e12ad4 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c @@ -3132,9 +3132,8 @@ static struct iommu_domain arm_smmu_blocked_domain = { }; static struct iommu_domain * -arm_smmu_domain_alloc_user(struct device *dev, u32 flags, - struct iommu_domain *parent, - const struct iommu_user_data *user_data) +arm_smmu_domain_alloc_paging_flags(struct device *dev, u32 flags, + const struct iommu_user_data *user_data) { struct arm_smmu_master *master = dev_iommu_priv_get(dev); const u32 PAGING_FLAGS = IOMMU_HWPT_ALLOC_DIRTY_TRACKING | @@ -3145,7 +3144,7 @@ arm_smmu_domain_alloc_user(struct device *dev, u32 flags, if (flags & ~PAGING_FLAGS) return ERR_PTR(-EOPNOTSUPP); - if (parent || user_data) + if (user_data) return ERR_PTR(-EOPNOTSUPP); if (flags & IOMMU_HWPT_ALLOC_PASID) @@ -3546,7 +3545,7 @@ static struct iommu_ops arm_smmu_ops = { .hw_info = arm_smmu_hw_info, .domain_alloc_paging = arm_smmu_domain_alloc_paging, .domain_alloc_sva = arm_smmu_sva_domain_alloc, - .domain_alloc_user = arm_smmu_domain_alloc_user, + .domain_alloc_paging_flags = arm_smmu_domain_alloc_paging_flags, .probe_device = arm_smmu_probe_device, .release_device = arm_smmu_release_device, .device_group = arm_smmu_device_group, diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 527f6f89d8a1..7d0acb74d5a5 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -3328,9 +3328,8 @@ static struct dmar_domain *paging_domain_alloc(struct device *dev, bool first_st } static struct iommu_domain * -intel_iommu_domain_alloc_user(struct device *dev, u32 flags, - struct iommu_domain *parent, - const struct iommu_user_data *user_data) +intel_iommu_domain_alloc_paging_flags(struct device *dev, u32 flags, + const struct iommu_user_data *user_data) { struct device_domain_info *info = dev_iommu_priv_get(dev); bool dirty_tracking = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING; @@ -3340,13 +3339,6 @@ intel_iommu_domain_alloc_user(struct device *dev, u32 flags, struct iommu_domain *domain; bool first_stage; - /* Must be NESTING domain */ - if (parent) { - if (!nested_supported(iommu) || flags) - return ERR_PTR(-EOPNOTSUPP); - return intel_nested_domain_alloc(parent, user_data); - } - if (flags & (~(IOMMU_HWPT_ALLOC_NEST_PARENT | IOMMU_HWPT_ALLOC_DIRTY_TRACKING | IOMMU_HWPT_FAULT_ID_VALID))) @@ -4472,9 +4464,10 @@ const struct iommu_ops intel_iommu_ops = { .identity_domain = &identity_domain, .capable = intel_iommu_capable, .hw_info = intel_iommu_hw_info, - .domain_alloc_user = intel_iommu_domain_alloc_user, + .domain_alloc_paging_flags = intel_iommu_domain_alloc_paging_flags, .domain_alloc_sva = 
intel_svm_domain_alloc, .domain_alloc_paging = intel_iommu_domain_alloc_paging, + .domain_alloc_nested = intel_iommu_domain_alloc_nested, .probe_device = intel_iommu_probe_device, .release_device = intel_iommu_release_device, .get_resv_regions = intel_iommu_get_resv_regions, diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h index 2cca094c259d..6ea7bbe26b19 100644 --- a/drivers/iommu/intel/iommu.h +++ b/drivers/iommu/intel/iommu.h @@ -1265,8 +1265,10 @@ int __domain_setup_first_level(struct intel_iommu *iommu, int dmar_ir_support(void); void iommu_flush_write_buffer(struct intel_iommu *iommu); -struct iommu_domain *intel_nested_domain_alloc(struct iommu_domain *parent, - const struct iommu_user_data *user_data); +struct iommu_domain * +intel_iommu_domain_alloc_nested(struct device *dev, struct iommu_domain *parent, + u32 flags, + const struct iommu_user_data *user_data); struct device *device_rbtree_find(struct intel_iommu *iommu, u16 rid); enum cache_tag_type { diff --git a/drivers/iommu/intel/nested.c b/drivers/iommu/intel/nested.c index 42c4533a6ea2..aba92c00b427 100644 --- a/drivers/iommu/intel/nested.c +++ b/drivers/iommu/intel/nested.c @@ -186,14 +186,21 @@ static const struct iommu_domain_ops intel_nested_domain_ops = { .cache_invalidate_user = intel_nested_cache_invalidate_user, }; -struct iommu_domain *intel_nested_domain_alloc(struct iommu_domain *parent, - const struct iommu_user_data *user_data) +struct iommu_domain * +intel_iommu_domain_alloc_nested(struct device *dev, struct iommu_domain *parent, + u32 flags, + const struct iommu_user_data *user_data) { + struct device_domain_info *info = dev_iommu_priv_get(dev); struct dmar_domain *s2_domain = to_dmar_domain(parent); + struct intel_iommu *iommu = info->iommu; struct iommu_hwpt_vtd_s1 vtd; struct dmar_domain *domain; int ret; + if (!nested_supported(iommu) || flags) + return ERR_PTR(-EOPNOTSUPP); + /* Must be nested domain */ if (user_data->type != IOMMU_HWPT_DATA_VTD_S1) return ERR_PTR(-EOPNOTSUPP); diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 7618e9c65d3f..9bc0c74cca3c 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -1987,8 +1987,8 @@ __iommu_paging_domain_alloc_flags(struct device *dev, unsigned int type, if (ops->domain_alloc_paging && !flags) domain = ops->domain_alloc_paging(dev); - else if (ops->domain_alloc_user) - domain = ops->domain_alloc_user(dev, flags, NULL, NULL); + else if (ops->domain_alloc_paging_flags) + domain = ops->domain_alloc_paging_flags(dev, flags, NULL); else if (ops->domain_alloc && !flags) domain = ops->domain_alloc(IOMMU_DOMAIN_UNMANAGED); else diff --git a/drivers/iommu/iommufd/hw_pagetable.c b/drivers/iommu/iommufd/hw_pagetable.c index 9236e8ca9aa8..ce03c3804651 100644 --- a/drivers/iommu/iommufd/hw_pagetable.c +++ b/drivers/iommu/iommufd/hw_pagetable.c @@ -119,7 +119,7 @@ iommufd_hwpt_paging_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas, lockdep_assert_held(&ioas->mutex); - if ((flags || user_data) && !ops->domain_alloc_user) + if ((flags || user_data) && !ops->domain_alloc_paging_flags) return ERR_PTR(-EOPNOTSUPP); if (flags & ~valid_flags) return ERR_PTR(-EOPNOTSUPP); @@ -139,9 +139,9 @@ iommufd_hwpt_paging_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas, hwpt_paging->ioas = ioas; hwpt_paging->nest_parent = flags & IOMMU_HWPT_ALLOC_NEST_PARENT; - if (ops->domain_alloc_user) { - hwpt->domain = ops->domain_alloc_user(idev->dev, flags, NULL, - user_data); + if (ops->domain_alloc_paging_flags) { + hwpt->domain = 
ops->domain_alloc_paging_flags(idev->dev, flags, + user_data); if (IS_ERR(hwpt->domain)) { rc = PTR_ERR(hwpt->domain); hwpt->domain = NULL; @@ -227,7 +227,7 @@ iommufd_hwpt_nested_alloc(struct iommufd_ctx *ictx, int rc; if ((flags & ~IOMMU_HWPT_FAULT_ID_VALID) || - !user_data->len || !ops->domain_alloc_user) + !user_data->len || !ops->domain_alloc_nested) return ERR_PTR(-EOPNOTSUPP); if (parent->auto_domain || !parent->nest_parent || parent->common.domain->owner != ops) @@ -242,9 +242,9 @@ iommufd_hwpt_nested_alloc(struct iommufd_ctx *ictx, refcount_inc(&parent->common.obj.users); hwpt_nested->parent = parent; - hwpt->domain = ops->domain_alloc_user(idev->dev, - flags & ~IOMMU_HWPT_FAULT_ID_VALID, - parent->common.domain, user_data); + hwpt->domain = ops->domain_alloc_nested( + idev->dev, parent->common.domain, + flags & ~IOMMU_HWPT_FAULT_ID_VALID, user_data); if (IS_ERR(hwpt->domain)) { rc = PTR_ERR(hwpt->domain); hwpt->domain = NULL; diff --git a/drivers/iommu/iommufd/selftest.c b/drivers/iommu/iommufd/selftest.c index 2f9de177dffc..a0de6d6d4e68 100644 --- a/drivers/iommu/iommufd/selftest.c +++ b/drivers/iommu/iommufd/selftest.c @@ -356,8 +356,8 @@ __mock_domain_alloc_nested(const struct iommu_user_data *user_data) } static struct iommu_domain * -mock_domain_alloc_nested(struct iommu_domain *parent, u32 flags, - const struct iommu_user_data *user_data) +mock_domain_alloc_nested(struct device *dev, struct iommu_domain *parent, + u32 flags, const struct iommu_user_data *user_data) { struct mock_iommu_domain_nested *mock_nested; struct mock_iommu_domain *mock_parent; @@ -379,9 +379,8 @@ mock_domain_alloc_nested(struct iommu_domain *parent, u32 flags, } static struct iommu_domain * -mock_domain_alloc_user(struct device *dev, u32 flags, - struct iommu_domain *parent, - const struct iommu_user_data *user_data) +mock_domain_alloc_paging_flags(struct device *dev, u32 flags, + const struct iommu_user_data *user_data) { bool has_dirty_flag = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING; const u32 PAGING_FLAGS = IOMMU_HWPT_ALLOC_DIRTY_TRACKING | @@ -390,9 +389,6 @@ mock_domain_alloc_user(struct device *dev, u32 flags, MOCK_FLAGS_DEVICE_NO_DIRTY; struct iommu_domain *domain; - if (parent) - return mock_domain_alloc_nested(parent, flags, user_data); - if (user_data) return ERR_PTR(-EOPNOTSUPP); if ((flags & ~PAGING_FLAGS) || (has_dirty_flag && no_dirty_ops)) @@ -718,7 +714,8 @@ static const struct iommu_ops mock_ops = { .pgsize_bitmap = MOCK_IO_PAGE_SIZE, .hw_info = mock_domain_hw_info, .domain_alloc_paging = mock_domain_alloc_paging, - .domain_alloc_user = mock_domain_alloc_user, + .domain_alloc_paging_flags = mock_domain_alloc_paging_flags, + .domain_alloc_nested = mock_domain_alloc_nested, .capable = mock_domain_capable, .device_group = generic_device_group, .probe_device = mock_probe_device, diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index b826d0255ecf..4ec4934a4edd 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -4661,7 +4661,7 @@ int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode) struct net_device *dev = bp->dev; if (page_mode) { - bp->flags &= ~BNXT_FLAG_AGG_RINGS; + bp->flags &= ~(BNXT_FLAG_AGG_RINGS | BNXT_FLAG_NO_AGG_RINGS); bp->flags |= BNXT_FLAG_RX_PAGE_MODE; if (bp->xdp_prog->aux->xdp_has_frags) @@ -9299,7 +9299,6 @@ static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp) struct hwrm_port_mac_ptp_qcfg_output *resp; struct hwrm_port_mac_ptp_qcfg_input *req; struct bnxt_ptp_cfg *ptp 
= bp->ptp_cfg; - bool phc_cfg; u8 flags; int rc; @@ -9346,8 +9345,9 @@ static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp) rc = -ENODEV; goto exit; } - phc_cfg = (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED) != 0; - rc = bnxt_ptp_init(bp, phc_cfg); + ptp->rtc_configured = + (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED) != 0; + rc = bnxt_ptp_init(bp); if (rc) netdev_warn(bp->dev, "PTP initialization failed.\n"); exit: @@ -14746,6 +14746,14 @@ static int bnxt_change_mtu(struct net_device *dev, int new_mtu) bnxt_close_nic(bp, true, false); WRITE_ONCE(dev->mtu, new_mtu); + + /* MTU change may change the AGG ring settings if an XDP multi-buffer + * program is attached. We need to set the AGG rings settings and + * rx_skb_func accordingly. + */ + if (READ_ONCE(bp->xdp_prog)) + bnxt_set_rx_skb_mode(bp, true); + bnxt_set_ring_params(bp); if (netif_running(dev)) @@ -15483,6 +15491,13 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx) for (i = 0; i <= BNXT_VNIC_NTUPLE; i++) { vnic = &bp->vnic_info[i]; + + rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true); + if (rc) { + netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n", + vnic->vnic_id, rc); + return rc; + } vnic->mru = bp->dev->mtu + ETH_HLEN + VLAN_HLEN; bnxt_hwrm_vnic_update(bp, vnic, VNIC_UPDATE_REQ_ENABLES_MRU_VALID); @@ -16236,6 +16251,7 @@ static void bnxt_shutdown(struct pci_dev *pdev) if (netif_running(dev)) dev_close(dev); + bnxt_ptp_clear(bp); bnxt_clear_int_mode(bp); pci_disable_device(pdev); @@ -16263,6 +16279,7 @@ static int bnxt_suspend(struct device *device) rc = bnxt_close(dev); } bnxt_hwrm_func_drv_unrgtr(bp); + bnxt_ptp_clear(bp); pci_disable_device(bp->pdev); bnxt_free_ctx_mem(bp, false); rtnl_unlock(); @@ -16306,6 +16323,10 @@ static int bnxt_resume(struct device *device) if (bp->fw_crash_mem) bnxt_hwrm_crash_dump_mem_cfg(bp); + if (bnxt_ptp_init(bp)) { + kfree(bp->ptp_cfg); + bp->ptp_cfg = NULL; + } bnxt_get_wol_settings(bp); if (netif_running(dev)) { rc = bnxt_open(dev); @@ -16484,8 +16505,12 @@ static void bnxt_io_resume(struct pci_dev *pdev) rtnl_lock(); err = bnxt_hwrm_func_qcaps(bp); - if (!err && netif_running(netdev)) - err = bnxt_open(netdev); + if (!err) { + if (netif_running(netdev)) + err = bnxt_open(netdev); + else + err = bnxt_reserve_rings(bp, true); + } if (!err) netif_device_attach(netdev); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 2f4987ec7464..f1f6bb328a55 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -2837,19 +2837,24 @@ static int bnxt_get_link_ksettings(struct net_device *dev, } base->port = PORT_NONE; - if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) { + if (media == BNXT_MEDIA_TP) { base->port = PORT_TP; linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, lk_ksettings->link_modes.supported); linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, lk_ksettings->link_modes.advertising); + } else if (media == BNXT_MEDIA_KR) { + linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, + lk_ksettings->link_modes.supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, + lk_ksettings->link_modes.advertising); } else { linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, lk_ksettings->link_modes.supported); linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, lk_ksettings->link_modes.advertising); - if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC) + if (media == BNXT_MEDIA_CR) base->port = PORT_DA; else base->port = 
PORT_FIBRE; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c index 075ccd589845..2d4e19b96ee7 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c @@ -1038,7 +1038,7 @@ static void bnxt_ptp_free(struct bnxt *bp) } } -int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg) +int bnxt_ptp_init(struct bnxt *bp) { struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; int rc; @@ -1061,7 +1061,7 @@ int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg) if (BNXT_PTP_USE_RTC(bp)) { bnxt_ptp_timecounter_init(bp, false); - rc = bnxt_ptp_init_rtc(bp, phc_cfg); + rc = bnxt_ptp_init_rtc(bp, ptp->rtc_configured); if (rc) goto out; } else { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h index c7851f8c971c..a95f05e9c579 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h @@ -135,6 +135,7 @@ struct bnxt_ptp_cfg { BNXT_PTP_MSG_PDELAY_REQ | \ BNXT_PTP_MSG_PDELAY_RESP) u8 tx_tstamp_en:1; + u8 rtc_configured:1; int rx_filter; u32 tstamp_filters; @@ -168,7 +169,7 @@ void bnxt_tx_ts_cmp(struct bnxt *bp, struct bnxt_napi *bnapi, struct tx_ts_cmp *tscmp); void bnxt_ptp_rtc_timecounter_init(struct bnxt_ptp_cfg *ptp, u64 ns); int bnxt_ptp_init_rtc(struct bnxt *bp, bool phc_cfg); -int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg); +int bnxt_ptp_init(struct bnxt *bp); void bnxt_ptp_clear(struct bnxt *bp); static inline u64 bnxt_timecounter_cyc2time(struct bnxt_ptp_cfg *ptp, u64 ts) { diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 01dfec115942..9cc8db10a8d6 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -17839,6 +17839,9 @@ static int tg3_init_one(struct pci_dev *pdev, } else persist_dma_mask = dma_mask = DMA_BIT_MASK(64); + if (tg3_asic_rev(tp) == ASIC_REV_57766) + persist_dma_mask = DMA_BIT_MASK(31); + /* Configure DMA attributes. */ if (dma_mask > DMA_BIT_MASK(32)) { err = dma_set_mask(&pdev->dev, dma_mask); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c index 27935c54b91b..8216f843a7cd 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c @@ -112,6 +112,11 @@ struct mac_ops *get_mac_ops(void *cgxd) return ((struct cgx *)cgxd)->mac_ops; } +u32 cgx_get_fifo_len(void *cgxd) +{ + return ((struct cgx *)cgxd)->fifo_len; +} + void cgx_write(struct cgx *cgx, u64 lmac, u64 offset, u64 val) { writeq(val, cgx->reg_base + (lmac << cgx->mac_ops->lmac_offset) + @@ -209,6 +214,24 @@ u8 cgx_lmac_get_p2x(int cgx_id, int lmac_id) return (cfg & CMR_P2X_SEL_MASK) >> CMR_P2X_SEL_SHIFT; } +static u8 cgx_get_nix_resetbit(struct cgx *cgx) +{ + int first_lmac; + u8 p2x; + + /* non 98XX silicons supports only NIX0 block */ + if (cgx->pdev->subsystem_device != PCI_SUBSYS_DEVID_98XX) + return CGX_NIX0_RESET; + + first_lmac = find_first_bit(&cgx->lmac_bmap, cgx->max_lmac_per_mac); + p2x = cgx_lmac_get_p2x(cgx->cgx_id, first_lmac); + + if (p2x == CMR_P2X_SEL_NIX1) + return CGX_NIX1_RESET; + else + return CGX_NIX0_RESET; +} + /* Ensure the required lock for event queue(where asynchronous events are * posted) is acquired before calling this API. 
Else an asynchronous event(with * latest link status) can reach the destination before this function returns @@ -501,7 +524,7 @@ static u32 cgx_get_lmac_fifo_len(void *cgxd, int lmac_id) u8 num_lmacs; u32 fifo_len; - fifo_len = cgx->mac_ops->fifo_len; + fifo_len = cgx->fifo_len; num_lmacs = cgx->mac_ops->get_nr_lmacs(cgx); switch (num_lmacs) { @@ -1719,6 +1742,8 @@ static int cgx_lmac_init(struct cgx *cgx) lmac->lmac_type = cgx->mac_ops->get_lmac_type(cgx, lmac->lmac_id); } + /* Start X2P reset on given MAC block */ + cgx->mac_ops->mac_x2p_reset(cgx, true); return cgx_lmac_verify_fwi_version(cgx); err_bitmap_free: @@ -1764,7 +1789,7 @@ static void cgx_populate_features(struct cgx *cgx) u64 cfg; cfg = cgx_read(cgx, 0, CGX_CONST); - cgx->mac_ops->fifo_len = FIELD_GET(CGX_CONST_RXFIFO_SIZE, cfg); + cgx->fifo_len = FIELD_GET(CGX_CONST_RXFIFO_SIZE, cfg); cgx->max_lmac_per_mac = FIELD_GET(CGX_CONST_MAX_LMACS, cfg); if (is_dev_rpm(cgx)) @@ -1784,6 +1809,45 @@ static u8 cgx_get_rxid_mapoffset(struct cgx *cgx) return 0x60; } +static void cgx_x2p_reset(void *cgxd, bool enable) +{ + struct cgx *cgx = cgxd; + int lmac_id; + u64 cfg; + + if (enable) { + for_each_set_bit(lmac_id, &cgx->lmac_bmap, cgx->max_lmac_per_mac) + cgx->mac_ops->mac_enadis_rx(cgx, lmac_id, false); + + usleep_range(1000, 2000); + + cfg = cgx_read(cgx, 0, CGXX_CMR_GLOBAL_CONFIG); + cfg |= cgx_get_nix_resetbit(cgx) | CGX_NSCI_DROP; + cgx_write(cgx, 0, CGXX_CMR_GLOBAL_CONFIG, cfg); + } else { + cfg = cgx_read(cgx, 0, CGXX_CMR_GLOBAL_CONFIG); + cfg &= ~(cgx_get_nix_resetbit(cgx) | CGX_NSCI_DROP); + cgx_write(cgx, 0, CGXX_CMR_GLOBAL_CONFIG, cfg); + } +} + +static int cgx_enadis_rx(void *cgxd, int lmac_id, bool enable) +{ + struct cgx *cgx = cgxd; + u64 cfg; + + if (!is_lmac_valid(cgx, lmac_id)) + return -ENODEV; + + cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG); + if (enable) + cfg |= DATA_PKT_RX_EN; + else + cfg &= ~DATA_PKT_RX_EN; + cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg); + return 0; +} + static struct mac_ops cgx_mac_ops = { .name = "cgx", .csr_offset = 0, @@ -1815,6 +1879,8 @@ static struct mac_ops cgx_mac_ops = { .mac_get_pfc_frm_cfg = cgx_lmac_get_pfc_frm_cfg, .mac_reset = cgx_lmac_reset, .mac_stats_reset = cgx_stats_reset, + .mac_x2p_reset = cgx_x2p_reset, + .mac_enadis_rx = cgx_enadis_rx, }; static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id) diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h index dc9ace30554a..1cf12e5c7da8 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h @@ -32,6 +32,10 @@ #define CGX_LMAC_TYPE_MASK 0xF #define CGXX_CMRX_INT 0x040 #define FW_CGX_INT BIT_ULL(1) +#define CGXX_CMR_GLOBAL_CONFIG 0x08 +#define CGX_NIX0_RESET BIT_ULL(2) +#define CGX_NIX1_RESET BIT_ULL(3) +#define CGX_NSCI_DROP BIT_ULL(9) #define CGXX_CMRX_INT_ENA_W1S 0x058 #define CGXX_CMRX_RX_ID_MAP 0x060 #define CGXX_CMRX_RX_STAT0 0x070 @@ -185,4 +189,5 @@ int cgx_lmac_get_pfc_frm_cfg(void *cgxd, int lmac_id, u8 *tx_pause, int verify_lmac_fc_cfg(void *cgxd, int lmac_id, u8 tx_pause, u8 rx_pause, int pfvf_idx); int cgx_lmac_reset(void *cgxd, int lmac_id, u8 pf_req_flr); +u32 cgx_get_fifo_len(void *cgxd); #endif /* CGX_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h index 9ffc6790c513..6180e68e1765 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h +++ 
b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h @@ -72,7 +72,6 @@ struct mac_ops { u8 irq_offset; u8 int_ena_bit; u8 lmac_fwi; - u32 fifo_len; bool non_contiguous_serdes_lane; /* RPM & CGX differs in number of Receive/transmit stats */ u8 rx_stats_cnt; @@ -133,6 +132,8 @@ struct mac_ops { int (*get_fec_stats)(void *cgxd, int lmac_id, struct cgx_fec_stats_rsp *rsp); int (*mac_stats_reset)(void *cgxd, int lmac_id); + void (*mac_x2p_reset)(void *cgxd, bool enable); + int (*mac_enadis_rx)(void *cgxd, int lmac_id, bool enable); }; struct cgx { @@ -142,6 +143,10 @@ struct cgx { u8 lmac_count; /* number of LMACs per MAC could be 4 or 8 */ u8 max_lmac_per_mac; + /* length of fifo varies depending on the number + * of LMACS + */ + u32 fifo_len; #define MAX_LMAC_COUNT 8 struct lmac *lmac_idmap[MAX_LMAC_COUNT]; struct work_struct cgx_cmd_work; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c index 1b34cf9c9703..2e9945446199 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c @@ -39,6 +39,8 @@ static struct mac_ops rpm_mac_ops = { .mac_get_pfc_frm_cfg = rpm_lmac_get_pfc_frm_cfg, .mac_reset = rpm_lmac_reset, .mac_stats_reset = rpm_stats_reset, + .mac_x2p_reset = rpm_x2p_reset, + .mac_enadis_rx = rpm_enadis_rx, }; static struct mac_ops rpm2_mac_ops = { @@ -72,6 +74,8 @@ static struct mac_ops rpm2_mac_ops = { .mac_get_pfc_frm_cfg = rpm_lmac_get_pfc_frm_cfg, .mac_reset = rpm_lmac_reset, .mac_stats_reset = rpm_stats_reset, + .mac_x2p_reset = rpm_x2p_reset, + .mac_enadis_rx = rpm_enadis_rx, }; bool is_dev_rpm2(void *rpmd) @@ -467,7 +471,7 @@ u8 rpm_get_lmac_type(void *rpmd, int lmac_id) int err; req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_LINK_STS, req); - err = cgx_fwi_cmd_generic(req, &resp, rpm, 0); + err = cgx_fwi_cmd_generic(req, &resp, rpm, lmac_id); if (!err) return FIELD_GET(RESP_LINKSTAT_LMAC_TYPE, resp); return err; @@ -480,7 +484,7 @@ u32 rpm_get_lmac_fifo_len(void *rpmd, int lmac_id) u8 num_lmacs; u32 fifo_len; - fifo_len = rpm->mac_ops->fifo_len; + fifo_len = rpm->fifo_len; num_lmacs = rpm->mac_ops->get_nr_lmacs(rpm); switch (num_lmacs) { @@ -533,9 +537,9 @@ u32 rpm2_get_lmac_fifo_len(void *rpmd, int lmac_id) */ max_lmac = (rpm_read(rpm, 0, CGX_CONST) >> 24) & 0xFF; if (max_lmac > 4) - fifo_len = rpm->mac_ops->fifo_len / 2; + fifo_len = rpm->fifo_len / 2; else - fifo_len = rpm->mac_ops->fifo_len; + fifo_len = rpm->fifo_len; if (lmac_id < 4) { num_lmacs = hweight8(lmac_info & 0xF); @@ -699,46 +703,51 @@ int rpm_get_fec_stats(void *rpmd, int lmac_id, struct cgx_fec_stats_rsp *rsp) if (rpm->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_NONE) return 0; + /* latched registers FCFECX_CW_HI/RSFEC_STAT_FAST_DATA_HI_CDC are common + * for all counters. 
Acquire lock to ensure serialized reads + */ + mutex_lock(&rpm->lock); if (rpm->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_BASER) { - val_lo = rpm_read(rpm, lmac_id, RPMX_MTI_FCFECX_VL0_CCW_LO); - val_hi = rpm_read(rpm, lmac_id, RPMX_MTI_FCFECX_CW_HI); + val_lo = rpm_read(rpm, 0, RPMX_MTI_FCFECX_VL0_CCW_LO(lmac_id)); + val_hi = rpm_read(rpm, 0, RPMX_MTI_FCFECX_CW_HI(lmac_id)); rsp->fec_corr_blks = (val_hi << 16 | val_lo); - val_lo = rpm_read(rpm, lmac_id, RPMX_MTI_FCFECX_VL0_NCCW_LO); - val_hi = rpm_read(rpm, lmac_id, RPMX_MTI_FCFECX_CW_HI); + val_lo = rpm_read(rpm, 0, RPMX_MTI_FCFECX_VL0_NCCW_LO(lmac_id)); + val_hi = rpm_read(rpm, 0, RPMX_MTI_FCFECX_CW_HI(lmac_id)); rsp->fec_uncorr_blks = (val_hi << 16 | val_lo); /* 50G uses 2 Physical serdes lines */ if (rpm->lmac_idmap[lmac_id]->link_info.lmac_type_id == LMAC_MODE_50G_R) { - val_lo = rpm_read(rpm, lmac_id, - RPMX_MTI_FCFECX_VL1_CCW_LO); - val_hi = rpm_read(rpm, lmac_id, - RPMX_MTI_FCFECX_CW_HI); + val_lo = rpm_read(rpm, 0, + RPMX_MTI_FCFECX_VL1_CCW_LO(lmac_id)); + val_hi = rpm_read(rpm, 0, + RPMX_MTI_FCFECX_CW_HI(lmac_id)); rsp->fec_corr_blks += (val_hi << 16 | val_lo); - val_lo = rpm_read(rpm, lmac_id, - RPMX_MTI_FCFECX_VL1_NCCW_LO); - val_hi = rpm_read(rpm, lmac_id, - RPMX_MTI_FCFECX_CW_HI); + val_lo = rpm_read(rpm, 0, + RPMX_MTI_FCFECX_VL1_NCCW_LO(lmac_id)); + val_hi = rpm_read(rpm, 0, + RPMX_MTI_FCFECX_CW_HI(lmac_id)); rsp->fec_uncorr_blks += (val_hi << 16 | val_lo); } } else { /* enable RS-FEC capture */ - cfg = rpm_read(rpm, 0, RPMX_MTI_STAT_STATN_CONTROL); + cfg = rpm_read(rpm, 0, RPMX_MTI_RSFEC_STAT_STATN_CONTROL); cfg |= RPMX_RSFEC_RX_CAPTURE | BIT(lmac_id); - rpm_write(rpm, 0, RPMX_MTI_STAT_STATN_CONTROL, cfg); + rpm_write(rpm, 0, RPMX_MTI_RSFEC_STAT_STATN_CONTROL, cfg); val_lo = rpm_read(rpm, 0, RPMX_MTI_RSFEC_STAT_COUNTER_CAPTURE_2); - val_hi = rpm_read(rpm, 0, RPMX_MTI_STAT_DATA_HI_CDC); + val_hi = rpm_read(rpm, 0, RPMX_MTI_RSFEC_STAT_FAST_DATA_HI_CDC); rsp->fec_corr_blks = (val_hi << 32 | val_lo); val_lo = rpm_read(rpm, 0, RPMX_MTI_RSFEC_STAT_COUNTER_CAPTURE_3); - val_hi = rpm_read(rpm, 0, RPMX_MTI_STAT_DATA_HI_CDC); + val_hi = rpm_read(rpm, 0, RPMX_MTI_RSFEC_STAT_FAST_DATA_HI_CDC); rsp->fec_uncorr_blks = (val_hi << 32 | val_lo); } + mutex_unlock(&rpm->lock); return 0; } @@ -763,3 +772,41 @@ int rpm_lmac_reset(void *rpmd, int lmac_id, u8 pf_req_flr) return 0; } + +void rpm_x2p_reset(void *rpmd, bool enable) +{ + rpm_t *rpm = rpmd; + int lmac_id; + u64 cfg; + + if (enable) { + for_each_set_bit(lmac_id, &rpm->lmac_bmap, rpm->max_lmac_per_mac) + rpm->mac_ops->mac_enadis_rx(rpm, lmac_id, false); + + usleep_range(1000, 2000); + + cfg = rpm_read(rpm, 0, RPMX_CMR_GLOBAL_CFG); + rpm_write(rpm, 0, RPMX_CMR_GLOBAL_CFG, cfg | RPM_NIX0_RESET); + } else { + cfg = rpm_read(rpm, 0, RPMX_CMR_GLOBAL_CFG); + cfg &= ~RPM_NIX0_RESET; + rpm_write(rpm, 0, RPMX_CMR_GLOBAL_CFG, cfg); + } +} + +int rpm_enadis_rx(void *rpmd, int lmac_id, bool enable) +{ + rpm_t *rpm = rpmd; + u64 cfg; + + if (!is_lmac_valid(rpm, lmac_id)) + return -ENODEV; + + cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG); + if (enable) + cfg |= RPM_RX_EN; + else + cfg &= ~RPM_RX_EN; + rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg); + return 0; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h index 34b11deb0f3c..b8d3972e096a 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h @@ -17,6 +17,8 @@ /* Registers */ #define 
RPMX_CMRX_CFG 0x00 +#define RPMX_CMR_GLOBAL_CFG 0x08 +#define RPM_NIX0_RESET BIT_ULL(3) #define RPMX_RX_TS_PREPEND BIT_ULL(22) #define RPMX_TX_PTP_1S_SUPPORT BIT_ULL(17) #define RPMX_CMRX_RX_ID_MAP 0x80 @@ -84,16 +86,18 @@ /* FEC stats */ #define RPMX_MTI_STAT_STATN_CONTROL 0x10018 #define RPMX_MTI_STAT_DATA_HI_CDC 0x10038 -#define RPMX_RSFEC_RX_CAPTURE BIT_ULL(27) +#define RPMX_RSFEC_RX_CAPTURE BIT_ULL(28) #define RPMX_CMD_CLEAR_RX BIT_ULL(30) #define RPMX_CMD_CLEAR_TX BIT_ULL(31) +#define RPMX_MTI_RSFEC_STAT_STATN_CONTROL 0x40018 +#define RPMX_MTI_RSFEC_STAT_FAST_DATA_HI_CDC 0x40000 #define RPMX_MTI_RSFEC_STAT_COUNTER_CAPTURE_2 0x40050 #define RPMX_MTI_RSFEC_STAT_COUNTER_CAPTURE_3 0x40058 -#define RPMX_MTI_FCFECX_VL0_CCW_LO 0x38618 -#define RPMX_MTI_FCFECX_VL0_NCCW_LO 0x38620 -#define RPMX_MTI_FCFECX_VL1_CCW_LO 0x38628 -#define RPMX_MTI_FCFECX_VL1_NCCW_LO 0x38630 -#define RPMX_MTI_FCFECX_CW_HI 0x38638 +#define RPMX_MTI_FCFECX_VL0_CCW_LO(a) (0x38618 + ((a) * 0x40)) +#define RPMX_MTI_FCFECX_VL0_NCCW_LO(a) (0x38620 + ((a) * 0x40)) +#define RPMX_MTI_FCFECX_VL1_CCW_LO(a) (0x38628 + ((a) * 0x40)) +#define RPMX_MTI_FCFECX_VL1_NCCW_LO(a) (0x38630 + ((a) * 0x40)) +#define RPMX_MTI_FCFECX_CW_HI(a) (0x38638 + ((a) * 0x40)) /* CN10KB CSR Declaration */ #define RPM2_CMRX_SW_INT 0x1b0 @@ -137,4 +141,6 @@ bool is_dev_rpm2(void *rpmd); int rpm_get_fec_stats(void *cgxd, int lmac_id, struct cgx_fec_stats_rsp *rsp); int rpm_lmac_reset(void *rpmd, int lmac_id, u8 pf_req_flr); int rpm_stats_reset(void *rpmd, int lmac_id); +void rpm_x2p_reset(void *rpmd, bool enable); +int rpm_enadis_rx(void *rpmd, int lmac_id, bool enable); #endif /* RPM_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c index 1a97fb9032fa..cd0d7b7774f1 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c @@ -1162,6 +1162,7 @@ cpt: } rvu_program_channels(rvu); + cgx_start_linkup(rvu); err = rvu_mcs_init(rvu); if (err) { diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h index b897845e25fd..a383b5ef5b2d 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h @@ -1025,6 +1025,7 @@ int rvu_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_ int rvu_cgx_cfg_pause_frm(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause); void rvu_mac_reset(struct rvu *rvu, u16 pcifunc); u32 rvu_cgx_get_lmac_fifolen(struct rvu *rvu, int cgx, int lmac); +void cgx_start_linkup(struct rvu *rvu); int npc_get_nixlf_mcam_index(struct npc_mcam *mcam, u16 pcifunc, int nixlf, int type); bool is_mcam_entry_enabled(struct rvu *rvu, struct npc_mcam *mcam, int blkaddr, diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c index 266ecbc1b97a..992fa0b82e8d 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c @@ -349,6 +349,7 @@ static void rvu_cgx_wq_destroy(struct rvu *rvu) int rvu_cgx_init(struct rvu *rvu) { + struct mac_ops *mac_ops; int cgx, err; void *cgxd; @@ -375,6 +376,15 @@ int rvu_cgx_init(struct rvu *rvu) if (err) return err; + /* Clear X2P reset on all MAC blocks */ + for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++) { + cgxd = rvu_cgx_pdata(cgx, rvu); + if (!cgxd) + continue; + mac_ops = get_mac_ops(cgxd); + mac_ops->mac_x2p_reset(cgxd, false); + } + /* Register for CGX 
events */ err = cgx_lmac_event_handler_init(rvu); if (err) @@ -382,10 +392,26 @@ int rvu_cgx_init(struct rvu *rvu) mutex_init(&rvu->cgx_cfg_lock); - /* Ensure event handler registration is completed, before - * we turn on the links - */ - mb(); + return 0; +} + +void cgx_start_linkup(struct rvu *rvu) +{ + unsigned long lmac_bmap; + struct mac_ops *mac_ops; + int cgx, lmac, err; + void *cgxd; + + /* Enable receive on all LMACS */ + for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) { + cgxd = rvu_cgx_pdata(cgx, rvu); + if (!cgxd) + continue; + mac_ops = get_mac_ops(cgxd); + lmac_bmap = cgx_get_lmac_bmap(cgxd); + for_each_set_bit(lmac, &lmac_bmap, rvu->hw->lmac_per_cgx) + mac_ops->mac_enadis_rx(cgxd, lmac, true); + } /* Do link up for all CGX ports */ for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) { @@ -398,8 +424,6 @@ int rvu_cgx_init(struct rvu *rvu) "Link up process failed to start on cgx %d\n", cgx); } - - return 0; } int rvu_cgx_exit(struct rvu *rvu) @@ -923,13 +947,12 @@ int rvu_mbox_handler_cgx_features_get(struct rvu *rvu, u32 rvu_cgx_get_fifolen(struct rvu *rvu) { - struct mac_ops *mac_ops; - u32 fifo_len; + void *cgxd = rvu_first_cgx_pdata(rvu); - mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu)); - fifo_len = mac_ops ? mac_ops->fifo_len : 0; + if (!cgxd) + return 0; - return fifo_len; + return cgx_get_fifo_len(cgxd); } u32 rvu_cgx_get_lmac_fifolen(struct rvu *rvu, int cgx, int lmac) diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index fe38426ec42d..2bf426cea6dd 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -1394,18 +1394,15 @@ static int pxa168_eth_probe(struct platform_device *pdev) printk(KERN_NOTICE "PXA168 10/100 Ethernet Driver\n"); - clk = devm_clk_get(&pdev->dev, NULL); + clk = devm_clk_get_enabled(&pdev->dev, NULL); if (IS_ERR(clk)) { - dev_err(&pdev->dev, "Fast Ethernet failed to get clock\n"); + dev_err(&pdev->dev, "Fast Ethernet failed to get and enable clock\n"); return -ENODEV; } - clk_prepare_enable(clk); dev = alloc_etherdev(sizeof(struct pxa168_eth_private)); - if (!dev) { - err = -ENOMEM; - goto err_clk; - } + if (!dev) + return -ENOMEM; platform_set_drvdata(pdev, dev); pep = netdev_priv(dev); @@ -1523,8 +1520,6 @@ err_free_mdio: mdiobus_free(pep->smi_bus); err_netdev: free_netdev(dev); -err_clk: - clk_disable_unprepare(clk); return err; } @@ -1542,7 +1537,6 @@ static void pxa168_eth_remove(struct platform_device *pdev) if (dev->phydev) phy_disconnect(dev->phydev); - clk_disable_unprepare(pep->clk); mdiobus_unregister(pep->smi_bus); mdiobus_free(pep->smi_bus); unregister_netdev(dev); diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c b/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c index 7251121ab196..16eb3de60eb6 100644 --- a/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c +++ b/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c @@ -366,12 +366,13 @@ static void vcap_api_iterator_init_test(struct kunit *test) struct vcap_typegroup typegroups[] = { { .offset = 0, .width = 2, .value = 2, }, { .offset = 156, .width = 1, .value = 0, }, - { .offset = 0, .width = 0, .value = 0, }, + { } }; struct vcap_typegroup typegroups2[] = { { .offset = 0, .width = 3, .value = 4, }, { .offset = 49, .width = 2, .value = 0, }, { .offset = 98, .width = 2, .value = 0, }, + { } }; vcap_iter_init(&iter, 52, typegroups, 86); @@ -399,6 +400,7 @@ static void vcap_api_iterator_next_test(struct kunit *test) { .offset = 147, .width = 3, .value = 0, }, { .offset = 
196, .width = 2, .value = 0, }, { .offset = 245, .width = 1, .value = 0, }, + { } }; int idx; @@ -433,7 +435,7 @@ static void vcap_api_encode_typegroups_test(struct kunit *test) { .offset = 147, .width = 3, .value = 5, }, { .offset = 196, .width = 2, .value = 2, }, { .offset = 245, .width = 5, .value = 27, }, - { .offset = 0, .width = 0, .value = 0, }, + { } }; vcap_encode_typegroups(stream, 49, typegroups, false); @@ -463,6 +465,7 @@ static void vcap_api_encode_bit_test(struct kunit *test) { .offset = 147, .width = 3, .value = 5, }, { .offset = 196, .width = 2, .value = 2, }, { .offset = 245, .width = 1, .value = 0, }, + { } }; vcap_iter_init(&iter, 49, typegroups, 44); @@ -489,7 +492,7 @@ static void vcap_api_encode_field_test(struct kunit *test) { .offset = 147, .width = 3, .value = 5, }, { .offset = 196, .width = 2, .value = 2, }, { .offset = 245, .width = 5, .value = 27, }, - { .offset = 0, .width = 0, .value = 0, }, + { } }; struct vcap_field rf = { .type = VCAP_FIELD_U32, @@ -538,7 +541,7 @@ static void vcap_api_encode_short_field_test(struct kunit *test) { .offset = 0, .width = 3, .value = 7, }, { .offset = 21, .width = 2, .value = 3, }, { .offset = 42, .width = 1, .value = 1, }, - { .offset = 0, .width = 0, .value = 0, }, + { } }; struct vcap_field rf = { .type = VCAP_FIELD_U32, @@ -608,7 +611,7 @@ static void vcap_api_encode_keyfield_test(struct kunit *test) struct vcap_typegroup tgt[] = { { .offset = 0, .width = 2, .value = 2, }, { .offset = 156, .width = 1, .value = 1, }, - { .offset = 0, .width = 0, .value = 0, }, + { } }; vcap_test_api_init(&admin); @@ -671,7 +674,7 @@ static void vcap_api_encode_max_keyfield_test(struct kunit *test) struct vcap_typegroup tgt[] = { { .offset = 0, .width = 2, .value = 2, }, { .offset = 156, .width = 1, .value = 1, }, - { .offset = 0, .width = 0, .value = 0, }, + { } }; u32 keyres[] = { 0x928e8a84, @@ -732,7 +735,7 @@ static void vcap_api_encode_actionfield_test(struct kunit *test) { .offset = 0, .width = 2, .value = 2, }, { .offset = 21, .width = 1, .value = 1, }, { .offset = 42, .width = 1, .value = 0, }, - { .offset = 0, .width = 0, .value = 0, }, + { } }; vcap_encode_actionfield(&rule, &caf, &rf, tgt); diff --git a/drivers/net/ethernet/realtek/rtase/rtase.h b/drivers/net/ethernet/realtek/rtase/rtase.h index 942f1e531a85..dbc3f92eebc4 100644 --- a/drivers/net/ethernet/realtek/rtase/rtase.h +++ b/drivers/net/ethernet/realtek/rtase/rtase.h @@ -9,7 +9,10 @@ #ifndef RTASE_H #define RTASE_H -#define RTASE_HW_VER_MASK 0x7C800000 +#define RTASE_HW_VER_MASK 0x7C800000 +#define RTASE_HW_VER_906X_7XA 0x00800000 +#define RTASE_HW_VER_906X_7XC 0x04000000 +#define RTASE_HW_VER_907XD_V1 0x04800000 #define RTASE_RX_DMA_BURST_256 4 #define RTASE_TX_DMA_BURST_UNLIMITED 7 @@ -327,6 +330,8 @@ struct rtase_private { u16 int_nums; u16 tx_int_mit; u16 rx_int_mit; + + u32 hw_ver; }; #define RTASE_LSO_64K 64000 diff --git a/drivers/net/ethernet/realtek/rtase/rtase_main.c b/drivers/net/ethernet/realtek/rtase/rtase_main.c index 874994d9ceb9..de7f11232593 100644 --- a/drivers/net/ethernet/realtek/rtase/rtase_main.c +++ b/drivers/net/ethernet/realtek/rtase/rtase_main.c @@ -1714,10 +1714,21 @@ static int rtase_get_settings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { u32 supported = SUPPORTED_MII | SUPPORTED_Pause | SUPPORTED_Asym_Pause; + const struct rtase_private *tp = netdev_priv(dev); ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, supported); - cmd->base.speed = SPEED_5000; + + switch (tp->hw_ver) { + case RTASE_HW_VER_906X_7XA: 
+ case RTASE_HW_VER_906X_7XC: + cmd->base.speed = SPEED_5000; + break; + case RTASE_HW_VER_907XD_V1: + cmd->base.speed = SPEED_10000; + break; + } + cmd->base.duplex = DUPLEX_FULL; cmd->base.port = PORT_MII; cmd->base.autoneg = AUTONEG_DISABLE; @@ -1972,20 +1983,21 @@ static void rtase_init_software_variable(struct pci_dev *pdev, tp->dev->max_mtu = RTASE_MAX_JUMBO_SIZE; } -static bool rtase_check_mac_version_valid(struct rtase_private *tp) +static int rtase_check_mac_version_valid(struct rtase_private *tp) { - u32 hw_ver = rtase_r32(tp, RTASE_TX_CONFIG_0) & RTASE_HW_VER_MASK; - bool known_ver = false; + int ret = -ENODEV; + + tp->hw_ver = rtase_r32(tp, RTASE_TX_CONFIG_0) & RTASE_HW_VER_MASK; - switch (hw_ver) { - case 0x00800000: - case 0x04000000: - case 0x04800000: - known_ver = true; + switch (tp->hw_ver) { + case RTASE_HW_VER_906X_7XA: + case RTASE_HW_VER_906X_7XC: + case RTASE_HW_VER_907XD_V1: + ret = 0; break; } - return known_ver; + return ret; } static int rtase_init_board(struct pci_dev *pdev, struct net_device **dev_out, @@ -2105,9 +2117,13 @@ static int rtase_init_one(struct pci_dev *pdev, tp->pdev = pdev; /* identify chip attached to board */ - if (!rtase_check_mac_version_valid(tp)) - return dev_err_probe(&pdev->dev, -ENODEV, - "unknown chip version, contact rtase maintainers (see MAINTAINERS file)\n"); + ret = rtase_check_mac_version_valid(tp); + if (ret != 0) { + dev_err(&pdev->dev, + "unknown chip version: 0x%08x, contact rtase maintainers (see MAINTAINERS file)\n", + tp->hw_ver); + goto err_out_release_board; + } rtase_init_software_variable(pdev, tp); rtase_init_hardware(tp); @@ -2181,6 +2197,7 @@ err_out_del_napi: netif_napi_del(&ivec->napi); } +err_out_release_board: rtase_release_board(pdev, dev, ioaddr); return ret; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c index 248b30d7b864..16020b72dec8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c @@ -487,6 +487,8 @@ static int socfpga_dwmac_probe(struct platform_device *pdev) plat_dat->select_pcs = socfpga_dwmac_select_pcs; plat_dat->has_gmac = true; + plat_dat->riwt_off = 1; + ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); if (ret) return ret; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 3cdc3910f3a0..9b262cdad60b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1177,6 +1177,9 @@ static int stmmac_init_phy(struct net_device *dev) return -ENODEV; } + if (priv->dma_cap.eee) + phy_support_eee(phydev); + ret = phylink_connect_phy(priv->phylink, phydev); } else { fwnode_handle_put(phy_fwnode); diff --git a/drivers/net/mdio/mdio-ipq4019.c b/drivers/net/mdio/mdio-ipq4019.c index dd3ed2d6430b..d9a94df482d9 100644 --- a/drivers/net/mdio/mdio-ipq4019.c +++ b/drivers/net/mdio/mdio-ipq4019.c @@ -352,8 +352,11 @@ static int ipq4019_mdio_probe(struct platform_device *pdev) /* The platform resource is provided on the chipset IPQ5018 */ /* This resource is optional */ res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - if (res) + if (res) { priv->eth_ldo_rdy = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv->eth_ldo_rdy)) + return PTR_ERR(priv->eth_ldo_rdy); + } bus->name = "ipq4019_mdio"; bus->read = ipq4019_mdio_read_c22; diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c index 
96d0b3a5a9d3..944ae98ad110 100644 --- a/drivers/net/phy/phy-c45.c +++ b/drivers/net/phy/phy-c45.c @@ -1530,7 +1530,7 @@ int genphy_c45_ethtool_get_eee(struct phy_device *phydev, return ret; data->eee_enabled = is_enabled; - data->eee_active = ret; + data->eee_active = phydev->eee_active; linkmode_copy(data->supported, phydev->supported_eee); return 0; diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 4f3e742907cb..0d20b534122b 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -990,14 +990,14 @@ static int phy_check_link_status(struct phy_device *phydev) phydev->state = PHY_RUNNING; err = genphy_c45_eee_is_active(phydev, NULL, NULL, NULL); - if (err <= 0) - phydev->enable_tx_lpi = false; - else - phydev->enable_tx_lpi = phydev->eee_cfg.tx_lpi_enabled; + phydev->eee_active = err > 0; + phydev->enable_tx_lpi = phydev->eee_cfg.tx_lpi_enabled && + phydev->eee_active; phy_link_up(phydev); } else if (!phydev->link && phydev->state != PHY_NOLINK) { phydev->state = PHY_NOLINK; + phydev->eee_active = false; phydev->enable_tx_lpi = false; phy_link_down(phydev); } @@ -1672,7 +1672,7 @@ EXPORT_SYMBOL(phy_ethtool_get_eee); * phy_ethtool_set_eee_noneg - Adjusts MAC LPI configuration without PHY * renegotiation * @phydev: pointer to the target PHY device structure - * @data: pointer to the ethtool_keee structure containing the new EEE settings + * @old_cfg: pointer to the eee_config structure containing the old EEE settings * * This function updates the Energy Efficient Ethernet (EEE) configuration * for cases where only the MAC's Low Power Idle (LPI) configuration changes, @@ -1683,18 +1683,23 @@ EXPORT_SYMBOL(phy_ethtool_get_eee); * configuration. */ static void phy_ethtool_set_eee_noneg(struct phy_device *phydev, - struct ethtool_keee *data) + const struct eee_config *old_cfg) { - if (phydev->eee_cfg.tx_lpi_enabled != data->tx_lpi_enabled || - phydev->eee_cfg.tx_lpi_timer != data->tx_lpi_timer) { - eee_to_eeecfg(&phydev->eee_cfg, data); - phydev->enable_tx_lpi = eeecfg_mac_can_tx_lpi(&phydev->eee_cfg); - if (phydev->link) { - phydev->link = false; - phy_link_down(phydev); - phydev->link = true; - phy_link_up(phydev); - } + bool enable_tx_lpi; + + if (!phydev->link) + return; + + enable_tx_lpi = phydev->eee_cfg.tx_lpi_enabled && phydev->eee_active; + + if (phydev->enable_tx_lpi != enable_tx_lpi || + phydev->eee_cfg.tx_lpi_timer != old_cfg->tx_lpi_timer) { + phydev->enable_tx_lpi = false; + phydev->link = false; + phy_link_down(phydev); + phydev->enable_tx_lpi = enable_tx_lpi; + phydev->link = true; + phy_link_up(phydev); } } @@ -1707,18 +1712,23 @@ static void phy_ethtool_set_eee_noneg(struct phy_device *phydev, */ int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_keee *data) { + struct eee_config old_cfg; int ret; if (!phydev->drv) return -EIO; mutex_lock(&phydev->lock); + + old_cfg = phydev->eee_cfg; + eee_to_eeecfg(&phydev->eee_cfg, data); + ret = genphy_c45_ethtool_set_eee(phydev, data); - if (ret >= 0) { - if (ret == 0) - phy_ethtool_set_eee_noneg(phydev, data); - eee_to_eeecfg(&phydev->eee_cfg, data); - } + if (ret == 0) + phy_ethtool_set_eee_noneg(phydev, &old_cfg); + else if (ret < 0) + phydev->eee_cfg = old_cfg; + mutex_unlock(&phydev->lock); return ret < 0 ? 
ret : 0; diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 8adf77e3557e..531b1b6a37d1 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -1652,13 +1652,13 @@ static int lan78xx_set_wol(struct net_device *netdev, struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]); int ret; + if (wol->wolopts & ~WAKE_ALL) + return -EINVAL; + ret = usb_autopm_get_interface(dev->intf); if (ret < 0) return ret; - if (wol->wolopts & ~WAKE_ALL) - return -EINVAL; - pdata->wol = wol->wolopts; device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts); @@ -2380,6 +2380,7 @@ static int lan78xx_phy_init(struct lan78xx_net *dev) if (dev->chipid == ID_REV_CHIP_ID_7801_) { if (phy_is_pseudo_fixed_link(phydev)) { fixed_phy_unregister(phydev); + phy_device_free(phydev); } else { phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0); @@ -4246,8 +4247,10 @@ static void lan78xx_disconnect(struct usb_interface *intf) phy_disconnect(net->phydev); - if (phy_is_pseudo_fixed_link(phydev)) + if (phy_is_pseudo_fixed_link(phydev)) { fixed_phy_unregister(phydev); + phy_device_free(phydev); + } usb_scuttle_anchored_urbs(&dev->deferred); @@ -4414,29 +4417,30 @@ static int lan78xx_probe(struct usb_interface *intf, period = ep_intr->desc.bInterval; maxp = usb_maxpacket(dev->udev, dev->pipe_intr); - buf = kmalloc(maxp, GFP_KERNEL); - if (!buf) { + + dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL); + if (!dev->urb_intr) { ret = -ENOMEM; goto out5; } - dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL); - if (!dev->urb_intr) { + buf = kmalloc(maxp, GFP_KERNEL); + if (!buf) { ret = -ENOMEM; - goto out6; - } else { - usb_fill_int_urb(dev->urb_intr, dev->udev, - dev->pipe_intr, buf, maxp, - intr_complete, dev, period); - dev->urb_intr->transfer_flags |= URB_FREE_BUFFER; + goto free_urbs; } + usb_fill_int_urb(dev->urb_intr, dev->udev, + dev->pipe_intr, buf, maxp, + intr_complete, dev, period); + dev->urb_intr->transfer_flags |= URB_FREE_BUFFER; + dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out); /* Reject broken descriptors. */ if (dev->maxpacket == 0) { ret = -ENODEV; - goto out6; + goto free_urbs; } /* driver requires remote-wakeup capability during autosuspend. 
*/ @@ -4444,7 +4448,7 @@ static int lan78xx_probe(struct usb_interface *intf, ret = lan78xx_phy_init(dev); if (ret < 0) - goto out7; + goto free_urbs; ret = register_netdev(netdev); if (ret != 0) { @@ -4466,10 +4470,8 @@ static int lan78xx_probe(struct usb_interface *intf, out8: phy_disconnect(netdev->phydev); -out7: +free_urbs: usb_free_urb(dev->urb_intr); -out6: - kfree(buf); out5: lan78xx_unbind(dev, intf); out4: diff --git a/drivers/nvdimm/virtio_pmem.c b/drivers/nvdimm/virtio_pmem.c index c9b97aeabf85..2396d19ce549 100644 --- a/drivers/nvdimm/virtio_pmem.c +++ b/drivers/nvdimm/virtio_pmem.c @@ -143,6 +143,28 @@ static void virtio_pmem_remove(struct virtio_device *vdev) virtio_reset_device(vdev); } +static int virtio_pmem_freeze(struct virtio_device *vdev) +{ + vdev->config->del_vqs(vdev); + virtio_reset_device(vdev); + + return 0; +} + +static int virtio_pmem_restore(struct virtio_device *vdev) +{ + int ret; + + ret = init_vq(vdev->priv); + if (ret) { + dev_err(&vdev->dev, "failed to initialize virtio pmem's vq\n"); + return ret; + } + virtio_device_ready(vdev); + + return 0; +} + static unsigned int features[] = { VIRTIO_PMEM_F_SHMEM_REGION, }; @@ -155,6 +177,8 @@ static struct virtio_driver virtio_pmem_driver = { .validate = virtio_pmem_validate, .probe = virtio_pmem_probe, .remove = virtio_pmem_remove, + .freeze = virtio_pmem_freeze, + .restore = virtio_pmem_restore, }; module_virtio_driver(virtio_pmem_driver); diff --git a/drivers/opp/opp.h b/drivers/opp/opp.h index 4bdb79ffa101..430651e7424a 100644 --- a/drivers/opp/opp.h +++ b/drivers/opp/opp.h @@ -263,9 +263,7 @@ int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, struct opp_table *o int _opp_add_v1(struct opp_table *opp_table, struct device *dev, struct dev_pm_opp_data *data, bool dynamic); void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, int last_cpu); struct opp_table *_add_opp_table_indexed(struct device *dev, int index, bool getclk); -void _put_opp_list_kref(struct opp_table *opp_table); void _required_opps_available(struct dev_pm_opp *opp, int count); -void _update_set_required_opps(struct opp_table *opp_table); static inline bool lazy_linking_pending(struct opp_table *opp_table) { diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig index f73abff416be..8d58efe998ec 100644 --- a/drivers/phy/Kconfig +++ b/drivers/phy/Kconfig @@ -82,6 +82,17 @@ config PHY_AIROHA_PCIE This driver create the basic PHY instance and provides initialize callback for PCIe GEN3 port. +config PHY_NXP_PTN3222 + tristate "NXP PTN3222 1-port eUSB2 to USB2 redriver" + depends on I2C + depends on OF + select GENERIC_PHY + help + Enable this to support NXP PTN3222 1-port eUSB2 to USB2 Redriver. + This redriver performs translation between eUSB2 and USB2 signalling + schemes. It supports all three USB 2.0 data rates: Low Speed, Full + Speed and High Speed. 
+ source "drivers/phy/allwinner/Kconfig" source "drivers/phy/amlogic/Kconfig" source "drivers/phy/broadcom/Kconfig" diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile index ebc399560da4..e281442acc75 100644 --- a/drivers/phy/Makefile +++ b/drivers/phy/Makefile @@ -11,6 +11,7 @@ obj-$(CONFIG_PHY_XGENE) += phy-xgene.o obj-$(CONFIG_PHY_PISTACHIO_USB) += phy-pistachio-usb.o obj-$(CONFIG_USB_LGM_PHY) += phy-lgm-usb.o obj-$(CONFIG_PHY_AIROHA_PCIE) += phy-airoha-pcie.o +obj-$(CONFIG_PHY_NXP_PTN3222) += phy-nxp-ptn3222.o obj-y += allwinner/ \ amlogic/ \ broadcom/ \ diff --git a/drivers/phy/allwinner/phy-sun4i-usb.c b/drivers/phy/allwinner/phy-sun4i-usb.c index b0f19e950601..cd159a71b23c 100644 --- a/drivers/phy/allwinner/phy-sun4i-usb.c +++ b/drivers/phy/allwinner/phy-sun4i-usb.c @@ -1049,11 +1049,11 @@ static const struct of_device_id sun4i_usb_phy_of_match[] = { MODULE_DEVICE_TABLE(of, sun4i_usb_phy_of_match); static struct platform_driver sun4i_usb_phy_driver = { - .probe = sun4i_usb_phy_probe, - .remove_new = sun4i_usb_phy_remove, + .probe = sun4i_usb_phy_probe, + .remove = sun4i_usb_phy_remove, .driver = { - .of_match_table = sun4i_usb_phy_of_match, - .name = "sun4i-usb-phy", + .of_match_table= sun4i_usb_phy_of_match, + .name = "sun4i-usb-phy", } }; module_platform_driver(sun4i_usb_phy_driver); diff --git a/drivers/phy/broadcom/phy-bcm-ns-usb2.c b/drivers/phy/broadcom/phy-bcm-ns-usb2.c index 5213c75b6da6..c5d35031b398 100644 --- a/drivers/phy/broadcom/phy-bcm-ns-usb2.c +++ b/drivers/phy/broadcom/phy-bcm-ns-usb2.c @@ -24,9 +24,6 @@ struct bcm_ns_usb2 { struct phy *phy; struct regmap *clkset; void __iomem *base; - - /* Deprecated binding */ - void __iomem *dmu; }; static int bcm_ns_usb2_phy_init(struct phy *phy) @@ -49,10 +46,7 @@ static int bcm_ns_usb2_phy_init(struct phy *phy) goto err_clk_off; } - if (usb2->base) - usb2ctl = readl(usb2->base); - else - usb2ctl = readl(usb2->dmu + BCMA_DMU_CRU_USB2_CONTROL); + usb2ctl = readl(usb2->base); if (usb2ctl & BCMA_DMU_CRU_USB2_CONTROL_USB_PLL_PDIV_MASK) { usb_pll_pdiv = usb2ctl; @@ -66,24 +60,16 @@ static int bcm_ns_usb2_phy_init(struct phy *phy) usb_pll_ndiv = (1920000000 * usb_pll_pdiv) / ref_clk_rate; /* Unlock DMU PLL settings with some magic value */ - if (usb2->clkset) - regmap_write(usb2->clkset, 0, 0x0000ea68); - else - writel(0x0000ea68, usb2->dmu + BCMA_DMU_CRU_CLKSET_KEY); + regmap_write(usb2->clkset, 0, 0x0000ea68); /* Write USB 2.0 PLL control setting */ usb2ctl &= ~BCMA_DMU_CRU_USB2_CONTROL_USB_PLL_NDIV_MASK; usb2ctl |= usb_pll_ndiv << BCMA_DMU_CRU_USB2_CONTROL_USB_PLL_NDIV_SHIFT; - if (usb2->base) - writel(usb2ctl, usb2->base); - else - writel(usb2ctl, usb2->dmu + BCMA_DMU_CRU_USB2_CONTROL); + + writel(usb2ctl, usb2->base); /* Lock DMU PLL settings */ - if (usb2->clkset) - regmap_write(usb2->clkset, 0, 0x00000000); - else - writel(0x00000000, usb2->dmu + BCMA_DMU_CRU_CLKSET_KEY); + regmap_write(usb2->clkset, 0, 0x00000000); err_clk_off: clk_disable_unprepare(usb2->ref_clk); @@ -107,27 +93,17 @@ static int bcm_ns_usb2_probe(struct platform_device *pdev) return -ENOMEM; usb2->dev = dev; - if (of_property_present(dev->of_node, "brcm,syscon-clkset")) { - usb2->base = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(usb2->base)) { - dev_err(dev, "Failed to map control reg\n"); - return PTR_ERR(usb2->base); - } - - usb2->clkset = syscon_regmap_lookup_by_phandle(dev->of_node, - "brcm,syscon-clkset"); - if (IS_ERR(usb2->clkset)) { - dev_err(dev, "Failed to lookup clkset regmap\n"); - return PTR_ERR(usb2->clkset); - } - } else { - 
usb2->dmu = devm_platform_ioremap_resource_byname(pdev, "dmu"); - if (IS_ERR(usb2->dmu)) { - dev_err(dev, "Failed to map DMU regs\n"); - return PTR_ERR(usb2->dmu); - } + usb2->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(usb2->base)) { + dev_err(dev, "Failed to map control reg\n"); + return PTR_ERR(usb2->base); + } - dev_warn(dev, "using deprecated DT binding\n"); + usb2->clkset = syscon_regmap_lookup_by_phandle(dev->of_node, + "brcm,syscon-clkset"); + if (IS_ERR(usb2->clkset)) { + dev_err(dev, "Failed to lookup clkset regmap\n"); + return PTR_ERR(usb2->clkset); } usb2->ref_clk = devm_clk_get(dev, "phy-ref-clk"); diff --git a/drivers/phy/broadcom/phy-brcm-usb-init.c b/drivers/phy/broadcom/phy-brcm-usb-init.c index 5ebb3a616115..da23078878a9 100644 --- a/drivers/phy/broadcom/phy-brcm-usb-init.c +++ b/drivers/phy/broadcom/phy-brcm-usb-init.c @@ -193,256 +193,251 @@ static const u32 usb_reg_bits_map_table[BRCM_FAMILY_COUNT][USB_CTRL_SELECTOR_COUNT] = { /* 3390B0 */ [BRCM_FAMILY_3390A0] = { - USB_CTRL_SETUP_SCB1_EN_MASK, - USB_CTRL_SETUP_SCB2_EN_MASK, - USB_CTRL_SETUP_SS_EHCI64BIT_EN_MASK, - USB_CTRL_SETUP_STRAP_IPP_SEL_MASK, - USB_CTRL_SETUP_OC3_DISABLE_PORT0_MASK, - USB_CTRL_SETUP_OC3_DISABLE_PORT1_MASK, - USB_CTRL_SETUP_OC3_DISABLE_MASK, - 0, /* USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK */ - 0, /* USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK */ - USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK, - USB_CTRL_USB_PM_USB_PWRDN_MASK, - 0, /* USB_CTRL_USB30_CTL1_XHC_SOFT_RESETB_MASK */ - 0, /* USB_CTRL_USB30_CTL1_USB3_IOC_MASK */ - 0, /* USB_CTRL_USB30_CTL1_USB3_IPP_MASK */ - USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_MASK, - 0, /* USB_CTRL_USB_PM_SOFT_RESET_MASK */ - 0, /* USB_CTRL_SETUP_CC_DRD_MODE_ENABLE_MASK */ - 0, /* USB_CTRL_SETUP_STRAP_CC_DRD_MODE_ENABLE_SEL_MASK */ - USB_CTRL_USB_PM_USB20_HC_RESETB_VAR_MASK, - ENDIAN_SETTINGS, /* USB_CTRL_SETUP ENDIAN bits */ + [USB_CTRL_SETUP_SCB1_EN_SELECTOR] = + USB_CTRL_SETUP_SCB1_EN_MASK, + [USB_CTRL_SETUP_SCB2_EN_SELECTOR] = + USB_CTRL_SETUP_SCB2_EN_MASK, + [USB_CTRL_SETUP_SS_EHCI64BIT_EN_SELECTOR] = + USB_CTRL_SETUP_SS_EHCI64BIT_EN_MASK, + [USB_CTRL_SETUP_STRAP_IPP_SEL_SELECTOR] = + USB_CTRL_SETUP_STRAP_IPP_SEL_MASK, + [USB_CTRL_SETUP_OC3_DISABLE_PORT0_SELECTOR] = + USB_CTRL_SETUP_OC3_DISABLE_PORT0_MASK, + [USB_CTRL_SETUP_OC3_DISABLE_PORT1_SELECTOR] = + USB_CTRL_SETUP_OC3_DISABLE_PORT1_MASK, + [USB_CTRL_SETUP_OC3_DISABLE_SELECTOR] = + USB_CTRL_SETUP_OC3_DISABLE_MASK, + [USB_CTRL_USB_PM_XHC_SOFT_RESETB_SELECTOR] = + USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK, + [USB_CTRL_USB_PM_USB_PWRDN_SELECTOR] = + USB_CTRL_USB_PM_USB_PWRDN_MASK, + [USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_SELECTOR] = + USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_MASK, + [USB_CTRL_USB_PM_USB20_HC_RESETB_SELECTOR] = + USB_CTRL_USB_PM_USB20_HC_RESETB_VAR_MASK, + [USB_CTRL_SETUP_ENDIAN_SELECTOR] = ENDIAN_SETTINGS, }, /* 4908 */ [BRCM_FAMILY_4908] = { - 0, /* USB_CTRL_SETUP_SCB1_EN_MASK */ - 0, /* USB_CTRL_SETUP_SCB2_EN_MASK */ - 0, /* USB_CTRL_SETUP_SS_EHCI64BIT_EN_MASK */ - 0, /* USB_CTRL_SETUP_STRAP_IPP_SEL_MASK */ - 0, /* USB_CTRL_SETUP_OC3_DISABLE_PORT0_MASK */ - 0, /* USB_CTRL_SETUP_OC3_DISABLE_PORT1_MASK */ - 0, /* USB_CTRL_SETUP_OC3_DISABLE_MASK */ - 0, /* USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK */ - 0, /* USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK */ - USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK, - USB_CTRL_USB_PM_USB_PWRDN_MASK, - 0, /* USB_CTRL_USB30_CTL1_XHC_SOFT_RESETB_MASK */ - 0, /* USB_CTRL_USB30_CTL1_USB3_IOC_MASK */ - 0, /* USB_CTRL_USB30_CTL1_USB3_IPP_MASK */ - 0, /* USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_MASK */ - 0, /* 
USB_CTRL_USB_PM_SOFT_RESET_MASK */ - 0, /* USB_CTRL_SETUP_CC_DRD_MODE_ENABLE_MASK */ - 0, /* USB_CTRL_SETUP_STRAP_CC_DRD_MODE_ENABLE_SEL_MASK */ - 0, /* USB_CTRL_USB_PM_USB20_HC_RESETB_VAR_MASK */ - 0, /* USB_CTRL_SETUP ENDIAN bits */ + [USB_CTRL_USB_PM_XHC_SOFT_RESETB_SELECTOR] = + USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK, + [USB_CTRL_USB_PM_USB_PWRDN_SELECTOR] = + USB_CTRL_USB_PM_USB_PWRDN_MASK, }, /* 7250b0 */ [BRCM_FAMILY_7250B0] = { - USB_CTRL_SETUP_SCB1_EN_MASK, - USB_CTRL_SETUP_SCB2_EN_MASK, - USB_CTRL_SETUP_SS_EHCI64BIT_EN_MASK, - 0, /* USB_CTRL_SETUP_STRAP_IPP_SEL_MASK */ - USB_CTRL_SETUP_OC3_DISABLE_PORT0_MASK, - USB_CTRL_SETUP_OC3_DISABLE_PORT1_MASK, - USB_CTRL_SETUP_OC3_DISABLE_MASK, - USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK, - 0, /* USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK */ - USB_CTRL_USB_PM_XHC_SOFT_RESETB_VAR_MASK, - 0, /* USB_CTRL_USB_PM_USB_PWRDN_MASK */ - 0, /* USB_CTRL_USB30_CTL1_XHC_SOFT_RESETB_MASK */ - 0, /* USB_CTRL_USB30_CTL1_USB3_IOC_MASK */ - 0, /* USB_CTRL_USB30_CTL1_USB3_IPP_MASK */ - 0, /* USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_MASK */ - 0, /* USB_CTRL_USB_PM_SOFT_RESET_MASK */ - 0, /* USB_CTRL_SETUP_CC_DRD_MODE_ENABLE_MASK */ - 0, /* USB_CTRL_SETUP_STRAP_CC_DRD_MODE_ENABLE_SEL_MASK */ - USB_CTRL_USB_PM_USB20_HC_RESETB_MASK, - ENDIAN_SETTINGS, /* USB_CTRL_SETUP ENDIAN bits */ + [USB_CTRL_SETUP_SCB1_EN_SELECTOR] = + USB_CTRL_SETUP_SCB1_EN_MASK, + [USB_CTRL_SETUP_SCB2_EN_SELECTOR] = + USB_CTRL_SETUP_SCB2_EN_MASK, + [USB_CTRL_SETUP_SS_EHCI64BIT_EN_SELECTOR] = + USB_CTRL_SETUP_SS_EHCI64BIT_EN_MASK, + [USB_CTRL_SETUP_OC3_DISABLE_PORT0_SELECTOR] = + USB_CTRL_SETUP_OC3_DISABLE_PORT0_MASK, + [USB_CTRL_SETUP_OC3_DISABLE_PORT1_SELECTOR] = + USB_CTRL_SETUP_OC3_DISABLE_PORT1_MASK, + [USB_CTRL_SETUP_OC3_DISABLE_SELECTOR] = + USB_CTRL_SETUP_OC3_DISABLE_MASK, + [USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_SELECTOR] = + USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK, + [USB_CTRL_USB_PM_XHC_SOFT_RESETB_SELECTOR] = + USB_CTRL_USB_PM_XHC_SOFT_RESETB_VAR_MASK, + [USB_CTRL_USB_PM_USB20_HC_RESETB_SELECTOR] = + USB_CTRL_USB_PM_USB20_HC_RESETB_MASK, + [USB_CTRL_SETUP_ENDIAN_SELECTOR] = ENDIAN_SETTINGS, }, /* 7271a0 */ [BRCM_FAMILY_7271A0] = { - 0, /* USB_CTRL_SETUP_SCB1_EN_MASK */ - 0, /* USB_CTRL_SETUP_SCB2_EN_MASK */ - USB_CTRL_SETUP_SS_EHCI64BIT_EN_MASK, - USB_CTRL_SETUP_STRAP_IPP_SEL_MASK, - USB_CTRL_SETUP_OC3_DISABLE_PORT0_MASK, - USB_CTRL_SETUP_OC3_DISABLE_PORT1_MASK, - USB_CTRL_SETUP_OC3_DISABLE_MASK, - 0, /* USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK */ - USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK, - USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK, - USB_CTRL_USB_PM_USB_PWRDN_MASK, - 0, /* USB_CTRL_USB30_CTL1_XHC_SOFT_RESETB_MASK */ - 0, /* USB_CTRL_USB30_CTL1_USB3_IOC_MASK */ - 0, /* USB_CTRL_USB30_CTL1_USB3_IPP_MASK */ - USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_MASK, - USB_CTRL_USB_PM_SOFT_RESET_MASK, - USB_CTRL_SETUP_CC_DRD_MODE_ENABLE_MASK, - USB_CTRL_SETUP_STRAP_CC_DRD_MODE_ENABLE_SEL_MASK, - USB_CTRL_USB_PM_USB20_HC_RESETB_VAR_MASK, - ENDIAN_SETTINGS, /* USB_CTRL_SETUP ENDIAN bits */ + [USB_CTRL_SETUP_SS_EHCI64BIT_EN_SELECTOR] = + USB_CTRL_SETUP_SS_EHCI64BIT_EN_MASK, + [USB_CTRL_SETUP_STRAP_IPP_SEL_SELECTOR] = + USB_CTRL_SETUP_STRAP_IPP_SEL_MASK, + [USB_CTRL_SETUP_OC3_DISABLE_PORT0_SELECTOR] = + USB_CTRL_SETUP_OC3_DISABLE_PORT0_MASK, + [USB_CTRL_SETUP_OC3_DISABLE_PORT1_SELECTOR] = + USB_CTRL_SETUP_OC3_DISABLE_PORT1_MASK, + [USB_CTRL_SETUP_OC3_DISABLE_SELECTOR] = + USB_CTRL_SETUP_OC3_DISABLE_MASK, + [USB_CTRL_USB_PM_BDC_SOFT_RESETB_SELECTOR] = + USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK, + [USB_CTRL_USB_PM_XHC_SOFT_RESETB_SELECTOR] = + 
USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK, + [USB_CTRL_USB_PM_USB_PWRDN_SELECTOR] = + USB_CTRL_USB_PM_USB_PWRDN_MASK, + [USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_SELECTOR] = + USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_MASK, + [USB_CTRL_USB_PM_SOFT_RESET_SELECTOR] = + USB_CTRL_USB_PM_SOFT_RESET_MASK, + [USB_CTRL_SETUP_CC_DRD_MODE_ENABLE_SELECTOR] = + USB_CTRL_SETUP_CC_DRD_MODE_ENABLE_MASK, + [USB_CTRL_SETUP_STRAP_CC_DRD_MODE_ENABLE_SEL_SELECTOR] = + USB_CTRL_SETUP_STRAP_CC_DRD_MODE_ENABLE_SEL_MASK, + [USB_CTRL_USB_PM_USB20_HC_RESETB_SELECTOR] = + USB_CTRL_USB_PM_USB20_HC_RESETB_VAR_MASK, + [USB_CTRL_SETUP_ENDIAN_SELECTOR] = ENDIAN_SETTINGS, }, /* 7364a0 */ [BRCM_FAMILY_7364A0] = { - USB_CTRL_SETUP_SCB1_EN_MASK, - USB_CTRL_SETUP_SCB2_EN_MASK, - USB_CTRL_SETUP_SS_EHCI64BIT_EN_MASK, - 0, /* USB_CTRL_SETUP_STRAP_IPP_SEL_MASK */ - USB_CTRL_SETUP_OC3_DISABLE_PORT0_MASK, - USB_CTRL_SETUP_OC3_DISABLE_PORT1_MASK, - USB_CTRL_SETUP_OC3_DISABLE_MASK, - USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK, - 0, /* USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK */ - USB_CTRL_USB_PM_XHC_SOFT_RESETB_VAR_MASK, - 0, /* USB_CTRL_USB_PM_USB_PWRDN_MASK */ - 0, /* USB_CTRL_USB30_CTL1_XHC_SOFT_RESETB_MASK */ - 0, /* USB_CTRL_USB30_CTL1_USB3_IOC_MASK */ - 0, /* USB_CTRL_USB30_CTL1_USB3_IPP_MASK */ - 0, /* USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_MASK */ - 0, /* USB_CTRL_USB_PM_SOFT_RESET_MASK */ - 0, /* USB_CTRL_SETUP_CC_DRD_MODE_ENABLE_MASK */ - 0, /* USB_CTRL_SETUP_STRAP_CC_DRD_MODE_ENABLE_SEL_MASK */ - USB_CTRL_USB_PM_USB20_HC_RESETB_MASK, - ENDIAN_SETTINGS, /* USB_CTRL_SETUP ENDIAN bits */ + [USB_CTRL_SETUP_SCB1_EN_SELECTOR] = + USB_CTRL_SETUP_SCB1_EN_MASK, + [USB_CTRL_SETUP_SCB2_EN_SELECTOR] = + USB_CTRL_SETUP_SCB2_EN_MASK, + [USB_CTRL_SETUP_SS_EHCI64BIT_EN_SELECTOR] = + USB_CTRL_SETUP_SS_EHCI64BIT_EN_MASK, + [USB_CTRL_SETUP_OC3_DISABLE_PORT0_SELECTOR] = + USB_CTRL_SETUP_OC3_DISABLE_PORT0_MASK, + [USB_CTRL_SETUP_OC3_DISABLE_PORT1_SELECTOR] = + USB_CTRL_SETUP_OC3_DISABLE_PORT1_MASK, + [USB_CTRL_SETUP_OC3_DISABLE_SELECTOR] = + USB_CTRL_SETUP_OC3_DISABLE_MASK, + [USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_SELECTOR] = + USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK, + [USB_CTRL_USB_PM_XHC_SOFT_RESETB_SELECTOR] = + USB_CTRL_USB_PM_XHC_SOFT_RESETB_VAR_MASK, + [USB_CTRL_USB_PM_USB20_HC_RESETB_SELECTOR] = + USB_CTRL_USB_PM_USB20_HC_RESETB_MASK, + [USB_CTRL_SETUP_ENDIAN_SELECTOR] = ENDIAN_SETTINGS, }, /* 7366c0 */ [BRCM_FAMILY_7366C0] = { - USB_CTRL_SETUP_SCB1_EN_MASK, - USB_CTRL_SETUP_SCB2_EN_MASK, - USB_CTRL_SETUP_SS_EHCI64BIT_EN_MASK, - 0, /* USB_CTRL_SETUP_STRAP_IPP_SEL_MASK */ - USB_CTRL_SETUP_OC3_DISABLE_PORT0_MASK, - USB_CTRL_SETUP_OC3_DISABLE_PORT1_MASK, - USB_CTRL_SETUP_OC3_DISABLE_MASK, - 0, /* USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK */ - 0, /* USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK */ - USB_CTRL_USB_PM_XHC_SOFT_RESETB_VAR_MASK, - USB_CTRL_USB_PM_USB_PWRDN_MASK, - 0, /* USB_CTRL_USB30_CTL1_XHC_SOFT_RESETB_MASK */ - 0, /* USB_CTRL_USB30_CTL1_USB3_IOC_MASK */ - 0, /* USB_CTRL_USB30_CTL1_USB3_IPP_MASK */ - 0, /* USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_MASK */ - 0, /* USB_CTRL_USB_PM_SOFT_RESET_MASK */ - 0, /* USB_CTRL_SETUP_CC_DRD_MODE_ENABLE_MASK */ - 0, /* USB_CTRL_SETUP_STRAP_CC_DRD_MODE_ENABLE_SEL_MASK */ - USB_CTRL_USB_PM_USB20_HC_RESETB_MASK, - ENDIAN_SETTINGS, /* USB_CTRL_SETUP ENDIAN bits */ + [USB_CTRL_SETUP_SCB1_EN_SELECTOR] = + USB_CTRL_SETUP_SCB1_EN_MASK, + [USB_CTRL_SETUP_SCB2_EN_SELECTOR] = + USB_CTRL_SETUP_SCB2_EN_MASK, + [USB_CTRL_SETUP_SS_EHCI64BIT_EN_SELECTOR] = + USB_CTRL_SETUP_SS_EHCI64BIT_EN_MASK, + [USB_CTRL_SETUP_OC3_DISABLE_PORT0_SELECTOR] = + 
USB_CTRL_SETUP_OC3_DISABLE_PORT0_MASK, + [USB_CTRL_SETUP_OC3_DISABLE_PORT1_SELECTOR] = + USB_CTRL_SETUP_OC3_DISABLE_PORT1_MASK, + [USB_CTRL_SETUP_OC3_DISABLE_SELECTOR] = + USB_CTRL_SETUP_OC3_DISABLE_MASK, + [USB_CTRL_USB_PM_XHC_SOFT_RESETB_SELECTOR] = + USB_CTRL_USB_PM_XHC_SOFT_RESETB_VAR_MASK, + [USB_CTRL_USB_PM_USB_PWRDN_SELECTOR] = + USB_CTRL_USB_PM_USB_PWRDN_MASK, + [USB_CTRL_USB_PM_USB20_HC_RESETB_SELECTOR] = + USB_CTRL_USB_PM_USB20_HC_RESETB_MASK, + [USB_CTRL_SETUP_ENDIAN_SELECTOR] = ENDIAN_SETTINGS, }, /* 74371A0 */ [BRCM_FAMILY_74371A0] = { - USB_CTRL_SETUP_SCB1_EN_MASK, - USB_CTRL_SETUP_SCB2_EN_MASK, - USB_CTRL_SETUP_SS_EHCI64BIT_EN_VAR_MASK, - 0, /* USB_CTRL_SETUP_STRAP_IPP_SEL_MASK */ - 0, /* USB_CTRL_SETUP_OC3_DISABLE_PORT0_MASK */ - 0, /* USB_CTRL_SETUP_OC3_DISABLE_PORT1_MASK */ - 0, /* USB_CTRL_SETUP_OC3_DISABLE_MASK */ - USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK, - 0, /* USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK */ - 0, /* USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK */ - 0, /* USB_CTRL_USB_PM_USB_PWRDN_MASK */ - USB_CTRL_USB30_CTL1_XHC_SOFT_RESETB_MASK, - USB_CTRL_USB30_CTL1_USB3_IOC_MASK, - USB_CTRL_USB30_CTL1_USB3_IPP_MASK, - 0, /* USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_MASK */ - 0, /* USB_CTRL_USB_PM_SOFT_RESET_MASK */ - 0, /* USB_CTRL_SETUP_CC_DRD_MODE_ENABLE_MASK */ - 0, /* USB_CTRL_SETUP_STRAP_CC_DRD_MODE_ENABLE_SEL_MASK */ - 0, /* USB_CTRL_USB_PM_USB20_HC_RESETB_MASK */ - ENDIAN_SETTINGS, /* USB_CTRL_SETUP ENDIAN bits */ + [USB_CTRL_SETUP_SCB1_EN_SELECTOR] = + USB_CTRL_SETUP_SCB1_EN_MASK, + [USB_CTRL_SETUP_SCB2_EN_SELECTOR] = + USB_CTRL_SETUP_SCB2_EN_MASK, + [USB_CTRL_SETUP_SS_EHCI64BIT_EN_SELECTOR] = + USB_CTRL_SETUP_SS_EHCI64BIT_EN_VAR_MASK, + [USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_SELECTOR] = + USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK, + [USB_CTRL_USB30_CTL1_XHC_SOFT_RESETB_SELECTOR] = + USB_CTRL_USB30_CTL1_XHC_SOFT_RESETB_MASK, + [USB_CTRL_USB30_CTL1_USB3_IOC_SELECTOR] = + USB_CTRL_USB30_CTL1_USB3_IOC_MASK, + [USB_CTRL_USB30_CTL1_USB3_IPP_SELECTOR] = + USB_CTRL_USB30_CTL1_USB3_IPP_MASK, + [USB_CTRL_SETUP_ENDIAN_SELECTOR] = ENDIAN_SETTINGS, }, /* 7439B0 */ [BRCM_FAMILY_7439B0] = { - USB_CTRL_SETUP_SCB1_EN_MASK, - USB_CTRL_SETUP_SCB2_EN_MASK, - USB_CTRL_SETUP_SS_EHCI64BIT_EN_MASK, - USB_CTRL_SETUP_STRAP_IPP_SEL_MASK, - USB_CTRL_SETUP_OC3_DISABLE_PORT0_MASK, - USB_CTRL_SETUP_OC3_DISABLE_PORT1_MASK, - USB_CTRL_SETUP_OC3_DISABLE_MASK, - 0, /* USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK */ - USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK, - USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK, - USB_CTRL_USB_PM_USB_PWRDN_MASK, - 0, /* USB_CTRL_USB30_CTL1_XHC_SOFT_RESETB_MASK */ - 0, /* USB_CTRL_USB30_CTL1_USB3_IOC_MASK */ - 0, /* USB_CTRL_USB30_CTL1_USB3_IPP_MASK */ - USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_MASK, - 0, /* USB_CTRL_USB_PM_SOFT_RESET_MASK */ - 0, /* USB_CTRL_SETUP_CC_DRD_MODE_ENABLE_MASK */ - 0, /* USB_CTRL_SETUP_STRAP_CC_DRD_MODE_ENABLE_SEL_MASK */ - USB_CTRL_USB_PM_USB20_HC_RESETB_VAR_MASK, - ENDIAN_SETTINGS, /* USB_CTRL_SETUP ENDIAN bits */ + [USB_CTRL_SETUP_SCB1_EN_SELECTOR] = + USB_CTRL_SETUP_SCB1_EN_MASK, + [USB_CTRL_SETUP_SCB2_EN_SELECTOR] = + USB_CTRL_SETUP_SCB2_EN_MASK, + [USB_CTRL_SETUP_SS_EHCI64BIT_EN_SELECTOR] = + USB_CTRL_SETUP_SS_EHCI64BIT_EN_MASK, + [USB_CTRL_SETUP_STRAP_IPP_SEL_SELECTOR] = + USB_CTRL_SETUP_STRAP_IPP_SEL_MASK, + [USB_CTRL_SETUP_OC3_DISABLE_PORT0_SELECTOR] = + USB_CTRL_SETUP_OC3_DISABLE_PORT0_MASK, + [USB_CTRL_SETUP_OC3_DISABLE_PORT1_SELECTOR] = + USB_CTRL_SETUP_OC3_DISABLE_PORT1_MASK, + [USB_CTRL_SETUP_OC3_DISABLE_SELECTOR] = + USB_CTRL_SETUP_OC3_DISABLE_MASK, + 
[USB_CTRL_USB_PM_BDC_SOFT_RESETB_SELECTOR] = + USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK, + [USB_CTRL_USB_PM_XHC_SOFT_RESETB_SELECTOR] = + USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK, + [USB_CTRL_USB_PM_USB_PWRDN_SELECTOR] = + USB_CTRL_USB_PM_USB_PWRDN_MASK, + [USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_SELECTOR] = + USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_MASK, + [USB_CTRL_USB_PM_USB20_HC_RESETB_SELECTOR] = + USB_CTRL_USB_PM_USB20_HC_RESETB_VAR_MASK, + [USB_CTRL_SETUP_ENDIAN_SELECTOR] = ENDIAN_SETTINGS, }, /* 7445d0 */ [BRCM_FAMILY_7445D0] = { - USB_CTRL_SETUP_SCB1_EN_MASK, - USB_CTRL_SETUP_SCB2_EN_MASK, - USB_CTRL_SETUP_SS_EHCI64BIT_EN_VAR_MASK, - 0, /* USB_CTRL_SETUP_STRAP_IPP_SEL_MASK */ - USB_CTRL_SETUP_OC3_DISABLE_PORT0_MASK, - USB_CTRL_SETUP_OC3_DISABLE_PORT1_MASK, - USB_CTRL_SETUP_OC3_DISABLE_MASK, - USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK, - 0, /* USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK */ - 0, /* USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK */ - 0, /* USB_CTRL_USB_PM_USB_PWRDN_MASK */ - USB_CTRL_USB30_CTL1_XHC_SOFT_RESETB_MASK, - 0, /* USB_CTRL_USB30_CTL1_USB3_IOC_MASK */ - 0, /* USB_CTRL_USB30_CTL1_USB3_IPP_MASK */ - 0, /* USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_MASK */ - 0, /* USB_CTRL_USB_PM_SOFT_RESET_MASK */ - 0, /* USB_CTRL_SETUP_CC_DRD_MODE_ENABLE_MASK */ - 0, /* USB_CTRL_SETUP_STRAP_CC_DRD_MODE_ENABLE_SEL_MASK */ - USB_CTRL_USB_PM_USB20_HC_RESETB_VAR_MASK, - ENDIAN_SETTINGS, /* USB_CTRL_SETUP ENDIAN bits */ + [USB_CTRL_SETUP_SCB1_EN_SELECTOR] = + USB_CTRL_SETUP_SCB1_EN_MASK, + [USB_CTRL_SETUP_SCB2_EN_SELECTOR] = + USB_CTRL_SETUP_SCB2_EN_MASK, + [USB_CTRL_SETUP_SS_EHCI64BIT_EN_SELECTOR] = + USB_CTRL_SETUP_SS_EHCI64BIT_EN_VAR_MASK, + [USB_CTRL_SETUP_OC3_DISABLE_PORT0_SELECTOR] = + USB_CTRL_SETUP_OC3_DISABLE_PORT0_MASK, + [USB_CTRL_SETUP_OC3_DISABLE_PORT1_SELECTOR] = + USB_CTRL_SETUP_OC3_DISABLE_PORT1_MASK, + [USB_CTRL_SETUP_OC3_DISABLE_SELECTOR] = + USB_CTRL_SETUP_OC3_DISABLE_MASK, + [USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_SELECTOR] = + USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK, + [USB_CTRL_USB30_CTL1_XHC_SOFT_RESETB_SELECTOR] = + USB_CTRL_USB30_CTL1_XHC_SOFT_RESETB_MASK, + [USB_CTRL_USB_PM_USB20_HC_RESETB_SELECTOR] = + USB_CTRL_USB_PM_USB20_HC_RESETB_VAR_MASK, + [USB_CTRL_SETUP_ENDIAN_SELECTOR] = ENDIAN_SETTINGS, }, /* 7260a0 */ [BRCM_FAMILY_7260A0] = { - 0, /* USB_CTRL_SETUP_SCB1_EN_MASK */ - 0, /* USB_CTRL_SETUP_SCB2_EN_MASK */ - USB_CTRL_SETUP_SS_EHCI64BIT_EN_MASK, - USB_CTRL_SETUP_STRAP_IPP_SEL_MASK, - USB_CTRL_SETUP_OC3_DISABLE_PORT0_MASK, - USB_CTRL_SETUP_OC3_DISABLE_PORT1_MASK, - USB_CTRL_SETUP_OC3_DISABLE_MASK, - 0, /* USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK */ - USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK, - USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK, - USB_CTRL_USB_PM_USB_PWRDN_MASK, - 0, /* USB_CTRL_USB30_CTL1_XHC_SOFT_RESETB_MASK */ - 0, /* USB_CTRL_USB30_CTL1_USB3_IOC_MASK */ - 0, /* USB_CTRL_USB30_CTL1_USB3_IPP_MASK */ - USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_MASK, - USB_CTRL_USB_PM_SOFT_RESET_MASK, - USB_CTRL_SETUP_CC_DRD_MODE_ENABLE_MASK, - USB_CTRL_SETUP_STRAP_CC_DRD_MODE_ENABLE_SEL_MASK, - USB_CTRL_USB_PM_USB20_HC_RESETB_VAR_MASK, - ENDIAN_SETTINGS, /* USB_CTRL_SETUP ENDIAN bits */ + [USB_CTRL_SETUP_SS_EHCI64BIT_EN_SELECTOR] = + USB_CTRL_SETUP_SS_EHCI64BIT_EN_MASK, + [USB_CTRL_SETUP_STRAP_IPP_SEL_SELECTOR] = + USB_CTRL_SETUP_STRAP_IPP_SEL_MASK, + [USB_CTRL_SETUP_OC3_DISABLE_PORT0_SELECTOR] = + USB_CTRL_SETUP_OC3_DISABLE_PORT0_MASK, + [USB_CTRL_SETUP_OC3_DISABLE_PORT1_SELECTOR] = + USB_CTRL_SETUP_OC3_DISABLE_PORT1_MASK, + [USB_CTRL_SETUP_OC3_DISABLE_SELECTOR] = + USB_CTRL_SETUP_OC3_DISABLE_MASK, + 
[USB_CTRL_USB_PM_BDC_SOFT_RESETB_SELECTOR] = + USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK, + [USB_CTRL_USB_PM_XHC_SOFT_RESETB_SELECTOR] = + USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK, + [USB_CTRL_USB_PM_USB_PWRDN_SELECTOR] = + USB_CTRL_USB_PM_USB_PWRDN_MASK, + [USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_SELECTOR] = + USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_MASK, + [USB_CTRL_USB_PM_SOFT_RESET_SELECTOR] = + USB_CTRL_USB_PM_SOFT_RESET_MASK, + [USB_CTRL_SETUP_CC_DRD_MODE_ENABLE_SELECTOR] = + USB_CTRL_SETUP_CC_DRD_MODE_ENABLE_MASK, + [USB_CTRL_SETUP_STRAP_CC_DRD_MODE_ENABLE_SEL_SELECTOR] = + USB_CTRL_SETUP_STRAP_CC_DRD_MODE_ENABLE_SEL_MASK, + [USB_CTRL_USB_PM_USB20_HC_RESETB_SELECTOR] = + USB_CTRL_USB_PM_USB20_HC_RESETB_VAR_MASK, + [USB_CTRL_SETUP_ENDIAN_SELECTOR] = ENDIAN_SETTINGS, }, /* 7278a0 */ [BRCM_FAMILY_7278A0] = { - 0, /* USB_CTRL_SETUP_SCB1_EN_MASK */ - 0, /* USB_CTRL_SETUP_SCB2_EN_MASK */ - 0, /*USB_CTRL_SETUP_SS_EHCI64BIT_EN_MASK */ - USB_CTRL_SETUP_STRAP_IPP_SEL_MASK, - USB_CTRL_SETUP_OC3_DISABLE_PORT0_MASK, - USB_CTRL_SETUP_OC3_DISABLE_PORT1_MASK, - USB_CTRL_SETUP_OC3_DISABLE_MASK, - 0, /* USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK */ - USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK, - USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK, - USB_CTRL_USB_PM_USB_PWRDN_MASK, - 0, /* USB_CTRL_USB30_CTL1_XHC_SOFT_RESETB_MASK */ - 0, /* USB_CTRL_USB30_CTL1_USB3_IOC_MASK */ - 0, /* USB_CTRL_USB30_CTL1_USB3_IPP_MASK */ - USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_MASK, - USB_CTRL_USB_PM_SOFT_RESET_MASK, - 0, /* USB_CTRL_SETUP_CC_DRD_MODE_ENABLE_MASK */ - 0, /* USB_CTRL_SETUP_STRAP_CC_DRD_MODE_ENABLE_SEL_MASK */ - 0, /* USB_CTRL_USB_PM_USB20_HC_RESETB_MASK */ - 0, /* USB_CTRL_SETUP ENDIAN bits */ + [USB_CTRL_SETUP_STRAP_IPP_SEL_SELECTOR] = + USB_CTRL_SETUP_STRAP_IPP_SEL_MASK, + [USB_CTRL_SETUP_OC3_DISABLE_PORT0_SELECTOR] = + USB_CTRL_SETUP_OC3_DISABLE_PORT0_MASK, + [USB_CTRL_SETUP_OC3_DISABLE_PORT1_SELECTOR] = + USB_CTRL_SETUP_OC3_DISABLE_PORT1_MASK, + [USB_CTRL_SETUP_OC3_DISABLE_SELECTOR] = + USB_CTRL_SETUP_OC3_DISABLE_MASK, + [USB_CTRL_USB_PM_BDC_SOFT_RESETB_SELECTOR] = + USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK, + [USB_CTRL_USB_PM_XHC_SOFT_RESETB_SELECTOR] = + USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK, + [USB_CTRL_USB_PM_USB_PWRDN_SELECTOR] = + USB_CTRL_USB_PM_USB_PWRDN_MASK, + [USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_SELECTOR] = + USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_MASK, + [USB_CTRL_USB_PM_SOFT_RESET_SELECTOR] = + USB_CTRL_USB_PM_SOFT_RESET_MASK, }, }; diff --git a/drivers/phy/broadcom/phy-brcm-usb.c b/drivers/phy/broadcom/phy-brcm-usb.c index ad2eec095601..6362ca5b7fb6 100644 --- a/drivers/phy/broadcom/phy-brcm-usb.c +++ b/drivers/phy/broadcom/phy-brcm-usb.c @@ -667,7 +667,7 @@ MODULE_DEVICE_TABLE(of, brcm_usb_dt_ids); static struct platform_driver brcm_usb_driver = { .probe = brcm_usb_phy_probe, - .remove_new = brcm_usb_phy_remove, + .remove = brcm_usb_phy_remove, .driver = { .name = "brcmstb-usb-phy", .pm = &brcm_usb_phy_pm_ops, diff --git a/drivers/phy/cadence/cdns-dphy.c b/drivers/phy/cadence/cdns-dphy.c index dddb66de6dba..ed87a3970f83 100644 --- a/drivers/phy/cadence/cdns-dphy.c +++ b/drivers/phy/cadence/cdns-dphy.c @@ -472,7 +472,7 @@ MODULE_DEVICE_TABLE(of, cdns_dphy_of_match); static struct platform_driver cdns_dphy_platform_driver = { .probe = cdns_dphy_probe, - .remove_new = cdns_dphy_remove, + .remove = cdns_dphy_remove, .driver = { .name = "cdns-mipi-dphy", .of_match_table = cdns_dphy_of_match, diff --git a/drivers/phy/cadence/phy-cadence-sierra.c b/drivers/phy/cadence/phy-cadence-sierra.c index dfc4f55d112e..45a5c00843bf 100644 --- 
a/drivers/phy/cadence/phy-cadence-sierra.c +++ b/drivers/phy/cadence/phy-cadence-sierra.c @@ -2731,7 +2731,7 @@ MODULE_DEVICE_TABLE(of, cdns_sierra_id_table); static struct platform_driver cdns_sierra_driver = { .probe = cdns_sierra_phy_probe, - .remove_new = cdns_sierra_phy_remove, + .remove = cdns_sierra_phy_remove, .driver = { .name = "cdns-sierra-phy", .of_match_table = cdns_sierra_id_table, diff --git a/drivers/phy/cadence/phy-cadence-torrent.c b/drivers/phy/cadence/phy-cadence-torrent.c index 8bbbbb87bb22..a281c0dfae97 100644 --- a/drivers/phy/cadence/phy-cadence-torrent.c +++ b/drivers/phy/cadence/phy-cadence-torrent.c @@ -5440,8 +5440,8 @@ MODULE_DEVICE_TABLE(of, cdns_torrent_phy_of_match); static struct platform_driver cdns_torrent_phy_driver = { .probe = cdns_torrent_phy_probe, - .remove_new = cdns_torrent_phy_remove, - .driver = { + .remove = cdns_torrent_phy_remove, + .driver = { .name = "cdns-torrent-phy", .of_match_table = cdns_torrent_phy_of_match, .pm = pm_sleep_ptr(&cdns_torrent_phy_pm_ops), diff --git a/drivers/phy/freescale/phy-fsl-imx8qm-lvds-phy.c b/drivers/phy/freescale/phy-fsl-imx8qm-lvds-phy.c index 38388dd04bdc..7aef2f59e8eb 100644 --- a/drivers/phy/freescale/phy-fsl-imx8qm-lvds-phy.c +++ b/drivers/phy/freescale/phy-fsl-imx8qm-lvds-phy.c @@ -433,12 +433,12 @@ static const struct of_device_id mixel_lvds_phy_of_match[] = { MODULE_DEVICE_TABLE(of, mixel_lvds_phy_of_match); static struct platform_driver mixel_lvds_phy_driver = { - .probe = mixel_lvds_phy_probe, - .remove_new = mixel_lvds_phy_remove, + .probe = mixel_lvds_phy_probe, + .remove = mixel_lvds_phy_remove, .driver = { .pm = &mixel_lvds_phy_pm_ops, .name = "mixel-lvds-phy", - .of_match_table = mixel_lvds_phy_of_match, + .of_match_table = mixel_lvds_phy_of_match, } }; module_platform_driver(mixel_lvds_phy_driver); diff --git a/drivers/phy/freescale/phy-fsl-lynx-28g.c b/drivers/phy/freescale/phy-fsl-lynx-28g.c index b86da8e9daa4..f7994e8983c8 100644 --- a/drivers/phy/freescale/phy-fsl-lynx-28g.c +++ b/drivers/phy/freescale/phy-fsl-lynx-28g.c @@ -631,9 +631,9 @@ static const struct of_device_id lynx_28g_of_match_table[] = { MODULE_DEVICE_TABLE(of, lynx_28g_of_match_table); static struct platform_driver lynx_28g_driver = { - .probe = lynx_28g_probe, - .remove_new = lynx_28g_remove, - .driver = { + .probe = lynx_28g_probe, + .remove = lynx_28g_remove, + .driver = { .name = "lynx-28g", .of_match_table = lynx_28g_of_match_table, }, diff --git a/drivers/phy/freescale/phy-fsl-samsung-hdmi.c b/drivers/phy/freescale/phy-fsl-samsung-hdmi.c index 9048cdc760c2..2c8038864357 100644 --- a/drivers/phy/freescale/phy-fsl-samsung-hdmi.c +++ b/drivers/phy/freescale/phy-fsl-samsung-hdmi.c @@ -14,352 +14,265 @@ #include <linux/platform_device.h> #include <linux/pm_runtime.h> -#define PHY_REG_00 0x00 -#define PHY_REG_01 0x04 -#define PHY_REG_02 0x08 -#define PHY_REG_08 0x20 -#define PHY_REG_09 0x24 -#define PHY_REG_10 0x28 -#define PHY_REG_11 0x2c - -#define PHY_REG_12 0x30 -#define REG12_CK_DIV_MASK GENMASK(5, 4) - -#define PHY_REG_13 0x34 -#define REG13_TG_CODE_LOW_MASK GENMASK(7, 0) - -#define PHY_REG_14 0x38 -#define REG14_TOL_MASK GENMASK(7, 4) -#define REG14_RP_CODE_MASK GENMASK(3, 1) -#define REG14_TG_CODE_HIGH_MASK GENMASK(0, 0) - -#define PHY_REG_15 0x3c -#define PHY_REG_16 0x40 -#define PHY_REG_17 0x44 -#define PHY_REG_18 0x48 -#define PHY_REG_19 0x4c -#define PHY_REG_20 0x50 - -#define PHY_REG_21 0x54 -#define REG21_SEL_TX_CK_INV BIT(7) -#define REG21_PMS_S_MASK GENMASK(3, 0) - -#define PHY_REG_22 0x58 -#define PHY_REG_23 
0x5c -#define PHY_REG_24 0x60 -#define PHY_REG_25 0x64 -#define PHY_REG_26 0x68 -#define PHY_REG_27 0x6c -#define PHY_REG_28 0x70 -#define PHY_REG_29 0x74 -#define PHY_REG_30 0x78 -#define PHY_REG_31 0x7c -#define PHY_REG_32 0x80 +#define PHY_REG(reg) (reg * 4) +#define REG01_PMS_P_MASK GENMASK(3, 0) +#define REG03_PMS_S_MASK GENMASK(7, 4) +#define REG12_CK_DIV_MASK GENMASK(5, 4) + +#define REG13_TG_CODE_LOW_MASK GENMASK(7, 0) + +#define REG14_TOL_MASK GENMASK(7, 4) +#define REG14_RP_CODE_MASK GENMASK(3, 1) +#define REG14_TG_CODE_HIGH_MASK GENMASK(0, 0) + +#define REG21_SEL_TX_CK_INV BIT(7) +#define REG21_PMS_S_MASK GENMASK(3, 0) /* * REG33 does not match the ref manual. According to Sandor Yu from NXP, * "There is a doc issue on the i.MX8MP latest RM" * REG33 is being used per guidance from Sandor */ +#define REG33_MODE_SET_DONE BIT(7) +#define REG33_FIX_DA BIT(1) + +#define REG34_PHY_READY BIT(7) +#define REG34_PLL_LOCK BIT(6) +#define REG34_PHY_CLK_READY BIT(5) -#define PHY_REG_33 0x84 -#define REG33_MODE_SET_DONE BIT(7) -#define REG33_FIX_DA BIT(1) - -#define PHY_REG_34 0x88 -#define REG34_PHY_READY BIT(7) -#define REG34_PLL_LOCK BIT(6) -#define REG34_PHY_CLK_READY BIT(5) - -#define PHY_REG_35 0x8c -#define PHY_REG_36 0x90 -#define PHY_REG_37 0x94 -#define PHY_REG_38 0x98 -#define PHY_REG_39 0x9c -#define PHY_REG_40 0xa0 -#define PHY_REG_41 0xa4 -#define PHY_REG_42 0xa8 -#define PHY_REG_43 0xac -#define PHY_REG_44 0xb0 -#define PHY_REG_45 0xb4 -#define PHY_REG_46 0xb8 -#define PHY_REG_47 0xbc - -#define PHY_PLL_DIV_REGS_NUM 6 +#ifndef MHZ +#define MHZ (1000UL * 1000UL) +#endif + +#define PHY_PLL_DIV_REGS_NUM 7 struct phy_config { u32 pixclk; u8 pll_div_regs[PHY_PLL_DIV_REGS_NUM]; }; +/* + * The calculated_phy_pll_cfg only handles integer divider for PMS, + * meaning the last four entries will be fixed, but the first three will + * be calculated by the PMS calculator. 
+ */ +static struct phy_config calculated_phy_pll_cfg = { + .pixclk = 0, + .pll_div_regs = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00 }, +}; + +/* The lookup table contains values for which the fractional divider is used */ static const struct phy_config phy_pll_cfg[] = { { .pixclk = 22250000, - .pll_div_regs = { 0x4b, 0xf1, 0x89, 0x88, 0x80, 0x40 }, + .pll_div_regs = { 0xd1, 0x4b, 0xf1, 0x89, 0x88, 0x80, 0x40 }, }, { .pixclk = 23750000, - .pll_div_regs = { 0x50, 0xf1, 0x86, 0x85, 0x80, 0x40 }, - }, { - .pixclk = 24000000, - .pll_div_regs = { 0x50, 0xf0, 0x00, 0x00, 0x80, 0x00 }, + .pll_div_regs = { 0xd1, 0x50, 0xf1, 0x86, 0x85, 0x80, 0x40 }, }, { .pixclk = 24024000, - .pll_div_regs = { 0x50, 0xf1, 0x99, 0x02, 0x80, 0x40 }, + .pll_div_regs = { 0xd1, 0x50, 0xf1, 0x99, 0x02, 0x80, 0x40 }, }, { .pixclk = 25175000, - .pll_div_regs = { 0x54, 0xfc, 0xcc, 0x91, 0x80, 0x40 }, - }, { - .pixclk = 25200000, - .pll_div_regs = { 0x54, 0xf0, 0x00, 0x00, 0x80, 0x00 }, - }, { + .pll_div_regs = { 0xd1, 0x54, 0xfc, 0xcc, 0x91, 0x80, 0x40 }, + }, { .pixclk = 26750000, - .pll_div_regs = { 0x5a, 0xf2, 0x89, 0x88, 0x80, 0x40 }, - }, { - .pixclk = 27000000, - .pll_div_regs = { 0x5a, 0xf0, 0x00, 0x00, 0x80, 0x00 }, - }, { + .pll_div_regs = { 0xd1, 0x5a, 0xf2, 0x89, 0x88, 0x80, 0x40 }, + }, { .pixclk = 27027000, - .pll_div_regs = { 0x5a, 0xf2, 0xfd, 0x0c, 0x80, 0x40 }, + .pll_div_regs = { 0xd1, 0x5a, 0xf2, 0xfd, 0x0c, 0x80, 0x40 }, }, { .pixclk = 29500000, - .pll_div_regs = { 0x62, 0xf4, 0x95, 0x08, 0x80, 0x40 }, + .pll_div_regs = { 0xd1, 0x62, 0xf4, 0x95, 0x08, 0x80, 0x40 }, }, { .pixclk = 30750000, - .pll_div_regs = { 0x66, 0xf4, 0x82, 0x01, 0x88, 0x45 }, + .pll_div_regs = { 0xd1, 0x66, 0xf4, 0x82, 0x01, 0x88, 0x45 }, }, { .pixclk = 30888000, - .pll_div_regs = { 0x66, 0xf4, 0x99, 0x18, 0x88, 0x45 }, + .pll_div_regs = { 0xd1, 0x66, 0xf4, 0x99, 0x18, 0x88, 0x45 }, }, { .pixclk = 33750000, - .pll_div_regs = { 0x70, 0xf4, 0x82, 0x01, 0x80, 0x40 }, + .pll_div_regs = { 0xd1, 0x70, 0xf4, 0x82, 0x01, 0x80, 0x40 }, }, { .pixclk = 35000000, - .pll_div_regs = { 0x58, 0xb8, 0x8b, 0x88, 0x80, 0x40 }, - }, { - .pixclk = 36000000, - .pll_div_regs = { 0x5a, 0xb0, 0x00, 0x00, 0x80, 0x00 }, - }, { + .pll_div_regs = { 0xd1, 0x58, 0xb8, 0x8b, 0x88, 0x80, 0x40 }, + }, { .pixclk = 36036000, - .pll_div_regs = { 0x5a, 0xb2, 0xfd, 0x0c, 0x80, 0x40 }, - }, { - .pixclk = 40000000, - .pll_div_regs = { 0x64, 0xb0, 0x00, 0x00, 0x80, 0x00 }, - }, { - .pixclk = 43200000, - .pll_div_regs = { 0x5a, 0x90, 0x00, 0x00, 0x80, 0x00 }, + .pll_div_regs = { 0xd1, 0x5a, 0xb2, 0xfd, 0x0c, 0x80, 0x40 }, }, { .pixclk = 43243200, - .pll_div_regs = { 0x5a, 0x92, 0xfd, 0x0c, 0x80, 0x40 }, + .pll_div_regs = { 0xd1, 0x5a, 0x92, 0xfd, 0x0c, 0x80, 0x40 }, }, { .pixclk = 44500000, - .pll_div_regs = { 0x5c, 0x92, 0x98, 0x11, 0x84, 0x41 }, + .pll_div_regs = { 0xd1, 0x5c, 0x92, 0x98, 0x11, 0x84, 0x41 }, }, { .pixclk = 47000000, - .pll_div_regs = { 0x62, 0x94, 0x95, 0x82, 0x80, 0x40 }, + .pll_div_regs = { 0xd1, 0x62, 0x94, 0x95, 0x82, 0x80, 0x40 }, }, { .pixclk = 47500000, - .pll_div_regs = { 0x63, 0x96, 0xa1, 0x82, 0x80, 0x40 }, + .pll_div_regs = { 0xd1, 0x63, 0x96, 0xa1, 0x82, 0x80, 0x40 }, }, { .pixclk = 50349650, - .pll_div_regs = { 0x54, 0x7c, 0xc3, 0x8f, 0x80, 0x40 }, - }, { - .pixclk = 50400000, - .pll_div_regs = { 0x54, 0x70, 0x00, 0x00, 0x80, 0x00 }, + .pll_div_regs = { 0xd1, 0x54, 0x7c, 0xc3, 0x8f, 0x80, 0x40 }, }, { .pixclk = 53250000, - .pll_div_regs = { 0x58, 0x72, 0x84, 0x03, 0x82, 0x41 }, + .pll_div_regs = { 0xd1, 0x58, 0x72, 0x84, 0x03, 0x82, 0x41 }, },
{ .pixclk = 53500000, - .pll_div_regs = { 0x5a, 0x72, 0x89, 0x88, 0x80, 0x40 }, - }, { - .pixclk = 54000000, - .pll_div_regs = { 0x5a, 0x70, 0x00, 0x00, 0x80, 0x00 }, - }, { + .pll_div_regs = { 0xd1, 0x5a, 0x72, 0x89, 0x88, 0x80, 0x40 }, + }, { .pixclk = 54054000, - .pll_div_regs = { 0x5a, 0x72, 0xfd, 0x0c, 0x80, 0x40 }, + .pll_div_regs = { 0xd1, 0x5a, 0x72, 0xfd, 0x0c, 0x80, 0x40 }, }, { .pixclk = 59000000, - .pll_div_regs = { 0x62, 0x74, 0x95, 0x08, 0x80, 0x40 }, + .pll_div_regs = { 0xd1, 0x62, 0x74, 0x95, 0x08, 0x80, 0x40 }, }, { .pixclk = 59340659, - .pll_div_regs = { 0x62, 0x74, 0xdb, 0x52, 0x88, 0x47 }, - }, { - .pixclk = 59400000, - .pll_div_regs = { 0x63, 0x70, 0x00, 0x00, 0x80, 0x00 }, - }, { + .pll_div_regs = { 0xd1, 0x62, 0x74, 0xdb, 0x52, 0x88, 0x47 }, + }, { .pixclk = 61500000, - .pll_div_regs = { 0x66, 0x74, 0x82, 0x01, 0x88, 0x45 }, + .pll_div_regs = { 0xd1, 0x66, 0x74, 0x82, 0x01, 0x88, 0x45 }, }, { .pixclk = 63500000, - .pll_div_regs = { 0x69, 0x74, 0x89, 0x08, 0x80, 0x40 }, + .pll_div_regs = { 0xd1, 0x69, 0x74, 0x89, 0x08, 0x80, 0x40 }, }, { .pixclk = 67500000, - .pll_div_regs = { 0x54, 0x52, 0x87, 0x03, 0x80, 0x40 }, + .pll_div_regs = { 0xd1, 0x54, 0x52, 0x87, 0x03, 0x80, 0x40 }, }, { .pixclk = 70000000, - .pll_div_regs = { 0x58, 0x58, 0x8b, 0x88, 0x80, 0x40 }, - }, { - .pixclk = 72000000, - .pll_div_regs = { 0x5a, 0x50, 0x00, 0x00, 0x80, 0x00 }, - }, { + .pll_div_regs = { 0xd1, 0x58, 0x58, 0x8b, 0x88, 0x80, 0x40 }, + }, { .pixclk = 72072000, - .pll_div_regs = { 0x5a, 0x52, 0xfd, 0x0c, 0x80, 0x40 }, + .pll_div_regs = { 0xd1, 0x5a, 0x52, 0xfd, 0x0c, 0x80, 0x40 }, }, { .pixclk = 74176000, - .pll_div_regs = { 0x5d, 0x58, 0xdb, 0xA2, 0x88, 0x41 }, + .pll_div_regs = { 0xd1, 0x5d, 0x58, 0xdb, 0xA2, 0x88, 0x41 }, }, { .pixclk = 74250000, - .pll_div_regs = { 0x5c, 0x52, 0x90, 0x0d, 0x84, 0x41 }, + .pll_div_regs = { 0xd1, 0x5c, 0x52, 0x90, 0x0d, 0x84, 0x41 }, }, { .pixclk = 78500000, - .pll_div_regs = { 0x62, 0x54, 0x87, 0x01, 0x80, 0x40 }, - }, { - .pixclk = 80000000, - .pll_div_regs = { 0x64, 0x50, 0x00, 0x00, 0x80, 0x00 }, - }, { + .pll_div_regs = { 0xd1, 0x62, 0x54, 0x87, 0x01, 0x80, 0x40 }, + }, { .pixclk = 82000000, - .pll_div_regs = { 0x66, 0x54, 0x82, 0x01, 0x88, 0x45 }, + .pll_div_regs = { 0xd1, 0x66, 0x54, 0x82, 0x01, 0x88, 0x45 }, }, { .pixclk = 82500000, - .pll_div_regs = { 0x67, 0x54, 0x88, 0x01, 0x90, 0x49 }, + .pll_div_regs = { 0xd1, 0x67, 0x54, 0x88, 0x01, 0x90, 0x49 }, }, { .pixclk = 89000000, - .pll_div_regs = { 0x70, 0x54, 0x84, 0x83, 0x80, 0x40 }, + .pll_div_regs = { 0xd1, 0x70, 0x54, 0x84, 0x83, 0x80, 0x40 }, }, { .pixclk = 90000000, - .pll_div_regs = { 0x70, 0x54, 0x82, 0x01, 0x80, 0x40 }, + .pll_div_regs = { 0xd1, 0x70, 0x54, 0x82, 0x01, 0x80, 0x40 }, }, { .pixclk = 94000000, - .pll_div_regs = { 0x4e, 0x32, 0xa7, 0x10, 0x80, 0x40 }, + .pll_div_regs = { 0xd1, 0x4e, 0x32, 0xa7, 0x10, 0x80, 0x40 }, }, { .pixclk = 95000000, - .pll_div_regs = { 0x50, 0x31, 0x86, 0x85, 0x80, 0x40 }, + .pll_div_regs = { 0xd1, 0x50, 0x31, 0x86, 0x85, 0x80, 0x40 }, }, { .pixclk = 98901099, - .pll_div_regs = { 0x52, 0x3a, 0xdb, 0x4c, 0x88, 0x47 }, + .pll_div_regs = { 0xd1, 0x52, 0x3a, 0xdb, 0x4c, 0x88, 0x47 }, }, { .pixclk = 99000000, - .pll_div_regs = { 0x52, 0x32, 0x82, 0x01, 0x88, 0x47 }, + .pll_div_regs = { 0xd1, 0x52, 0x32, 0x82, 0x01, 0x88, 0x47 }, }, { .pixclk = 100699300, - .pll_div_regs = { 0x54, 0x3c, 0xc3, 0x8f, 0x80, 0x40 }, - }, { - .pixclk = 100800000, - .pll_div_regs = { 0x54, 0x30, 0x00, 0x00, 0x80, 0x00 }, - }, { + .pll_div_regs = { 0xd1, 0x54, 0x3c, 0xc3, 0x8f, 
0x80, 0x40 }, + }, { .pixclk = 102500000, - .pll_div_regs = { 0x55, 0x32, 0x8c, 0x05, 0x90, 0x4b }, + .pll_div_regs = { 0xd1, 0x55, 0x32, 0x8c, 0x05, 0x90, 0x4b }, }, { .pixclk = 104750000, - .pll_div_regs = { 0x57, 0x32, 0x98, 0x07, 0x90, 0x49 }, + .pll_div_regs = { 0xd1, 0x57, 0x32, 0x98, 0x07, 0x90, 0x49 }, }, { .pixclk = 106500000, - .pll_div_regs = { 0x58, 0x32, 0x84, 0x03, 0x82, 0x41 }, + .pll_div_regs = { 0xd1, 0x58, 0x32, 0x84, 0x03, 0x82, 0x41 }, }, { .pixclk = 107000000, - .pll_div_regs = { 0x5a, 0x32, 0x89, 0x88, 0x80, 0x40 }, - }, { - .pixclk = 108000000, - .pll_div_regs = { 0x5a, 0x30, 0x00, 0x00, 0x80, 0x00 }, - }, { + .pll_div_regs = { 0xd1, 0x5a, 0x32, 0x89, 0x88, 0x80, 0x40 }, + }, { .pixclk = 108108000, - .pll_div_regs = { 0x5a, 0x32, 0xfd, 0x0c, 0x80, 0x40 }, + .pll_div_regs = { 0xd1, 0x5a, 0x32, 0xfd, 0x0c, 0x80, 0x40 }, }, { .pixclk = 118000000, - .pll_div_regs = { 0x62, 0x34, 0x95, 0x08, 0x80, 0x40 }, - }, { - .pixclk = 118800000, - .pll_div_regs = { 0x63, 0x30, 0x00, 0x00, 0x80, 0x00 }, - }, { + .pll_div_regs = { 0xd1, 0x62, 0x34, 0x95, 0x08, 0x80, 0x40 }, + }, { .pixclk = 123000000, - .pll_div_regs = { 0x66, 0x34, 0x82, 0x01, 0x88, 0x45 }, + .pll_div_regs = { 0xd1, 0x66, 0x34, 0x82, 0x01, 0x88, 0x45 }, }, { .pixclk = 127000000, - .pll_div_regs = { 0x69, 0x34, 0x89, 0x08, 0x80, 0x40 }, + .pll_div_regs = { 0xd1, 0x69, 0x34, 0x89, 0x08, 0x80, 0x40 }, }, { .pixclk = 135000000, - .pll_div_regs = { 0x70, 0x34, 0x82, 0x01, 0x80, 0x40 }, + .pll_div_regs = { 0xd1, 0x70, 0x34, 0x82, 0x01, 0x80, 0x40 }, }, { .pixclk = 135580000, - .pll_div_regs = { 0x71, 0x39, 0xe9, 0x82, 0x9c, 0x5b }, + .pll_div_regs = { 0xd1, 0x71, 0x39, 0xe9, 0x82, 0x9c, 0x5b }, }, { .pixclk = 137520000, - .pll_div_regs = { 0x72, 0x38, 0x99, 0x10, 0x85, 0x41 }, + .pll_div_regs = { 0xd1, 0x72, 0x38, 0x99, 0x10, 0x85, 0x41 }, }, { .pixclk = 138750000, - .pll_div_regs = { 0x73, 0x35, 0x88, 0x05, 0x90, 0x4d }, + .pll_div_regs = { 0xd1, 0x73, 0x35, 0x88, 0x05, 0x90, 0x4d }, }, { .pixclk = 140000000, - .pll_div_regs = { 0x75, 0x36, 0xa7, 0x90, 0x80, 0x40 }, - }, { - .pixclk = 144000000, - .pll_div_regs = { 0x78, 0x30, 0x00, 0x00, 0x80, 0x00 }, - }, { + .pll_div_regs = { 0xd1, 0x75, 0x36, 0xa7, 0x90, 0x80, 0x40 }, + }, { .pixclk = 148352000, - .pll_div_regs = { 0x7b, 0x35, 0xdb, 0x39, 0x90, 0x45 }, + .pll_div_regs = { 0xd1, 0x7b, 0x35, 0xdb, 0x39, 0x90, 0x45 }, }, { .pixclk = 148500000, - .pll_div_regs = { 0x7b, 0x35, 0x84, 0x03, 0x90, 0x45 }, + .pll_div_regs = { 0xd1, 0x7b, 0x35, 0x84, 0x03, 0x90, 0x45 }, }, { .pixclk = 154000000, - .pll_div_regs = { 0x40, 0x18, 0x83, 0x01, 0x00, 0x40 }, + .pll_div_regs = { 0xd1, 0x40, 0x18, 0x83, 0x01, 0x00, 0x40 }, }, { .pixclk = 157000000, - .pll_div_regs = { 0x41, 0x11, 0xa7, 0x14, 0x80, 0x40 }, + .pll_div_regs = { 0xd1, 0x41, 0x11, 0xa7, 0x14, 0x80, 0x40 }, }, { .pixclk = 160000000, - .pll_div_regs = { 0x42, 0x12, 0xa1, 0x20, 0x80, 0x40 }, + .pll_div_regs = { 0xd1, 0x42, 0x12, 0xa1, 0x20, 0x80, 0x40 }, }, { .pixclk = 162000000, - .pll_div_regs = { 0x43, 0x18, 0x8b, 0x08, 0x96, 0x55 }, + .pll_div_regs = { 0xd1, 0x43, 0x18, 0x8b, 0x08, 0x96, 0x55 }, }, { .pixclk = 164000000, - .pll_div_regs = { 0x45, 0x11, 0x83, 0x82, 0x90, 0x4b }, + .pll_div_regs = { 0xd1, 0x45, 0x11, 0x83, 0x82, 0x90, 0x4b }, }, { .pixclk = 165000000, - .pll_div_regs = { 0x45, 0x11, 0x84, 0x81, 0x90, 0x4b }, - }, { - .pixclk = 180000000, - .pll_div_regs = { 0x4b, 0x10, 0x00, 0x00, 0x80, 0x00 }, + .pll_div_regs = { 0xd1, 0x45, 0x11, 0x84, 0x81, 0x90, 0x4b }, }, { .pixclk = 185625000, - .pll_div_regs = { 0x4e, 
0x12, 0x9a, 0x95, 0x80, 0x40 }, + .pll_div_regs = { 0xd1, 0x4e, 0x12, 0x9a, 0x95, 0x80, 0x40 }, }, { .pixclk = 188000000, - .pll_div_regs = { 0x4e, 0x12, 0xa7, 0x10, 0x80, 0x40 }, + .pll_div_regs = { 0xd1, 0x4e, 0x12, 0xa7, 0x10, 0x80, 0x40 }, }, { .pixclk = 198000000, - .pll_div_regs = { 0x52, 0x12, 0x82, 0x01, 0x88, 0x47 }, + .pll_div_regs = { 0xd1, 0x52, 0x12, 0x82, 0x01, 0x88, 0x47 }, }, { .pixclk = 205000000, - .pll_div_regs = { 0x55, 0x12, 0x8c, 0x05, 0x90, 0x4b }, + .pll_div_regs = { 0xd1, 0x55, 0x12, 0x8c, 0x05, 0x90, 0x4b }, }, { .pixclk = 209500000, - .pll_div_regs = { 0x57, 0x12, 0x98, 0x07, 0x90, 0x49 }, + .pll_div_regs = { 0xd1, 0x57, 0x12, 0x98, 0x07, 0x90, 0x49 }, }, { .pixclk = 213000000, - .pll_div_regs = { 0x58, 0x12, 0x84, 0x03, 0x82, 0x41 }, - }, { - .pixclk = 216000000, - .pll_div_regs = { 0x5a, 0x10, 0x00, 0x00, 0x80, 0x00 }, + .pll_div_regs = { 0xd1, 0x58, 0x12, 0x84, 0x03, 0x82, 0x41 }, }, { .pixclk = 216216000, - .pll_div_regs = { 0x5a, 0x12, 0xfd, 0x0c, 0x80, 0x40 }, - }, { - .pixclk = 237600000, - .pll_div_regs = { 0x63, 0x10, 0x00, 0x00, 0x80, 0x00 }, - }, { + .pll_div_regs = { 0xd1, 0x5a, 0x12, 0xfd, 0x0c, 0x80, 0x40 }, + }, { .pixclk = 254000000, - .pll_div_regs = { 0x69, 0x14, 0x89, 0x08, 0x80, 0x40 }, + .pll_div_regs = { 0xd1, 0x69, 0x14, 0x89, 0x08, 0x80, 0x40 }, }, { .pixclk = 277500000, - .pll_div_regs = { 0x73, 0x15, 0x88, 0x05, 0x90, 0x4d }, - }, { - .pixclk = 288000000, - .pll_div_regs = { 0x78, 0x10, 0x00, 0x00, 0x80, 0x00 }, - }, { + .pll_div_regs = { 0xd1, 0x73, 0x15, 0x88, 0x05, 0x90, 0x4d }, + }, { .pixclk = 297000000, - .pll_div_regs = { 0x7b, 0x15, 0x84, 0x03, 0x90, 0x45 }, + .pll_div_regs = { 0xd1, 0x7b, 0x15, 0x84, 0x03, 0x90, 0x45 }, }, }; @@ -369,29 +282,30 @@ struct reg_settings { }; static const struct reg_settings common_phy_cfg[] = { - { PHY_REG_00, 0x00 }, { PHY_REG_01, 0xd1 }, - { PHY_REG_08, 0x4f }, { PHY_REG_09, 0x30 }, - { PHY_REG_10, 0x33 }, { PHY_REG_11, 0x65 }, + { PHY_REG(0), 0x00 }, + /* PHY_REG(1-7) pix clk specific */ + { PHY_REG(8), 0x4f }, { PHY_REG(9), 0x30 }, + { PHY_REG(10), 0x33 }, { PHY_REG(11), 0x65 }, /* REG12 pixclk specific */ /* REG13 pixclk specific */ /* REG14 pixclk specific */ - { PHY_REG_15, 0x80 }, { PHY_REG_16, 0x6c }, - { PHY_REG_17, 0xf2 }, { PHY_REG_18, 0x67 }, - { PHY_REG_19, 0x00 }, { PHY_REG_20, 0x10 }, + { PHY_REG(15), 0x80 }, { PHY_REG(16), 0x6c }, + { PHY_REG(17), 0xf2 }, { PHY_REG(18), 0x67 }, + { PHY_REG(19), 0x00 }, { PHY_REG(20), 0x10 }, /* REG21 pixclk specific */ - { PHY_REG_22, 0x30 }, { PHY_REG_23, 0x32 }, - { PHY_REG_24, 0x60 }, { PHY_REG_25, 0x8f }, - { PHY_REG_26, 0x00 }, { PHY_REG_27, 0x00 }, - { PHY_REG_28, 0x08 }, { PHY_REG_29, 0x00 }, - { PHY_REG_30, 0x00 }, { PHY_REG_31, 0x00 }, - { PHY_REG_32, 0x00 }, { PHY_REG_33, 0x80 }, - { PHY_REG_34, 0x00 }, { PHY_REG_35, 0x00 }, - { PHY_REG_36, 0x00 }, { PHY_REG_37, 0x00 }, - { PHY_REG_38, 0x00 }, { PHY_REG_39, 0x00 }, - { PHY_REG_40, 0x00 }, { PHY_REG_41, 0xe0 }, - { PHY_REG_42, 0x83 }, { PHY_REG_43, 0x0f }, - { PHY_REG_44, 0x3E }, { PHY_REG_45, 0xf8 }, - { PHY_REG_46, 0x00 }, { PHY_REG_47, 0x00 } + { PHY_REG(22), 0x30 }, { PHY_REG(23), 0x32 }, + { PHY_REG(24), 0x60 }, { PHY_REG(25), 0x8f }, + { PHY_REG(26), 0x00 }, { PHY_REG(27), 0x00 }, + { PHY_REG(28), 0x08 }, { PHY_REG(29), 0x00 }, + { PHY_REG(30), 0x00 }, { PHY_REG(31), 0x00 }, + { PHY_REG(32), 0x00 }, { PHY_REG(33), 0x80 }, + { PHY_REG(34), 0x00 }, { PHY_REG(35), 0x00 }, + { PHY_REG(36), 0x00 }, { PHY_REG(37), 0x00 }, + { PHY_REG(38), 0x00 }, { PHY_REG(39), 0x00 }, + { PHY_REG(40), 
0x00 }, { PHY_REG(41), 0xe0 }, + { PHY_REG(42), 0x83 }, { PHY_REG(43), 0x0f }, + { PHY_REG(44), 0x3E }, { PHY_REG(45), 0xf8 }, + { PHY_REG(46), 0x00 }, { PHY_REG(47), 0x00 } }; struct fsl_samsung_hdmi_phy { @@ -412,40 +326,6 @@ to_fsl_samsung_hdmi_phy(struct clk_hw *hw) } static void -fsl_samsung_hdmi_phy_configure_pixclk(struct fsl_samsung_hdmi_phy *phy, - const struct phy_config *cfg) -{ - u8 div = 0x1; - - switch (cfg->pixclk) { - case 22250000 ... 33750000: - div = 0xf; - break; - case 35000000 ... 40000000: - div = 0xb; - break; - case 43200000 ... 47500000: - div = 0x9; - break; - case 50349650 ... 63500000: - div = 0x7; - break; - case 67500000 ... 90000000: - div = 0x5; - break; - case 94000000 ... 148500000: - div = 0x3; - break; - case 154000000 ... 297000000: - div = 0x1; - break; - } - - writeb(REG21_SEL_TX_CK_INV | FIELD_PREP(REG21_PMS_S_MASK, div), - phy->regs + PHY_REG_21); -} - -static void fsl_samsung_hdmi_phy_configure_pll_lock_det(struct fsl_samsung_hdmi_phy *phy, const struct phy_config *cfg) { @@ -469,7 +349,7 @@ fsl_samsung_hdmi_phy_configure_pll_lock_det(struct fsl_samsung_hdmi_phy *phy, break; } - writeb(FIELD_PREP(REG12_CK_DIV_MASK, ilog2(div)), phy->regs + PHY_REG_12); + writeb(FIELD_PREP(REG12_CK_DIV_MASK, ilog2(div)), phy->regs + PHY_REG(12)); /* * Calculation for the frequency lock detector target code (fld_tg_code) @@ -489,11 +369,88 @@ fsl_samsung_hdmi_phy_configure_pll_lock_det(struct fsl_samsung_hdmi_phy *phy, /* FLD_TOL and FLD_RP_CODE taken from downstream driver */ writeb(FIELD_PREP(REG13_TG_CODE_LOW_MASK, fld_tg_code), - phy->regs + PHY_REG_13); + phy->regs + PHY_REG(13)); writeb(FIELD_PREP(REG14_TOL_MASK, 2) | FIELD_PREP(REG14_RP_CODE_MASK, 2) | FIELD_PREP(REG14_TG_CODE_HIGH_MASK, fld_tg_code >> 8), - phy->regs + PHY_REG_14); + phy->regs + PHY_REG(14)); +} + +static unsigned long fsl_samsung_hdmi_phy_find_pms(unsigned long fout, u8 *p, u16 *m, u8 *s) +{ + unsigned long best_freq = 0; + u32 min_delta = 0xffffffff; + u8 _p, best_p; + u16 _m, best_m; + u8 _s, best_s; + + /* + * Figure 13-78 of the reference manual states the PLL should be TMDS x 5 + * while the TMDS_CLKO should be the PLL / 5. So to calculate the PLL, + * take the pix clock x 5, then return the value of the PLL / 5. + */ + fout *= 5; + + /* The ref manual states the values of 'P' range from 1 to 11 */ + for (_p = 1; _p <= 11; ++_p) { + for (_s = 1; _s <= 16; ++_s) { + u64 tmp; + u32 delta; + + /* s must be one or even */ + if (_s > 1 && (_s & 0x01) == 1) + _s++; + + /* _s cannot be 14 per the TRM */ + if (_s == 14) + continue; + + /* + * TODO: Ref Manual doesn't state the range of _m + * so this should be further refined if possible. + * This range was set based on the original values + * in the lookup table + */ + tmp = (u64)fout * (_p * _s); + do_div(tmp, 24 * MHZ); + _m = tmp; + if (_m < 0x30 || _m > 0x7b) + continue; + + /* + * Rev 2 of the Ref Manual states the + * VCO can range between 750MHz and + * 3GHz. The VCO is assumed to be + * Fvco = (M * f_ref) / P, + * where f_ref is 24MHz. 
+ */ + tmp = (u64)_m * 24 * MHZ; + do_div(tmp, _p); + if (tmp < 750 * MHZ || + tmp > 3000 * MHZ) + continue; + + /* Final frequency after post-divider */ + do_div(tmp, _s); + + delta = abs(fout - tmp); + if (delta < min_delta) { + best_p = _p; + best_s = _s; + best_m = _m; + min_delta = delta; + best_freq = tmp; + } + } + } + + if (best_freq) { + *p = best_p; + *m = best_m; + *s = best_s; + } + + return best_freq / 5; } static int fsl_samsung_hdmi_phy_configure(struct fsl_samsung_hdmi_phy *phy, @@ -503,22 +460,25 @@ static int fsl_samsung_hdmi_phy_configure(struct fsl_samsung_hdmi_phy *phy, u8 val; /* HDMI PHY init */ - writeb(REG33_FIX_DA, phy->regs + PHY_REG_33); + writeb(REG33_FIX_DA, phy->regs + PHY_REG(33)); /* common PHY registers */ for (i = 0; i < ARRAY_SIZE(common_phy_cfg); i++) writeb(common_phy_cfg[i].val, phy->regs + common_phy_cfg[i].reg); - /* set individual PLL registers PHY_REG2 ... PHY_REG7 */ + /* set individual PLL registers PHY_REG1 ... PHY_REG7 */ for (i = 0; i < PHY_PLL_DIV_REGS_NUM; i++) - writeb(cfg->pll_div_regs[i], phy->regs + PHY_REG_02 + i * 4); + writeb(cfg->pll_div_regs[i], phy->regs + PHY_REG(1) + i * 4); + + /* High nibble of PHY_REG3 and low nibble of PHY_REG21 both contain 'S' */ + writeb(REG21_SEL_TX_CK_INV | FIELD_PREP(REG21_PMS_S_MASK, + cfg->pll_div_regs[2] >> 4), phy->regs + PHY_REG(21)); - fsl_samsung_hdmi_phy_configure_pixclk(phy, cfg); fsl_samsung_hdmi_phy_configure_pll_lock_det(phy, cfg); - writeb(REG33_FIX_DA | REG33_MODE_SET_DONE, phy->regs + PHY_REG_33); + writeb(REG33_FIX_DA | REG33_MODE_SET_DONE, phy->regs + PHY_REG(33)); - ret = readb_poll_timeout(phy->regs + PHY_REG_34, val, + ret = readb_poll_timeout(phy->regs + PHY_REG(34), val, val & REG34_PLL_LOCK, 50, 20000); if (ret) dev_err(phy->dev, "PLL failed to lock\n"); @@ -537,34 +497,120 @@ static unsigned long phy_clk_recalc_rate(struct clk_hw *hw, return phy->cur_cfg->pixclk; } -static long phy_clk_round_rate(struct clk_hw *hw, - unsigned long rate, unsigned long *parent_rate) +/* Helper function to lookup the available fractional-divider rate */ +static const struct phy_config *fsl_samsung_hdmi_phy_lookup_rate(unsigned long rate) { int i; + /* Search the lookup table */ for (i = ARRAY_SIZE(phy_pll_cfg) - 1; i >= 0; i--) if (phy_pll_cfg[i].pixclk <= rate) - return phy_pll_cfg[i].pixclk; + break; - return -EINVAL; + return &phy_pll_cfg[i]; +} + +static void fsl_samsung_hdmi_calculate_phy(struct phy_config *cal_phy, unsigned long rate, + u8 p, u16 m, u8 s) +{ + cal_phy->pixclk = rate; + cal_phy->pll_div_regs[0] = FIELD_PREP(REG01_PMS_P_MASK, p); + cal_phy->pll_div_regs[1] = m; + cal_phy->pll_div_regs[2] = FIELD_PREP(REG03_PMS_S_MASK, s-1); + /* pll_div_regs 3-6 are fixed and pre-defined already */ +} + +static u32 fsl_samsung_hdmi_phy_get_closest_rate(unsigned long rate, + u32 int_div_clk, u32 frac_div_clk) +{ + /* Calculate the absolute value of the differences and return whichever is closest */ + if (abs((long)rate - (long)int_div_clk) < abs((long)(rate - (long)frac_div_clk))) + return int_div_clk; + + return frac_div_clk; +} + +static long phy_clk_round_rate(struct clk_hw *hw, + unsigned long rate, unsigned long *parent_rate) +{ + const struct phy_config *fract_div_phy; + u32 int_div_clk; + u16 m; + u8 p, s; + + /* If the clock is out of range return error instead of searching */ + if (rate > 297000000 || rate < 22250000) + return -EINVAL; + + /* Search the fractional divider lookup table */ + fract_div_phy = fsl_samsung_hdmi_phy_lookup_rate(rate); + + /* If the rate is an exact match, 
return that value */ + if (rate == fract_div_phy->pixclk) + return fract_div_phy->pixclk; + + /* If the exact match isn't found, calculate the integer divider */ + int_div_clk = fsl_samsung_hdmi_phy_find_pms(rate, &p, &m, &s); + + /* If the int_div_clk rate is an exact match, return that value */ + if (int_div_clk == rate) + return int_div_clk; + + /* If neither rate is an exact match, use the value from the LUT */ + return fract_div_phy->pixclk; +} + +static int phy_use_fract_div(struct fsl_samsung_hdmi_phy *phy, const struct phy_config *fract_div_phy) +{ + phy->cur_cfg = fract_div_phy; + dev_dbg(phy->dev, "fsl_samsung_hdmi_phy: using fractional divider rate = %u\n", + phy->cur_cfg->pixclk); + return fsl_samsung_hdmi_phy_configure(phy, phy->cur_cfg); +} + +static int phy_use_integer_div(struct fsl_samsung_hdmi_phy *phy, + const struct phy_config *int_div_clk) +{ + phy->cur_cfg = &calculated_phy_pll_cfg; + dev_dbg(phy->dev, "fsl_samsung_hdmi_phy: integer divider rate = %u\n", + phy->cur_cfg->pixclk); + return fsl_samsung_hdmi_phy_configure(phy, phy->cur_cfg); } static int phy_clk_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate) { struct fsl_samsung_hdmi_phy *phy = to_fsl_samsung_hdmi_phy(hw); - int i; + const struct phy_config *fract_div_phy; + u32 int_div_clk; + u16 m; + u8 p, s; - for (i = ARRAY_SIZE(phy_pll_cfg) - 1; i >= 0; i--) - if (phy_pll_cfg[i].pixclk <= rate) - break; + /* Search the fractional divider lookup table */ + fract_div_phy = fsl_samsung_hdmi_phy_lookup_rate(rate); - if (i < 0) - return -EINVAL; + /* If the rate is an exact match, use that value */ + if (fract_div_phy->pixclk == rate) + return phy_use_fract_div(phy, fract_div_phy); - phy->cur_cfg = &phy_pll_cfg[i]; + /* + * If the rate from the fractional divider is not exact, check the integer divider, + * and use it if that value is an exact match. + */ + int_div_clk = fsl_samsung_hdmi_phy_find_pms(rate, &p, &m, &s); + fsl_samsung_hdmi_calculate_phy(&calculated_phy_pll_cfg, int_div_clk, p, m, s); + if (int_div_clk == rate) + return phy_use_integer_div(phy, &calculated_phy_pll_cfg); - return fsl_samsung_hdmi_phy_configure(phy, phy->cur_cfg); + /* + * Compare the difference between the integer clock and the fractional clock against + * the desired clock and use whichever is closest.
+ */ + if (fsl_samsung_hdmi_phy_get_closest_rate(rate, int_div_clk, + fract_div_phy->pixclk) == fract_div_phy->pixclk) + return phy_use_fract_div(phy, fract_div_phy); + else + return phy_use_integer_div(phy, &calculated_phy_pll_cfg); } static const struct clk_ops phy_clk_ops = { @@ -703,8 +749,8 @@ static const struct of_device_id fsl_samsung_hdmi_phy_of_match[] = { MODULE_DEVICE_TABLE(of, fsl_samsung_hdmi_phy_of_match); static struct platform_driver fsl_samsung_hdmi_phy_driver = { - .probe = fsl_samsung_hdmi_phy_probe, - .remove_new = fsl_samsung_hdmi_phy_remove, + .probe = fsl_samsung_hdmi_phy_probe, + .remove = fsl_samsung_hdmi_phy_remove, .driver = { .name = "fsl-samsung-hdmi-phy", .of_match_table = fsl_samsung_hdmi_phy_of_match, diff --git a/drivers/phy/intel/phy-intel-lgm-combo.c b/drivers/phy/intel/phy-intel-lgm-combo.c index f8e3054a9e59..9ee3cf61cdd0 100644 --- a/drivers/phy/intel/phy-intel-lgm-combo.c +++ b/drivers/phy/intel/phy-intel-lgm-combo.c @@ -605,7 +605,7 @@ static const struct of_device_id of_intel_cbphy_match[] = { static struct platform_driver intel_cbphy_driver = { .probe = intel_cbphy_probe, - .remove_new = intel_cbphy_remove, + .remove = intel_cbphy_remove, .driver = { .name = "intel-combo-phy", .of_match_table = of_intel_cbphy_match, diff --git a/drivers/phy/marvell/phy-mvebu-cp110-utmi.c b/drivers/phy/marvell/phy-mvebu-cp110-utmi.c index 4922a5f3327d..59903f86b13f 100644 --- a/drivers/phy/marvell/phy-mvebu-cp110-utmi.c +++ b/drivers/phy/marvell/phy-mvebu-cp110-utmi.c @@ -62,6 +62,8 @@ #define SQ_AMP_CAL_MASK GENMASK(2, 0) #define SQ_AMP_CAL_VAL 1 #define SQ_AMP_CAL_EN BIT(3) +#define UTMI_DIG_CTRL1_REG 0x20 +#define SWAP_DPDM BIT(15) #define UTMI_CTRL_STATUS0_REG 0x24 #define SUSPENDM BIT(22) #define TEST_SEL BIT(25) @@ -99,11 +101,13 @@ struct mvebu_cp110_utmi { * @priv: PHY driver data * @id: PHY port ID * @dr_mode: PHY connection: USB_DR_MODE_HOST or USB_DR_MODE_PERIPHERAL + * @swap_dx: whether to swap d+/d- signals */ struct mvebu_cp110_utmi_port { struct mvebu_cp110_utmi *priv; u32 id; enum usb_dr_mode dr_mode; + bool swap_dx; }; static void mvebu_cp110_utmi_port_setup(struct mvebu_cp110_utmi_port *port) @@ -159,6 +163,13 @@ static void mvebu_cp110_utmi_port_setup(struct mvebu_cp110_utmi_port *port) reg &= ~(VDAT_MASK | VSRC_MASK); reg |= (VDAT_VAL << VDAT_OFFSET) | (VSRC_VAL << VSRC_OFFSET); writel(reg, PORT_REGS(port) + UTMI_CHGDTC_CTRL_REG); + + /* Swap D+/D- */ + reg = readl(PORT_REGS(port) + UTMI_DIG_CTRL1_REG); + reg &= ~(SWAP_DPDM); + if (port->swap_dx) + reg |= SWAP_DPDM; + writel(reg, PORT_REGS(port) + UTMI_DIG_CTRL1_REG); } static int mvebu_cp110_utmi_phy_power_off(struct phy *phy) @@ -286,6 +297,7 @@ static int mvebu_cp110_utmi_phy_probe(struct platform_device *pdev) struct phy_provider *provider; struct device_node *child; u32 usb_devices = 0; + u32 swap_dx = 0; utmi = devm_kzalloc(dev, sizeof(*utmi), GFP_KERNEL); if (!utmi) @@ -345,6 +357,10 @@ static int mvebu_cp110_utmi_phy_probe(struct platform_device *pdev) } } + of_property_for_each_u32(dev->of_node, "swap-dx-lanes", swap_dx) + if (swap_dx == port_id) + port->swap_dx = 1; + /* Retrieve PHY capabilities */ utmi->ops = &mvebu_cp110_utmi_phy_ops; diff --git a/drivers/phy/microchip/sparx5_serdes.c b/drivers/phy/microchip/sparx5_serdes.c index 7cb85029fab3..320cf5b50a8c 100644 --- a/drivers/phy/microchip/sparx5_serdes.c +++ b/drivers/phy/microchip/sparx5_serdes.c @@ -21,23 +21,33 @@ #include "sparx5_serdes.h" -#define SPX5_CMU_MAX 14 - #define SPX5_SERDES_10G_START 13 #define 
SPX5_SERDES_25G_START 25 #define SPX5_SERDES_6G10G_CNT SPX5_SERDES_25G_START +#define LAN969X_SERDES_10G_CNT 10 + /* Optimal power settings from GUC */ #define SPX5_SERDES_QUIET_MODE_VAL 0x01ef4e0c -enum sparx5_10g28cmu_mode { - SPX5_SD10G28_CMU_MAIN = 0, - SPX5_SD10G28_CMU_AUX1 = 1, - SPX5_SD10G28_CMU_AUX2 = 3, - SPX5_SD10G28_CMU_NONE = 4, - SPX5_SD10G28_CMU_MAX, +/* Register target sizes */ +const unsigned int sparx5_serdes_tsize[TSIZE_LAST] = { + [TC_SD10G_LANE] = 12, + [TC_SD_CMU] = 14, + [TC_SD_CMU_CFG] = 14, + [TC_SD_LANE] = 25, +}; + +const unsigned int lan969x_serdes_tsize[TSIZE_LAST] = { + [TC_SD10G_LANE] = 10, + [TC_SD_CMU] = 6, + [TC_SD_CMU_CFG] = 6, + [TC_SD_LANE] = 10, }; +/* Pointer to the register target size table */ +const unsigned int *tsize; + enum sparx5_sd25g28_mode_preset_type { SPX5_SD25G28_MODE_PRESET_25000, SPX5_SD25G28_MODE_PRESET_10000, @@ -1095,13 +1105,31 @@ static int sparx5_serdes_cmu_get(enum sparx5_10g28cmu_mode mode, int sd_index) return sparx5_serdes_cmu_map[mode][sd_index]; } +/* Map of 6G/10G serdes mode and index to CMU index. */ +static const int +lan969x_serdes_cmu_map[SPX5_SD10G28_CMU_MAX][LAN969X_SERDES_10G_CNT] = { + [SPX5_SD10G28_CMU_MAIN] = { 2, 2, 2, 2, 2, + 2, 2, 2, 5, 5 }, + [SPX5_SD10G28_CMU_AUX1] = { 0, 0, 3, 3, 3, + 3, 3, 3, 3, 3 }, + [SPX5_SD10G28_CMU_AUX2] = { 1, 1, 1, 1, 4, + 4, 4, 4, 4, 4 }, + [SPX5_SD10G28_CMU_NONE] = { 1, 1, 1, 1, 4, + 4, 4, 4, 4, 4 }, +}; + +static int lan969x_serdes_cmu_get(enum sparx5_10g28cmu_mode mode, int sd_index) +{ + return lan969x_serdes_cmu_map[mode][sd_index]; +} + static void sparx5_serdes_cmu_power_off(struct sparx5_serdes_private *priv) { void __iomem *cmu_inst, *cmu_cfg_inst; int i; /* Power down each CMU */ - for (i = 0; i < SPX5_CMU_MAX; i++) { + for (i = 0; i < priv->data->consts.cmu_max; i++) { cmu_inst = sdx5_inst_get(priv, TARGET_SD_CMU, i); cmu_cfg_inst = sdx5_inst_get(priv, TARGET_SD_CMU_CFG, i); @@ -1650,7 +1678,7 @@ static int sparx5_sd10g28_apply_params(struct sparx5_serdes_macro *macro, if (params->skip_cmu_cfg) return 0; - cmu_idx = sparx5_serdes_cmu_get(params->cmu_sel, lane_index); + cmu_idx = priv->data->ops.serdes_cmu_get(params->cmu_sel, macro->sidx); err = sparx5_cmu_cfg(priv, cmu_idx); if (err) return err; @@ -2183,6 +2211,10 @@ static int sparx5_serdes_clock_config(struct sparx5_serdes_macro *macro) { struct sparx5_serdes_private *priv = macro->priv; + /* Clock is auto-detected in 100Base-FX mode on lan969x */ + if (priv->data->type == SPX5_TARGET_LAN969X) + return 0; + if (macro->serdesmode == SPX5_SD_MODE_100FX) { u32 freq = priv->coreclock == 250000000 ? 2 : priv->coreclock == 500000000 ? 
1 : 0; @@ -2297,10 +2329,12 @@ static int sparx5_serdes_set_speed(struct phy *phy, int speed) { struct sparx5_serdes_macro *macro = phy_get_drvdata(phy); - if (macro->sidx < SPX5_SERDES_10G_START && speed > SPEED_5000) - return -EINVAL; - if (macro->sidx < SPX5_SERDES_25G_START && speed > SPEED_10000) - return -EINVAL; + if (macro->priv->data->type == SPX5_TARGET_SPARX5) { + if (macro->sidx < SPX5_SERDES_10G_START && speed > SPEED_5000) + return -EINVAL; + if (macro->sidx < SPX5_SERDES_25G_START && speed > SPEED_10000) + return -EINVAL; + } if (speed != macro->speed) { macro->speed = speed; if (macro->serdesmode != SPX5_SD_MODE_NONE) @@ -2337,11 +2371,14 @@ static int sparx5_serdes_validate(struct phy *phy, enum phy_mode mode, if (macro->speed == 0) return -EINVAL; - if (macro->sidx < SPX5_SERDES_10G_START && macro->speed > SPEED_5000) - return -EINVAL; - if (macro->sidx < SPX5_SERDES_25G_START && macro->speed > SPEED_10000) - return -EINVAL; - + if (macro->priv->data->type == SPX5_TARGET_SPARX5) { + if (macro->sidx < SPX5_SERDES_10G_START && + macro->speed > SPEED_5000) + return -EINVAL; + if (macro->sidx < SPX5_SERDES_25G_START && + macro->speed > SPEED_10000) + return -EINVAL; + } switch (submode) { case PHY_INTERFACE_MODE_1000BASEX: if (macro->speed != SPEED_100 && /* This is for 100BASE-FX */ @@ -2375,6 +2412,26 @@ static const struct phy_ops sparx5_serdes_ops = { .owner = THIS_MODULE, }; +static void sparx5_serdes_type_set(struct sparx5_serdes_macro *macro, int sidx) +{ + if (sidx < SPX5_SERDES_10G_START) { + macro->serdestype = SPX5_SDT_6G; + macro->stpidx = macro->sidx; + } else if (sidx < SPX5_SERDES_25G_START) { + macro->serdestype = SPX5_SDT_10G; + macro->stpidx = macro->sidx - SPX5_SERDES_10G_START; + } else { + macro->serdestype = SPX5_SDT_25G; + macro->stpidx = macro->sidx - SPX5_SERDES_25G_START; + } +} + +static void lan969x_serdes_type_set(struct sparx5_serdes_macro *macro, int sidx) +{ + macro->serdestype = SPX5_SDT_10G; + macro->stpidx = macro->sidx; +} + static int sparx5_phy_create(struct sparx5_serdes_private *priv, int idx, struct phy **phy) { @@ -2391,16 +2448,8 @@ static int sparx5_phy_create(struct sparx5_serdes_private *priv, macro->sidx = idx; macro->priv = priv; macro->speed = SPEED_UNKNOWN; - if (idx < SPX5_SERDES_10G_START) { - macro->serdestype = SPX5_SDT_6G; - macro->stpidx = macro->sidx; - } else if (idx < SPX5_SERDES_25G_START) { - macro->serdestype = SPX5_SDT_10G; - macro->stpidx = macro->sidx - SPX5_SERDES_10G_START; - } else { - macro->serdestype = SPX5_SDT_25G; - macro->stpidx = macro->sidx - SPX5_SERDES_25G_START; - } + + priv->data->ops.serdes_type_set(macro, idx); phy_set_drvdata(*phy, macro); @@ -2507,6 +2556,71 @@ static struct sparx5_serdes_io_resource sparx5_serdes_iomap[] = { { TARGET_SD_LANE_25G + 7, 0x5c8000 }, /* 0x610dd0000: sd_lane_25g_32 */ }; +static const struct sparx5_serdes_io_resource lan969x_serdes_iomap[] = { + { TARGET_SD_CMU, 0x0 }, /* 0xe3410000 */ + { TARGET_SD_CMU + 1, 0x8000 }, /* 0xe3418000 */ + { TARGET_SD_CMU + 2, 0x10000 }, /* 0xe3420000 */ + { TARGET_SD_CMU + 3, 0x18000 }, /* 0xe3428000 */ + { TARGET_SD_CMU + 4, 0x20000 }, /* 0xe3430000 */ + { TARGET_SD_CMU + 5, 0x28000 }, /* 0xe3438000 */ + { TARGET_SD_CMU_CFG, 0x30000 }, /* 0xe3440000 */ + { TARGET_SD_CMU_CFG + 1, 0x38000 }, /* 0xe3448000 */ + { TARGET_SD_CMU_CFG + 2, 0x40000 }, /* 0xe3450000 */ + { TARGET_SD_CMU_CFG + 3, 0x48000 }, /* 0xe3458000 */ + { TARGET_SD_CMU_CFG + 4, 0x50000 }, /* 0xe3460000 */ + { TARGET_SD_CMU_CFG + 5, 0x58000 }, /* 0xe3468000 */ + { 
TARGET_SD10G_LANE, 0x60000 }, /* 0xe3470000 */ + { TARGET_SD10G_LANE + 1, 0x68000 }, /* 0xe3478000 */ + { TARGET_SD10G_LANE + 2, 0x70000 }, /* 0xe3480000 */ + { TARGET_SD10G_LANE + 3, 0x78000 }, /* 0xe3488000 */ + { TARGET_SD10G_LANE + 4, 0x80000 }, /* 0xe3490000 */ + { TARGET_SD10G_LANE + 5, 0x88000 }, /* 0xe3498000 */ + { TARGET_SD10G_LANE + 6, 0x90000 }, /* 0xe34a0000 */ + { TARGET_SD10G_LANE + 7, 0x98000 }, /* 0xe34a8000 */ + { TARGET_SD10G_LANE + 8, 0xa0000 }, /* 0xe34b0000 */ + { TARGET_SD10G_LANE + 9, 0xa8000 }, /* 0xe34b8000 */ + { TARGET_SD_LANE, 0x100000 }, /* 0xe3510000 */ + { TARGET_SD_LANE + 1, 0x108000 }, /* 0xe3518000 */ + { TARGET_SD_LANE + 2, 0x110000 }, /* 0xe3520000 */ + { TARGET_SD_LANE + 3, 0x118000 }, /* 0xe3528000 */ + { TARGET_SD_LANE + 4, 0x120000 }, /* 0xe3530000 */ + { TARGET_SD_LANE + 5, 0x128000 }, /* 0xe3538000 */ + { TARGET_SD_LANE + 6, 0x130000 }, /* 0xe3540000 */ + { TARGET_SD_LANE + 7, 0x138000 }, /* 0xe3548000 */ + { TARGET_SD_LANE + 8, 0x140000 }, /* 0xe3550000 */ + { TARGET_SD_LANE + 9, 0x148000 }, /* 0xe3558000 */ +}; + +static const struct sparx5_serdes_match_data sparx5_desc = { + .type = SPX5_TARGET_SPARX5, + .iomap = sparx5_serdes_iomap, + .iomap_size = ARRAY_SIZE(sparx5_serdes_iomap), + .tsize = sparx5_serdes_tsize, + .consts = { + .sd_max = 33, + .cmu_max = 14, + }, + .ops = { + .serdes_type_set = &sparx5_serdes_type_set, + .serdes_cmu_get = &sparx5_serdes_cmu_get, + }, +}; + +static const struct sparx5_serdes_match_data lan969x_desc = { + .type = SPX5_TARGET_LAN969X, + .iomap = lan969x_serdes_iomap, + .iomap_size = ARRAY_SIZE(lan969x_serdes_iomap), + .tsize = lan969x_serdes_tsize, + .consts = { + .sd_max = 10, + .cmu_max = 6, + }, + .ops = { + .serdes_type_set = &lan969x_serdes_type_set, + .serdes_cmu_get = &lan969x_serdes_cmu_get, + } +}; + /* Client lookup function, uses serdes index */ static struct phy *sparx5_serdes_xlate(struct device *dev, const struct of_phandle_args *args) @@ -2521,7 +2635,7 @@ static struct phy *sparx5_serdes_xlate(struct device *dev, sidx = args->args[0]; /* Check validity: ERR_PTR(-ENODEV) if not valid */ - for (idx = 0; idx < SPX5_SERDES_MAX; idx++) { + for (idx = 0; idx < priv->data->consts.sd_max; idx++) { struct sparx5_serdes_macro *macro = phy_get_drvdata(priv->phys[idx]); @@ -2555,6 +2669,12 @@ static int sparx5_serdes_probe(struct platform_device *pdev) platform_set_drvdata(pdev, priv); priv->dev = &pdev->dev; + priv->data = device_get_match_data(priv->dev); + if (!priv->data) + return -EINVAL; + + tsize = priv->data->tsize; + /* Get coreclock */ clk = devm_clk_get(priv->dev, NULL); if (IS_ERR(clk)) { @@ -2579,19 +2699,21 @@ static int sparx5_serdes_probe(struct platform_device *pdev) iores->name); return -ENOMEM; } - for (idx = 0; idx < ARRAY_SIZE(sparx5_serdes_iomap); idx++) { - struct sparx5_serdes_io_resource *iomap = &sparx5_serdes_iomap[idx]; + for (idx = 0; idx < priv->data->iomap_size; idx++) { + const struct sparx5_serdes_io_resource *iomap = + &priv->data->iomap[idx]; priv->regs[iomap->id] = iomem + iomap->offset; } - for (idx = 0; idx < SPX5_SERDES_MAX; idx++) { + for (idx = 0; idx < priv->data->consts.sd_max; idx++) { err = sparx5_phy_create(priv, idx, &priv->phys[idx]); if (err) return err; } - /* Power down all CMUs by default */ - sparx5_serdes_cmu_power_off(priv); + /* Power down all CMU's by default */ + if (priv->data->type == SPX5_TARGET_SPARX5) + sparx5_serdes_cmu_power_off(priv); provider = devm_of_phy_provider_register(priv->dev, sparx5_serdes_xlate); @@ -2599,7 +2721,8 @@ static int 
sparx5_serdes_probe(struct platform_device *pdev) } static const struct of_device_id sparx5_serdes_match[] = { - { .compatible = "microchip,sparx5-serdes" }, + { .compatible = "microchip,sparx5-serdes", .data = &sparx5_desc }, + { .compatible = "microchip,lan9691-serdes", .data = &lan969x_desc }, { } }; MODULE_DEVICE_TABLE(of, sparx5_serdes_match); diff --git a/drivers/phy/microchip/sparx5_serdes.h b/drivers/phy/microchip/sparx5_serdes.h index 13f94a29225a..d7093d0b09c0 100644 --- a/drivers/phy/microchip/sparx5_serdes.h +++ b/drivers/phy/microchip/sparx5_serdes.h @@ -26,11 +26,18 @@ enum sparx5_serdes_mode { SPX5_SD_MODE_SFI, }; -struct sparx5_serdes_private { - struct device *dev; - void __iomem *regs[NUM_TARGETS]; - struct phy *phys[SPX5_SERDES_MAX]; - unsigned long coreclock; +enum sparx5_10g28cmu_mode { + SPX5_SD10G28_CMU_MAIN = 0, + SPX5_SD10G28_CMU_AUX1 = 1, + SPX5_SD10G28_CMU_AUX2 = 3, + SPX5_SD10G28_CMU_NONE = 4, + SPX5_SD10G28_CMU_MAX, +}; + +enum sparx5_target { + SPX5_TARGET_SPARX5, + SPX5_TARGET_LAN969X, + }; struct sparx5_serdes_macro { @@ -44,6 +51,33 @@ struct sparx5_serdes_macro { enum phy_media media; }; +struct sparx5_serdes_consts { + int sd_max; + int cmu_max; +}; + +struct sparx5_serdes_ops { + void (*serdes_type_set)(struct sparx5_serdes_macro *macro, int sidx); + int (*serdes_cmu_get)(enum sparx5_10g28cmu_mode mode, int sd_index); +}; + +struct sparx5_serdes_match_data { + enum sparx5_target type; + const struct sparx5_serdes_consts consts; + const struct sparx5_serdes_ops ops; + const struct sparx5_serdes_io_resource *iomap; + int iomap_size; + const unsigned int *tsize; +}; + +struct sparx5_serdes_private { + struct device *dev; + void __iomem *regs[NUM_TARGETS]; + struct phy *phys[SPX5_SERDES_MAX]; + unsigned long coreclock; + const struct sparx5_serdes_match_data *data; +}; + /* Read, Write and modify registers content. * The register definition macros start at the id */ diff --git a/drivers/phy/microchip/sparx5_serdes_regs.h b/drivers/phy/microchip/sparx5_serdes_regs.h index d0543fd3dc94..11c4fdc593fa 100644 --- a/drivers/phy/microchip/sparx5_serdes_regs.h +++ b/drivers/phy/microchip/sparx5_serdes_regs.h @@ -1,11 +1,11 @@ /* SPDX-License-Identifier: GPL-2.0+ * Microchip Sparx5 SerDes driver * - * Copyright (c) 2020 Microchip Technology Inc. + * Copyright (c) 2024 Microchip Technology Inc. */ -/* This file is autogenerated by cml-utils 2020-11-16 13:11:27 +0100. - * Commit ID: 13bdf073131d8bf40c54901df6988ae4e9c8f29f +/* This file is autogenerated by cml-utils 2023-04-13 15:02:00 +0200. + * Commit ID: 5ac560288d46048f872ecdb8add53717f1efc0e1 */ #ifndef _SPARX5_SERDES_REGS_H_ @@ -26,10 +26,25 @@ enum sparx5_serdes_target { NUM_TARGETS = 332 }; +enum sparx5_serdes_tsize_enum { + TC_SD10G_LANE, + TC_SD_CMU, + TC_SD_CMU_CFG, + TC_SD_LANE, + TSIZE_LAST, +}; + +/* sparx5_serdes.c */ +extern const unsigned int *tsize; + +#define TSIZE(o) tsize[o] + #define __REG(...) 
__VA_ARGS__ -/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_01 */ -#define SD10G_LANE_LANE_01(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 4, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_01 */ +#define SD10G_LANE_LANE_01(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 0, 0, 1, 288, 4, 0, \ + 1, 4) #define SD10G_LANE_LANE_01_CFG_PMA_TX_CK_BITWIDTH_2_0 GENMASK(2, 0) #define SD10G_LANE_LANE_01_CFG_PMA_TX_CK_BITWIDTH_2_0_SET(x)\ @@ -49,8 +64,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_01_CFG_RXDET_STR_GET(x)\ FIELD_GET(SD10G_LANE_LANE_01_CFG_RXDET_STR, x) -/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_02 */ -#define SD10G_LANE_LANE_02(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 8, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_02 */ +#define SD10G_LANE_LANE_02(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 0, 0, 1, 288, 8, 0, \ + 1, 4) #define SD10G_LANE_LANE_02_CFG_EN_ADV BIT(0) #define SD10G_LANE_LANE_02_CFG_EN_ADV_SET(x)\ @@ -82,8 +99,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_02_CFG_TAP_ADV_3_0_GET(x)\ FIELD_GET(SD10G_LANE_LANE_02_CFG_TAP_ADV_3_0, x) -/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_03 */ -#define SD10G_LANE_LANE_03(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 12, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_03 */ +#define SD10G_LANE_LANE_03(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 0, 0, 1, 288, 12, 0, \ + 1, 4) #define SD10G_LANE_LANE_03_CFG_TAP_MAIN BIT(0) #define SD10G_LANE_LANE_03_CFG_TAP_MAIN_SET(x)\ @@ -91,8 +110,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_03_CFG_TAP_MAIN_GET(x)\ FIELD_GET(SD10G_LANE_LANE_03_CFG_TAP_MAIN, x) -/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_04 */ -#define SD10G_LANE_LANE_04(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 16, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_04 */ +#define SD10G_LANE_LANE_04(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 0, 0, 1, 288, 16, 0, \ + 1, 4) #define SD10G_LANE_LANE_04_CFG_TAP_DLY_4_0 GENMASK(4, 0) #define SD10G_LANE_LANE_04_CFG_TAP_DLY_4_0_SET(x)\ @@ -100,8 +121,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_04_CFG_TAP_DLY_4_0_GET(x)\ FIELD_GET(SD10G_LANE_LANE_04_CFG_TAP_DLY_4_0, x) -/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_06 */ -#define SD10G_LANE_LANE_06(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 24, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_06 */ +#define SD10G_LANE_LANE_06(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 0, 0, 1, 288, 24, 0, \ + 1, 4) #define SD10G_LANE_LANE_06_CFG_PD_DRIVER BIT(0) #define SD10G_LANE_LANE_06_CFG_PD_DRIVER_SET(x)\ @@ -139,8 +162,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_06_CFG_EN_PREEMPH_GET(x)\ FIELD_GET(SD10G_LANE_LANE_06_CFG_EN_PREEMPH, x) -/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_0B */ -#define SD10G_LANE_LANE_0B(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 44, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_0B */ +#define SD10G_LANE_LANE_0B(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 0, 0, 1, 288, 44, 0, \ + 1, 4) #define SD10G_LANE_LANE_0B_CFG_EQ_RES_3_0 GENMASK(3, 0) #define SD10G_LANE_LANE_0B_CFG_EQ_RES_3_0_SET(x)\ @@ -172,8 +197,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_0B_CFG_RESETB_OSCAL_SQ_GET(x)\ FIELD_GET(SD10G_LANE_LANE_0B_CFG_RESETB_OSCAL_SQ, x) -/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_0C */ -#define SD10G_LANE_LANE_0C(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 48, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_0C */ +#define SD10G_LANE_LANE_0C(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 0, 0, 
1, 288, 48, 0, \ + 1, 4) #define SD10G_LANE_LANE_0C_CFG_OSCAL_AFE BIT(0) #define SD10G_LANE_LANE_0C_CFG_OSCAL_AFE_SET(x)\ @@ -223,8 +250,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_0C_CFG_RX_PCIE_GEN12_GET(x)\ FIELD_GET(SD10G_LANE_LANE_0C_CFG_RX_PCIE_GEN12, x) -/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_0D */ -#define SD10G_LANE_LANE_0D(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 52, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_0D */ +#define SD10G_LANE_LANE_0D(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 0, 0, 1, 288, 52, 0, \ + 1, 4) #define SD10G_LANE_LANE_0D_CFG_CTLE_M_THR_1_0 GENMASK(1, 0) #define SD10G_LANE_LANE_0D_CFG_CTLE_M_THR_1_0_SET(x)\ @@ -238,8 +267,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_0D_CFG_EQR_BYP_GET(x)\ FIELD_GET(SD10G_LANE_LANE_0D_CFG_EQR_BYP, x) -/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_0E */ -#define SD10G_LANE_LANE_0E(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 56, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_0E */ +#define SD10G_LANE_LANE_0E(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 0, 0, 1, 288, 56, 0, \ + 1, 4) #define SD10G_LANE_LANE_0E_CFG_EQC_FORCE_3_0 GENMASK(3, 0) #define SD10G_LANE_LANE_0E_CFG_EQC_FORCE_3_0_SET(x)\ @@ -265,8 +296,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_0E_CFG_SUM_SETCM_EN_GET(x)\ FIELD_GET(SD10G_LANE_LANE_0E_CFG_SUM_SETCM_EN, x) -/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_0F */ -#define SD10G_LANE_LANE_0F(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 60, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_0F */ +#define SD10G_LANE_LANE_0F(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 0, 0, 1, 288, 60, 0, \ + 1, 4) #define SD10G_LANE_LANE_0F_R_CDR_M_GEN1_7_0 GENMASK(7, 0) #define SD10G_LANE_LANE_0F_R_CDR_M_GEN1_7_0_SET(x)\ @@ -274,8 +307,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_0F_R_CDR_M_GEN1_7_0_GET(x)\ FIELD_GET(SD10G_LANE_LANE_0F_R_CDR_M_GEN1_7_0, x) -/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_13 */ -#define SD10G_LANE_LANE_13(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 76, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_13 */ +#define SD10G_LANE_LANE_13(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 0, 0, 1, 288, 76, 0, \ + 1, 4) #define SD10G_LANE_LANE_13_CFG_DCDR_PD BIT(0) #define SD10G_LANE_LANE_13_CFG_DCDR_PD_SET(x)\ @@ -295,8 +330,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_13_CFG_CDRCK_EN_GET(x)\ FIELD_GET(SD10G_LANE_LANE_13_CFG_CDRCK_EN, x) -/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_14 */ -#define SD10G_LANE_LANE_14(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 80, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_14 */ +#define SD10G_LANE_LANE_14(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 0, 0, 1, 288, 80, 0, \ + 1, 4) #define SD10G_LANE_LANE_14_CFG_PI_EXT_DAC_7_0 GENMASK(7, 0) #define SD10G_LANE_LANE_14_CFG_PI_EXT_DAC_7_0_SET(x)\ @@ -304,8 +341,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_14_CFG_PI_EXT_DAC_7_0_GET(x)\ FIELD_GET(SD10G_LANE_LANE_14_CFG_PI_EXT_DAC_7_0, x) -/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_15 */ -#define SD10G_LANE_LANE_15(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 84, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_15 */ +#define SD10G_LANE_LANE_15(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 0, 0, 1, 288, 84, 0, \ + 1, 4) #define SD10G_LANE_LANE_15_CFG_PI_EXT_DAC_15_8 GENMASK(7, 0) #define SD10G_LANE_LANE_15_CFG_PI_EXT_DAC_15_8_SET(x)\ @@ -313,8 +352,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_15_CFG_PI_EXT_DAC_15_8_GET(x)\ 
FIELD_GET(SD10G_LANE_LANE_15_CFG_PI_EXT_DAC_15_8, x) -/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_16 */ -#define SD10G_LANE_LANE_16(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 88, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_16 */ +#define SD10G_LANE_LANE_16(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 0, 0, 1, 288, 88, 0, \ + 1, 4) #define SD10G_LANE_LANE_16_CFG_PI_EXT_DAC_23_16 GENMASK(7, 0) #define SD10G_LANE_LANE_16_CFG_PI_EXT_DAC_23_16_SET(x)\ @@ -322,8 +363,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_16_CFG_PI_EXT_DAC_23_16_GET(x)\ FIELD_GET(SD10G_LANE_LANE_16_CFG_PI_EXT_DAC_23_16, x) -/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_1A */ -#define SD10G_LANE_LANE_1A(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 104, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_1A */ +#define SD10G_LANE_LANE_1A(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 0, 0, 1, 288, 104, 0,\ + 1, 4) #define SD10G_LANE_LANE_1A_CFG_PI_R_SCAN_EN BIT(0) #define SD10G_LANE_LANE_1A_CFG_PI_R_SCAN_EN_SET(x)\ @@ -355,8 +398,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_1A_CFG_PI_FLOOP_STEPS_1_0_GET(x)\ FIELD_GET(SD10G_LANE_LANE_1A_CFG_PI_FLOOP_STEPS_1_0, x) -/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_22 */ -#define SD10G_LANE_LANE_22(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 136, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_22 */ +#define SD10G_LANE_LANE_22(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 0, 0, 1, 288, 136, 0,\ + 1, 4) #define SD10G_LANE_LANE_22_CFG_DFETAP_EN_5_1 GENMASK(4, 0) #define SD10G_LANE_LANE_22_CFG_DFETAP_EN_5_1_SET(x)\ @@ -364,8 +409,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_22_CFG_DFETAP_EN_5_1_GET(x)\ FIELD_GET(SD10G_LANE_LANE_22_CFG_DFETAP_EN_5_1, x) -/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_23 */ -#define SD10G_LANE_LANE_23(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 140, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_23 */ +#define SD10G_LANE_LANE_23(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 0, 0, 1, 288, 140, 0,\ + 1, 4) #define SD10G_LANE_LANE_23_CFG_DFE_PD BIT(0) #define SD10G_LANE_LANE_23_CFG_DFE_PD_SET(x)\ @@ -397,8 +444,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_23_CFG_DFEDIG_M_2_0_GET(x)\ FIELD_GET(SD10G_LANE_LANE_23_CFG_DFEDIG_M_2_0, x) -/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_24 */ -#define SD10G_LANE_LANE_24(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 144, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_24 */ +#define SD10G_LANE_LANE_24(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 0, 0, 1, 288, 144, 0,\ + 1, 4) #define SD10G_LANE_LANE_24_CFG_PI_BW_GEN1_3_0 GENMASK(3, 0) #define SD10G_LANE_LANE_24_CFG_PI_BW_GEN1_3_0_SET(x)\ @@ -412,8 +461,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_24_CFG_PI_BW_GEN2_3_0_GET(x)\ FIELD_GET(SD10G_LANE_LANE_24_CFG_PI_BW_GEN2_3_0, x) -/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_26 */ -#define SD10G_LANE_LANE_26(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 152, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_26 */ +#define SD10G_LANE_LANE_26(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 0, 0, 1, 288, 152, 0,\ + 1, 4) #define SD10G_LANE_LANE_26_CFG_ISCAN_EXT_DAC_7_0 GENMASK(7, 0) #define SD10G_LANE_LANE_26_CFG_ISCAN_EXT_DAC_7_0_SET(x)\ @@ -421,8 +472,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_26_CFG_ISCAN_EXT_DAC_7_0_GET(x)\ FIELD_GET(SD10G_LANE_LANE_26_CFG_ISCAN_EXT_DAC_7_0, x) -/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_2F */ -#define SD10G_LANE_LANE_2F(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 188, 0, 1, 
4) +/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_2F */ +#define SD10G_LANE_LANE_2F(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 0, 0, 1, 288, 188, 0,\ + 1, 4) #define SD10G_LANE_LANE_2F_CFG_VGA_CP_2_0 GENMASK(2, 0) #define SD10G_LANE_LANE_2F_CFG_VGA_CP_2_0_SET(x)\ @@ -436,8 +489,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_2F_CFG_VGA_CTRL_3_0_GET(x)\ FIELD_GET(SD10G_LANE_LANE_2F_CFG_VGA_CTRL_3_0, x) -/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_30 */ -#define SD10G_LANE_LANE_30(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 192, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_30 */ +#define SD10G_LANE_LANE_30(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 0, 0, 1, 288, 192, 0,\ + 1, 4) #define SD10G_LANE_LANE_30_CFG_SUMMER_EN BIT(0) #define SD10G_LANE_LANE_30_CFG_SUMMER_EN_SET(x)\ @@ -451,8 +506,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_30_CFG_RXDIV_SEL_2_0_GET(x)\ FIELD_GET(SD10G_LANE_LANE_30_CFG_RXDIV_SEL_2_0, x) -/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_31 */ -#define SD10G_LANE_LANE_31(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 196, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_31 */ +#define SD10G_LANE_LANE_31(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 0, 0, 1, 288, 196, 0,\ + 1, 4) #define SD10G_LANE_LANE_31_CFG_PI_RSTN BIT(0) #define SD10G_LANE_LANE_31_CFG_PI_RSTN_SET(x)\ @@ -490,8 +547,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_31_CFG_R50_EN_GET(x)\ FIELD_GET(SD10G_LANE_LANE_31_CFG_R50_EN, x) -/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_32 */ -#define SD10G_LANE_LANE_32(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 200, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_32 */ +#define SD10G_LANE_LANE_32(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 0, 0, 1, 288, 200, 0,\ + 1, 4) #define SD10G_LANE_LANE_32_CFG_ITX_IPCLK_BASE_1_0 GENMASK(1, 0) #define SD10G_LANE_LANE_32_CFG_ITX_IPCLK_BASE_1_0_SET(x)\ @@ -505,8 +564,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_32_CFG_ITX_IPCML_BASE_1_0_GET(x)\ FIELD_GET(SD10G_LANE_LANE_32_CFG_ITX_IPCML_BASE_1_0, x) -/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_33 */ -#define SD10G_LANE_LANE_33(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 204, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_33 */ +#define SD10G_LANE_LANE_33(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 0, 0, 1, 288, 204, 0,\ + 1, 4) #define SD10G_LANE_LANE_33_CFG_ITX_IPDRIVER_BASE_2_0 GENMASK(2, 0) #define SD10G_LANE_LANE_33_CFG_ITX_IPDRIVER_BASE_2_0_SET(x)\ @@ -520,8 +581,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_33_CFG_ITX_IPPREEMP_BASE_1_0_GET(x)\ FIELD_GET(SD10G_LANE_LANE_33_CFG_ITX_IPPREEMP_BASE_1_0, x) -/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_35 */ -#define SD10G_LANE_LANE_35(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 212, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_35 */ +#define SD10G_LANE_LANE_35(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 0, 0, 1, 288, 212, 0,\ + 1, 4) #define SD10G_LANE_LANE_35_CFG_TXRATE_1_0 GENMASK(1, 0) #define SD10G_LANE_LANE_35_CFG_TXRATE_1_0_SET(x)\ @@ -535,8 +598,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_35_CFG_RXRATE_1_0_GET(x)\ FIELD_GET(SD10G_LANE_LANE_35_CFG_RXRATE_1_0, x) -/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_36 */ -#define SD10G_LANE_LANE_36(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 216, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_36 */ +#define SD10G_LANE_LANE_36(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 0, 0, 1, 288, 216, 0,\ + 1, 4) #define 
SD10G_LANE_LANE_36_CFG_PREDRV_SLEWRATE_1_0 GENMASK(1, 0) #define SD10G_LANE_LANE_36_CFG_PREDRV_SLEWRATE_1_0_SET(x)\ @@ -568,8 +633,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_36_CFG_PRBS_SETB_GET(x)\ FIELD_GET(SD10G_LANE_LANE_36_CFG_PRBS_SETB, x) -/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_37 */ -#define SD10G_LANE_LANE_37(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 220, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_37 */ +#define SD10G_LANE_LANE_37(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 0, 0, 1, 288, 220, 0,\ + 1, 4) #define SD10G_LANE_LANE_37_CFG_RXDET_COMP_PD BIT(0) #define SD10G_LANE_LANE_37_CFG_RXDET_COMP_PD_SET(x)\ @@ -595,8 +662,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_37_CFG_IP_PRE_BASE_1_0_GET(x)\ FIELD_GET(SD10G_LANE_LANE_37_CFG_IP_PRE_BASE_1_0, x) -/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_39 */ -#define SD10G_LANE_LANE_39(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 228, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_39 */ +#define SD10G_LANE_LANE_39(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 0, 0, 1, 288, 228, 0,\ + 1, 4) #define SD10G_LANE_LANE_39_CFG_RXFILT_Y_2_0 GENMASK(2, 0) #define SD10G_LANE_LANE_39_CFG_RXFILT_Y_2_0_SET(x)\ @@ -610,8 +679,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_39_CFG_RX_SSC_LH_GET(x)\ FIELD_GET(SD10G_LANE_LANE_39_CFG_RX_SSC_LH, x) -/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_3A */ -#define SD10G_LANE_LANE_3A(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 232, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_3A */ +#define SD10G_LANE_LANE_3A(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 0, 0, 1, 288, 232, 0,\ + 1, 4) #define SD10G_LANE_LANE_3A_CFG_MP_MIN_3_0 GENMASK(3, 0) #define SD10G_LANE_LANE_3A_CFG_MP_MIN_3_0_SET(x)\ @@ -625,8 +696,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_3A_CFG_MP_MAX_3_0_GET(x)\ FIELD_GET(SD10G_LANE_LANE_3A_CFG_MP_MAX_3_0, x) -/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_3C */ -#define SD10G_LANE_LANE_3C(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 240, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_3C */ +#define SD10G_LANE_LANE_3C(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 0, 0, 1, 288, 240, 0,\ + 1, 4) #define SD10G_LANE_LANE_3C_CFG_DIS_ACC BIT(0) #define SD10G_LANE_LANE_3C_CFG_DIS_ACC_SET(x)\ @@ -640,8 +713,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_3C_CFG_DIS_2NDORDER_GET(x)\ FIELD_GET(SD10G_LANE_LANE_3C_CFG_DIS_2NDORDER, x) -/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_40 */ -#define SD10G_LANE_LANE_40(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 256, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_40 */ +#define SD10G_LANE_LANE_40(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 0, 0, 1, 288, 256, 0,\ + 1, 4) #define SD10G_LANE_LANE_40_CFG_LANE_RESERVE_7_0 GENMASK(7, 0) #define SD10G_LANE_LANE_40_CFG_LANE_RESERVE_7_0_SET(x)\ @@ -649,8 +724,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_40_CFG_LANE_RESERVE_7_0_GET(x)\ FIELD_GET(SD10G_LANE_LANE_40_CFG_LANE_RESERVE_7_0, x) -/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_41 */ -#define SD10G_LANE_LANE_41(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 260, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_41 */ +#define SD10G_LANE_LANE_41(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 0, 0, 1, 288, 260, 0,\ + 1, 4) #define SD10G_LANE_LANE_41_CFG_LANE_RESERVE_15_8 GENMASK(7, 0) #define SD10G_LANE_LANE_41_CFG_LANE_RESERVE_15_8_SET(x)\ @@ -658,8 +735,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_41_CFG_LANE_RESERVE_15_8_GET(x)\ 
FIELD_GET(SD10G_LANE_LANE_41_CFG_LANE_RESERVE_15_8, x) -/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_42 */ -#define SD10G_LANE_LANE_42(t) __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 264, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_0:LANE_42 */ +#define SD10G_LANE_LANE_42(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 0, 0, 1, 288, 264, 0,\ + 1, 4) #define SD10G_LANE_LANE_42_CFG_CDR_KF_GEN1_2_0 GENMASK(2, 0) #define SD10G_LANE_LANE_42_CFG_CDR_KF_GEN1_2_0_SET(x)\ @@ -673,8 +752,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_42_CFG_CDR_KF_GEN2_2_0_GET(x)\ FIELD_GET(SD10G_LANE_LANE_42_CFG_CDR_KF_GEN2_2_0, x) -/* SD10G_LANE_TARGET:LANE_GRP_1:LANE_48 */ -#define SD10G_LANE_LANE_48(t) __REG(TARGET_SD10G_LANE, t, 12, 288, 0, 1, 40, 0, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_1:LANE_48 */ +#define SD10G_LANE_LANE_48(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 288, 0, 1, 40, 0, 0, \ + 1, 4) #define SD10G_LANE_LANE_48_CFG_ALOS_THR_3_0 GENMASK(3, 0) #define SD10G_LANE_LANE_48_CFG_ALOS_THR_3_0_SET(x)\ @@ -694,8 +775,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_48_CFG_CLK_ENQ_GET(x)\ FIELD_GET(SD10G_LANE_LANE_48_CFG_CLK_ENQ, x) -/* SD10G_LANE_TARGET:LANE_GRP_1:LANE_50 */ -#define SD10G_LANE_LANE_50(t) __REG(TARGET_SD10G_LANE, t, 12, 288, 0, 1, 40, 32, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_1:LANE_50 */ +#define SD10G_LANE_LANE_50(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 288, 0, 1, 40, 32, 0,\ + 1, 4) #define SD10G_LANE_LANE_50_CFG_SSC_PI_STEP_1_0 GENMASK(1, 0) #define SD10G_LANE_LANE_50_CFG_SSC_PI_STEP_1_0_SET(x)\ @@ -727,8 +810,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_50_CFG_JT_EN_GET(x)\ FIELD_GET(SD10G_LANE_LANE_50_CFG_JT_EN, x) -/* SD10G_LANE_TARGET:LANE_GRP_2:LANE_52 */ -#define SD10G_LANE_LANE_52(t) __REG(TARGET_SD10G_LANE, t, 12, 328, 0, 1, 24, 0, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_2:LANE_52 */ +#define SD10G_LANE_LANE_52(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 328, 0, 1, 24, 0, 0, \ + 1, 4) #define SD10G_LANE_LANE_52_CFG_IBIAS_TUNE_RESERVE_5_0 GENMASK(5, 0) #define SD10G_LANE_LANE_52_CFG_IBIAS_TUNE_RESERVE_5_0_SET(x)\ @@ -736,8 +821,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_52_CFG_IBIAS_TUNE_RESERVE_5_0_GET(x)\ FIELD_GET(SD10G_LANE_LANE_52_CFG_IBIAS_TUNE_RESERVE_5_0, x) -/* SD10G_LANE_TARGET:LANE_GRP_4:LANE_83 */ -#define SD10G_LANE_LANE_83(t) __REG(TARGET_SD10G_LANE, t, 12, 464, 0, 1, 112, 60, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_4:LANE_83 */ +#define SD10G_LANE_LANE_83(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 464, 0, 1, 112, 60, \ + 0, 1, 4) #define SD10G_LANE_LANE_83_R_TX_BIT_REVERSE BIT(0) #define SD10G_LANE_LANE_83_R_TX_BIT_REVERSE_SET(x)\ @@ -781,8 +868,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_83_R_CTLE_RSTN_GET(x)\ FIELD_GET(SD10G_LANE_LANE_83_R_CTLE_RSTN, x) -/* SD10G_LANE_TARGET:LANE_GRP_5:LANE_93 */ -#define SD10G_LANE_LANE_93(t) __REG(TARGET_SD10G_LANE, t, 12, 576, 0, 1, 64, 12, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_5:LANE_93 */ +#define SD10G_LANE_LANE_93(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 576, 0, 1, 64, 12, 0,\ + 1, 4) #define SD10G_LANE_LANE_93_R_RXEI_FIFO_RST_EN BIT(0) #define SD10G_LANE_LANE_93_R_RXEI_FIFO_RST_EN_SET(x)\ @@ -832,8 +921,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_93_R_RX_PCIE_GEN12_FROM_HWT_GET(x)\ FIELD_GET(SD10G_LANE_LANE_93_R_RX_PCIE_GEN12_FROM_HWT, x) -/* SD10G_LANE_TARGET:LANE_GRP_5:LANE_94 */ -#define SD10G_LANE_LANE_94(t) __REG(TARGET_SD10G_LANE, t, 12, 576, 0, 1, 64, 16, 0, 1, 
4) +/* SD10G_LANE_TARGET:LANE_GRP_5:LANE_94 */ +#define SD10G_LANE_LANE_94(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 576, 0, 1, 64, 16, 0,\ + 1, 4) #define SD10G_LANE_LANE_94_R_DWIDTHCTRL_2_0 GENMASK(2, 0) #define SD10G_LANE_LANE_94_R_DWIDTHCTRL_2_0_SET(x)\ @@ -865,8 +956,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_94_R_SWING_REG_GET(x)\ FIELD_GET(SD10G_LANE_LANE_94_R_SWING_REG, x) -/* SD10G_LANE_TARGET:LANE_GRP_5:LANE_9E */ -#define SD10G_LANE_LANE_9E(t) __REG(TARGET_SD10G_LANE, t, 12, 576, 0, 1, 64, 56, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_5:LANE_9E */ +#define SD10G_LANE_LANE_9E(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 576, 0, 1, 64, 56, 0,\ + 1, 4) #define SD10G_LANE_LANE_9E_R_RXEQ_REG BIT(0) #define SD10G_LANE_LANE_9E_R_RXEQ_REG_SET(x)\ @@ -886,8 +979,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_9E_R_EN_AUTO_CDR_RSTN_GET(x)\ FIELD_GET(SD10G_LANE_LANE_9E_R_EN_AUTO_CDR_RSTN, x) -/* SD10G_LANE_TARGET:LANE_GRP_6:LANE_A1 */ -#define SD10G_LANE_LANE_A1(t) __REG(TARGET_SD10G_LANE, t, 12, 640, 0, 1, 128, 4, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_6:LANE_A1 */ +#define SD10G_LANE_LANE_A1(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 640, 0, 1, 128, 4, 0,\ + 1, 4) #define SD10G_LANE_LANE_A1_R_PMA_TXCK_DIV_SEL_1_0 GENMASK(1, 0) #define SD10G_LANE_LANE_A1_R_PMA_TXCK_DIV_SEL_1_0_SET(x)\ @@ -919,8 +1014,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_A1_R_PCLK_GATING_GET(x)\ FIELD_GET(SD10G_LANE_LANE_A1_R_PCLK_GATING, x) -/* SD10G_LANE_TARGET:LANE_GRP_6:LANE_A2 */ -#define SD10G_LANE_LANE_A2(t) __REG(TARGET_SD10G_LANE, t, 12, 640, 0, 1, 128, 8, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_6:LANE_A2 */ +#define SD10G_LANE_LANE_A2(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 640, 0, 1, 128, 8, 0,\ + 1, 4) #define SD10G_LANE_LANE_A2_R_PCS2PMA_PHYMODE_4_0 GENMASK(4, 0) #define SD10G_LANE_LANE_A2_R_PCS2PMA_PHYMODE_4_0_SET(x)\ @@ -928,8 +1025,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_A2_R_PCS2PMA_PHYMODE_4_0_GET(x)\ FIELD_GET(SD10G_LANE_LANE_A2_R_PCS2PMA_PHYMODE_4_0, x) -/* SD10G_LANE_TARGET:LANE_GRP_8:LANE_DF */ -#define SD10G_LANE_LANE_DF(t) __REG(TARGET_SD10G_LANE, t, 12, 832, 0, 1, 84, 60, 0, 1, 4) +/* SD10G_LANE_TARGET:LANE_GRP_8:LANE_DF */ +#define SD10G_LANE_LANE_DF(t) \ + __REG(TARGET_SD10G_LANE, t, TSIZE(TC_SD10G_LANE), 832, 0, 1, 84, 60, 0,\ + 1, 4) #define SD10G_LANE_LANE_DF_LOL_UDL BIT(0) #define SD10G_LANE_LANE_DF_LOL_UDL_SET(x)\ @@ -955,8 +1054,10 @@ enum sparx5_serdes_target { #define SD10G_LANE_LANE_DF_SQUELCH_GET(x)\ FIELD_GET(SD10G_LANE_LANE_DF_SQUELCH, x) -/* SD25G_TARGET:CMU_GRP_0:CMU_09 */ -#define SD25G_LANE_CMU_09(t) __REG(TARGET_SD25G_LANE, t, 8, 0, 0, 1, 132, 36, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:CMU_GRP_0:CMU_09 */ +#define SD25G_LANE_CMU_09(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 0, 0, 1, 132, 36, 0, 1, 4) #define SD25G_LANE_CMU_09_CFG_REFCK_TERM_EN BIT(0) #define SD25G_LANE_CMU_09_CFG_REFCK_TERM_EN_SET(x)\ @@ -988,8 +1089,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_CMU_09_CFG_PLL_TP_SEL_1_0_GET(x)\ FIELD_GET(SD25G_LANE_CMU_09_CFG_PLL_TP_SEL_1_0, x) -/* SD25G_TARGET:CMU_GRP_0:CMU_0B */ -#define SD25G_LANE_CMU_0B(t) __REG(TARGET_SD25G_LANE, t, 8, 0, 0, 1, 132, 44, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:CMU_GRP_0:CMU_0B */ +#define SD25G_LANE_CMU_0B(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 0, 0, 1, 132, 44, 0, 1, 4) #define SD25G_LANE_CMU_0B_CFG_FORCE_RX_FILT BIT(0) #define SD25G_LANE_CMU_0B_CFG_FORCE_RX_FILT_SET(x)\ @@ -1039,8 +1142,10 @@ enum 
sparx5_serdes_target { #define SD25G_LANE_CMU_0B_CFG_RST_TREE_PD_MAN_GET(x)\ FIELD_GET(SD25G_LANE_CMU_0B_CFG_RST_TREE_PD_MAN, x) -/* SD25G_TARGET:CMU_GRP_0:CMU_0C */ -#define SD25G_LANE_CMU_0C(t) __REG(TARGET_SD25G_LANE, t, 8, 0, 0, 1, 132, 48, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:CMU_GRP_0:CMU_0C */ +#define SD25G_LANE_CMU_0C(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 0, 0, 1, 132, 48, 0, 1, 4) #define SD25G_LANE_CMU_0C_CFG_PLL_LOL_SET BIT(0) #define SD25G_LANE_CMU_0C_CFG_PLL_LOL_SET_SET(x)\ @@ -1072,8 +1177,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_CMU_0C_CFG_VCO_DIV_MODE_1_0_GET(x)\ FIELD_GET(SD25G_LANE_CMU_0C_CFG_VCO_DIV_MODE_1_0, x) -/* SD25G_TARGET:CMU_GRP_0:CMU_0D */ -#define SD25G_LANE_CMU_0D(t) __REG(TARGET_SD25G_LANE, t, 8, 0, 0, 1, 132, 52, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:CMU_GRP_0:CMU_0D */ +#define SD25G_LANE_CMU_0D(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 0, 0, 1, 132, 52, 0, 1, 4) #define SD25G_LANE_CMU_0D_CFG_CK_TREE_PD BIT(0) #define SD25G_LANE_CMU_0D_CFG_CK_TREE_PD_SET(x)\ @@ -1105,8 +1212,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_CMU_0D_CFG_PRE_DIVSEL_1_0_GET(x)\ FIELD_GET(SD25G_LANE_CMU_0D_CFG_PRE_DIVSEL_1_0, x) -/* SD25G_TARGET:CMU_GRP_0:CMU_0E */ -#define SD25G_LANE_CMU_0E(t) __REG(TARGET_SD25G_LANE, t, 8, 0, 0, 1, 132, 56, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:CMU_GRP_0:CMU_0E */ +#define SD25G_LANE_CMU_0E(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 0, 0, 1, 132, 56, 0, 1, 4) #define SD25G_LANE_CMU_0E_CFG_SEL_DIV_3_0 GENMASK(3, 0) #define SD25G_LANE_CMU_0E_CFG_SEL_DIV_3_0_SET(x)\ @@ -1120,8 +1229,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_CMU_0E_CFG_PMAA_CENTR_CK_PD_GET(x)\ FIELD_GET(SD25G_LANE_CMU_0E_CFG_PMAA_CENTR_CK_PD, x) -/* SD25G_TARGET:CMU_GRP_0:CMU_13 */ -#define SD25G_LANE_CMU_13(t) __REG(TARGET_SD25G_LANE, t, 8, 0, 0, 1, 132, 76, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:CMU_GRP_0:CMU_13 */ +#define SD25G_LANE_CMU_13(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 0, 0, 1, 132, 76, 0, 1, 4) #define SD25G_LANE_CMU_13_CFG_PLL_RESERVE_3_0 GENMASK(3, 0) #define SD25G_LANE_CMU_13_CFG_PLL_RESERVE_3_0_SET(x)\ @@ -1135,8 +1246,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_CMU_13_CFG_JT_EN_GET(x)\ FIELD_GET(SD25G_LANE_CMU_13_CFG_JT_EN, x) -/* SD25G_TARGET:CMU_GRP_0:CMU_18 */ -#define SD25G_LANE_CMU_18(t) __REG(TARGET_SD25G_LANE, t, 8, 0, 0, 1, 132, 96, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:CMU_GRP_0:CMU_18 */ +#define SD25G_LANE_CMU_18(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 0, 0, 1, 132, 96, 0, 1, 4) #define SD25G_LANE_CMU_18_R_PLL_RSTN BIT(0) #define SD25G_LANE_CMU_18_R_PLL_RSTN_SET(x)\ @@ -1162,8 +1275,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_CMU_18_R_PLL_TP_SEL_1_0_GET(x)\ FIELD_GET(SD25G_LANE_CMU_18_R_PLL_TP_SEL_1_0, x) -/* SD25G_TARGET:CMU_GRP_0:CMU_19 */ -#define SD25G_LANE_CMU_19(t) __REG(TARGET_SD25G_LANE, t, 8, 0, 0, 1, 132, 100, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:CMU_GRP_0:CMU_19 */ +#define SD25G_LANE_CMU_19(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 0, 0, 1, 132, 100, 0, 1, 4) #define SD25G_LANE_CMU_19_R_CK_RESETB BIT(0) #define SD25G_LANE_CMU_19_R_CK_RESETB_SET(x)\ @@ -1177,8 +1292,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_CMU_19_R_PLL_DLOL_EN_GET(x)\ FIELD_GET(SD25G_LANE_CMU_19_R_PLL_DLOL_EN, x) -/* SD25G_TARGET:CMU_GRP_0:CMU_1A */ -#define SD25G_LANE_CMU_1A(t) __REG(TARGET_SD25G_LANE, t, 8, 0, 0, 1, 132, 104, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:CMU_GRP_0:CMU_1A */ +#define SD25G_LANE_CMU_1A(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 0, 0, 1, 132, 104, 0, 1, 4) #define 
SD25G_LANE_CMU_1A_R_DWIDTHCTRL_2_0 GENMASK(2, 0) #define SD25G_LANE_CMU_1A_R_DWIDTHCTRL_2_0_SET(x)\ @@ -1204,8 +1321,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_CMU_1A_R_REG_MANUAL_GET(x)\ FIELD_GET(SD25G_LANE_CMU_1A_R_REG_MANUAL, x) -/* SD25G_TARGET:CMU_GRP_1:CMU_2A */ -#define SD25G_LANE_CMU_2A(t) __REG(TARGET_SD25G_LANE, t, 8, 132, 0, 1, 124, 36, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:CMU_GRP_1:CMU_2A */ +#define SD25G_LANE_CMU_2A(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 132, 0, 1, 124, 36, 0, 1, 4) #define SD25G_LANE_CMU_2A_R_DBG_SEL_1_0 GENMASK(1, 0) #define SD25G_LANE_CMU_2A_R_DBG_SEL_1_0_SET(x)\ @@ -1225,8 +1344,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_CMU_2A_R_DBG_LOL_STATUS_GET(x)\ FIELD_GET(SD25G_LANE_CMU_2A_R_DBG_LOL_STATUS, x) -/* SD25G_TARGET:CMU_GRP_1:CMU_30 */ -#define SD25G_LANE_CMU_30(t) __REG(TARGET_SD25G_LANE, t, 8, 132, 0, 1, 124, 60, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:CMU_GRP_1:CMU_30 */ +#define SD25G_LANE_CMU_30(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 132, 0, 1, 124, 60, 0, 1, 4) #define SD25G_LANE_CMU_30_R_TXFIFO_CK_DIV_PMAD_2_0 GENMASK(2, 0) #define SD25G_LANE_CMU_30_R_TXFIFO_CK_DIV_PMAD_2_0_SET(x)\ @@ -1240,8 +1361,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_CMU_30_R_RXFIFO_CK_DIV_PMAD_2_0_GET(x)\ FIELD_GET(SD25G_LANE_CMU_30_R_RXFIFO_CK_DIV_PMAD_2_0, x) -/* SD25G_TARGET:CMU_GRP_1:CMU_31 */ -#define SD25G_LANE_CMU_31(t) __REG(TARGET_SD25G_LANE, t, 8, 132, 0, 1, 124, 64, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:CMU_GRP_1:CMU_31 */ +#define SD25G_LANE_CMU_31(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 132, 0, 1, 124, 64, 0, 1, 4) #define SD25G_LANE_CMU_31_CFG_COMMON_RESERVE_7_0 GENMASK(7, 0) #define SD25G_LANE_CMU_31_CFG_COMMON_RESERVE_7_0_SET(x)\ @@ -1249,8 +1372,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_CMU_31_CFG_COMMON_RESERVE_7_0_GET(x)\ FIELD_GET(SD25G_LANE_CMU_31_CFG_COMMON_RESERVE_7_0, x) -/* SD25G_TARGET:CMU_GRP_2:CMU_40 */ -#define SD25G_LANE_CMU_40(t) __REG(TARGET_SD25G_LANE, t, 8, 256, 0, 1, 512, 0, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:CMU_GRP_2:CMU_40 */ +#define SD25G_LANE_CMU_40(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 256, 0, 1, 512, 0, 0, 1, 4) #define SD25G_LANE_CMU_40_L0_CFG_CKSKEW_CTRL BIT(0) #define SD25G_LANE_CMU_40_L0_CFG_CKSKEW_CTRL_SET(x)\ @@ -1288,8 +1413,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_CMU_40_L0_CFG_TXCAL_RST_GET(x)\ FIELD_GET(SD25G_LANE_CMU_40_L0_CFG_TXCAL_RST, x) -/* SD25G_TARGET:CMU_GRP_2:CMU_45 */ -#define SD25G_LANE_CMU_45(t) __REG(TARGET_SD25G_LANE, t, 8, 256, 0, 1, 512, 20, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:CMU_GRP_2:CMU_45 */ +#define SD25G_LANE_CMU_45(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 256, 0, 1, 512, 20, 0, 1, 4) #define SD25G_LANE_CMU_45_L0_CFG_TX_RESERVE_7_0 GENMASK(7, 0) #define SD25G_LANE_CMU_45_L0_CFG_TX_RESERVE_7_0_SET(x)\ @@ -1297,8 +1424,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_CMU_45_L0_CFG_TX_RESERVE_7_0_GET(x)\ FIELD_GET(SD25G_LANE_CMU_45_L0_CFG_TX_RESERVE_7_0, x) -/* SD25G_TARGET:CMU_GRP_2:CMU_46 */ -#define SD25G_LANE_CMU_46(t) __REG(TARGET_SD25G_LANE, t, 8, 256, 0, 1, 512, 24, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:CMU_GRP_2:CMU_46 */ +#define SD25G_LANE_CMU_46(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 256, 0, 1, 512, 24, 0, 1, 4) #define SD25G_LANE_CMU_46_L0_CFG_TX_RESERVE_15_8 GENMASK(7, 0) #define SD25G_LANE_CMU_46_L0_CFG_TX_RESERVE_15_8_SET(x)\ @@ -1306,8 +1435,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_CMU_46_L0_CFG_TX_RESERVE_15_8_GET(x)\ FIELD_GET(SD25G_LANE_CMU_46_L0_CFG_TX_RESERVE_15_8, x) -/* 
SD25G_TARGET:CMU_GRP_3:CMU_C0 */ -#define SD25G_LANE_CMU_C0(t) __REG(TARGET_SD25G_LANE, t, 8, 768, 0, 1, 252, 0, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:CMU_GRP_3:CMU_C0 */ +#define SD25G_LANE_CMU_C0(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 768, 0, 1, 252, 0, 0, 1, 4) #define SD25G_LANE_CMU_C0_READ_VCO_CTUNE_3_0 GENMASK(3, 0) #define SD25G_LANE_CMU_C0_READ_VCO_CTUNE_3_0_SET(x)\ @@ -1321,8 +1452,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_CMU_C0_PLL_LOL_UDL_GET(x)\ FIELD_GET(SD25G_LANE_CMU_C0_PLL_LOL_UDL, x) -/* SD25G_TARGET:CMU_GRP_4:CMU_FF */ -#define SD25G_LANE_CMU_FF(t) __REG(TARGET_SD25G_LANE, t, 8, 1020, 0, 1, 4, 0, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:CMU_GRP_4:CMU_FF */ +#define SD25G_LANE_CMU_FF(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1020, 0, 1, 4, 0, 0, 1, 4) #define SD25G_LANE_CMU_FF_REGISTER_TABLE_INDEX GENMASK(7, 0) #define SD25G_LANE_CMU_FF_REGISTER_TABLE_INDEX_SET(x)\ @@ -1330,8 +1463,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_CMU_FF_REGISTER_TABLE_INDEX_GET(x)\ FIELD_GET(SD25G_LANE_CMU_FF_REGISTER_TABLE_INDEX, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_00 */ -#define SD25G_LANE_LANE_00(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 0, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_00 */ +#define SD25G_LANE_LANE_00(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 0, 0, 1, 4) #define SD25G_LANE_LANE_00_LN_CFG_ITX_VC_DRIVER_3_0 GENMASK(3, 0) #define SD25G_LANE_LANE_00_LN_CFG_ITX_VC_DRIVER_3_0_SET(x)\ @@ -1345,8 +1480,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_00_LN_CFG_ITX_IPCML_BASE_1_0_GET(x)\ FIELD_GET(SD25G_LANE_LANE_00_LN_CFG_ITX_IPCML_BASE_1_0, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_01 */ -#define SD25G_LANE_LANE_01(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 4, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_01 */ +#define SD25G_LANE_LANE_01(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 4, 0, 1, 4) #define SD25G_LANE_LANE_01_LN_CFG_ITX_IPDRIVER_BASE_2_0 GENMASK(2, 0) #define SD25G_LANE_LANE_01_LN_CFG_ITX_IPDRIVER_BASE_2_0_SET(x)\ @@ -1360,8 +1497,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_01_LN_CFG_TX_PREDIV_1_0_GET(x)\ FIELD_GET(SD25G_LANE_LANE_01_LN_CFG_TX_PREDIV_1_0, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_03 */ -#define SD25G_LANE_LANE_03(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 12, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_03 */ +#define SD25G_LANE_LANE_03(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 12, 0, 1, 4) #define SD25G_LANE_LANE_03_LN_CFG_TAP_DLY_4_0 GENMASK(4, 0) #define SD25G_LANE_LANE_03_LN_CFG_TAP_DLY_4_0_SET(x)\ @@ -1369,8 +1508,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_03_LN_CFG_TAP_DLY_4_0_GET(x)\ FIELD_GET(SD25G_LANE_LANE_03_LN_CFG_TAP_DLY_4_0, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_04 */ -#define SD25G_LANE_LANE_04(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 16, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_04 */ +#define SD25G_LANE_LANE_04(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 16, 0, 1, 4) #define SD25G_LANE_LANE_04_LN_CFG_TX2RX_LP_EN BIT(0) #define SD25G_LANE_LANE_04_LN_CFG_TX2RX_LP_EN_SET(x)\ @@ -1408,8 +1549,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_04_LN_CFG_TAP_MAIN_GET(x)\ FIELD_GET(SD25G_LANE_LANE_04_LN_CFG_TAP_MAIN, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_05 */ -#define SD25G_LANE_LANE_05(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 20, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_05 */ +#define SD25G_LANE_LANE_05(t) \ + 
__REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 20, 0, 1, 4) #define SD25G_LANE_LANE_05_LN_CFG_TAP_DLY2_3_0 GENMASK(3, 0) #define SD25G_LANE_LANE_05_LN_CFG_TAP_DLY2_3_0_SET(x)\ @@ -1423,8 +1566,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_05_LN_CFG_BW_1_0_GET(x)\ FIELD_GET(SD25G_LANE_LANE_05_LN_CFG_BW_1_0, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_06 */ -#define SD25G_LANE_LANE_06(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 24, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_06 */ +#define SD25G_LANE_LANE_06(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 24, 0, 1, 4) #define SD25G_LANE_LANE_06_LN_CFG_EN_MAIN BIT(0) #define SD25G_LANE_LANE_06_LN_CFG_EN_MAIN_SET(x)\ @@ -1438,8 +1583,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_06_LN_CFG_TAP_ADV_3_0_GET(x)\ FIELD_GET(SD25G_LANE_LANE_06_LN_CFG_TAP_ADV_3_0, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_07 */ -#define SD25G_LANE_LANE_07(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 28, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_07 */ +#define SD25G_LANE_LANE_07(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 28, 0, 1, 4) #define SD25G_LANE_LANE_07_LN_CFG_EN_ADV BIT(0) #define SD25G_LANE_LANE_07_LN_CFG_EN_ADV_SET(x)\ @@ -1459,8 +1606,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_07_LN_CFG_EN_DLY_GET(x)\ FIELD_GET(SD25G_LANE_LANE_07_LN_CFG_EN_DLY, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_09 */ -#define SD25G_LANE_LANE_09(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 36, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_09 */ +#define SD25G_LANE_LANE_09(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 36, 0, 1, 4) #define SD25G_LANE_LANE_09_LN_CFG_TXCAL_VALID_SEL_3_0 GENMASK(3, 0) #define SD25G_LANE_LANE_09_LN_CFG_TXCAL_VALID_SEL_3_0_SET(x)\ @@ -1468,8 +1617,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_09_LN_CFG_TXCAL_VALID_SEL_3_0_GET(x)\ FIELD_GET(SD25G_LANE_LANE_09_LN_CFG_TXCAL_VALID_SEL_3_0, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_0A */ -#define SD25G_LANE_LANE_0A(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 40, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_0A */ +#define SD25G_LANE_LANE_0A(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 40, 0, 1, 4) #define SD25G_LANE_LANE_0A_LN_CFG_TXCAL_SHIFT_CODE_5_0 GENMASK(5, 0) #define SD25G_LANE_LANE_0A_LN_CFG_TXCAL_SHIFT_CODE_5_0_SET(x)\ @@ -1477,8 +1628,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_0A_LN_CFG_TXCAL_SHIFT_CODE_5_0_GET(x)\ FIELD_GET(SD25G_LANE_LANE_0A_LN_CFG_TXCAL_SHIFT_CODE_5_0, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_0B */ -#define SD25G_LANE_LANE_0B(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 44, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_0B */ +#define SD25G_LANE_LANE_0B(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 44, 0, 1, 4) #define SD25G_LANE_LANE_0B_LN_CFG_TXCAL_MAN_EN BIT(0) #define SD25G_LANE_LANE_0B_LN_CFG_TXCAL_MAN_EN_SET(x)\ @@ -1498,8 +1651,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_0B_LN_CFG_QUAD_MAN_1_0_GET(x)\ FIELD_GET(SD25G_LANE_LANE_0B_LN_CFG_QUAD_MAN_1_0, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_0C */ -#define SD25G_LANE_LANE_0C(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 48, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_0C */ +#define SD25G_LANE_LANE_0C(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 48, 0, 1, 4) #define SD25G_LANE_LANE_0C_LN_CFG_PMA_TX_CK_BITWIDTH_2_0 GENMASK(2, 0) #define SD25G_LANE_LANE_0C_LN_CFG_PMA_TX_CK_BITWIDTH_2_0_SET(x)\ @@ 
-1519,8 +1674,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_0C_LN_CFG_RXTERM_PD_GET(x)\ FIELD_GET(SD25G_LANE_LANE_0C_LN_CFG_RXTERM_PD, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_0D */ -#define SD25G_LANE_LANE_0D(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 52, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_0D */ +#define SD25G_LANE_LANE_0D(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 52, 0, 1, 4) #define SD25G_LANE_LANE_0D_LN_CFG_RXTERM_2_0 GENMASK(2, 0) #define SD25G_LANE_LANE_0D_LN_CFG_RXTERM_2_0_SET(x)\ @@ -1552,8 +1709,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_0D_LN_CFG_DFECK_EN_GET(x)\ FIELD_GET(SD25G_LANE_LANE_0D_LN_CFG_DFECK_EN, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_0E */ -#define SD25G_LANE_LANE_0E(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 56, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_0E */ +#define SD25G_LANE_LANE_0E(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 56, 0, 1, 4) #define SD25G_LANE_LANE_0E_LN_CFG_ISCAN_EN BIT(0) #define SD25G_LANE_LANE_0E_LN_CFG_ISCAN_EN_SET(x)\ @@ -1579,8 +1738,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_0E_LN_CFG_DFEDIG_M_2_0_GET(x)\ FIELD_GET(SD25G_LANE_LANE_0E_LN_CFG_DFEDIG_M_2_0, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_0F */ -#define SD25G_LANE_LANE_0F(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 60, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_0F */ +#define SD25G_LANE_LANE_0F(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 60, 0, 1, 4) #define SD25G_LANE_LANE_0F_LN_CFG_DFETAP_EN_5_1 GENMASK(4, 0) #define SD25G_LANE_LANE_0F_LN_CFG_DFETAP_EN_5_1_SET(x)\ @@ -1588,8 +1749,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_0F_LN_CFG_DFETAP_EN_5_1_GET(x)\ FIELD_GET(SD25G_LANE_LANE_0F_LN_CFG_DFETAP_EN_5_1, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_18 */ -#define SD25G_LANE_LANE_18(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 96, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_18 */ +#define SD25G_LANE_LANE_18(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 96, 0, 1, 4) #define SD25G_LANE_LANE_18_LN_CFG_CDRCK_EN BIT(0) #define SD25G_LANE_LANE_18_LN_CFG_CDRCK_EN_SET(x)\ @@ -1621,8 +1784,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_18_LN_CFG_RXDIV_SEL_2_0_GET(x)\ FIELD_GET(SD25G_LANE_LANE_18_LN_CFG_RXDIV_SEL_2_0, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_19 */ -#define SD25G_LANE_LANE_19(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 100, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_19 */ +#define SD25G_LANE_LANE_19(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 100, 0, 1, 4) #define SD25G_LANE_LANE_19_LN_CFG_DCDR_PD BIT(0) #define SD25G_LANE_LANE_19_LN_CFG_DCDR_PD_SET(x)\ @@ -1672,8 +1837,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_19_LN_CFG_PD_CTLE_GET(x)\ FIELD_GET(SD25G_LANE_LANE_19_LN_CFG_PD_CTLE, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_1A */ -#define SD25G_LANE_LANE_1A(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 104, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_1A */ +#define SD25G_LANE_LANE_1A(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 104, 0, 1, 4) #define SD25G_LANE_LANE_1A_LN_CFG_CTLE_TP_EN BIT(0) #define SD25G_LANE_LANE_1A_LN_CFG_CTLE_TP_EN_SET(x)\ @@ -1687,8 +1854,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_1A_LN_CFG_CDR_KF_2_0_GET(x)\ FIELD_GET(SD25G_LANE_LANE_1A_LN_CFG_CDR_KF_2_0, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_1B */ -#define SD25G_LANE_LANE_1B(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 
108, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_1B */ +#define SD25G_LANE_LANE_1B(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 108, 0, 1, 4) #define SD25G_LANE_LANE_1B_LN_CFG_CDR_M_7_0 GENMASK(7, 0) #define SD25G_LANE_LANE_1B_LN_CFG_CDR_M_7_0_SET(x)\ @@ -1696,8 +1865,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_1B_LN_CFG_CDR_M_7_0_GET(x)\ FIELD_GET(SD25G_LANE_LANE_1B_LN_CFG_CDR_M_7_0, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_1C */ -#define SD25G_LANE_LANE_1C(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 112, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_1C */ +#define SD25G_LANE_LANE_1C(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 112, 0, 1, 4) #define SD25G_LANE_LANE_1C_LN_CFG_CDR_RSTN BIT(0) #define SD25G_LANE_LANE_1C_LN_CFG_CDR_RSTN_SET(x)\ @@ -1723,8 +1894,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_1C_LN_CFG_EQC_FORCE_3_0_GET(x)\ FIELD_GET(SD25G_LANE_LANE_1C_LN_CFG_EQC_FORCE_3_0, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_1D */ -#define SD25G_LANE_LANE_1D(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 116, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_1D */ +#define SD25G_LANE_LANE_1D(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 116, 0, 1, 4) #define SD25G_LANE_LANE_1D_LN_CFG_ISCAN_EXT_OVR BIT(0) #define SD25G_LANE_LANE_1D_LN_CFG_ISCAN_EXT_OVR_SET(x)\ @@ -1774,8 +1947,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_1D_LN_CFG_PI_HOLD_GET(x)\ FIELD_GET(SD25G_LANE_LANE_1D_LN_CFG_PI_HOLD, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_1E */ -#define SD25G_LANE_LANE_1E(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 120, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_1E */ +#define SD25G_LANE_LANE_1E(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 120, 0, 1, 4) #define SD25G_LANE_LANE_1E_LN_CFG_PI_STEPS_1_0 GENMASK(1, 0) #define SD25G_LANE_LANE_1E_LN_CFG_PI_STEPS_1_0_SET(x)\ @@ -1807,8 +1982,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_1E_LN_CFG_PMAD_CK_PD_GET(x)\ FIELD_GET(SD25G_LANE_LANE_1E_LN_CFG_PMAD_CK_PD, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_21 */ -#define SD25G_LANE_LANE_21(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 132, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_21 */ +#define SD25G_LANE_LANE_21(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 132, 0, 1, 4) #define SD25G_LANE_LANE_21_LN_CFG_VGA_CTRL_BYP_4_0 GENMASK(4, 0) #define SD25G_LANE_LANE_21_LN_CFG_VGA_CTRL_BYP_4_0_SET(x)\ @@ -1816,8 +1993,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_21_LN_CFG_VGA_CTRL_BYP_4_0_GET(x)\ FIELD_GET(SD25G_LANE_LANE_21_LN_CFG_VGA_CTRL_BYP_4_0, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_22 */ -#define SD25G_LANE_LANE_22(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 136, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_22 */ +#define SD25G_LANE_LANE_22(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 136, 0, 1, 4) #define SD25G_LANE_LANE_22_LN_CFG_EQR_FORCE_3_0 GENMASK(3, 0) #define SD25G_LANE_LANE_22_LN_CFG_EQR_FORCE_3_0_SET(x)\ @@ -1825,8 +2004,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_22_LN_CFG_EQR_FORCE_3_0_GET(x)\ FIELD_GET(SD25G_LANE_LANE_22_LN_CFG_EQR_FORCE_3_0, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_25 */ -#define SD25G_LANE_LANE_25(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 148, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_25 */ +#define SD25G_LANE_LANE_25(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 148, 0, 1, 4) #define 
SD25G_LANE_LANE_25_LN_CFG_INIT_POS_ISCAN_6_0 GENMASK(6, 0) #define SD25G_LANE_LANE_25_LN_CFG_INIT_POS_ISCAN_6_0_SET(x)\ @@ -1834,8 +2015,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_25_LN_CFG_INIT_POS_ISCAN_6_0_GET(x)\ FIELD_GET(SD25G_LANE_LANE_25_LN_CFG_INIT_POS_ISCAN_6_0, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_26 */ -#define SD25G_LANE_LANE_26(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 152, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_26 */ +#define SD25G_LANE_LANE_26(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 152, 0, 1, 4) #define SD25G_LANE_LANE_26_LN_CFG_INIT_POS_IPI_6_0 GENMASK(6, 0) #define SD25G_LANE_LANE_26_LN_CFG_INIT_POS_IPI_6_0_SET(x)\ @@ -1843,8 +2026,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_26_LN_CFG_INIT_POS_IPI_6_0_GET(x)\ FIELD_GET(SD25G_LANE_LANE_26_LN_CFG_INIT_POS_IPI_6_0, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_28 */ -#define SD25G_LANE_LANE_28(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 160, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_28 */ +#define SD25G_LANE_LANE_28(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 160, 0, 1, 4) #define SD25G_LANE_LANE_28_LN_CFG_ISCAN_MODE_EN BIT(0) #define SD25G_LANE_LANE_28_LN_CFG_ISCAN_MODE_EN_SET(x)\ @@ -1870,8 +2055,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_28_LN_CFG_RX_SUBRATE_2_0_GET(x)\ FIELD_GET(SD25G_LANE_LANE_28_LN_CFG_RX_SUBRATE_2_0, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_2B */ -#define SD25G_LANE_LANE_2B(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 172, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_2B */ +#define SD25G_LANE_LANE_2B(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 172, 0, 1, 4) #define SD25G_LANE_LANE_2B_LN_CFG_PI_BW_3_0 GENMASK(3, 0) #define SD25G_LANE_LANE_2B_LN_CFG_PI_BW_3_0_SET(x)\ @@ -1891,8 +2078,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_2B_LN_CFG_RSTN_TXDUPU_GET(x)\ FIELD_GET(SD25G_LANE_LANE_2B_LN_CFG_RSTN_TXDUPU, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_2C */ -#define SD25G_LANE_LANE_2C(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 176, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_2C */ +#define SD25G_LANE_LANE_2C(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 176, 0, 1, 4) #define SD25G_LANE_LANE_2C_LN_CFG_TX_SUBRATE_2_0 GENMASK(2, 0) #define SD25G_LANE_LANE_2C_LN_CFG_TX_SUBRATE_2_0_SET(x)\ @@ -1906,8 +2095,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_2C_LN_CFG_DIS_2NDORDER_GET(x)\ FIELD_GET(SD25G_LANE_LANE_2C_LN_CFG_DIS_2NDORDER, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_2D */ -#define SD25G_LANE_LANE_2D(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 180, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_2D */ +#define SD25G_LANE_LANE_2D(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 180, 0, 1, 4) #define SD25G_LANE_LANE_2D_LN_CFG_ALOS_THR_2_0 GENMASK(2, 0) #define SD25G_LANE_LANE_2D_LN_CFG_ALOS_THR_2_0_SET(x)\ @@ -1921,8 +2112,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_2D_LN_CFG_SAT_CNTSEL_2_0_GET(x)\ FIELD_GET(SD25G_LANE_LANE_2D_LN_CFG_SAT_CNTSEL_2_0, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_2E */ -#define SD25G_LANE_LANE_2E(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 184, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_2E */ +#define SD25G_LANE_LANE_2E(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 184, 0, 1, 4) #define SD25G_LANE_LANE_2E_LN_CFG_EN_FAST_ISCAN BIT(0) #define SD25G_LANE_LANE_2E_LN_CFG_EN_FAST_ISCAN_SET(x)\ @@ -1972,8 +2165,10 @@ enum 
sparx5_serdes_target { #define SD25G_LANE_LANE_2E_LN_CFG_CTLE_RSTN_GET(x)\ FIELD_GET(SD25G_LANE_LANE_2E_LN_CFG_CTLE_RSTN, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_40 */ -#define SD25G_LANE_LANE_40(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 256, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_40 */ +#define SD25G_LANE_LANE_40(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 256, 0, 1, 4) #define SD25G_LANE_LANE_40_LN_R_TX_BIT_REVERSE BIT(0) #define SD25G_LANE_LANE_40_LN_R_TX_BIT_REVERSE_SET(x)\ @@ -2017,8 +2212,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_40_LN_R_CTLE_RSTN_GET(x)\ FIELD_GET(SD25G_LANE_LANE_40_LN_R_CTLE_RSTN, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_42 */ -#define SD25G_LANE_LANE_42(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 264, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_42 */ +#define SD25G_LANE_LANE_42(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 264, 0, 1, 4) #define SD25G_LANE_LANE_42_LN_CFG_TX_RESERVE_7_0 GENMASK(7, 0) #define SD25G_LANE_LANE_42_LN_CFG_TX_RESERVE_7_0_SET(x)\ @@ -2026,8 +2223,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_42_LN_CFG_TX_RESERVE_7_0_GET(x)\ FIELD_GET(SD25G_LANE_LANE_42_LN_CFG_TX_RESERVE_7_0, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_43 */ -#define SD25G_LANE_LANE_43(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 268, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_43 */ +#define SD25G_LANE_LANE_43(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 268, 0, 1, 4) #define SD25G_LANE_LANE_43_LN_CFG_TX_RESERVE_15_8 GENMASK(7, 0) #define SD25G_LANE_LANE_43_LN_CFG_TX_RESERVE_15_8_SET(x)\ @@ -2035,8 +2234,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_43_LN_CFG_TX_RESERVE_15_8_GET(x)\ FIELD_GET(SD25G_LANE_LANE_43_LN_CFG_TX_RESERVE_15_8, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_44 */ -#define SD25G_LANE_LANE_44(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 272, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_44 */ +#define SD25G_LANE_LANE_44(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 272, 0, 1, 4) #define SD25G_LANE_LANE_44_LN_CFG_RX_RESERVE_7_0 GENMASK(7, 0) #define SD25G_LANE_LANE_44_LN_CFG_RX_RESERVE_7_0_SET(x)\ @@ -2044,8 +2245,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_44_LN_CFG_RX_RESERVE_7_0_GET(x)\ FIELD_GET(SD25G_LANE_LANE_44_LN_CFG_RX_RESERVE_7_0, x) -/* SD25G_TARGET:LANE_GRP_0:LANE_45 */ -#define SD25G_LANE_LANE_45(t) __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 276, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_0:LANE_45 */ +#define SD25G_LANE_LANE_45(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 276, 0, 1, 4) #define SD25G_LANE_LANE_45_LN_CFG_RX_RESERVE_15_8 GENMASK(7, 0) #define SD25G_LANE_LANE_45_LN_CFG_RX_RESERVE_15_8_SET(x)\ @@ -2053,8 +2256,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_45_LN_CFG_RX_RESERVE_15_8_GET(x)\ FIELD_GET(SD25G_LANE_LANE_45_LN_CFG_RX_RESERVE_15_8, x) -/* SD25G_TARGET:LANE_GRP_1:LANE_DE */ -#define SD25G_LANE_LANE_DE(t) __REG(TARGET_SD25G_LANE, t, 8, 1792, 0, 1, 128, 120, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_TARGET:LANE_GRP_1:LANE_DE */ +#define SD25G_LANE_LANE_DE(t) \ + __REG(TARGET_SD25G_LANE, t, 8, 1792, 0, 1, 128, 120, 0, 1, 4) #define SD25G_LANE_LANE_DE_LN_LOL_UDL BIT(0) #define SD25G_LANE_LANE_DE_LN_LOL_UDL_SET(x)\ @@ -2080,8 +2285,10 @@ enum sparx5_serdes_target { #define SD25G_LANE_LANE_DE_LN_PMA_RXEI_GET(x)\ FIELD_GET(SD25G_LANE_LANE_DE_LN_PMA_RXEI, x) -/* SD10G_LANE_TARGET:LANE_GRP_8:LANE_DF */ -#define SD6G_LANE_LANE_DF(t) 
__REG(TARGET_SD6G_LANE, t, 13, 832, 0, 1, 84, 60, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD10G_LANE_TARGET:LANE_GRP_8:LANE_DF */ +#define SD6G_LANE_LANE_DF(t) \ + __REG(TARGET_SD6G_LANE, t, 13, 832, 0, 1, 84, 60, 0, 1, 4) #define SD6G_LANE_LANE_DF_LOL_UDL BIT(0) #define SD6G_LANE_LANE_DF_LOL_UDL_SET(x)\ @@ -2107,8 +2314,9 @@ enum sparx5_serdes_target { #define SD6G_LANE_LANE_DF_SQUELCH_GET(x)\ FIELD_GET(SD6G_LANE_LANE_DF_SQUELCH, x) -/* SD10G_CMU_TARGET:CMU_GRP_0:CMU_00 */ -#define SD_CMU_CMU_00(t) __REG(TARGET_SD_CMU, t, 14, 0, 0, 1, 20, 0, 0, 1, 4) +/* SD10G_CMU_TARGET:CMU_GRP_0:CMU_00 */ +#define SD_CMU_CMU_00(t) \ + __REG(TARGET_SD_CMU, t, TSIZE(TC_SD_CMU), 0, 0, 1, 20, 0, 0, 1, 4) #define SD_CMU_CMU_00_R_HWT_SIMULATION_MODE BIT(0) #define SD_CMU_CMU_00_R_HWT_SIMULATION_MODE_SET(x)\ @@ -2134,8 +2342,9 @@ enum sparx5_serdes_target { #define SD_CMU_CMU_00_CFG_PLL_TP_SEL_1_0_GET(x)\ FIELD_GET(SD_CMU_CMU_00_CFG_PLL_TP_SEL_1_0, x) -/* SD10G_CMU_TARGET:CMU_GRP_1:CMU_05 */ -#define SD_CMU_CMU_05(t) __REG(TARGET_SD_CMU, t, 14, 20, 0, 1, 72, 0, 0, 1, 4) +/* SD10G_CMU_TARGET:CMU_GRP_1:CMU_05 */ +#define SD_CMU_CMU_05(t) \ + __REG(TARGET_SD_CMU, t, TSIZE(TC_SD_CMU), 20, 0, 1, 72, 0, 0, 1, 4) #define SD_CMU_CMU_05_CFG_REFCK_TERM_EN BIT(0) #define SD_CMU_CMU_05_CFG_REFCK_TERM_EN_SET(x)\ @@ -2149,9 +2358,9 @@ enum sparx5_serdes_target { #define SD_CMU_CMU_05_CFG_BIAS_TP_SEL_1_0_GET(x)\ FIELD_GET(SD_CMU_CMU_05_CFG_BIAS_TP_SEL_1_0, x) -/* SD10G_CMU_TARGET:CMU_GRP_1:CMU_06 */ -#define SD_CMU_CMU_06(t) \ - __REG(TARGET_SD_CMU, t, 14, 20, 0, 1, 72, 4, 0, 1, 4) +/* SD10G_CMU_TARGET:CMU_GRP_1:CMU_06 */ +#define SD_CMU_CMU_06(t) \ + __REG(TARGET_SD_CMU, t, TSIZE(TC_SD_CMU), 20, 0, 1, 72, 4, 0, 1, 4) #define SD_CMU_CMU_06_CFG_DISLOS BIT(0) #define SD_CMU_CMU_06_CFG_DISLOS_SET(x)\ @@ -2201,9 +2410,9 @@ enum sparx5_serdes_target { #define SD_CMU_CMU_06_CFG_VCO_CAL_BYP_GET(x)\ FIELD_GET(SD_CMU_CMU_06_CFG_VCO_CAL_BYP, x) -/* SD10G_CMU_TARGET:CMU_GRP_1:CMU_08 */ -#define SD_CMU_CMU_08(t) \ - __REG(TARGET_SD_CMU, t, 14, 20, 0, 1, 72, 12, 0, 1, 4) +/* SD10G_CMU_TARGET:CMU_GRP_1:CMU_08 */ +#define SD_CMU_CMU_08(t) \ + __REG(TARGET_SD_CMU, t, TSIZE(TC_SD_CMU), 20, 0, 1, 72, 12, 0, 1, 4) #define SD_CMU_CMU_08_CFG_VFILT2PAD BIT(0) #define SD_CMU_CMU_08_CFG_VFILT2PAD_SET(x)\ @@ -2235,8 +2444,9 @@ enum sparx5_serdes_target { #define SD_CMU_CMU_08_CFG_RST_TREE_PD_MAN_EN_GET(x)\ FIELD_GET(SD_CMU_CMU_08_CFG_RST_TREE_PD_MAN_EN, x) -/* SD10G_CMU_TARGET:CMU_GRP_1:CMU_09 */ -#define SD_CMU_CMU_09(t) __REG(TARGET_SD_CMU, t, 14, 20, 0, 1, 72, 16, 0, 1, 4) +/* SD10G_CMU_TARGET:CMU_GRP_1:CMU_09 */ +#define SD_CMU_CMU_09(t) \ + __REG(TARGET_SD_CMU, t, TSIZE(TC_SD_CMU), 20, 0, 1, 72, 16, 0, 1, 4) #define SD_CMU_CMU_09_CFG_EN_TX_CK_UP BIT(0) #define SD_CMU_CMU_09_CFG_EN_TX_CK_UP_SET(x)\ @@ -2262,8 +2472,9 @@ enum sparx5_serdes_target { #define SD_CMU_CMU_09_CFG_SW_10G_GET(x)\ FIELD_GET(SD_CMU_CMU_09_CFG_SW_10G, x) -/* SD10G_CMU_TARGET:CMU_GRP_1:CMU_0D */ -#define SD_CMU_CMU_0D(t) __REG(TARGET_SD_CMU, t, 14, 20, 0, 1, 72, 32, 0, 1, 4) +/* SD10G_CMU_TARGET:CMU_GRP_1:CMU_0D */ +#define SD_CMU_CMU_0D(t) \ + __REG(TARGET_SD_CMU, t, TSIZE(TC_SD_CMU), 20, 0, 1, 72, 32, 0, 1, 4) #define SD_CMU_CMU_0D_CFG_PD_DIV64 BIT(0) #define SD_CMU_CMU_0D_CFG_PD_DIV64_SET(x)\ @@ -2295,8 +2506,9 @@ enum sparx5_serdes_target { #define SD_CMU_CMU_0D_CFG_REFCK_PD_GET(x)\ FIELD_GET(SD_CMU_CMU_0D_CFG_REFCK_PD, x) -/* SD10G_CMU_TARGET:CMU_GRP_3:CMU_1B */ -#define SD_CMU_CMU_1B(t) __REG(TARGET_SD_CMU, t, 14, 104, 0, 1, 20, 4, 0, 1, 4) +/* 
SD10G_CMU_TARGET:CMU_GRP_3:CMU_1B */ +#define SD_CMU_CMU_1B(t) \ + __REG(TARGET_SD_CMU, t, TSIZE(TC_SD_CMU), 104, 0, 1, 20, 4, 0, 1, 4) #define SD_CMU_CMU_1B_CFG_RESERVE_7_0 GENMASK(7, 0) #define SD_CMU_CMU_1B_CFG_RESERVE_7_0_SET(x)\ @@ -2304,8 +2516,9 @@ enum sparx5_serdes_target { #define SD_CMU_CMU_1B_CFG_RESERVE_7_0_GET(x)\ FIELD_GET(SD_CMU_CMU_1B_CFG_RESERVE_7_0, x) -/* SD10G_CMU_TARGET:CMU_GRP_4:CMU_1F */ -#define SD_CMU_CMU_1F(t) __REG(TARGET_SD_CMU, t, 14, 124, 0, 1, 68, 0, 0, 1, 4) +/* SD10G_CMU_TARGET:CMU_GRP_4:CMU_1F */ +#define SD_CMU_CMU_1F(t) \ + __REG(TARGET_SD_CMU, t, TSIZE(TC_SD_CMU), 124, 0, 1, 68, 0, 0, 1, 4) #define SD_CMU_CMU_1F_CFG_BIAS_DN_EN BIT(0) #define SD_CMU_CMU_1F_CFG_BIAS_DN_EN_SET(x)\ @@ -2331,8 +2544,9 @@ enum sparx5_serdes_target { #define SD_CMU_CMU_1F_CFG_VTUNE_SEL_GET(x)\ FIELD_GET(SD_CMU_CMU_1F_CFG_VTUNE_SEL, x) -/* SD10G_CMU_TARGET:CMU_GRP_5:CMU_30 */ -#define SD_CMU_CMU_30(t) __REG(TARGET_SD_CMU, t, 14, 192, 0, 1, 72, 0, 0, 1, 4) +/* SD10G_CMU_TARGET:CMU_GRP_5:CMU_30 */ +#define SD_CMU_CMU_30(t) \ + __REG(TARGET_SD_CMU, t, TSIZE(TC_SD_CMU), 192, 0, 1, 72, 0, 0, 1, 4) #define SD_CMU_CMU_30_R_PLL_DLOL_EN BIT(0) #define SD_CMU_CMU_30_R_PLL_DLOL_EN_SET(x)\ @@ -2340,8 +2554,9 @@ enum sparx5_serdes_target { #define SD_CMU_CMU_30_R_PLL_DLOL_EN_GET(x)\ FIELD_GET(SD_CMU_CMU_30_R_PLL_DLOL_EN, x) -/* SD10G_CMU_TARGET:CMU_GRP_6:CMU_44 */ -#define SD_CMU_CMU_44(t) __REG(TARGET_SD_CMU, t, 14, 264, 0, 1, 632, 8, 0, 1, 4) +/* SD10G_CMU_TARGET:CMU_GRP_6:CMU_44 */ +#define SD_CMU_CMU_44(t) \ + __REG(TARGET_SD_CMU, t, TSIZE(TC_SD_CMU), 264, 0, 1, 632, 8, 0, 1, 4) #define SD_CMU_CMU_44_R_PLL_RSTN BIT(0) #define SD_CMU_CMU_44_R_PLL_RSTN_SET(x)\ @@ -2355,8 +2570,9 @@ enum sparx5_serdes_target { #define SD_CMU_CMU_44_R_CK_RESETB_GET(x)\ FIELD_GET(SD_CMU_CMU_44_R_CK_RESETB, x) -/* SD10G_CMU_TARGET:CMU_GRP_6:CMU_45 */ -#define SD_CMU_CMU_45(t) __REG(TARGET_SD_CMU, t, 14, 264, 0, 1, 632, 12, 0, 1, 4) +/* SD10G_CMU_TARGET:CMU_GRP_6:CMU_45 */ +#define SD_CMU_CMU_45(t) \ + __REG(TARGET_SD_CMU, t, TSIZE(TC_SD_CMU), 264, 0, 1, 632, 12, 0, 1, 4) #define SD_CMU_CMU_45_R_EN_RATECHG_CTRL BIT(0) #define SD_CMU_CMU_45_R_EN_RATECHG_CTRL_SET(x)\ @@ -2406,8 +2622,9 @@ enum sparx5_serdes_target { #define SD_CMU_CMU_45_R_AUTO_RST_TREE_PD_MAN_GET(x)\ FIELD_GET(SD_CMU_CMU_45_R_AUTO_RST_TREE_PD_MAN, x) -/* SD10G_CMU_TARGET:CMU_GRP_6:CMU_47 */ -#define SD_CMU_CMU_47(t) __REG(TARGET_SD_CMU, t, 14, 264, 0, 1, 632, 20, 0, 1, 4) +/* SD10G_CMU_TARGET:CMU_GRP_6:CMU_47 */ +#define SD_CMU_CMU_47(t) \ + __REG(TARGET_SD_CMU, t, TSIZE(TC_SD_CMU), 264, 0, 1, 632, 20, 0, 1, 4) #define SD_CMU_CMU_47_R_PCS2PMA_PHYMODE_4_0 GENMASK(4, 0) #define SD_CMU_CMU_47_R_PCS2PMA_PHYMODE_4_0_SET(x)\ @@ -2415,8 +2632,9 @@ enum sparx5_serdes_target { #define SD_CMU_CMU_47_R_PCS2PMA_PHYMODE_4_0_GET(x)\ FIELD_GET(SD_CMU_CMU_47_R_PCS2PMA_PHYMODE_4_0, x) -/* SD10G_CMU_TARGET:CMU_GRP_7:CMU_E0 */ -#define SD_CMU_CMU_E0(t) __REG(TARGET_SD_CMU, t, 14, 896, 0, 1, 8, 0, 0, 1, 4) +/* SD10G_CMU_TARGET:CMU_GRP_7:CMU_E0 */ +#define SD_CMU_CMU_E0(t) \ + __REG(TARGET_SD_CMU, t, TSIZE(TC_SD_CMU), 896, 0, 1, 8, 0, 0, 1, 4) #define SD_CMU_CMU_E0_READ_VCO_CTUNE_3_0 GENMASK(3, 0) #define SD_CMU_CMU_E0_READ_VCO_CTUNE_3_0_SET(x)\ @@ -2430,8 +2648,10 @@ enum sparx5_serdes_target { #define SD_CMU_CMU_E0_PLL_LOL_UDL_GET(x)\ FIELD_GET(SD_CMU_CMU_E0_PLL_LOL_UDL, x) -/* SD_CMU_TARGET:SD_CMU_CFG:SD_CMU_CFG */ -#define SD_CMU_CFG_SD_CMU_CFG(t) __REG(TARGET_SD_CMU_CFG, t, 14, 0, 0, 1, 8, 0, 0, 1, 4) +/* SD_CMU_TARGET:SD_CMU_CFG:SD_CMU_CFG */ +#define 
SD_CMU_CFG_SD_CMU_CFG(t) \ + __REG(TARGET_SD_CMU_CFG, t, TSIZE(TC_SD_CMU_CFG), 0, 0, 1, 8, 0, 0, 1, \ + 4) #define SD_CMU_CFG_SD_CMU_CFG_CMU_RST BIT(0) #define SD_CMU_CFG_SD_CMU_CFG_CMU_RST_SET(x)\ @@ -2445,8 +2665,9 @@ enum sparx5_serdes_target { #define SD_CMU_CFG_SD_CMU_CFG_EXT_CFG_RST_GET(x)\ FIELD_GET(SD_CMU_CFG_SD_CMU_CFG_EXT_CFG_RST, x) -/* SD_LANE_TARGET:SD_RESET:SD_SER_RST */ -#define SD_LANE_SD_SER_RST(t) __REG(TARGET_SD_LANE, t, 25, 0, 0, 1, 8, 0, 0, 1, 4) +/* SD_LANE_TARGET:SD_RESET:SD_SER_RST */ +#define SD_LANE_SD_SER_RST(t) \ + __REG(TARGET_SD_LANE, t, TSIZE(TC_SD_LANE), 0, 0, 1, 8, 0, 0, 1, 4) #define SD_LANE_SD_SER_RST_SER_RST BIT(0) #define SD_LANE_SD_SER_RST_SER_RST_SET(x)\ @@ -2454,8 +2675,9 @@ enum sparx5_serdes_target { #define SD_LANE_SD_SER_RST_SER_RST_GET(x)\ FIELD_GET(SD_LANE_SD_SER_RST_SER_RST, x) -/* SD_LANE_TARGET:SD_RESET:SD_DES_RST */ -#define SD_LANE_SD_DES_RST(t) __REG(TARGET_SD_LANE, t, 25, 0, 0, 1, 8, 4, 0, 1, 4) +/* SD_LANE_TARGET:SD_RESET:SD_DES_RST */ +#define SD_LANE_SD_DES_RST(t) \ + __REG(TARGET_SD_LANE, t, TSIZE(TC_SD_LANE), 0, 0, 1, 8, 4, 0, 1, 4) #define SD_LANE_SD_DES_RST_DES_RST BIT(0) #define SD_LANE_SD_DES_RST_DES_RST_SET(x)\ @@ -2463,8 +2685,9 @@ enum sparx5_serdes_target { #define SD_LANE_SD_DES_RST_DES_RST_GET(x)\ FIELD_GET(SD_LANE_SD_DES_RST_DES_RST, x) -/* SD_LANE_TARGET:SD_LANE_CFG_STAT:SD_LANE_CFG */ -#define SD_LANE_SD_LANE_CFG(t) __REG(TARGET_SD_LANE, t, 25, 8, 0, 1, 8, 0, 0, 1, 4) +/* SD_LANE_TARGET:SD_LANE_CFG_STAT:SD_LANE_CFG */ +#define SD_LANE_SD_LANE_CFG(t) \ + __REG(TARGET_SD_LANE, t, TSIZE(TC_SD_LANE), 8, 0, 1, 8, 0, 0, 1, 4) #define SD_LANE_SD_LANE_CFG_MACRO_RST BIT(0) #define SD_LANE_SD_LANE_CFG_MACRO_RST_SET(x)\ @@ -2508,8 +2731,9 @@ enum sparx5_serdes_target { #define SD_LANE_SD_LANE_CFG_LANE_RX_RST_GET(x)\ FIELD_GET(SD_LANE_SD_LANE_CFG_LANE_RX_RST, x) -/* SD_LANE_TARGET:SD_LANE_CFG_STAT:SD_LANE_STAT */ -#define SD_LANE_SD_LANE_STAT(t) __REG(TARGET_SD_LANE, t, 25, 8, 0, 1, 8, 4, 0, 1, 4) +/* SD_LANE_TARGET:SD_LANE_CFG_STAT:SD_LANE_STAT */ +#define SD_LANE_SD_LANE_STAT(t) \ + __REG(TARGET_SD_LANE, t, TSIZE(TC_SD_LANE), 8, 0, 1, 8, 4, 0, 1, 4) #define SD_LANE_SD_LANE_STAT_PMA_RST_DONE BIT(0) #define SD_LANE_SD_LANE_STAT_PMA_RST_DONE_SET(x)\ @@ -2529,9 +2753,9 @@ enum sparx5_serdes_target { #define SD_LANE_SD_LANE_STAT_DBG_OBS_GET(x)\ FIELD_GET(SD_LANE_SD_LANE_STAT_DBG_OBS, x) -/* SD_LANE_TARGET:SD_PWR_CFG:QUIET_MODE_6G */ -#define SD_LANE_QUIET_MODE_6G(t) \ - __REG(TARGET_SD_LANE, t, 25, 24, 0, 1, 8, 4, 0, 1, 4) +/* SD_LANE_TARGET:SD_PWR_CFG:QUIET_MODE_6G */ +#define SD_LANE_QUIET_MODE_6G(t) \ + __REG(TARGET_SD_LANE, t, TSIZE(TC_SD_LANE), 24, 0, 1, 8, 4, 0, 1, 4) #define SD_LANE_QUIET_MODE_6G_QUIET_MODE GENMASK(24, 0) #define SD_LANE_QUIET_MODE_6G_QUIET_MODE_SET(x)\ @@ -2539,8 +2763,9 @@ enum sparx5_serdes_target { #define SD_LANE_QUIET_MODE_6G_QUIET_MODE_GET(x)\ FIELD_GET(SD_LANE_QUIET_MODE_6G_QUIET_MODE, x) -/* SD_LANE_TARGET:CFG_STAT_FX100:MISC */ -#define SD_LANE_MISC(t) __REG(TARGET_SD_LANE, t, 25, 56, 0, 1, 56, 0, 0, 1, 4) +/* SD_LANE_TARGET:CFG_STAT_FX100:MISC */ +#define SD_LANE_MISC(t) \ + __REG(TARGET_SD_LANE, t, TSIZE(TC_SD_LANE), 56, 0, 1, 56, 0, 0, 1, 4) #define SD_LANE_MISC_SD_125_RST_DIS BIT(0) #define SD_LANE_MISC_SD_125_RST_DIS_SET(x)\ @@ -2560,14 +2785,16 @@ enum sparx5_serdes_target { #define SD_LANE_MISC_MUX_ENA_GET(x)\ FIELD_GET(SD_LANE_MISC_MUX_ENA, x) +/* SPARX5 ONLY */ #define SD_LANE_MISC_CORE_CLK_FREQ GENMASK(5, 4) #define SD_LANE_MISC_CORE_CLK_FREQ_SET(x)\ FIELD_PREP(SD_LANE_MISC_CORE_CLK_FREQ, x) 
#define SD_LANE_MISC_CORE_CLK_FREQ_GET(x)\ FIELD_GET(SD_LANE_MISC_CORE_CLK_FREQ, x) -/* SD_LANE_TARGET:CFG_STAT_FX100:M_STAT_MISC */ -#define SD_LANE_M_STAT_MISC(t) __REG(TARGET_SD_LANE, t, 25, 56, 0, 1, 56, 36, 0, 1, 4) +/* SD_LANE_TARGET:CFG_STAT_FX100:M_STAT_MISC */ +#define SD_LANE_M_STAT_MISC(t) \ + __REG(TARGET_SD_LANE, t, TSIZE(TC_SD_LANE), 56, 0, 1, 56, 36, 0, 1, 4) #define SD_LANE_M_STAT_MISC_M_RIS_EDGE_PTR_ADJ_SUM GENMASK(21, 0) #define SD_LANE_M_STAT_MISC_M_RIS_EDGE_PTR_ADJ_SUM_SET(x)\ @@ -2581,8 +2808,10 @@ enum sparx5_serdes_target { #define SD_LANE_M_STAT_MISC_M_LOCK_CNT_GET(x)\ FIELD_GET(SD_LANE_M_STAT_MISC_M_LOCK_CNT, x) -/* SD25G_CFG_TARGET:SD_RESET:SD_SER_RST */ -#define SD_LANE_25G_SD_SER_RST(t) __REG(TARGET_SD_LANE_25G, t, 8, 0, 0, 1, 8, 0, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_CFG_TARGET:SD_RESET:SD_SER_RST */ +#define SD_LANE_25G_SD_SER_RST(t) \ + __REG(TARGET_SD_LANE_25G, t, 8, 0, 0, 1, 8, 0, 0, 1, 4) #define SD_LANE_25G_SD_SER_RST_SER_RST BIT(0) #define SD_LANE_25G_SD_SER_RST_SER_RST_SET(x)\ @@ -2590,8 +2819,10 @@ enum sparx5_serdes_target { #define SD_LANE_25G_SD_SER_RST_SER_RST_GET(x)\ FIELD_GET(SD_LANE_25G_SD_SER_RST_SER_RST, x) -/* SD25G_CFG_TARGET:SD_RESET:SD_DES_RST */ -#define SD_LANE_25G_SD_DES_RST(t) __REG(TARGET_SD_LANE_25G, t, 8, 0, 0, 1, 8, 4, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_CFG_TARGET:SD_RESET:SD_DES_RST */ +#define SD_LANE_25G_SD_DES_RST(t) \ + __REG(TARGET_SD_LANE_25G, t, 8, 0, 0, 1, 8, 4, 0, 1, 4) #define SD_LANE_25G_SD_DES_RST_DES_RST BIT(0) #define SD_LANE_25G_SD_DES_RST_DES_RST_SET(x)\ @@ -2599,8 +2830,10 @@ enum sparx5_serdes_target { #define SD_LANE_25G_SD_DES_RST_DES_RST_GET(x)\ FIELD_GET(SD_LANE_25G_SD_DES_RST_DES_RST, x) -/* SD25G_CFG_TARGET:SD_LANE_CFG_STAT:SD_LANE_CFG */ -#define SD_LANE_25G_SD_LANE_CFG(t) __REG(TARGET_SD_LANE_25G, t, 8, 8, 0, 1, 12, 0, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_CFG_TARGET:SD_LANE_CFG_STAT:SD_LANE_CFG */ +#define SD_LANE_25G_SD_LANE_CFG(t) \ + __REG(TARGET_SD_LANE_25G, t, 8, 8, 0, 1, 12, 0, 0, 1, 4) #define SD_LANE_25G_SD_LANE_CFG_MACRO_RST BIT(0) #define SD_LANE_25G_SD_LANE_CFG_MACRO_RST_SET(x)\ @@ -2698,8 +2931,10 @@ enum sparx5_serdes_target { #define SD_LANE_25G_SD_LANE_CFG_PCS2PMA_TXMARGIN_GET(x)\ FIELD_GET(SD_LANE_25G_SD_LANE_CFG_PCS2PMA_TXMARGIN, x) -/* SD25G_CFG_TARGET:SD_LANE_CFG_STAT:SD_LANE_CFG2 */ -#define SD_LANE_25G_SD_LANE_CFG2(t) __REG(TARGET_SD_LANE_25G, t, 8, 8, 0, 1, 12, 4, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_CFG_TARGET:SD_LANE_CFG_STAT:SD_LANE_CFG2 */ +#define SD_LANE_25G_SD_LANE_CFG2(t) \ + __REG(TARGET_SD_LANE_25G, t, 8, 8, 0, 1, 12, 4, 0, 1, 4) #define SD_LANE_25G_SD_LANE_CFG2_DATA_WIDTH_SEL GENMASK(2, 0) #define SD_LANE_25G_SD_LANE_CFG2_DATA_WIDTH_SEL_SET(x)\ @@ -2767,8 +3002,10 @@ enum sparx5_serdes_target { #define SD_LANE_25G_SD_LANE_CFG2_RXRATE_SEL_GET(x)\ FIELD_GET(SD_LANE_25G_SD_LANE_CFG2_RXRATE_SEL, x) -/* SD25G_CFG_TARGET:SD_LANE_CFG_STAT:SD_LANE_STAT */ -#define SD_LANE_25G_SD_LANE_STAT(t) __REG(TARGET_SD_LANE_25G, t, 8, 8, 0, 1, 12, 8, 0, 1, 4) +/* SPARX5 ONLY */ +/* SD25G_CFG_TARGET:SD_LANE_CFG_STAT:SD_LANE_STAT */ +#define SD_LANE_25G_SD_LANE_STAT(t) \ + __REG(TARGET_SD_LANE_25G, t, 8, 8, 0, 1, 12, 8, 0, 1, 4) #define SD_LANE_25G_SD_LANE_STAT_PMA_RST_DONE BIT(0) #define SD_LANE_25G_SD_LANE_STAT_PMA_RST_DONE_SET(x)\ @@ -2788,8 +3025,9 @@ enum sparx5_serdes_target { #define SD_LANE_25G_SD_LANE_STAT_DBG_OBS_GET(x)\ FIELD_GET(SD_LANE_25G_SD_LANE_STAT_DBG_OBS, x) -/* SD25G_CFG_TARGET:SD_PWR_CFG:QUIET_MODE_6G */ -#define SD_LANE_25G_QUIET_MODE_6G(t) \ +/* SPARX5 ONLY */ +/* 
SD25G_CFG_TARGET:SD_PWR_CFG:QUIET_MODE_6G */ +#define SD_LANE_25G_QUIET_MODE_6G(t) \ __REG(TARGET_SD_LANE_25G, t, 8, 28, 0, 1, 8, 4, 0, 1, 4) #define SD_LANE_25G_QUIET_MODE_6G_QUIET_MODE GENMASK(24, 0) diff --git a/drivers/phy/motorola/phy-cpcap-usb.c b/drivers/phy/motorola/phy-cpcap-usb.c index 7bbf729a7c90..7cb020dd3423 100644 --- a/drivers/phy/motorola/phy-cpcap-usb.c +++ b/drivers/phy/motorola/phy-cpcap-usb.c @@ -704,7 +704,7 @@ static void cpcap_usb_phy_remove(struct platform_device *pdev) static struct platform_driver cpcap_usb_phy_driver = { .probe = cpcap_usb_phy_probe, - .remove_new = cpcap_usb_phy_remove, + .remove = cpcap_usb_phy_remove, .driver = { .name = "cpcap-usb-phy", .of_match_table = of_match_ptr(cpcap_usb_phy_id_table), diff --git a/drivers/phy/motorola/phy-mapphone-mdm6600.c b/drivers/phy/motorola/phy-mapphone-mdm6600.c index 376d023a0aa9..152344e4f7e4 100644 --- a/drivers/phy/motorola/phy-mapphone-mdm6600.c +++ b/drivers/phy/motorola/phy-mapphone-mdm6600.c @@ -655,7 +655,7 @@ static void phy_mdm6600_remove(struct platform_device *pdev) static struct platform_driver phy_mdm6600_driver = { .probe = phy_mdm6600_probe, - .remove_new = phy_mdm6600_remove, + .remove = phy_mdm6600_remove, .driver = { .name = "phy-mapphone-mdm6600", .pm = &phy_mdm6600_pm_ops, diff --git a/drivers/phy/phy-airoha-pcie-regs.h b/drivers/phy/phy-airoha-pcie-regs.h index bb1f679ca1df..b938a7b468fe 100644 --- a/drivers/phy/phy-airoha-pcie-regs.h +++ b/drivers/phy/phy-airoha-pcie-regs.h @@ -197,9 +197,9 @@ #define CSR_2L_PXP_TX1_MULTLANE_EN BIT(0) #define REG_CSR_2L_RX0_REV0 0x00fc -#define CSR_2L_PXP_VOS_PNINV GENMASK(3, 2) -#define CSR_2L_PXP_FE_GAIN_NORMAL_MODE GENMASK(6, 4) -#define CSR_2L_PXP_FE_GAIN_TRAIN_MODE GENMASK(10, 8) +#define CSR_2L_PXP_VOS_PNINV GENMASK(19, 18) +#define CSR_2L_PXP_FE_GAIN_NORMAL_MODE GENMASK(22, 20) +#define CSR_2L_PXP_FE_GAIN_TRAIN_MODE GENMASK(26, 24) #define REG_CSR_2L_RX0_PHYCK_DIV 0x0100 #define CSR_2L_PXP_RX0_PHYCK_SEL GENMASK(9, 8) diff --git a/drivers/phy/phy-airoha-pcie.c b/drivers/phy/phy-airoha-pcie.c index 1e410eb41058..56e9ade8a9fd 100644 --- a/drivers/phy/phy-airoha-pcie.c +++ b/drivers/phy/phy-airoha-pcie.c @@ -459,7 +459,7 @@ static void airoha_pcie_phy_init_clk_out(struct airoha_pcie_phy *pcie_phy) airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_CLKTX1_OFFSET, CSR_2L_PXP_CLKTX1_SR); airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_PLL_CMN_RESERVE0, - CSR_2L_PXP_PLL_RESERVE_MASK, 0xdd); + CSR_2L_PXP_PLL_RESERVE_MASK, 0xd0d); } static void airoha_pcie_phy_init_csr_2l(struct airoha_pcie_phy *pcie_phy) @@ -471,9 +471,9 @@ static void airoha_pcie_phy_init_csr_2l(struct airoha_pcie_phy *pcie_phy) PCIE_SW_XFI_RXPCS_RST | PCIE_SW_REF_RST | PCIE_SW_RX_RST); airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_TX_RESET, - PCIE_TX_TOP_RST | REG_PCIE_PMA_TX_RESET); + PCIE_TX_TOP_RST | PCIE_TX_CAL_RST); airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_TX_RESET, - PCIE_TX_TOP_RST | REG_PCIE_PMA_TX_RESET); + PCIE_TX_TOP_RST | PCIE_TX_CAL_RST); } static void airoha_pcie_phy_init_rx(struct airoha_pcie_phy *pcie_phy) @@ -802,7 +802,7 @@ static void airoha_pcie_phy_init_ssc_jcpll(struct airoha_pcie_phy *pcie_phy) airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_JCPLL_SDM_IFM, CSR_2L_PXP_JCPLL_SDM_IFM); airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_JCPLL_SDM_HREN, - REG_CSR_2L_JCPLL_SDM_HREN); + CSR_2L_PXP_JCPLL_SDM_HREN); airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_JCPLL_RST_DLY, CSR_2L_PXP_JCPLL_SDM_DI_EN); airoha_phy_csr_2l_set_bits(pcie_phy, 
REG_CSR_2L_JCPLL_SSC, diff --git a/drivers/phy/phy-lgm-usb.c b/drivers/phy/phy-lgm-usb.c index 410729c7f513..eb7c6fed20d3 100644 --- a/drivers/phy/phy-lgm-usb.c +++ b/drivers/phy/phy-lgm-usb.c @@ -271,7 +271,7 @@ static struct platform_driver lgm_phy_driver = { .of_match_table = intel_usb_phy_dt_ids, }, .probe = phy_probe, - .remove_new = phy_remove, + .remove = phy_remove, }; module_platform_driver(lgm_phy_driver); diff --git a/drivers/phy/phy-nxp-ptn3222.c b/drivers/phy/phy-nxp-ptn3222.c new file mode 100644 index 000000000000..c6179d8701e6 --- /dev/null +++ b/drivers/phy/phy-nxp-ptn3222.c @@ -0,0 +1,123 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2024, Linaro Limited + */ + +#include <linux/gpio/consumer.h> +#include <linux/i2c.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/phy/phy.h> +#include <linux/regmap.h> +#include <linux/regulator/consumer.h> + +#define NUM_SUPPLIES 2 + +struct ptn3222 { + struct i2c_client *client; + struct phy *phy; + struct gpio_desc *reset_gpio; + struct regulator_bulk_data *supplies; +}; + +static int ptn3222_init(struct phy *phy) +{ + struct ptn3222 *ptn3222 = phy_get_drvdata(phy); + int ret; + + ret = regulator_bulk_enable(NUM_SUPPLIES, ptn3222->supplies); + if (ret) + return ret; + + gpiod_set_value_cansleep(ptn3222->reset_gpio, 0); + + return 0; +} + +static int ptn3222_exit(struct phy *phy) +{ + struct ptn3222 *ptn3222 = phy_get_drvdata(phy); + + gpiod_set_value_cansleep(ptn3222->reset_gpio, 1); + + return regulator_bulk_disable(NUM_SUPPLIES, ptn3222->supplies); +} + +static const struct phy_ops ptn3222_ops = { + .init = ptn3222_init, + .exit = ptn3222_exit, + .owner = THIS_MODULE, +}; + +static const struct regulator_bulk_data ptn3222_supplies[NUM_SUPPLIES] = { + { + .supply = "vdd3v3", + .init_load_uA = 11000, + }, { + .supply = "vdd1v8", + .init_load_uA = 55000, + } +}; + +static int ptn3222_probe(struct i2c_client *client) +{ + struct device *dev = &client->dev; + struct phy_provider *phy_provider; + struct ptn3222 *ptn3222; + int ret; + + ptn3222 = devm_kzalloc(dev, sizeof(*ptn3222), GFP_KERNEL); + if (!ptn3222) + return -ENOMEM; + + ptn3222->client = client; + + ptn3222->reset_gpio = devm_gpiod_get_optional(dev, "reset", + GPIOD_OUT_HIGH); + if (IS_ERR(ptn3222->reset_gpio)) + return dev_err_probe(dev, PTR_ERR(ptn3222->reset_gpio), + "unable to acquire reset gpio\n"); + + ret = devm_regulator_bulk_get_const(dev, NUM_SUPPLIES, ptn3222_supplies, + &ptn3222->supplies); + if (ret) + return ret; + + ptn3222->phy = devm_phy_create(dev, dev->of_node, &ptn3222_ops); + if (IS_ERR(ptn3222->phy)) { + dev_err(dev, "failed to create PHY: %d\n", ret); + return PTR_ERR(ptn3222->phy); + } + + phy_set_drvdata(ptn3222->phy, ptn3222); + + phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); + + return PTR_ERR_OR_ZERO(phy_provider); +} + +static const struct i2c_device_id ptn3222_table[] = { + { "ptn3222" }, + { } +}; +MODULE_DEVICE_TABLE(i2c, ptn3222_table); + +static const struct of_device_id ptn3222_of_table[] = { + { .compatible = "nxp,ptn3222" }, + { } +}; +MODULE_DEVICE_TABLE(of, ptn3222_of_table); + +static struct i2c_driver ptn3222_driver = { + .driver = { + .name = "ptn3222", + .of_match_table = ptn3222_of_table, + }, + .probe = ptn3222_probe, + .id_table = ptn3222_table, +}; + +module_i2c_driver(ptn3222_driver); + +MODULE_DESCRIPTION("NXP PTN3222 eUSB2 Redriver driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/phy/qualcomm/phy-qcom-apq8064-sata.c b/drivers/phy/qualcomm/phy-qcom-apq8064-sata.c 
index 3642a5d4f2f3..cae290a6e19f 100644 --- a/drivers/phy/qualcomm/phy-qcom-apq8064-sata.c +++ b/drivers/phy/qualcomm/phy-qcom-apq8064-sata.c @@ -257,12 +257,12 @@ static const struct of_device_id qcom_apq8064_sata_phy_of_match[] = { MODULE_DEVICE_TABLE(of, qcom_apq8064_sata_phy_of_match); static struct platform_driver qcom_apq8064_sata_phy_driver = { - .probe = qcom_apq8064_sata_phy_probe, - .remove_new = qcom_apq8064_sata_phy_remove, + .probe = qcom_apq8064_sata_phy_probe, + .remove = qcom_apq8064_sata_phy_remove, .driver = { - .name = "qcom-apq8064-sata-phy", - .of_match_table = qcom_apq8064_sata_phy_of_match, - } + .name = "qcom-apq8064-sata-phy", + .of_match_table = qcom_apq8064_sata_phy_of_match, + }, }; module_platform_driver(qcom_apq8064_sata_phy_driver); diff --git a/drivers/phy/qualcomm/phy-qcom-edp.c b/drivers/phy/qualcomm/phy-qcom-edp.c index da2b32fb5b45..f1b51018683d 100644 --- a/drivers/phy/qualcomm/phy-qcom-edp.c +++ b/drivers/phy/qualcomm/phy-qcom-edp.c @@ -32,16 +32,8 @@ #define DP_PHY_PD_CTL 0x001c #define DP_PHY_MODE 0x0020 -#define DP_PHY_AUX_CFG0 0x0024 -#define DP_PHY_AUX_CFG1 0x0028 -#define DP_PHY_AUX_CFG2 0x002C -#define DP_PHY_AUX_CFG3 0x0030 -#define DP_PHY_AUX_CFG4 0x0034 -#define DP_PHY_AUX_CFG5 0x0038 -#define DP_PHY_AUX_CFG6 0x003C -#define DP_PHY_AUX_CFG7 0x0040 -#define DP_PHY_AUX_CFG8 0x0044 -#define DP_PHY_AUX_CFG9 0x0048 +#define DP_AUX_CFG_SIZE 10 +#define DP_PHY_AUX_CFG(n) (0x24 + (0x04 * (n))) #define DP_PHY_AUX_INTERRUPT_MASK 0x0058 @@ -90,6 +82,7 @@ struct phy_ver_ops { struct qcom_edp_phy_cfg { bool is_edp; + const u8 *aux_cfg; const struct qcom_edp_swing_pre_emph_cfg *swing_pre_emph_cfg; const struct phy_ver_ops *ver_ops; }; @@ -186,11 +179,40 @@ static const struct qcom_edp_swing_pre_emph_cfg edp_phy_swing_pre_emph_cfg = { .pre_emphasis_hbr3_hbr2 = &edp_pre_emp_hbr2_hbr3, }; +static const u8 edp_phy_aux_cfg_v4[10] = { + 0x00, 0x13, 0x24, 0x00, 0x0a, 0x26, 0x0a, 0x03, 0x37, 0x03 +}; + +static const u8 edp_pre_emp_hbr_rbr_v5[4][4] = { + { 0x05, 0x11, 0x17, 0x1d }, + { 0x05, 0x11, 0x18, 0xff }, + { 0x06, 0x11, 0xff, 0xff }, + { 0x00, 0xff, 0xff, 0xff } +}; + +static const u8 edp_pre_emp_hbr2_hbr3_v5[4][4] = { + { 0x0c, 0x15, 0x19, 0x1e }, + { 0x0b, 0x15, 0x19, 0xff }, + { 0x0e, 0x14, 0xff, 0xff }, + { 0x0d, 0xff, 0xff, 0xff } +}; + +static const struct qcom_edp_swing_pre_emph_cfg edp_phy_swing_pre_emph_cfg_v5 = { + .swing_hbr_rbr = &edp_swing_hbr_rbr, + .swing_hbr3_hbr2 = &edp_swing_hbr2_hbr3, + .pre_emphasis_hbr_rbr = &edp_pre_emp_hbr_rbr_v5, + .pre_emphasis_hbr3_hbr2 = &edp_pre_emp_hbr2_hbr3_v5, +}; + +static const u8 edp_phy_aux_cfg_v5[10] = { + 0x00, 0x13, 0xa4, 0x00, 0x0a, 0x26, 0x0a, 0x03, 0x37, 0x03 +}; + static int qcom_edp_phy_init(struct phy *phy) { struct qcom_edp *edp = phy_get_drvdata(phy); + u8 aux_cfg[DP_AUX_CFG_SIZE]; int ret; - u8 cfg8; ret = regulator_bulk_enable(ARRAY_SIZE(edp->supplies), edp->supplies); if (ret) @@ -200,6 +222,8 @@ static int qcom_edp_phy_init(struct phy *phy) if (ret) goto out_disable_supplies; + memcpy(aux_cfg, edp->cfg->aux_cfg, sizeof(aux_cfg)); + writel(DP_PHY_PD_CTL_PWRDN | DP_PHY_PD_CTL_AUX_PWRDN | DP_PHY_PD_CTL_PLL_PWRDN | DP_PHY_PD_CTL_DP_CLAMP_EN, edp->edp + DP_PHY_PD_CTL); @@ -222,22 +246,12 @@ static int qcom_edp_phy_init(struct phy *phy) * even needed. 
*/ if (edp->cfg->swing_pre_emph_cfg && !edp->is_edp) - cfg8 = 0xb7; - else - cfg8 = 0x37; + aux_cfg[8] = 0xb7; writel(0xfc, edp->edp + DP_PHY_MODE); - writel(0x00, edp->edp + DP_PHY_AUX_CFG0); - writel(0x13, edp->edp + DP_PHY_AUX_CFG1); - writel(0x24, edp->edp + DP_PHY_AUX_CFG2); - writel(0x00, edp->edp + DP_PHY_AUX_CFG3); - writel(0x0a, edp->edp + DP_PHY_AUX_CFG4); - writel(0x26, edp->edp + DP_PHY_AUX_CFG5); - writel(0x0a, edp->edp + DP_PHY_AUX_CFG6); - writel(0x03, edp->edp + DP_PHY_AUX_CFG7); - writel(cfg8, edp->edp + DP_PHY_AUX_CFG8); - writel(0x03, edp->edp + DP_PHY_AUX_CFG9); + for (int i = 0; i < DP_AUX_CFG_SIZE; i++) + writel(aux_cfg[i], edp->edp + DP_PHY_AUX_CFG(i)); writel(PHY_AUX_STOP_ERR_MASK | PHY_AUX_DEC_ERR_MASK | PHY_AUX_SYNC_ERR_MASK | PHY_AUX_ALIGN_ERR_MASK | @@ -518,17 +532,27 @@ static const struct phy_ver_ops qcom_edp_phy_ops_v4 = { .com_configure_ssc = qcom_edp_com_configure_ssc_v4, }; +static const struct qcom_edp_phy_cfg sa8775p_dp_phy_cfg = { + .is_edp = false, + .aux_cfg = edp_phy_aux_cfg_v5, + .swing_pre_emph_cfg = &edp_phy_swing_pre_emph_cfg_v5, + .ver_ops = &qcom_edp_phy_ops_v4, +}; + static const struct qcom_edp_phy_cfg sc7280_dp_phy_cfg = { + .aux_cfg = edp_phy_aux_cfg_v4, .ver_ops = &qcom_edp_phy_ops_v4, }; static const struct qcom_edp_phy_cfg sc8280xp_dp_phy_cfg = { + .aux_cfg = edp_phy_aux_cfg_v4, .swing_pre_emph_cfg = &dp_phy_swing_pre_emph_cfg, .ver_ops = &qcom_edp_phy_ops_v4, }; static const struct qcom_edp_phy_cfg sc8280xp_edp_phy_cfg = { .is_edp = true, + .aux_cfg = edp_phy_aux_cfg_v4, .swing_pre_emph_cfg = &edp_phy_swing_pre_emph_cfg, .ver_ops = &qcom_edp_phy_ops_v4, }; @@ -707,6 +731,7 @@ static const struct phy_ver_ops qcom_edp_phy_ops_v6 = { }; static struct qcom_edp_phy_cfg x1e80100_phy_cfg = { + .aux_cfg = edp_phy_aux_cfg_v4, .swing_pre_emph_cfg = &dp_phy_swing_pre_emph_cfg, .ver_ops = &qcom_edp_phy_ops_v6, }; @@ -1108,6 +1133,7 @@ static int qcom_edp_phy_probe(struct platform_device *pdev) } static const struct of_device_id qcom_edp_phy_match_table[] = { + { .compatible = "qcom,sa8775p-edp-phy", .data = &sa8775p_dp_phy_cfg, }, { .compatible = "qcom,sc7280-edp-phy", .data = &sc7280_dp_phy_cfg, }, { .compatible = "qcom,sc8180x-edp-phy", .data = &sc7280_dp_phy_cfg, }, { .compatible = "qcom,sc8280xp-dp-phy", .data = &sc8280xp_dp_phy_cfg, }, diff --git a/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c b/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c index 68cc8e24f383..6bd1b3c75c77 100644 --- a/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c +++ b/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c @@ -294,7 +294,7 @@ MODULE_DEVICE_TABLE(of, eusb2_repeater_of_match_table); static struct platform_driver eusb2_repeater_driver = { .probe = eusb2_repeater_probe, - .remove_new = eusb2_repeater_remove, + .remove = eusb2_repeater_remove, .driver = { .name = "qcom-eusb2-repeater", .of_match_table = eusb2_repeater_of_match_table, diff --git a/drivers/phy/qualcomm/phy-qcom-ipq806x-sata.c b/drivers/phy/qualcomm/phy-qcom-ipq806x-sata.c index f0a72b82c770..f5eb0bdac418 100644 --- a/drivers/phy/qualcomm/phy-qcom-ipq806x-sata.c +++ b/drivers/phy/qualcomm/phy-qcom-ipq806x-sata.c @@ -184,11 +184,11 @@ static const struct of_device_id qcom_ipq806x_sata_phy_of_match[] = { MODULE_DEVICE_TABLE(of, qcom_ipq806x_sata_phy_of_match); static struct platform_driver qcom_ipq806x_sata_phy_driver = { - .probe = qcom_ipq806x_sata_phy_probe, - .remove_new = qcom_ipq806x_sata_phy_remove, + .probe = qcom_ipq806x_sata_phy_probe, + .remove = qcom_ipq806x_sata_phy_remove, .driver = { - .name 
= "qcom-ipq806x-sata-phy", - .of_match_table = qcom_ipq806x_sata_phy_of_match, + .name = "qcom-ipq806x-sata-phy", + .of_match_table = qcom_ipq806x_sata_phy_of_match, } }; module_platform_driver(qcom_ipq806x_sata_phy_driver); diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c index 643045c9024e..3bae39381fd0 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c +++ b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c @@ -3483,7 +3483,7 @@ static int qmp_combo_typec_switch_register(struct qmp_combo *qmp) } #endif -static int qmp_combo_parse_dt_lecacy_dp(struct qmp_combo *qmp, struct device_node *np) +static int qmp_combo_parse_dt_legacy_dp(struct qmp_combo *qmp, struct device_node *np) { struct device *dev = qmp->dev; @@ -3510,7 +3510,7 @@ static int qmp_combo_parse_dt_lecacy_dp(struct qmp_combo *qmp, struct device_nod return 0; } -static int qmp_combo_parse_dt_lecacy_usb(struct qmp_combo *qmp, struct device_node *np) +static int qmp_combo_parse_dt_legacy_usb(struct qmp_combo *qmp, struct device_node *np) { const struct qmp_phy_cfg *cfg = qmp->cfg; struct device *dev = qmp->dev; @@ -3576,11 +3576,11 @@ static int qmp_combo_parse_dt_legacy(struct qmp_combo *qmp, struct device_node * if (IS_ERR(qmp->dp_serdes)) return PTR_ERR(qmp->dp_serdes); - ret = qmp_combo_parse_dt_lecacy_usb(qmp, usb_np); + ret = qmp_combo_parse_dt_legacy_usb(qmp, usb_np); if (ret) return ret; - ret = qmp_combo_parse_dt_lecacy_dp(qmp, dp_np); + ret = qmp_combo_parse_dt_legacy_dp(qmp, dp_np); if (ret) return ret; diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c index 36aaac34e6c6..873f2f9844c6 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c +++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c @@ -34,6 +34,8 @@ #include "phy-qcom-qmp-pcs-pcie-v5_20.h" #include "phy-qcom-qmp-pcs-pcie-v6.h" #include "phy-qcom-qmp-pcs-pcie-v6_20.h" +#include "phy-qcom-qmp-pcs-pcie-v6_30.h" +#include "phy-qcom-qmp-pcs-v6_30.h" #include "phy-qcom-qmp-pcie-qhp.h" #define PHY_INIT_COMPLETE_TIMEOUT 10000 @@ -1344,6 +1346,154 @@ static const struct qmp_phy_init_tbl x1e80100_qmp_gen4x2_pcie_pcs_misc_tbl[] = { QMP_PHY_INIT_CFG(QPHY_PCIE_V6_20_PCS_G4_FOM_EQ_CONFIG5, 0x8a), }; +static const struct qmp_phy_init_tbl x1e80100_qmp_gen4x8_pcie_serdes_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE1_MODE1, 0x26), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE2_MODE1, 0x03), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_CP_CTRL_MODE1, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_RCTRL_MODE1, 0x16), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CCTRL_MODE1, 0x36), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_CORECLK_DIV_MODE1, 0x08), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE1, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE1, 0x0d), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE1, 0x68), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START1_MODE1, 0xab), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START2_MODE1, 0xaa), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START3_MODE1, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_SEL_1, 0x12), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE1_MODE0, 0xf8), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE2_MODE0, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_CP_CTRL_MODE0, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_RCTRL_MODE0, 0x16), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CCTRL_MODE0, 0x36), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CORE_CLK_DIV_MODE0, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE0, 0x04), + 
QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE0, 0x0d), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x41), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START1_MODE0, 0xab), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START2_MODE0, 0xaa), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START3_MODE0, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_HS_SWITCH_SEL_1, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_BG_TIMER, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_EN_CENTER, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER1, 0x62), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER2, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_POST_DIV_MUX, 0x40), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_BIAS_EN_CLK_BUFLR_EN, 0x1c), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_CLK_ENABLE1, 0x90), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SYS_CLK_CTRL, 0x82), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_IVCO, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SYSCLK_EN_SEL, 0x08), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP_EN, 0x46), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP_CFG, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE_MAP, 0x14), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_CLK_SELECT, 0x34), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_CORE_CLK_EN, 0x20), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_CMN_CONFIG_1, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_CMN_MISC_1, 0x88), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_CMN_MODE, 0x14), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_VCO_DC_LEVEL_CTRL, 0x0f), +}; + +static const struct qmp_phy_init_tbl x1e80100_qmp_gen4x8_pcie_ln_shrd_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RXCLK_DIV2_CTRL, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_SUMMER_CAL_SPD_MODE, 0x5b), + QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_DFE_DAC_ENABLE1, 0x88), + QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_TX_ADAPT_POST_THRESH1, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_TX_ADAPT_POST_THRESH2, 0x0d), + QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MODE_RATE_0_1_B0, 0x12), + QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MODE_RATE_0_1_B1, 0x12), + QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MODE_RATE_0_1_B2, 0xdb), + QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MODE_RATE_0_1_B3, 0x9a), + QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MODE_RATE_0_1_B4, 0x38), + QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MODE_RATE_0_1_B5, 0xb6), + QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MODE_RATE_0_1_B6, 0x64), + QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MARG_COARSE_THRESH1_RATE210, 0x1f), + QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MARG_COARSE_THRESH1_RATE3, 0x1f), + QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MARG_COARSE_THRESH2_RATE210, 0x1f), + QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MARG_COARSE_THRESH2_RATE3, 0x1f), + QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MARG_COARSE_THRESH3_RATE210, 0x1f), + QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MARG_COARSE_THRESH3_RATE3, 0x1f), + QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MARG_COARSE_THRESH4_RATE3, 0x1f), + QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MARG_COARSE_THRESH5_RATE3, 0x1f), + QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MARG_COARSE_THRESH6_RATE3, 0x1f), +}; + +static const struct qmp_phy_init_tbl x1e80100_qmp_gen4x8_pcie_txz_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V6_20_TX_RES_CODE_LANE_OFFSET_TX, 0x1a), + QMP_PHY_INIT_CFG(QSERDES_V6_20_TX_RES_CODE_LANE_OFFSET_RX, 0x05), + QMP_PHY_INIT_CFG(QSERDES_V6_20_TX_LANE_MODE_1, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V6_20_TX_LANE_MODE_2, 0x10), + QMP_PHY_INIT_CFG(QSERDES_V6_20_TX_LANE_MODE_3, 0x51), + QMP_PHY_INIT_CFG(QSERDES_V6_20_TX_TRAN_DRVR_EMP_EN, 0x34), +}; + +static const struct qmp_phy_init_tbl x1e80100_qmp_gen4x8_pcie_rxz_tbl[] = { + 
QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_UCDR_FO_GAIN_RATE_2, 0x0c), + QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_UCDR_SO_GAIN_RATE_2, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_UCDR_FO_GAIN_RATE_3, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_UCDR_PI_CONTROLS, 0x16), + QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_UCDR_SO_ACC_DEFAULT_VAL_RATE3, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_IVCM_CAL_CTRL2, 0x80), + QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_IVCM_POSTCAL_OFFSET, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_BKUP_CTRL1, 0x15), + QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_DFE_3, 0x45), + QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_VGA_CAL_MAN_VAL, 0x0c), + QMP_PHY_INIT_CFG(QSERDES_V6_20_VGA_CAL_CNTRL1, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_GM_CAL, 0x0d), + QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_EQU_ADAPTOR_CNTRL4, 0x0b), + QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_SIGDET_ENABLES, 0x1c), + QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_PHPRE_CTRL, 0x20), + QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38), + QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_Q_PI_INTRINSIC_BIAS_RATE32, 0x39), + QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE2_B0, 0xd4), + QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE2_B1, 0x23), + QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE2_B2, 0x58), + QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE2_B3, 0x9a), + QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE2_B4, 0x38), + QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE2_B5, 0xb6), + QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE2_B6, 0xee), + QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE3_B0, 0x1c), + QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE3_B1, 0xe4), + QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE3_B2, 0x60), + QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE3_B3, 0xdf), + QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE3_B4, 0x69), + QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE3_B5, 0x76), + QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE3_B6, 0xff), + QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_TX_ADPT_CTRL, 0x10), +}; + +static const struct qmp_phy_init_tbl x1e80100_qmp_gen4x8_pcie_rx_tbl[] = { + QMP_PHY_INIT_CFG_LANE(QSERDES_V6_20_RX_DFE_CTLE_POST_CAL_OFFSET, 0x3a, BIT(0)), +}; + +static const struct qmp_phy_init_tbl x1e80100_qmp_gen4x8_pcie_pcs_tbl[] = { + QMP_PHY_INIT_CFG(QPHY_V6_30_PCS_LOCK_DETECT_CONFIG2, 0x00), + QMP_PHY_INIT_CFG(QPHY_V6_30_PCS_G3S2_PRE_GAIN, 0x2e), + QMP_PHY_INIT_CFG(QPHY_V6_30_PCS_RX_SIGDET_LVL, 0x99), + QMP_PHY_INIT_CFG(QPHY_V6_30_PCS_ALIGN_DETECT_CONFIG7, 0x00), + QMP_PHY_INIT_CFG(QPHY_V6_30_PCS_EQ_CONFIG4, 0x00), + QMP_PHY_INIT_CFG(QPHY_V6_30_PCS_EQ_CONFIG5, 0x22), + QMP_PHY_INIT_CFG(QPHY_V6_30_PCS_TX_RX_CONFIG, 0x04), + QMP_PHY_INIT_CFG(QPHY_V6_30_PCS_TX_RX_CONFIG2, 0x02), +}; + +static const struct qmp_phy_init_tbl x1e80100_qmp_gen4x8_pcie_pcs_misc_tbl[] = { + QMP_PHY_INIT_CFG(QPHY_PCIE_V6_30_PCS_ENDPOINT_REFCLK_DRIVE, 0xc1), + QMP_PHY_INIT_CFG(QPHY_PCIE_V6_30_PCS_OSC_DTCT_ACTIONS, 0x00), + QMP_PHY_INIT_CFG(QPHY_PCIE_V6_30_PCS_EQ_CONFIG1, 0x16), + QMP_PHY_INIT_CFG(QPHY_PCIE_V6_30_PCS_G4_EQ_CONFIG5, 0x02), + QMP_PHY_INIT_CFG(QPHY_PCIE_V6_30_PCS_G4_PRE_GAIN, 0x2e), + QMP_PHY_INIT_CFG(QPHY_PCIE_V6_30_PCS_RX_MARGINING_CONFIG1, 0x03), + QMP_PHY_INIT_CFG(QPHY_PCIE_V6_30_PCS_RX_MARGINING_CONFIG3, 0x28), + QMP_PHY_INIT_CFG(QPHY_PCIE_V6_30_PCS_RX_MARGINING_CONFIG5, 0x18), + QMP_PHY_INIT_CFG(QPHY_PCIE_V6_30_PCS_G3_FOM_EQ_CONFIG5, 0x7a), + QMP_PHY_INIT_CFG(QPHY_PCIE_V6_30_PCS_G4_FOM_EQ_CONFIG5, 0x8a), + QMP_PHY_INIT_CFG(QPHY_PCIE_V6_30_PCS_G3_RXEQEVAL_TIME, 0x27), + QMP_PHY_INIT_CFG(QPHY_PCIE_V6_30_PCS_G4_RXEQEVAL_TIME, 0x27), + QMP_PHY_INIT_CFG(QPHY_PCIE_V6_30_PCS_TX_RX_CONFIG, 0xc0), + 
QMP_PHY_INIT_CFG(QPHY_PCIE_V6_30_PCS_POWER_STATE_CONFIG2, 0x1d), +}; + static const struct qmp_phy_init_tbl sm8250_qmp_pcie_serdes_tbl[] = { QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_EN_SEL, 0x08), QMP_PHY_INIT_CFG(QSERDES_V4_COM_CLK_SELECT, 0x34), @@ -2582,6 +2732,8 @@ struct qmp_pcie_offsets { u16 rx; u16 tx2; u16 rx2; + u16 txz; + u16 rxz; u16 ln_shrd; }; @@ -2592,6 +2744,10 @@ struct qmp_phy_cfg_tbls { int tx_num; const struct qmp_phy_init_tbl *rx; int rx_num; + const struct qmp_phy_init_tbl *txz; + int txz_num; + const struct qmp_phy_init_tbl *rxz; + int rxz_num; const struct qmp_phy_init_tbl *pcs; int pcs_num; const struct qmp_phy_init_tbl *pcs_misc; @@ -2659,6 +2815,8 @@ struct qmp_pcie { void __iomem *rx; void __iomem *tx2; void __iomem *rx2; + void __iomem *txz; + void __iomem *rxz; void __iomem *ln_shrd; void __iomem *port_b; @@ -2826,6 +2984,17 @@ static const struct qmp_pcie_offsets qmp_pcie_offsets_v6_20 = { .ln_shrd = 0x0e00, }; +static const struct qmp_pcie_offsets qmp_pcie_offsets_v6_30 = { + .serdes = 0x8800, + .pcs = 0x9000, + .pcs_misc = 0x9800, + .tx = 0x0000, + .rx = 0x0200, + .txz = 0xe000, + .rxz = 0xe200, + .ln_shrd = 0x8000, +}; + static const struct qmp_phy_cfg ipq8074_pciephy_cfg = { .lanes = 1, @@ -3704,6 +3873,38 @@ static const struct qmp_phy_cfg x1e80100_qmp_gen4x4_pciephy_cfg = { .has_nocsr_reset = true, }; +static const struct qmp_phy_cfg x1e80100_qmp_gen4x8_pciephy_cfg = { + .lanes = 8, + + .offsets = &qmp_pcie_offsets_v6_30, + .tbls = { + .serdes = x1e80100_qmp_gen4x8_pcie_serdes_tbl, + .serdes_num = ARRAY_SIZE(x1e80100_qmp_gen4x8_pcie_serdes_tbl), + .rx = x1e80100_qmp_gen4x8_pcie_rx_tbl, + .rx_num = ARRAY_SIZE(x1e80100_qmp_gen4x8_pcie_rx_tbl), + .txz = x1e80100_qmp_gen4x8_pcie_txz_tbl, + .txz_num = ARRAY_SIZE(x1e80100_qmp_gen4x8_pcie_txz_tbl), + .rxz = x1e80100_qmp_gen4x8_pcie_rxz_tbl, + .rxz_num = ARRAY_SIZE(x1e80100_qmp_gen4x8_pcie_rxz_tbl), + .pcs = x1e80100_qmp_gen4x8_pcie_pcs_tbl, + .pcs_num = ARRAY_SIZE(x1e80100_qmp_gen4x8_pcie_pcs_tbl), + .pcs_misc = x1e80100_qmp_gen4x8_pcie_pcs_misc_tbl, + .pcs_misc_num = ARRAY_SIZE(x1e80100_qmp_gen4x8_pcie_pcs_misc_tbl), + .ln_shrd = x1e80100_qmp_gen4x8_pcie_ln_shrd_tbl, + .ln_shrd_num = ARRAY_SIZE(x1e80100_qmp_gen4x8_pcie_ln_shrd_tbl), + }, + + .reset_list = sdm845_pciephy_reset_l, + .num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l), + .vreg_list = sm8550_qmp_phy_vreg_l, + .num_vregs = ARRAY_SIZE(sm8550_qmp_phy_vreg_l), + .regs = pciephy_v6_regs_layout, + + .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL, + .phy_status = PHYSTATUS_4_20, + .has_nocsr_reset = true, +}; + static void qmp_pcie_init_port_b(struct qmp_pcie *qmp, const struct qmp_phy_cfg_tbls *tbls) { const struct qmp_phy_cfg *cfg = qmp->cfg; @@ -3751,6 +3952,13 @@ static void qmp_pcie_init_registers(struct qmp_pcie *qmp, const struct qmp_phy_c qmp_configure(qmp->dev, serdes, tbls->serdes, tbls->serdes_num); + /* + * Tx/Rx registers that require different settings than + * txz/rxz must be programmed after txz/rxz. 
+ */ + qmp_configure(qmp->dev, qmp->txz, tbls->txz, tbls->txz_num); + qmp_configure(qmp->dev, qmp->rxz, tbls->rxz, tbls->rxz_num); + qmp_configure_lane(qmp->dev, tx, tbls->tx, tbls->tx_num, 1); qmp_configure_lane(qmp->dev, rx, tbls->rx, tbls->rx_num, 1); @@ -4293,6 +4501,9 @@ static int qmp_pcie_parse_dt(struct qmp_pcie *qmp) return PTR_ERR(qmp->port_b); } + qmp->txz = base + offs->txz; + qmp->rxz = base + offs->rxz; + if (cfg->tbls.ln_shrd) qmp->ln_shrd = base + offs->ln_shrd; @@ -4478,6 +4689,9 @@ static const struct of_device_id qmp_pcie_of_match_table[] = { }, { .compatible = "qcom,x1e80100-qmp-gen4x4-pcie-phy", .data = &x1e80100_qmp_gen4x4_pciephy_cfg, + }, { + .compatible = "qcom,x1e80100-qmp-gen4x8-pcie-phy", + .data = &x1e80100_qmp_gen4x8_pciephy_cfg, }, { }, }; diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v6_30.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v6_30.h new file mode 100644 index 000000000000..5a58ff197e6e --- /dev/null +++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v6_30.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2024 Qualcomm Innovation Center. All rights reserved. + */ + +#ifndef QCOM_PHY_QMP_PCS_PCIE_V6_30_H_ +#define QCOM_PHY_QMP_PCS_PCIE_V6_30_H_ + +/* Only for QMP V6_30 PHY - PCIE have different offsets than V6 */ +#define QPHY_PCIE_V6_30_PCS_POWER_STATE_CONFIG2 0x014 +#define QPHY_PCIE_V6_30_PCS_TX_RX_CONFIG 0x020 +#define QPHY_PCIE_V6_30_PCS_ENDPOINT_REFCLK_DRIVE 0x024 +#define QPHY_PCIE_V6_30_PCS_OSC_DTCT_ACTIONS 0x098 +#define QPHY_PCIE_V6_30_PCS_EQ_CONFIG1 0x0a8 +#define QPHY_PCIE_V6_30_PCS_G3_RXEQEVAL_TIME 0x0f8 +#define QPHY_PCIE_V6_30_PCS_G4_RXEQEVAL_TIME 0x0fc +#define QPHY_PCIE_V6_30_PCS_G4_EQ_CONFIG5 0x110 +#define QPHY_PCIE_V6_30_PCS_G4_PRE_GAIN 0x164 +#define QPHY_PCIE_V6_30_PCS_RX_MARGINING_CONFIG1 0x184 +#define QPHY_PCIE_V6_30_PCS_RX_MARGINING_CONFIG3 0x18c +#define QPHY_PCIE_V6_30_PCS_RX_MARGINING_CONFIG5 0x194 +#define QPHY_PCIE_V6_30_PCS_G3_FOM_EQ_CONFIG5 0x1b4 +#define QPHY_PCIE_V6_30_PCS_G4_FOM_EQ_CONFIG5 0x1c8 + +#endif diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v6_30.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v6_30.h new file mode 100644 index 000000000000..369120d88bc2 --- /dev/null +++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v6_30.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2024 Qualcomm Innovation Center. All rights reserved. 
+ */ + +#ifndef QCOM_PHY_QMP_PCS_V6_30_H_ +#define QCOM_PHY_QMP_PCS_V6_30_H_ + +/* Only for QMP V6_30 PHY - PCIe PCS registers */ +#define QPHY_V6_30_PCS_LOCK_DETECT_CONFIG2 0x0cc +#define QPHY_V6_30_PCS_G3S2_PRE_GAIN 0x17c +#define QPHY_V6_30_PCS_RX_SIGDET_LVL 0x194 +#define QPHY_V6_30_PCS_ALIGN_DETECT_CONFIG7 0x1dc +#define QPHY_V6_30_PCS_TX_RX_CONFIG 0x1e0 +#define QPHY_V6_30_PCS_TX_RX_CONFIG2 0x1e4 +#define QPHY_V6_30_PCS_EQ_CONFIG4 0x1fc +#define QPHY_V6_30_PCS_EQ_CONFIG5 0x200 + +#endif diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c index 1246d3bc8b92..acd6075bf6d9 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c +++ b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c @@ -871,6 +871,16 @@ static const struct qmp_phy_init_tbl sdx75_usb3_uniphy_pcs_usb_tbl[] = { QMP_PHY_INIT_CFG(QPHY_V6_PCS_USB3_RCVR_DTCT_DLY_U3_H, 0x00), }; +static const struct qmp_phy_init_tbl qcs8300_usb3_uniphy_tx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_1, 0xa5), + QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_2, 0xf2), + QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_3, 0x3f), + QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_4, 0x3f), + QMP_PHY_INIT_CFG(QSERDES_V5_TX_PI_QEC_CTRL, 0x21), + QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_TX, 0x10), + QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_RX, 0x0e), +}; + static const struct qmp_phy_init_tbl sm8350_usb3_uniphy_tx_tbl[] = { QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_1, 0xa5), QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_2, 0x82), @@ -989,6 +999,40 @@ static const struct qmp_phy_init_tbl sc8280xp_usb3_uniphy_tx_tbl[] = { QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_RX, 0x0e), }; +static const struct qmp_phy_init_tbl qcs8300_usb3_uniphy_rx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH4, 0xec), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH3, 0xbd), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH2, 0x7f), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH, 0x3f), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_LOW, 0x3f), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH4, 0xa9), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH3, 0x7b), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH2, 0xe4), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH, 0x24), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_LOW, 0x64), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_PI_CONTROLS, 0x99), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH1, 0x08), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH2, 0x08), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_GAIN1, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_GAIN2, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FO_GAIN, 0x09), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL1, 0x54), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL2, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x47), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_CNTRL, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_DEGLITCH_CNTRL, 0x0e), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SO_GAIN, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_GM_CAL, 0x19), + 
QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_ENABLES, 0x00), +}; + static const struct qmp_phy_init_tbl sc8280xp_usb3_uniphy_rx_tbl[] = { QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH4, 0xdc), QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH3, 0xbd), @@ -1462,6 +1506,24 @@ static const struct qmp_phy_cfg sa8775p_usb3_uniphy_cfg = { .regs = qmp_v5_usb3phy_regs_layout, }; +static const struct qmp_phy_cfg qcs8300_usb3_uniphy_cfg = { + .offsets = &qmp_usb_offsets_v5, + + .serdes_tbl = sc8280xp_usb3_uniphy_serdes_tbl, + .serdes_tbl_num = ARRAY_SIZE(sc8280xp_usb3_uniphy_serdes_tbl), + .tx_tbl = qcs8300_usb3_uniphy_tx_tbl, + .tx_tbl_num = ARRAY_SIZE(qcs8300_usb3_uniphy_tx_tbl), + .rx_tbl = qcs8300_usb3_uniphy_rx_tbl, + .rx_tbl_num = ARRAY_SIZE(qcs8300_usb3_uniphy_rx_tbl), + .pcs_tbl = sa8775p_usb3_uniphy_pcs_tbl, + .pcs_tbl_num = ARRAY_SIZE(sa8775p_usb3_uniphy_pcs_tbl), + .pcs_usb_tbl = sa8775p_usb3_uniphy_pcs_usb_tbl, + .pcs_usb_tbl_num = ARRAY_SIZE(sa8775p_usb3_uniphy_pcs_usb_tbl), + .vreg_list = qmp_phy_vreg_l, + .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), + .regs = qmp_v5_usb3phy_regs_layout, +}; + static const struct qmp_phy_cfg sc8280xp_usb3_uniphy_cfg = { .offsets = &qmp_usb_offsets_v5, @@ -2248,6 +2310,9 @@ static const struct of_device_id qmp_usb_of_match_table[] = { .compatible = "qcom,msm8996-qmp-usb3-phy", .data = &msm8996_usb3phy_cfg, }, { + .compatible = "qcom,qcs8300-qmp-usb3-uni-phy", + .data = &qcs8300_usb3_uniphy_cfg, + }, { .compatible = "qcom,qdu1000-qmp-usb3-uni-phy", .data = &qdu1000_usb3_uniphy_cfg, }, { diff --git a/drivers/phy/realtek/phy-rtk-usb2.c b/drivers/phy/realtek/phy-rtk-usb2.c index e3ad7cea5109..c3e131a64131 100644 --- a/drivers/phy/realtek/phy-rtk-usb2.c +++ b/drivers/phy/realtek/phy-rtk-usb2.c @@ -1298,7 +1298,7 @@ MODULE_DEVICE_TABLE(of, usbphy_rtk_dt_match); static struct platform_driver rtk_usb2phy_driver = { .probe = rtk_usb2phy_probe, - .remove_new = rtk_usb2phy_remove, + .remove = rtk_usb2phy_remove, .driver = { .name = "rtk-usb2phy", .of_match_table = usbphy_rtk_dt_match, diff --git a/drivers/phy/realtek/phy-rtk-usb3.c b/drivers/phy/realtek/phy-rtk-usb3.c index dfcf4b921bba..0cef29a7ddd9 100644 --- a/drivers/phy/realtek/phy-rtk-usb3.c +++ b/drivers/phy/realtek/phy-rtk-usb3.c @@ -734,7 +734,7 @@ MODULE_DEVICE_TABLE(of, usbphy_rtk_dt_match); static struct platform_driver rtk_usb3phy_driver = { .probe = rtk_usb3phy_probe, - .remove_new = rtk_usb3phy_remove, + .remove = rtk_usb3phy_remove, .driver = { .name = "rtk-usb3phy", .of_match_table = usbphy_rtk_dt_match, diff --git a/drivers/phy/renesas/phy-rcar-gen3-pcie.c b/drivers/phy/renesas/phy-rcar-gen3-pcie.c index 0ce7e9c94444..feca4cb2ff4d 100644 --- a/drivers/phy/renesas/phy-rcar-gen3-pcie.c +++ b/drivers/phy/renesas/phy-rcar-gen3-pcie.c @@ -132,11 +132,11 @@ static void rcar_gen3_phy_pcie_remove(struct platform_device *pdev) static struct platform_driver rcar_gen3_phy_driver = { .driver = { - .name = "phy_rcar_gen3_pcie", - .of_match_table = rcar_gen3_phy_pcie_match_table, + .name = "phy_rcar_gen3_pcie", + .of_match_table = rcar_gen3_phy_pcie_match_table, }, - .probe = rcar_gen3_phy_pcie_probe, - .remove_new = rcar_gen3_phy_pcie_remove, + .probe = rcar_gen3_phy_pcie_probe, + .remove = rcar_gen3_phy_pcie_remove, }; module_platform_driver(rcar_gen3_phy_driver); diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c index 58e123305152..775f4f973a6c 100644 --- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c +++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c @@ -825,7 +825,7 @@ static 
struct platform_driver rcar_gen3_phy_usb2_driver = { .of_match_table = rcar_gen3_phy_usb2_match_table, }, .probe = rcar_gen3_phy_usb2_probe, - .remove_new = rcar_gen3_phy_usb2_remove, + .remove = rcar_gen3_phy_usb2_remove, }; module_platform_driver(rcar_gen3_phy_usb2_driver); diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb3.c b/drivers/phy/renesas/phy-rcar-gen3-usb3.c index e2d630edd992..5c267d148c90 100644 --- a/drivers/phy/renesas/phy-rcar-gen3-usb3.c +++ b/drivers/phy/renesas/phy-rcar-gen3-usb3.c @@ -206,11 +206,11 @@ static void rcar_gen3_phy_usb3_remove(struct platform_device *pdev) static struct platform_driver rcar_gen3_phy_usb3_driver = { .driver = { - .name = "phy_rcar_gen3_usb3", - .of_match_table = rcar_gen3_phy_usb3_match_table, + .name = "phy_rcar_gen3_usb3", + .of_match_table = rcar_gen3_phy_usb3_match_table, }, - .probe = rcar_gen3_phy_usb3_probe, - .remove_new = rcar_gen3_phy_usb3_remove, + .probe = rcar_gen3_phy_usb3_probe, + .remove = rcar_gen3_phy_usb3_remove, }; module_platform_driver(rcar_gen3_phy_usb3_driver); diff --git a/drivers/phy/renesas/r8a779f0-ether-serdes.c b/drivers/phy/renesas/r8a779f0-ether-serdes.c index f1f1da4a0b1f..3b2d8cef75e5 100644 --- a/drivers/phy/renesas/r8a779f0-ether-serdes.c +++ b/drivers/phy/renesas/r8a779f0-ether-serdes.c @@ -404,7 +404,7 @@ static void r8a779f0_eth_serdes_remove(struct platform_device *pdev) static struct platform_driver r8a779f0_eth_serdes_driver_platform = { .probe = r8a779f0_eth_serdes_probe, - .remove_new = r8a779f0_eth_serdes_remove, + .remove = r8a779f0_eth_serdes_remove, .driver = { .name = "r8a779f0_eth_serdes", .of_match_table = r8a779f0_eth_serdes_of_table, diff --git a/drivers/phy/rockchip/phy-rockchip-inno-csidphy.c b/drivers/phy/rockchip/phy-rockchip-inno-csidphy.c index 98c92d6c482f..2ab99e1d47eb 100644 --- a/drivers/phy/rockchip/phy-rockchip-inno-csidphy.c +++ b/drivers/phy/rockchip/phy-rockchip-inno-csidphy.c @@ -472,7 +472,7 @@ static struct platform_driver rockchip_inno_csidphy_driver = { .of_match_table = rockchip_inno_csidphy_match_id, }, .probe = rockchip_inno_csidphy_probe, - .remove_new = rockchip_inno_csidphy_remove, + .remove = rockchip_inno_csidphy_remove, }; module_platform_driver(rockchip_inno_csidphy_driver); diff --git a/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c b/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c index 6405943a2676..d5b1a4e2f7d3 100644 --- a/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c +++ b/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c @@ -784,7 +784,7 @@ static struct platform_driver inno_dsidphy_driver = { .of_match_table = of_match_ptr(inno_dsidphy_of_match), }, .probe = inno_dsidphy_probe, - .remove_new = inno_dsidphy_remove, + .remove = inno_dsidphy_remove, }; module_platform_driver(inno_dsidphy_driver); diff --git a/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c b/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c index 053bd62e31ba..8dcc2bb777b5 100644 --- a/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c +++ b/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c @@ -1424,8 +1424,8 @@ static const struct of_device_id inno_hdmi_phy_of_match[] = { MODULE_DEVICE_TABLE(of, inno_hdmi_phy_of_match); static struct platform_driver inno_hdmi_phy_driver = { - .probe = inno_hdmi_phy_probe, - .remove_new = inno_hdmi_phy_remove, + .probe = inno_hdmi_phy_probe, + .remove = inno_hdmi_phy_remove, .driver = { .name = "inno-hdmi-phy", .of_match_table = inno_hdmi_phy_of_match, diff --git a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c index 
4f71373ae6e1..96f3d868a526 100644 --- a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c +++ b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c @@ -229,9 +229,10 @@ struct rockchip_usb2phy_port { * @dev: pointer to device. * @grf: General Register Files regmap. * @usbgrf: USB General Register Files regmap. - * @clk: clock struct of phy input clk. + * @clks: array of phy input clocks. * @clk480m: clock struct of phy output clk. * @clk480m_hw: clock struct of phy output clk management. + * @num_clks: number of phy input clocks. * @phy_reset: phy reset control. * @chg_state: states involved in USB charger detection. * @chg_type: USB charger types. @@ -246,9 +247,10 @@ struct rockchip_usb2phy { struct device *dev; struct regmap *grf; struct regmap *usbgrf; - struct clk *clk; + struct clk_bulk_data *clks; struct clk *clk480m; struct clk_hw clk480m_hw; + int num_clks; struct reset_control *phy_reset; enum usb_chg_state chg_state; enum power_supply_type chg_type; @@ -310,6 +312,13 @@ static int rockchip_usb2phy_reset(struct rockchip_usb2phy *rphy) return 0; } +static void rockchip_usb2phy_clk_bulk_disable(void *data) +{ + struct rockchip_usb2phy *rphy = data; + + clk_bulk_disable_unprepare(rphy->num_clks, rphy->clks); +} + static int rockchip_usb2phy_clk480m_prepare(struct clk_hw *hw) { struct rockchip_usb2phy *rphy = @@ -376,7 +385,9 @@ rockchip_usb2phy_clk480m_register(struct rockchip_usb2phy *rphy) { struct device_node *node = rphy->dev->of_node; struct clk_init_data init; + struct clk *refclk = NULL; const char *clk_name; + int i; int ret = 0; init.flags = 0; @@ -386,8 +397,15 @@ rockchip_usb2phy_clk480m_register(struct rockchip_usb2phy *rphy) /* optional override of the clockname */ of_property_read_string(node, "clock-output-names", &init.name); - if (rphy->clk) { - clk_name = __clk_get_name(rphy->clk); + for (i = 0; i < rphy->num_clks; i++) { + if (!strncmp(rphy->clks[i].id, "phyclk", 6)) { + refclk = rphy->clks[i].clk; + break; + } + } + + if (!IS_ERR(refclk)) { + clk_name = __clk_get_name(refclk); init.parent_names = &clk_name; init.num_parents = 1; } else { @@ -418,30 +436,28 @@ err_ret: static int rockchip_usb2phy_extcon_register(struct rockchip_usb2phy *rphy) { - int ret; struct device_node *node = rphy->dev->of_node; struct extcon_dev *edev; + int ret; if (of_property_read_bool(node, "extcon")) { edev = extcon_get_edev_by_phandle(rphy->dev, 0); - if (IS_ERR(edev)) { - if (PTR_ERR(edev) != -EPROBE_DEFER) - dev_err(rphy->dev, "Invalid or missing extcon\n"); - return PTR_ERR(edev); - } + if (IS_ERR(edev)) + return dev_err_probe(rphy->dev, PTR_ERR(edev), + "invalid or missing extcon\n"); } else { /* Initialize extcon device */ edev = devm_extcon_dev_allocate(rphy->dev, rockchip_usb2phy_extcon_cable); if (IS_ERR(edev)) - return -ENOMEM; + return dev_err_probe(rphy->dev, PTR_ERR(edev), + "failed to allocate extcon device\n"); ret = devm_extcon_dev_register(rphy->dev, edev); - if (ret) { - dev_err(rphy->dev, "failed to register extcon device\n"); - return ret; - } + if (ret) + return dev_err_probe(rphy->dev, ret, + "failed to register extcon device\n"); } rphy->edev = edev; @@ -1327,7 +1343,7 @@ static int rockchip_usb2phy_probe(struct platform_device *pdev) struct rockchip_usb2phy *rphy; const struct rockchip_usb2phy_cfg *phy_cfgs; unsigned int reg; - int index, ret; + int index = 0, ret; rphy = devm_kzalloc(dev, sizeof(*rphy), GFP_KERNEL); if (!rphy) @@ -1339,9 +1355,7 @@ static int rockchip_usb2phy_probe(struct platform_device *pdev) dev_err(dev, "failed to locate usbgrf\n"); return 
PTR_ERR(rphy->grf); } - } - - else { + } else { rphy->grf = syscon_node_to_regmap(dev->parent->of_node); if (IS_ERR(rphy->grf)) return PTR_ERR(rphy->grf); @@ -1358,16 +1372,14 @@ static int rockchip_usb2phy_probe(struct platform_device *pdev) } if (of_property_read_u32_index(np, "reg", 0, ®)) { - dev_err(dev, "the reg property is not assigned in %pOFn node\n", - np); + dev_err(dev, "the reg property is not assigned in %pOFn node\n", np); return -EINVAL; } /* support address_cells=2 */ if (of_property_count_u32_elems(np, "reg") > 2 && reg == 0) { if (of_property_read_u32_index(np, "reg", 1, ®)) { - dev_err(dev, "the reg property is not assigned in %pOFn node\n", - np); + dev_err(dev, "the reg property is not assigned in %pOFn node\n", np); return -EINVAL; } } @@ -1386,8 +1398,7 @@ static int rockchip_usb2phy_probe(struct platform_device *pdev) if (ret) return ret; - /* find out a proper config which can be matched with dt. */ - index = 0; + /* find a proper config that can be matched with the DT */ do { if (phy_cfgs[index].reg == reg) { rphy->phy_cfg = &phy_cfgs[index]; @@ -1406,17 +1417,25 @@ static int rockchip_usb2phy_probe(struct platform_device *pdev) if (IS_ERR(rphy->phy_reset)) return PTR_ERR(rphy->phy_reset); - rphy->clk = devm_clk_get_optional_enabled(dev, "phyclk"); - if (IS_ERR(rphy->clk)) { - return dev_err_probe(&pdev->dev, PTR_ERR(rphy->clk), - "failed to get phyclk\n"); - } + ret = devm_clk_bulk_get_all(dev, &rphy->clks); + if (ret == -EPROBE_DEFER) + return dev_err_probe(&pdev->dev, -EPROBE_DEFER, + "failed to get phy clock\n"); + + /* Clocks are optional */ + rphy->num_clks = ret < 0 ? 0 : ret; ret = rockchip_usb2phy_clk480m_register(rphy); - if (ret) { - dev_err(dev, "failed to register 480m output clock\n"); + if (ret) + return dev_err_probe(dev, ret, "failed to register 480m output clock\n"); + + ret = clk_bulk_prepare_enable(rphy->num_clks, rphy->clks); + if (ret) + return dev_err_probe(dev, ret, "failed to enable phy clock\n"); + + ret = devm_add_action_or_reset(dev, rockchip_usb2phy_clk_bulk_disable, rphy); + if (ret) return ret; - } if (rphy->phy_cfg->phy_tuning) { ret = rphy->phy_cfg->phy_tuning(rphy); @@ -1436,8 +1455,7 @@ static int rockchip_usb2phy_probe(struct platform_device *pdev) phy = devm_phy_create(dev, child_np, &rockchip_usb2phy_ops); if (IS_ERR(phy)) { - dev_err_probe(dev, PTR_ERR(phy), "failed to create phy\n"); - ret = PTR_ERR(phy); + ret = dev_err_probe(dev, PTR_ERR(phy), "failed to create phy\n"); goto put_child; } @@ -1446,13 +1464,11 @@ static int rockchip_usb2phy_probe(struct platform_device *pdev) /* initialize otg/host port separately */ if (of_node_name_eq(child_np, "host-port")) { - ret = rockchip_usb2phy_host_port_init(rphy, rport, - child_np); + ret = rockchip_usb2phy_host_port_init(rphy, rport, child_np); if (ret) goto put_child; } else { - ret = rockchip_usb2phy_otg_port_init(rphy, rport, - child_np); + ret = rockchip_usb2phy_otg_port_init(rphy, rport, child_np); if (ret) goto put_child; } @@ -1474,8 +1490,7 @@ next_child: "rockchip_usb2phy", rphy); if (ret) { - dev_err(rphy->dev, - "failed to request usb2phy irq handle\n"); + dev_err_probe(rphy->dev, ret, "failed to request usb2phy irq handle\n"); goto put_child; } } @@ -1495,6 +1510,30 @@ static int rk3128_usb2phy_tuning(struct rockchip_usb2phy *rphy) BIT(2) << BIT_WRITEABLE_SHIFT | 0); } +static int rk3576_usb2phy_tuning(struct rockchip_usb2phy *rphy) +{ + int ret; + u32 reg = rphy->phy_cfg->reg; + + /* Deassert SIDDQ to power on analog block */ + ret = regmap_write(rphy->grf, reg + 
0x0010, GENMASK(29, 29) | 0x0000); + if (ret) + return ret; + + /* Do reset after exit IDDQ mode */ + ret = rockchip_usb2phy_reset(rphy); + if (ret) + return ret; + + /* HS DC Voltage Level Adjustment 4'b1001 : +5.89% */ + ret |= regmap_write(rphy->grf, reg + 0x000c, GENMASK(27, 24) | 0x0900); + + /* HS Transmitter Pre-Emphasis Current Control 2'b10 : 2x */ + ret |= regmap_write(rphy->grf, reg + 0x0010, GENMASK(20, 19) | 0x0010); + + return ret; +} + static int rk3588_usb2phy_tuning(struct rockchip_usb2phy *rphy) { int ret; @@ -1923,6 +1962,84 @@ static const struct rockchip_usb2phy_cfg rk3568_phy_cfgs[] = { { /* sentinel */ } }; +static const struct rockchip_usb2phy_cfg rk3576_phy_cfgs[] = { + { + .reg = 0x0, + .num_ports = 1, + .phy_tuning = rk3576_usb2phy_tuning, + .clkout_ctl = { 0x0008, 0, 0, 1, 0 }, + .port_cfgs = { + [USB2PHY_PORT_OTG] = { + .phy_sus = { 0x0000, 8, 0, 0, 0x1d1 }, + .bvalid_det_en = { 0x00c0, 1, 1, 0, 1 }, + .bvalid_det_st = { 0x00c4, 1, 1, 0, 1 }, + .bvalid_det_clr = { 0x00c8, 1, 1, 0, 1 }, + .ls_det_en = { 0x00c0, 0, 0, 0, 1 }, + .ls_det_st = { 0x00c4, 0, 0, 0, 1 }, + .ls_det_clr = { 0x00c8, 0, 0, 0, 1 }, + .disfall_en = { 0x00c0, 6, 6, 0, 1 }, + .disfall_st = { 0x00c4, 6, 6, 0, 1 }, + .disfall_clr = { 0x00c8, 6, 6, 0, 1 }, + .disrise_en = { 0x00c0, 5, 5, 0, 1 }, + .disrise_st = { 0x00c4, 5, 5, 0, 1 }, + .disrise_clr = { 0x00c8, 5, 5, 0, 1 }, + .utmi_avalid = { 0x0080, 1, 1, 0, 1 }, + .utmi_bvalid = { 0x0080, 0, 0, 0, 1 }, + .utmi_ls = { 0x0080, 5, 4, 0, 1 }, + } + }, + .chg_det = { + .cp_det = { 0x0080, 8, 8, 0, 1 }, + .dcp_det = { 0x0080, 8, 8, 0, 1 }, + .dp_det = { 0x0080, 9, 9, 1, 0 }, + .idm_sink_en = { 0x0010, 5, 5, 1, 0 }, + .idp_sink_en = { 0x0010, 5, 5, 0, 1 }, + .idp_src_en = { 0x0010, 14, 14, 0, 1 }, + .rdm_pdwn_en = { 0x0010, 14, 14, 0, 1 }, + .vdm_src_en = { 0x0010, 7, 6, 0, 3 }, + .vdp_src_en = { 0x0010, 7, 6, 0, 3 }, + }, + }, + { + .reg = 0x2000, + .num_ports = 1, + .phy_tuning = rk3576_usb2phy_tuning, + .clkout_ctl = { 0x2008, 0, 0, 1, 0 }, + .port_cfgs = { + [USB2PHY_PORT_OTG] = { + .phy_sus = { 0x2000, 8, 0, 0, 0x1d1 }, + .bvalid_det_en = { 0x20c0, 1, 1, 0, 1 }, + .bvalid_det_st = { 0x20c4, 1, 1, 0, 1 }, + .bvalid_det_clr = { 0x20c8, 1, 1, 0, 1 }, + .ls_det_en = { 0x20c0, 0, 0, 0, 1 }, + .ls_det_st = { 0x20c4, 0, 0, 0, 1 }, + .ls_det_clr = { 0x20c8, 0, 0, 0, 1 }, + .disfall_en = { 0x20c0, 6, 6, 0, 1 }, + .disfall_st = { 0x20c4, 6, 6, 0, 1 }, + .disfall_clr = { 0x20c8, 6, 6, 0, 1 }, + .disrise_en = { 0x20c0, 5, 5, 0, 1 }, + .disrise_st = { 0x20c4, 5, 5, 0, 1 }, + .disrise_clr = { 0x20c8, 5, 5, 0, 1 }, + .utmi_avalid = { 0x2080, 1, 1, 0, 1 }, + .utmi_bvalid = { 0x2080, 0, 0, 0, 1 }, + .utmi_ls = { 0x2080, 5, 4, 0, 1 }, + } + }, + .chg_det = { + .cp_det = { 0x2080, 8, 8, 0, 1 }, + .dcp_det = { 0x2080, 8, 8, 0, 1 }, + .dp_det = { 0x2080, 9, 9, 1, 0 }, + .idm_sink_en = { 0x2010, 5, 5, 1, 0 }, + .idp_sink_en = { 0x2010, 5, 5, 0, 1 }, + .idp_src_en = { 0x2010, 14, 14, 0, 1 }, + .rdm_pdwn_en = { 0x2010, 14, 14, 0, 1 }, + .vdm_src_en = { 0x2010, 7, 6, 0, 3 }, + .vdp_src_en = { 0x2010, 7, 6, 0, 3 }, + }, + }, + { /* sentinel */ } +}; + static const struct rockchip_usb2phy_cfg rk3588_phy_cfgs[] = { { .reg = 0x0000, @@ -2094,6 +2211,7 @@ static const struct of_device_id rockchip_usb2phy_dt_match[] = { { .compatible = "rockchip,rk3366-usb2phy", .data = &rk3366_phy_cfgs }, { .compatible = "rockchip,rk3399-usb2phy", .data = &rk3399_phy_cfgs }, { .compatible = "rockchip,rk3568-usb2phy", .data = &rk3568_phy_cfgs }, + { .compatible = "rockchip,rk3576-usb2phy", 
.data = &rk3576_phy_cfgs }, { .compatible = "rockchip,rk3588-usb2phy", .data = &rk3588_phy_cfgs }, { .compatible = "rockchip,rv1108-usb2phy", .data = &rv1108_phy_cfgs }, {} diff --git a/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c b/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c index 9f084697dd05..ceab9c71d3b5 100644 --- a/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c +++ b/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c @@ -256,13 +256,10 @@ struct ropll_config { }; enum rk_hdptx_reset { - RST_PHY = 0, - RST_APB, + RST_APB = 0, RST_INIT, RST_CMN, RST_LANE, - RST_ROPLL, - RST_LCPLL, RST_MAX }; @@ -665,11 +662,6 @@ static void rk_hdptx_phy_disable(struct rk_hdptx_phy *hdptx) { u32 val; - /* reset phy and apb, or phy locked flag may keep 1 */ - reset_control_assert(hdptx->rsts[RST_PHY].rstc); - usleep_range(20, 30); - reset_control_deassert(hdptx->rsts[RST_PHY].rstc); - reset_control_assert(hdptx->rsts[RST_APB].rstc); usleep_range(20, 30); reset_control_deassert(hdptx->rsts[RST_APB].rstc); @@ -792,10 +784,6 @@ static int rk_hdptx_ropll_tmds_cmn_config(struct rk_hdptx_phy *hdptx, rk_hdptx_pre_power_up(hdptx); - reset_control_assert(hdptx->rsts[RST_ROPLL].rstc); - usleep_range(20, 30); - reset_control_deassert(hdptx->rsts[RST_ROPLL].rstc); - rk_hdptx_multi_reg_write(hdptx, rk_hdtpx_common_cmn_init_seq); rk_hdptx_multi_reg_write(hdptx, rk_hdtpx_tmds_cmn_init_seq); @@ -1098,13 +1086,10 @@ static int rk_hdptx_phy_probe(struct platform_device *pdev) return dev_err_probe(dev, PTR_ERR(hdptx->regmap), "Failed to init regmap\n"); - hdptx->rsts[RST_PHY].id = "phy"; hdptx->rsts[RST_APB].id = "apb"; hdptx->rsts[RST_INIT].id = "init"; hdptx->rsts[RST_CMN].id = "cmn"; hdptx->rsts[RST_LANE].id = "lane"; - hdptx->rsts[RST_ROPLL].id = "ropll"; - hdptx->rsts[RST_LCPLL].id = "lcpll"; ret = devm_reset_control_bulk_get_exclusive(dev, RST_MAX, hdptx->rsts); if (ret) diff --git a/drivers/phy/rockchip/phy-rockchip-typec.c b/drivers/phy/rockchip/phy-rockchip-typec.c index 4efcb78b0ab1..122ae0fdc785 100644 --- a/drivers/phy/rockchip/phy-rockchip-typec.c +++ b/drivers/phy/rockchip/phy-rockchip-typec.c @@ -1210,7 +1210,7 @@ MODULE_DEVICE_TABLE(of, rockchip_typec_phy_dt_ids); static struct platform_driver rockchip_typec_phy_driver = { .probe = rockchip_typec_phy_probe, - .remove_new = rockchip_typec_phy_remove, + .remove = rockchip_typec_phy_remove, .driver = { .name = "rockchip-typec-phy", .of_match_table = rockchip_typec_phy_dt_ids, diff --git a/drivers/phy/rockchip/phy-rockchip-usbdp.c b/drivers/phy/rockchip/phy-rockchip-usbdp.c index 2c51e5c62d3e..5b1e8a3806ed 100644 --- a/drivers/phy/rockchip/phy-rockchip-usbdp.c +++ b/drivers/phy/rockchip/phy-rockchip-usbdp.c @@ -1538,6 +1538,43 @@ static const char * const rk_udphy_rst_list[] = { "init", "cmn", "lane", "pcs_apb", "pma_apb" }; +static const struct rk_udphy_cfg rk3576_udphy_cfgs = { + .num_phys = 1, + .phy_ids = { 0x2b010000 }, + .num_rsts = ARRAY_SIZE(rk_udphy_rst_list), + .rst_list = rk_udphy_rst_list, + .grfcfg = { + /* u2phy-grf */ + .bvalid_phy_con = RK_UDPHY_GEN_GRF_REG(0x0010, 1, 0, 0x2, 0x3), + .bvalid_grf_con = RK_UDPHY_GEN_GRF_REG(0x0000, 15, 14, 0x1, 0x3), + + /* usb-grf */ + .usb3otg0_cfg = RK_UDPHY_GEN_GRF_REG(0x0030, 15, 0, 0x1100, 0x0188), + + /* usbdpphy-grf */ + .low_pwrn = RK_UDPHY_GEN_GRF_REG(0x0004, 13, 13, 0, 1), + .rx_lfps = RK_UDPHY_GEN_GRF_REG(0x0004, 14, 14, 0, 1), + }, + .vogrfcfg = { + { + .hpd_trigger = RK_UDPHY_GEN_GRF_REG(0x0000, 11, 10, 1, 3), + .dp_lane_reg = 0x0000, + }, + }, + .dp_tx_ctrl_cfg = { + 
rk3588_dp_tx_drv_ctrl_rbr_hbr_typec, + rk3588_dp_tx_drv_ctrl_rbr_hbr_typec, + rk3588_dp_tx_drv_ctrl_hbr2, + rk3588_dp_tx_drv_ctrl_hbr3, + }, + .dp_tx_ctrl_cfg_typec = { + rk3588_dp_tx_drv_ctrl_rbr_hbr_typec, + rk3588_dp_tx_drv_ctrl_rbr_hbr_typec, + rk3588_dp_tx_drv_ctrl_hbr2, + rk3588_dp_tx_drv_ctrl_hbr3, + }, +}; + static const struct rk_udphy_cfg rk3588_udphy_cfgs = { .num_phys = 2, .phy_ids = { @@ -1585,6 +1622,10 @@ static const struct rk_udphy_cfg rk3588_udphy_cfgs = { static const struct of_device_id rk_udphy_dt_match[] = { { + .compatible = "rockchip,rk3576-usbdp-phy", + .data = &rk3576_udphy_cfgs + }, + { .compatible = "rockchip,rk3588-usbdp-phy", .data = &rk3588_udphy_cfgs }, diff --git a/drivers/phy/st/Kconfig b/drivers/phy/st/Kconfig index 3fc3d0781fb8..304614b6dabf 100644 --- a/drivers/phy/st/Kconfig +++ b/drivers/phy/st/Kconfig @@ -33,6 +33,17 @@ config PHY_STIH407_USB Enable this support to enable the picoPHY device used by USB2 and USB3 controllers on STMicroelectronics STiH407 SoC families. +config PHY_STM32_COMBOPHY + tristate "STMicroelectronics COMBOPHY driver for STM32MP25" + depends on ARCH_STM32 || COMPILE_TEST + select GENERIC_PHY + help + Enable this to support the COMBOPHY device used by USB3 or PCIe + controllers on STMicroelectronics STM32MP25 SoC. + This driver controls the COMBOPHY block to generate the PCIe 100Mhz + reference clock from either the external clock generator or HSE + internal SoC clock source. + config PHY_STM32_USBPHYC tristate "STMicroelectronics STM32 USB HS PHY Controller driver" depends on ARCH_STM32 || COMPILE_TEST diff --git a/drivers/phy/st/Makefile b/drivers/phy/st/Makefile index c862dd937b64..cb80e954ea9f 100644 --- a/drivers/phy/st/Makefile +++ b/drivers/phy/st/Makefile @@ -3,4 +3,5 @@ obj-$(CONFIG_PHY_MIPHY28LP) += phy-miphy28lp.o obj-$(CONFIG_PHY_ST_SPEAR1310_MIPHY) += phy-spear1310-miphy.o obj-$(CONFIG_PHY_ST_SPEAR1340_MIPHY) += phy-spear1340-miphy.o obj-$(CONFIG_PHY_STIH407_USB) += phy-stih407-usb.o +obj-$(CONFIG_PHY_STM32_COMBOPHY) += phy-stm32-combophy.o obj-$(CONFIG_PHY_STM32_USBPHYC) += phy-stm32-usbphyc.o diff --git a/drivers/phy/st/phy-stm32-combophy.c b/drivers/phy/st/phy-stm32-combophy.c new file mode 100644 index 000000000000..765bb34fe358 --- /dev/null +++ b/drivers/phy/st/phy-stm32-combophy.c @@ -0,0 +1,598 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * STMicroelectronics COMBOPHY STM32MP25 Controller driver. 
+ * + * Copyright (C) 2024 STMicroelectronics + * Author: Christian Bruel <christian.bruel@foss.st.com> + */ + +#include <linux/bitfield.h> +#include <linux/clk.h> +#include <linux/mfd/syscon.h> +#include <linux/platform_device.h> +#include <linux/phy/phy.h> +#include <linux/pm_runtime.h> +#include <linux/regmap.h> +#include <linux/reset.h> +#include <dt-bindings/phy/phy.h> + +#define SYSCFG_COMBOPHY_CR1 0x4c00 +#define SYSCFG_COMBOPHY_CR2 0x4c04 +#define SYSCFG_COMBOPHY_CR4 0x4c0c +#define SYSCFG_COMBOPHY_CR5 0x4c10 +#define SYSCFG_COMBOPHY_SR 0x4c14 +#define SYSCFG_PCIEPRGCR 0x6080 + +/* SYSCFG PCIEPRGCR */ +#define STM32MP25_PCIEPRGCR_EN BIT(0) +#define STM32MP25_PCIEPRG_IMPCTRL_OHM GENMASK(3, 1) +#define STM32MP25_PCIEPRG_IMPCTRL_VSWING GENMASK(5, 4) + +/* SYSCFG SYSCFG_COMBOPHY_SR */ +#define STM32MP25_PIPE0_PHYSTATUS BIT(1) + +/* SYSCFG CR1 */ +#define SYSCFG_COMBOPHY_CR1_REFUSEPAD BIT(0) +#define SYSCFG_COMBOPHY_CR1_MPLLMULT GENMASK(7, 1) +#define SYSCFG_COMBOPHY_CR1_REFCLKSEL GENMASK(16, 8) +#define SYSCFG_COMBOPHY_CR1_REFCLKDIV2 BIT(17) +#define SYSCFG_COMBOPHY_CR1_REFSSPEN BIT(18) +#define SYSCFG_COMBOPHY_CR1_SSCEN BIT(19) + +/* SYSCFG CR4 */ +#define SYSCFG_COMBOPHY_CR4_RX0_EQ GENMASK(2, 0) + +#define MPLLMULT_19_2 (0x02u << 1) +#define MPLLMULT_20 (0x7du << 1) +#define MPLLMULT_24 (0x68u << 1) +#define MPLLMULT_25 (0x64u << 1) +#define MPLLMULT_26 (0x60u << 1) +#define MPLLMULT_38_4 (0x41u << 1) +#define MPLLMULT_48 (0x6cu << 1) +#define MPLLMULT_50 (0x32u << 1) +#define MPLLMULT_52 (0x30u << 1) +#define MPLLMULT_100 (0x19u << 1) + +#define REFCLKSEL_0 0 +#define REFCLKSEL_1 (0x108u << 8) + +#define REFCLDIV_0 0 + +/* SYSCFG CR2 */ +#define SYSCFG_COMBOPHY_CR2_MODESEL GENMASK(1, 0) +#define SYSCFG_COMBOPHY_CR2_ISO_DIS BIT(15) + +#define COMBOPHY_MODESEL_PCIE 0 +#define COMBOPHY_MODESEL_USB 3 + +/* SYSCFG CR5 */ +#define SYSCFG_COMBOPHY_CR5_COMMON_CLOCKS BIT(12) + +#define COMBOPHY_SUP_ANA_MPLL_LOOP_CTL 0xc0 +#define COMBOPHY_PROP_CNTRL GENMASK(7, 4) + +/* Required apb/ker clocks first, optional pad last. */ +static const char * const combophy_clks[] = {"apb", "ker", "pad"}; +#define APB_CLK 0 +#define KER_CLK 1 +#define PAD_CLK 2 + +struct stm32_combophy { + struct phy *phy; + struct regmap *regmap; + struct device *dev; + void __iomem *base; + struct reset_control *phy_reset; + struct clk_bulk_data clks[ARRAY_SIZE(combophy_clks)]; + int num_clks; + bool have_pad_clk; + unsigned int type; + bool is_init; + int irq_wakeup; +}; + +struct clk_impedance { + u32 microohm; + u32 vswing[4]; +}; + +/* + * lookup table to hold the settings needed for a ref clock frequency + * impedance, the offset is used to set the IMP_CTL and DE_EMP bit of the + * PRG_IMP_CTRL register. 
Use ordered discrete values in the table + */ +static const struct clk_impedance imp_lookup[] = { + { 6090000, { 442000, 564000, 684000, 802000 } }, + { 5662000, { 528000, 621000, 712000, 803000 } }, + { 5292000, { 491000, 596000, 700000, 802000 } }, + { 4968000, { 558000, 640000, 722000, 803000 } }, + { 4684000, { 468000, 581000, 692000, 802000 } }, + { 4429000, { 554000, 613000, 717000, 803000 } }, + { 4204000, { 511000, 609000, 706000, 802000 } }, + { 3999000, { 571000, 648000, 726000, 803000 } } +}; + +static int stm32_impedance_tune(struct stm32_combophy *combophy) +{ + u8 imp_size = ARRAY_SIZE(imp_lookup); + u8 vswing_size = ARRAY_SIZE(imp_lookup[0].vswing); + u8 imp_of, vswing_of; + u32 max_imp = imp_lookup[0].microohm; + u32 min_imp = imp_lookup[imp_size - 1].microohm; + u32 max_vswing = imp_lookup[imp_size - 1].vswing[vswing_size - 1]; + u32 min_vswing = imp_lookup[0].vswing[0]; + u32 val; + + if (!of_property_read_u32(combophy->dev->of_node, "st,output-micro-ohms", &val)) { + if (val < min_imp || val > max_imp) { + dev_err(combophy->dev, "Invalid value %u for output ohm\n", val); + return -EINVAL; + } + + for (imp_of = 0; imp_of < ARRAY_SIZE(imp_lookup); imp_of++) + if (imp_lookup[imp_of].microohm <= val) + break; + + dev_dbg(combophy->dev, "Set %u micro-ohms output impedance\n", + imp_lookup[imp_of].microohm); + + regmap_update_bits(combophy->regmap, SYSCFG_PCIEPRGCR, + STM32MP25_PCIEPRG_IMPCTRL_OHM, + FIELD_PREP(STM32MP25_PCIEPRG_IMPCTRL_OHM, imp_of)); + } else { + regmap_read(combophy->regmap, SYSCFG_PCIEPRGCR, &val); + imp_of = FIELD_GET(STM32MP25_PCIEPRG_IMPCTRL_OHM, val); + } + + if (!of_property_read_u32(combophy->dev->of_node, "st,output-vswing-microvolt", &val)) { + if (val < min_vswing || val > max_vswing) { + dev_err(combophy->dev, "Invalid value %u for output vswing\n", val); + return -EINVAL; + } + + for (vswing_of = 0; vswing_of < ARRAY_SIZE(imp_lookup[imp_of].vswing); vswing_of++) + if (imp_lookup[imp_of].vswing[vswing_of] >= val) + break; + + dev_dbg(combophy->dev, "Set %u microvolt swing\n", + imp_lookup[imp_of].vswing[vswing_of]); + + regmap_update_bits(combophy->regmap, SYSCFG_PCIEPRGCR, + STM32MP25_PCIEPRG_IMPCTRL_VSWING, + FIELD_PREP(STM32MP25_PCIEPRG_IMPCTRL_VSWING, vswing_of)); + } + + return 0; +} + +static int stm32_combophy_pll_init(struct stm32_combophy *combophy) +{ + int ret; + u32 refclksel, pllmult, propcntrl, val; + u32 clk_rate; + struct clk *clk; + u32 cr1_val = 0, cr1_mask = 0; + + if (combophy->have_pad_clk) + clk = combophy->clks[PAD_CLK].clk; + else + clk = combophy->clks[KER_CLK].clk; + + clk_rate = clk_get_rate(clk); + + dev_dbg(combophy->dev, "%s pll init rate %d\n", + combophy->have_pad_clk ? 
"External" : "Ker", clk_rate); + + if (combophy->type != PHY_TYPE_PCIE) { + cr1_mask |= SYSCFG_COMBOPHY_CR1_REFSSPEN; + cr1_val |= SYSCFG_COMBOPHY_CR1_REFSSPEN; + } + + if (of_property_present(combophy->dev->of_node, "st,ssc-on")) { + dev_dbg(combophy->dev, "Enabling clock with SSC\n"); + cr1_mask |= SYSCFG_COMBOPHY_CR1_SSCEN; + cr1_val |= SYSCFG_COMBOPHY_CR1_SSCEN; + } + + switch (clk_rate) { + case 100000000: + pllmult = MPLLMULT_100; + refclksel = REFCLKSEL_0; + propcntrl = 0x8u << 4; + break; + case 19200000: + pllmult = MPLLMULT_19_2; + refclksel = REFCLKSEL_1; + propcntrl = 0x8u << 4; + break; + case 25000000: + pllmult = MPLLMULT_25; + refclksel = REFCLKSEL_0; + propcntrl = 0xeu << 4; + break; + case 24000000: + pllmult = MPLLMULT_24; + refclksel = REFCLKSEL_1; + propcntrl = 0xeu << 4; + break; + case 20000000: + pllmult = MPLLMULT_20; + refclksel = REFCLKSEL_0; + propcntrl = 0xeu << 4; + break; + default: + dev_err(combophy->dev, "Invalid rate 0x%x\n", clk_rate); + return -EINVAL; + } + + cr1_mask |= SYSCFG_COMBOPHY_CR1_REFCLKDIV2; + cr1_val |= REFCLDIV_0; + + cr1_mask |= SYSCFG_COMBOPHY_CR1_REFCLKSEL; + cr1_val |= refclksel; + + cr1_mask |= SYSCFG_COMBOPHY_CR1_MPLLMULT; + cr1_val |= pllmult; + + /* + * vddcombophy is interconnected with vddcore. Isolation bit should be unset + * before using the ComboPHY. + */ + regmap_update_bits(combophy->regmap, SYSCFG_COMBOPHY_CR2, + SYSCFG_COMBOPHY_CR2_ISO_DIS, SYSCFG_COMBOPHY_CR2_ISO_DIS); + + reset_control_assert(combophy->phy_reset); + + if (combophy->type == PHY_TYPE_PCIE) { + ret = stm32_impedance_tune(combophy); + if (ret) + goto out_iso; + + cr1_mask |= SYSCFG_COMBOPHY_CR1_REFUSEPAD; + cr1_val |= combophy->have_pad_clk ? SYSCFG_COMBOPHY_CR1_REFUSEPAD : 0; + } + + if (!of_property_read_u32(combophy->dev->of_node, "st,rx-equalizer", &val)) { + dev_dbg(combophy->dev, "Set RX equalizer %u\n", val); + if (val > SYSCFG_COMBOPHY_CR4_RX0_EQ) { + dev_err(combophy->dev, "Invalid value %u for rx0 equalizer\n", val); + ret = -EINVAL; + goto out_iso; + } + + regmap_update_bits(combophy->regmap, SYSCFG_COMBOPHY_CR4, + SYSCFG_COMBOPHY_CR4_RX0_EQ, val); + } + + regmap_update_bits(combophy->regmap, SYSCFG_COMBOPHY_CR1, cr1_mask, cr1_val); + + /* + * Force elasticity buffer to be tuned for the reference clock as + * the separated clock model is not supported + */ + regmap_update_bits(combophy->regmap, SYSCFG_COMBOPHY_CR5, + SYSCFG_COMBOPHY_CR5_COMMON_CLOCKS, SYSCFG_COMBOPHY_CR5_COMMON_CLOCKS); + + reset_control_deassert(combophy->phy_reset); + + ret = regmap_read_poll_timeout(combophy->regmap, SYSCFG_COMBOPHY_SR, val, + !(val & STM32MP25_PIPE0_PHYSTATUS), + 10, 1000); + if (ret) { + dev_err(combophy->dev, "timeout, cannot lock PLL\n"); + if (combophy->type == PHY_TYPE_PCIE && !combophy->have_pad_clk) + regmap_update_bits(combophy->regmap, SYSCFG_PCIEPRGCR, + STM32MP25_PCIEPRGCR_EN, 0); + + if (combophy->type != PHY_TYPE_PCIE) + regmap_update_bits(combophy->regmap, SYSCFG_COMBOPHY_CR1, + SYSCFG_COMBOPHY_CR1_REFSSPEN, 0); + + goto out; + } + + + if (combophy->type == PHY_TYPE_PCIE) { + if (!combophy->have_pad_clk) + regmap_update_bits(combophy->regmap, SYSCFG_PCIEPRGCR, + STM32MP25_PCIEPRGCR_EN, STM32MP25_PCIEPRGCR_EN); + + val = readl_relaxed(combophy->base + COMBOPHY_SUP_ANA_MPLL_LOOP_CTL); + val &= ~COMBOPHY_PROP_CNTRL; + val |= propcntrl; + writel_relaxed(val, combophy->base + COMBOPHY_SUP_ANA_MPLL_LOOP_CTL); + } + + return 0; + +out_iso: + reset_control_deassert(combophy->phy_reset); + +out: + regmap_update_bits(combophy->regmap, SYSCFG_COMBOPHY_CR2, 
+ SYSCFG_COMBOPHY_CR2_ISO_DIS, 0); + + return ret; +} + +static struct phy *stm32_combophy_xlate(struct device *dev, + const struct of_phandle_args *args) +{ + struct stm32_combophy *combophy = dev_get_drvdata(dev); + unsigned int type; + + if (args->args_count != 1) { + dev_err(dev, "invalid number of cells in 'phy' property\n"); + return ERR_PTR(-EINVAL); + } + + type = args->args[0]; + if (type != PHY_TYPE_USB3 && type != PHY_TYPE_PCIE) { + dev_err(dev, "unsupported device type: %d\n", type); + return ERR_PTR(-EINVAL); + } + + if (combophy->have_pad_clk && type != PHY_TYPE_PCIE) { + dev_err(dev, "Invalid use of clk_pad for USB3 mode\n"); + return ERR_PTR(-EINVAL); + } + + combophy->type = type; + + return combophy->phy; +} + +static int stm32_combophy_set_mode(struct stm32_combophy *combophy) +{ + int type = combophy->type; + u32 val; + + switch (type) { + case PHY_TYPE_PCIE: + dev_dbg(combophy->dev, "setting PCIe ComboPHY\n"); + val = COMBOPHY_MODESEL_PCIE; + break; + case PHY_TYPE_USB3: + dev_dbg(combophy->dev, "setting USB3 ComboPHY\n"); + val = COMBOPHY_MODESEL_USB; + break; + default: + dev_err(combophy->dev, "Invalid PHY mode %d\n", type); + return -EINVAL; + } + + return regmap_update_bits(combophy->regmap, SYSCFG_COMBOPHY_CR2, + SYSCFG_COMBOPHY_CR2_MODESEL, val); +} + +static int stm32_combophy_suspend_noirq(struct device *dev) +{ + struct stm32_combophy *combophy = dev_get_drvdata(dev); + + /* + * Clocks should be turned off since it is not needed for + * wakeup capability. In case usb-remote wakeup is not enabled, + * combo-phy is already turned off by HCD driver using exit callback + */ + if (combophy->is_init) { + clk_bulk_disable_unprepare(combophy->num_clks, combophy->clks); + + /* since wakeup is enabled for ctrl */ + enable_irq_wake(combophy->irq_wakeup); + } + + return 0; +} + +static int stm32_combophy_resume_noirq(struct device *dev) +{ + struct stm32_combophy *combophy = dev_get_drvdata(dev); + int ret; + + /* + * If clocks was turned off by suspend call for wakeup then needs + * to be turned back ON in resume. 
In case usb-remote wakeup is not + * enabled, clocks already turned ON by HCD driver using init callback + */ + if (combophy->is_init) { + /* since wakeup was enabled for ctrl */ + disable_irq_wake(combophy->irq_wakeup); + + ret = clk_bulk_prepare_enable(combophy->num_clks, combophy->clks); + if (ret) { + dev_err(dev, "can't enable clocks (%d)\n", ret); + return ret; + } + } + + return 0; +} + +static int stm32_combophy_exit(struct phy *phy) +{ + struct stm32_combophy *combophy = phy_get_drvdata(phy); + struct device *dev = combophy->dev; + + combophy->is_init = false; + + if (combophy->type == PHY_TYPE_PCIE && !combophy->have_pad_clk) + regmap_update_bits(combophy->regmap, SYSCFG_PCIEPRGCR, + STM32MP25_PCIEPRGCR_EN, 0); + + if (combophy->type != PHY_TYPE_PCIE) + regmap_update_bits(combophy->regmap, SYSCFG_COMBOPHY_CR1, + SYSCFG_COMBOPHY_CR1_REFSSPEN, 0); + + regmap_update_bits(combophy->regmap, SYSCFG_COMBOPHY_CR2, + SYSCFG_COMBOPHY_CR2_ISO_DIS, 0); + + clk_bulk_disable_unprepare(combophy->num_clks, combophy->clks); + + pm_runtime_put_noidle(dev); + + return 0; +} + +static int stm32_combophy_init(struct phy *phy) +{ + struct stm32_combophy *combophy = phy_get_drvdata(phy); + struct device *dev = combophy->dev; + int ret; + + pm_runtime_get_noresume(dev); + + ret = clk_bulk_prepare_enable(combophy->num_clks, combophy->clks); + if (ret) { + dev_err(dev, "can't enable clocks (%d)\n", ret); + pm_runtime_put_noidle(dev); + return ret; + } + + ret = stm32_combophy_set_mode(combophy); + if (ret) { + dev_err(dev, "combophy mode not set\n"); + clk_bulk_disable_unprepare(combophy->num_clks, combophy->clks); + pm_runtime_put_noidle(dev); + return ret; + } + + ret = stm32_combophy_pll_init(combophy); + if (ret) { + clk_bulk_disable_unprepare(combophy->num_clks, combophy->clks); + pm_runtime_put_noidle(dev); + return ret; + } + + pm_runtime_disable(dev); + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + + combophy->is_init = true; + + return ret; +} + +static const struct phy_ops stm32_combophy_phy_data = { + .init = stm32_combophy_init, + .exit = stm32_combophy_exit, + .owner = THIS_MODULE +}; + +static irqreturn_t stm32_combophy_irq_wakeup_handler(int irq, void *dev_id) +{ + return IRQ_HANDLED; +} + +static int stm32_combophy_get_clocks(struct stm32_combophy *combophy) +{ + int i, ret; + + for (i = 0; i < ARRAY_SIZE(combophy_clks); i++) + combophy->clks[i].id = combophy_clks[i]; + + combophy->num_clks = ARRAY_SIZE(combophy_clks) - 1; + + ret = devm_clk_bulk_get(combophy->dev, combophy->num_clks, combophy->clks); + if (ret) + return ret; + + ret = devm_clk_bulk_get_optional(combophy->dev, 1, combophy->clks + combophy->num_clks); + if (ret) + return ret; + + if (combophy->clks[combophy->num_clks].clk != NULL) { + combophy->have_pad_clk = true; + combophy->num_clks++; + } + + return 0; +} + +static int stm32_combophy_probe(struct platform_device *pdev) +{ + struct stm32_combophy *combophy; + struct device *dev = &pdev->dev; + struct phy_provider *phy_provider; + int ret, irq; + + combophy = devm_kzalloc(dev, sizeof(*combophy), GFP_KERNEL); + if (!combophy) + return -ENOMEM; + + combophy->dev = dev; + + dev_set_drvdata(dev, combophy); + + combophy->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(combophy->base)) + return PTR_ERR(combophy->base); + + ret = stm32_combophy_get_clocks(combophy); + if (ret) + return ret; + + combophy->phy_reset = devm_reset_control_get_exclusive(dev, "phy"); + if (IS_ERR(combophy->phy_reset)) + return dev_err_probe(dev, PTR_ERR(combophy->phy_reset), + 
"Failed to get PHY reset\n"); + + combophy->regmap = syscon_regmap_lookup_by_compatible("st,stm32mp25-syscfg"); + if (IS_ERR(combophy->regmap)) + return dev_err_probe(dev, PTR_ERR(combophy->regmap), + "No syscfg specified\n"); + + combophy->phy = devm_phy_create(dev, NULL, &stm32_combophy_phy_data); + if (IS_ERR(combophy->phy)) + return dev_err_probe(dev, PTR_ERR(combophy->phy), + "failed to create PCIe/USB3 ComboPHY\n"); + + if (device_property_read_bool(dev, "wakeup-source")) { + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return dev_err_probe(dev, irq, "failed to get IRQ\n"); + combophy->irq_wakeup = irq; + + ret = devm_request_threaded_irq(dev, combophy->irq_wakeup, NULL, + stm32_combophy_irq_wakeup_handler, IRQF_ONESHOT, + NULL, NULL); + if (ret) + return dev_err_probe(dev, ret, "unable to request wake IRQ %d\n", + combophy->irq_wakeup); + } + + ret = devm_pm_runtime_enable(dev); + if (ret) + return dev_err_probe(dev, ret, "Failed to enable pm runtime\n"); + + phy_set_drvdata(combophy->phy, combophy); + + phy_provider = devm_of_phy_provider_register(dev, stm32_combophy_xlate); + + return PTR_ERR_OR_ZERO(phy_provider); +} + +static const struct dev_pm_ops stm32_combophy_pm_ops = { + NOIRQ_SYSTEM_SLEEP_PM_OPS(stm32_combophy_suspend_noirq, + stm32_combophy_resume_noirq) +}; + +static const struct of_device_id stm32_combophy_of_match[] = { + { .compatible = "st,stm32mp25-combophy", }, + { }, +}; +MODULE_DEVICE_TABLE(of, stm32_combophy_of_match); + +static struct platform_driver stm32_combophy_driver = { + .probe = stm32_combophy_probe, + .driver = { + .name = "stm32-combophy", + .of_match_table = stm32_combophy_of_match, + .pm = pm_sleep_ptr(&stm32_combophy_pm_ops) + } +}; + +module_platform_driver(stm32_combophy_driver); + +MODULE_AUTHOR("Christian Bruel <christian.bruel@foss.st.com>"); +MODULE_DESCRIPTION("STM32MP25 Combophy USB3/PCIe controller driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/phy/st/phy-stm32-usbphyc.c b/drivers/phy/st/phy-stm32-usbphyc.c index 9dbe60dcf319..b917cd413de7 100644 --- a/drivers/phy/st/phy-stm32-usbphyc.c +++ b/drivers/phy/st/phy-stm32-usbphyc.c @@ -812,7 +812,7 @@ MODULE_DEVICE_TABLE(of, stm32_usbphyc_of_match); static struct platform_driver stm32_usbphyc_driver = { .probe = stm32_usbphyc_probe, - .remove_new = stm32_usbphyc_remove, + .remove = stm32_usbphyc_remove, .driver = { .of_match_table = stm32_usbphyc_of_match, .name = "stm32-usbphyc", diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c index 342f5ccf611d..f9b862c1c438 100644 --- a/drivers/phy/tegra/xusb.c +++ b/drivers/phy/tegra/xusb.c @@ -1327,7 +1327,7 @@ static struct platform_driver tegra_xusb_padctl_driver = { .pm = &tegra_xusb_padctl_pm_ops, }, .probe = tegra_xusb_padctl_probe, - .remove_new = tegra_xusb_padctl_remove, + .remove = tegra_xusb_padctl_remove, }; module_platform_driver(tegra_xusb_padctl_driver); diff --git a/drivers/phy/ti/phy-am654-serdes.c b/drivers/phy/ti/phy-am654-serdes.c index 3bf3aff4b1c7..431b223996e0 100644 --- a/drivers/phy/ti/phy-am654-serdes.c +++ b/drivers/phy/ti/phy-am654-serdes.c @@ -837,7 +837,7 @@ static void serdes_am654_remove(struct platform_device *pdev) static struct platform_driver serdes_am654_driver = { .probe = serdes_am654_probe, - .remove_new = serdes_am654_remove, + .remove = serdes_am654_remove, .driver = { .name = "phy-am654", .of_match_table = serdes_am654_id_table, diff --git a/drivers/phy/ti/phy-da8xx-usb.c b/drivers/phy/ti/phy-da8xx-usb.c index 68aa595b6ad8..1d81a1e6ec6b 100644 --- a/drivers/phy/ti/phy-da8xx-usb.c +++ 
b/drivers/phy/ti/phy-da8xx-usb.c @@ -277,11 +277,11 @@ MODULE_DEVICE_TABLE(of, da8xx_usb_phy_ids); static struct platform_driver da8xx_usb_phy_driver = { .probe = da8xx_usb_phy_probe, - .remove_new = da8xx_usb_phy_remove, + .remove = da8xx_usb_phy_remove, .driver = { .name = "da8xx-usb-phy", .pm = &da8xx_usb_phy_pm_ops, - .of_match_table = da8xx_usb_phy_ids, + .of_match_table = da8xx_usb_phy_ids, }, }; diff --git a/drivers/phy/ti/phy-dm816x-usb.c b/drivers/phy/ti/phy-dm816x-usb.c index d5ae972a31fb..e8f842d4e841 100644 --- a/drivers/phy/ti/phy-dm816x-usb.c +++ b/drivers/phy/ti/phy-dm816x-usb.c @@ -259,7 +259,7 @@ static void dm816x_usb_phy_remove(struct platform_device *pdev) static struct platform_driver dm816x_usb_phy_driver = { .probe = dm816x_usb_phy_probe, - .remove_new = dm816x_usb_phy_remove, + .remove = dm816x_usb_phy_remove, .driver = { .name = "dm816x-usb-phy", .pm = &dm816x_usb_phy_pm_ops, diff --git a/drivers/phy/ti/phy-gmii-sel.c b/drivers/phy/ti/phy-gmii-sel.c index 103b266fec77..e0ca59ae3153 100644 --- a/drivers/phy/ti/phy-gmii-sel.c +++ b/drivers/phy/ti/phy-gmii-sel.c @@ -230,7 +230,8 @@ static const struct phy_gmii_sel_soc_data phy_gmii_sel_cpsw5g_soc_j7200 = { .use_of_data = true, .regfields = phy_gmii_sel_fields_am654, - .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII) | BIT(PHY_INTERFACE_MODE_SGMII), + .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII) | BIT(PHY_INTERFACE_MODE_SGMII) | + BIT(PHY_INTERFACE_MODE_USXGMII), .num_ports = 4, .num_qsgmii_main_ports = 1, }; diff --git a/drivers/phy/ti/phy-j721e-wiz.c b/drivers/phy/ti/phy-j721e-wiz.c index c6e846d385d2..ab2a4f2c0a5b 100644 --- a/drivers/phy/ti/phy-j721e-wiz.c +++ b/drivers/phy/ti/phy-j721e-wiz.c @@ -1685,7 +1685,7 @@ static DEFINE_NOIRQ_DEV_PM_OPS(wiz_pm_ops, NULL, wiz_resume_noirq); static struct platform_driver wiz_driver = { .probe = wiz_probe, - .remove_new = wiz_remove, + .remove = wiz_remove, .driver = { .name = "wiz", .of_match_table = wiz_id_table, diff --git a/drivers/phy/ti/phy-omap-usb2.c b/drivers/phy/ti/phy-omap-usb2.c index 78e19b128962..c1a0ef979142 100644 --- a/drivers/phy/ti/phy-omap-usb2.c +++ b/drivers/phy/ti/phy-omap-usb2.c @@ -511,7 +511,7 @@ static void omap_usb2_remove(struct platform_device *pdev) static struct platform_driver omap_usb2_driver = { .probe = omap_usb2_probe, - .remove_new = omap_usb2_remove, + .remove = omap_usb2_remove, .driver = { .name = "omap-usb2", .of_match_table = omap_usb2_id_table, diff --git a/drivers/phy/ti/phy-ti-pipe3.c b/drivers/phy/ti/phy-ti-pipe3.c index 874c1a25ce36..da2cbacb982c 100644 --- a/drivers/phy/ti/phy-ti-pipe3.c +++ b/drivers/phy/ti/phy-ti-pipe3.c @@ -920,7 +920,7 @@ MODULE_DEVICE_TABLE(of, ti_pipe3_id_table); static struct platform_driver ti_pipe3_driver = { .probe = ti_pipe3_probe, - .remove_new = ti_pipe3_remove, + .remove = ti_pipe3_remove, .driver = { .name = "ti-pipe3", .of_match_table = ti_pipe3_id_table, diff --git a/drivers/phy/ti/phy-twl4030-usb.c b/drivers/phy/ti/phy-twl4030-usb.c index 6b265992d988..6f12b38cd894 100644 --- a/drivers/phy/ti/phy-twl4030-usb.c +++ b/drivers/phy/ti/phy-twl4030-usb.c @@ -834,7 +834,7 @@ MODULE_DEVICE_TABLE(of, twl4030_usb_id_table); static struct platform_driver twl4030_usb_driver = { .probe = twl4030_usb_probe, - .remove_new = twl4030_usb_remove, + .remove = twl4030_usb_remove, .driver = { .name = "twl4030_usb", .pm = &twl4030_usb_pm_ops, diff --git a/drivers/phy/xilinx/phy-zynqmp.c b/drivers/phy/xilinx/phy-zynqmp.c index e6579002f114..05a4a59f7c40 100644 --- a/drivers/phy/xilinx/phy-zynqmp.c +++ 
b/drivers/phy/xilinx/phy-zynqmp.c @@ -1071,7 +1071,7 @@ MODULE_DEVICE_TABLE(of, xpsgtr_of_match); static struct platform_driver xpsgtr_driver = { .probe = xpsgtr_probe, - .remove_new = xpsgtr_remove, + .remove = xpsgtr_remove, .driver = { .name = "xilinx-psgtr", .of_match_table = xpsgtr_of_match, diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig index 389d5a193e5d..f5fc33a8bf44 100644 --- a/drivers/power/reset/Kconfig +++ b/drivers/power/reset/Kconfig @@ -79,6 +79,7 @@ config POWER_RESET_EP93XX bool "Cirrus EP93XX reset driver" if COMPILE_TEST depends on MFD_SYSCON default ARCH_EP93XX + select AUXILIARY_BUS help This driver provides restart support for Cirrus EP93XX SoC. diff --git a/drivers/power/reset/at91-poweroff.c b/drivers/power/reset/at91-poweroff.c index 93eece027865..7bc779055cf3 100644 --- a/drivers/power/reset/at91-poweroff.c +++ b/drivers/power/reset/at91-poweroff.c @@ -223,7 +223,7 @@ MODULE_DEVICE_TABLE(of, at91_poweroff_of_match); static struct platform_driver at91_poweroff_driver = { .probe = at91_poweroff_probe, - .remove_new = at91_poweroff_remove, + .remove = at91_poweroff_remove, .driver = { .name = "at91-poweroff", .of_match_table = at91_poweroff_of_match, diff --git a/drivers/power/reset/at91-reset.c b/drivers/power/reset/at91-reset.c index 16512654295f..036b18a1f90f 100644 --- a/drivers/power/reset/at91-reset.c +++ b/drivers/power/reset/at91-reset.c @@ -427,7 +427,7 @@ static void at91_reset_remove(struct platform_device *pdev) static struct platform_driver at91_reset_driver = { .probe = at91_reset_probe, - .remove_new = at91_reset_remove, + .remove = at91_reset_remove, .driver = { .name = "at91-reset", .of_match_table = at91_reset_of_match, diff --git a/drivers/power/reset/at91-sama5d2_shdwc.c b/drivers/power/reset/at91-sama5d2_shdwc.c index 959ce0dbe91d..edb0df86aff4 100644 --- a/drivers/power/reset/at91-sama5d2_shdwc.c +++ b/drivers/power/reset/at91-sama5d2_shdwc.c @@ -441,7 +441,7 @@ static void at91_shdwc_remove(struct platform_device *pdev) static struct platform_driver at91_shdwc_driver = { .probe = at91_shdwc_probe, - .remove_new = at91_shdwc_remove, + .remove = at91_shdwc_remove, .driver = { .name = "at91-shdwc", .of_match_table = at91_shdwc_of_match, diff --git a/drivers/power/reset/keystone-reset.c b/drivers/power/reset/keystone-reset.c index dbc4ff61cd74..cfaa54ced0d0 100644 --- a/drivers/power/reset/keystone-reset.c +++ b/drivers/power/reset/keystone-reset.c @@ -16,7 +16,6 @@ #include <linux/mfd/syscon.h> #include <linux/of.h> -#define RSTYPE_RG 0x0 #define RSCTRL_RG 0x4 #define RSCFG_RG 0x8 #define RSISO_RG 0xc @@ -28,7 +27,6 @@ #define RSMUX_OMODE_MASK 0xe #define RSMUX_OMODE_RESET_ON 0xa #define RSMUX_OMODE_RESET_OFF 0x0 -#define RSMUX_LOCK_MASK 0x1 #define RSMUX_LOCK_SET 0x1 #define RSCFG_RSTYPE_SOFT 0x300f diff --git a/drivers/power/reset/ltc2952-poweroff.c b/drivers/power/reset/ltc2952-poweroff.c index fa25fbd53934..1a6fc8d38e20 100644 --- a/drivers/power/reset/ltc2952-poweroff.c +++ b/drivers/power/reset/ltc2952-poweroff.c @@ -305,7 +305,7 @@ MODULE_DEVICE_TABLE(of, of_ltc2952_poweroff_match); static struct platform_driver ltc2952_poweroff_driver = { .probe = ltc2952_poweroff_probe, - .remove_new = ltc2952_poweroff_remove, + .remove = ltc2952_poweroff_remove, .driver = { .name = "ltc2952-poweroff", .of_match_table = of_ltc2952_poweroff_match, diff --git a/drivers/power/reset/qnap-poweroff.c b/drivers/power/reset/qnap-poweroff.c index e0f2ff6b147c..f7d8fc935d51 100644 --- a/drivers/power/reset/qnap-poweroff.c +++ 
b/drivers/power/reset/qnap-poweroff.c @@ -118,7 +118,7 @@ static void qnap_power_off_remove(struct platform_device *pdev) static struct platform_driver qnap_power_off_driver = { .probe = qnap_power_off_probe, - .remove_new = qnap_power_off_remove, + .remove = qnap_power_off_remove, .driver = { .name = "qnap_power_off", .of_match_table = of_match_ptr(qnap_power_off_of_match_table), diff --git a/drivers/power/reset/syscon-reboot.c b/drivers/power/reset/syscon-reboot.c index 4d622c19bc48..d623d77e657e 100644 --- a/drivers/power/reset/syscon-reboot.c +++ b/drivers/power/reset/syscon-reboot.c @@ -61,7 +61,8 @@ static int syscon_reboot_probe(struct platform_device *pdev) priority = 192; if (of_property_read_u32(pdev->dev.of_node, "offset", &ctx->offset)) - return -EINVAL; + if (of_property_read_u32(pdev->dev.of_node, "reg", &ctx->offset)) + return -EINVAL; value_err = of_property_read_u32(pdev->dev.of_node, "value", &ctx->value); mask_err = of_property_read_u32(pdev->dev.of_node, "mask", &ctx->mask); diff --git a/drivers/power/supply/88pm860x_battery.c b/drivers/power/supply/88pm860x_battery.c index 34619c4d4ece..b7938fbb24a5 100644 --- a/drivers/power/supply/88pm860x_battery.c +++ b/drivers/power/supply/88pm860x_battery.c @@ -422,7 +422,7 @@ static irqreturn_t pm860x_batt_handler(int irq, void *data) info->temp_type = PM860X_TEMP_TINT; } mutex_unlock(&info->lock); - /* clear ccnt since battery is attached or dettached */ + /* clear ccnt since battery is attached or detached */ clear_ccnt(info, &ccnt_data); return IRQ_HANDLED; } @@ -566,7 +566,7 @@ static int measure_temp(struct pm860x_battery_info *info, int *data) ret = measure_12bit_voltage(info, PM8607_GPADC1_MEAS1, data); if (ret) return ret; - /* meausered Vtbat(mV) / Ibias_current(11uA)*/ + /* measured Vtbat(mV) / Ibias_current(11uA)*/ *data = (*data * 1000) / GPBIAS2_GPADC1_UA; if (*data > TBAT_NEG_25D) { diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig index bcfa63fb9f1e..9f2eef6787f7 100644 --- a/drivers/power/supply/Kconfig +++ b/drivers/power/supply/Kconfig @@ -493,6 +493,16 @@ config CHARGER_TWL4030 help Say Y here to enable support for TWL4030 Battery Charge Interface. +config CHARGER_TWL6030 + tristate "OMAP TWL6030 BCI charger driver" + depends on IIO && TWL4030_CORE + help + Say Y here to enable support for TWL6030/6032 Battery Charge + Interface. + + This driver can be built as a module. If so, the module will be + called twl6030_charger.
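A minimal sketch (not part of the patch) of the lookup order the syscon-reboot hunk above introduces: the probe now accepts either the legacy "offset" property or a standard "reg" property for the reboot register. The helper name lookup_reboot_offset is illustrative only.

#include <linux/of.h>

static int lookup_reboot_offset(const struct device_node *np, u32 *offset)
{
	/* Prefer the legacy "offset" property when it is present... */
	if (!of_property_read_u32(np, "offset", offset))
		return 0;

	/* ...otherwise fall back to the standard "reg" property. */
	return of_property_read_u32(np, "reg", offset);
}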
+ config CHARGER_LP8727 tristate "TI/National Semiconductor LP8727 charger driver" depends on I2C diff --git a/drivers/power/supply/Makefile b/drivers/power/supply/Makefile index 8dcb41545317..59c4a9f40d28 100644 --- a/drivers/power/supply/Makefile +++ b/drivers/power/supply/Makefile @@ -69,6 +69,7 @@ obj-$(CONFIG_CHARGER_CPCAP) += cpcap-charger.o obj-$(CONFIG_CHARGER_ISP1704) += isp1704_charger.o obj-$(CONFIG_CHARGER_MAX8903) += max8903_charger.o obj-$(CONFIG_CHARGER_TWL4030) += twl4030_charger.o +obj-$(CONFIG_CHARGER_TWL6030) += twl6030_charger.o obj-$(CONFIG_CHARGER_LP8727) += lp8727_charger.o obj-$(CONFIG_CHARGER_LP8788) += lp8788-charger.o obj-$(CONFIG_CHARGER_GPIO) += gpio-charger.o diff --git a/drivers/power/supply/ab8500_bmdata.c b/drivers/power/supply/ab8500_bmdata.c index 3e6ea22372b2..19ed52852804 100644 --- a/drivers/power/supply/ab8500_bmdata.c +++ b/drivers/power/supply/ab8500_bmdata.c @@ -16,7 +16,7 @@ /* Default: temperature hysteresis */ #define AB8500_TEMP_HYSTERESIS 3 -static struct power_supply_battery_ocv_table ocv_cap_tbl[] = { +static const struct power_supply_battery_ocv_table ocv_cap_tbl[] = { { .ocv = 4186000, .capacity = 100}, { .ocv = 4163000, .capacity = 99}, { .ocv = 4114000, .capacity = 95}, @@ -48,7 +48,7 @@ static struct power_supply_battery_ocv_table ocv_cap_tbl[] = { * temperature values to work. Factory resistance is 300 mOhm and the * resistance values to the right are percentages of 300 mOhm. */ -static struct power_supply_resistance_temp_table temp_to_batres_tbl_thermistor[] = { +static const struct power_supply_resistance_temp_table temp_to_batres_tbl_thermistor[] = { { .temp = 40, .resistance = 40 /* 120 mOhm */ }, { .temp = 30, .resistance = 45 /* 135 mOhm */ }, { .temp = 20, .resistance = 55 /* 165 mOhm */ }, diff --git a/drivers/power/supply/ab8500_btemp.c b/drivers/power/supply/ab8500_btemp.c index 56f136b2d071..37039e28fc4b 100644 --- a/drivers/power/supply/ab8500_btemp.c +++ b/drivers/power/supply/ab8500_btemp.c @@ -283,7 +283,7 @@ static void ab8500_btemp_periodic_work(struct work_struct *work) dev_warn(di->dev, "failed to identify the battery\n"); } - /* Failover if a reading is erroneous, use last meausurement */ + /* Failover if a reading is erroneous, use last measurement */ ret = thermal_zone_get_temp(di->tz, &bat_temp); if (ret) { dev_err(di->dev, "error reading temperature\n"); @@ -818,7 +818,7 @@ MODULE_DEVICE_TABLE(of, ab8500_btemp_match); struct platform_driver ab8500_btemp_driver = { .probe = ab8500_btemp_probe, - .remove_new = ab8500_btemp_remove, + .remove = ab8500_btemp_remove, .driver = { .name = "ab8500-btemp", .of_match_table = ab8500_btemp_match, diff --git a/drivers/power/supply/ab8500_chargalg.c b/drivers/power/supply/ab8500_chargalg.c index 854491ad3ecd..14e1b448bd39 100644 --- a/drivers/power/supply/ab8500_chargalg.c +++ b/drivers/power/supply/ab8500_chargalg.c @@ -1837,7 +1837,7 @@ static const struct of_device_id ab8500_chargalg_match[] = { struct platform_driver ab8500_chargalg_driver = { .probe = ab8500_chargalg_probe, - .remove_new = ab8500_chargalg_remove, + .remove = ab8500_chargalg_remove, .driver = { .name = "ab8500_chargalg", .of_match_table = ab8500_chargalg_match, diff --git a/drivers/power/supply/ab8500_charger.c b/drivers/power/supply/ab8500_charger.c index 93181ebfb324..cece8d6753ac 100644 --- a/drivers/power/supply/ab8500_charger.c +++ b/drivers/power/supply/ab8500_charger.c @@ -3711,7 +3711,7 @@ MODULE_DEVICE_TABLE(of, ab8500_charger_match); static struct platform_driver ab8500_charger_driver = { .probe 
= ab8500_charger_probe, - .remove_new = ab8500_charger_remove, + .remove = ab8500_charger_remove, .driver = { .name = "ab8500-charger", .of_match_table = ab8500_charger_match, diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c index a71903b1bf78..78871a2143de 100644 --- a/drivers/power/supply/ab8500_fg.c +++ b/drivers/power/supply/ab8500_fg.c @@ -3242,7 +3242,7 @@ MODULE_DEVICE_TABLE(of, ab8500_fg_match); struct platform_driver ab8500_fg_driver = { .probe = ab8500_fg_probe, - .remove_new = ab8500_fg_remove, + .remove = ab8500_fg_remove, .driver = { .name = "ab8500-fg", .of_match_table = ab8500_fg_match, diff --git a/drivers/power/supply/acer_a500_battery.c b/drivers/power/supply/acer_a500_battery.c index ef5c419b1b7f..39d85b11a13c 100644 --- a/drivers/power/supply/acer_a500_battery.c +++ b/drivers/power/supply/acer_a500_battery.c @@ -233,14 +233,15 @@ static int a500_battery_probe(struct platform_device *pdev) psy_cfg.of_node = pdev->dev.parent->of_node; psy_cfg.drv_data = bat; + psy_cfg.no_wakeup_source = true; bat->regmap = dev_get_regmap(pdev->dev.parent, "KB930"); if (!bat->regmap) return -EINVAL; - bat->psy = devm_power_supply_register_no_ws(&pdev->dev, - &a500_battery_desc, - &psy_cfg); + bat->psy = devm_power_supply_register(&pdev->dev, + &a500_battery_desc, + &psy_cfg); if (IS_ERR(bat->psy)) return dev_err_probe(&pdev->dev, PTR_ERR(bat->psy), "failed to register battery\n"); @@ -285,7 +286,7 @@ static struct platform_driver a500_battery_driver = { .pm = &a500_battery_pm_ops, }, .probe = a500_battery_probe, - .remove_new = a500_battery_remove, + .remove = a500_battery_remove, }; module_platform_driver(a500_battery_driver); diff --git a/drivers/power/supply/act8945a_charger.c b/drivers/power/supply/act8945a_charger.c index 51122bfbf196..b2b82f97a471 100644 --- a/drivers/power/supply/act8945a_charger.c +++ b/drivers/power/supply/act8945a_charger.c @@ -651,7 +651,7 @@ static struct platform_driver act8945a_charger_driver = { .name = "act8945a-charger", }, .probe = act8945a_charger_probe, - .remove_new = act8945a_charger_remove, + .remove = act8945a_charger_remove, }; module_platform_driver(act8945a_charger_driver); diff --git a/drivers/power/supply/adp5061.c b/drivers/power/supply/adp5061.c index dac9875d993c..458fd3024373 100644 --- a/drivers/power/supply/adp5061.c +++ b/drivers/power/supply/adp5061.c @@ -590,7 +590,7 @@ static int adp5061_get_property(struct power_supply *psy, case POWER_SUPPLY_PROP_VOLTAGE_AVG: /* * This property is used to set the VWEAK threshold - * bellow this value, weak charge mode is entered + * below this value, weak charge mode is entered * above this value, fast chargerge mode is entered */ return adp5061_get_vweak_th(st, val); diff --git a/drivers/power/supply/axp20x_battery.c b/drivers/power/supply/axp20x_battery.c index f71cc90fea12..fa27195f074e 100644 --- a/drivers/power/supply/axp20x_battery.c +++ b/drivers/power/supply/axp20x_battery.c @@ -354,17 +354,18 @@ static int axp20x_battery_get_prop(struct power_supply *psy, if (ret) return ret; + /* IIO framework gives mA but Power Supply framework gives uA */ if (reg & AXP20X_PWR_STATUS_BAT_CHARGING) { - ret = iio_read_channel_processed(axp20x_batt->batt_chrg_i, &val->intval); + ret = iio_read_channel_processed_scale(axp20x_batt->batt_chrg_i, + &val->intval, 1000); } else { - ret = iio_read_channel_processed(axp20x_batt->batt_dischrg_i, &val1); + ret = iio_read_channel_processed_scale(axp20x_batt->batt_dischrg_i, + &val1, 1000); val->intval = -val1; } if (ret) return ret; - /* 
IIO framework gives mA but Power Supply framework gives uA */ - val->intval *= 1000; break; case POWER_SUPPLY_PROP_CAPACITY: @@ -406,13 +407,12 @@ static int axp20x_battery_get_prop(struct power_supply *psy, break; case POWER_SUPPLY_PROP_VOLTAGE_NOW: - ret = iio_read_channel_processed(axp20x_batt->batt_v, - &val->intval); + /* IIO framework gives mV but Power Supply framework gives uV */ + ret = iio_read_channel_processed_scale(axp20x_batt->batt_v, + &val->intval, 1000); if (ret) return ret; - /* IIO framework gives mV but Power Supply framework gives uV */ - val->intval *= 1000; break; default: @@ -519,13 +519,15 @@ static int axp717_battery_get_prop(struct power_supply *psy, * The offset of this value is currently unknown and is * not documented in the datasheet. Based on * observation it's assumed to be somewhere around - * 450ma. I will leave the value raw for now. + * 450ma. I will leave the value raw for now. Note that + * IIO framework gives mA but Power Supply framework + * gives uA. */ - ret = iio_read_channel_processed(axp20x_batt->batt_chrg_i, &val->intval); + ret = iio_read_channel_processed_scale(axp20x_batt->batt_chrg_i, + &val->intval, 1000); if (ret) return ret; - /* IIO framework gives mA but Power Supply framework gives uA */ - val->intval *= 1000; + return 0; case POWER_SUPPLY_PROP_CAPACITY: @@ -564,13 +566,12 @@ static int axp717_battery_get_prop(struct power_supply *psy, return 0; case POWER_SUPPLY_PROP_VOLTAGE_NOW: - ret = iio_read_channel_processed(axp20x_batt->batt_v, - &val->intval); + /* IIO framework gives mV but Power Supply framework gives uV */ + ret = iio_read_channel_processed_scale(axp20x_batt->batt_v, + &val->intval, 1000); if (ret) return ret; - /* IIO framework gives mV but Power Supply framework gives uV */ - val->intval *= 1000; return 0; case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT: diff --git a/drivers/power/supply/axp20x_usb_power.c b/drivers/power/supply/axp20x_usb_power.c index 2766352ab737..9722912268fe 100644 --- a/drivers/power/supply/axp20x_usb_power.c +++ b/drivers/power/supply/axp20x_usb_power.c @@ -220,16 +220,15 @@ static int axp20x_usb_power_get_property(struct power_supply *psy, return 0; case POWER_SUPPLY_PROP_VOLTAGE_NOW: if (IS_ENABLED(CONFIG_AXP20X_ADC)) { - ret = iio_read_channel_processed(power->vbus_v, - &val->intval); - if (ret) - return ret; - /* * IIO framework gives mV but Power Supply framework * gives uV. */ - val->intval *= 1000; + ret = iio_read_channel_processed_scale(power->vbus_v, + &val->intval, 1000); + if (ret) + return ret; + return 0; } @@ -253,16 +252,15 @@ static int axp20x_usb_power_get_property(struct power_supply *psy, return 0; case POWER_SUPPLY_PROP_CURRENT_NOW: if (IS_ENABLED(CONFIG_AXP20X_ADC)) { - ret = iio_read_channel_processed(power->vbus_i, - &val->intval); - if (ret) - return ret; - /* * IIO framework gives mA but Power Supply framework * gives uA. */ - val->intval *= 1000; + ret = iio_read_channel_processed_scale(power->vbus_i, + &val->intval, 1000); + if (ret) + return ret; + return 0; } @@ -374,16 +372,15 @@ static int axp717_usb_power_get_property(struct power_supply *psy, break; case POWER_SUPPLY_PROP_VOLTAGE_NOW: if (IS_ENABLED(CONFIG_AXP20X_ADC)) { - ret = iio_read_channel_processed(power->vbus_v, - &val->intval); - if (ret) - return ret; - /* * IIO framework gives mV but Power Supply framework * gives uV. 
*/ - val->intval *= 1000; + ret = iio_read_channel_processed_scale(power->vbus_v, + &val->intval, 1000); + if (ret) + return ret; + return 0; } diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c index 750fda543308..40c5ac7a1118 100644 --- a/drivers/power/supply/bq27xxx_battery.c +++ b/drivers/power/supply/bq27xxx_battery.c @@ -449,9 +449,29 @@ static u8 [BQ27XXX_REG_AP] = 0x18, BQ27XXX_DM_REG_ROWS, }, + bq27426_regs[BQ27XXX_REG_MAX] = { + [BQ27XXX_REG_CTRL] = 0x00, + [BQ27XXX_REG_TEMP] = 0x02, + [BQ27XXX_REG_INT_TEMP] = 0x1e, + [BQ27XXX_REG_VOLT] = 0x04, + [BQ27XXX_REG_AI] = 0x10, + [BQ27XXX_REG_FLAGS] = 0x06, + [BQ27XXX_REG_TTE] = INVALID_REG_ADDR, + [BQ27XXX_REG_TTF] = INVALID_REG_ADDR, + [BQ27XXX_REG_TTES] = INVALID_REG_ADDR, + [BQ27XXX_REG_TTECP] = INVALID_REG_ADDR, + [BQ27XXX_REG_NAC] = 0x08, + [BQ27XXX_REG_RC] = 0x0c, + [BQ27XXX_REG_FCC] = 0x0e, + [BQ27XXX_REG_CYCT] = INVALID_REG_ADDR, + [BQ27XXX_REG_AE] = INVALID_REG_ADDR, + [BQ27XXX_REG_SOC] = 0x1c, + [BQ27XXX_REG_DCAP] = INVALID_REG_ADDR, + [BQ27XXX_REG_AP] = 0x18, + BQ27XXX_DM_REG_ROWS, + }, #define bq27411_regs bq27421_regs #define bq27425_regs bq27421_regs -#define bq27426_regs bq27421_regs #define bq27441_regs bq27421_regs #define bq27621_regs bq27421_regs bq27z561_regs[BQ27XXX_REG_MAX] = { @@ -769,10 +789,23 @@ static enum power_supply_property bq27421_props[] = { }; #define bq27411_props bq27421_props #define bq27425_props bq27421_props -#define bq27426_props bq27421_props #define bq27441_props bq27421_props #define bq27621_props bq27421_props +static enum power_supply_property bq27426_props[] = { + POWER_SUPPLY_PROP_STATUS, + POWER_SUPPLY_PROP_PRESENT, + POWER_SUPPLY_PROP_VOLTAGE_NOW, + POWER_SUPPLY_PROP_CURRENT_NOW, + POWER_SUPPLY_PROP_CAPACITY, + POWER_SUPPLY_PROP_CAPACITY_LEVEL, + POWER_SUPPLY_PROP_TEMP, + POWER_SUPPLY_PROP_TECHNOLOGY, + POWER_SUPPLY_PROP_CHARGE_FULL, + POWER_SUPPLY_PROP_CHARGE_NOW, + POWER_SUPPLY_PROP_MANUFACTURER, +}; + static enum power_supply_property bq27z561_props[] = { POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_PRESENT, @@ -2131,6 +2164,7 @@ int bq27xxx_battery_setup(struct bq27xxx_device_info *di) struct power_supply_config psy_cfg = { .of_node = di->dev->of_node, .drv_data = di, + .no_wakeup_source = true, }; int ret; @@ -2157,7 +2191,7 @@ int bq27xxx_battery_setup(struct bq27xxx_device_info *di) psy_desc->get_property = bq27xxx_battery_get_property; psy_desc->external_power_changed = bq27xxx_external_power_changed; - di->bat = devm_power_supply_register_no_ws(di->dev, psy_desc, &psy_cfg); + di->bat = devm_power_supply_register(di->dev, psy_desc, &psy_cfg); if (IS_ERR(di->bat)) return dev_err_probe(di->dev, PTR_ERR(di->bat), "failed to register battery\n"); diff --git a/drivers/power/supply/charger-manager.c b/drivers/power/supply/charger-manager.c index 09ec0ecf1486..a69faef444c0 100644 --- a/drivers/power/supply/charger-manager.c +++ b/drivers/power/supply/charger-manager.c @@ -221,7 +221,7 @@ static bool is_charging(struct charger_manager *cm) /* If at least one of the charger is charging, return yes */ for (i = 0; cm->desc->psy_charger_stat[i]; i++) { - /* 1. The charger sholuld not be DISABLED */ + /* 1. 
The charger should not be DISABLED */ if (cm->emergency_stop) continue; if (!cm->charger_enabled) @@ -1739,7 +1739,7 @@ static struct platform_driver charger_manager_driver = { .of_match_table = charger_manager_match, }, .probe = charger_manager_probe, - .remove_new = charger_manager_remove, + .remove = charger_manager_remove, .id_table = charger_manager_id, }; diff --git a/drivers/power/supply/cpcap-battery.c b/drivers/power/supply/cpcap-battery.c index 30ec76cdf34b..813037c00ded 100644 --- a/drivers/power/supply/cpcap-battery.c +++ b/drivers/power/supply/cpcap-battery.c @@ -1169,7 +1169,7 @@ static struct platform_driver cpcap_battery_driver = { .of_match_table = of_match_ptr(cpcap_battery_id_table), }, .probe = cpcap_battery_probe, - .remove_new = cpcap_battery_remove, + .remove = cpcap_battery_remove, }; module_platform_driver(cpcap_battery_driver); diff --git a/drivers/power/supply/cpcap-charger.c b/drivers/power/supply/cpcap-charger.c index 91e7292d86bb..7781b45a67a7 100644 --- a/drivers/power/supply/cpcap-charger.c +++ b/drivers/power/supply/cpcap-charger.c @@ -969,7 +969,7 @@ static struct platform_driver cpcap_charger_driver = { .of_match_table = cpcap_charger_id_table, }, .shutdown = cpcap_charger_shutdown, - .remove_new = cpcap_charger_remove, + .remove = cpcap_charger_remove, }; module_platform_driver(cpcap_charger_driver); diff --git a/drivers/power/supply/cros_usbpd-charger.c b/drivers/power/supply/cros_usbpd-charger.c index bed3e2e9bfea..47d3f58aa15c 100644 --- a/drivers/power/supply/cros_usbpd-charger.c +++ b/drivers/power/supply/cros_usbpd-charger.c @@ -618,6 +618,7 @@ static int cros_usbpd_charger_probe(struct platform_device *pd) psy_desc->external_power_changed = cros_usbpd_charger_power_changed; psy_cfg.drv_data = port; + psy_cfg.no_wakeup_source = true; if (cros_usbpd_charger_port_is_dedicated(port)) { sprintf(port->name, CHARGER_DEDICATED_DIR_NAME); @@ -644,8 +645,7 @@ static int cros_usbpd_charger_probe(struct platform_device *pd) psy_desc->name = port->name; - psy = devm_power_supply_register_no_ws(dev, psy_desc, - &psy_cfg); + psy = devm_power_supply_register(dev, psy_desc, &psy_cfg); if (IS_ERR(psy)) { dev_err(dev, "Failed to register power supply\n"); continue; diff --git a/drivers/power/supply/da9030_battery.c b/drivers/power/supply/da9030_battery.c index 04e0f4162d42..34328f5d556e 100644 --- a/drivers/power/supply/da9030_battery.c +++ b/drivers/power/supply/da9030_battery.c @@ -269,7 +269,7 @@ static void da9030_charger_check_state(struct da9030_charger *charger) } if (charger->adc.vchmax_res > charger->thresholds.vcharge_max || charger->adc.vchmin_res < charger->thresholds.vcharge_min || - /* Tempreture readings are negative */ + /* Temperature readings are negative */ charger->adc.tbat_res < charger->thresholds.tbat_high || charger->adc.tbat_res > charger->thresholds.tbat_low) { /* disable charger */ @@ -470,7 +470,7 @@ static int da9030_battery_charger_init(struct da9030_charger *charger) if (ret) return ret; - /* enable auto ADC measuremnts */ + /* enable auto ADC measurements */ return da903x_write(charger->master, DA9030_ADC_AUTO_CONTROL, DA9030_ADC_TBAT_ENABLE | DA9030_ADC_VBAT_IN_TXON | DA9030_ADC_VCH_ENABLE | DA9030_ADC_ICH_ENABLE | @@ -571,7 +571,7 @@ static struct platform_driver da903x_battery_driver = { .name = "da903x-battery", }, .probe = da9030_battery_probe, - .remove_new = da9030_battery_remove, + .remove = da9030_battery_remove, }; module_platform_driver(da903x_battery_driver); diff --git a/drivers/power/supply/da9052-battery.c 
b/drivers/power/supply/da9052-battery.c index 0d84c42c624e..6f17e2fa1a28 100644 --- a/drivers/power/supply/da9052-battery.c +++ b/drivers/power/supply/da9052-battery.c @@ -648,7 +648,7 @@ static void da9052_bat_remove(struct platform_device *pdev) static struct platform_driver da9052_bat_driver = { .probe = da9052_bat_probe, - .remove_new = da9052_bat_remove, + .remove = da9052_bat_remove, .driver = { .name = "da9052-bat", }, diff --git a/drivers/power/supply/da9150-charger.c b/drivers/power/supply/da9150-charger.c index b13cecd84f58..27f36ef5b88d 100644 --- a/drivers/power/supply/da9150-charger.c +++ b/drivers/power/supply/da9150-charger.c @@ -636,7 +636,7 @@ static struct platform_driver da9150_charger_driver = { .name = "da9150-charger", }, .probe = da9150_charger_probe, - .remove_new = da9150_charger_remove, + .remove = da9150_charger_remove, }; module_platform_driver(da9150_charger_driver); diff --git a/drivers/power/supply/generic-adc-battery.c b/drivers/power/supply/generic-adc-battery.c index 7bdc6b263609..d5d215f5ad8b 100644 --- a/drivers/power/supply/generic-adc-battery.c +++ b/drivers/power/supply/generic-adc-battery.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* * Generic battery driver using IIO - * Copyright (C) 2012, Anish Kumar <anish198519851985@gmail.com> + * Copyright (C) 2012, Anish Kumar <yesanishhere@gmail.com> * Copyright (c) 2023, Sebastian Reichel <sre@kernel.org> */ #include <linux/interrupt.h> @@ -295,6 +295,6 @@ static struct platform_driver gab_driver = { }; module_platform_driver(gab_driver); -MODULE_AUTHOR("anish kumar <anish198519851985@gmail.com>"); +MODULE_AUTHOR("anish kumar <yesanishhere@gmail.com>"); MODULE_DESCRIPTION("generic battery driver using IIO"); MODULE_LICENSE("GPL"); diff --git a/drivers/power/supply/ipaq_micro_battery.c b/drivers/power/supply/ipaq_micro_battery.c index 66cc649f702a..7e0568a5353f 100644 --- a/drivers/power/supply/ipaq_micro_battery.c +++ b/drivers/power/supply/ipaq_micro_battery.c @@ -302,7 +302,7 @@ static struct platform_driver micro_batt_device_driver = { .pm = µ_batt_dev_pm_ops, }, .probe = micro_batt_probe, - .remove_new = micro_batt_remove, + .remove = micro_batt_remove, }; module_platform_driver(micro_batt_device_driver); diff --git a/drivers/power/supply/isp1704_charger.c b/drivers/power/supply/isp1704_charger.c index 860d8614c98f..237912a92272 100644 --- a/drivers/power/supply/isp1704_charger.c +++ b/drivers/power/supply/isp1704_charger.c @@ -501,7 +501,7 @@ static struct platform_driver isp1704_charger_driver = { .of_match_table = of_match_ptr(omap_isp1704_of_match), }, .probe = isp1704_charger_probe, - .remove_new = isp1704_charger_remove, + .remove = isp1704_charger_remove, }; module_platform_driver(isp1704_charger_driver); diff --git a/drivers/power/supply/lenovo_yoga_c630_battery.c b/drivers/power/supply/lenovo_yoga_c630_battery.c index f98f65e00831..7a6c8af9e8c2 100644 --- a/drivers/power/supply/lenovo_yoga_c630_battery.c +++ b/drivers/power/supply/lenovo_yoga_c630_battery.c @@ -368,11 +368,12 @@ static int yoga_c630_psy_register_bat_psy(struct yoga_c630_psy *ecbat) bat_cfg.drv_data = ecbat; bat_cfg.fwnode = ecbat->fwnode; - ecbat->bat_psy = power_supply_register_no_ws(ecbat->dev, - ecbat->unit_mA ? - &yoga_c630_psy_bat_psy_desc_mA : - &yoga_c630_psy_bat_psy_desc_mWh, - &bat_cfg); + bat_cfg.no_wakeup_source = true; + ecbat->bat_psy = power_supply_register(ecbat->dev, + ecbat->unit_mA ? 
+ &yoga_c630_psy_bat_psy_desc_mA : + &yoga_c630_psy_bat_psy_desc_mWh, + &bat_cfg); if (IS_ERR(ecbat->bat_psy)) { dev_err(ecbat->dev, "failed to register battery supply\n"); return PTR_ERR(ecbat->bat_psy); @@ -442,7 +443,8 @@ static int yoga_c630_psy_probe(struct auxiliary_device *adev, adp_cfg.fwnode = ecbat->fwnode; adp_cfg.supplied_to = (char **)&yoga_c630_psy_bat_psy_desc_mA.name; adp_cfg.num_supplicants = 1; - ecbat->adp_psy = devm_power_supply_register_no_ws(dev, &yoga_c630_psy_adpt_psy_desc, &adp_cfg); + adp_cfg.no_wakeup_source = true; + ecbat->adp_psy = devm_power_supply_register(dev, &yoga_c630_psy_adpt_psy_desc, &adp_cfg); if (IS_ERR(ecbat->adp_psy)) { dev_err(dev, "failed to register AC adapter supply\n"); return PTR_ERR(ecbat->adp_psy); diff --git a/drivers/power/supply/lp8788-charger.c b/drivers/power/supply/lp8788-charger.c index 72b170b4ac46..f0a680c155c4 100644 --- a/drivers/power/supply/lp8788-charger.c +++ b/drivers/power/supply/lp8788-charger.c @@ -716,7 +716,7 @@ static void lp8788_charger_remove(struct platform_device *pdev) static struct platform_driver lp8788_charger_driver = { .probe = lp8788_charger_probe, - .remove_new = lp8788_charger_remove, + .remove = lp8788_charger_remove, .driver = { .name = LP8788_DEV_CHARGER, }, diff --git a/drivers/power/supply/max14577_charger.c b/drivers/power/supply/max14577_charger.c index b28c04157709..1cef2f860b5f 100644 --- a/drivers/power/supply/max14577_charger.c +++ b/drivers/power/supply/max14577_charger.c @@ -634,7 +634,7 @@ static struct platform_driver max14577_charger_driver = { .of_match_table = of_max14577_charger_dt_match, }, .probe = max14577_charger_probe, - .remove_new = max14577_charger_remove, + .remove = max14577_charger_remove, .id_table = max14577_charger_id, }; module_platform_driver(max14577_charger_driver); diff --git a/drivers/power/supply/max77650-charger.c b/drivers/power/supply/max77650-charger.c index 818e13c613e3..5f58c0c24b4d 100644 --- a/drivers/power/supply/max77650-charger.c +++ b/drivers/power/supply/max77650-charger.c @@ -364,7 +364,7 @@ static struct platform_driver max77650_charger_driver = { .of_match_table = max77650_charger_of_match, }, .probe = max77650_charger_probe, - .remove_new = max77650_charger_remove, + .remove = max77650_charger_remove, }; module_platform_driver(max77650_charger_driver); diff --git a/drivers/power/supply/max77693_charger.c b/drivers/power/supply/max77693_charger.c index 4caac142c428..cdea35c0d1de 100644 --- a/drivers/power/supply/max77693_charger.c +++ b/drivers/power/supply/max77693_charger.c @@ -798,7 +798,7 @@ static struct platform_driver max77693_charger_driver = { .name = "max77693-charger", }, .probe = max77693_charger_probe, - .remove_new = max77693_charger_remove, + .remove = max77693_charger_remove, .id_table = max77693_charger_id, }; module_platform_driver(max77693_charger_driver); diff --git a/drivers/power/supply/max77976_charger.c b/drivers/power/supply/max77976_charger.c index d7e520da7688..e6fe68cebc32 100644 --- a/drivers/power/supply/max77976_charger.c +++ b/drivers/power/supply/max77976_charger.c @@ -452,6 +452,7 @@ static int max77976_probe(struct i2c_client *client) i2c_set_clientdata(client, chg); psy_cfg.drv_data = chg; + psy_cfg.no_wakeup_source = true; chg->client = client; chg->regmap = devm_regmap_init_i2c(client, &max77976_regmap_config); @@ -475,7 +476,7 @@ static int max77976_probe(struct i2c_client *client) if (err) return err; - psy = devm_power_supply_register_no_ws(dev, &max77976_psy_desc, &psy_cfg); + psy = 
devm_power_supply_register(dev, &max77976_psy_desc, &psy_cfg); if (IS_ERR(psy)) return dev_err_probe(dev, PTR_ERR(psy), "cannot register\n"); diff --git a/drivers/power/supply/max8925_power.c b/drivers/power/supply/max8925_power.c index 621a006d52a9..d753145de3bb 100644 --- a/drivers/power/supply/max8925_power.c +++ b/drivers/power/supply/max8925_power.c @@ -73,7 +73,7 @@ struct max8925_power_info { unsigned usb_online:1; unsigned bat_online:1; unsigned chg_mode:2; - unsigned batt_detect:1; /* detecing MB by ID pin */ + unsigned batt_detect:1; /* detecting MB by ID pin */ unsigned topoff_threshold:2; unsigned fast_charge:3; unsigned no_temp_support:1; @@ -563,7 +563,7 @@ static void max8925_power_remove(struct platform_device *pdev) static struct platform_driver max8925_power_driver = { .probe = max8925_power_probe, - .remove_new = max8925_power_remove, + .remove = max8925_power_remove, .driver = { .name = "max8925-power", }, diff --git a/drivers/power/supply/pcf50633-charger.c b/drivers/power/supply/pcf50633-charger.c index 0e980522fee5..0136bc87b105 100644 --- a/drivers/power/supply/pcf50633-charger.c +++ b/drivers/power/supply/pcf50633-charger.c @@ -455,7 +455,7 @@ static struct platform_driver pcf50633_mbc_driver = { .name = "pcf50633-mbc", }, .probe = pcf50633_mbc_probe, - .remove_new = pcf50633_mbc_remove, + .remove = pcf50633_mbc_remove, }; module_platform_driver(pcf50633_mbc_driver); diff --git a/drivers/power/supply/pmu_battery.c b/drivers/power/supply/pmu_battery.c index eaab7500d99b..ed83c5e05ca3 100644 --- a/drivers/power/supply/pmu_battery.c +++ b/drivers/power/supply/pmu_battery.c @@ -170,6 +170,7 @@ static int __init pmu_bat_init(void) pbat->bat_desc.properties = pmu_bat_props; pbat->bat_desc.num_properties = ARRAY_SIZE(pmu_bat_props); pbat->bat_desc.get_property = pmu_bat_get_property; + pbat->bat_desc.type = POWER_SUPPLY_TYPE_BATTERY; pbat->pbi = &pmu_batteries[i]; psy_cfg.drv_data = pbat; diff --git a/drivers/power/supply/power_supply.h b/drivers/power/supply/power_supply.h index 3cbafc58bdad..7434a6f24775 100644 --- a/drivers/power/supply/power_supply.h +++ b/drivers/power/supply/power_supply.h @@ -13,9 +13,12 @@ struct device; struct device_type; struct power_supply; +extern int power_supply_property_is_writeable(struct power_supply *psy, + enum power_supply_property psp); + #ifdef CONFIG_SYSFS -extern void power_supply_init_attrs(void); +extern void __init power_supply_init_attrs(void); extern int power_supply_uevent(const struct device *dev, struct kobj_uevent_env *env); extern const struct attribute_group *power_supply_attr_groups[]; @@ -41,3 +44,20 @@ static inline int power_supply_create_triggers(struct power_supply *psy) static inline void power_supply_remove_triggers(struct power_supply *psy) {} #endif /* CONFIG_LEDS_TRIGGERS */ + +#ifdef CONFIG_POWER_SUPPLY_HWMON + +int power_supply_add_hwmon_sysfs(struct power_supply *psy); +void power_supply_remove_hwmon_sysfs(struct power_supply *psy); + +#else + +static inline int power_supply_add_hwmon_sysfs(struct power_supply *psy) +{ + return 0; +} + +static inline +void power_supply_remove_hwmon_sysfs(struct power_supply *psy) {} + +#endif /* CONFIG_POWER_SUPPLY_HWMON */ diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c index 49534458a9f7..16085eff0084 100644 --- a/drivers/power/supply/power_supply_core.c +++ b/drivers/power/supply/power_supply_core.c @@ -152,7 +152,7 @@ static void power_supply_deferred_register_work(struct work_struct *work) deferred_register_work.work); 
if (psy->dev.parent) { - while (!mutex_trylock(&psy->dev.parent->mutex)) { + while (!device_trylock(psy->dev.parent)) { if (psy->removing) return; msleep(10); @@ -162,7 +162,7 @@ static void power_supply_deferred_register_work(struct work_struct *work) power_supply_changed(psy); if (psy->dev.parent) - mutex_unlock(&psy->dev.parent->mutex); + device_unlock(psy->dev.parent); } #ifdef CONFIG_OF @@ -484,8 +484,6 @@ EXPORT_SYMBOL_GPL(power_supply_get_by_name); */ void power_supply_put(struct power_supply *psy) { - might_sleep(); - atomic_dec(&psy->use_cnt); put_device(&psy->dev); } @@ -777,7 +775,7 @@ int power_supply_get_battery_info(struct power_supply *psy, tab_len = size / (2 * sizeof(__be32)); info->ocv_table_size[index] = tab_len; - table = info->ocv_table[index] = + info->ocv_table[index] = table = devm_kcalloc(&psy->dev, tab_len, sizeof(*table), GFP_KERNEL); if (!info->ocv_table[index]) { power_supply_put_battery_info(psy, info); @@ -798,7 +796,7 @@ int power_supply_get_battery_info(struct power_supply *psy, goto out_ret_pointer; info->resist_table_size = len / (2 * sizeof(__be32)); - resist_table = info->resist_table = devm_kcalloc(&psy->dev, + info->resist_table = resist_table = devm_kcalloc(&psy->dev, info->resist_table_size, sizeof(*resist_table), GFP_KERNEL); @@ -982,7 +980,7 @@ EXPORT_SYMBOL_GPL(power_supply_battery_info_get_prop); * * Return: the battery internal resistance percent */ -int power_supply_temp2resist_simple(struct power_supply_resistance_temp_table *table, +int power_supply_temp2resist_simple(const struct power_supply_resistance_temp_table *table, int table_len, int temp) { int i, high, low; @@ -1093,7 +1091,7 @@ EXPORT_SYMBOL_GPL(power_supply_get_maintenance_charging_setting); * * Return: the battery capacity. */ -int power_supply_ocv2cap_simple(struct power_supply_battery_ocv_table *table, +int power_supply_ocv2cap_simple(const struct power_supply_battery_ocv_table *table, int table_len, int ocv) { int i, high, low; @@ -1118,7 +1116,7 @@ int power_supply_ocv2cap_simple(struct power_supply_battery_ocv_table *table, } EXPORT_SYMBOL_GPL(power_supply_ocv2cap_simple); -struct power_supply_battery_ocv_table * +const struct power_supply_battery_ocv_table * power_supply_find_ocv2cap_table(struct power_supply_battery_info *info, int temp, int *table_len) { @@ -1149,7 +1147,7 @@ EXPORT_SYMBOL_GPL(power_supply_find_ocv2cap_table); int power_supply_batinfo_ocv2cap(struct power_supply_battery_info *info, int ocv, int temp) { - struct power_supply_battery_ocv_table *table; + const struct power_supply_battery_ocv_table *table; int table_len; table = power_supply_find_ocv2cap_table(info, temp, &table_len); @@ -1233,7 +1231,6 @@ int power_supply_property_is_writeable(struct power_supply *psy, { return psy->desc->property_is_writeable && psy->desc->property_is_writeable(psy, psp); } -EXPORT_SYMBOL_GPL(power_supply_property_is_writeable); void power_supply_external_power_changed(struct power_supply *psy) { @@ -1342,8 +1339,7 @@ static void psy_unregister_thermal(struct power_supply *psy) static struct power_supply *__must_check __power_supply_register(struct device *parent, const struct power_supply_desc *desc, - const struct power_supply_config *cfg, - bool ws) + const struct power_supply_config *cfg) { struct device *dev; struct power_supply *psy; @@ -1410,7 +1406,7 @@ __power_supply_register(struct device *parent, if (rc) goto device_add_failed; - rc = device_init_wakeup(dev, ws); + rc = device_init_wakeup(dev, cfg ? 
!cfg->no_wakeup_source : true); if (rc) goto wakeup_init_failed; @@ -1476,33 +1472,10 @@ struct power_supply *__must_check power_supply_register(struct device *parent, const struct power_supply_desc *desc, const struct power_supply_config *cfg) { - return __power_supply_register(parent, desc, cfg, true); + return __power_supply_register(parent, desc, cfg); } EXPORT_SYMBOL_GPL(power_supply_register); -/** - * power_supply_register_no_ws() - Register new non-waking-source power supply - * @parent: Device to be a parent of power supply's device, usually - * the device which probe function calls this - * @desc: Description of power supply, must be valid through whole - * lifetime of this power supply - * @cfg: Run-time specific configuration accessed during registering, - * may be NULL - * - * Return: A pointer to newly allocated power_supply on success - * or ERR_PTR otherwise. - * Use power_supply_unregister() on returned power_supply pointer to release - * resources. - */ -struct power_supply *__must_check -power_supply_register_no_ws(struct device *parent, - const struct power_supply_desc *desc, - const struct power_supply_config *cfg) -{ - return __power_supply_register(parent, desc, cfg, false); -} -EXPORT_SYMBOL_GPL(power_supply_register_no_ws); - static void devm_power_supply_release(struct device *dev, void *res) { struct power_supply **psy = res; @@ -1535,7 +1508,7 @@ devm_power_supply_register(struct device *parent, if (!ptr) return ERR_PTR(-ENOMEM); - psy = __power_supply_register(parent, desc, cfg, true); + psy = __power_supply_register(parent, desc, cfg); if (IS_ERR(psy)) { devres_free(ptr); } else { @@ -1547,42 +1520,6 @@ devm_power_supply_register(struct device *parent, EXPORT_SYMBOL_GPL(devm_power_supply_register); /** - * devm_power_supply_register_no_ws() - Register managed non-waking-source power supply - * @parent: Device to be a parent of power supply's device, usually - * the device which probe function calls this - * @desc: Description of power supply, must be valid through whole - * lifetime of this power supply - * @cfg: Run-time specific configuration accessed during registering, - * may be NULL - * - * Return: A pointer to newly allocated power_supply on success - * or ERR_PTR otherwise. - * The returned power_supply pointer will be automatically unregistered - * on driver detach. 
- */ -struct power_supply *__must_check -devm_power_supply_register_no_ws(struct device *parent, - const struct power_supply_desc *desc, - const struct power_supply_config *cfg) -{ - struct power_supply **ptr, *psy; - - ptr = devres_alloc(devm_power_supply_release, sizeof(*ptr), GFP_KERNEL); - - if (!ptr) - return ERR_PTR(-ENOMEM); - psy = __power_supply_register(parent, desc, cfg, false); - if (IS_ERR(psy)) { - devres_free(ptr); - } else { - *ptr = psy; - devres_add(parent, ptr); - } - return psy; -} -EXPORT_SYMBOL_GPL(devm_power_supply_register_no_ws); - -/** * power_supply_unregister() - Remove this power supply from system * @psy: Pointer to power supply to unregister * diff --git a/drivers/power/supply/power_supply_hwmon.c b/drivers/power/supply/power_supply_hwmon.c index 6fbbfb1c685e..01be04903d7d 100644 --- a/drivers/power/supply/power_supply_hwmon.c +++ b/drivers/power/supply/power_supply_hwmon.c @@ -7,6 +7,7 @@ #include <linux/hwmon.h> #include <linux/power_supply.h> #include <linux/slab.h> +#include "power_supply.h" struct power_supply_hwmon { struct power_supply *psy; diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c index 16b3c5880cd8..571de43fcca9 100644 --- a/drivers/power/supply/power_supply_sysfs.c +++ b/drivers/power/supply/power_supply_sysfs.c @@ -142,7 +142,7 @@ static const char * const POWER_SUPPLY_CHARGE_BEHAVIOUR_TEXT[] = { [POWER_SUPPLY_CHARGE_BEHAVIOUR_FORCE_DISCHARGE] = "force-discharge", }; -static struct power_supply_attr power_supply_attrs[] = { +static struct power_supply_attr power_supply_attrs[] __ro_after_init = { /* Properties of type `int' */ POWER_SUPPLY_ENUM_ATTR(STATUS), POWER_SUPPLY_ENUM_ATTR(CHARGE_TYPE), @@ -225,9 +225,9 @@ static struct power_supply_attr power_supply_attrs[] = { #define POWER_SUPPLY_ATTR_CNT ARRAY_SIZE(power_supply_attrs) static struct attribute * -__power_supply_attrs[POWER_SUPPLY_ATTR_CNT + 1]; +__power_supply_attrs[POWER_SUPPLY_ATTR_CNT + 1] __ro_after_init; -static struct power_supply_attr *to_ps_attr(struct device_attribute *attr) +static const struct power_supply_attr *to_ps_attr(struct device_attribute *attr) { return container_of(attr, struct power_supply_attr, dev_attr); } @@ -273,7 +273,7 @@ static ssize_t power_supply_show_property(struct device *dev, char *buf) { ssize_t ret; struct power_supply *psy = dev_get_drvdata(dev); - struct power_supply_attr *ps_attr = to_ps_attr(attr); + const struct power_supply_attr *ps_attr = to_ps_attr(attr); enum power_supply_property psp = dev_attr_psp(attr); union power_supply_propval value; @@ -326,7 +326,7 @@ static ssize_t power_supply_store_property(struct device *dev, const char *buf, size_t count) { ssize_t ret; struct power_supply *psy = dev_get_drvdata(dev); - struct power_supply_attr *ps_attr = to_ps_attr(attr); + const struct power_supply_attr *ps_attr = to_ps_attr(attr); enum power_supply_property psp = dev_attr_psp(attr); union power_supply_propval value; @@ -401,7 +401,7 @@ const struct attribute_group *power_supply_attr_groups[] = { NULL }; -void power_supply_init_attrs(void) +void __init power_supply_init_attrs(void) { int i; diff --git a/drivers/power/supply/qcom_battmgr.c b/drivers/power/supply/qcom_battmgr.c index f0a64c00ddaa..47d29271ddf4 100644 --- a/drivers/power/supply/qcom_battmgr.c +++ b/drivers/power/supply/qcom_battmgr.c @@ -151,7 +151,7 @@ struct qcom_battmgr_message { __le32 capacity_low; __le32 capacity_warning; __le32 cycle_count; - /* thousandth of persent */ + /* thousandth of percent */ __le32 accuracy; 
__le32 max_sample_time_ms; __le32 min_sample_time_ms; diff --git a/drivers/power/supply/qcom_pmi8998_charger.c b/drivers/power/supply/qcom_pmi8998_charger.c index 81acbd8b2169..3b4132376649 100644 --- a/drivers/power/supply/qcom_pmi8998_charger.c +++ b/drivers/power/supply/qcom_pmi8998_charger.c @@ -832,7 +832,7 @@ static const struct smb2_register smb2_init_seq[] = { AUTO_RECHG_BIT | EN_ANALOG_DROP_IN_VBATT_BIT | CHARGER_INHIBIT_BIT, .val = CHARGER_INHIBIT_BIT }, - /* STAT pin software override, match downstream. Parallell charging? */ + /* STAT pin software override, match downstream. Parallel charging? */ { .addr = STAT_CFG, .mask = STAT_SW_OVERRIDE_CFG_BIT, .val = STAT_SW_OVERRIDE_CFG_BIT }, diff --git a/drivers/power/supply/qcom_smbb.c b/drivers/power/supply/qcom_smbb.c index 4e57762e27ba..a79563f6ff7a 100644 --- a/drivers/power/supply/qcom_smbb.c +++ b/drivers/power/supply/qcom_smbb.c @@ -1017,10 +1017,10 @@ static const struct of_device_id smbb_charger_id_table[] = { MODULE_DEVICE_TABLE(of, smbb_charger_id_table); static struct platform_driver smbb_charger_driver = { - .probe = smbb_charger_probe, - .remove_new = smbb_charger_remove, - .driver = { - .name = "qcom-smbb", + .probe = smbb_charger_probe, + .remove = smbb_charger_remove, + .driver = { + .name = "qcom-smbb", .of_match_table = smbb_charger_id_table, }, }; diff --git a/drivers/power/supply/rk817_charger.c b/drivers/power/supply/rk817_charger.c index 57b6ddefad28..e5f35d083c23 100644 --- a/drivers/power/supply/rk817_charger.c +++ b/drivers/power/supply/rk817_charger.c @@ -240,9 +240,32 @@ static int rk817_record_battery_nvram_values(struct rk817_charger *charger) static int rk817_bat_calib_cap(struct rk817_charger *charger) { struct rk808 *rk808 = charger->rk808; - int tmp, charge_now, charge_now_adc, volt_avg; + int charge_now, charge_now_adc; u8 bulk_reg[4]; + /* Don't do anything if there's no battery. */ + if (!charger->battery_present) + return 0; + + /* + * When resuming from suspend, sometimes the voltage value would be + * incorrect. BSP would simply wait two seconds and try reading the + * values again. Do not do any sort of calibration activity when the + * reported value is incorrect. The next scheduled update of battery + * vaules should then return valid data and the driver can continue. + * Use 2.7v as the sanity value because per the datasheet the PMIC + * can in no way support a battery voltage lower than this. BSP only + * checked for values too low, but I'm adding in a check for values + * too high just in case; again the PMIC can in no way support + * voltages above 4.45v, so this seems like a good value. + */ + if ((charger->volt_avg_uv < 2700000) || (charger->volt_avg_uv > 4450000)) { + dev_dbg(charger->dev, + "Battery voltage of %d is invalid, ignoring.\n", + charger->volt_avg_uv); + return -EINVAL; + } + /* Calibrate the soc and fcc on a fully charged battery */ if (charger->charge_status == CHARGE_FINISH && (!charger->soc_cal)) { @@ -304,51 +327,6 @@ static int rk817_bat_calib_cap(struct rk817_charger *charger) } } - /* - * Calibrate the fully charged capacity when we previously had a full - * battery (soc_cal = 1) and are now empty (at or below minimum design - * voltage). If our columb counter is still positive, subtract that - * from our fcc value to get a calibrated fcc, and if our columb - * counter is negative add that to our fcc (but not to exceed our - * design capacity). 
- */ - regmap_bulk_read(charger->rk808->regmap, RK817_GAS_GAUGE_BAT_VOL_H, - bulk_reg, 2); - tmp = get_unaligned_be16(bulk_reg); - volt_avg = (charger->voltage_k * tmp) + 1000 * charger->voltage_b; - if (volt_avg <= charger->bat_voltage_min_design_uv && - charger->soc_cal) { - regmap_bulk_read(rk808->regmap, RK817_GAS_GAUGE_Q_PRES_H3, - bulk_reg, 4); - charge_now_adc = get_unaligned_be32(bulk_reg); - charge_now = ADC_TO_CHARGE_UAH(charge_now_adc, - charger->res_div); - /* - * Note, if charge_now is negative this will add it (what we - * want) and if it's positive this will subtract (also what - * we want). - */ - charger->fcc_mah = charger->fcc_mah - (charge_now / 1000); - - dev_dbg(charger->dev, - "Recalibrating full charge capacity to %d uah\n", - charger->fcc_mah * 1000); - } - - /* - * Set the SOC to 0 if we are below the minimum system voltage. - */ - if (volt_avg <= charger->bat_voltage_min_design_uv) { - charger->soc = 0; - charge_now_adc = CHARGE_TO_ADC(0, charger->res_div); - put_unaligned_be32(charge_now_adc, bulk_reg); - regmap_bulk_write(rk808->regmap, - RK817_GAS_GAUGE_Q_INIT_H3, bulk_reg, 4); - dev_warn(charger->dev, - "Battery voltage %d below minimum voltage %d\n", - volt_avg, charger->bat_voltage_min_design_uv); - } - rk817_record_battery_nvram_values(charger); return 0; @@ -648,6 +626,24 @@ static irqreturn_t rk817_plug_out_isr(int irq, void *cg) return IRQ_HANDLED; } +static int rk817_bat_set_prop(struct power_supply *ps, + enum power_supply_property prop, + const union power_supply_propval *val) +{ + struct rk817_charger *charger = power_supply_get_drvdata(ps); + + switch (prop) { + case POWER_SUPPLY_PROP_CHARGE_FULL: + if ((val->intval < 500000) || + (val->intval > charger->bat_charge_full_design_uah)) + return -EINVAL; + charger->fcc_mah = val->intval / 1000; + return rk817_bat_calib_cap(charger); + default: + return -EINVAL; + } +} + static enum power_supply_property rk817_bat_props[] = { POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_STATUS, @@ -673,12 +669,25 @@ static enum power_supply_property rk817_chg_props[] = { POWER_SUPPLY_PROP_VOLTAGE_AVG, }; +static int rk817_bat_prop_writeable(struct power_supply *psy, + enum power_supply_property psp) +{ + switch (psp) { + case POWER_SUPPLY_PROP_CHARGE_FULL: + return 1; + default: + return 0; + } +} + static const struct power_supply_desc rk817_bat_desc = { .name = "rk817-battery", .type = POWER_SUPPLY_TYPE_BATTERY, .properties = rk817_bat_props, + .property_is_writeable = rk817_bat_prop_writeable, .num_properties = ARRAY_SIZE(rk817_bat_props), .get_property = rk817_bat_get_prop, + .set_property = rk817_bat_set_prop, }; static const struct power_supply_desc rk817_chg_desc = { @@ -1202,6 +1211,15 @@ static int rk817_charger_probe(struct platform_device *pdev) return 0; } +static int __maybe_unused rk817_suspend(struct device *dev) +{ + struct rk817_charger *charger = dev_get_drvdata(dev); + + cancel_delayed_work_sync(&charger->work); + + return 0; +} + static int __maybe_unused rk817_resume(struct device *dev) { @@ -1213,7 +1231,7 @@ static int __maybe_unused rk817_resume(struct device *dev) return 0; } -static SIMPLE_DEV_PM_OPS(rk817_charger_pm, NULL, rk817_resume); +static SIMPLE_DEV_PM_OPS(rk817_charger_pm, rk817_suspend, rk817_resume); static struct platform_driver rk817_charger_driver = { .probe = rk817_charger_probe, diff --git a/drivers/power/supply/rt9471.c b/drivers/power/supply/rt9471.c index c04af1ee89c6..67b86ac91a21 100644 --- a/drivers/power/supply/rt9471.c +++ b/drivers/power/supply/rt9471.c @@ -139,6 +139,19 
@@ enum { RT9471_PORTSTAT_DCP, }; +enum { + RT9471_ICSTAT_SLEEP = 0, + RT9471_ICSTAT_VBUSRDY, + RT9471_ICSTAT_TRICKLECHG, + RT9471_ICSTAT_PRECHG, + RT9471_ICSTAT_FASTCHG, + RT9471_ICSTAT_IEOC, + RT9471_ICSTAT_BGCHG, + RT9471_ICSTAT_CHGDONE, + RT9471_ICSTAT_CHGFAULT, + RT9471_ICSTAT_OTG = 15, +}; + struct rt9471_chip { struct device *dev; struct regmap *regmap; @@ -153,8 +166,8 @@ struct rt9471_chip { }; static const struct reg_field rt9471_reg_fields[F_MAX_FIELDS] = { - [F_WDT] = REG_FIELD(RT9471_REG_TOP, 0, 0), - [F_WDT_RST] = REG_FIELD(RT9471_REG_TOP, 1, 1), + [F_WDT] = REG_FIELD(RT9471_REG_TOP, 0, 1), + [F_WDT_RST] = REG_FIELD(RT9471_REG_TOP, 2, 2), [F_CHG_EN] = REG_FIELD(RT9471_REG_FUNC, 0, 0), [F_HZ] = REG_FIELD(RT9471_REG_FUNC, 5, 5), [F_BATFET_DIS] = REG_FIELD(RT9471_REG_FUNC, 7, 7), @@ -255,31 +268,32 @@ static int rt9471_get_ieoc(struct rt9471_chip *chip, int *microamp) static int rt9471_get_status(struct rt9471_chip *chip, int *status) { - unsigned int chg_ready, chg_done, fault_stat; + unsigned int ic_stat; int ret; - ret = regmap_field_read(chip->rm_fields[F_ST_CHG_RDY], &chg_ready); - if (ret) - return ret; - - ret = regmap_field_read(chip->rm_fields[F_ST_CHG_DONE], &chg_done); + ret = regmap_field_read(chip->rm_fields[F_IC_STAT], &ic_stat); if (ret) return ret; - ret = regmap_read(chip->regmap, RT9471_REG_STAT1, &fault_stat); - if (ret) - return ret; - - fault_stat &= RT9471_CHGFAULT_MASK; - - if (chg_ready && chg_done) - *status = POWER_SUPPLY_STATUS_FULL; - else if (chg_ready && fault_stat) + switch (ic_stat) { + case RT9471_ICSTAT_VBUSRDY: + case RT9471_ICSTAT_CHGFAULT: *status = POWER_SUPPLY_STATUS_NOT_CHARGING; - else if (chg_ready && !fault_stat) + break; + case RT9471_ICSTAT_TRICKLECHG ... RT9471_ICSTAT_BGCHG: *status = POWER_SUPPLY_STATUS_CHARGING; - else + break; + case RT9471_ICSTAT_CHGDONE: + *status = POWER_SUPPLY_STATUS_FULL; + break; + case RT9471_ICSTAT_SLEEP: + case RT9471_ICSTAT_OTG: *status = POWER_SUPPLY_STATUS_DISCHARGING; + break; + default: + *status = POWER_SUPPLY_STATUS_UNKNOWN; + break; + } return 0; } diff --git a/drivers/power/supply/samsung-sdi-battery.c b/drivers/power/supply/samsung-sdi-battery.c index b63fd2758c2f..33565002ee27 100644 --- a/drivers/power/supply/samsung-sdi-battery.c +++ b/drivers/power/supply/samsung-sdi-battery.c @@ -431,7 +431,7 @@ static const struct power_supply_vbat_ri_table samsung_vbat2res_charging_eb58515 * temperature compensation tables so we just state 100% for every temperature. * If you have the datasheets, please provide these tables. */ -static struct power_supply_resistance_temp_table samsung_temp2res[] = { +static const struct power_supply_resistance_temp_table samsung_temp2res[] = { { .temp = 50, .resistance = 100 }, { .temp = 40, .resistance = 100 }, { .temp = 30, .resistance = 100 }, @@ -447,7 +447,7 @@ static struct power_supply_resistance_temp_table samsung_temp2res[] = { * These must be sorted by falling OCV value. 
*/ -static struct power_supply_battery_ocv_table samsung_ocv_cap_eb485159lu[] = { +static const struct power_supply_battery_ocv_table samsung_ocv_cap_eb485159lu[] = { { .ocv = 4330000, .capacity = 100}, { .ocv = 4320000, .capacity = 99}, { .ocv = 4283000, .capacity = 95}, @@ -499,7 +499,7 @@ static struct power_supply_battery_ocv_table samsung_ocv_cap_eb485159lu[] = { }; /* Same capacity table is used by eb-l1m7flu, eb425161la, eb425161lu */ -static struct power_supply_battery_ocv_table samsung_ocv_cap_1500mah[] = { +static const struct power_supply_battery_ocv_table samsung_ocv_cap_1500mah[] = { { .ocv = 4328000, .capacity = 100}, { .ocv = 4299000, .capacity = 99}, { .ocv = 4281000, .capacity = 98}, @@ -540,7 +540,7 @@ static struct power_supply_battery_ocv_table samsung_ocv_cap_1500mah[] = { { .ocv = 3300000, .capacity = 0}, }; -static struct power_supply_battery_ocv_table samsung_ocv_cap_eb535151vu[] = { +static const struct power_supply_battery_ocv_table samsung_ocv_cap_eb535151vu[] = { { .ocv = 4178000, .capacity = 100}, { .ocv = 4148000, .capacity = 99}, { .ocv = 4105000, .capacity = 95}, @@ -572,7 +572,7 @@ static struct power_supply_battery_ocv_table samsung_ocv_cap_eb535151vu[] = { { .ocv = 3300000, .capacity = 0}, }; -static struct power_supply_battery_ocv_table samsung_ocv_cap_eb585157lu[] = { +static const struct power_supply_battery_ocv_table samsung_ocv_cap_eb585157lu[] = { { .ocv = 4320000, .capacity = 100}, { .ocv = 4296000, .capacity = 99}, { .ocv = 4283000, .capacity = 98}, diff --git a/drivers/power/supply/sc2731_charger.c b/drivers/power/supply/sc2731_charger.c index b3d8b1ca97da..50d5157af927 100644 --- a/drivers/power/supply/sc2731_charger.c +++ b/drivers/power/supply/sc2731_charger.c @@ -530,7 +530,7 @@ static struct platform_driver sc2731_charger_driver = { .of_match_table = sc2731_charger_of_match, }, .probe = sc2731_charger_probe, - .remove_new = sc2731_charger_remove, + .remove = sc2731_charger_remove, }; module_platform_driver(sc2731_charger_driver); diff --git a/drivers/power/supply/sc27xx_fuel_gauge.c b/drivers/power/supply/sc27xx_fuel_gauge.c index bd23c4d9fed4..f36edc2ba708 100644 --- a/drivers/power/supply/sc27xx_fuel_gauge.c +++ b/drivers/power/supply/sc27xx_fuel_gauge.c @@ -992,7 +992,7 @@ static int sc27xx_fgu_calibration(struct sc27xx_fgu_data *data) static int sc27xx_fgu_hw_init(struct sc27xx_fgu_data *data) { struct power_supply_battery_info *info; - struct power_supply_battery_ocv_table *table; + const struct power_supply_battery_ocv_table *table; int ret, delta_clbcnt, alarm_adc; ret = power_supply_get_battery_info(data->battery, &info); @@ -1183,10 +1183,14 @@ static int sc27xx_fgu_probe(struct platform_device *pdev) return PTR_ERR(data->charge_chan); } - data->gpiod = devm_gpiod_get(dev, "bat-detect", GPIOD_IN); + data->gpiod = devm_gpiod_get(dev, "battery-detect", GPIOD_IN); if (IS_ERR(data->gpiod)) { - dev_err(dev, "failed to get battery detection GPIO\n"); - return PTR_ERR(data->gpiod); + data->gpiod = devm_gpiod_get(dev, "bat-detect", GPIOD_IN); + if (IS_ERR(data->gpiod)) { + dev_err(dev, "failed to get battery detection GPIO\n"); + return PTR_ERR(data->gpiod); + } + dev_warn(dev, "bat-detect is deprecated, please use battery-detect\n"); } ret = gpiod_get_value_cansleep(data->gpiod); diff --git a/drivers/power/supply/tps65090-charger.c b/drivers/power/supply/tps65090-charger.c index d41595764caa..d65193e410a6 100644 --- a/drivers/power/supply/tps65090-charger.c +++ b/drivers/power/supply/tps65090-charger.c @@ -343,7 +343,7 @@ static struct 
platform_driver tps65090_charger_driver = { .of_match_table = of_tps65090_charger_match, }, .probe = tps65090_charger_probe, - .remove_new = tps65090_charger_remove, + .remove = tps65090_charger_remove, }; module_platform_driver(tps65090_charger_driver); diff --git a/drivers/power/supply/tps65217_charger.c b/drivers/power/supply/tps65217_charger.c index 2382749a2f53..6fff44e1ecac 100644 --- a/drivers/power/supply/tps65217_charger.c +++ b/drivers/power/supply/tps65217_charger.c @@ -269,7 +269,7 @@ MODULE_DEVICE_TABLE(of, tps65217_charger_match_table); static struct platform_driver tps65217_charger_driver = { .probe = tps65217_charger_probe, - .remove_new = tps65217_charger_remove, + .remove = tps65217_charger_remove, .driver = { .name = "tps65217-charger", .of_match_table = of_match_ptr(tps65217_charger_match_table), diff --git a/drivers/power/supply/twl4030_charger.c b/drivers/power/supply/twl4030_charger.c index f3f1a0862e93..9dcb5457bef4 100644 --- a/drivers/power/supply/twl4030_charger.c +++ b/drivers/power/supply/twl4030_charger.c @@ -1133,7 +1133,7 @@ MODULE_DEVICE_TABLE(of, twl_bci_of_match); static struct platform_driver twl4030_bci_driver = { .probe = twl4030_bci_probe, - .remove_new = twl4030_bci_remove, + .remove = twl4030_bci_remove, .driver = { .name = "twl4030_bci", .of_match_table = of_match_ptr(twl_bci_of_match), diff --git a/drivers/power/supply/twl6030_charger.c b/drivers/power/supply/twl6030_charger.c new file mode 100644 index 000000000000..b4ec26ff257c --- /dev/null +++ b/drivers/power/supply/twl6030_charger.c @@ -0,0 +1,581 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * TWL6030 charger + * + * Copyright (C) 2024 Andreas Kemnade <andreas@kemnade.info> + * + * based on older 6030 driver found in a v3.0 vendor kernel + * + * based on twl4030_bci_battery.c by TI + * Copyright (C) 2008 Texas Instruments, Inc. 
+ */ + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/err.h> +#include <linux/of.h> +#include <linux/bits.h> +#include <linux/platform_device.h> +#include <linux/interrupt.h> +#include <linux/mfd/twl.h> +#include <linux/power_supply.h> +#include <linux/notifier.h> +#include <linux/usb/otg.h> +#include <linux/iio/consumer.h> +#include <linux/devm-helpers.h> + +#define CONTROLLER_INT_MASK 0x00 +#define CONTROLLER_CTRL1 0x01 +#define CONTROLLER_WDG 0x02 +#define CONTROLLER_STAT1 0x03 +#define CHARGERUSB_INT_STATUS 0x04 +#define CHARGERUSB_INT_MASK 0x05 +#define CHARGERUSB_STATUS_INT1 0x06 +#define CHARGERUSB_STATUS_INT2 0x07 +#define CHARGERUSB_CTRL1 0x08 +#define CHARGERUSB_CTRL2 0x09 +#define CHARGERUSB_CTRL3 0x0A +#define CHARGERUSB_STAT1 0x0B +#define CHARGERUSB_VOREG 0x0C +#define CHARGERUSB_VICHRG 0x0D +#define CHARGERUSB_CINLIMIT 0x0E +#define CHARGERUSB_CTRLLIMIT1 0x0F +#define CHARGERUSB_CTRLLIMIT2 0x10 +#define ANTICOLLAPSE_CTRL1 0x11 +#define ANTICOLLAPSE_CTRL2 0x12 + +/* TWL6032 registers 0xDA to 0xDE - TWL6032_MODULE_CHARGER */ +#define CONTROLLER_CTRL2 0x00 +#define CONTROLLER_VSEL_COMP 0x01 +#define CHARGERUSB_VSYSREG 0x02 +#define CHARGERUSB_VICHRG_PC 0x03 +#define LINEAR_CHRG_STS 0x04 + +#define LINEAR_CHRG_STS_CRYSTL_OSC_OK 0x40 +#define LINEAR_CHRG_STS_END_OF_CHARGE 0x20 +#define LINEAR_CHRG_STS_VBATOV 0x10 +#define LINEAR_CHRG_STS_VSYSOV 0x08 +#define LINEAR_CHRG_STS_DPPM_STS 0x04 +#define LINEAR_CHRG_STS_CV_STS 0x02 +#define LINEAR_CHRG_STS_CC_STS 0x01 + +#define FG_REG_00 0x00 +#define FG_REG_01 0x01 +#define FG_REG_02 0x02 +#define FG_REG_03 0x03 +#define FG_REG_04 0x04 +#define FG_REG_05 0x05 +#define FG_REG_06 0x06 +#define FG_REG_07 0x07 +#define FG_REG_08 0x08 +#define FG_REG_09 0x09 +#define FG_REG_10 0x0A +#define FG_REG_11 0x0B + +/* CONTROLLER_INT_MASK */ +#define MVAC_FAULT BIT(7) +#define MAC_EOC BIT(6) +#define LINCH_GATED BIT(5) +#define MBAT_REMOVED BIT(4) +#define MFAULT_WDG BIT(3) +#define MBAT_TEMP BIT(2) +#define MVBUS_DET BIT(1) +#define MVAC_DET BIT(0) + +/* CONTROLLER_CTRL1 */ +#define CONTROLLER_CTRL1_EN_LINCH BIT(5) +#define CONTROLLER_CTRL1_EN_CHARGER BIT(4) +#define CONTROLLER_CTRL1_SEL_CHARGER BIT(3) + +/* CONTROLLER_STAT1 */ +#define CONTROLLER_STAT1_EXTCHRG_STATZ BIT(7) +#define CONTROLLER_STAT1_LINCH_GATED BIT(6) +#define CONTROLLER_STAT1_CHRG_DET_N BIT(5) +#define CONTROLLER_STAT1_FAULT_WDG BIT(4) +#define CONTROLLER_STAT1_VAC_DET BIT(3) +#define VAC_DET BIT(3) +#define CONTROLLER_STAT1_VBUS_DET BIT(2) +#define VBUS_DET BIT(2) +#define CONTROLLER_STAT1_BAT_REMOVED BIT(1) +#define CONTROLLER_STAT1_BAT_TEMP_OVRANGE BIT(0) + +/* CHARGERUSB_INT_STATUS */ +#define EN_LINCH BIT(4) +#define CURRENT_TERM_INT BIT(3) +#define CHARGERUSB_STAT BIT(2) +#define CHARGERUSB_THMREG BIT(1) +#define CHARGERUSB_FAULT BIT(0) + +/* CHARGERUSB_INT_MASK */ +#define MASK_MCURRENT_TERM BIT(3) +#define MASK_MCHARGERUSB_STAT BIT(2) +#define MASK_MCHARGERUSB_THMREG BIT(1) +#define MASK_MCHARGERUSB_FAULT BIT(0) + +/* CHARGERUSB_STATUS_INT1 */ +#define CHARGERUSB_STATUS_INT1_TMREG BIT(7) +#define CHARGERUSB_STATUS_INT1_NO_BAT BIT(6) +#define CHARGERUSB_STATUS_INT1_BST_OCP BIT(5) +#define CHARGERUSB_STATUS_INT1_TH_SHUTD BIT(4) +#define CHARGERUSB_STATUS_INT1_BAT_OVP BIT(3) +#define CHARGERUSB_STATUS_INT1_POOR_SRC BIT(2) +#define CHARGERUSB_STATUS_INT1_SLP_MODE BIT(1) +#define CHARGERUSB_STATUS_INT1_VBUS_OVP BIT(0) + +/* CHARGERUSB_STATUS_INT2 */ +#define ICCLOOP BIT(3) +#define CURRENT_TERM BIT(2) +#define CHARGE_DONE BIT(1) 
+#define ANTICOLLAPSE BIT(0) + +/* CHARGERUSB_CTRL1 */ +#define SUSPEND_BOOT BIT(7) +#define OPA_MODE BIT(6) +#define HZ_MODE BIT(5) +#define TERM BIT(4) + +/* CHARGERUSB_CTRL2 */ +#define UA_TO_VITERM(x) (((x) / 50000 - 1) << 5) + +/* CHARGERUSB_CTRL3 */ +#define VBUSCHRG_LDO_OVRD BIT(7) +#define CHARGE_ONCE BIT(6) +#define BST_HW_PR_DIS BIT(5) +#define AUTOSUPPLY BIT(3) +#define BUCK_HSILIM BIT(0) + +/* CHARGERUSB_VOREG */ +#define UV_TO_VOREG(x) (((x) - 3500000) / 20000) +#define VOREG_TO_UV(x) (((x) & 0x3F) * 20000 + 3500000) +#define CHARGERUSB_VOREG_3P52 0x01 +#define CHARGERUSB_VOREG_4P0 0x19 +#define CHARGERUSB_VOREG_4P2 0x23 +#define CHARGERUSB_VOREG_4P76 0x3F + +/* CHARGERUSB_VICHRG */ +/* + * might be inaccurate for < 500 mA, different scale might apply, + * either starting from 100 mA or 300 mA + */ +#define UA_TO_VICHRG(x) (((x) / 100000) - 1) +#define VICHRG_TO_UA(x) (((x) & 0xf) * 100000 + 100000) + +/* CHARGERUSB_CINLIMIT */ +#define CHARGERUSB_CIN_LIMIT_100 0x1 +#define CHARGERUSB_CIN_LIMIT_300 0x5 +#define CHARGERUSB_CIN_LIMIT_500 0x9 +#define CHARGERUSB_CIN_LIMIT_NONE 0xF + +/* CHARGERUSB_CTRLLIMIT2 */ +#define CHARGERUSB_CTRLLIMIT2_1500 0x0E +#define LOCK_LIMIT BIT(4) + +/* ANTICOLLAPSE_CTRL2 */ +#define BUCK_VTH_SHIFT 5 + +/* FG_REG_00 */ +#define CC_ACTIVE_MODE_SHIFT 6 +#define CC_AUTOCLEAR BIT(2) +#define CC_CAL_EN BIT(1) +#define CC_PAUSE BIT(0) + +#define REG_TOGGLE1 0x90 +#define REG_PWDNSTATUS1 0x93 +#define FGDITHS BIT(7) +#define FGDITHR BIT(6) +#define FGS BIT(5) +#define FGR BIT(4) +#define BBSPOR_CFG 0xE6 +#define BB_CHG_EN BIT(3) + +struct twl6030_charger_info { + struct device *dev; + struct power_supply *usb; + struct power_supply_battery_info *binfo; + struct work_struct work; + int irq_chg; + int input_current_limit; + struct iio_channel *channel_vusb; + struct delayed_work charger_monitor; + bool extended_current_range; +}; + +struct twl6030_charger_chip_data { + bool extended_current_range; +}; + +static int twl6030_charger_read(u8 reg, u8 *val) +{ + return twl_i2c_read_u8(TWL_MODULE_MAIN_CHARGE, val, reg); +} + +static int twl6030_charger_write(u8 reg, u8 val) +{ + return twl_i2c_write_u8(TWL_MODULE_MAIN_CHARGE, val, reg); +} + +static int twl6030_config_cinlimit_reg(struct twl6030_charger_info *charger, + unsigned int ua) +{ + if (ua >= 50000 && ua <= 750000) { + ua = (ua - 50000) / 50000; + } else if ((ua > 750000) && (ua <= 1500000) && charger->extended_current_range) { + ua = ((ua % 100000) ?
0x30 : 0x20) + ((ua - 100000) / 100000); + } else { + if (ua < 50000) { + dev_err(charger->dev, "invalid input current limit\n"); + return -EINVAL; + } + /* This is no current limit */ + ua = 0x0F; + } + + return twl6030_charger_write(CHARGERUSB_CINLIMIT, ua); +} + +/* + * rewriting all stuff here, resets to extremely conservative defaults were + * seen under some circumstances, like charge voltage to 3.5V + */ +static int twl6030_enable_usb(struct twl6030_charger_info *charger) +{ + int ret; + + ret = twl6030_charger_write(CHARGERUSB_VICHRG, + UA_TO_VICHRG(charger->binfo->constant_charge_current_max_ua)); + if (ret < 0) + return ret; + + ret = twl6030_charger_write(CONTROLLER_WDG, 0xff); + if (ret < 0) + return ret; + + charger->input_current_limit = 500000; + ret = twl6030_config_cinlimit_reg(charger, charger->input_current_limit); + if (ret < 0) + return ret; + + ret = twl6030_charger_write(CHARGERUSB_CINLIMIT, CHARGERUSB_CIN_LIMIT_500); + if (ret < 0) + return ret; + + ret = twl6030_charger_write(CHARGERUSB_VOREG, + UV_TO_VOREG(charger->binfo->constant_charge_voltage_max_uv)); + if (ret < 0) + return ret; + + ret = twl6030_charger_write(CHARGERUSB_CTRL1, TERM); + if (ret < 0) + return ret; + + if (charger->binfo->charge_term_current_ua != -EINVAL) { + ret = twl6030_charger_write(CHARGERUSB_CTRL2, + UA_TO_VITERM(charger->binfo->charge_term_current_ua)); + if (ret < 0) + return ret; + } + + return twl6030_charger_write(CONTROLLER_CTRL1, CONTROLLER_CTRL1_EN_CHARGER); +} + +static void twl6030_charger_wdg(struct work_struct *data) +{ + struct twl6030_charger_info *charger = + container_of(data, struct twl6030_charger_info, + charger_monitor.work); + + u8 val; + u8 int_stat; + u8 stat_int1; + u8 stat_int2; + + twl6030_charger_read(CONTROLLER_STAT1, &val); + twl6030_charger_read(CHARGERUSB_INT_STATUS, &int_stat); + twl6030_charger_read(CHARGERUSB_STATUS_INT1, &stat_int1); + twl6030_charger_read(CHARGERUSB_STATUS_INT2, &stat_int2); + dev_dbg(charger->dev, + "wdg: stat1: %02x %s INT_STATUS %02x STATUS_INT1 %02x STATUS_INT2 %02x\n", + val, (val & VBUS_DET) ? "usb online" : "usb offline", + int_stat, stat_int1, stat_int2); + + twl6030_charger_write(CONTROLLER_WDG, 0xff); + schedule_delayed_work(&charger->charger_monitor, + msecs_to_jiffies(10000)); +} + +static irqreturn_t twl6030_charger_interrupt(int irq, void *arg) +{ + struct twl6030_charger_info *charger = arg; + u8 val; + u8 int_stat; + u8 stat_int1; + u8 stat_int2; + + if (twl6030_charger_read(CONTROLLER_STAT1, &val) < 0) + return IRQ_HANDLED; + + if (twl6030_charger_read(CHARGERUSB_INT_STATUS, &int_stat) < 0) + return IRQ_HANDLED; + + if (twl6030_charger_read(CHARGERUSB_STATUS_INT1, &stat_int1) < 0) + return IRQ_HANDLED; + + if (twl6030_charger_read(CHARGERUSB_STATUS_INT2, &stat_int2) < 0) + return IRQ_HANDLED; + + dev_dbg(charger->dev, + "charger irq: stat1: %02x %s INT_STATUS %02x STATUS_INT1 %02x STATUS_INT2 %02x\n", + val, (val & VBUS_DET) ? 
"usb online" : "usb offline", + int_stat, stat_int1, stat_int2); + power_supply_changed(charger->usb); + + if (val & VBUS_DET) { + if (twl6030_charger_read(CONTROLLER_CTRL1, &val) < 0) + return IRQ_HANDLED; + + if (!(val & CONTROLLER_CTRL1_EN_CHARGER)) { + if (twl6030_enable_usb(charger) < 0) + return IRQ_HANDLED; + + schedule_delayed_work(&charger->charger_monitor, + msecs_to_jiffies(10000)); + } + } else { + cancel_delayed_work(&charger->charger_monitor); + } + return IRQ_HANDLED; +} + +static int twl6030_charger_usb_get_property(struct power_supply *psy, + enum power_supply_property psp, + union power_supply_propval *val) +{ + struct twl6030_charger_info *charger = power_supply_get_drvdata(psy); + int ret; + u8 stat1; + u8 intstat; + + ret = twl6030_charger_read(CONTROLLER_STAT1, &stat1); + if (ret) + return ret; + + switch (psp) { + case POWER_SUPPLY_PROP_STATUS: + if (!(stat1 & VBUS_DET)) { + val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING; + break; + } + ret = twl6030_charger_read(CHARGERUSB_STATUS_INT2, &intstat); + if (ret) + return ret; + + if (intstat & CHARGE_DONE) + val->intval = POWER_SUPPLY_STATUS_FULL; + else if (intstat & CURRENT_TERM) + val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING; + else + val->intval = POWER_SUPPLY_STATUS_CHARGING; + break; + case POWER_SUPPLY_PROP_VOLTAGE_NOW: + if (!charger->channel_vusb) + return -ENODATA; + + ret = iio_read_channel_processed_scale(charger->channel_vusb, &val->intval, 1000); + if (ret < 0) + return ret; + + break; + case POWER_SUPPLY_PROP_ONLINE: + val->intval = !!(stat1 & VBUS_DET); + break; + case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT: + val->intval = charger->input_current_limit; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int twl6030_charger_usb_set_property(struct power_supply *psy, + enum power_supply_property psp, + const union power_supply_propval *val) +{ + struct twl6030_charger_info *charger = power_supply_get_drvdata(psy); + + switch (psp) { + case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT: + charger->input_current_limit = val->intval; + return twl6030_config_cinlimit_reg(charger, charger->input_current_limit); + default: + return -EINVAL; + } + + return 0; +} + +static int twl6030_charger_usb_property_is_writeable(struct power_supply *psy, + enum power_supply_property psp) +{ + dev_info(&psy->dev, "is %d writeable?\n", (int)psp); + switch (psp) { + case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT: + return true; + default: + return false; + } +} + +static enum power_supply_property twl6030_charger_props[] = { + POWER_SUPPLY_PROP_STATUS, + POWER_SUPPLY_PROP_ONLINE, + POWER_SUPPLY_PROP_VOLTAGE_NOW, + POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT, +}; + +static const struct power_supply_desc twl6030_charger_usb_desc = { + .name = "twl6030_usb", + .type = POWER_SUPPLY_TYPE_USB, + .properties = twl6030_charger_props, + .num_properties = ARRAY_SIZE(twl6030_charger_props), + .get_property = twl6030_charger_usb_get_property, + .set_property = twl6030_charger_usb_set_property, + .property_is_writeable = twl6030_charger_usb_property_is_writeable, +}; + +static int twl6030_charger_probe(struct platform_device *pdev) +{ + struct twl6030_charger_info *charger; + const struct twl6030_charger_chip_data *chip_data; + struct power_supply_config psy_cfg = {}; + int ret; + u8 val; + + charger = devm_kzalloc(&pdev->dev, sizeof(*charger), GFP_KERNEL); + if (!charger) + return -ENOMEM; + + charger->dev = &pdev->dev; + charger->irq_chg = platform_get_irq(pdev, 0); + + chip_data = device_get_match_data(&pdev->dev); + if (!chip_data) + 
return dev_err_probe(&pdev->dev, -EINVAL, "missing chip data\n"); + + charger->extended_current_range = chip_data->extended_current_range; + platform_set_drvdata(pdev, charger); + psy_cfg.drv_data = charger; + psy_cfg.fwnode = dev_fwnode(&pdev->dev); + + charger->channel_vusb = devm_iio_channel_get(&pdev->dev, "vusb"); + if (IS_ERR(charger->channel_vusb)) { + ret = PTR_ERR(charger->channel_vusb); + if (ret == -EPROBE_DEFER) + return ret; /* iio not ready */ + dev_warn(&pdev->dev, "could not request vusb iio channel (%d)", + ret); + charger->channel_vusb = NULL; + } + + charger->usb = devm_power_supply_register(&pdev->dev, + &twl6030_charger_usb_desc, + &psy_cfg); + if (IS_ERR(charger->usb)) + return dev_err_probe(&pdev->dev, PTR_ERR(charger->usb), + "Failed to register usb\n"); + + ret = power_supply_get_battery_info(charger->usb, &charger->binfo); + if (ret < 0) + return dev_err_probe(&pdev->dev, ret, + "Failed to get battery info\n"); + + dev_info(&pdev->dev, "battery with vmax %d imax: %d\n", + charger->binfo->constant_charge_voltage_max_uv, + charger->binfo->constant_charge_current_max_ua); + + if (charger->binfo->constant_charge_voltage_max_uv == -EINVAL) { + ret = twl6030_charger_read(CHARGERUSB_CTRLLIMIT1, &val); + if (ret < 0) + return ret; + + charger->binfo->constant_charge_voltage_max_uv = + VOREG_TO_UV(val); + } + + if (charger->binfo->constant_charge_voltage_max_uv > 4760000 || + charger->binfo->constant_charge_voltage_max_uv < 350000) + return dev_err_probe(&pdev->dev, -EINVAL, + "Invalid charge voltage\n"); + + if (charger->binfo->constant_charge_current_max_ua == -EINVAL) { + ret = twl6030_charger_read(CHARGERUSB_CTRLLIMIT2, &val); + if (ret < 0) + return ret; + + charger->binfo->constant_charge_current_max_ua = VICHRG_TO_UA(val); + } + + if (charger->binfo->constant_charge_current_max_ua < 100000 || + charger->binfo->constant_charge_current_max_ua > 1500000) { + return dev_err_probe(&pdev->dev, -EINVAL, + "Invalid charge current\n"); + } + + if ((charger->binfo->charge_term_current_ua != -EINVAL) && + (charger->binfo->charge_term_current_ua > 400000 || + charger->binfo->charge_term_current_ua < 50000)) { + return dev_err_probe(&pdev->dev, -EINVAL, + "Invalid charge termination current\n"); + } + + ret = devm_delayed_work_autocancel(&pdev->dev, + &charger->charger_monitor, + twl6030_charger_wdg); + if (ret < 0) + return dev_err_probe(&pdev->dev, ret, + "Failed to register delayed work\n"); + + ret = devm_request_threaded_irq(&pdev->dev, charger->irq_chg, NULL, + twl6030_charger_interrupt, + IRQF_ONESHOT, pdev->name, + charger); + if (ret < 0) + return dev_err_probe(&pdev->dev, ret, + "could not request irq %d\n", + charger->irq_chg); + + /* turning to charging to configure things */ + twl6030_charger_write(CONTROLLER_CTRL1, 0); + twl6030_charger_interrupt(0, charger); + + return 0; +} + +static const struct twl6030_charger_chip_data twl6030_data = { + .extended_current_range = false, +}; + +static const struct twl6030_charger_chip_data twl6032_data = { + .extended_current_range = true, +}; + +static const struct of_device_id twl_charger_of_match[] = { + {.compatible = "ti,twl6030-charger", .data = &twl6030_data}, + {.compatible = "ti,twl6032-charger", .data = &twl6032_data}, + { } +}; +MODULE_DEVICE_TABLE(of, twl_charger_of_match); + +static struct platform_driver twl6030_charger_driver = { + .probe = twl6030_charger_probe, + .driver = { + .name = "twl6030_charger", + .of_match_table = twl_charger_of_match, + }, +}; +module_platform_driver(twl6030_charger_driver); + 
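The new driver above relies on a handful of arithmetic register encodings (UV_TO_VOREG/VOREG_TO_UV, UA_TO_VICHRG/VICHRG_TO_UA, UA_TO_VITERM, and the CHARGERUSB_CINLIMIT coding applied in twl6030_config_cinlimit_reg()). The following is a minimal standalone sketch, not part of the patch, that cross-checks those encodings against the constants the driver itself defines; the macros are copied verbatim from the hunk above, and the only extra assumption is a C11 compiler for static_assert.

#include <assert.h>

/* Copied from the twl6030 charger above; plain integer arithmetic. */
#define UV_TO_VOREG(x)  (((x) - 3500000) / 20000)
#define VOREG_TO_UV(x)  (((x) & 0x3F) * 20000 + 3500000)
#define UA_TO_VICHRG(x) (((x) / 100000) - 1)
#define VICHRG_TO_UA(x) (((x) & 0xf) * 100000 + 100000)
#define UA_TO_VITERM(x) (((x) / 50000 - 1) << 5)

/* 4.2 V charge voltage encodes to 0x23 (CHARGERUSB_VOREG_4P2) and back. */
static_assert(UV_TO_VOREG(4200000) == 0x23, "VOREG encode");
static_assert(VOREG_TO_UV(0x23) == 4200000, "VOREG decode");

/* 500 mA charge current: register code 4 round-trips to 500000 uA. */
static_assert(UA_TO_VICHRG(500000) == 4, "VICHRG encode");
static_assert(VICHRG_TO_UA(4) == 500000, "VICHRG decode");

/* 100 mA termination current is shifted into the upper bits (<< 5). */
static_assert(UA_TO_VITERM(100000) == 0x20, "VITERM encode");

/* Input current limit in the 50..750 mA range: 500 mA -> code 0x9,
 * i.e. CHARGERUSB_CIN_LIMIT_500, matching twl6030_config_cinlimit_reg(). */
static_assert((500000 - 50000) / 50000 == 0x9, "CINLIMIT encode");

The 4.2 V and 500 mA round-trips landing exactly on CHARGERUSB_VOREG_4P2 (0x23) and CHARGERUSB_CIN_LIMIT_500 (0x9) gives a quick consistency check on the conversion macros without touching hardware.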
+MODULE_DESCRIPTION("TWL6030 Battery Charger Interface driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/power/supply/wm831x_power.c b/drivers/power/supply/wm831x_power.c index d56e499ac59f..538055b29dec 100644 --- a/drivers/power/supply/wm831x_power.c +++ b/drivers/power/supply/wm831x_power.c @@ -720,7 +720,7 @@ static void wm831x_power_remove(struct platform_device *pdev) static struct platform_driver wm831x_power_driver = { .probe = wm831x_power_probe, - .remove_new = wm831x_power_remove, + .remove = wm831x_power_remove, .driver = { .name = "wm831x-power", }, diff --git a/drivers/power/supply/wm8350_power.c b/drivers/power/supply/wm8350_power.c index 3f79ab6f6abf..b0eb6e0ce8bc 100644 --- a/drivers/power/supply/wm8350_power.c +++ b/drivers/power/supply/wm8350_power.c @@ -577,7 +577,7 @@ static void wm8350_power_remove(struct platform_device *pdev) static struct platform_driver wm8350_power_driver = { .probe = wm8350_power_probe, - .remove_new = wm8350_power_remove, + .remove = wm8350_power_remove, .driver = { .name = "wm8350-power", }, diff --git a/drivers/power/supply/wm97xx_battery.c b/drivers/power/supply/wm97xx_battery.c index 1cc38d1437d9..b3b0c37a9dd2 100644 --- a/drivers/power/supply/wm97xx_battery.c +++ b/drivers/power/supply/wm97xx_battery.c @@ -265,7 +265,7 @@ static struct platform_driver wm97xx_bat_driver = { #endif }, .probe = wm97xx_bat_probe, - .remove_new = wm97xx_bat_remove, + .remove = wm97xx_bat_remove, }; module_platform_driver(wm97xx_bat_driver); diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 84d48e310aa8..8cb948a91e60 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -497,7 +497,8 @@ static int regulator_check_current_limit(struct regulator_dev *rdev, return -EPERM; } - if (*max_uA > rdev->constraints->max_uA) + if (*max_uA > rdev->constraints->max_uA && + rdev->constraints->max_uA) *max_uA = rdev->constraints->max_uA; if (*min_uA < rdev->constraints->min_uA) *min_uA = rdev->constraints->min_uA; diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c index 6c343b4b9d15..7870722b6ee2 100644 --- a/drivers/regulator/qcom-rpmh-regulator.c +++ b/drivers/regulator/qcom-rpmh-regulator.c @@ -843,26 +843,15 @@ static const struct rpmh_vreg_hw_data pmic5_ftsmps520 = { .of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode, }; -static const struct rpmh_vreg_hw_data pmic5_ftsmps525_lv = { +static const struct rpmh_vreg_hw_data pmic5_ftsmps525 = { .regulator_type = VRM, .ops = &rpmh_regulator_vrm_ops, .voltage_ranges = (struct linear_range[]) { REGULATOR_LINEAR_RANGE(300000, 0, 267, 4000), + REGULATOR_LINEAR_RANGE(1376000, 268, 438, 8000), }, - .n_linear_ranges = 1, - .n_voltages = 268, - .pmic_mode_map = pmic_mode_map_pmic5_smps, - .of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode, -}; - -static const struct rpmh_vreg_hw_data pmic5_ftsmps525_mv = { - .regulator_type = VRM, - .ops = &rpmh_regulator_vrm_ops, - .voltage_ranges = (struct linear_range[]) { - REGULATOR_LINEAR_RANGE(600000, 0, 267, 8000), - }, - .n_linear_ranges = 1, - .n_voltages = 268, + .n_linear_ranges = 2, + .n_voltages = 439, .pmic_mode_map = pmic_mode_map_pmic5_smps, .of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode, }; @@ -1190,12 +1179,12 @@ static const struct rpmh_vreg_init_data pm8550_vreg_data[] = { }; static const struct rpmh_vreg_init_data pm8550vs_vreg_data[] = { - RPMH_VREG("smps1", "smp%s1", &pmic5_ftsmps525_lv, "vdd-s1"), - RPMH_VREG("smps2", "smp%s2", &pmic5_ftsmps525_lv, "vdd-s2"), - RPMH_VREG("smps3", 
"smp%s3", &pmic5_ftsmps525_lv, "vdd-s3"), - RPMH_VREG("smps4", "smp%s4", &pmic5_ftsmps525_lv, "vdd-s4"), - RPMH_VREG("smps5", "smp%s5", &pmic5_ftsmps525_lv, "vdd-s5"), - RPMH_VREG("smps6", "smp%s6", &pmic5_ftsmps525_mv, "vdd-s6"), + RPMH_VREG("smps1", "smp%s1", &pmic5_ftsmps525, "vdd-s1"), + RPMH_VREG("smps2", "smp%s2", &pmic5_ftsmps525, "vdd-s2"), + RPMH_VREG("smps3", "smp%s3", &pmic5_ftsmps525, "vdd-s3"), + RPMH_VREG("smps4", "smp%s4", &pmic5_ftsmps525, "vdd-s4"), + RPMH_VREG("smps5", "smp%s5", &pmic5_ftsmps525, "vdd-s5"), + RPMH_VREG("smps6", "smp%s6", &pmic5_ftsmps525, "vdd-s6"), RPMH_VREG("ldo1", "ldo%s1", &pmic5_nldo515, "vdd-l1"), RPMH_VREG("ldo2", "ldo%s2", &pmic5_nldo515, "vdd-l2"), RPMH_VREG("ldo3", "ldo%s3", &pmic5_nldo515, "vdd-l3"), @@ -1203,14 +1192,14 @@ static const struct rpmh_vreg_init_data pm8550vs_vreg_data[] = { }; static const struct rpmh_vreg_init_data pm8550ve_vreg_data[] = { - RPMH_VREG("smps1", "smp%s1", &pmic5_ftsmps525_lv, "vdd-s1"), - RPMH_VREG("smps2", "smp%s2", &pmic5_ftsmps525_lv, "vdd-s2"), - RPMH_VREG("smps3", "smp%s3", &pmic5_ftsmps525_lv, "vdd-s3"), - RPMH_VREG("smps4", "smp%s4", &pmic5_ftsmps525_mv, "vdd-s4"), - RPMH_VREG("smps5", "smp%s5", &pmic5_ftsmps525_lv, "vdd-s5"), - RPMH_VREG("smps6", "smp%s6", &pmic5_ftsmps525_lv, "vdd-s6"), - RPMH_VREG("smps7", "smp%s7", &pmic5_ftsmps525_lv, "vdd-s7"), - RPMH_VREG("smps8", "smp%s8", &pmic5_ftsmps525_lv, "vdd-s8"), + RPMH_VREG("smps1", "smp%s1", &pmic5_ftsmps525, "vdd-s1"), + RPMH_VREG("smps2", "smp%s2", &pmic5_ftsmps525, "vdd-s2"), + RPMH_VREG("smps3", "smp%s3", &pmic5_ftsmps525, "vdd-s3"), + RPMH_VREG("smps4", "smp%s4", &pmic5_ftsmps525, "vdd-s4"), + RPMH_VREG("smps5", "smp%s5", &pmic5_ftsmps525, "vdd-s5"), + RPMH_VREG("smps6", "smp%s6", &pmic5_ftsmps525, "vdd-s6"), + RPMH_VREG("smps7", "smp%s7", &pmic5_ftsmps525, "vdd-s7"), + RPMH_VREG("smps8", "smp%s8", &pmic5_ftsmps525, "vdd-s8"), RPMH_VREG("ldo1", "ldo%s1", &pmic5_nldo515, "vdd-l1"), RPMH_VREG("ldo2", "ldo%s2", &pmic5_nldo515, "vdd-l2"), RPMH_VREG("ldo3", "ldo%s3", &pmic5_nldo515, "vdd-l3"), @@ -1218,14 +1207,14 @@ static const struct rpmh_vreg_init_data pm8550ve_vreg_data[] = { }; static const struct rpmh_vreg_init_data pmc8380_vreg_data[] = { - RPMH_VREG("smps1", "smp%s1", &pmic5_ftsmps525_lv, "vdd-s1"), - RPMH_VREG("smps2", "smp%s2", &pmic5_ftsmps525_lv, "vdd-s2"), - RPMH_VREG("smps3", "smp%s3", &pmic5_ftsmps525_lv, "vdd-s3"), - RPMH_VREG("smps4", "smp%s4", &pmic5_ftsmps525_mv, "vdd-s4"), - RPMH_VREG("smps5", "smp%s5", &pmic5_ftsmps525_lv, "vdd-s5"), - RPMH_VREG("smps6", "smp%s6", &pmic5_ftsmps525_lv, "vdd-s6"), - RPMH_VREG("smps7", "smp%s7", &pmic5_ftsmps525_lv, "vdd-s7"), - RPMH_VREG("smps8", "smp%s8", &pmic5_ftsmps525_lv, "vdd-s8"), + RPMH_VREG("smps1", "smp%s1", &pmic5_ftsmps525, "vdd-s1"), + RPMH_VREG("smps2", "smp%s2", &pmic5_ftsmps525, "vdd-s2"), + RPMH_VREG("smps3", "smp%s3", &pmic5_ftsmps525, "vdd-s3"), + RPMH_VREG("smps4", "smp%s4", &pmic5_ftsmps525, "vdd-s4"), + RPMH_VREG("smps5", "smp%s5", &pmic5_ftsmps525, "vdd-s5"), + RPMH_VREG("smps6", "smp%s6", &pmic5_ftsmps525, "vdd-s6"), + RPMH_VREG("smps7", "smp%s7", &pmic5_ftsmps525, "vdd-s7"), + RPMH_VREG("smps8", "smp%s8", &pmic5_ftsmps525, "vdd-s8"), RPMH_VREG("ldo1", "ldo%s1", &pmic5_nldo515, "vdd-l1"), RPMH_VREG("ldo2", "ldo%s2", &pmic5_nldo515, "vdd-l2"), RPMH_VREG("ldo3", "ldo%s3", &pmic5_nldo515, "vdd-l3"), @@ -1409,16 +1398,16 @@ static const struct rpmh_vreg_init_data pmx65_vreg_data[] = { }; static const struct rpmh_vreg_init_data pmx75_vreg_data[] = { - RPMH_VREG("smps1", "smp%s1", 
&pmic5_ftsmps525_lv, "vdd-s1"), - RPMH_VREG("smps2", "smp%s2", &pmic5_ftsmps525_lv, "vdd-s2"), - RPMH_VREG("smps3", "smp%s3", &pmic5_ftsmps525_lv, "vdd-s3"), - RPMH_VREG("smps4", "smp%s4", &pmic5_ftsmps525_mv, "vdd-s4"), - RPMH_VREG("smps5", "smp%s5", &pmic5_ftsmps525_lv, "vdd-s5"), - RPMH_VREG("smps6", "smp%s6", &pmic5_ftsmps525_lv, "vdd-s6"), - RPMH_VREG("smps7", "smp%s7", &pmic5_ftsmps525_lv, "vdd-s7"), - RPMH_VREG("smps8", "smp%s8", &pmic5_ftsmps525_lv, "vdd-s8"), - RPMH_VREG("smps9", "smp%s9", &pmic5_ftsmps525_lv, "vdd-s9"), - RPMH_VREG("smps10", "smp%s10", &pmic5_ftsmps525_lv, "vdd-s10"), + RPMH_VREG("smps1", "smp%s1", &pmic5_ftsmps525, "vdd-s1"), + RPMH_VREG("smps2", "smp%s2", &pmic5_ftsmps525, "vdd-s2"), + RPMH_VREG("smps3", "smp%s3", &pmic5_ftsmps525, "vdd-s3"), + RPMH_VREG("smps4", "smp%s4", &pmic5_ftsmps525, "vdd-s4"), + RPMH_VREG("smps5", "smp%s5", &pmic5_ftsmps525, "vdd-s5"), + RPMH_VREG("smps6", "smp%s6", &pmic5_ftsmps525, "vdd-s6"), + RPMH_VREG("smps7", "smp%s7", &pmic5_ftsmps525, "vdd-s7"), + RPMH_VREG("smps8", "smp%s8", &pmic5_ftsmps525, "vdd-s8"), + RPMH_VREG("smps9", "smp%s9", &pmic5_ftsmps525, "vdd-s9"), + RPMH_VREG("smps10", "smp%s10", &pmic5_ftsmps525, "vdd-s10"), RPMH_VREG("ldo1", "ldo%s1", &pmic5_nldo515, "vdd-l1"), RPMH_VREG("ldo2", "ldo%s2", &pmic5_nldo515, "vdd-l2-18"), RPMH_VREG("ldo3", "ldo%s3", &pmic5_nldo515, "vdd-l3"), diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index e14638936de6..26e1ea1940ec 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -453,7 +453,7 @@ static void ap_tasklet_fn(unsigned long dummy) * important that no requests on any AP get lost. */ if (ap_irq_flag) - xchg(ap_airq.lsi_ptr, 0); + WRITE_ONCE(*ap_airq.lsi_ptr, 0); spin_lock_bh(&ap_queues_lock); hash_for_each(ap_queues, bkt, aq, hnode) { diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c index 8c0b40d8eb39..a52c2690933f 100644 --- a/drivers/s390/crypto/vfio_ap_ops.c +++ b/drivers/s390/crypto/vfio_ap_ops.c @@ -360,10 +360,26 @@ static int vfio_ap_validate_nib(struct kvm_vcpu *vcpu, dma_addr_t *nib) return 0; } -static int ensure_nib_shared(unsigned long addr, struct gmap *gmap) +/** + * ensure_nib_shared() - Ensure the address of the NIB is secure and shared + * @addr: the physical (absolute) address of the NIB + * + * This function checks whether the NIB page, which has been pinned with + * vfio_pin_pages(), is a shared page belonging to a secure guest. + * + * It will call uv_pin_shared() on it; if the page was already pinned shared + * (i.e. if the NIB belongs to a secure guest and is shared), then 0 + * (success) is returned. If the NIB was not shared, vfio_pin_pages() had + * exported it and now it does not belong to the secure guest anymore. In + * that case, an error is returned. + * + * Context: the NIB (at physical address @addr) has to be pinned with + * vfio_pin_pages() before calling this function. + * + * Return: 0 in case of success, otherwise an error < 0. + */ +static int ensure_nib_shared(unsigned long addr) { - int ret; - /* * The nib has to be located in shared storage since guest and * host access it. vfio_pin_pages() will do a pin shared and @@ -374,12 +390,7 @@ static int ensure_nib_shared(unsigned long addr, struct gmap *gmap) * * If the page is already pinned shared the UV will return a success. 
*/ - ret = uv_pin_shared(addr); - if (ret) { - /* vfio_pin_pages() likely exported the page so let's re-import */ - gmap_convert_to_secure(gmap, addr); - } - return ret; + return uv_pin_shared(addr); } /** @@ -425,6 +436,7 @@ static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q, return status; } + /* The pin will probably be successful even if the NIB was not shared */ ret = vfio_pin_pages(&q->matrix_mdev->vdev, nib, 1, IOMMU_READ | IOMMU_WRITE, &h_page); switch (ret) { @@ -447,7 +459,7 @@ static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q, /* NIB in non-shared storage is a rc 6 for PV guests */ if (kvm_s390_pv_cpu_is_protected(vcpu) && - ensure_nib_shared(h_nib & PAGE_MASK, kvm->arch.gmap)) { + ensure_nib_shared(h_nib & PAGE_MASK)) { vfio_unpin_pages(&q->matrix_mdev->vdev, nib, 1); status.response_code = AP_RESPONSE_INVALID_ADDRESS; return status; diff --git a/drivers/soundwire/amd_init.c b/drivers/soundwire/amd_init.c index 53d1d707ca1a..d11b60efda33 100644 --- a/drivers/soundwire/amd_init.c +++ b/drivers/soundwire/amd_init.c @@ -121,6 +121,7 @@ static struct sdw_amd_ctx *sdw_amd_probe_controller(struct sdw_amd_res *res) sdw_pdata[index].instance = index; sdw_pdata[index].acp_sdw_lock = res->acp_lock; + sdw_pdata[index].acp_rev = res->acp_rev; pdevinfo[index].name = "amd_sdw_manager"; pdevinfo[index].id = index; pdevinfo[index].parent = res->parent; diff --git a/drivers/soundwire/amd_manager.c b/drivers/soundwire/amd_manager.c index 0d01849c3586..5a4bfaef65fb 100644 --- a/drivers/soundwire/amd_manager.c +++ b/drivers/soundwire/amd_manager.c @@ -433,12 +433,18 @@ static int amd_sdw_port_params(struct sdw_bus *bus, struct sdw_port_params *p_pa u32 frame_fmt_reg, dpn_frame_fmt; dev_dbg(amd_manager->dev, "p_params->num:0x%x\n", p_params->num); - switch (amd_manager->instance) { - case ACP_SDW0: - frame_fmt_reg = sdw0_manager_dp_reg[p_params->num].frame_fmt_reg; - break; - case ACP_SDW1: - frame_fmt_reg = sdw1_manager_dp_reg[p_params->num].frame_fmt_reg; + switch (amd_manager->acp_rev) { + case ACP63_PCI_REV_ID: + switch (amd_manager->instance) { + case ACP_SDW0: + frame_fmt_reg = acp63_sdw0_dp_reg[p_params->num].frame_fmt_reg; + break; + case ACP_SDW1: + frame_fmt_reg = acp63_sdw1_dp_reg[p_params->num].frame_fmt_reg; + break; + default: + return -EINVAL; + } break; default: return -EINVAL; @@ -465,20 +471,28 @@ static int amd_sdw_transport_params(struct sdw_bus *bus, u32 frame_fmt_reg, sample_int_reg, hctrl_dp0_reg; u32 offset_reg, lane_ctrl_ch_en_reg; - switch (amd_manager->instance) { - case ACP_SDW0: - frame_fmt_reg = sdw0_manager_dp_reg[params->port_num].frame_fmt_reg; - sample_int_reg = sdw0_manager_dp_reg[params->port_num].sample_int_reg; - hctrl_dp0_reg = sdw0_manager_dp_reg[params->port_num].hctrl_dp0_reg; - offset_reg = sdw0_manager_dp_reg[params->port_num].offset_reg; - lane_ctrl_ch_en_reg = sdw0_manager_dp_reg[params->port_num].lane_ctrl_ch_en_reg; - break; - case ACP_SDW1: - frame_fmt_reg = sdw1_manager_dp_reg[params->port_num].frame_fmt_reg; - sample_int_reg = sdw1_manager_dp_reg[params->port_num].sample_int_reg; - hctrl_dp0_reg = sdw1_manager_dp_reg[params->port_num].hctrl_dp0_reg; - offset_reg = sdw1_manager_dp_reg[params->port_num].offset_reg; - lane_ctrl_ch_en_reg = sdw1_manager_dp_reg[params->port_num].lane_ctrl_ch_en_reg; + switch (amd_manager->acp_rev) { + case ACP63_PCI_REV_ID: + switch (amd_manager->instance) { + case ACP_SDW0: + frame_fmt_reg = acp63_sdw0_dp_reg[params->port_num].frame_fmt_reg; + sample_int_reg = 
acp63_sdw0_dp_reg[params->port_num].sample_int_reg; + hctrl_dp0_reg = acp63_sdw0_dp_reg[params->port_num].hctrl_dp0_reg; + offset_reg = acp63_sdw0_dp_reg[params->port_num].offset_reg; + lane_ctrl_ch_en_reg = + acp63_sdw0_dp_reg[params->port_num].lane_ctrl_ch_en_reg; + break; + case ACP_SDW1: + frame_fmt_reg = acp63_sdw1_dp_reg[params->port_num].frame_fmt_reg; + sample_int_reg = acp63_sdw1_dp_reg[params->port_num].sample_int_reg; + hctrl_dp0_reg = acp63_sdw1_dp_reg[params->port_num].hctrl_dp0_reg; + offset_reg = acp63_sdw1_dp_reg[params->port_num].offset_reg; + lane_ctrl_ch_en_reg = + acp63_sdw1_dp_reg[params->port_num].lane_ctrl_ch_en_reg; + break; + default: + return -EINVAL; + } break; default: return -EINVAL; @@ -520,12 +534,20 @@ static int amd_sdw_port_enable(struct sdw_bus *bus, u32 dpn_ch_enable; u32 lane_ctrl_ch_en_reg; - switch (amd_manager->instance) { - case ACP_SDW0: - lane_ctrl_ch_en_reg = sdw0_manager_dp_reg[enable_ch->port_num].lane_ctrl_ch_en_reg; - break; - case ACP_SDW1: - lane_ctrl_ch_en_reg = sdw1_manager_dp_reg[enable_ch->port_num].lane_ctrl_ch_en_reg; + switch (amd_manager->acp_rev) { + case ACP63_PCI_REV_ID: + switch (amd_manager->instance) { + case ACP_SDW0: + lane_ctrl_ch_en_reg = + acp63_sdw0_dp_reg[enable_ch->port_num].lane_ctrl_ch_en_reg; + break; + case ACP_SDW1: + lane_ctrl_ch_en_reg = + acp63_sdw1_dp_reg[enable_ch->port_num].lane_ctrl_ch_en_reg; + break; + default: + return -EINVAL; + } break; default: return -EINVAL; @@ -910,6 +932,7 @@ static int amd_sdw_manager_probe(struct platform_device *pdev) amd_manager->mmio = amd_manager->acp_mmio + (amd_manager->instance * SDW_MANAGER_REG_OFFSET); amd_manager->acp_sdw_lock = pdata->acp_sdw_lock; + amd_manager->acp_rev = pdata->acp_rev; amd_manager->cols_index = sdw_find_col_index(AMD_SDW_DEFAULT_COLUMNS); amd_manager->rows_index = sdw_find_row_index(AMD_SDW_DEFAULT_ROWS); amd_manager->dev = dev; @@ -926,15 +949,21 @@ static int amd_sdw_manager_probe(struct platform_device *pdev) * information. 
*/ amd_manager->bus.controller_id = 0; - - switch (amd_manager->instance) { - case ACP_SDW0: - amd_manager->num_dout_ports = AMD_SDW0_MAX_TX_PORTS; - amd_manager->num_din_ports = AMD_SDW0_MAX_RX_PORTS; - break; - case ACP_SDW1: - amd_manager->num_dout_ports = AMD_SDW1_MAX_TX_PORTS; - amd_manager->num_din_ports = AMD_SDW1_MAX_RX_PORTS; + dev_dbg(dev, "acp_rev:0x%x\n", amd_manager->acp_rev); + switch (amd_manager->acp_rev) { + case ACP63_PCI_REV_ID: + switch (amd_manager->instance) { + case ACP_SDW0: + amd_manager->num_dout_ports = AMD_ACP63_SDW0_MAX_TX_PORTS; + amd_manager->num_din_ports = AMD_ACP63_SDW0_MAX_RX_PORTS; + break; + case ACP_SDW1: + amd_manager->num_dout_ports = AMD_ACP63_SDW1_MAX_TX_PORTS; + amd_manager->num_din_ports = AMD_ACP63_SDW1_MAX_RX_PORTS; + break; + default: + return -EINVAL; + } break; default: return -EINVAL; diff --git a/drivers/soundwire/amd_manager.h b/drivers/soundwire/amd_manager.h index 707065468e05..cc2170e4521e 100644 --- a/drivers/soundwire/amd_manager.h +++ b/drivers/soundwire/amd_manager.h @@ -155,12 +155,12 @@ #define AMD_SDW_IRQ_MASK_8TO11 0x000c7777 #define AMD_SDW_IRQ_ERROR_MASK 0xff #define AMD_SDW_MAX_FREQ_NUM 1 -#define AMD_SDW0_MAX_TX_PORTS 3 -#define AMD_SDW0_MAX_RX_PORTS 3 -#define AMD_SDW1_MAX_TX_PORTS 1 -#define AMD_SDW1_MAX_RX_PORTS 1 -#define AMD_SDW0_MAX_DAI 6 -#define AMD_SDW1_MAX_DAI 2 +#define AMD_ACP63_SDW0_MAX_TX_PORTS 3 +#define AMD_ACP63_SDW0_MAX_RX_PORTS 3 +#define AMD_ACP63_SDW1_MAX_TX_PORTS 1 +#define AMD_ACP63_SDW1_MAX_RX_PORTS 1 +#define AMD_ACP63_SDW0_MAX_DAI 6 +#define AMD_ACP63_SDW1_MAX_DAI 2 #define AMD_SDW_SLAVE_0_ATTACHED 5 #define AMD_SDW_SSP_COUNTER_VAL 3 @@ -222,7 +222,7 @@ struct sdw_manager_dp_reg { * in SoundWire DMA driver. */ -static struct sdw_manager_dp_reg sdw0_manager_dp_reg[AMD_SDW0_MAX_DAI] = { +static struct sdw_manager_dp_reg acp63_sdw0_dp_reg[AMD_ACP63_SDW0_MAX_DAI] = { {ACP_SW_AUDIO0_TX_FRAME_FORMAT, ACP_SW_AUDIO0_TX_SAMPLEINTERVAL, ACP_SW_AUDIO0_TX_HCTRL_DP0, ACP_SW_AUDIO0_TX_OFFSET_DP0, ACP_SW_AUDIO0_TX_CHANNEL_ENABLE_DP0}, {ACP_SW_AUDIO1_TX_FRAME_FORMAT, ACP_SW_AUDIO1_TX_SAMPLEINTERVAL, ACP_SW_AUDIO1_TX_HCTRL, @@ -237,7 +237,7 @@ static struct sdw_manager_dp_reg sdw0_manager_dp_reg[AMD_SDW0_MAX_DAI] = { ACP_SW_AUDIO2_RX_OFFSET, ACP_SW_AUDIO2_RX_CHANNEL_ENABLE_DP0}, }; -static struct sdw_manager_dp_reg sdw1_manager_dp_reg[AMD_SDW1_MAX_DAI] = { +static struct sdw_manager_dp_reg acp63_sdw1_dp_reg[AMD_ACP63_SDW1_MAX_DAI] = { {ACP_SW_AUDIO1_TX_FRAME_FORMAT, ACP_SW_AUDIO1_TX_SAMPLEINTERVAL, ACP_SW_AUDIO1_TX_HCTRL, ACP_SW_AUDIO1_TX_OFFSET, ACP_SW_AUDIO1_TX_CHANNEL_ENABLE_DP0}, {ACP_SW_AUDIO1_RX_FRAME_FORMAT, ACP_SW_AUDIO1_RX_SAMPLEINTERVAL, ACP_SW_AUDIO1_RX_HCTRL, diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c index 263ca32f0c5c..d1dc62c34f1c 100644 --- a/drivers/soundwire/bus.c +++ b/drivers/soundwire/bus.c @@ -112,7 +112,7 @@ int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent, /* Set higher order bits */ *bus->assigned = ~GENMASK(SDW_BROADCAST_DEV_NUM, SDW_ENUM_DEV_NUM); - /* Set enumuration device number and broadcast device number */ + /* Set enumeration device number and broadcast device number */ set_bit(SDW_ENUM_DEV_NUM, bus->assigned); set_bit(SDW_BROADCAST_DEV_NUM, bus->assigned); diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c index 05652e983539..f367670ea991 100644 --- a/drivers/soundwire/cadence_master.c +++ b/drivers/soundwire/cadence_master.c @@ -1378,6 +1378,31 @@ static void cdns_init_clock_ctrl(struct sdw_cdns *cdns) } /** + 
* sdw_cdns_soft_reset() - Cadence soft-reset + * @cdns: Cadence instance + */ +int sdw_cdns_soft_reset(struct sdw_cdns *cdns) +{ + int ret; + + cdns_updatel(cdns, CDNS_MCP_CONTROL, CDNS_MCP_CONTROL_SOFT_RST, + CDNS_MCP_CONTROL_SOFT_RST); + + ret = cdns_config_update(cdns); + if (ret < 0) { + dev_err(cdns->dev, "%s: config update failed\n", __func__); + return ret; + } + + ret = cdns_set_wait(cdns, CDNS_MCP_CONTROL, CDNS_MCP_CONTROL_SOFT_RST, 0); + if (ret < 0) + dev_err(cdns->dev, "%s: Soft Reset timed out\n", __func__); + + return ret; +} +EXPORT_SYMBOL(sdw_cdns_soft_reset); + +/** * sdw_cdns_init() - Cadence initialization * @cdns: Cadence instance */ @@ -1400,6 +1425,11 @@ int sdw_cdns_init(struct sdw_cdns *cdns) cdns_ip_updatel(cdns, CDNS_IP_MCP_CONTROL, CDNS_IP_MCP_CONTROL_CMD_ACCEPT, CDNS_IP_MCP_CONTROL_CMD_ACCEPT); + /* disable wakeup */ + cdns_ip_updatel(cdns, CDNS_IP_MCP_CONTROL, + CDNS_IP_MCP_CONTROL_BLOCK_WAKEUP, + 0); + /* Configure mcp config */ val = cdns_readl(cdns, CDNS_MCP_CONFIG); diff --git a/drivers/soundwire/cadence_master.h b/drivers/soundwire/cadence_master.h index e1d7969ba48a..c34fb050fe4f 100644 --- a/drivers/soundwire/cadence_master.h +++ b/drivers/soundwire/cadence_master.h @@ -168,6 +168,7 @@ int sdw_cdns_probe(struct sdw_cdns *cdns); irqreturn_t sdw_cdns_irq(int irq, void *dev_id); irqreturn_t sdw_cdns_thread(int irq, void *dev_id); +int sdw_cdns_soft_reset(struct sdw_cdns *cdns); int sdw_cdns_init(struct sdw_cdns *cdns); int sdw_cdns_pdi_init(struct sdw_cdns *cdns, struct sdw_cdns_stream_config config); diff --git a/drivers/soundwire/intel_auxdevice.c b/drivers/soundwire/intel_auxdevice.c index ae689d5d1ab9..599954d92752 100644 --- a/drivers/soundwire/intel_auxdevice.c +++ b/drivers/soundwire/intel_auxdevice.c @@ -41,6 +41,10 @@ static int md_flags; module_param_named(sdw_md_flags, md_flags, int, 0444); MODULE_PARM_DESC(sdw_md_flags, "SoundWire Intel Master device flags (0x0 all off)"); +static int mclk_divider; +module_param_named(sdw_mclk_divider, mclk_divider, int, 0444); +MODULE_PARM_DESC(sdw_mclk_divider, "SoundWire Intel mclk divider"); + struct wake_capable_part { const u16 mfg_id; const u16 part_id; @@ -142,8 +146,12 @@ static int sdw_master_read_intel_prop(struct sdw_bus *bus) "intel-sdw-ip-clock", &prop->mclk_freq); - /* the values reported by BIOS are the 2x clock, not the bus clock */ - prop->mclk_freq /= 2; + if (mclk_divider) + /* use kernel parameter for BIOS or board work-arounds */ + prop->mclk_freq /= mclk_divider; + else + /* the values reported by BIOS are the 2x clock, not the bus clock */ + prop->mclk_freq /= 2; fwnode_property_read_u32(link, "intel-quirk-mask", diff --git a/drivers/soundwire/intel_bus_common.c b/drivers/soundwire/intel_bus_common.c index d3ff6c65b64c..ad1f8ebdbfc9 100644 --- a/drivers/soundwire/intel_bus_common.c +++ b/drivers/soundwire/intel_bus_common.c @@ -16,6 +16,12 @@ int intel_start_bus(struct sdw_intel *sdw) struct sdw_bus *bus = &cdns->bus; int ret; + ret = sdw_cdns_soft_reset(cdns); + if (ret < 0) { + dev_err(dev, "%s: unable to soft-reset Cadence IP: %d\n", __func__, ret); + return ret; + } + /* * follow recommended programming flows to avoid timeouts when * gsync is enabled diff --git a/drivers/soundwire/mipi_disco.c b/drivers/soundwire/mipi_disco.c index e5d9df26d4dc..9d59f486edbe 100644 --- a/drivers/soundwire/mipi_disco.c +++ b/drivers/soundwire/mipi_disco.c @@ -23,6 +23,26 @@ #include <linux/soundwire/sdw.h> #include "bus.h" +static bool mipi_fwnode_property_read_bool(const struct fwnode_handle *fwnode, 
+ const char *propname) +{ + int ret; + u8 val; + + if (!fwnode_property_present(fwnode, propname)) + return false; + ret = fwnode_property_read_u8_array(fwnode, propname, &val, 1); + if (ret < 0) + return false; + return !!val; +} + +static bool mipi_device_property_read_bool(const struct device *dev, + const char *propname) +{ + return mipi_fwnode_property_read_bool(dev_fwnode(dev), propname); +} + /** * sdw_master_read_prop() - Read Master properties * @bus: SDW bus instance @@ -31,8 +51,11 @@ int sdw_master_read_prop(struct sdw_bus *bus) { struct sdw_master_prop *prop = &bus->prop; struct fwnode_handle *link; + const char *scales_prop; char name[32]; - int nval, i; + int nval; + int ret; + int i; device_property_read_u32(bus->dev, "mipi-sdw-sw-interface-revision", @@ -48,11 +71,11 @@ int sdw_master_read_prop(struct sdw_bus *bus) return -EIO; } - if (fwnode_property_read_bool(link, + if (mipi_fwnode_property_read_bool(link, "mipi-sdw-clock-stop-mode0-supported")) prop->clk_stop_modes |= BIT(SDW_CLK_STOP_MODE0); - if (fwnode_property_read_bool(link, + if (mipi_fwnode_property_read_bool(link, "mipi-sdw-clock-stop-mode1-supported")) prop->clk_stop_modes |= BIT(SDW_CLK_STOP_MODE1); @@ -71,9 +94,11 @@ int sdw_master_read_prop(struct sdw_bus *bus) return -ENOMEM; } - fwnode_property_read_u32_array(link, + ret = fwnode_property_read_u32_array(link, "mipi-sdw-clock-frequencies-supported", prop->clk_freq, prop->num_clk_freq); + if (ret < 0) + return ret; } /* @@ -88,7 +113,12 @@ int sdw_master_read_prop(struct sdw_bus *bus) } } - nval = fwnode_property_count_u32(link, "mipi-sdw-supported-clock-gears"); + scales_prop = "mipi-sdw-supported-clock-scales"; + nval = fwnode_property_count_u32(link, scales_prop); + if (nval == 0) { + scales_prop = "mipi-sdw-supported-clock-gears"; + nval = fwnode_property_count_u32(link, scales_prop); + } if (nval > 0) { prop->num_clk_gears = nval; prop->clk_gears = devm_kcalloc(bus->dev, prop->num_clk_gears, @@ -99,10 +129,12 @@ int sdw_master_read_prop(struct sdw_bus *bus) return -ENOMEM; } - fwnode_property_read_u32_array(link, - "mipi-sdw-supported-clock-gears", + ret = fwnode_property_read_u32_array(link, + scales_prop, prop->clk_gears, prop->num_clk_gears); + if (ret < 0) + return ret; } fwnode_property_read_u32(link, "mipi-sdw-default-frame-rate", @@ -114,7 +146,7 @@ int sdw_master_read_prop(struct sdw_bus *bus) fwnode_property_read_u32(link, "mipi-sdw-default-frame-col-size", &prop->default_col); - prop->dynamic_frame = fwnode_property_read_bool(link, + prop->dynamic_frame = mipi_fwnode_property_read_bool(link, "mipi-sdw-dynamic-frame-shape"); fwnode_property_read_u32(link, "mipi-sdw-command-error-threshold", @@ -131,6 +163,7 @@ static int sdw_slave_read_dp0(struct sdw_slave *slave, struct sdw_dp0_prop *dp0) { int nval; + int ret; fwnode_property_read_u32(port, "mipi-sdw-port-max-wordlength", &dp0->max_word); @@ -148,20 +181,38 @@ static int sdw_slave_read_dp0(struct sdw_slave *slave, if (!dp0->words) return -ENOMEM; - fwnode_property_read_u32_array(port, + ret = fwnode_property_read_u32_array(port, "mipi-sdw-port-wordlength-configs", dp0->words, dp0->num_words); + if (ret < 0) + return ret; } - dp0->BRA_flow_controlled = fwnode_property_read_bool(port, + dp0->BRA_flow_controlled = mipi_fwnode_property_read_bool(port, "mipi-sdw-bra-flow-controlled"); - dp0->simple_ch_prep_sm = fwnode_property_read_bool(port, + dp0->simple_ch_prep_sm = mipi_fwnode_property_read_bool(port, "mipi-sdw-simplified-channel-prepare-sm"); - dp0->imp_def_interrupts = 
fwnode_property_read_bool(port, + dp0->imp_def_interrupts = mipi_fwnode_property_read_bool(port, "mipi-sdw-imp-def-dp0-interrupts-supported"); + nval = fwnode_property_count_u32(port, "mipi-sdw-lane-list"); + if (nval > 0) { + dp0->num_lanes = nval; + dp0->lane_list = devm_kcalloc(&slave->dev, + dp0->num_lanes, sizeof(*dp0->lane_list), + GFP_KERNEL); + if (!dp0->lane_list) + return -ENOMEM; + + ret = fwnode_property_read_u32_array(port, + "mipi-sdw-lane-list", + dp0->lane_list, dp0->num_lanes); + if (ret < 0) + return ret; + } + return 0; } @@ -171,9 +222,10 @@ static int sdw_slave_read_dpn(struct sdw_slave *slave, { struct fwnode_handle *node; u32 bit, i = 0; - int nval; unsigned long addr; char name[40]; + int nval; + int ret; addr = ports; /* valid ports are 1 to 14 so apply mask */ @@ -208,9 +260,11 @@ static int sdw_slave_read_dpn(struct sdw_slave *slave, return -ENOMEM; } - fwnode_property_read_u32_array(node, + ret = fwnode_property_read_u32_array(node, "mipi-sdw-port-wordlength-configs", dpn[i].words, dpn[i].num_words); + if (ret < 0) + return ret; } fwnode_property_read_u32(node, "mipi-sdw-data-port-type", @@ -220,7 +274,7 @@ static int sdw_slave_read_dpn(struct sdw_slave *slave, "mipi-sdw-max-grouping-supported", &dpn[i].max_grouping); - dpn[i].simple_ch_prep_sm = fwnode_property_read_bool(node, + dpn[i].simple_ch_prep_sm = mipi_fwnode_property_read_bool(node, "mipi-sdw-simplified-channelprepare-sm"); fwnode_property_read_u32(node, @@ -249,9 +303,11 @@ static int sdw_slave_read_dpn(struct sdw_slave *slave, return -ENOMEM; } - fwnode_property_read_u32_array(node, + ret = fwnode_property_read_u32_array(node, "mipi-sdw-channel-number-list", dpn[i].channels, dpn[i].num_channels); + if (ret < 0) + return ret; } nval = fwnode_property_count_u32(node, "mipi-sdw-channel-combination-list"); @@ -266,10 +322,12 @@ static int sdw_slave_read_dpn(struct sdw_slave *slave, return -ENOMEM; } - fwnode_property_read_u32_array(node, + ret = fwnode_property_read_u32_array(node, "mipi-sdw-channel-combination-list", dpn[i].ch_combinations, dpn[i].num_ch_combinations); + if (ret < 0) + return ret; } fwnode_property_read_u32(node, @@ -278,13 +336,27 @@ static int sdw_slave_read_dpn(struct sdw_slave *slave, fwnode_property_read_u32(node, "mipi-sdw-max-async-buffer", &dpn[i].max_async_buffer); - dpn[i].block_pack_mode = fwnode_property_read_bool(node, + dpn[i].block_pack_mode = mipi_fwnode_property_read_bool(node, "mipi-sdw-block-packing-mode"); fwnode_property_read_u32(node, "mipi-sdw-port-encoding-type", &dpn[i].port_encoding); - /* TODO: Read audio mode */ + nval = fwnode_property_count_u32(node, "mipi-sdw-lane-list"); + if (nval > 0) { + dpn[i].num_lanes = nval; + dpn[i].lane_list = devm_kcalloc(&slave->dev, + dpn[i].num_lanes, sizeof(*dpn[i].lane_list), + GFP_KERNEL); + if (!dpn[i].lane_list) + return -ENOMEM; + + ret = fwnode_property_read_u32_array(node, + "mipi-sdw-lane-list", + dpn[i].lane_list, dpn[i].num_lanes); + if (ret < 0) + return ret; + } fwnode_handle_put(node); @@ -304,42 +376,46 @@ int sdw_slave_read_prop(struct sdw_slave *slave) struct device *dev = &slave->dev; struct fwnode_handle *port; int nval; + int ret; device_property_read_u32(dev, "mipi-sdw-sw-interface-revision", &prop->mipi_revision); - prop->wake_capable = device_property_read_bool(dev, + prop->wake_capable = mipi_device_property_read_bool(dev, "mipi-sdw-wake-up-unavailable"); prop->wake_capable = !prop->wake_capable; - prop->test_mode_capable = device_property_read_bool(dev, + prop->test_mode_capable = 
mipi_device_property_read_bool(dev, "mipi-sdw-test-mode-supported"); prop->clk_stop_mode1 = false; - if (device_property_read_bool(dev, + if (mipi_device_property_read_bool(dev, "mipi-sdw-clock-stop-mode1-supported")) prop->clk_stop_mode1 = true; - prop->simple_clk_stop_capable = device_property_read_bool(dev, + prop->simple_clk_stop_capable = mipi_device_property_read_bool(dev, "mipi-sdw-simplified-clockstopprepare-sm-supported"); device_property_read_u32(dev, "mipi-sdw-clockstopprepare-timeout", &prop->clk_stop_timeout); - device_property_read_u32(dev, "mipi-sdw-slave-channelprepare-timeout", - &prop->ch_prep_timeout); + ret = device_property_read_u32(dev, "mipi-sdw-peripheral-channelprepare-timeout", + &prop->ch_prep_timeout); + if (ret < 0) + device_property_read_u32(dev, "mipi-sdw-slave-channelprepare-timeout", + &prop->ch_prep_timeout); device_property_read_u32(dev, "mipi-sdw-clockstopprepare-hard-reset-behavior", &prop->reset_behave); - prop->high_PHY_capable = device_property_read_bool(dev, + prop->high_PHY_capable = mipi_device_property_read_bool(dev, "mipi-sdw-highPHY-capable"); - prop->paging_support = device_property_read_bool(dev, + prop->paging_support = mipi_device_property_read_bool(dev, "mipi-sdw-paging-support"); - prop->bank_delay_support = device_property_read_bool(dev, + prop->bank_delay_support = mipi_device_property_read_bool(dev, "mipi-sdw-bank-delay-support"); device_property_read_u32(dev, @@ -354,7 +430,17 @@ int sdw_slave_read_prop(struct sdw_slave *slave) device_property_read_u32(dev, "mipi-sdw-sink-port-list", &prop->sink_ports); - /* Read dp0 properties */ + device_property_read_u32(dev, "mipi-sdw-sdca-interrupt-register-list", + &prop->sdca_interrupt_register_list); + + prop->commit_register_supported = mipi_device_property_read_bool(dev, + "mipi-sdw-commit-register-supported"); + + /* + * Read dp0 properties - we don't rely on the 'mipi-sdw-dp-0-supported' + * property since the 'mipi-sdw-dp0-subproperties' property is logically + * equivalent. + */ port = device_get_named_child_node(dev, "mipi-sdw-dp-0-subproperties"); if (!port) { dev_dbg(dev, "DP0 node not found!!\n"); diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c index aed57002fd0e..2b403b14066c 100644 --- a/drivers/soundwire/qcom.c +++ b/drivers/soundwire/qcom.c @@ -1173,7 +1173,7 @@ static int qcom_swrm_stream_alloc_ports(struct qcom_swrm_ctrl *ctrl, else sconfig.direction = SDW_DATA_DIR_RX; - /* hw parameters wil be ignored as we only support PDM */ + /* hw parameters will be ignored as we only support PDM */ sconfig.ch_count = 1; sconfig.frame_rate = params_rate(params); sconfig.type = stream->type; diff --git a/drivers/soundwire/sysfs_slave.c b/drivers/soundwire/sysfs_slave.c index f4259710dd0f..c5c22d1708ec 100644 --- a/drivers/soundwire/sysfs_slave.c +++ b/drivers/soundwire/sysfs_slave.c @@ -215,7 +215,7 @@ const struct attribute_group *sdw_attr_groups[] = { /* * the status is shown in capital letters for UNATTACHED and RESERVED - * on purpose, to highligh users to the fact that these status values + * on purpose, to highlight users to the fact that these status values * are not expected. 
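The mipi_fwnode_property_read_bool()/mipi_device_property_read_bool() helpers added above differ from the generic fwnode_property_read_bool() in that they read the property value instead of only testing for its presence; DisCo properties coming from ACPI typically encode booleans as 0/1 integers, so a property that is present but set to 0 must still read as false. A minimal sketch of the resulting semantics, mirroring the wake-capable handling above (illustrative only, not part of the patch):

	/*
	 * Property present in the firmware node but set to 0:
	 *   fwnode_property_read_bool()      -> true  (presence check only)
	 *   mipi_device_property_read_bool() -> false (the u8 value is read and tested)
	 */
	prop->wake_capable = !mipi_device_property_read_bool(dev,
				"mipi-sdw-wake-up-unavailable");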
*/ static const char *const slave_status[] = { diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c index 91108ddfaef2..316bce577081 100644 --- a/drivers/spi/atmel-quadspi.c +++ b/drivers/spi/atmel-quadspi.c @@ -183,7 +183,7 @@ static const char *atmel_qspi_reg_name(u32 offset, char *tmp, size_t sz) case QSPI_MR: return "MR"; case QSPI_RD: - return "MR"; + return "RD"; case QSPI_TD: return "TD"; case QSPI_SR: diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c index 0b6b0151b3a3..eeb7d082c247 100644 --- a/drivers/spi/spi-imx.c +++ b/drivers/spi/spi-imx.c @@ -1685,7 +1685,7 @@ static unsigned int spi_imx_transfer_estimate_time_us(struct spi_transfer *trans words = DIV_ROUND_UP(transfer->len * BITS_PER_BYTE, transfer->bits_per_word); word_delay_us = DIV_ROUND_CLOSEST(spi_delay_to_ns(&transfer->word_delay, transfer), NSEC_PER_USEC); - result += words * word_delay_us; + result += (u64)words * word_delay_us; } return min(result, U32_MAX); diff --git a/drivers/spi/spi-rockchip-sfc.c b/drivers/spi/spi-rockchip-sfc.c index 316e3db88492..69d0f2175568 100644 --- a/drivers/spi/spi-rockchip-sfc.c +++ b/drivers/spi/spi-rockchip-sfc.c @@ -503,7 +503,7 @@ static int rockchip_sfc_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op rockchip_sfc_adjust_op_work((struct spi_mem_op *)op); rockchip_sfc_xfer_setup(sfc, mem, op, len); if (len) { - if (likely(sfc->use_dma) && len >= SFC_DMA_TRANS_THRETHOLD) { + if (likely(sfc->use_dma) && len >= SFC_DMA_TRANS_THRETHOLD && !(len & 0x3)) { init_completion(&sfc->cp); rockchip_sfc_irq_unmask(sfc, SFC_IMR_DMA); ret = rockchip_sfc_xfer_data_dma(sfc, op, len); diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 460a49d9a0de..ff1add2ecb91 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -424,6 +424,16 @@ static int spi_probe(struct device *dev) spi->irq = 0; } + if (has_acpi_companion(dev) && spi->irq < 0) { + struct acpi_device *adev = to_acpi_device_node(dev->fwnode); + + spi->irq = acpi_dev_gpio_irq_get(adev, 0); + if (spi->irq == -EPROBE_DEFER) + return -EPROBE_DEFER; + if (spi->irq < 0) + spi->irq = 0; + } + ret = dev_pm_domain_attach(dev, true); if (ret) return ret; @@ -2866,9 +2876,6 @@ static acpi_status acpi_register_spi_device(struct spi_controller *ctlr, acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias, sizeof(spi->modalias)); - if (spi->irq < 0) - spi->irq = acpi_dev_gpio_irq_get(adev, 0); - acpi_device_set_enumerated(adev); adev->power.flags.ignore_parent = true; diff --git a/drivers/thermal/gov_power_allocator.c b/drivers/thermal/gov_power_allocator.c index ac6fa6b8f99f..3b644de3292e 100644 --- a/drivers/thermal/gov_power_allocator.c +++ b/drivers/thermal/gov_power_allocator.c @@ -588,10 +588,15 @@ static void allow_maximum_power(struct thermal_zone_device *tz) static int check_power_actors(struct thermal_zone_device *tz, struct power_allocator_params *params) { - const struct thermal_trip_desc *td = trip_to_trip_desc(params->trip_max); + const struct thermal_trip_desc *td; struct thermal_instance *instance; int ret = 0; + if (!params->trip_max) + return 0; + + td = trip_to_trip_desc(params->trip_max); + list_for_each_entry(instance, &td->thermal_instances, trip_node) { if (!cdev_is_power_actor(instance->cdev)) { dev_warn(&tz->device, "power_allocator: %s is not a power actor\n", diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c index b2fc02c3a767..8660ef2175be 100644 --- 
a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c +++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c @@ -75,11 +75,6 @@ struct odvp_attr { static BIN_ATTR_SIMPLE_RO(data_vault); -static struct bin_attribute *data_attributes[] = { - &bin_attr_data_vault, - NULL, -}; - static ssize_t imok_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { @@ -108,10 +103,6 @@ static const struct attribute_group imok_attribute_group = { .attrs = imok_attr, }; -static const struct attribute_group data_attribute_group = { - .bin_attrs = data_attributes, -}; - static ssize_t available_uuids_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -137,7 +128,7 @@ static ssize_t current_uuid_show(struct device *dev, struct int3400_thermal_priv *priv = dev_get_drvdata(dev); int i, length = 0; - if (priv->current_uuid_index > 0) + if (priv->current_uuid_index >= 0) return sprintf(buf, "%s\n", int3400_thermal_uuids[priv->current_uuid_index]); @@ -624,8 +615,7 @@ static int int3400_thermal_probe(struct platform_device *pdev) } if (!ZERO_OR_NULL_PTR(priv->data_vault)) { - result = sysfs_create_group(&pdev->dev.kobj, - &data_attribute_group); + result = device_create_bin_file(&pdev->dev, &bin_attr_data_vault); if (result) goto free_uuid; } @@ -648,7 +638,7 @@ free_notify: free_sysfs: cleanup_odvp(priv); if (!ZERO_OR_NULL_PTR(priv->data_vault)) { - sysfs_remove_group(&pdev->dev.kobj, &data_attribute_group); + device_remove_bin_file(&pdev->dev, &bin_attr_data_vault); kfree(priv->data_vault); } free_uuid: @@ -683,7 +673,7 @@ static void int3400_thermal_remove(struct platform_device *pdev) acpi_thermal_rel_misc_device_remove(priv->adev->handle); if (!ZERO_OR_NULL_PTR(priv->data_vault)) - sysfs_remove_group(&pdev->dev.kobj, &data_attribute_group); + device_remove_bin_file(&pdev->dev, &bin_attr_data_vault); sysfs_remove_group(&pdev->dev.kobj, &uuid_attribute_group); sysfs_remove_group(&pdev->dev.kobj, &imok_attribute_group); thermal_zone_device_unregister(priv->thermal); diff --git a/drivers/thermal/qcom/qcom-spmi-adc-tm5.c b/drivers/thermal/qcom/qcom-spmi-adc-tm5.c index 5e94a45eba3e..d7f2e6ca92c2 100644 --- a/drivers/thermal/qcom/qcom-spmi-adc-tm5.c +++ b/drivers/thermal/qcom/qcom-spmi-adc-tm5.c @@ -938,7 +938,6 @@ static const struct adc_tm5_data adc_tm5_gen2_data_pmic = { static int adc_tm5_get_dt_data(struct adc_tm5_chip *adc_tm, struct device_node *node) { struct adc_tm5_channel *channels; - struct device_node *child; u32 value; int ret; struct device *dev = adc_tm->dev; @@ -982,12 +981,10 @@ static int adc_tm5_get_dt_data(struct adc_tm5_chip *adc_tm, struct device_node * adc_tm->avg_samples = VADC_DEF_AVG_SAMPLES; } - for_each_available_child_of_node(node, child) { + for_each_available_child_of_node_scoped(node, child) { ret = adc_tm5_get_dt_channel_data(adc_tm, channels, child); - if (ret) { - of_node_put(child); + if (ret) return ret; - } channels++; } diff --git a/drivers/thermal/sun8i_thermal.c b/drivers/thermal/sun8i_thermal.c index 3203d8bd13a8..22674790629a 100644 --- a/drivers/thermal/sun8i_thermal.c +++ b/drivers/thermal/sun8i_thermal.c @@ -9,6 +9,7 @@ */ #include <linux/bitmap.h> +#include <linux/cleanup.h> #include <linux/clk.h> #include <linux/device.h> #include <linux/interrupt.h> @@ -348,19 +349,18 @@ static void sun8i_ths_reset_control_assert(void *data) static struct regmap *sun8i_ths_get_sram_regmap(struct device_node *node) { - struct device_node *sram_node; struct platform_device *sram_pdev; struct regmap *regmap = NULL; - 
sram_node = of_parse_phandle(node, "allwinner,sram", 0); + struct device_node *sram_node __free(device_node) = + of_parse_phandle(node, "allwinner,sram", 0); if (!sram_node) return ERR_PTR(-ENODEV); sram_pdev = of_find_device_by_node(sram_node); if (!sram_pdev) { /* platform device might not be probed yet */ - regmap = ERR_PTR(-EPROBE_DEFER); - goto out_put_node; + return ERR_PTR(-EPROBE_DEFER); } /* If no regmap is found then the other device driver is at fault */ @@ -369,8 +369,7 @@ static struct regmap *sun8i_ths_get_sram_regmap(struct device_node *node) regmap = ERR_PTR(-EINVAL); platform_device_put(sram_pdev); -out_put_node: - of_node_put(sram_node); + return regmap; } diff --git a/drivers/thermal/tegra/soctherm.c b/drivers/thermal/tegra/soctherm.c index 6f1501e3fcc4..2c5ddf0db40c 100644 --- a/drivers/thermal/tegra/soctherm.c +++ b/drivers/thermal/tegra/soctherm.c @@ -1651,7 +1651,7 @@ static void soctherm_init_hw_throt_cdev(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct tegra_soctherm *ts = dev_get_drvdata(dev); - struct device_node *np_stc, *np_stcc; + struct device_node *np_stc; const char *name; int i; @@ -1668,7 +1668,7 @@ static void soctherm_init_hw_throt_cdev(struct platform_device *pdev) return; } - for_each_child_of_node(np_stc, np_stcc) { + for_each_child_of_node_scoped(np_stc, np_stcc) { struct soctherm_throt_cfg *stc; struct thermal_cooling_device *tcd; int err; @@ -1683,7 +1683,6 @@ static void soctherm_init_hw_throt_cdev(struct platform_device *pdev) if (stc->init) { dev_err(dev, "throttle-cfg: %s: redefined!\n", name); - of_node_put(np_stcc); break; } diff --git a/drivers/thermal/thermal_of.c b/drivers/thermal/thermal_of.c index 07e09897165f..fab11b98ca49 100644 --- a/drivers/thermal/thermal_of.c +++ b/drivers/thermal/thermal_of.c @@ -95,13 +95,11 @@ static int thermal_of_populate_trip(struct device_node *np, static struct thermal_trip *thermal_of_trips_init(struct device_node *np, int *ntrips) { - struct thermal_trip *tt; - struct device_node *trips; int ret, count; *ntrips = 0; - trips = of_get_child_by_name(np, "trips"); + struct device_node *trips __free(device_node) = of_get_child_by_name(np, "trips"); if (!trips) return NULL; @@ -109,39 +107,27 @@ static struct thermal_trip *thermal_of_trips_init(struct device_node *np, int *n if (!count) return NULL; - tt = kzalloc(sizeof(*tt) * count, GFP_KERNEL); - if (!tt) { - ret = -ENOMEM; - goto out_of_node_put; - } - - *ntrips = count; + struct thermal_trip *tt __free(kfree) = kzalloc(sizeof(*tt) * count, GFP_KERNEL); + if (!tt) + return ERR_PTR(-ENOMEM); count = 0; for_each_child_of_node_scoped(trips, trip) { ret = thermal_of_populate_trip(trip, &tt[count++]); if (ret) - goto out_kfree; + return ERR_PTR(ret); } - of_node_put(trips); - - return tt; - -out_kfree: - kfree(tt); -out_of_node_put: - of_node_put(trips); + *ntrips = count; - return ERR_PTR(ret); + return no_free_ptr(tt); } static struct device_node *of_thermal_zone_find(struct device_node *sensor, int id) { - struct device_node *np, *tz; struct of_phandle_args sensor_specs; - np = of_find_node_by_name(NULL, "thermal-zones"); + struct device_node *np __free(device_node) = of_find_node_by_name(NULL, "thermal-zones"); if (!np) { pr_debug("No thermal zones description\n"); return ERR_PTR(-ENODEV); @@ -159,8 +145,7 @@ static struct device_node *of_thermal_zone_find(struct device_node *sensor, int "#thermal-sensor-cells"); if (count <= 0) { pr_err("%pOFn: missing thermal sensor\n", child); - tz = ERR_PTR(-EINVAL); - goto out; + return 
ERR_PTR(-EINVAL); } for (i = 0; i < count; i++) { @@ -172,22 +157,18 @@ static struct device_node *of_thermal_zone_find(struct device_node *sensor, int i, &sensor_specs); if (ret < 0) { pr_err("%pOFn: Failed to read thermal-sensors cells: %d\n", child, ret); - tz = ERR_PTR(ret); - goto out; + return ERR_PTR(ret); } if ((sensor == sensor_specs.np) && id == (sensor_specs.args_count ? sensor_specs.args[0] : 0)) { pr_debug("sensor %pOFn id=%d belongs to %pOFn\n", sensor, id, child); - tz = no_free_ptr(child); - goto out; + return no_free_ptr(child); } } } - tz = ERR_PTR(-ENODEV); -out: - of_node_put(np); - return tz; + + return ERR_PTR(-ENODEV); } static int thermal_of_monitor_init(struct device_node *np, int *delay, int *pdelay) @@ -297,7 +278,7 @@ static bool thermal_of_should_bind(struct thermal_zone_device *tz, struct thermal_cooling_device *cdev, struct cooling_spec *c) { - struct device_node *tz_np, *cm_np, *child; + struct device_node *tz_np, *cm_np; bool result = false; tz_np = thermal_of_zone_get_by_name(tz); @@ -311,7 +292,7 @@ static bool thermal_of_should_bind(struct thermal_zone_device *tz, goto out; /* Look up the trip and the cdev in the cooling maps. */ - for_each_child_of_node(cm_np, child) { + for_each_child_of_node_scoped(cm_np, child) { struct device_node *tr_np; int count, i; @@ -330,7 +311,6 @@ static bool thermal_of_should_bind(struct thermal_zone_device *tz, break; } - of_node_put(child); break; } diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c index 7d0c83b5b071..8455f08f5d40 100644 --- a/drivers/vdpa/mlx5/core/mr.c +++ b/drivers/vdpa/mlx5/core/mr.c @@ -368,7 +368,6 @@ static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr unsigned long lgcd = 0; int log_entity_size; unsigned long size; - u64 start = 0; int err; struct page *pg; unsigned int nsg; @@ -379,10 +378,9 @@ static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr struct device *dma = mvdev->vdev.dma_dev; for (map = vhost_iotlb_itree_first(iotlb, mr->start, mr->end - 1); - map; map = vhost_iotlb_itree_next(map, start, mr->end - 1)) { + map; map = vhost_iotlb_itree_next(map, mr->start, mr->end - 1)) { size = maplen(map, mr); lgcd = gcd(lgcd, size); - start += size; } log_entity_size = ilog2(lgcd); diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c index 0d632ba5d2a3..451c639299eb 100644 --- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c +++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c @@ -486,31 +486,11 @@ static int vf_qm_load_data(struct hisi_acc_vf_core_device *hisi_acc_vdev, return 0; } -static int vf_qm_state_save(struct hisi_acc_vf_core_device *hisi_acc_vdev, - struct hisi_acc_vf_migration_file *migf) +static int vf_qm_read_data(struct hisi_qm *vf_qm, struct acc_vf_data *vf_data) { - struct acc_vf_data *vf_data = &migf->vf_data; - struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm; struct device *dev = &vf_qm->pdev->dev; int ret; - if (unlikely(qm_wait_dev_not_ready(vf_qm))) { - /* Update state and return with match data */ - vf_data->vf_qm_state = QM_NOT_READY; - hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state; - migf->total_length = QM_MATCH_SIZE; - return 0; - } - - vf_data->vf_qm_state = QM_READY; - hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state; - - ret = vf_qm_cache_wb(vf_qm); - if (ret) { - dev_err(dev, "failed to writeback QM Cache!\n"); - return ret; - } - ret = qm_get_regs(vf_qm, vf_data); if (ret) return -EINVAL; @@ -536,6 +516,38 @@ static int 
vf_qm_state_save(struct hisi_acc_vf_core_device *hisi_acc_vdev, return -EINVAL; } + return 0; +} + +static int vf_qm_state_save(struct hisi_acc_vf_core_device *hisi_acc_vdev, + struct hisi_acc_vf_migration_file *migf) +{ + struct acc_vf_data *vf_data = &migf->vf_data; + struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm; + struct device *dev = &vf_qm->pdev->dev; + int ret; + + if (unlikely(qm_wait_dev_not_ready(vf_qm))) { + /* Update state and return with match data */ + vf_data->vf_qm_state = QM_NOT_READY; + hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state; + migf->total_length = QM_MATCH_SIZE; + return 0; + } + + vf_data->vf_qm_state = QM_READY; + hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state; + + ret = vf_qm_cache_wb(vf_qm); + if (ret) { + dev_err(dev, "failed to writeback QM Cache!\n"); + return ret; + } + + ret = vf_qm_read_data(vf_qm, vf_data); + if (ret) + return -EINVAL; + migf->total_length = sizeof(struct acc_vf_data); return 0; } @@ -615,21 +627,43 @@ static void hisi_acc_vf_disable_fd(struct hisi_acc_vf_migration_file *migf) mutex_unlock(&migf->lock); } +static void +hisi_acc_debug_migf_copy(struct hisi_acc_vf_core_device *hisi_acc_vdev, + struct hisi_acc_vf_migration_file *src_migf) +{ + struct hisi_acc_vf_migration_file *dst_migf = hisi_acc_vdev->debug_migf; + + if (!dst_migf) + return; + + dst_migf->total_length = src_migf->total_length; + memcpy(&dst_migf->vf_data, &src_migf->vf_data, + sizeof(struct acc_vf_data)); +} + static void hisi_acc_vf_disable_fds(struct hisi_acc_vf_core_device *hisi_acc_vdev) { if (hisi_acc_vdev->resuming_migf) { + hisi_acc_debug_migf_copy(hisi_acc_vdev, hisi_acc_vdev->resuming_migf); hisi_acc_vf_disable_fd(hisi_acc_vdev->resuming_migf); fput(hisi_acc_vdev->resuming_migf->filp); hisi_acc_vdev->resuming_migf = NULL; } if (hisi_acc_vdev->saving_migf) { + hisi_acc_debug_migf_copy(hisi_acc_vdev, hisi_acc_vdev->saving_migf); hisi_acc_vf_disable_fd(hisi_acc_vdev->saving_migf); fput(hisi_acc_vdev->saving_migf->filp); hisi_acc_vdev->saving_migf = NULL; } } +static struct hisi_acc_vf_core_device *hisi_acc_get_vf_dev(struct vfio_device *vdev) +{ + return container_of(vdev, struct hisi_acc_vf_core_device, + core_device.vdev); +} + static void hisi_acc_vf_reset(struct hisi_acc_vf_core_device *hisi_acc_vdev) { hisi_acc_vdev->vf_qm_state = QM_NOT_READY; @@ -1031,8 +1065,7 @@ static struct file * hisi_acc_vfio_pci_set_device_state(struct vfio_device *vdev, enum vfio_device_mig_state new_state) { - struct hisi_acc_vf_core_device *hisi_acc_vdev = container_of(vdev, - struct hisi_acc_vf_core_device, core_device.vdev); + struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_get_vf_dev(vdev); enum vfio_device_mig_state next_state; struct file *res = NULL; int ret; @@ -1073,8 +1106,7 @@ static int hisi_acc_vfio_pci_get_device_state(struct vfio_device *vdev, enum vfio_device_mig_state *curr_state) { - struct hisi_acc_vf_core_device *hisi_acc_vdev = container_of(vdev, - struct hisi_acc_vf_core_device, core_device.vdev); + struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_get_vf_dev(vdev); mutex_lock(&hisi_acc_vdev->state_mutex); *curr_state = hisi_acc_vdev->mig_state; @@ -1276,10 +1308,132 @@ static long hisi_acc_vfio_pci_ioctl(struct vfio_device *core_vdev, unsigned int return vfio_pci_core_ioctl(core_vdev, cmd, arg); } +static int hisi_acc_vf_debug_check(struct seq_file *seq, struct vfio_device *vdev) +{ + struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_get_vf_dev(vdev); + struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm; + int ret; + + 
lockdep_assert_held(&hisi_acc_vdev->open_mutex); + /* + * When the device is not opened, the io_base is not mapped. + * The driver cannot perform device read and write operations. + */ + if (!hisi_acc_vdev->dev_opened) { + seq_puts(seq, "device not opened!\n"); + return -EINVAL; + } + + ret = qm_wait_dev_not_ready(vf_qm); + if (ret) { + seq_puts(seq, "VF device not ready!\n"); + return -EBUSY; + } + + return 0; +} + +static int hisi_acc_vf_debug_cmd(struct seq_file *seq, void *data) +{ + struct device *vf_dev = seq->private; + struct vfio_pci_core_device *core_device = dev_get_drvdata(vf_dev); + struct vfio_device *vdev = &core_device->vdev; + struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_get_vf_dev(vdev); + struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm; + u64 value; + int ret; + + mutex_lock(&hisi_acc_vdev->open_mutex); + ret = hisi_acc_vf_debug_check(seq, vdev); + if (ret) { + mutex_unlock(&hisi_acc_vdev->open_mutex); + return ret; + } + + value = readl(vf_qm->io_base + QM_MB_CMD_SEND_BASE); + if (value == QM_MB_CMD_NOT_READY) { + mutex_unlock(&hisi_acc_vdev->open_mutex); + seq_puts(seq, "mailbox cmd channel not ready!\n"); + return -EINVAL; + } + mutex_unlock(&hisi_acc_vdev->open_mutex); + seq_puts(seq, "mailbox cmd channel ready!\n"); + + return 0; +} + +static int hisi_acc_vf_dev_read(struct seq_file *seq, void *data) +{ + struct device *vf_dev = seq->private; + struct vfio_pci_core_device *core_device = dev_get_drvdata(vf_dev); + struct vfio_device *vdev = &core_device->vdev; + struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_get_vf_dev(vdev); + size_t vf_data_sz = offsetofend(struct acc_vf_data, padding); + struct acc_vf_data *vf_data; + int ret; + + mutex_lock(&hisi_acc_vdev->open_mutex); + ret = hisi_acc_vf_debug_check(seq, vdev); + if (ret) { + mutex_unlock(&hisi_acc_vdev->open_mutex); + return ret; + } + + mutex_lock(&hisi_acc_vdev->state_mutex); + vf_data = kzalloc(sizeof(*vf_data), GFP_KERNEL); + if (!vf_data) { + ret = -ENOMEM; + goto mutex_release; + } + + vf_data->vf_qm_state = hisi_acc_vdev->vf_qm_state; + ret = vf_qm_read_data(&hisi_acc_vdev->vf_qm, vf_data); + if (ret) + goto migf_err; + + seq_hex_dump(seq, "Dev Data:", DUMP_PREFIX_OFFSET, 16, 1, + (const void *)vf_data, vf_data_sz, false); + + seq_printf(seq, + "guest driver load: %u\n" + "data size: %lu\n", + hisi_acc_vdev->vf_qm_state, + sizeof(struct acc_vf_data)); + +migf_err: + kfree(vf_data); +mutex_release: + mutex_unlock(&hisi_acc_vdev->state_mutex); + mutex_unlock(&hisi_acc_vdev->open_mutex); + + return ret; +} + +static int hisi_acc_vf_migf_read(struct seq_file *seq, void *data) +{ + struct device *vf_dev = seq->private; + struct vfio_pci_core_device *core_device = dev_get_drvdata(vf_dev); + struct vfio_device *vdev = &core_device->vdev; + struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_get_vf_dev(vdev); + size_t vf_data_sz = offsetofend(struct acc_vf_data, padding); + struct hisi_acc_vf_migration_file *debug_migf = hisi_acc_vdev->debug_migf; + + /* Check whether the live migration operation has been performed */ + if (debug_migf->total_length < QM_MATCH_SIZE) { + seq_puts(seq, "device not migrated!\n"); + return -EAGAIN; + } + + seq_hex_dump(seq, "Mig Data:", DUMP_PREFIX_OFFSET, 16, 1, + (const void *)&debug_migf->vf_data, vf_data_sz, false); + seq_printf(seq, "migrate data length: %lu\n", debug_migf->total_length); + + return 0; +} + static int hisi_acc_vfio_pci_open_device(struct vfio_device *core_vdev) { - struct hisi_acc_vf_core_device *hisi_acc_vdev = container_of(core_vdev, - 
struct hisi_acc_vf_core_device, core_device.vdev); + struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_get_vf_dev(core_vdev); struct vfio_pci_core_device *vdev = &hisi_acc_vdev->core_device; int ret; @@ -1288,12 +1442,16 @@ static int hisi_acc_vfio_pci_open_device(struct vfio_device *core_vdev) return ret; if (core_vdev->mig_ops) { + mutex_lock(&hisi_acc_vdev->open_mutex); ret = hisi_acc_vf_qm_init(hisi_acc_vdev); if (ret) { + mutex_unlock(&hisi_acc_vdev->open_mutex); vfio_pci_core_disable(vdev); return ret; } hisi_acc_vdev->mig_state = VFIO_DEVICE_STATE_RUNNING; + hisi_acc_vdev->dev_opened = true; + mutex_unlock(&hisi_acc_vdev->open_mutex); } vfio_pci_core_finish_enable(vdev); @@ -1302,11 +1460,13 @@ static int hisi_acc_vfio_pci_open_device(struct vfio_device *core_vdev) static void hisi_acc_vfio_pci_close_device(struct vfio_device *core_vdev) { - struct hisi_acc_vf_core_device *hisi_acc_vdev = container_of(core_vdev, - struct hisi_acc_vf_core_device, core_device.vdev); + struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_get_vf_dev(core_vdev); struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm; + mutex_lock(&hisi_acc_vdev->open_mutex); + hisi_acc_vdev->dev_opened = false; iounmap(vf_qm->io_base); + mutex_unlock(&hisi_acc_vdev->open_mutex); vfio_pci_core_close_device(core_vdev); } @@ -1318,8 +1478,7 @@ static const struct vfio_migration_ops hisi_acc_vfio_pci_migrn_state_ops = { static int hisi_acc_vfio_pci_migrn_init_dev(struct vfio_device *core_vdev) { - struct hisi_acc_vf_core_device *hisi_acc_vdev = container_of(core_vdev, - struct hisi_acc_vf_core_device, core_device.vdev); + struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_get_vf_dev(core_vdev); struct pci_dev *pdev = to_pci_dev(core_vdev->dev); struct hisi_qm *pf_qm = hisi_acc_get_pf_qm(pdev); @@ -1327,6 +1486,7 @@ static int hisi_acc_vfio_pci_migrn_init_dev(struct vfio_device *core_vdev) hisi_acc_vdev->pf_qm = pf_qm; hisi_acc_vdev->vf_dev = pdev; mutex_init(&hisi_acc_vdev->state_mutex); + mutex_init(&hisi_acc_vdev->open_mutex); core_vdev->migration_flags = VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_PRE_COPY; core_vdev->mig_ops = &hisi_acc_vfio_pci_migrn_state_ops; @@ -1372,6 +1532,47 @@ static const struct vfio_device_ops hisi_acc_vfio_pci_ops = { .detach_ioas = vfio_iommufd_physical_detach_ioas, }; +static void hisi_acc_vfio_debug_init(struct hisi_acc_vf_core_device *hisi_acc_vdev) +{ + struct vfio_device *vdev = &hisi_acc_vdev->core_device.vdev; + struct hisi_acc_vf_migration_file *migf; + struct dentry *vfio_dev_migration; + struct dentry *vfio_hisi_acc; + struct device *dev = vdev->dev; + + if (!debugfs_initialized() || + !IS_ENABLED(CONFIG_VFIO_DEBUGFS)) + return; + + if (vdev->ops != &hisi_acc_vfio_pci_migrn_ops) + return; + + vfio_dev_migration = debugfs_lookup("migration", vdev->debug_root); + if (!vfio_dev_migration) { + dev_err(dev, "failed to lookup migration debugfs file!\n"); + return; + } + + migf = kzalloc(sizeof(*migf), GFP_KERNEL); + if (!migf) + return; + hisi_acc_vdev->debug_migf = migf; + + vfio_hisi_acc = debugfs_create_dir("hisi_acc", vfio_dev_migration); + debugfs_create_devm_seqfile(dev, "dev_data", vfio_hisi_acc, + hisi_acc_vf_dev_read); + debugfs_create_devm_seqfile(dev, "migf_data", vfio_hisi_acc, + hisi_acc_vf_migf_read); + debugfs_create_devm_seqfile(dev, "cmd_state", vfio_hisi_acc, + hisi_acc_vf_debug_cmd); +} + +static void hisi_acc_vf_debugfs_exit(struct hisi_acc_vf_core_device *hisi_acc_vdev) +{ + kfree(hisi_acc_vdev->debug_migf); + hisi_acc_vdev->debug_migf = NULL; +} + static int 
hisi_acc_vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct hisi_acc_vf_core_device *hisi_acc_vdev; @@ -1398,6 +1599,8 @@ static int hisi_acc_vfio_pci_probe(struct pci_dev *pdev, const struct pci_device ret = vfio_pci_core_register_device(&hisi_acc_vdev->core_device); if (ret) goto out_put_vdev; + + hisi_acc_vfio_debug_init(hisi_acc_vdev); return 0; out_put_vdev: @@ -1410,6 +1613,7 @@ static void hisi_acc_vfio_pci_remove(struct pci_dev *pdev) struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_drvdata(pdev); vfio_pci_core_unregister_device(&hisi_acc_vdev->core_device); + hisi_acc_vf_debugfs_exit(hisi_acc_vdev); vfio_put_device(&hisi_acc_vdev->core_device.vdev); } diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h index 5bab46602fad..245d7537b2bc 100644 --- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h +++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h @@ -32,6 +32,7 @@ #define QM_SQC_VFT_BASE_MASK_V2 GENMASK(15, 0) #define QM_SQC_VFT_NUM_SHIFT_V2 45 #define QM_SQC_VFT_NUM_MASK_V2 GENMASK(9, 0) +#define QM_MB_CMD_NOT_READY 0xffffffff /* RW regs */ #define QM_REGS_MAX_LEN 7 @@ -99,6 +100,13 @@ struct hisi_acc_vf_migration_file { struct hisi_acc_vf_core_device { struct vfio_pci_core_device core_device; u8 match_done; + /* + * io_base is only valid when dev_opened is true, + * which is protected by open_mutex. + */ + bool dev_opened; + /* Ensure the accuracy of dev_opened operation */ + struct mutex open_mutex; /* For migration state */ struct mutex state_mutex; @@ -107,9 +115,20 @@ struct hisi_acc_vf_core_device { struct pci_dev *vf_dev; struct hisi_qm *pf_qm; struct hisi_qm vf_qm; + /* + * vf_qm_state represents the QM_VF_STATE register value. + * It is set by Guest driver for the ACC VF dev indicating + * the driver has loaded and configured the dev correctly. + */ u32 vf_qm_state; int vf_id; struct hisi_acc_vf_migration_file *resuming_migf; struct hisi_acc_vf_migration_file *saving_migf; + + /* + * It holds migration data corresponding to the last migration + * and is used by the debugfs interface to report it. 
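The comments above document the invariant the new debugfs entries rely on: vf_qm->io_base is only mapped between open_device() and close_device(), so any access from debugfs must take open_mutex and check dev_opened first. A condensed sketch of the pattern used by hisi_acc_vf_debug_cmd() earlier in this patch (illustrative only):

	u32 val = 0;

	mutex_lock(&hisi_acc_vdev->open_mutex);
	if (hisi_acc_vdev->dev_opened)
		val = readl(hisi_acc_vdev->vf_qm.io_base + QM_MB_CMD_SEND_BASE);
	mutex_unlock(&hisi_acc_vdev->open_mutex);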
+ */ + struct hisi_acc_vf_migration_file *debug_migf; }; #endif /* HISI_ACC_VFIO_PCI_H */ diff --git a/drivers/vfio/pci/mlx5/cmd.c b/drivers/vfio/pci/mlx5/cmd.c index 41a4b0cf4297..7527e277c898 100644 --- a/drivers/vfio/pci/mlx5/cmd.c +++ b/drivers/vfio/pci/mlx5/cmd.c @@ -423,6 +423,7 @@ static int mlx5vf_add_migration_pages(struct mlx5_vhca_data_buffer *buf, unsigned long filled; unsigned int to_fill; int ret; + int i; to_fill = min_t(unsigned int, npages, PAGE_SIZE / sizeof(*page_list)); page_list = kvzalloc(to_fill * sizeof(*page_list), GFP_KERNEL_ACCOUNT); @@ -443,7 +444,7 @@ static int mlx5vf_add_migration_pages(struct mlx5_vhca_data_buffer *buf, GFP_KERNEL_ACCOUNT); if (ret) - goto err; + goto err_append; buf->allocated_length += filled * PAGE_SIZE; /* clean input for another bulk allocation */ memset(page_list, 0, filled * sizeof(*page_list)); @@ -454,6 +455,9 @@ static int mlx5vf_add_migration_pages(struct mlx5_vhca_data_buffer *buf, kvfree(page_list); return 0; +err_append: + for (i = filled - 1; i >= 0; i--) + __free_page(page_list[i]); err: kvfree(page_list); return ret; diff --git a/drivers/vfio/pci/mlx5/main.c b/drivers/vfio/pci/mlx5/main.c index 242c23eef452..8833e60d42f5 100644 --- a/drivers/vfio/pci/mlx5/main.c +++ b/drivers/vfio/pci/mlx5/main.c @@ -640,14 +640,11 @@ mlx5vf_pci_save_device_data(struct mlx5vf_pci_core_device *mvdev, bool track) O_RDONLY); if (IS_ERR(migf->filp)) { ret = PTR_ERR(migf->filp); - goto end; + kfree(migf); + return ERR_PTR(ret); } migf->mvdev = mvdev; - ret = mlx5vf_cmd_alloc_pd(migf); - if (ret) - goto out_free; - stream_open(migf->filp->f_inode, migf->filp); mutex_init(&migf->lock); init_waitqueue_head(&migf->poll_wait); @@ -663,6 +660,11 @@ mlx5vf_pci_save_device_data(struct mlx5vf_pci_core_device *mvdev, bool track) INIT_LIST_HEAD(&migf->buf_list); INIT_LIST_HEAD(&migf->avail_list); spin_lock_init(&migf->list_lock); + + ret = mlx5vf_cmd_alloc_pd(migf); + if (ret) + goto out; + ret = mlx5vf_cmd_query_vhca_migration_state(mvdev, &length, &full_size, 0); if (ret) goto out_pd; @@ -692,10 +694,8 @@ out_save: mlx5vf_free_data_buffer(buf); out_pd: mlx5fv_cmd_clean_migf_resources(migf); -out_free: +out: fput(migf->filp); -end: - kfree(migf); return ERR_PTR(ret); } @@ -1016,13 +1016,19 @@ mlx5vf_pci_resume_device_data(struct mlx5vf_pci_core_device *mvdev) O_WRONLY); if (IS_ERR(migf->filp)) { ret = PTR_ERR(migf->filp); - goto end; + kfree(migf); + return ERR_PTR(ret); } + stream_open(migf->filp->f_inode, migf->filp); + mutex_init(&migf->lock); + INIT_LIST_HEAD(&migf->buf_list); + INIT_LIST_HEAD(&migf->avail_list); + spin_lock_init(&migf->list_lock); migf->mvdev = mvdev; ret = mlx5vf_cmd_alloc_pd(migf); if (ret) - goto out_free; + goto out; buf = mlx5vf_alloc_data_buffer(migf, 0, DMA_TO_DEVICE); if (IS_ERR(buf)) { @@ -1041,20 +1047,13 @@ mlx5vf_pci_resume_device_data(struct mlx5vf_pci_core_device *mvdev) migf->buf_header[0] = buf; migf->load_state = MLX5_VF_LOAD_STATE_READ_HEADER; - stream_open(migf->filp->f_inode, migf->filp); - mutex_init(&migf->lock); - INIT_LIST_HEAD(&migf->buf_list); - INIT_LIST_HEAD(&migf->avail_list); - spin_lock_init(&migf->list_lock); return migf; out_buf: mlx5vf_free_data_buffer(migf->buf[0]); out_pd: mlx5vf_cmd_dealloc_pd(migf); -out_free: +out: fput(migf->filp); -end: - kfree(migf); return ERR_PTR(ret); } diff --git a/drivers/vfio/pci/nvgrace-gpu/main.c b/drivers/vfio/pci/nvgrace-gpu/main.c index a7fd018aa548..a467085038f0 100644 --- a/drivers/vfio/pci/nvgrace-gpu/main.c +++ b/drivers/vfio/pci/nvgrace-gpu/main.c @@ -866,6 
+866,8 @@ static const struct pci_device_id nvgrace_gpu_vfio_pci_table[] = { { PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_NVIDIA, 0x2342) }, /* GH200 480GB */ { PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_NVIDIA, 0x2345) }, + /* GH200 SKU */ + { PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_NVIDIA, 0x2348) }, {} }; diff --git a/drivers/vfio/pci/qat/main.c b/drivers/vfio/pci/qat/main.c index be3644ced17b..c78cb6de9390 100644 --- a/drivers/vfio/pci/qat/main.c +++ b/drivers/vfio/pci/qat/main.c @@ -304,7 +304,7 @@ static ssize_t qat_vf_resume_write(struct file *filp, const char __user *buf, offs = &filp->f_pos; if (*offs < 0 || - check_add_overflow((loff_t)len, *offs, &end)) + check_add_overflow(len, *offs, &end)) return -EOVERFLOW; if (end > mig_dev->state_size) diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c index 97422aafaa7b..ea2745c1ac5e 100644 --- a/drivers/vfio/pci/vfio_pci_config.c +++ b/drivers/vfio/pci/vfio_pci_config.c @@ -313,6 +313,10 @@ static int vfio_virt_config_read(struct vfio_pci_core_device *vdev, int pos, return count; } +static struct perm_bits direct_ro_perms = { + .readfn = vfio_direct_config_read, +}; + /* Default capability regions to read-only, no-virtualization */ static struct perm_bits cap_perms[PCI_CAP_ID_MAX + 1] = { [0 ... PCI_CAP_ID_MAX] = { .readfn = vfio_direct_config_read } @@ -1897,9 +1901,17 @@ static ssize_t vfio_config_do_rw(struct vfio_pci_core_device *vdev, char __user cap_start = *ppos; } else { if (*ppos >= PCI_CFG_SPACE_SIZE) { - WARN_ON(cap_id > PCI_EXT_CAP_ID_MAX); + /* + * We can get a cap_id that exceeds PCI_EXT_CAP_ID_MAX + * if we're hiding an unknown capability at the start + * of the extended capability list. Use default, ro + * access, which will virtualize the id and next values. + */ + if (cap_id > PCI_EXT_CAP_ID_MAX) + perm = &direct_ro_perms; + else + perm = &ecap_perms[cap_id]; - perm = &ecap_perms[cap_id]; cap_start = vfio_find_cap_start(vdev, *ppos); } else { WARN_ON(cap_id > PCI_CAP_ID_MAX); diff --git a/drivers/vfio/pci/virtio/Kconfig b/drivers/vfio/pci/virtio/Kconfig index bd80eca4a196..2770f7eb702c 100644 --- a/drivers/vfio/pci/virtio/Kconfig +++ b/drivers/vfio/pci/virtio/Kconfig @@ -1,15 +1,31 @@ # SPDX-License-Identifier: GPL-2.0-only config VIRTIO_VFIO_PCI - tristate "VFIO support for VIRTIO NET PCI devices" - depends on VIRTIO_PCI && VIRTIO_PCI_ADMIN_LEGACY - select VFIO_PCI_CORE - help - This provides support for exposing VIRTIO NET VF devices which support - legacy IO access, using the VFIO framework that can work with a legacy - virtio driver in the guest. - Based on PCIe spec, VFs do not support I/O Space. - As of that this driver emulates I/O BAR in software to let a VF be - seen as a transitional device by its users and let it work with - a legacy driver. - - If you don't know what to do here, say N. + tristate "VFIO support for VIRTIO NET PCI VF devices" + depends on VIRTIO_PCI + select VFIO_PCI_CORE + help + This provides migration support for VIRTIO NET PCI VF devices + using the VFIO framework. Migration support requires the + SR-IOV PF device to support specific VIRTIO extensions, + otherwise this driver provides no additional functionality + beyond vfio-pci. + + Migration support in this driver relies on dirty page tracking + provided by the IOMMU hardware and exposed through IOMMUFD, any + other use cases are dis-recommended. + + If you don't know what to do here, say N. 
+ +config VIRTIO_VFIO_PCI_ADMIN_LEGACY + bool "Legacy I/O support for VIRTIO NET PCI VF devices" + depends on VIRTIO_VFIO_PCI && VIRTIO_PCI_ADMIN_LEGACY + default y + help + This extends the virtio-vfio-pci driver to support legacy I/O + access, allowing use of legacy virtio drivers with VIRTIO NET + PCI VF devices. Legacy I/O support requires the SR-IOV PF + device to support and enable specific VIRTIO extensions, + otherwise this driver provides no additional functionality + beyond vfio-pci. + + If you don't know what to do here, say N. diff --git a/drivers/vfio/pci/virtio/Makefile b/drivers/vfio/pci/virtio/Makefile index 7171105baf33..d9b0bb40d6b3 100644 --- a/drivers/vfio/pci/virtio/Makefile +++ b/drivers/vfio/pci/virtio/Makefile @@ -1,3 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only obj-$(CONFIG_VIRTIO_VFIO_PCI) += virtio-vfio-pci.o -virtio-vfio-pci-y := main.o +virtio-vfio-pci-y := main.o migrate.o +virtio-vfio-pci-$(CONFIG_VIRTIO_VFIO_PCI_ADMIN_LEGACY) += legacy_io.o diff --git a/drivers/vfio/pci/virtio/common.h b/drivers/vfio/pci/virtio/common.h new file mode 100644 index 000000000000..c7d7e27af386 --- /dev/null +++ b/drivers/vfio/pci/virtio/common.h @@ -0,0 +1,127 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef VIRTIO_VFIO_COMMON_H +#define VIRTIO_VFIO_COMMON_H + +#include <linux/kernel.h> +#include <linux/virtio.h> +#include <linux/vfio_pci_core.h> +#include <linux/virtio_pci.h> + +enum virtiovf_migf_state { + VIRTIOVF_MIGF_STATE_ERROR = 1, + VIRTIOVF_MIGF_STATE_PRECOPY = 2, + VIRTIOVF_MIGF_STATE_COMPLETE = 3, +}; + +enum virtiovf_load_state { + VIRTIOVF_LOAD_STATE_READ_HEADER, + VIRTIOVF_LOAD_STATE_PREP_HEADER_DATA, + VIRTIOVF_LOAD_STATE_READ_HEADER_DATA, + VIRTIOVF_LOAD_STATE_PREP_CHUNK, + VIRTIOVF_LOAD_STATE_READ_CHUNK, + VIRTIOVF_LOAD_STATE_LOAD_CHUNK, +}; + +struct virtiovf_data_buffer { + struct sg_append_table table; + loff_t start_pos; + u64 length; + u64 allocated_length; + struct list_head buf_elm; + u8 include_header_object:1; + struct virtiovf_migration_file *migf; + /* Optimize virtiovf_get_migration_page() for sequential access */ + struct scatterlist *last_offset_sg; + unsigned int sg_last_entry; + unsigned long last_offset; +}; + +enum virtiovf_migf_header_flags { + VIRTIOVF_MIGF_HEADER_FLAGS_TAG_MANDATORY = 0, + VIRTIOVF_MIGF_HEADER_FLAGS_TAG_OPTIONAL = 1 << 0, +}; + +enum virtiovf_migf_header_tag { + VIRTIOVF_MIGF_HEADER_TAG_DEVICE_DATA = 0, +}; + +struct virtiovf_migration_header { + __le64 record_size; + /* For future use in case we may need to change the kernel protocol */ + __le32 flags; /* Use virtiovf_migf_header_flags */ + __le32 tag; /* Use virtiovf_migf_header_tag */ + __u8 data[]; /* Its size is given in the record_size */ +}; + +struct virtiovf_migration_file { + struct file *filp; + /* synchronize access to the file state */ + struct mutex lock; + loff_t max_pos; + u64 pre_copy_initial_bytes; + struct ratelimit_state pre_copy_rl_state; + u64 record_size; + u32 record_tag; + u8 has_obj_id:1; + u32 obj_id; + enum virtiovf_migf_state state; + enum virtiovf_load_state load_state; + /* synchronize access to the lists */ + spinlock_t list_lock; + struct list_head buf_list; + struct list_head avail_list; + struct virtiovf_data_buffer *buf; + struct virtiovf_data_buffer *buf_header; + struct virtiovf_pci_core_device *virtvdev; +}; + +struct virtiovf_pci_core_device { + struct vfio_pci_core_device core_device; +#ifdef CONFIG_VIRTIO_VFIO_PCI_ADMIN_LEGACY + u8 *bar0_virtual_buf; + /* synchronize access to the virtual buf */ + struct mutex bar_mutex; + 
void __iomem *notify_addr; + u64 notify_offset; + __le32 pci_base_addr_0; + __le16 pci_cmd; + u8 bar0_virtual_buf_size; + u8 notify_bar; +#endif + + /* LM related */ + u8 migrate_cap:1; + u8 deferred_reset:1; + /* protect migration state */ + struct mutex state_mutex; + enum vfio_device_mig_state mig_state; + /* protect the reset_done flow */ + spinlock_t reset_lock; + struct virtiovf_migration_file *resuming_migf; + struct virtiovf_migration_file *saving_migf; +}; + +void virtiovf_set_migratable(struct virtiovf_pci_core_device *virtvdev); +void virtiovf_open_migration(struct virtiovf_pci_core_device *virtvdev); +void virtiovf_close_migration(struct virtiovf_pci_core_device *virtvdev); +void virtiovf_migration_reset_done(struct pci_dev *pdev); + +#ifdef CONFIG_VIRTIO_VFIO_PCI_ADMIN_LEGACY +int virtiovf_open_legacy_io(struct virtiovf_pci_core_device *virtvdev); +long virtiovf_vfio_pci_core_ioctl(struct vfio_device *core_vdev, + unsigned int cmd, unsigned long arg); +int virtiovf_pci_ioctl_get_region_info(struct vfio_device *core_vdev, + unsigned int cmd, unsigned long arg); +ssize_t virtiovf_pci_core_write(struct vfio_device *core_vdev, + const char __user *buf, size_t count, + loff_t *ppos); +ssize_t virtiovf_pci_core_read(struct vfio_device *core_vdev, char __user *buf, + size_t count, loff_t *ppos); +bool virtiovf_support_legacy_io(struct pci_dev *pdev); +int virtiovf_init_legacy_io(struct virtiovf_pci_core_device *virtvdev); +void virtiovf_release_legacy_io(struct virtiovf_pci_core_device *virtvdev); +void virtiovf_legacy_io_reset_done(struct pci_dev *pdev); +#endif + +#endif /* VIRTIO_VFIO_COMMON_H */ diff --git a/drivers/vfio/pci/virtio/legacy_io.c b/drivers/vfio/pci/virtio/legacy_io.c new file mode 100644 index 000000000000..20382ee15fac --- /dev/null +++ b/drivers/vfio/pci/virtio/legacy_io.c @@ -0,0 +1,418 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved + */ + +#include <linux/device.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/pci.h> +#include <linux/pm_runtime.h> +#include <linux/types.h> +#include <linux/uaccess.h> +#include <linux/vfio.h> +#include <linux/vfio_pci_core.h> +#include <linux/virtio_pci.h> +#include <linux/virtio_net.h> +#include <linux/virtio_pci_admin.h> + +#include "common.h" + +static int +virtiovf_issue_legacy_rw_cmd(struct virtiovf_pci_core_device *virtvdev, + loff_t pos, char __user *buf, + size_t count, bool read) +{ + bool msix_enabled = + (virtvdev->core_device.irq_type == VFIO_PCI_MSIX_IRQ_INDEX); + struct pci_dev *pdev = virtvdev->core_device.pdev; + u8 *bar0_buf = virtvdev->bar0_virtual_buf; + bool common; + u8 offset; + int ret; + + common = pos < VIRTIO_PCI_CONFIG_OFF(msix_enabled); + /* offset within the relevant configuration area */ + offset = common ? 
pos : pos - VIRTIO_PCI_CONFIG_OFF(msix_enabled); + mutex_lock(&virtvdev->bar_mutex); + if (read) { + if (common) + ret = virtio_pci_admin_legacy_common_io_read(pdev, offset, + count, bar0_buf + pos); + else + ret = virtio_pci_admin_legacy_device_io_read(pdev, offset, + count, bar0_buf + pos); + if (ret) + goto out; + if (copy_to_user(buf, bar0_buf + pos, count)) + ret = -EFAULT; + } else { + if (copy_from_user(bar0_buf + pos, buf, count)) { + ret = -EFAULT; + goto out; + } + + if (common) + ret = virtio_pci_admin_legacy_common_io_write(pdev, offset, + count, bar0_buf + pos); + else + ret = virtio_pci_admin_legacy_device_io_write(pdev, offset, + count, bar0_buf + pos); + } +out: + mutex_unlock(&virtvdev->bar_mutex); + return ret; +} + +static int +virtiovf_pci_bar0_rw(struct virtiovf_pci_core_device *virtvdev, + loff_t pos, char __user *buf, + size_t count, bool read) +{ + struct vfio_pci_core_device *core_device = &virtvdev->core_device; + struct pci_dev *pdev = core_device->pdev; + u16 queue_notify; + int ret; + + if (!(le16_to_cpu(virtvdev->pci_cmd) & PCI_COMMAND_IO)) + return -EIO; + + if (pos + count > virtvdev->bar0_virtual_buf_size) + return -EINVAL; + + ret = pm_runtime_resume_and_get(&pdev->dev); + if (ret) { + pci_info_ratelimited(pdev, "runtime resume failed %d\n", ret); + return -EIO; + } + + switch (pos) { + case VIRTIO_PCI_QUEUE_NOTIFY: + if (count != sizeof(queue_notify)) { + ret = -EINVAL; + goto end; + } + if (read) { + ret = vfio_pci_core_ioread16(core_device, true, &queue_notify, + virtvdev->notify_addr); + if (ret) + goto end; + if (copy_to_user(buf, &queue_notify, + sizeof(queue_notify))) { + ret = -EFAULT; + goto end; + } + } else { + if (copy_from_user(&queue_notify, buf, count)) { + ret = -EFAULT; + goto end; + } + ret = vfio_pci_core_iowrite16(core_device, true, queue_notify, + virtvdev->notify_addr); + } + break; + default: + ret = virtiovf_issue_legacy_rw_cmd(virtvdev, pos, buf, count, + read); + } + +end: + pm_runtime_put(&pdev->dev); + return ret ? 
ret : count; +} + +static ssize_t virtiovf_pci_read_config(struct vfio_device *core_vdev, + char __user *buf, size_t count, + loff_t *ppos) +{ + struct virtiovf_pci_core_device *virtvdev = container_of( + core_vdev, struct virtiovf_pci_core_device, core_device.vdev); + loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK; + size_t register_offset; + loff_t copy_offset; + size_t copy_count; + __le32 val32; + __le16 val16; + u8 val8; + int ret; + + ret = vfio_pci_core_read(core_vdev, buf, count, ppos); + if (ret < 0) + return ret; + + if (vfio_pci_core_range_intersect_range(pos, count, PCI_DEVICE_ID, + sizeof(val16), ©_offset, + ©_count, ®ister_offset)) { + val16 = cpu_to_le16(VIRTIO_TRANS_ID_NET); + if (copy_to_user(buf + copy_offset, (void *)&val16 + register_offset, copy_count)) + return -EFAULT; + } + + if ((le16_to_cpu(virtvdev->pci_cmd) & PCI_COMMAND_IO) && + vfio_pci_core_range_intersect_range(pos, count, PCI_COMMAND, + sizeof(val16), ©_offset, + ©_count, ®ister_offset)) { + if (copy_from_user((void *)&val16 + register_offset, buf + copy_offset, + copy_count)) + return -EFAULT; + val16 |= cpu_to_le16(PCI_COMMAND_IO); + if (copy_to_user(buf + copy_offset, (void *)&val16 + register_offset, + copy_count)) + return -EFAULT; + } + + if (vfio_pci_core_range_intersect_range(pos, count, PCI_REVISION_ID, + sizeof(val8), ©_offset, + ©_count, ®ister_offset)) { + /* Transional needs to have revision 0 */ + val8 = 0; + if (copy_to_user(buf + copy_offset, &val8, copy_count)) + return -EFAULT; + } + + if (vfio_pci_core_range_intersect_range(pos, count, PCI_BASE_ADDRESS_0, + sizeof(val32), ©_offset, + ©_count, ®ister_offset)) { + u32 bar_mask = ~(virtvdev->bar0_virtual_buf_size - 1); + u32 pci_base_addr_0 = le32_to_cpu(virtvdev->pci_base_addr_0); + + val32 = cpu_to_le32((pci_base_addr_0 & bar_mask) | PCI_BASE_ADDRESS_SPACE_IO); + if (copy_to_user(buf + copy_offset, (void *)&val32 + register_offset, copy_count)) + return -EFAULT; + } + + if (vfio_pci_core_range_intersect_range(pos, count, PCI_SUBSYSTEM_ID, + sizeof(val16), ©_offset, + ©_count, ®ister_offset)) { + /* + * Transitional devices use the PCI subsystem device id as + * virtio device id, same as legacy driver always did. 
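Every overlay in virtiovf_pci_read_config() above follows the same vfio_pci_core_range_intersect_range() pattern: the call reports whether the user's config-space access overlaps the emulated register and, if so, where. An annotated restatement of the PCI_DEVICE_ID case from the code above, spelling out the three output parameters (illustrative only):

	loff_t copy_offset;	/* offset into the user buffer where the overlap starts */
	size_t copy_count;	/* number of overlapping bytes to copy */
	size_t register_offset;	/* offset into the emulated register value */
	__le16 val16;

	if (vfio_pci_core_range_intersect_range(pos, count, PCI_DEVICE_ID,
						sizeof(val16), &copy_offset,
						&copy_count, &register_offset)) {
		/* present the transitional network device ID to the guest */
		val16 = cpu_to_le16(VIRTIO_TRANS_ID_NET);
		if (copy_to_user(buf + copy_offset,
				 (void *)&val16 + register_offset, copy_count))
			return -EFAULT;
	}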
+ */ + val16 = cpu_to_le16(VIRTIO_ID_NET); + if (copy_to_user(buf + copy_offset, (void *)&val16 + register_offset, + copy_count)) + return -EFAULT; + } + + if (vfio_pci_core_range_intersect_range(pos, count, PCI_SUBSYSTEM_VENDOR_ID, + sizeof(val16), ©_offset, + ©_count, ®ister_offset)) { + val16 = cpu_to_le16(PCI_VENDOR_ID_REDHAT_QUMRANET); + if (copy_to_user(buf + copy_offset, (void *)&val16 + register_offset, + copy_count)) + return -EFAULT; + } + + return count; +} + +ssize_t virtiovf_pci_core_read(struct vfio_device *core_vdev, char __user *buf, + size_t count, loff_t *ppos) +{ + struct virtiovf_pci_core_device *virtvdev = container_of( + core_vdev, struct virtiovf_pci_core_device, core_device.vdev); + unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos); + loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK; + + if (!count) + return 0; + + if (index == VFIO_PCI_CONFIG_REGION_INDEX) + return virtiovf_pci_read_config(core_vdev, buf, count, ppos); + + if (index == VFIO_PCI_BAR0_REGION_INDEX) + return virtiovf_pci_bar0_rw(virtvdev, pos, buf, count, true); + + return vfio_pci_core_read(core_vdev, buf, count, ppos); +} + +static ssize_t virtiovf_pci_write_config(struct vfio_device *core_vdev, + const char __user *buf, size_t count, + loff_t *ppos) +{ + struct virtiovf_pci_core_device *virtvdev = container_of( + core_vdev, struct virtiovf_pci_core_device, core_device.vdev); + loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK; + size_t register_offset; + loff_t copy_offset; + size_t copy_count; + + if (vfio_pci_core_range_intersect_range(pos, count, PCI_COMMAND, + sizeof(virtvdev->pci_cmd), + ©_offset, ©_count, + ®ister_offset)) { + if (copy_from_user((void *)&virtvdev->pci_cmd + register_offset, + buf + copy_offset, + copy_count)) + return -EFAULT; + } + + if (vfio_pci_core_range_intersect_range(pos, count, PCI_BASE_ADDRESS_0, + sizeof(virtvdev->pci_base_addr_0), + ©_offset, ©_count, + ®ister_offset)) { + if (copy_from_user((void *)&virtvdev->pci_base_addr_0 + register_offset, + buf + copy_offset, + copy_count)) + return -EFAULT; + } + + return vfio_pci_core_write(core_vdev, buf, count, ppos); +} + +ssize_t virtiovf_pci_core_write(struct vfio_device *core_vdev, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct virtiovf_pci_core_device *virtvdev = container_of( + core_vdev, struct virtiovf_pci_core_device, core_device.vdev); + unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos); + loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK; + + if (!count) + return 0; + + if (index == VFIO_PCI_CONFIG_REGION_INDEX) + return virtiovf_pci_write_config(core_vdev, buf, count, ppos); + + if (index == VFIO_PCI_BAR0_REGION_INDEX) + return virtiovf_pci_bar0_rw(virtvdev, pos, (char __user *)buf, count, false); + + return vfio_pci_core_write(core_vdev, buf, count, ppos); +} + +int virtiovf_pci_ioctl_get_region_info(struct vfio_device *core_vdev, + unsigned int cmd, unsigned long arg) +{ + struct virtiovf_pci_core_device *virtvdev = container_of( + core_vdev, struct virtiovf_pci_core_device, core_device.vdev); + unsigned long minsz = offsetofend(struct vfio_region_info, offset); + void __user *uarg = (void __user *)arg; + struct vfio_region_info info = {}; + + if (copy_from_user(&info, uarg, minsz)) + return -EFAULT; + + if (info.argsz < minsz) + return -EINVAL; + + switch (info.index) { + case VFIO_PCI_BAR0_REGION_INDEX: + info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index); + info.size = virtvdev->bar0_virtual_buf_size; + info.flags = VFIO_REGION_INFO_FLAG_READ | + VFIO_REGION_INFO_FLAG_WRITE; + return copy_to_user(uarg, 
&info, minsz) ? -EFAULT : 0; + default: + return vfio_pci_core_ioctl(core_vdev, cmd, arg); + } +} + +long virtiovf_vfio_pci_core_ioctl(struct vfio_device *core_vdev, unsigned int cmd, + unsigned long arg) +{ + switch (cmd) { + case VFIO_DEVICE_GET_REGION_INFO: + return virtiovf_pci_ioctl_get_region_info(core_vdev, cmd, arg); + default: + return vfio_pci_core_ioctl(core_vdev, cmd, arg); + } +} + +static int virtiovf_set_notify_addr(struct virtiovf_pci_core_device *virtvdev) +{ + struct vfio_pci_core_device *core_device = &virtvdev->core_device; + int ret; + + /* + * Setup the BAR where the 'notify' exists to be used by vfio as well + * This will let us mmap it only once and use it when needed. + */ + ret = vfio_pci_core_setup_barmap(core_device, + virtvdev->notify_bar); + if (ret) + return ret; + + virtvdev->notify_addr = core_device->barmap[virtvdev->notify_bar] + + virtvdev->notify_offset; + return 0; +} + +int virtiovf_open_legacy_io(struct virtiovf_pci_core_device *virtvdev) +{ + if (!virtvdev->bar0_virtual_buf) + return 0; + + /* + * Upon close_device() the vfio_pci_core_disable() is called + * and will close all the previous mmaps, so it seems that the + * valid life cycle for the 'notify' addr is per open/close. + */ + return virtiovf_set_notify_addr(virtvdev); +} + +static int virtiovf_get_device_config_size(unsigned short device) +{ + /* Network card */ + return offsetofend(struct virtio_net_config, status); +} + +static int virtiovf_read_notify_info(struct virtiovf_pci_core_device *virtvdev) +{ + u64 offset; + int ret; + u8 bar; + + ret = virtio_pci_admin_legacy_io_notify_info(virtvdev->core_device.pdev, + VIRTIO_ADMIN_CMD_NOTIFY_INFO_FLAGS_OWNER_MEM, + &bar, &offset); + if (ret) + return ret; + + virtvdev->notify_bar = bar; + virtvdev->notify_offset = offset; + return 0; +} + +static bool virtiovf_bar0_exists(struct pci_dev *pdev) +{ + struct resource *res = pdev->resource; + + return res->flags; +} + +bool virtiovf_support_legacy_io(struct pci_dev *pdev) +{ + return virtio_pci_admin_has_legacy_io(pdev) && !virtiovf_bar0_exists(pdev); +} + +int virtiovf_init_legacy_io(struct virtiovf_pci_core_device *virtvdev) +{ + struct pci_dev *pdev = virtvdev->core_device.pdev; + int ret; + + ret = virtiovf_read_notify_info(virtvdev); + if (ret) + return ret; + + virtvdev->bar0_virtual_buf_size = VIRTIO_PCI_CONFIG_OFF(true) + + virtiovf_get_device_config_size(pdev->device); + BUILD_BUG_ON(!is_power_of_2(virtvdev->bar0_virtual_buf_size)); + virtvdev->bar0_virtual_buf = kzalloc(virtvdev->bar0_virtual_buf_size, + GFP_KERNEL); + if (!virtvdev->bar0_virtual_buf) + return -ENOMEM; + mutex_init(&virtvdev->bar_mutex); + return 0; +} + +void virtiovf_release_legacy_io(struct virtiovf_pci_core_device *virtvdev) +{ + kfree(virtvdev->bar0_virtual_buf); +} + +void virtiovf_legacy_io_reset_done(struct pci_dev *pdev) +{ + struct virtiovf_pci_core_device *virtvdev = dev_get_drvdata(&pdev->dev); + + virtvdev->pci_cmd = 0; +} diff --git a/drivers/vfio/pci/virtio/main.c b/drivers/vfio/pci/virtio/main.c index b5d3a8c5bbc9..d534d48c4163 100644 --- a/drivers/vfio/pci/virtio/main.c +++ b/drivers/vfio/pci/virtio/main.c @@ -16,347 +16,12 @@ #include <linux/virtio_net.h> #include <linux/virtio_pci_admin.h> -struct virtiovf_pci_core_device { - struct vfio_pci_core_device core_device; - u8 *bar0_virtual_buf; - /* synchronize access to the virtual buf */ - struct mutex bar_mutex; - void __iomem *notify_addr; - u64 notify_offset; - __le32 pci_base_addr_0; - __le16 pci_cmd; - u8 bar0_virtual_buf_size; - u8 notify_bar; -}; 
- -static int -virtiovf_issue_legacy_rw_cmd(struct virtiovf_pci_core_device *virtvdev, - loff_t pos, char __user *buf, - size_t count, bool read) -{ - bool msix_enabled = - (virtvdev->core_device.irq_type == VFIO_PCI_MSIX_IRQ_INDEX); - struct pci_dev *pdev = virtvdev->core_device.pdev; - u8 *bar0_buf = virtvdev->bar0_virtual_buf; - bool common; - u8 offset; - int ret; - - common = pos < VIRTIO_PCI_CONFIG_OFF(msix_enabled); - /* offset within the relevant configuration area */ - offset = common ? pos : pos - VIRTIO_PCI_CONFIG_OFF(msix_enabled); - mutex_lock(&virtvdev->bar_mutex); - if (read) { - if (common) - ret = virtio_pci_admin_legacy_common_io_read(pdev, offset, - count, bar0_buf + pos); - else - ret = virtio_pci_admin_legacy_device_io_read(pdev, offset, - count, bar0_buf + pos); - if (ret) - goto out; - if (copy_to_user(buf, bar0_buf + pos, count)) - ret = -EFAULT; - } else { - if (copy_from_user(bar0_buf + pos, buf, count)) { - ret = -EFAULT; - goto out; - } - - if (common) - ret = virtio_pci_admin_legacy_common_io_write(pdev, offset, - count, bar0_buf + pos); - else - ret = virtio_pci_admin_legacy_device_io_write(pdev, offset, - count, bar0_buf + pos); - } -out: - mutex_unlock(&virtvdev->bar_mutex); - return ret; -} - -static int -virtiovf_pci_bar0_rw(struct virtiovf_pci_core_device *virtvdev, - loff_t pos, char __user *buf, - size_t count, bool read) -{ - struct vfio_pci_core_device *core_device = &virtvdev->core_device; - struct pci_dev *pdev = core_device->pdev; - u16 queue_notify; - int ret; - - if (!(le16_to_cpu(virtvdev->pci_cmd) & PCI_COMMAND_IO)) - return -EIO; - - if (pos + count > virtvdev->bar0_virtual_buf_size) - return -EINVAL; - - ret = pm_runtime_resume_and_get(&pdev->dev); - if (ret) { - pci_info_ratelimited(pdev, "runtime resume failed %d\n", ret); - return -EIO; - } - - switch (pos) { - case VIRTIO_PCI_QUEUE_NOTIFY: - if (count != sizeof(queue_notify)) { - ret = -EINVAL; - goto end; - } - if (read) { - ret = vfio_pci_core_ioread16(core_device, true, &queue_notify, - virtvdev->notify_addr); - if (ret) - goto end; - if (copy_to_user(buf, &queue_notify, - sizeof(queue_notify))) { - ret = -EFAULT; - goto end; - } - } else { - if (copy_from_user(&queue_notify, buf, count)) { - ret = -EFAULT; - goto end; - } - ret = vfio_pci_core_iowrite16(core_device, true, queue_notify, - virtvdev->notify_addr); - } - break; - default: - ret = virtiovf_issue_legacy_rw_cmd(virtvdev, pos, buf, count, - read); - } - -end: - pm_runtime_put(&pdev->dev); - return ret ? 
ret : count; -} - -static ssize_t virtiovf_pci_read_config(struct vfio_device *core_vdev, - char __user *buf, size_t count, - loff_t *ppos) -{ - struct virtiovf_pci_core_device *virtvdev = container_of( - core_vdev, struct virtiovf_pci_core_device, core_device.vdev); - loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK; - size_t register_offset; - loff_t copy_offset; - size_t copy_count; - __le32 val32; - __le16 val16; - u8 val8; - int ret; - - ret = vfio_pci_core_read(core_vdev, buf, count, ppos); - if (ret < 0) - return ret; - - if (vfio_pci_core_range_intersect_range(pos, count, PCI_DEVICE_ID, - sizeof(val16), ©_offset, - ©_count, ®ister_offset)) { - val16 = cpu_to_le16(VIRTIO_TRANS_ID_NET); - if (copy_to_user(buf + copy_offset, (void *)&val16 + register_offset, copy_count)) - return -EFAULT; - } - - if ((le16_to_cpu(virtvdev->pci_cmd) & PCI_COMMAND_IO) && - vfio_pci_core_range_intersect_range(pos, count, PCI_COMMAND, - sizeof(val16), ©_offset, - ©_count, ®ister_offset)) { - if (copy_from_user((void *)&val16 + register_offset, buf + copy_offset, - copy_count)) - return -EFAULT; - val16 |= cpu_to_le16(PCI_COMMAND_IO); - if (copy_to_user(buf + copy_offset, (void *)&val16 + register_offset, - copy_count)) - return -EFAULT; - } - - if (vfio_pci_core_range_intersect_range(pos, count, PCI_REVISION_ID, - sizeof(val8), ©_offset, - ©_count, ®ister_offset)) { - /* Transional needs to have revision 0 */ - val8 = 0; - if (copy_to_user(buf + copy_offset, &val8, copy_count)) - return -EFAULT; - } - - if (vfio_pci_core_range_intersect_range(pos, count, PCI_BASE_ADDRESS_0, - sizeof(val32), ©_offset, - ©_count, ®ister_offset)) { - u32 bar_mask = ~(virtvdev->bar0_virtual_buf_size - 1); - u32 pci_base_addr_0 = le32_to_cpu(virtvdev->pci_base_addr_0); - - val32 = cpu_to_le32((pci_base_addr_0 & bar_mask) | PCI_BASE_ADDRESS_SPACE_IO); - if (copy_to_user(buf + copy_offset, (void *)&val32 + register_offset, copy_count)) - return -EFAULT; - } - - if (vfio_pci_core_range_intersect_range(pos, count, PCI_SUBSYSTEM_ID, - sizeof(val16), ©_offset, - ©_count, ®ister_offset)) { - /* - * Transitional devices use the PCI subsystem device id as - * virtio device id, same as legacy driver always did. 
- */ - val16 = cpu_to_le16(VIRTIO_ID_NET); - if (copy_to_user(buf + copy_offset, (void *)&val16 + register_offset, - copy_count)) - return -EFAULT; - } - - if (vfio_pci_core_range_intersect_range(pos, count, PCI_SUBSYSTEM_VENDOR_ID, - sizeof(val16), ©_offset, - ©_count, ®ister_offset)) { - val16 = cpu_to_le16(PCI_VENDOR_ID_REDHAT_QUMRANET); - if (copy_to_user(buf + copy_offset, (void *)&val16 + register_offset, - copy_count)) - return -EFAULT; - } - - return count; -} - -static ssize_t -virtiovf_pci_core_read(struct vfio_device *core_vdev, char __user *buf, - size_t count, loff_t *ppos) -{ - struct virtiovf_pci_core_device *virtvdev = container_of( - core_vdev, struct virtiovf_pci_core_device, core_device.vdev); - unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos); - loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK; - - if (!count) - return 0; - - if (index == VFIO_PCI_CONFIG_REGION_INDEX) - return virtiovf_pci_read_config(core_vdev, buf, count, ppos); - - if (index == VFIO_PCI_BAR0_REGION_INDEX) - return virtiovf_pci_bar0_rw(virtvdev, pos, buf, count, true); - - return vfio_pci_core_read(core_vdev, buf, count, ppos); -} - -static ssize_t virtiovf_pci_write_config(struct vfio_device *core_vdev, - const char __user *buf, size_t count, - loff_t *ppos) -{ - struct virtiovf_pci_core_device *virtvdev = container_of( - core_vdev, struct virtiovf_pci_core_device, core_device.vdev); - loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK; - size_t register_offset; - loff_t copy_offset; - size_t copy_count; - - if (vfio_pci_core_range_intersect_range(pos, count, PCI_COMMAND, - sizeof(virtvdev->pci_cmd), - ©_offset, ©_count, - ®ister_offset)) { - if (copy_from_user((void *)&virtvdev->pci_cmd + register_offset, - buf + copy_offset, - copy_count)) - return -EFAULT; - } - - if (vfio_pci_core_range_intersect_range(pos, count, PCI_BASE_ADDRESS_0, - sizeof(virtvdev->pci_base_addr_0), - ©_offset, ©_count, - ®ister_offset)) { - if (copy_from_user((void *)&virtvdev->pci_base_addr_0 + register_offset, - buf + copy_offset, - copy_count)) - return -EFAULT; - } - - return vfio_pci_core_write(core_vdev, buf, count, ppos); -} - -static ssize_t -virtiovf_pci_core_write(struct vfio_device *core_vdev, const char __user *buf, - size_t count, loff_t *ppos) -{ - struct virtiovf_pci_core_device *virtvdev = container_of( - core_vdev, struct virtiovf_pci_core_device, core_device.vdev); - unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos); - loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK; - - if (!count) - return 0; - - if (index == VFIO_PCI_CONFIG_REGION_INDEX) - return virtiovf_pci_write_config(core_vdev, buf, count, ppos); - - if (index == VFIO_PCI_BAR0_REGION_INDEX) - return virtiovf_pci_bar0_rw(virtvdev, pos, (char __user *)buf, count, false); - - return vfio_pci_core_write(core_vdev, buf, count, ppos); -} - -static int -virtiovf_pci_ioctl_get_region_info(struct vfio_device *core_vdev, - unsigned int cmd, unsigned long arg) -{ - struct virtiovf_pci_core_device *virtvdev = container_of( - core_vdev, struct virtiovf_pci_core_device, core_device.vdev); - unsigned long minsz = offsetofend(struct vfio_region_info, offset); - void __user *uarg = (void __user *)arg; - struct vfio_region_info info = {}; - - if (copy_from_user(&info, uarg, minsz)) - return -EFAULT; - - if (info.argsz < minsz) - return -EINVAL; - - switch (info.index) { - case VFIO_PCI_BAR0_REGION_INDEX: - info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index); - info.size = virtvdev->bar0_virtual_buf_size; - info.flags = VFIO_REGION_INFO_FLAG_READ | - VFIO_REGION_INFO_FLAG_WRITE; - 
return copy_to_user(uarg, &info, minsz) ? -EFAULT : 0; - default: - return vfio_pci_core_ioctl(core_vdev, cmd, arg); - } -} - -static long -virtiovf_vfio_pci_core_ioctl(struct vfio_device *core_vdev, unsigned int cmd, - unsigned long arg) -{ - switch (cmd) { - case VFIO_DEVICE_GET_REGION_INFO: - return virtiovf_pci_ioctl_get_region_info(core_vdev, cmd, arg); - default: - return vfio_pci_core_ioctl(core_vdev, cmd, arg); - } -} - -static int -virtiovf_set_notify_addr(struct virtiovf_pci_core_device *virtvdev) -{ - struct vfio_pci_core_device *core_device = &virtvdev->core_device; - int ret; - - /* - * Setup the BAR where the 'notify' exists to be used by vfio as well - * This will let us mmap it only once and use it when needed. - */ - ret = vfio_pci_core_setup_barmap(core_device, - virtvdev->notify_bar); - if (ret) - return ret; - - virtvdev->notify_addr = core_device->barmap[virtvdev->notify_bar] + - virtvdev->notify_offset; - return 0; -} +#include "common.h" static int virtiovf_pci_open_device(struct vfio_device *core_vdev) { - struct virtiovf_pci_core_device *virtvdev = container_of( - core_vdev, struct virtiovf_pci_core_device, core_device.vdev); + struct virtiovf_pci_core_device *virtvdev = container_of(core_vdev, + struct virtiovf_pci_core_device, core_device.vdev); struct vfio_pci_core_device *vdev = &virtvdev->core_device; int ret; @@ -364,88 +29,84 @@ static int virtiovf_pci_open_device(struct vfio_device *core_vdev) if (ret) return ret; - if (virtvdev->bar0_virtual_buf) { - /* - * Upon close_device() the vfio_pci_core_disable() is called - * and will close all the previous mmaps, so it seems that the - * valid life cycle for the 'notify' addr is per open/close. - */ - ret = virtiovf_set_notify_addr(virtvdev); - if (ret) { - vfio_pci_core_disable(vdev); - return ret; - } +#ifdef CONFIG_VIRTIO_VFIO_PCI_ADMIN_LEGACY + ret = virtiovf_open_legacy_io(virtvdev); + if (ret) { + vfio_pci_core_disable(vdev); + return ret; } +#endif + virtiovf_open_migration(virtvdev); vfio_pci_core_finish_enable(vdev); return 0; } -static int virtiovf_get_device_config_size(unsigned short device) +static void virtiovf_pci_close_device(struct vfio_device *core_vdev) { - /* Network card */ - return offsetofend(struct virtio_net_config, status); -} - -static int virtiovf_read_notify_info(struct virtiovf_pci_core_device *virtvdev) -{ - u64 offset; - int ret; - u8 bar; + struct virtiovf_pci_core_device *virtvdev = container_of(core_vdev, + struct virtiovf_pci_core_device, core_device.vdev); - ret = virtio_pci_admin_legacy_io_notify_info(virtvdev->core_device.pdev, - VIRTIO_ADMIN_CMD_NOTIFY_INFO_FLAGS_OWNER_MEM, - &bar, &offset); - if (ret) - return ret; - - virtvdev->notify_bar = bar; - virtvdev->notify_offset = offset; - return 0; + virtiovf_close_migration(virtvdev); + vfio_pci_core_close_device(core_vdev); } +#ifdef CONFIG_VIRTIO_VFIO_PCI_ADMIN_LEGACY static int virtiovf_pci_init_device(struct vfio_device *core_vdev) { - struct virtiovf_pci_core_device *virtvdev = container_of( - core_vdev, struct virtiovf_pci_core_device, core_device.vdev); - struct pci_dev *pdev; + struct virtiovf_pci_core_device *virtvdev = container_of(core_vdev, + struct virtiovf_pci_core_device, core_device.vdev); int ret; ret = vfio_pci_core_init_dev(core_vdev); if (ret) return ret; - pdev = virtvdev->core_device.pdev; - ret = virtiovf_read_notify_info(virtvdev); - if (ret) - return ret; - - virtvdev->bar0_virtual_buf_size = VIRTIO_PCI_CONFIG_OFF(true) + - virtiovf_get_device_config_size(pdev->device); - 
BUILD_BUG_ON(!is_power_of_2(virtvdev->bar0_virtual_buf_size)); - virtvdev->bar0_virtual_buf = kzalloc(virtvdev->bar0_virtual_buf_size, - GFP_KERNEL); - if (!virtvdev->bar0_virtual_buf) - return -ENOMEM; - mutex_init(&virtvdev->bar_mutex); - return 0; + /* + * The vfio_device_ops.init() callback is set to virtiovf_pci_init_device() + * only when legacy I/O is supported. Now, let's initialize it. + */ + return virtiovf_init_legacy_io(virtvdev); } +#endif static void virtiovf_pci_core_release_dev(struct vfio_device *core_vdev) { - struct virtiovf_pci_core_device *virtvdev = container_of( - core_vdev, struct virtiovf_pci_core_device, core_device.vdev); +#ifdef CONFIG_VIRTIO_VFIO_PCI_ADMIN_LEGACY + struct virtiovf_pci_core_device *virtvdev = container_of(core_vdev, + struct virtiovf_pci_core_device, core_device.vdev); - kfree(virtvdev->bar0_virtual_buf); + virtiovf_release_legacy_io(virtvdev); +#endif vfio_pci_core_release_dev(core_vdev); } -static const struct vfio_device_ops virtiovf_vfio_pci_tran_ops = { - .name = "virtio-vfio-pci-trans", +static const struct vfio_device_ops virtiovf_vfio_pci_lm_ops = { + .name = "virtio-vfio-pci-lm", + .init = vfio_pci_core_init_dev, + .release = virtiovf_pci_core_release_dev, + .open_device = virtiovf_pci_open_device, + .close_device = virtiovf_pci_close_device, + .ioctl = vfio_pci_core_ioctl, + .device_feature = vfio_pci_core_ioctl_feature, + .read = vfio_pci_core_read, + .write = vfio_pci_core_write, + .mmap = vfio_pci_core_mmap, + .request = vfio_pci_core_request, + .match = vfio_pci_core_match, + .bind_iommufd = vfio_iommufd_physical_bind, + .unbind_iommufd = vfio_iommufd_physical_unbind, + .attach_ioas = vfio_iommufd_physical_attach_ioas, + .detach_ioas = vfio_iommufd_physical_detach_ioas, +}; + +#ifdef CONFIG_VIRTIO_VFIO_PCI_ADMIN_LEGACY +static const struct vfio_device_ops virtiovf_vfio_pci_tran_lm_ops = { + .name = "virtio-vfio-pci-trans-lm", .init = virtiovf_pci_init_device, .release = virtiovf_pci_core_release_dev, .open_device = virtiovf_pci_open_device, - .close_device = vfio_pci_core_close_device, + .close_device = virtiovf_pci_close_device, .ioctl = virtiovf_vfio_pci_core_ioctl, .device_feature = vfio_pci_core_ioctl_feature, .read = virtiovf_pci_core_read, @@ -458,6 +119,7 @@ static const struct vfio_device_ops virtiovf_vfio_pci_tran_ops = { .attach_ioas = vfio_iommufd_physical_attach_ioas, .detach_ioas = vfio_iommufd_physical_detach_ioas, }; +#endif static const struct vfio_device_ops virtiovf_vfio_pci_ops = { .name = "virtio-vfio-pci", @@ -478,29 +140,34 @@ static const struct vfio_device_ops virtiovf_vfio_pci_ops = { .detach_ioas = vfio_iommufd_physical_detach_ioas, }; -static bool virtiovf_bar0_exists(struct pci_dev *pdev) -{ - struct resource *res = pdev->resource; - - return res->flags; -} - static int virtiovf_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { const struct vfio_device_ops *ops = &virtiovf_vfio_pci_ops; struct virtiovf_pci_core_device *virtvdev; + bool sup_legacy_io = false; + bool sup_lm = false; int ret; - if (pdev->is_virtfn && virtio_pci_admin_has_legacy_io(pdev) && - !virtiovf_bar0_exists(pdev)) - ops = &virtiovf_vfio_pci_tran_ops; + if (pdev->is_virtfn) { +#ifdef CONFIG_VIRTIO_VFIO_PCI_ADMIN_LEGACY + sup_legacy_io = virtiovf_support_legacy_io(pdev); + if (sup_legacy_io) + ops = &virtiovf_vfio_pci_tran_lm_ops; +#endif + sup_lm = virtio_pci_admin_has_dev_parts(pdev); + if (sup_lm && !sup_legacy_io) + ops = &virtiovf_vfio_pci_lm_ops; + } virtvdev = vfio_alloc_device(virtiovf_pci_core_device, 
core_device.vdev, &pdev->dev, ops); if (IS_ERR(virtvdev)) return PTR_ERR(virtvdev); + if (sup_lm) + virtiovf_set_migratable(virtvdev); + dev_set_drvdata(&pdev->dev, &virtvdev->core_device); ret = vfio_pci_core_register_device(&virtvdev->core_device); if (ret) @@ -529,9 +196,10 @@ MODULE_DEVICE_TABLE(pci, virtiovf_pci_table); static void virtiovf_pci_aer_reset_done(struct pci_dev *pdev) { - struct virtiovf_pci_core_device *virtvdev = dev_get_drvdata(&pdev->dev); - - virtvdev->pci_cmd = 0; +#ifdef CONFIG_VIRTIO_VFIO_PCI_ADMIN_LEGACY + virtiovf_legacy_io_reset_done(pdev); +#endif + virtiovf_migration_reset_done(pdev); } static const struct pci_error_handlers virtiovf_err_handlers = { diff --git a/drivers/vfio/pci/virtio/migrate.c b/drivers/vfio/pci/virtio/migrate.c new file mode 100644 index 000000000000..ee54f4c17857 --- /dev/null +++ b/drivers/vfio/pci/virtio/migrate.c @@ -0,0 +1,1337 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved + */ + +#include <linux/device.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/pci.h> +#include <linux/pm_runtime.h> +#include <linux/types.h> +#include <linux/uaccess.h> +#include <linux/vfio.h> +#include <linux/vfio_pci_core.h> +#include <linux/virtio_pci.h> +#include <linux/virtio_net.h> +#include <linux/virtio_pci_admin.h> +#include <linux/anon_inodes.h> + +#include "common.h" + +/* Device specification max parts size */ +#define MAX_LOAD_SIZE (BIT_ULL(BITS_PER_TYPE \ + (((struct virtio_admin_cmd_dev_parts_metadata_result *)0)->parts_size.size)) - 1) + +/* Initial target buffer size */ +#define VIRTIOVF_TARGET_INITIAL_BUF_SIZE SZ_1M + +static int +virtiovf_read_device_context_chunk(struct virtiovf_migration_file *migf, + u32 ctx_size); + +static struct page * +virtiovf_get_migration_page(struct virtiovf_data_buffer *buf, + unsigned long offset) +{ + unsigned long cur_offset = 0; + struct scatterlist *sg; + unsigned int i; + + /* All accesses are sequential */ + if (offset < buf->last_offset || !buf->last_offset_sg) { + buf->last_offset = 0; + buf->last_offset_sg = buf->table.sgt.sgl; + buf->sg_last_entry = 0; + } + + cur_offset = buf->last_offset; + + for_each_sg(buf->last_offset_sg, sg, + buf->table.sgt.orig_nents - buf->sg_last_entry, i) { + if (offset < sg->length + cur_offset) { + buf->last_offset_sg = sg; + buf->sg_last_entry += i; + buf->last_offset = cur_offset; + return nth_page(sg_page(sg), + (offset - cur_offset) / PAGE_SIZE); + } + cur_offset += sg->length; + } + return NULL; +} + +static int virtiovf_add_migration_pages(struct virtiovf_data_buffer *buf, + unsigned int npages) +{ + unsigned int to_alloc = npages; + struct page **page_list; + unsigned long filled; + unsigned int to_fill; + int ret; + int i; + + to_fill = min_t(unsigned int, npages, PAGE_SIZE / sizeof(*page_list)); + page_list = kvcalloc(to_fill, sizeof(*page_list), GFP_KERNEL_ACCOUNT); + if (!page_list) + return -ENOMEM; + + do { + filled = alloc_pages_bulk_array(GFP_KERNEL_ACCOUNT, to_fill, + page_list); + if (!filled) { + ret = -ENOMEM; + goto err; + } + to_alloc -= filled; + ret = sg_alloc_append_table_from_pages(&buf->table, page_list, + filled, 0, filled << PAGE_SHIFT, UINT_MAX, + SG_MAX_SINGLE_ALLOC, GFP_KERNEL_ACCOUNT); + + if (ret) + goto err_append; + buf->allocated_length += filled * PAGE_SIZE; + /* clean input for another bulk allocation */ + memset(page_list, 0, filled * sizeof(*page_list)); + to_fill = min_t(unsigned int, to_alloc, + PAGE_SIZE / sizeof(*page_list)); + 
} while (to_alloc > 0); + + kvfree(page_list); + return 0; + +err_append: + for (i = filled - 1; i >= 0; i--) + __free_page(page_list[i]); +err: + kvfree(page_list); + return ret; +} + +static void virtiovf_free_data_buffer(struct virtiovf_data_buffer *buf) +{ + struct sg_page_iter sg_iter; + + /* Undo alloc_pages_bulk_array() */ + for_each_sgtable_page(&buf->table.sgt, &sg_iter, 0) + __free_page(sg_page_iter_page(&sg_iter)); + sg_free_append_table(&buf->table); + kfree(buf); +} + +static struct virtiovf_data_buffer * +virtiovf_alloc_data_buffer(struct virtiovf_migration_file *migf, size_t length) +{ + struct virtiovf_data_buffer *buf; + int ret; + + buf = kzalloc(sizeof(*buf), GFP_KERNEL_ACCOUNT); + if (!buf) + return ERR_PTR(-ENOMEM); + + ret = virtiovf_add_migration_pages(buf, + DIV_ROUND_UP_ULL(length, PAGE_SIZE)); + if (ret) + goto end; + + buf->migf = migf; + return buf; +end: + virtiovf_free_data_buffer(buf); + return ERR_PTR(ret); +} + +static void virtiovf_put_data_buffer(struct virtiovf_data_buffer *buf) +{ + spin_lock_irq(&buf->migf->list_lock); + list_add_tail(&buf->buf_elm, &buf->migf->avail_list); + spin_unlock_irq(&buf->migf->list_lock); +} + +static int +virtiovf_pci_alloc_obj_id(struct virtiovf_pci_core_device *virtvdev, u8 type, + u32 *obj_id) +{ + return virtio_pci_admin_obj_create(virtvdev->core_device.pdev, + VIRTIO_RESOURCE_OBJ_DEV_PARTS, type, obj_id); +} + +static void +virtiovf_pci_free_obj_id(struct virtiovf_pci_core_device *virtvdev, u32 obj_id) +{ + virtio_pci_admin_obj_destroy(virtvdev->core_device.pdev, + VIRTIO_RESOURCE_OBJ_DEV_PARTS, obj_id); +} + +static struct virtiovf_data_buffer * +virtiovf_get_data_buffer(struct virtiovf_migration_file *migf, size_t length) +{ + struct virtiovf_data_buffer *buf, *temp_buf; + struct list_head free_list; + + INIT_LIST_HEAD(&free_list); + + spin_lock_irq(&migf->list_lock); + list_for_each_entry_safe(buf, temp_buf, &migf->avail_list, buf_elm) { + list_del_init(&buf->buf_elm); + if (buf->allocated_length >= length) { + spin_unlock_irq(&migf->list_lock); + goto found; + } + /* + * Prevent holding redundant buffers. Put in a free + * list and call at the end not under the spin lock + * (&migf->list_lock) to minimize its scope usage. 
+ */ + list_add(&buf->buf_elm, &free_list); + } + spin_unlock_irq(&migf->list_lock); + buf = virtiovf_alloc_data_buffer(migf, length); + +found: + while ((temp_buf = list_first_entry_or_null(&free_list, + struct virtiovf_data_buffer, buf_elm))) { + list_del(&temp_buf->buf_elm); + virtiovf_free_data_buffer(temp_buf); + } + + return buf; +} + +static void virtiovf_clean_migf_resources(struct virtiovf_migration_file *migf) +{ + struct virtiovf_data_buffer *entry; + + if (migf->buf) { + virtiovf_free_data_buffer(migf->buf); + migf->buf = NULL; + } + + if (migf->buf_header) { + virtiovf_free_data_buffer(migf->buf_header); + migf->buf_header = NULL; + } + + list_splice(&migf->avail_list, &migf->buf_list); + + while ((entry = list_first_entry_or_null(&migf->buf_list, + struct virtiovf_data_buffer, buf_elm))) { + list_del(&entry->buf_elm); + virtiovf_free_data_buffer(entry); + } + + if (migf->has_obj_id) + virtiovf_pci_free_obj_id(migf->virtvdev, migf->obj_id); +} + +static void virtiovf_disable_fd(struct virtiovf_migration_file *migf) +{ + mutex_lock(&migf->lock); + migf->state = VIRTIOVF_MIGF_STATE_ERROR; + migf->filp->f_pos = 0; + mutex_unlock(&migf->lock); +} + +static void virtiovf_disable_fds(struct virtiovf_pci_core_device *virtvdev) +{ + if (virtvdev->resuming_migf) { + virtiovf_disable_fd(virtvdev->resuming_migf); + virtiovf_clean_migf_resources(virtvdev->resuming_migf); + fput(virtvdev->resuming_migf->filp); + virtvdev->resuming_migf = NULL; + } + if (virtvdev->saving_migf) { + virtiovf_disable_fd(virtvdev->saving_migf); + virtiovf_clean_migf_resources(virtvdev->saving_migf); + fput(virtvdev->saving_migf->filp); + virtvdev->saving_migf = NULL; + } +} + +/* + * This function is called in all state_mutex unlock cases to + * handle a 'deferred_reset' if exists. + */ +static void virtiovf_state_mutex_unlock(struct virtiovf_pci_core_device *virtvdev) +{ +again: + spin_lock(&virtvdev->reset_lock); + if (virtvdev->deferred_reset) { + virtvdev->deferred_reset = false; + spin_unlock(&virtvdev->reset_lock); + virtvdev->mig_state = VFIO_DEVICE_STATE_RUNNING; + virtiovf_disable_fds(virtvdev); + goto again; + } + mutex_unlock(&virtvdev->state_mutex); + spin_unlock(&virtvdev->reset_lock); +} + +void virtiovf_migration_reset_done(struct pci_dev *pdev) +{ + struct virtiovf_pci_core_device *virtvdev = dev_get_drvdata(&pdev->dev); + + if (!virtvdev->migrate_cap) + return; + + /* + * As the higher VFIO layers are holding locks across reset and using + * those same locks with the mm_lock we need to prevent ABBA deadlock + * with the state_mutex and mm_lock. + * In case the state_mutex was taken already we defer the cleanup work + * to the unlock flow of the other running context. 
+ */ + spin_lock(&virtvdev->reset_lock); + virtvdev->deferred_reset = true; + if (!mutex_trylock(&virtvdev->state_mutex)) { + spin_unlock(&virtvdev->reset_lock); + return; + } + spin_unlock(&virtvdev->reset_lock); + virtiovf_state_mutex_unlock(virtvdev); +} + +static int virtiovf_release_file(struct inode *inode, struct file *filp) +{ + struct virtiovf_migration_file *migf = filp->private_data; + + virtiovf_disable_fd(migf); + mutex_destroy(&migf->lock); + kfree(migf); + return 0; +} + +static struct virtiovf_data_buffer * +virtiovf_get_data_buff_from_pos(struct virtiovf_migration_file *migf, + loff_t pos, bool *end_of_data) +{ + struct virtiovf_data_buffer *buf; + bool found = false; + + *end_of_data = false; + spin_lock_irq(&migf->list_lock); + if (list_empty(&migf->buf_list)) { + *end_of_data = true; + goto end; + } + + buf = list_first_entry(&migf->buf_list, struct virtiovf_data_buffer, + buf_elm); + if (pos >= buf->start_pos && + pos < buf->start_pos + buf->length) { + found = true; + goto end; + } + + /* + * As we use a stream based FD we may expect having the data always + * on first chunk + */ + migf->state = VIRTIOVF_MIGF_STATE_ERROR; + +end: + spin_unlock_irq(&migf->list_lock); + return found ? buf : NULL; +} + +static ssize_t virtiovf_buf_read(struct virtiovf_data_buffer *vhca_buf, + char __user **buf, size_t *len, loff_t *pos) +{ + unsigned long offset; + ssize_t done = 0; + size_t copy_len; + + copy_len = min_t(size_t, + vhca_buf->start_pos + vhca_buf->length - *pos, *len); + while (copy_len) { + size_t page_offset; + struct page *page; + size_t page_len; + u8 *from_buff; + int ret; + + offset = *pos - vhca_buf->start_pos; + page_offset = offset % PAGE_SIZE; + offset -= page_offset; + page = virtiovf_get_migration_page(vhca_buf, offset); + if (!page) + return -EINVAL; + page_len = min_t(size_t, copy_len, PAGE_SIZE - page_offset); + from_buff = kmap_local_page(page); + ret = copy_to_user(*buf, from_buff + page_offset, page_len); + kunmap_local(from_buff); + if (ret) + return -EFAULT; + *pos += page_len; + *len -= page_len; + *buf += page_len; + done += page_len; + copy_len -= page_len; + } + + if (*pos >= vhca_buf->start_pos + vhca_buf->length) { + spin_lock_irq(&vhca_buf->migf->list_lock); + list_del_init(&vhca_buf->buf_elm); + list_add_tail(&vhca_buf->buf_elm, &vhca_buf->migf->avail_list); + spin_unlock_irq(&vhca_buf->migf->list_lock); + } + + return done; +} + +static ssize_t virtiovf_save_read(struct file *filp, char __user *buf, size_t len, + loff_t *pos) +{ + struct virtiovf_migration_file *migf = filp->private_data; + struct virtiovf_data_buffer *vhca_buf; + bool first_loop_call = true; + bool end_of_data; + ssize_t done = 0; + + if (pos) + return -ESPIPE; + pos = &filp->f_pos; + + mutex_lock(&migf->lock); + if (migf->state == VIRTIOVF_MIGF_STATE_ERROR) { + done = -ENODEV; + goto out_unlock; + } + + while (len) { + ssize_t count; + + vhca_buf = virtiovf_get_data_buff_from_pos(migf, *pos, &end_of_data); + if (first_loop_call) { + first_loop_call = false; + /* Temporary end of file as part of PRE_COPY */ + if (end_of_data && migf->state == VIRTIOVF_MIGF_STATE_PRECOPY) { + done = -ENOMSG; + goto out_unlock; + } + if (end_of_data && migf->state != VIRTIOVF_MIGF_STATE_COMPLETE) { + done = -EINVAL; + goto out_unlock; + } + } + + if (end_of_data) + goto out_unlock; + + if (!vhca_buf) { + done = -EINVAL; + goto out_unlock; + } + + count = virtiovf_buf_read(vhca_buf, &buf, &len, pos); + if (count < 0) { + done = count; + goto out_unlock; + } + done += count; + } + +out_unlock: + 
mutex_unlock(&migf->lock); + return done; +} + +static long virtiovf_precopy_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + struct virtiovf_migration_file *migf = filp->private_data; + struct virtiovf_pci_core_device *virtvdev = migf->virtvdev; + struct vfio_precopy_info info = {}; + loff_t *pos = &filp->f_pos; + bool end_of_data = false; + unsigned long minsz; + u32 ctx_size = 0; + int ret; + + if (cmd != VFIO_MIG_GET_PRECOPY_INFO) + return -ENOTTY; + + minsz = offsetofend(struct vfio_precopy_info, dirty_bytes); + if (copy_from_user(&info, (void __user *)arg, minsz)) + return -EFAULT; + + if (info.argsz < minsz) + return -EINVAL; + + mutex_lock(&virtvdev->state_mutex); + if (virtvdev->mig_state != VFIO_DEVICE_STATE_PRE_COPY && + virtvdev->mig_state != VFIO_DEVICE_STATE_PRE_COPY_P2P) { + ret = -EINVAL; + goto err_state_unlock; + } + + /* + * The virtio specification does not include a PRE_COPY concept. + * Since we can expect the data to remain the same for a certain period, + * we use a rate limiter mechanism before making a call to the device. + */ + if (__ratelimit(&migf->pre_copy_rl_state)) { + + ret = virtio_pci_admin_dev_parts_metadata_get(virtvdev->core_device.pdev, + VIRTIO_RESOURCE_OBJ_DEV_PARTS, migf->obj_id, + VIRTIO_ADMIN_CMD_DEV_PARTS_METADATA_TYPE_SIZE, + &ctx_size); + if (ret) + goto err_state_unlock; + } + + mutex_lock(&migf->lock); + if (migf->state == VIRTIOVF_MIGF_STATE_ERROR) { + ret = -ENODEV; + goto err_migf_unlock; + } + + if (migf->pre_copy_initial_bytes > *pos) { + info.initial_bytes = migf->pre_copy_initial_bytes - *pos; + } else { + info.dirty_bytes = migf->max_pos - *pos; + if (!info.dirty_bytes) + end_of_data = true; + info.dirty_bytes += ctx_size; + } + + if (!end_of_data || !ctx_size) { + mutex_unlock(&migf->lock); + goto done; + } + + mutex_unlock(&migf->lock); + /* + * We finished transferring the current state and the device has a + * dirty state, read a new state. + */ + ret = virtiovf_read_device_context_chunk(migf, ctx_size); + if (ret) + /* + * The machine is running, and context size could be grow, so no reason to mark + * the device state as VIRTIOVF_MIGF_STATE_ERROR. 
+ */ + goto err_state_unlock; + +done: + virtiovf_state_mutex_unlock(virtvdev); + if (copy_to_user((void __user *)arg, &info, minsz)) + return -EFAULT; + return 0; + +err_migf_unlock: + mutex_unlock(&migf->lock); +err_state_unlock: + virtiovf_state_mutex_unlock(virtvdev); + return ret; +} + +static const struct file_operations virtiovf_save_fops = { + .owner = THIS_MODULE, + .read = virtiovf_save_read, + .unlocked_ioctl = virtiovf_precopy_ioctl, + .compat_ioctl = compat_ptr_ioctl, + .release = virtiovf_release_file, +}; + +static int +virtiovf_add_buf_header(struct virtiovf_data_buffer *header_buf, + u32 data_size) +{ + struct virtiovf_migration_file *migf = header_buf->migf; + struct virtiovf_migration_header header = {}; + struct page *page; + u8 *to_buff; + + header.record_size = cpu_to_le64(data_size); + header.flags = cpu_to_le32(VIRTIOVF_MIGF_HEADER_FLAGS_TAG_MANDATORY); + header.tag = cpu_to_le32(VIRTIOVF_MIGF_HEADER_TAG_DEVICE_DATA); + page = virtiovf_get_migration_page(header_buf, 0); + if (!page) + return -EINVAL; + to_buff = kmap_local_page(page); + memcpy(to_buff, &header, sizeof(header)); + kunmap_local(to_buff); + header_buf->length = sizeof(header); + header_buf->start_pos = header_buf->migf->max_pos; + migf->max_pos += header_buf->length; + spin_lock_irq(&migf->list_lock); + list_add_tail(&header_buf->buf_elm, &migf->buf_list); + spin_unlock_irq(&migf->list_lock); + return 0; +} + +static int +virtiovf_read_device_context_chunk(struct virtiovf_migration_file *migf, + u32 ctx_size) +{ + struct virtiovf_data_buffer *header_buf; + struct virtiovf_data_buffer *buf; + bool unmark_end = false; + struct scatterlist *sg; + unsigned int i; + u32 res_size; + int nent; + int ret; + + buf = virtiovf_get_data_buffer(migf, ctx_size); + if (IS_ERR(buf)) + return PTR_ERR(buf); + + /* Find the total count of SG entries which satisfies the size */ + nent = sg_nents_for_len(buf->table.sgt.sgl, ctx_size); + if (nent <= 0) { + ret = -EINVAL; + goto out; + } + + /* + * Iterate to that SG entry and mark it as last (if it's not already) + * to let underlay layers iterate only till that entry. 
+ */ + for_each_sg(buf->table.sgt.sgl, sg, nent - 1, i) + ; + + if (!sg_is_last(sg)) { + unmark_end = true; + sg_mark_end(sg); + } + + ret = virtio_pci_admin_dev_parts_get(migf->virtvdev->core_device.pdev, + VIRTIO_RESOURCE_OBJ_DEV_PARTS, + migf->obj_id, + VIRTIO_ADMIN_CMD_DEV_PARTS_GET_TYPE_ALL, + buf->table.sgt.sgl, &res_size); + /* Restore the original SG mark end */ + if (unmark_end) + sg_unmark_end(sg); + if (ret) + goto out; + + buf->length = res_size; + header_buf = virtiovf_get_data_buffer(migf, + sizeof(struct virtiovf_migration_header)); + if (IS_ERR(header_buf)) { + ret = PTR_ERR(header_buf); + goto out; + } + + ret = virtiovf_add_buf_header(header_buf, res_size); + if (ret) + goto out_header; + + buf->start_pos = buf->migf->max_pos; + migf->max_pos += buf->length; + spin_lock(&migf->list_lock); + list_add_tail(&buf->buf_elm, &migf->buf_list); + spin_unlock_irq(&migf->list_lock); + return 0; + +out_header: + virtiovf_put_data_buffer(header_buf); +out: + virtiovf_put_data_buffer(buf); + return ret; +} + +static int +virtiovf_pci_save_device_final_data(struct virtiovf_pci_core_device *virtvdev) +{ + struct virtiovf_migration_file *migf = virtvdev->saving_migf; + u32 ctx_size; + int ret; + + if (migf->state == VIRTIOVF_MIGF_STATE_ERROR) + return -ENODEV; + + ret = virtio_pci_admin_dev_parts_metadata_get(virtvdev->core_device.pdev, + VIRTIO_RESOURCE_OBJ_DEV_PARTS, migf->obj_id, + VIRTIO_ADMIN_CMD_DEV_PARTS_METADATA_TYPE_SIZE, + &ctx_size); + if (ret) + goto err; + + if (!ctx_size) { + ret = -EINVAL; + goto err; + } + + ret = virtiovf_read_device_context_chunk(migf, ctx_size); + if (ret) + goto err; + + migf->state = VIRTIOVF_MIGF_STATE_COMPLETE; + return 0; + +err: + migf->state = VIRTIOVF_MIGF_STATE_ERROR; + return ret; +} + +static struct virtiovf_migration_file * +virtiovf_pci_save_device_data(struct virtiovf_pci_core_device *virtvdev, + bool pre_copy) +{ + struct virtiovf_migration_file *migf; + u32 ctx_size; + u32 obj_id; + int ret; + + migf = kzalloc(sizeof(*migf), GFP_KERNEL_ACCOUNT); + if (!migf) + return ERR_PTR(-ENOMEM); + + migf->filp = anon_inode_getfile("virtiovf_mig", &virtiovf_save_fops, migf, + O_RDONLY); + if (IS_ERR(migf->filp)) { + ret = PTR_ERR(migf->filp); + kfree(migf); + return ERR_PTR(ret); + } + + stream_open(migf->filp->f_inode, migf->filp); + mutex_init(&migf->lock); + INIT_LIST_HEAD(&migf->buf_list); + INIT_LIST_HEAD(&migf->avail_list); + spin_lock_init(&migf->list_lock); + migf->virtvdev = virtvdev; + + lockdep_assert_held(&virtvdev->state_mutex); + ret = virtiovf_pci_alloc_obj_id(virtvdev, VIRTIO_RESOURCE_OBJ_DEV_PARTS_TYPE_GET, + &obj_id); + if (ret) + goto out; + + migf->obj_id = obj_id; + /* Mark as having a valid obj id which can be even 0 */ + migf->has_obj_id = true; + ret = virtio_pci_admin_dev_parts_metadata_get(virtvdev->core_device.pdev, + VIRTIO_RESOURCE_OBJ_DEV_PARTS, obj_id, + VIRTIO_ADMIN_CMD_DEV_PARTS_METADATA_TYPE_SIZE, + &ctx_size); + if (ret) + goto out_clean; + + if (!ctx_size) { + ret = -EINVAL; + goto out_clean; + } + + ret = virtiovf_read_device_context_chunk(migf, ctx_size); + if (ret) + goto out_clean; + + if (pre_copy) { + migf->pre_copy_initial_bytes = migf->max_pos; + /* Arbitrarily set the pre-copy rate limit to 1-second intervals */ + ratelimit_state_init(&migf->pre_copy_rl_state, 1 * HZ, 1); + /* Prevent any rate messages upon its usage */ + ratelimit_set_flags(&migf->pre_copy_rl_state, + RATELIMIT_MSG_ON_RELEASE); + migf->state = VIRTIOVF_MIGF_STATE_PRECOPY; + } else { + migf->state = VIRTIOVF_MIGF_STATE_COMPLETE; + } + + 
return migf; + +out_clean: + virtiovf_clean_migf_resources(migf); +out: + fput(migf->filp); + return ERR_PTR(ret); +} + +/* + * Set the required object header at the beginning of the buffer. + * The actual device parts data will be written post of the header offset. + */ +static int virtiovf_set_obj_cmd_header(struct virtiovf_data_buffer *vhca_buf) +{ + struct virtio_admin_cmd_resource_obj_cmd_hdr obj_hdr = {}; + struct page *page; + u8 *to_buff; + + obj_hdr.type = cpu_to_le16(VIRTIO_RESOURCE_OBJ_DEV_PARTS); + obj_hdr.id = cpu_to_le32(vhca_buf->migf->obj_id); + page = virtiovf_get_migration_page(vhca_buf, 0); + if (!page) + return -EINVAL; + to_buff = kmap_local_page(page); + memcpy(to_buff, &obj_hdr, sizeof(obj_hdr)); + kunmap_local(to_buff); + + /* Mark the buffer as including the header object data */ + vhca_buf->include_header_object = 1; + return 0; +} + +static int +virtiovf_append_page_to_mig_buf(struct virtiovf_data_buffer *vhca_buf, + const char __user **buf, size_t *len, + loff_t *pos, ssize_t *done) +{ + unsigned long offset; + size_t page_offset; + struct page *page; + size_t page_len; + u8 *to_buff; + int ret; + + offset = *pos - vhca_buf->start_pos; + + if (vhca_buf->include_header_object) + /* The buffer holds the object header, update the offset accordingly */ + offset += sizeof(struct virtio_admin_cmd_resource_obj_cmd_hdr); + + page_offset = offset % PAGE_SIZE; + + page = virtiovf_get_migration_page(vhca_buf, offset - page_offset); + if (!page) + return -EINVAL; + + page_len = min_t(size_t, *len, PAGE_SIZE - page_offset); + to_buff = kmap_local_page(page); + ret = copy_from_user(to_buff + page_offset, *buf, page_len); + kunmap_local(to_buff); + if (ret) + return -EFAULT; + + *pos += page_len; + *done += page_len; + *buf += page_len; + *len -= page_len; + vhca_buf->length += page_len; + return 0; +} + +static ssize_t +virtiovf_resume_read_chunk(struct virtiovf_migration_file *migf, + struct virtiovf_data_buffer *vhca_buf, + size_t chunk_size, const char __user **buf, + size_t *len, loff_t *pos, ssize_t *done, + bool *has_work) +{ + size_t copy_len, to_copy; + int ret; + + to_copy = min_t(size_t, *len, chunk_size - vhca_buf->length); + copy_len = to_copy; + while (to_copy) { + ret = virtiovf_append_page_to_mig_buf(vhca_buf, buf, &to_copy, + pos, done); + if (ret) + return ret; + } + + *len -= copy_len; + if (vhca_buf->length == chunk_size) { + migf->load_state = VIRTIOVF_LOAD_STATE_LOAD_CHUNK; + migf->max_pos += chunk_size; + *has_work = true; + } + + return 0; +} + +static int +virtiovf_resume_read_header_data(struct virtiovf_migration_file *migf, + struct virtiovf_data_buffer *vhca_buf, + const char __user **buf, size_t *len, + loff_t *pos, ssize_t *done) +{ + size_t copy_len, to_copy; + size_t required_data; + int ret; + + required_data = migf->record_size - vhca_buf->length; + to_copy = min_t(size_t, *len, required_data); + copy_len = to_copy; + while (to_copy) { + ret = virtiovf_append_page_to_mig_buf(vhca_buf, buf, &to_copy, + pos, done); + if (ret) + return ret; + } + + *len -= copy_len; + if (vhca_buf->length == migf->record_size) { + switch (migf->record_tag) { + default: + /* Optional tag */ + break; + } + + migf->load_state = VIRTIOVF_LOAD_STATE_READ_HEADER; + migf->max_pos += migf->record_size; + vhca_buf->length = 0; + } + + return 0; +} + +static int +virtiovf_resume_read_header(struct virtiovf_migration_file *migf, + struct virtiovf_data_buffer *vhca_buf, + const char __user **buf, + size_t *len, loff_t *pos, + ssize_t *done, bool *has_work) +{ + struct page 
*page; + size_t copy_len; + u8 *to_buff; + int ret; + + copy_len = min_t(size_t, *len, + sizeof(struct virtiovf_migration_header) - vhca_buf->length); + page = virtiovf_get_migration_page(vhca_buf, 0); + if (!page) + return -EINVAL; + to_buff = kmap_local_page(page); + ret = copy_from_user(to_buff + vhca_buf->length, *buf, copy_len); + if (ret) { + ret = -EFAULT; + goto end; + } + + *buf += copy_len; + *pos += copy_len; + *done += copy_len; + *len -= copy_len; + vhca_buf->length += copy_len; + if (vhca_buf->length == sizeof(struct virtiovf_migration_header)) { + u64 record_size; + u32 flags; + + record_size = le64_to_cpup((__le64 *)to_buff); + if (record_size > MAX_LOAD_SIZE) { + ret = -ENOMEM; + goto end; + } + + migf->record_size = record_size; + flags = le32_to_cpup((__le32 *)(to_buff + + offsetof(struct virtiovf_migration_header, flags))); + migf->record_tag = le32_to_cpup((__le32 *)(to_buff + + offsetof(struct virtiovf_migration_header, tag))); + switch (migf->record_tag) { + case VIRTIOVF_MIGF_HEADER_TAG_DEVICE_DATA: + migf->load_state = VIRTIOVF_LOAD_STATE_PREP_CHUNK; + break; + default: + if (!(flags & VIRTIOVF_MIGF_HEADER_FLAGS_TAG_OPTIONAL)) { + ret = -EOPNOTSUPP; + goto end; + } + /* We may read and skip this optional record data */ + migf->load_state = VIRTIOVF_LOAD_STATE_PREP_HEADER_DATA; + } + + migf->max_pos += vhca_buf->length; + vhca_buf->length = 0; + *has_work = true; + } +end: + kunmap_local(to_buff); + return ret; +} + +static ssize_t virtiovf_resume_write(struct file *filp, const char __user *buf, + size_t len, loff_t *pos) +{ + struct virtiovf_migration_file *migf = filp->private_data; + struct virtiovf_data_buffer *vhca_buf = migf->buf; + struct virtiovf_data_buffer *vhca_buf_header = migf->buf_header; + unsigned int orig_length; + bool has_work = false; + ssize_t done = 0; + int ret = 0; + + if (pos) + return -ESPIPE; + + pos = &filp->f_pos; + if (*pos < vhca_buf->start_pos) + return -EINVAL; + + mutex_lock(&migf->virtvdev->state_mutex); + mutex_lock(&migf->lock); + if (migf->state == VIRTIOVF_MIGF_STATE_ERROR) { + done = -ENODEV; + goto out_unlock; + } + + while (len || has_work) { + has_work = false; + switch (migf->load_state) { + case VIRTIOVF_LOAD_STATE_READ_HEADER: + ret = virtiovf_resume_read_header(migf, vhca_buf_header, &buf, + &len, pos, &done, &has_work); + if (ret) + goto out_unlock; + break; + case VIRTIOVF_LOAD_STATE_PREP_HEADER_DATA: + if (vhca_buf_header->allocated_length < migf->record_size) { + virtiovf_free_data_buffer(vhca_buf_header); + + migf->buf_header = virtiovf_alloc_data_buffer(migf, + migf->record_size); + if (IS_ERR(migf->buf_header)) { + ret = PTR_ERR(migf->buf_header); + migf->buf_header = NULL; + goto out_unlock; + } + + vhca_buf_header = migf->buf_header; + } + + vhca_buf_header->start_pos = migf->max_pos; + migf->load_state = VIRTIOVF_LOAD_STATE_READ_HEADER_DATA; + break; + case VIRTIOVF_LOAD_STATE_READ_HEADER_DATA: + ret = virtiovf_resume_read_header_data(migf, vhca_buf_header, + &buf, &len, pos, &done); + if (ret) + goto out_unlock; + break; + case VIRTIOVF_LOAD_STATE_PREP_CHUNK: + { + u32 cmd_size = migf->record_size + + sizeof(struct virtio_admin_cmd_resource_obj_cmd_hdr); + + /* + * The DMA map/unmap is managed in virtio layer, we just need to extend + * the SG pages to hold the extra required chunk data. 
+ */ + if (vhca_buf->allocated_length < cmd_size) { + ret = virtiovf_add_migration_pages(vhca_buf, + DIV_ROUND_UP_ULL(cmd_size - vhca_buf->allocated_length, + PAGE_SIZE)); + if (ret) + goto out_unlock; + } + + vhca_buf->start_pos = migf->max_pos; + migf->load_state = VIRTIOVF_LOAD_STATE_READ_CHUNK; + break; + } + case VIRTIOVF_LOAD_STATE_READ_CHUNK: + ret = virtiovf_resume_read_chunk(migf, vhca_buf, migf->record_size, + &buf, &len, pos, &done, &has_work); + if (ret) + goto out_unlock; + break; + case VIRTIOVF_LOAD_STATE_LOAD_CHUNK: + /* Mark the last SG entry and set its length */ + sg_mark_end(vhca_buf->last_offset_sg); + orig_length = vhca_buf->last_offset_sg->length; + /* Length should include the resource object command header */ + vhca_buf->last_offset_sg->length = vhca_buf->length + + sizeof(struct virtio_admin_cmd_resource_obj_cmd_hdr) - + vhca_buf->last_offset; + ret = virtio_pci_admin_dev_parts_set(migf->virtvdev->core_device.pdev, + vhca_buf->table.sgt.sgl); + /* Restore the original SG data */ + vhca_buf->last_offset_sg->length = orig_length; + sg_unmark_end(vhca_buf->last_offset_sg); + if (ret) + goto out_unlock; + migf->load_state = VIRTIOVF_LOAD_STATE_READ_HEADER; + /* be ready for reading the next chunk */ + vhca_buf->length = 0; + break; + default: + break; + } + } + +out_unlock: + if (ret) + migf->state = VIRTIOVF_MIGF_STATE_ERROR; + mutex_unlock(&migf->lock); + virtiovf_state_mutex_unlock(migf->virtvdev); + return ret ? ret : done; +} + +static const struct file_operations virtiovf_resume_fops = { + .owner = THIS_MODULE, + .write = virtiovf_resume_write, + .release = virtiovf_release_file, +}; + +static struct virtiovf_migration_file * +virtiovf_pci_resume_device_data(struct virtiovf_pci_core_device *virtvdev) +{ + struct virtiovf_migration_file *migf; + struct virtiovf_data_buffer *buf; + u32 obj_id; + int ret; + + migf = kzalloc(sizeof(*migf), GFP_KERNEL_ACCOUNT); + if (!migf) + return ERR_PTR(-ENOMEM); + + migf->filp = anon_inode_getfile("virtiovf_mig", &virtiovf_resume_fops, migf, + O_WRONLY); + if (IS_ERR(migf->filp)) { + ret = PTR_ERR(migf->filp); + kfree(migf); + return ERR_PTR(ret); + } + + stream_open(migf->filp->f_inode, migf->filp); + mutex_init(&migf->lock); + INIT_LIST_HEAD(&migf->buf_list); + INIT_LIST_HEAD(&migf->avail_list); + spin_lock_init(&migf->list_lock); + + buf = virtiovf_alloc_data_buffer(migf, VIRTIOVF_TARGET_INITIAL_BUF_SIZE); + if (IS_ERR(buf)) { + ret = PTR_ERR(buf); + goto out; + } + + migf->buf = buf; + + buf = virtiovf_alloc_data_buffer(migf, + sizeof(struct virtiovf_migration_header)); + if (IS_ERR(buf)) { + ret = PTR_ERR(buf); + goto out_clean; + } + + migf->buf_header = buf; + migf->load_state = VIRTIOVF_LOAD_STATE_READ_HEADER; + + migf->virtvdev = virtvdev; + ret = virtiovf_pci_alloc_obj_id(virtvdev, VIRTIO_RESOURCE_OBJ_DEV_PARTS_TYPE_SET, + &obj_id); + if (ret) + goto out_clean; + + migf->obj_id = obj_id; + /* Mark as having a valid obj id which can be even 0 */ + migf->has_obj_id = true; + ret = virtiovf_set_obj_cmd_header(migf->buf); + if (ret) + goto out_clean; + + return migf; + +out_clean: + virtiovf_clean_migf_resources(migf); +out: + fput(migf->filp); + return ERR_PTR(ret); +} + +static struct file * +virtiovf_pci_step_device_state_locked(struct virtiovf_pci_core_device *virtvdev, + u32 new) +{ + u32 cur = virtvdev->mig_state; + int ret; + + if (cur == VFIO_DEVICE_STATE_RUNNING_P2P && new == VFIO_DEVICE_STATE_STOP) { + /* NOP */ + return NULL; + } + + if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RUNNING_P2P) { + 
/* NOP */ + return NULL; + } + + if ((cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_RUNNING_P2P) || + (cur == VFIO_DEVICE_STATE_PRE_COPY && new == VFIO_DEVICE_STATE_PRE_COPY_P2P)) { + ret = virtio_pci_admin_mode_set(virtvdev->core_device.pdev, + BIT(VIRTIO_ADMIN_CMD_DEV_MODE_F_STOPPED)); + if (ret) + return ERR_PTR(ret); + return NULL; + } + + if ((cur == VFIO_DEVICE_STATE_RUNNING_P2P && new == VFIO_DEVICE_STATE_RUNNING) || + (cur == VFIO_DEVICE_STATE_PRE_COPY_P2P && new == VFIO_DEVICE_STATE_PRE_COPY)) { + ret = virtio_pci_admin_mode_set(virtvdev->core_device.pdev, 0); + if (ret) + return ERR_PTR(ret); + return NULL; + } + + if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_STOP_COPY) { + struct virtiovf_migration_file *migf; + + migf = virtiovf_pci_save_device_data(virtvdev, false); + if (IS_ERR(migf)) + return ERR_CAST(migf); + get_file(migf->filp); + virtvdev->saving_migf = migf; + return migf->filp; + } + + if ((cur == VFIO_DEVICE_STATE_STOP_COPY && new == VFIO_DEVICE_STATE_STOP) || + (cur == VFIO_DEVICE_STATE_PRE_COPY && new == VFIO_DEVICE_STATE_RUNNING) || + (cur == VFIO_DEVICE_STATE_PRE_COPY_P2P && new == VFIO_DEVICE_STATE_RUNNING_P2P)) { + virtiovf_disable_fds(virtvdev); + return NULL; + } + + if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RESUMING) { + struct virtiovf_migration_file *migf; + + migf = virtiovf_pci_resume_device_data(virtvdev); + if (IS_ERR(migf)) + return ERR_CAST(migf); + get_file(migf->filp); + virtvdev->resuming_migf = migf; + return migf->filp; + } + + if (cur == VFIO_DEVICE_STATE_RESUMING && new == VFIO_DEVICE_STATE_STOP) { + virtiovf_disable_fds(virtvdev); + return NULL; + } + + if ((cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_PRE_COPY) || + (cur == VFIO_DEVICE_STATE_RUNNING_P2P && + new == VFIO_DEVICE_STATE_PRE_COPY_P2P)) { + struct virtiovf_migration_file *migf; + + migf = virtiovf_pci_save_device_data(virtvdev, true); + if (IS_ERR(migf)) + return ERR_CAST(migf); + get_file(migf->filp); + virtvdev->saving_migf = migf; + return migf->filp; + } + + if (cur == VFIO_DEVICE_STATE_PRE_COPY_P2P && new == VFIO_DEVICE_STATE_STOP_COPY) { + ret = virtiovf_pci_save_device_final_data(virtvdev); + return ret ? 
ERR_PTR(ret) : NULL; + } + + /* + * vfio_mig_get_next_state() does not use arcs other than the above + */ + WARN_ON(true); + return ERR_PTR(-EINVAL); +} + +static struct file * +virtiovf_pci_set_device_state(struct vfio_device *vdev, + enum vfio_device_mig_state new_state) +{ + struct virtiovf_pci_core_device *virtvdev = container_of( + vdev, struct virtiovf_pci_core_device, core_device.vdev); + enum vfio_device_mig_state next_state; + struct file *res = NULL; + int ret; + + mutex_lock(&virtvdev->state_mutex); + while (new_state != virtvdev->mig_state) { + ret = vfio_mig_get_next_state(vdev, virtvdev->mig_state, + new_state, &next_state); + if (ret) { + res = ERR_PTR(ret); + break; + } + res = virtiovf_pci_step_device_state_locked(virtvdev, next_state); + if (IS_ERR(res)) + break; + virtvdev->mig_state = next_state; + if (WARN_ON(res && new_state != virtvdev->mig_state)) { + fput(res); + res = ERR_PTR(-EINVAL); + break; + } + } + virtiovf_state_mutex_unlock(virtvdev); + return res; +} + +static int virtiovf_pci_get_device_state(struct vfio_device *vdev, + enum vfio_device_mig_state *curr_state) +{ + struct virtiovf_pci_core_device *virtvdev = container_of( + vdev, struct virtiovf_pci_core_device, core_device.vdev); + + mutex_lock(&virtvdev->state_mutex); + *curr_state = virtvdev->mig_state; + virtiovf_state_mutex_unlock(virtvdev); + return 0; +} + +static int virtiovf_pci_get_data_size(struct vfio_device *vdev, + unsigned long *stop_copy_length) +{ + struct virtiovf_pci_core_device *virtvdev = container_of( + vdev, struct virtiovf_pci_core_device, core_device.vdev); + bool obj_id_exists; + u32 res_size; + u32 obj_id; + int ret; + + mutex_lock(&virtvdev->state_mutex); + obj_id_exists = virtvdev->saving_migf && virtvdev->saving_migf->has_obj_id; + if (!obj_id_exists) { + ret = virtiovf_pci_alloc_obj_id(virtvdev, + VIRTIO_RESOURCE_OBJ_DEV_PARTS_TYPE_GET, + &obj_id); + if (ret) + goto end; + } else { + obj_id = virtvdev->saving_migf->obj_id; + } + + ret = virtio_pci_admin_dev_parts_metadata_get(virtvdev->core_device.pdev, + VIRTIO_RESOURCE_OBJ_DEV_PARTS, obj_id, + VIRTIO_ADMIN_CMD_DEV_PARTS_METADATA_TYPE_SIZE, + &res_size); + if (!ret) + *stop_copy_length = res_size; + + /* + * We can't leave this obj_id alive if didn't exist before, otherwise, it might + * stay alive, even without an active migration flow (e.g. 
migration was cancelled) + */ + if (!obj_id_exists) + virtiovf_pci_free_obj_id(virtvdev, obj_id); +end: + virtiovf_state_mutex_unlock(virtvdev); + return ret; +} + +static const struct vfio_migration_ops virtvdev_pci_mig_ops = { + .migration_set_state = virtiovf_pci_set_device_state, + .migration_get_state = virtiovf_pci_get_device_state, + .migration_get_data_size = virtiovf_pci_get_data_size, +}; + +void virtiovf_set_migratable(struct virtiovf_pci_core_device *virtvdev) +{ + virtvdev->migrate_cap = 1; + mutex_init(&virtvdev->state_mutex); + spin_lock_init(&virtvdev->reset_lock); + virtvdev->core_device.vdev.migration_flags = + VFIO_MIGRATION_STOP_COPY | + VFIO_MIGRATION_P2P | + VFIO_MIGRATION_PRE_COPY; + virtvdev->core_device.vdev.mig_ops = &virtvdev_pci_mig_ops; +} + +void virtiovf_open_migration(struct virtiovf_pci_core_device *virtvdev) +{ + if (!virtvdev->migrate_cap) + return; + + virtvdev->mig_state = VFIO_DEVICE_STATE_RUNNING; +} + +void virtiovf_close_migration(struct virtiovf_pci_core_device *virtvdev) +{ + if (!virtvdev->migrate_cap) + return; + + virtiovf_disable_fds(virtvdev); +} diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h index 8beecf23ec85..8cd01de27baf 100644 --- a/drivers/virtio/virtio_pci_common.h +++ b/drivers/virtio/virtio_pci_common.h @@ -48,6 +48,9 @@ struct virtio_pci_admin_vq { /* Protects virtqueue access. */ spinlock_t lock; u64 supported_cmds; + u64 supported_caps; + u8 max_dev_parts_objects; + struct ida dev_parts_ida; /* Name of the admin queue: avq.$vq_index. */ char name[10]; u16 vq_index; @@ -167,15 +170,27 @@ struct virtio_device *virtio_pci_vf_get_pf_dev(struct pci_dev *pdev); BIT_ULL(VIRTIO_ADMIN_CMD_LEGACY_DEV_CFG_READ) | \ BIT_ULL(VIRTIO_ADMIN_CMD_LEGACY_NOTIFY_INFO)) +#define VIRTIO_DEV_PARTS_ADMIN_CMD_BITMAP \ + (BIT_ULL(VIRTIO_ADMIN_CMD_CAP_ID_LIST_QUERY) | \ + BIT_ULL(VIRTIO_ADMIN_CMD_DRIVER_CAP_SET) | \ + BIT_ULL(VIRTIO_ADMIN_CMD_DEVICE_CAP_GET) | \ + BIT_ULL(VIRTIO_ADMIN_CMD_RESOURCE_OBJ_CREATE) | \ + BIT_ULL(VIRTIO_ADMIN_CMD_RESOURCE_OBJ_DESTROY) | \ + BIT_ULL(VIRTIO_ADMIN_CMD_DEV_PARTS_METADATA_GET) | \ + BIT_ULL(VIRTIO_ADMIN_CMD_DEV_PARTS_GET) | \ + BIT_ULL(VIRTIO_ADMIN_CMD_DEV_PARTS_SET) | \ + BIT_ULL(VIRTIO_ADMIN_CMD_DEV_MODE_SET)) + /* Unlike modern drivers which support hardware virtio devices, legacy drivers * assume software-based devices: e.g. they don't use proper memory barriers * on ARM, use big endian on PPC, etc. X86 drivers are mostly ok though, more * or less by chance. For now, only support legacy IO on X86. 
*/ #ifdef CONFIG_VIRTIO_PCI_ADMIN_LEGACY -#define VIRTIO_ADMIN_CMD_BITMAP VIRTIO_LEGACY_ADMIN_CMD_BITMAP +#define VIRTIO_ADMIN_CMD_BITMAP (VIRTIO_LEGACY_ADMIN_CMD_BITMAP | \ + VIRTIO_DEV_PARTS_ADMIN_CMD_BITMAP) #else -#define VIRTIO_ADMIN_CMD_BITMAP 0 +#define VIRTIO_ADMIN_CMD_BITMAP VIRTIO_DEV_PARTS_ADMIN_CMD_BITMAP #endif bool vp_is_avq(struct virtio_device *vdev, unsigned int index); diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c index 4fbcbc7a9ae1..5eaade757860 100644 --- a/drivers/virtio/virtio_pci_modern.c +++ b/drivers/virtio/virtio_pci_modern.c @@ -15,6 +15,7 @@ */ #include <linux/delay.h> +#include <linux/virtio_pci_admin.h> #define VIRTIO_PCI_NO_LEGACY #define VIRTIO_RING_NO_LEGACY #include "virtio_pci_common.h" @@ -54,8 +55,10 @@ void vp_modern_avq_done(struct virtqueue *vq) spin_lock_irqsave(&admin_vq->lock, flags); do { virtqueue_disable_cb(vq); - while ((cmd = virtqueue_get_buf(vq, &len))) + while ((cmd = virtqueue_get_buf(vq, &len))) { + cmd->result_sg_size = len; complete(&cmd->completion); + } } while (!virtqueue_enable_cb(vq)); spin_unlock_irqrestore(&admin_vq->lock, flags); } @@ -218,12 +221,117 @@ end: kfree(data); } +static void +virtio_pci_admin_cmd_dev_parts_objects_enable(struct virtio_device *virtio_dev) +{ + struct virtio_pci_device *vp_dev = to_vp_device(virtio_dev); + struct virtio_admin_cmd_cap_get_data *get_data; + struct virtio_admin_cmd_cap_set_data *set_data; + struct virtio_dev_parts_cap *result; + struct virtio_admin_cmd cmd = {}; + struct scatterlist result_sg; + struct scatterlist data_sg; + u8 resource_objects_limit; + u16 set_data_size; + int ret; + + get_data = kzalloc(sizeof(*get_data), GFP_KERNEL); + if (!get_data) + return; + + result = kzalloc(sizeof(*result), GFP_KERNEL); + if (!result) + goto end; + + get_data->id = cpu_to_le16(VIRTIO_DEV_PARTS_CAP); + sg_init_one(&data_sg, get_data, sizeof(*get_data)); + sg_init_one(&result_sg, result, sizeof(*result)); + cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_DEVICE_CAP_GET); + cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV); + cmd.data_sg = &data_sg; + cmd.result_sg = &result_sg; + ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd); + if (ret) + goto err_get; + + set_data_size = sizeof(*set_data) + sizeof(*result); + set_data = kzalloc(set_data_size, GFP_KERNEL); + if (!set_data) + goto err_get; + + set_data->id = cpu_to_le16(VIRTIO_DEV_PARTS_CAP); + + /* Set the limit to the minimum value between the GET and SET values + * supported by the device. Since the obj_id for VIRTIO_DEV_PARTS_CAP + * is a globally unique value per PF, there is no possibility of + * overlap between GET and SET operations. 
+ */ + resource_objects_limit = min(result->get_parts_resource_objects_limit, + result->set_parts_resource_objects_limit); + result->get_parts_resource_objects_limit = resource_objects_limit; + result->set_parts_resource_objects_limit = resource_objects_limit; + memcpy(set_data->cap_specific_data, result, sizeof(*result)); + sg_init_one(&data_sg, set_data, set_data_size); + cmd.data_sg = &data_sg; + cmd.result_sg = NULL; + cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_DRIVER_CAP_SET); + ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd); + if (ret) + goto err_set; + + /* Allocate IDR to manage the dev caps objects */ + ida_init(&vp_dev->admin_vq.dev_parts_ida); + vp_dev->admin_vq.max_dev_parts_objects = resource_objects_limit; + +err_set: + kfree(set_data); +err_get: + kfree(result); +end: + kfree(get_data); +} + +static void virtio_pci_admin_cmd_cap_init(struct virtio_device *virtio_dev) +{ + struct virtio_pci_device *vp_dev = to_vp_device(virtio_dev); + struct virtio_admin_cmd_query_cap_id_result *data; + struct virtio_admin_cmd cmd = {}; + struct scatterlist result_sg; + int ret; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return; + + sg_init_one(&result_sg, data, sizeof(*data)); + cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_CAP_ID_LIST_QUERY); + cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV); + cmd.result_sg = &result_sg; + + ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd); + if (ret) + goto end; + + /* Max number of caps fits into a single u64 */ + BUILD_BUG_ON(sizeof(data->supported_caps) > sizeof(u64)); + + vp_dev->admin_vq.supported_caps = le64_to_cpu(data->supported_caps[0]); + + if (!(vp_dev->admin_vq.supported_caps & (1 << VIRTIO_DEV_PARTS_CAP))) + goto end; + + virtio_pci_admin_cmd_dev_parts_objects_enable(virtio_dev); +end: + kfree(data); +} + static void vp_modern_avq_activate(struct virtio_device *vdev) { if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ)) return; virtio_pci_admin_cmd_list_init(vdev); + virtio_pci_admin_cmd_cap_init(vdev); } static void vp_modern_avq_cleanup(struct virtio_device *vdev) @@ -758,6 +866,353 @@ static bool vp_get_shm_region(struct virtio_device *vdev, return true; } +/* + * virtio_pci_admin_has_dev_parts - Checks whether the device parts + * functionality is supported + * @pdev: VF pci_dev + * + * Returns true on success. + */ +bool virtio_pci_admin_has_dev_parts(struct pci_dev *pdev) +{ + struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev); + struct virtio_pci_device *vp_dev; + + if (!virtio_dev) + return false; + + if (!virtio_has_feature(virtio_dev, VIRTIO_F_ADMIN_VQ)) + return false; + + vp_dev = to_vp_device(virtio_dev); + + if (!((vp_dev->admin_vq.supported_cmds & VIRTIO_DEV_PARTS_ADMIN_CMD_BITMAP) == + VIRTIO_DEV_PARTS_ADMIN_CMD_BITMAP)) + return false; + + return vp_dev->admin_vq.max_dev_parts_objects; +} +EXPORT_SYMBOL_GPL(virtio_pci_admin_has_dev_parts); + +/* + * virtio_pci_admin_mode_set - Sets the mode of a member device + * @pdev: VF pci_dev + * @flags: device mode's flags + * + * Note: caller must serialize access for the given device. + * Returns 0 on success, or negative on failure. 
+ */ +int virtio_pci_admin_mode_set(struct pci_dev *pdev, u8 flags) +{ + struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev); + struct virtio_admin_cmd_dev_mode_set_data *data; + struct virtio_admin_cmd cmd = {}; + struct scatterlist data_sg; + int vf_id; + int ret; + + if (!virtio_dev) + return -ENODEV; + + vf_id = pci_iov_vf_id(pdev); + if (vf_id < 0) + return vf_id; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->flags = flags; + sg_init_one(&data_sg, data, sizeof(*data)); + cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_DEV_MODE_SET); + cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV); + cmd.group_member_id = cpu_to_le64(vf_id + 1); + cmd.data_sg = &data_sg; + ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd); + + kfree(data); + return ret; +} +EXPORT_SYMBOL_GPL(virtio_pci_admin_mode_set); + +/* + * virtio_pci_admin_obj_create - Creates an object for a given type and operation, + * following the max objects that can be created for that request. + * @pdev: VF pci_dev + * @obj_type: Object type + * @operation_type: Operation type + * @obj_id: Output unique object id + * + * Note: caller must serialize access for the given device. + * Returns 0 on success, or negative on failure. + */ +int virtio_pci_admin_obj_create(struct pci_dev *pdev, u16 obj_type, u8 operation_type, + u32 *obj_id) +{ + struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev); + u16 data_size = sizeof(struct virtio_admin_cmd_resource_obj_create_data); + struct virtio_admin_cmd_resource_obj_create_data *obj_create_data; + struct virtio_resource_obj_dev_parts obj_dev_parts = {}; + struct virtio_pci_admin_vq *avq; + struct virtio_admin_cmd cmd = {}; + struct scatterlist data_sg; + void *data; + int id = -1; + int vf_id; + int ret; + + if (!virtio_dev) + return -ENODEV; + + vf_id = pci_iov_vf_id(pdev); + if (vf_id < 0) + return vf_id; + + if (obj_type != VIRTIO_RESOURCE_OBJ_DEV_PARTS) + return -EOPNOTSUPP; + + if (operation_type != VIRTIO_RESOURCE_OBJ_DEV_PARTS_TYPE_GET && + operation_type != VIRTIO_RESOURCE_OBJ_DEV_PARTS_TYPE_SET) + return -EINVAL; + + avq = &to_vp_device(virtio_dev)->admin_vq; + if (!avq->max_dev_parts_objects) + return -EOPNOTSUPP; + + id = ida_alloc_range(&avq->dev_parts_ida, 0, + avq->max_dev_parts_objects - 1, GFP_KERNEL); + if (id < 0) + return id; + + *obj_id = id; + data_size += sizeof(obj_dev_parts); + data = kzalloc(data_size, GFP_KERNEL); + if (!data) { + ret = -ENOMEM; + goto end; + } + + obj_create_data = data; + obj_create_data->hdr.type = cpu_to_le16(obj_type); + obj_create_data->hdr.id = cpu_to_le32(*obj_id); + obj_dev_parts.type = operation_type; + memcpy(obj_create_data->resource_obj_specific_data, &obj_dev_parts, + sizeof(obj_dev_parts)); + sg_init_one(&data_sg, data, data_size); + cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_RESOURCE_OBJ_CREATE); + cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV); + cmd.group_member_id = cpu_to_le64(vf_id + 1); + cmd.data_sg = &data_sg; + ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd); + + kfree(data); +end: + if (ret) + ida_free(&avq->dev_parts_ida, id); + + return ret; +} +EXPORT_SYMBOL_GPL(virtio_pci_admin_obj_create); + +/* + * virtio_pci_admin_obj_destroy - Destroys an object of a given type and id + * @pdev: VF pci_dev + * @obj_type: Object type + * @id: Object id + * + * Note: caller must serialize access for the given device. + * Returns 0 on success, or negative on failure. 
+ */ +int virtio_pci_admin_obj_destroy(struct pci_dev *pdev, u16 obj_type, u32 id) +{ + struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev); + struct virtio_admin_cmd_resource_obj_cmd_hdr *data; + struct virtio_pci_device *vp_dev; + struct virtio_admin_cmd cmd = {}; + struct scatterlist data_sg; + int vf_id; + int ret; + + if (!virtio_dev) + return -ENODEV; + + vf_id = pci_iov_vf_id(pdev); + if (vf_id < 0) + return vf_id; + + if (obj_type != VIRTIO_RESOURCE_OBJ_DEV_PARTS) + return -EINVAL; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->type = cpu_to_le16(obj_type); + data->id = cpu_to_le32(id); + sg_init_one(&data_sg, data, sizeof(*data)); + cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_RESOURCE_OBJ_DESTROY); + cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV); + cmd.group_member_id = cpu_to_le64(vf_id + 1); + cmd.data_sg = &data_sg; + ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd); + if (!ret) { + vp_dev = to_vp_device(virtio_dev); + ida_free(&vp_dev->admin_vq.dev_parts_ida, id); + } + + kfree(data); + return ret; +} +EXPORT_SYMBOL_GPL(virtio_pci_admin_obj_destroy); + +/* + * virtio_pci_admin_dev_parts_metadata_get - Gets the metadata of the device parts + * identified by the below attributes. + * @pdev: VF pci_dev + * @obj_type: Object type + * @id: Object id + * @metadata_type: Metadata type + * @out: Upon success holds the output for 'metadata type size' + * + * Note: caller must serialize access for the given device. + * Returns 0 on success, or negative on failure. + */ +int virtio_pci_admin_dev_parts_metadata_get(struct pci_dev *pdev, u16 obj_type, + u32 id, u8 metadata_type, u32 *out) +{ + struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev); + struct virtio_admin_cmd_dev_parts_metadata_result *result; + struct virtio_admin_cmd_dev_parts_metadata_data *data; + struct scatterlist data_sg, result_sg; + struct virtio_admin_cmd cmd = {}; + int vf_id; + int ret; + + if (!virtio_dev) + return -ENODEV; + + if (metadata_type != VIRTIO_ADMIN_CMD_DEV_PARTS_METADATA_TYPE_SIZE) + return -EOPNOTSUPP; + + vf_id = pci_iov_vf_id(pdev); + if (vf_id < 0) + return vf_id; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + result = kzalloc(sizeof(*result), GFP_KERNEL); + if (!result) { + ret = -ENOMEM; + goto end; + } + + data->hdr.type = cpu_to_le16(obj_type); + data->hdr.id = cpu_to_le32(id); + data->type = metadata_type; + sg_init_one(&data_sg, data, sizeof(*data)); + sg_init_one(&result_sg, result, sizeof(*result)); + cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_DEV_PARTS_METADATA_GET); + cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV); + cmd.group_member_id = cpu_to_le64(vf_id + 1); + cmd.data_sg = &data_sg; + cmd.result_sg = &result_sg; + ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd); + if (!ret) + *out = le32_to_cpu(result->parts_size.size); + + kfree(result); +end: + kfree(data); + return ret; +} +EXPORT_SYMBOL_GPL(virtio_pci_admin_dev_parts_metadata_get); + +/* + * virtio_pci_admin_dev_parts_get - Gets the device parts identified by the below attributes. + * @pdev: VF pci_dev + * @obj_type: Object type + * @id: Object id + * @get_type: Get type + * @res_sg: Upon success holds the output result data + * @res_size: Upon success holds the output result size + * + * Note: caller must serialize access for the given device. + * Returns 0 on success, or negative on failure. 
+ */ +int virtio_pci_admin_dev_parts_get(struct pci_dev *pdev, u16 obj_type, u32 id, + u8 get_type, struct scatterlist *res_sg, + u32 *res_size) +{ + struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev); + struct virtio_admin_cmd_dev_parts_get_data *data; + struct scatterlist data_sg; + struct virtio_admin_cmd cmd = {}; + int vf_id; + int ret; + + if (!virtio_dev) + return -ENODEV; + + if (get_type != VIRTIO_ADMIN_CMD_DEV_PARTS_GET_TYPE_ALL) + return -EOPNOTSUPP; + + vf_id = pci_iov_vf_id(pdev); + if (vf_id < 0) + return vf_id; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->hdr.type = cpu_to_le16(obj_type); + data->hdr.id = cpu_to_le32(id); + data->type = get_type; + sg_init_one(&data_sg, data, sizeof(*data)); + cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_DEV_PARTS_GET); + cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV); + cmd.group_member_id = cpu_to_le64(vf_id + 1); + cmd.data_sg = &data_sg; + cmd.result_sg = res_sg; + ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd); + if (!ret) + *res_size = cmd.result_sg_size; + + kfree(data); + return ret; +} +EXPORT_SYMBOL_GPL(virtio_pci_admin_dev_parts_get); + +/* + * virtio_pci_admin_dev_parts_set - Sets the device parts identified by the below attributes. + * @pdev: VF pci_dev + * @data_sg: The device parts data, its layout follows struct virtio_admin_cmd_dev_parts_set_data + * + * Note: caller must serialize access for the given device. + * Returns 0 on success, or negative on failure. + */ +int virtio_pci_admin_dev_parts_set(struct pci_dev *pdev, struct scatterlist *data_sg) +{ + struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev); + struct virtio_admin_cmd cmd = {}; + int vf_id; + + if (!virtio_dev) + return -ENODEV; + + vf_id = pci_iov_vf_id(pdev); + if (vf_id < 0) + return vf_id; + + cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_DEV_PARTS_SET); + cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV); + cmd.group_member_id = cpu_to_le64(vf_id + 1); + cmd.data_sg = data_sg; + return vp_modern_admin_cmd_exec(virtio_dev, &cmd); +} +EXPORT_SYMBOL_GPL(virtio_pci_admin_dev_parts_set); + static const struct virtio_config_ops virtio_pci_config_nodev_ops = { .get = NULL, .set = NULL, diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 8167be01b400..82a7d2cbc704 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -223,15 +223,6 @@ struct vring_virtqueue { #endif }; -static struct virtqueue *__vring_new_virtqueue(unsigned int index, - struct vring_virtqueue_split *vring_split, - struct virtio_device *vdev, - bool weak_barriers, - bool context, - bool (*notify)(struct virtqueue *), - void (*callback)(struct virtqueue *), - const char *name, - struct device *dma_dev); static struct vring_desc_extra *vring_alloc_desc_extra(unsigned int num); static void vring_free(struct virtqueue *_vq); @@ -1135,6 +1126,64 @@ static int vring_alloc_queue_split(struct vring_virtqueue_split *vring_split, return 0; } +static struct virtqueue *__vring_new_virtqueue_split(unsigned int index, + struct vring_virtqueue_split *vring_split, + struct virtio_device *vdev, + bool weak_barriers, + bool context, + bool (*notify)(struct virtqueue *), + void (*callback)(struct virtqueue *), + const char *name, + struct device *dma_dev) +{ + struct vring_virtqueue *vq; + int err; + + vq = kmalloc(sizeof(*vq), GFP_KERNEL); + if (!vq) + return NULL; + + vq->packed_ring = false; + vq->vq.callback = callback; + vq->vq.vdev = vdev; + vq->vq.name = name; + 
vq->vq.index = index; + vq->vq.reset = false; + vq->we_own_ring = false; + vq->notify = notify; + vq->weak_barriers = weak_barriers; +#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION + vq->broken = true; +#else + vq->broken = false; +#endif + vq->dma_dev = dma_dev; + vq->use_dma_api = vring_use_dma_api(vdev); + + vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) && + !context; + vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); + + if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM)) + vq->weak_barriers = false; + + err = vring_alloc_state_extra_split(vring_split); + if (err) { + kfree(vq); + return NULL; + } + + virtqueue_vring_init_split(vring_split, vq); + + virtqueue_init(vq, vring_split->vring.num); + virtqueue_vring_attach_split(vq, vring_split); + + spin_lock(&vdev->vqs_list_lock); + list_add_tail(&vq->vq.list, &vdev->vqs); + spin_unlock(&vdev->vqs_list_lock); + return &vq->vq; +} + static struct virtqueue *vring_create_virtqueue_split( unsigned int index, unsigned int num, @@ -1157,7 +1206,7 @@ static struct virtqueue *vring_create_virtqueue_split( if (err) return NULL; - vq = __vring_new_virtqueue(index, &vring_split, vdev, weak_barriers, + vq = __vring_new_virtqueue_split(index, &vring_split, vdev, weak_barriers, context, notify, callback, name, dma_dev); if (!vq) { vring_free_split(&vring_split, vdev, dma_dev); @@ -2055,36 +2104,29 @@ static void virtqueue_reinit_packed(struct vring_virtqueue *vq) virtqueue_vring_init_packed(&vq->packed, !!vq->vq.callback); } -static struct virtqueue *vring_create_virtqueue_packed( - unsigned int index, - unsigned int num, - unsigned int vring_align, - struct virtio_device *vdev, - bool weak_barriers, - bool may_reduce_num, - bool context, - bool (*notify)(struct virtqueue *), - void (*callback)(struct virtqueue *), - const char *name, - struct device *dma_dev) +static struct virtqueue *__vring_new_virtqueue_packed(unsigned int index, + struct vring_virtqueue_packed *vring_packed, + struct virtio_device *vdev, + bool weak_barriers, + bool context, + bool (*notify)(struct virtqueue *), + void (*callback)(struct virtqueue *), + const char *name, + struct device *dma_dev) { - struct vring_virtqueue_packed vring_packed = {}; struct vring_virtqueue *vq; int err; - if (vring_alloc_queue_packed(&vring_packed, vdev, num, dma_dev)) - goto err_ring; - vq = kmalloc(sizeof(*vq), GFP_KERNEL); if (!vq) - goto err_vq; + return NULL; vq->vq.callback = callback; vq->vq.vdev = vdev; vq->vq.name = name; vq->vq.index = index; vq->vq.reset = false; - vq->we_own_ring = true; + vq->we_own_ring = false; vq->notify = notify; vq->weak_barriers = weak_barriers; #ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION @@ -2103,26 +2145,52 @@ static struct virtqueue *vring_create_virtqueue_packed( if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM)) vq->weak_barriers = false; - err = vring_alloc_state_extra_packed(&vring_packed); - if (err) - goto err_state_extra; + err = vring_alloc_state_extra_packed(vring_packed); + if (err) { + kfree(vq); + return NULL; + } - virtqueue_vring_init_packed(&vring_packed, !!callback); + virtqueue_vring_init_packed(vring_packed, !!callback); - virtqueue_init(vq, num); - virtqueue_vring_attach_packed(vq, &vring_packed); + virtqueue_init(vq, vring_packed->vring.num); + virtqueue_vring_attach_packed(vq, vring_packed); spin_lock(&vdev->vqs_list_lock); list_add_tail(&vq->vq.list, &vdev->vqs); spin_unlock(&vdev->vqs_list_lock); return &vq->vq; +} -err_state_extra: - kfree(vq); -err_vq: - vring_free_packed(&vring_packed, vdev, dma_dev); 
-err_ring: - return NULL; +static struct virtqueue *vring_create_virtqueue_packed( + unsigned int index, + unsigned int num, + unsigned int vring_align, + struct virtio_device *vdev, + bool weak_barriers, + bool may_reduce_num, + bool context, + bool (*notify)(struct virtqueue *), + void (*callback)(struct virtqueue *), + const char *name, + struct device *dma_dev) +{ + struct vring_virtqueue_packed vring_packed = {}; + struct virtqueue *vq; + + if (vring_alloc_queue_packed(&vring_packed, vdev, num, dma_dev)) + return NULL; + + vq = __vring_new_virtqueue_packed(index, &vring_packed, vdev, weak_barriers, + context, notify, callback, name, dma_dev); + if (!vq) { + vring_free_packed(&vring_packed, vdev, dma_dev); + return NULL; + } + + to_vvq(vq)->we_own_ring = true; + + return vq; } static int virtqueue_resize_packed(struct virtqueue *_vq, u32 num) @@ -2650,68 +2718,6 @@ irqreturn_t vring_interrupt(int irq, void *_vq) } EXPORT_SYMBOL_GPL(vring_interrupt); -/* Only available for split ring */ -static struct virtqueue *__vring_new_virtqueue(unsigned int index, - struct vring_virtqueue_split *vring_split, - struct virtio_device *vdev, - bool weak_barriers, - bool context, - bool (*notify)(struct virtqueue *), - void (*callback)(struct virtqueue *), - const char *name, - struct device *dma_dev) -{ - struct vring_virtqueue *vq; - int err; - - if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED)) - return NULL; - - vq = kmalloc(sizeof(*vq), GFP_KERNEL); - if (!vq) - return NULL; - - vq->packed_ring = false; - vq->vq.callback = callback; - vq->vq.vdev = vdev; - vq->vq.name = name; - vq->vq.index = index; - vq->vq.reset = false; - vq->we_own_ring = false; - vq->notify = notify; - vq->weak_barriers = weak_barriers; -#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION - vq->broken = true; -#else - vq->broken = false; -#endif - vq->dma_dev = dma_dev; - vq->use_dma_api = vring_use_dma_api(vdev); - - vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) && - !context; - vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); - - if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM)) - vq->weak_barriers = false; - - err = vring_alloc_state_extra_split(vring_split); - if (err) { - kfree(vq); - return NULL; - } - - virtqueue_vring_init_split(vring_split, vq); - - virtqueue_init(vq, vring_split->vring.num); - virtqueue_vring_attach_split(vq, vring_split); - - spin_lock(&vdev->vqs_list_lock); - list_add_tail(&vq->vq.list, &vdev->vqs); - spin_unlock(&vdev->vqs_list_lock); - return &vq->vq; -} - struct virtqueue *vring_create_virtqueue( unsigned int index, unsigned int num, @@ -2846,7 +2852,6 @@ int virtqueue_reset(struct virtqueue *_vq, } EXPORT_SYMBOL_GPL(virtqueue_reset); -/* Only available for split ring */ struct virtqueue *vring_new_virtqueue(unsigned int index, unsigned int num, unsigned int vring_align, @@ -2860,11 +2865,19 @@ struct virtqueue *vring_new_virtqueue(unsigned int index, { struct vring_virtqueue_split vring_split = {}; - if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED)) - return NULL; + if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED)) { + struct vring_virtqueue_packed vring_packed = {}; + + vring_packed.vring.num = num; + vring_packed.vring.desc = pages; + return __vring_new_virtqueue_packed(index, &vring_packed, + vdev, weak_barriers, + context, notify, callback, + name, vdev->dev.parent); + } vring_init(&vring_split.vring, num, pages, vring_align); - return __vring_new_virtqueue(index, &vring_split, vdev, weak_barriers, + return __vring_new_virtqueue_split(index, 
&vring_split, vdev, weak_barriers, context, notify, callback, name, vdev->dev.parent); } diff --git a/drivers/virtio/virtio_vdpa.c b/drivers/virtio/virtio_vdpa.c index 7364bd53e38d..1f60c9d5cb18 100644 --- a/drivers/virtio/virtio_vdpa.c +++ b/drivers/virtio/virtio_vdpa.c @@ -364,14 +364,13 @@ static int virtio_vdpa_find_vqs(struct virtio_device *vdev, unsigned int nvqs, struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vdev); struct vdpa_device *vdpa = vd_get_vdpa(vdev); const struct vdpa_config_ops *ops = vdpa->config; - struct irq_affinity default_affd = { 0 }; struct cpumask *masks; struct vdpa_callback cb; bool has_affinity = desc && ops->set_vq_affinity; int i, err, queue_idx = 0; if (has_affinity) { - masks = create_affinity_masks(nvqs, desc ? desc : &default_affd); + masks = create_affinity_masks(nvqs, desc); if (!masks) return -ENOMEM; } diff --git a/fs/backing-file.c b/fs/backing-file.c index 526ddb4d6f76..cbdad8b68474 100644 --- a/fs/backing-file.c +++ b/fs/backing-file.c @@ -327,6 +327,7 @@ int backing_file_mmap(struct file *file, struct vm_area_struct *vma, struct backing_file_ctx *ctx) { const struct cred *old_cred; + struct file *user_file = vma->vm_file; int ret; if (WARN_ON_ONCE(!(file->f_mode & FMODE_BACKING))) @@ -342,7 +343,7 @@ int backing_file_mmap(struct file *file, struct vm_area_struct *vma, revert_creds_light(old_cred); if (ctx->accessed) - ctx->accessed(vma->vm_file); + ctx->accessed(user_file); return ret; } diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 3039a6b7aba4..106f0e8af177 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -1257,6 +1257,7 @@ out_free_interp: } reloc_func_desc = interp_load_addr; + allow_write_access(interpreter); fput(interpreter); kfree(interp_elf_ex); @@ -1353,6 +1354,7 @@ out_free_dentry: kfree(interp_elf_ex); kfree(interp_elf_phdata); out_free_file: + allow_write_access(interpreter); if (interpreter) fput(interpreter); out_free_ph: diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c index 31d253bd3961..f1a7c4875c4a 100644 --- a/fs/binfmt_elf_fdpic.c +++ b/fs/binfmt_elf_fdpic.c @@ -394,6 +394,7 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm) goto error; } + allow_write_access(interpreter); fput(interpreter); interpreter = NULL; } @@ -465,8 +466,10 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm) retval = 0; error: - if (interpreter) + if (interpreter) { + allow_write_access(interpreter); fput(interpreter); + } kfree(interpreter_name); kfree(exec_params.phdrs); kfree(exec_params.loadmap); diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c index 31660d8cc2c6..6a3a16f91051 100644 --- a/fs/binfmt_misc.c +++ b/fs/binfmt_misc.c @@ -247,10 +247,13 @@ static int load_misc_binary(struct linux_binprm *bprm) if (retval < 0) goto ret; - if (fmt->flags & MISC_FMT_OPEN_FILE) + if (fmt->flags & MISC_FMT_OPEN_FILE) { interp_file = file_clone_open(fmt->interp_file); - else + if (!IS_ERR(interp_file)) + deny_write_access(interp_file); + } else { interp_file = open_exec(fmt->interpreter); + } retval = PTR_ERR(interp_file); if (IS_ERR(interp_file)) goto ret; diff --git a/fs/exec.c b/fs/exec.c index da51ca70489a..98cb7ba9983c 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -883,7 +883,8 @@ EXPORT_SYMBOL(transfer_args_to_stack); */ static struct file *do_open_execat(int fd, struct filename *name, int flags) { - struct file *file; + int err; + struct file *file __free(fput) = NULL; struct open_flags open_exec_flags = { .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC, .acc_mode = MAY_EXEC, @@ -908,12 +909,14 @@ 
static struct file *do_open_execat(int fd, struct filename *name, int flags) * an invariant that all non-regular files error out before we get here. */ if (WARN_ON_ONCE(!S_ISREG(file_inode(file)->i_mode)) || - path_noexec(&file->f_path)) { - fput(file); + path_noexec(&file->f_path)) return ERR_PTR(-EACCES); - } - return file; + err = deny_write_access(file); + if (err) + return ERR_PTR(err); + + return no_free_ptr(file); } /** @@ -923,7 +926,8 @@ static struct file *do_open_execat(int fd, struct filename *name, int flags) * * Returns ERR_PTR on failure or allocated struct file on success. * - * As this is a wrapper for the internal do_open_execat(). Also see + * As this is a wrapper for the internal do_open_execat(), callers + * must call allow_write_access() before fput() on release. Also see * do_close_execat(). */ struct file *open_exec(const char *name) @@ -1465,8 +1469,10 @@ static int prepare_bprm_creds(struct linux_binprm *bprm) /* Matches do_open_execat() */ static void do_close_execat(struct file *file) { - if (file) - fput(file); + if (!file) + return; + allow_write_access(file); + fput(file); } static void free_bprm(struct linux_binprm *bprm) @@ -1791,6 +1797,7 @@ static int exec_binprm(struct linux_binprm *bprm) bprm->file = bprm->interpreter; bprm->interpreter = NULL; + allow_write_access(exec); if (unlikely(bprm->have_execfd)) { if (bprm->executable) { fput(exec); diff --git a/fs/exfat/dir.c b/fs/exfat/dir.c index 7446bf09a04a..fe0a9b8a0cd0 100644 --- a/fs/exfat/dir.c +++ b/fs/exfat/dir.c @@ -82,11 +82,8 @@ static int exfat_readdir(struct inode *inode, loff_t *cpos, struct exfat_dir_ent if (ei->type != TYPE_DIR) return -EPERM; - if (ei->entry == -1) - exfat_chain_set(&dir, sbi->root_dir, 0, ALLOC_FAT_CHAIN); - else - exfat_chain_set(&dir, ei->start_clu, - EXFAT_B_TO_CLU(i_size_read(inode), sbi), ei->flags); + exfat_chain_set(&dir, ei->start_clu, + EXFAT_B_TO_CLU(i_size_read(inode), sbi), ei->flags); dentries_per_clu = sbi->dentries_per_clu; max_dentries = (unsigned int)min_t(u64, MAX_EXFAT_DENTRIES, @@ -135,21 +132,6 @@ static int exfat_readdir(struct inode *inode, loff_t *cpos, struct exfat_dir_ent num_ext = ep->dentry.file.num_ext; dir_entry->attr = le16_to_cpu(ep->dentry.file.attr); - exfat_get_entry_time(sbi, &dir_entry->crtime, - ep->dentry.file.create_tz, - ep->dentry.file.create_time, - ep->dentry.file.create_date, - ep->dentry.file.create_time_cs); - exfat_get_entry_time(sbi, &dir_entry->mtime, - ep->dentry.file.modify_tz, - ep->dentry.file.modify_time, - ep->dentry.file.modify_date, - ep->dentry.file.modify_time_cs); - exfat_get_entry_time(sbi, &dir_entry->atime, - ep->dentry.file.access_tz, - ep->dentry.file.access_time, - ep->dentry.file.access_date, - 0); *uni_name.name = 0x0; err = exfat_get_uniname_from_ext_entry(sb, &clu, i, @@ -166,9 +148,8 @@ static int exfat_readdir(struct inode *inode, loff_t *cpos, struct exfat_dir_ent ep = exfat_get_dentry(sb, &clu, i + 1, &bh); if (!ep) return -EIO; - dir_entry->size = - le64_to_cpu(ep->dentry.stream.valid_size); - dir_entry->entry = dentry; + dir_entry->entry = i; + dir_entry->dir = clu; brelse(bh); ei->hint_bmap.off = EXFAT_DEN_TO_CLU(dentry, sbi); @@ -276,7 +257,7 @@ get_new: if (!nb->lfn[0]) goto end_of_dir; - i_pos = ((loff_t)ei->start_clu << 32) | (de.entry & 0xffffffff); + i_pos = ((loff_t)de.dir.dir << 32) | (de.entry & 0xffffffff); tmp = exfat_iget(sb, i_pos); if (tmp) { inum = tmp->i_ino; diff --git a/fs/exfat/exfat_fs.h b/fs/exfat/exfat_fs.h index 3cdc1de362a9..78be6964a8a0 100644 --- a/fs/exfat/exfat_fs.h +++ 
b/fs/exfat/exfat_fs.h @@ -204,7 +204,9 @@ struct exfat_entry_set_cache { #define IS_DYNAMIC_ES(es) ((es)->__bh != (es)->bh) struct exfat_dir_entry { + /* the cluster where file dentry is located */ struct exfat_chain dir; + /* the index of file dentry in ->dir */ int entry; unsigned int type; unsigned int start_clu; @@ -290,7 +292,9 @@ struct exfat_sb_info { * EXFAT file system inode in-memory data */ struct exfat_inode_info { + /* the cluster where file dentry is located */ struct exfat_chain dir; + /* the index of file dentry in ->dir */ int entry; unsigned int type; unsigned short attr; @@ -508,6 +512,8 @@ struct exfat_dentry *exfat_get_dentry_cached(struct exfat_entry_set_cache *es, int exfat_get_dentry_set(struct exfat_entry_set_cache *es, struct super_block *sb, struct exfat_chain *p_dir, int entry, unsigned int num_entries); +#define exfat_get_dentry_set_by_ei(es, sb, ei) \ + exfat_get_dentry_set(es, sb, &(ei)->dir, (ei)->entry, ES_ALL_ENTRIES) int exfat_get_empty_dentry_set(struct exfat_entry_set_cache *es, struct super_block *sb, struct exfat_chain *p_dir, int entry, unsigned int num_entries); diff --git a/fs/exfat/file.c b/fs/exfat/file.c index a25d7eb789f4..fb38769c3e39 100644 --- a/fs/exfat/file.c +++ b/fs/exfat/file.c @@ -584,6 +584,16 @@ static ssize_t exfat_file_write_iter(struct kiocb *iocb, struct iov_iter *iter) if (ret < 0) goto unlock; + if (iocb->ki_flags & IOCB_DIRECT) { + unsigned long align = pos | iov_iter_alignment(iter); + + if (!IS_ALIGNED(align, i_blocksize(inode)) && + !IS_ALIGNED(align, bdev_logical_block_size(inode->i_sb->s_bdev))) { + ret = -EINVAL; + goto unlock; + } + } + if (pos > valid_size) { ret = exfat_extend_valid_size(file, pos); if (ret < 0 && ret != -ENOSPC) { diff --git a/fs/exfat/inode.c b/fs/exfat/inode.c index d724de8f57bf..96952d4acb50 100644 --- a/fs/exfat/inode.c +++ b/fs/exfat/inode.c @@ -43,7 +43,7 @@ int __exfat_write_inode(struct inode *inode, int sync) exfat_set_volume_dirty(sb); /* get the directory entry of given file or directory */ - if (exfat_get_dentry_set(&es, sb, &(ei->dir), ei->entry, ES_ALL_ENTRIES)) + if (exfat_get_dentry_set_by_ei(&es, sb, ei)) return -EIO; ep = exfat_get_dentry_cached(&es, ES_IDX_FILE); ep2 = exfat_get_dentry_cached(&es, ES_IDX_STREAM); diff --git a/fs/exfat/namei.c b/fs/exfat/namei.c index 2c4c44229352..97d2774760fe 100644 --- a/fs/exfat/namei.c +++ b/fs/exfat/namei.c @@ -288,8 +288,22 @@ static int exfat_check_max_dentries(struct inode *inode) return 0; } -/* find empty directory entry. - * if there isn't any empty slot, expand cluster chain. +/* + * Find an empty directory entry set. + * + * If there isn't any empty slot, expand cluster chain. 
+ * + * in: + * inode: inode of the parent directory + * num_entries: specifies how many dentries the empty directory entry set needs + * + * out: + * p_dir: the cluster where the empty directory entry set is located + * es: The found empty directory entry set + * + * return: + * the directory entry index in p_dir is returned on success + * -error code is returned on failure */ static int exfat_find_empty_entry(struct inode *inode, struct exfat_chain *p_dir, int num_entries, @@ -311,6 +325,9 @@ static int exfat_find_empty_entry(struct inode *inode, ei->hint_femp.eidx = EXFAT_HINT_NONE; } + exfat_chain_set(p_dir, ei->start_clu, + EXFAT_B_TO_CLU(i_size_read(inode), sbi), ei->flags); + while ((dentry = exfat_search_empty_slot(sb, &hint_femp, p_dir, num_entries, es)) < 0) { if (dentry == -EIO) @@ -345,6 +362,7 @@ static int exfat_find_empty_entry(struct inode *inode, if (ei->start_clu == EXFAT_EOF_CLUSTER) { ei->start_clu = clu.dir; p_dir->dir = clu.dir; + hint_femp.eidx = 0; } /* append to the FAT chain */ @@ -377,7 +395,10 @@ static int exfat_find_empty_entry(struct inode *inode, inode->i_blocks += sbi->cluster_size >> 9; } - return dentry; + p_dir->dir = exfat_sector_to_cluster(sbi, es->bh[0]->b_blocknr); + p_dir->size -= dentry / sbi->dentries_per_clu; + + return dentry & (sbi->dentries_per_clu - 1); } /* @@ -385,14 +406,11 @@ static int exfat_find_empty_entry(struct inode *inode, * Zero if it was successful; otherwise nonzero. */ static int __exfat_resolve_path(struct inode *inode, const unsigned char *path, - struct exfat_chain *p_dir, struct exfat_uni_name *p_uniname, - int lookup) + struct exfat_uni_name *p_uniname, int lookup) { int namelen; int lossy = NLS_NAME_NO_LOSSY; struct super_block *sb = inode->i_sb; - struct exfat_sb_info *sbi = EXFAT_SB(sb); - struct exfat_inode_info *ei = EXFAT_I(inode); int pathlen = strlen(path); /* @@ -431,24 +449,19 @@ static int __exfat_resolve_path(struct inode *inode, const unsigned char *path, if ((lossy && !lookup) || !namelen) return (lossy & NLS_NAME_OVERLEN) ?
-ENAMETOOLONG : -EINVAL; - exfat_chain_set(p_dir, ei->start_clu, - EXFAT_B_TO_CLU(i_size_read(inode), sbi), ei->flags); - return 0; } static inline int exfat_resolve_path(struct inode *inode, - const unsigned char *path, struct exfat_chain *dir, - struct exfat_uni_name *uni) + const unsigned char *path, struct exfat_uni_name *uni) { - return __exfat_resolve_path(inode, path, dir, uni, 0); + return __exfat_resolve_path(inode, path, uni, 0); } static inline int exfat_resolve_path_for_lookup(struct inode *inode, - const unsigned char *path, struct exfat_chain *dir, - struct exfat_uni_name *uni) + const unsigned char *path, struct exfat_uni_name *uni) { - return __exfat_resolve_path(inode, path, dir, uni, 1); + return __exfat_resolve_path(inode, path, uni, 1); } static inline loff_t exfat_make_i_pos(struct exfat_dir_entry *info) @@ -457,8 +470,7 @@ static inline loff_t exfat_make_i_pos(struct exfat_dir_entry *info) } static int exfat_add_entry(struct inode *inode, const char *path, - struct exfat_chain *p_dir, unsigned int type, - struct exfat_dir_entry *info) + unsigned int type, struct exfat_dir_entry *info) { int ret, dentry, num_entries; struct super_block *sb = inode->i_sb; @@ -470,7 +482,7 @@ static int exfat_add_entry(struct inode *inode, const char *path, int clu_size = 0; unsigned int start_clu = EXFAT_FREE_CLUSTER; - ret = exfat_resolve_path(inode, path, p_dir, &uniname); + ret = exfat_resolve_path(inode, path, &uniname); if (ret) goto out; @@ -481,7 +493,7 @@ static int exfat_add_entry(struct inode *inode, const char *path, } /* exfat_find_empty_entry must be called before alloc_cluster() */ - dentry = exfat_find_empty_entry(inode, p_dir, num_entries, &es); + dentry = exfat_find_empty_entry(inode, &info->dir, num_entries, &es); if (dentry < 0) { ret = dentry; /* -EIO or -ENOSPC */ goto out; @@ -508,7 +520,6 @@ static int exfat_add_entry(struct inode *inode, const char *path, if (ret) goto out; - info->dir = *p_dir; info->entry = dentry; info->flags = ALLOC_NO_FAT_CHAIN; info->type = type; @@ -541,7 +552,6 @@ static int exfat_create(struct mnt_idmap *idmap, struct inode *dir, { struct super_block *sb = dir->i_sb; struct inode *inode; - struct exfat_chain cdir; struct exfat_dir_entry info; loff_t i_pos; int err; @@ -552,8 +562,7 @@ static int exfat_create(struct mnt_idmap *idmap, struct inode *dir, mutex_lock(&EXFAT_SB(sb)->s_lock); exfat_set_volume_dirty(sb); - err = exfat_add_entry(dir, dentry->d_name.name, &cdir, TYPE_FILE, - &info); + err = exfat_add_entry(dir, dentry->d_name.name, TYPE_FILE, &info); if (err) goto unlock; @@ -601,10 +610,13 @@ static int exfat_find(struct inode *dir, struct qstr *qname, return -ENOENT; /* check the validity of directory name in the given pathname */ - ret = exfat_resolve_path_for_lookup(dir, qname->name, &cdir, &uni_name); + ret = exfat_resolve_path_for_lookup(dir, qname->name, &uni_name); if (ret) return ret; + exfat_chain_set(&cdir, ei->start_clu, + EXFAT_B_TO_CLU(i_size_read(dir), sbi), ei->flags); + /* check the validation of hint_stat and initialize it if required */ if (ei->version != (inode_peek_iversion_raw(dir) & 0xffffffff)) { ei->hint_stat.clu = cdir.dir; @@ -618,15 +630,16 @@ static int exfat_find(struct inode *dir, struct qstr *qname, if (dentry < 0) return dentry; /* -error value */ - info->dir = cdir; - info->entry = dentry; - info->num_subdirs = 0; - /* adjust cdir to the optimized value */ cdir.dir = hint_opt.clu; if (cdir.flags & ALLOC_NO_FAT_CHAIN) cdir.size -= dentry / sbi->dentries_per_clu; dentry = hint_opt.eidx; + + 
info->dir = cdir; + info->entry = dentry; + info->num_subdirs = 0; + if (exfat_get_dentry_set(&es, sb, &cdir, dentry, ES_2_ENTRIES)) return -EIO; ep = exfat_get_dentry_cached(&es, ES_IDX_FILE); @@ -637,14 +650,26 @@ static int exfat_find(struct inode *dir, struct qstr *qname, info->size = le64_to_cpu(ep2->dentry.stream.valid_size); info->valid_size = le64_to_cpu(ep2->dentry.stream.valid_size); info->size = le64_to_cpu(ep2->dentry.stream.size); + + info->start_clu = le32_to_cpu(ep2->dentry.stream.start_clu); + if (!is_valid_cluster(sbi, info->start_clu) && info->size) { + exfat_warn(sb, "start_clu is invalid cluster(0x%x)", + info->start_clu); + info->size = 0; + info->valid_size = 0; + } + + if (info->valid_size > info->size) { + exfat_warn(sb, "valid_size(%lld) is greater than size(%lld)", + info->valid_size, info->size); + info->valid_size = info->size; + } + if (info->size == 0) { info->flags = ALLOC_NO_FAT_CHAIN; info->start_clu = EXFAT_EOF_CLUSTER; - } else { + } else info->flags = ep2->dentry.stream.flags; - info->start_clu = - le32_to_cpu(ep2->dentry.stream.start_clu); - } exfat_get_entry_time(sbi, &info->crtime, ep->dentry.file.create_tz, @@ -766,26 +791,23 @@ unlock: /* remove an entry, BUT don't truncate */ static int exfat_unlink(struct inode *dir, struct dentry *dentry) { - struct exfat_chain cdir; struct super_block *sb = dir->i_sb; struct inode *inode = dentry->d_inode; struct exfat_inode_info *ei = EXFAT_I(inode); struct exfat_entry_set_cache es; - int entry, err = 0; + int err = 0; if (unlikely(exfat_forced_shutdown(sb))) return -EIO; mutex_lock(&EXFAT_SB(sb)->s_lock); - exfat_chain_dup(&cdir, &ei->dir); - entry = ei->entry; if (ei->dir.dir == DIR_DELETED) { exfat_err(sb, "abnormal access to deleted dentry"); err = -ENOENT; goto unlock; } - err = exfat_get_dentry_set(&es, sb, &cdir, entry, ES_ALL_ENTRIES); + err = exfat_get_dentry_set_by_ei(&es, sb, ei); if (err) { err = -EIO; goto unlock; @@ -824,7 +846,6 @@ static int exfat_mkdir(struct mnt_idmap *idmap, struct inode *dir, struct super_block *sb = dir->i_sb; struct inode *inode; struct exfat_dir_entry info; - struct exfat_chain cdir; loff_t i_pos; int err; loff_t size = i_size_read(dir); @@ -834,8 +855,7 @@ static int exfat_mkdir(struct mnt_idmap *idmap, struct inode *dir, mutex_lock(&EXFAT_SB(sb)->s_lock); exfat_set_volume_dirty(sb); - err = exfat_add_entry(dir, dentry->d_name.name, &cdir, TYPE_DIR, - &info); + err = exfat_add_entry(dir, dentry->d_name.name, TYPE_DIR, &info); if (err) goto unlock; @@ -915,21 +935,18 @@ static int exfat_check_dir_empty(struct super_block *sb, static int exfat_rmdir(struct inode *dir, struct dentry *dentry) { struct inode *inode = dentry->d_inode; - struct exfat_chain cdir, clu_to_free; + struct exfat_chain clu_to_free; struct super_block *sb = inode->i_sb; struct exfat_sb_info *sbi = EXFAT_SB(sb); struct exfat_inode_info *ei = EXFAT_I(inode); struct exfat_entry_set_cache es; - int entry, err; + int err; if (unlikely(exfat_forced_shutdown(sb))) return -EIO; mutex_lock(&EXFAT_SB(inode->i_sb)->s_lock); - exfat_chain_dup(&cdir, &ei->dir); - entry = ei->entry; - if (ei->dir.dir == DIR_DELETED) { exfat_err(sb, "abnormal access to deleted dentry"); err = -ENOENT; @@ -947,7 +964,7 @@ static int exfat_rmdir(struct inode *dir, struct dentry *dentry) goto unlock; } - err = exfat_get_dentry_set(&es, sb, &cdir, entry, ES_ALL_ENTRIES); + err = exfat_get_dentry_set_by_ei(&es, sb, ei); if (err) { err = -EIO; goto unlock; @@ -982,15 +999,14 @@ unlock: return err; } -static int exfat_rename_file(struct 
inode *inode, struct exfat_chain *p_dir, - int oldentry, struct exfat_uni_name *p_uniname, - struct exfat_inode_info *ei) +static int exfat_rename_file(struct inode *parent_inode, + struct exfat_uni_name *p_uniname, struct exfat_inode_info *ei) { int ret, num_new_entries; struct exfat_dentry *epold, *epnew; - struct super_block *sb = inode->i_sb; + struct super_block *sb = parent_inode->i_sb; struct exfat_entry_set_cache old_es, new_es; - int sync = IS_DIRSYNC(inode); + int sync = IS_DIRSYNC(parent_inode); if (unlikely(exfat_forced_shutdown(sb))) return -EIO; @@ -999,7 +1015,7 @@ static int exfat_rename_file(struct inode *inode, struct exfat_chain *p_dir, if (num_new_entries < 0) return num_new_entries; - ret = exfat_get_dentry_set(&old_es, sb, p_dir, oldentry, ES_ALL_ENTRIES); + ret = exfat_get_dentry_set_by_ei(&old_es, sb, ei); if (ret) { ret = -EIO; return ret; @@ -1009,9 +1025,10 @@ static int exfat_rename_file(struct inode *inode, struct exfat_chain *p_dir, if (old_es.num_entries < num_new_entries) { int newentry; + struct exfat_chain dir; - newentry = exfat_find_empty_entry(inode, p_dir, num_new_entries, - &new_es); + newentry = exfat_find_empty_entry(parent_inode, &dir, + num_new_entries, &new_es); if (newentry < 0) { ret = newentry; /* -EIO or -ENOSPC */ goto put_old_es; @@ -1034,8 +1051,8 @@ static int exfat_rename_file(struct inode *inode, struct exfat_chain *p_dir, if (ret) goto put_old_es; - exfat_remove_entries(inode, &old_es, ES_IDX_FILE); - ei->dir = *p_dir; + exfat_remove_entries(parent_inode, &old_es, ES_IDX_FILE); + ei->dir = dir; ei->entry = newentry; } else { if (exfat_get_entry_type(epold) == TYPE_FILE) { @@ -1043,7 +1060,7 @@ static int exfat_rename_file(struct inode *inode, struct exfat_chain *p_dir, ei->attr |= EXFAT_ATTR_ARCHIVE; } - exfat_remove_entries(inode, &old_es, ES_IDX_FIRST_FILENAME + 1); + exfat_remove_entries(parent_inode, &old_es, ES_IDX_FIRST_FILENAME + 1); exfat_init_ext_entry(&old_es, num_new_entries, p_uniname); } return exfat_put_dentry_set(&old_es, sync); @@ -1053,26 +1070,24 @@ put_old_es: return ret; } -static int exfat_move_file(struct inode *inode, struct exfat_chain *p_olddir, - int oldentry, struct exfat_chain *p_newdir, +static int exfat_move_file(struct inode *parent_inode, struct exfat_uni_name *p_uniname, struct exfat_inode_info *ei) { int ret, newentry, num_new_entries; struct exfat_dentry *epmov, *epnew; - struct super_block *sb = inode->i_sb; struct exfat_entry_set_cache mov_es, new_es; + struct exfat_chain newdir; num_new_entries = exfat_calc_num_entries(p_uniname); if (num_new_entries < 0) return num_new_entries; - ret = exfat_get_dentry_set(&mov_es, sb, p_olddir, oldentry, - ES_ALL_ENTRIES); + ret = exfat_get_dentry_set_by_ei(&mov_es, parent_inode->i_sb, ei); if (ret) return -EIO; - newentry = exfat_find_empty_entry(inode, p_newdir, num_new_entries, - &new_es); + newentry = exfat_find_empty_entry(parent_inode, &newdir, + num_new_entries, &new_es); if (newentry < 0) { ret = newentry; /* -EIO or -ENOSPC */ goto put_mov_es; @@ -1091,18 +1106,16 @@ static int exfat_move_file(struct inode *inode, struct exfat_chain *p_olddir, *epnew = *epmov; exfat_init_ext_entry(&new_es, num_new_entries, p_uniname); - exfat_remove_entries(inode, &mov_es, ES_IDX_FILE); - - exfat_chain_set(&ei->dir, p_newdir->dir, p_newdir->size, - p_newdir->flags); + exfat_remove_entries(parent_inode, &mov_es, ES_IDX_FILE); + ei->dir = newdir; ei->entry = newentry; - ret = exfat_put_dentry_set(&new_es, IS_DIRSYNC(inode)); + ret = exfat_put_dentry_set(&new_es, 
IS_DIRSYNC(parent_inode)); if (ret) goto put_mov_es; - return exfat_put_dentry_set(&mov_es, IS_DIRSYNC(inode)); + return exfat_put_dentry_set(&mov_es, IS_DIRSYNC(parent_inode)); put_mov_es: exfat_put_dentry_set(&mov_es, false); @@ -1116,19 +1129,12 @@ static int __exfat_rename(struct inode *old_parent_inode, struct dentry *new_dentry) { int ret; - int dentry; - struct exfat_chain olddir, newdir; - struct exfat_chain *p_dir = NULL; struct exfat_uni_name uni_name; - struct exfat_dentry *ep; struct super_block *sb = old_parent_inode->i_sb; struct exfat_sb_info *sbi = EXFAT_SB(sb); const unsigned char *new_path = new_dentry->d_name.name; struct inode *new_inode = new_dentry->d_inode; struct exfat_inode_info *new_ei = NULL; - unsigned int new_entry_type = TYPE_UNUSED; - int new_entry = 0; - struct buffer_head *new_bh = NULL; /* check the validity of pointer parameters */ if (new_path == NULL || strlen(new_path) == 0) @@ -1139,11 +1145,6 @@ static int __exfat_rename(struct inode *old_parent_inode, return -ENOENT; } - exfat_chain_set(&olddir, EXFAT_I(old_parent_inode)->start_clu, - EXFAT_B_TO_CLU_ROUND_UP(i_size_read(old_parent_inode), sbi), - EXFAT_I(old_parent_inode)->flags); - dentry = ei->entry; - /* check whether new dir is existing directory and empty */ if (new_inode) { ret = -EIO; @@ -1154,17 +1155,8 @@ static int __exfat_rename(struct inode *old_parent_inode, goto out; } - p_dir = &(new_ei->dir); - new_entry = new_ei->entry; - ep = exfat_get_dentry(sb, p_dir, new_entry, &new_bh); - if (!ep) - goto out; - - new_entry_type = exfat_get_entry_type(ep); - brelse(new_bh); - /* if new_inode exists, update ei */ - if (new_entry_type == TYPE_DIR) { + if (S_ISDIR(new_inode->i_mode)) { struct exfat_chain new_clu; new_clu.dir = new_ei->start_clu; @@ -1180,26 +1172,22 @@ static int __exfat_rename(struct inode *old_parent_inode, } /* check the validity of directory name in the given new pathname */ - ret = exfat_resolve_path(new_parent_inode, new_path, &newdir, - &uni_name); + ret = exfat_resolve_path(new_parent_inode, new_path, &uni_name); if (ret) goto out; exfat_set_volume_dirty(sb); - if (olddir.dir == newdir.dir) - ret = exfat_rename_file(new_parent_inode, &olddir, dentry, - &uni_name, ei); + if (new_parent_inode == old_parent_inode) + ret = exfat_rename_file(new_parent_inode, &uni_name, ei); else - ret = exfat_move_file(new_parent_inode, &olddir, dentry, - &newdir, &uni_name, ei); + ret = exfat_move_file(new_parent_inode, &uni_name, ei); if (!ret && new_inode) { struct exfat_entry_set_cache es; /* delete entries of new_dir */ - ret = exfat_get_dentry_set(&es, sb, p_dir, new_entry, - ES_ALL_ENTRIES); + ret = exfat_get_dentry_set_by_ei(&es, sb, new_ei); if (ret) { ret = -EIO; goto del_out; @@ -1212,7 +1200,7 @@ static int __exfat_rename(struct inode *old_parent_inode, goto del_out; /* Free the clusters if new_inode is a dir(as if exfat_rmdir) */ - if (new_entry_type == TYPE_DIR && + if (S_ISDIR(new_inode->i_mode) && new_ei->start_clu != EXFAT_EOF_CLUSTER) { /* new_ei, new_clu_to_free */ struct exfat_chain new_clu_to_free; diff --git a/fs/fcntl.c b/fs/fcntl.c index ac77dd912412..49884fa3c81d 100644 --- a/fs/fcntl.c +++ b/fs/fcntl.c @@ -374,6 +374,9 @@ static long fcntl_set_rw_hint(struct file *file, unsigned int cmd, u64 __user *argp = (u64 __user *)arg; u64 hint; + if (!inode_owner_or_capable(file_mnt_idmap(file), inode)) + return -EPERM; + if (copy_from_user(&hint, argp, sizeof(hint))) return -EFAULT; if (!rw_hint_valid(hint)) diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c index 
d88d3fc5306a..82afe78ec542 100644 --- a/fs/fuse/virtio_fs.c +++ b/fs/fuse/virtio_fs.c @@ -243,7 +243,7 @@ static ssize_t cpu_list_show(struct kobject *kobj, qid = fsvq->vq->index; for (cpu = 0; cpu < nr_cpu_ids; cpu++) { - if (qid < VQ_REQUEST || (fs->mq_map[cpu] == qid - VQ_REQUEST)) { + if (qid < VQ_REQUEST || (fs->mq_map[cpu] == qid)) { if (first) ret = snprintf(buf + pos, size - pos, "%u", cpu); else @@ -522,6 +522,7 @@ static int virtio_fs_read_tag(struct virtio_device *vdev, struct virtio_fs *fs) return -EINVAL; } + dev_info(&vdev->dev, "discovered new tag: %s\n", fs->tag); return 0; } @@ -875,23 +876,23 @@ static void virtio_fs_map_queues(struct virtio_device *vdev, struct virtio_fs *f goto fallback; for_each_cpu(cpu, mask) - fs->mq_map[cpu] = q; + fs->mq_map[cpu] = q + VQ_REQUEST; } return; fallback: /* Attempt to map evenly in groups over the CPUs */ masks = group_cpus_evenly(fs->num_request_queues); - /* If even this fails we default to all CPUs use queue zero */ + /* If even this fails we default to all CPUs use first request queue */ if (!masks) { for_each_possible_cpu(cpu) - fs->mq_map[cpu] = 0; + fs->mq_map[cpu] = VQ_REQUEST; return; } for (q = 0; q < fs->num_request_queues; q++) { for_each_cpu(cpu, &masks[q]) - fs->mq_map[cpu] = q; + fs->mq_map[cpu] = q + VQ_REQUEST; } kfree(masks); } @@ -1487,7 +1488,7 @@ static void virtio_fs_send_req(struct fuse_iqueue *fiq, struct fuse_req *req) clear_bit(FR_PENDING, &req->flags); fs = fiq->priv; - queue_id = VQ_REQUEST + fs->mq_map[raw_smp_processor_id()]; + queue_id = fs->mq_map[raw_smp_processor_id()]; pr_debug("%s: opcode %u unique %#llx nodeid %#llx in.len %u out.len %u queue_id %u\n", __func__, req->in.h.opcode, req->in.h.unique, diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c index d42f01e0fc1c..955f19e27e47 100644 --- a/fs/iomap/buffered-io.c +++ b/fs/iomap/buffered-io.c @@ -1350,40 +1350,12 @@ static inline int iomap_zero_iter_flush_and_stale(struct iomap_iter *i) return filemap_write_and_wait_range(mapping, i->pos, end); } -static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero, - bool *range_dirty) +static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero) { - const struct iomap *srcmap = iomap_iter_srcmap(iter); loff_t pos = iter->pos; loff_t length = iomap_length(iter); loff_t written = 0; - /* - * We must zero subranges of unwritten mappings that might be dirty in - * pagecache from previous writes. We only know whether the entire range - * was clean or not, however, and dirty folios may have been written - * back or reclaimed at any point after mapping lookup. - * - * The easiest way to deal with this is to flush pagecache to trigger - * any pending unwritten conversions and then grab the updated extents - * from the fs. The flush may change the current mapping, so mark it - * stale for the iterator to remap it for the next pass to handle - * properly. - * - * Note that holes are treated the same as unwritten because zero range - * is (ab)used for partial folio zeroing in some cases. Hole backed - * post-eof ranges can be dirtied via mapped write and the flush - * triggers writeback time post-eof zeroing. 
- */ - if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN) { - if (*range_dirty) { - *range_dirty = false; - return iomap_zero_iter_flush_and_stale(iter); - } - /* range is clean and already zeroed, nothing to do */ - return length; - } - do { struct folio *folio; int status; @@ -1397,6 +1369,8 @@ static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero, if (iter->iomap.flags & IOMAP_F_STALE) break; + /* warn about zeroing folios beyond eof that won't write back */ + WARN_ON_ONCE(folio_pos(folio) > iter->inode->i_size); offset = offset_in_folio(folio, pos); if (bytes > folio_size(folio) - offset) bytes = folio_size(folio) - offset; @@ -1429,28 +1403,58 @@ iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero, .len = len, .flags = IOMAP_ZERO, }; + struct address_space *mapping = inode->i_mapping; + unsigned int blocksize = i_blocksize(inode); + unsigned int off = pos & (blocksize - 1); + loff_t plen = min_t(loff_t, len, blocksize - off); int ret; bool range_dirty; /* - * Zero range wants to skip pre-zeroed (i.e. unwritten) mappings, but - * pagecache must be flushed to ensure stale data from previous - * buffered writes is not exposed. A flush is only required for certain - * types of mappings, but checking pagecache after mapping lookup is - * racy with writeback and reclaim. + * Zero range can skip mappings that are zero on disk so long as + * pagecache is clean. If pagecache was dirty prior to zero range, the + * mapping converts on writeback completion and so must be zeroed. * - * Therefore, check the entire range first and pass along whether any - * part of it is dirty. If so and an underlying mapping warrants it, - * flush the cache at that point. This trades off the occasional false - * positive (and spurious flush, if the dirty data and mapping don't - * happen to overlap) for simplicity in handling a relatively uncommon - * situation. + * The simplest way to deal with this across a range is to flush + * pagecache and process the updated mappings. To avoid excessive + * flushing on partial eof zeroing, special case it to zero the + * unaligned start portion if already dirty in pagecache. + */ + if (off && + filemap_range_needs_writeback(mapping, pos, pos + plen - 1)) { + iter.len = plen; + while ((ret = iomap_iter(&iter, ops)) > 0) + iter.processed = iomap_zero_iter(&iter, did_zero); + + iter.len = len - (iter.pos - pos); + if (ret || !iter.len) + return ret; + } + + /* + * To avoid an unconditional flush, check pagecache state and only flush + * if dirty and the fs returns a mapping that might convert on + * writeback. 
*/ range_dirty = filemap_range_needs_writeback(inode->i_mapping, - pos, pos + len - 1); + iter.pos, iter.pos + iter.len - 1); + while ((ret = iomap_iter(&iter, ops)) > 0) { + const struct iomap *srcmap = iomap_iter_srcmap(&iter); - while ((ret = iomap_iter(&iter, ops)) > 0) - iter.processed = iomap_zero_iter(&iter, did_zero, &range_dirty); + if (srcmap->type == IOMAP_HOLE || + srcmap->type == IOMAP_UNWRITTEN) { + loff_t proc = iomap_length(&iter); + + if (range_dirty) { + range_dirty = false; + proc = iomap_zero_iter_flush_and_stale(&iter); + } + iter.processed = proc; + continue; + } + + iter.processed = iomap_zero_iter(&iter, did_zero); + } return ret; } EXPORT_SYMBOL_GPL(iomap_zero_range); diff --git a/fs/iomap/iter.c b/fs/iomap/iter.c index 79a0614eaab7..3790918646af 100644 --- a/fs/iomap/iter.c +++ b/fs/iomap/iter.c @@ -22,26 +22,25 @@ static inline int iomap_iter_advance(struct iomap_iter *iter) { bool stale = iter->iomap.flags & IOMAP_F_STALE; + int ret = 1; /* handle the previous iteration (if any) */ if (iter->iomap.length) { if (iter->processed < 0) return iter->processed; - if (!iter->processed && !stale) - return 0; if (WARN_ON_ONCE(iter->processed > iomap_length(iter))) return -EIO; iter->pos += iter->processed; iter->len -= iter->processed; - if (!iter->len) - return 0; + if (!iter->len || (!iter->processed && !stale)) + ret = 0; } - /* clear the state for the next iteration */ + /* clear the per iteration state */ iter->processed = 0; memset(&iter->iomap, 0, sizeof(iter->iomap)); memset(&iter->srcmap, 0, sizeof(iter->srcmap)); - return 1; + return ret; } static inline void iomap_iter_done(struct iomap_iter *iter) diff --git a/fs/namespace.c b/fs/namespace.c index 6b0a17487d0f..23e81c2a1e3f 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -5057,21 +5057,32 @@ static int statmount_mnt_opts(struct kstatmount *s, struct seq_file *seq) return 0; } -static inline int statmount_opt_unescape(struct seq_file *seq, char *buf_start) +static inline int statmount_opt_process(struct seq_file *seq, size_t start) { - char *buf_end, *opt_start, *opt_end; + char *buf_end, *opt_end, *src, *dst; int count = 0; + if (unlikely(seq_has_overflowed(seq))) + return -EAGAIN; + buf_end = seq->buf + seq->count; + dst = seq->buf + start; + src = dst + 1; /* skip initial comma */ + + if (src >= buf_end) { + seq->count = start; + return 0; + } + *buf_end = '\0'; - for (opt_start = buf_start + 1; opt_start < buf_end; opt_start = opt_end + 1) { - opt_end = strchrnul(opt_start, ','); + for (; src < buf_end; src = opt_end + 1) { + opt_end = strchrnul(src, ','); *opt_end = '\0'; - buf_start += string_unescape(opt_start, buf_start, 0, UNESCAPE_OCTAL) + 1; + dst += string_unescape(src, dst, 0, UNESCAPE_OCTAL) + 1; if (WARN_ON_ONCE(++count == INT_MAX)) return -EOVERFLOW; } - seq->count = buf_start - 1 - seq->buf; + seq->count = dst - 1 - seq->buf; return count; } @@ -5080,24 +5091,16 @@ static int statmount_opt_array(struct kstatmount *s, struct seq_file *seq) struct vfsmount *mnt = s->mnt; struct super_block *sb = mnt->mnt_sb; size_t start = seq->count; - char *buf_start; int err; if (!sb->s_op->show_options) return 0; - buf_start = seq->buf + start; err = sb->s_op->show_options(seq, mnt->mnt_root); if (err) return err; - if (unlikely(seq_has_overflowed(seq))) - return -EAGAIN; - - if (seq->count == start) - return 0; - - err = statmount_opt_unescape(seq, buf_start); + err = statmount_opt_process(seq, start); if (err < 0) return err; @@ -5110,22 +5113,13 @@ static int statmount_opt_sec_array(struct kstatmount 
*s, struct seq_file *seq) struct vfsmount *mnt = s->mnt; struct super_block *sb = mnt->mnt_sb; size_t start = seq->count; - char *buf_start; int err; - buf_start = seq->buf + start; - err = security_sb_show_options(seq, sb); - if (!err) + if (err) return err; - if (unlikely(seq_has_overflowed(seq))) - return -EAGAIN; - - if (seq->count == start) - return 0; - - err = statmount_opt_unescape(seq, buf_start); + err = statmount_opt_process(seq, start); if (err < 0) return err; diff --git a/fs/netfs/fscache_io.c b/fs/netfs/fscache_io.c index 38637e5c9b57..b1722a82c03d 100644 --- a/fs/netfs/fscache_io.c +++ b/fs/netfs/fscache_io.c @@ -9,7 +9,6 @@ #include <linux/uio.h> #include <linux/bvec.h> #include <linux/slab.h> -#include <linux/uio.h> #include "internal.h" /** diff --git a/fs/ntfs3/attrib.c b/fs/ntfs3/attrib.c index 0763202d00c9..8d789b017fa9 100644 --- a/fs/ntfs3/attrib.c +++ b/fs/ntfs3/attrib.c @@ -977,7 +977,7 @@ int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn, /* Check for compressed frame. */ err = attr_is_frame_compressed(ni, attr_b, vcn >> NTFS_LZNT_CUNIT, - &hint); + &hint, run); if (err) goto out; @@ -1521,16 +1521,16 @@ out: * attr_is_frame_compressed - Used to detect compressed frame. * * attr - base (primary) attribute segment. + * run - run to use, usually == &ni->file.run. * Only base segments contains valid 'attr->nres.c_unit' */ int attr_is_frame_compressed(struct ntfs_inode *ni, struct ATTRIB *attr, - CLST frame, CLST *clst_data) + CLST frame, CLST *clst_data, struct runs_tree *run) { int err; u32 clst_frame; CLST clen, lcn, vcn, alen, slen, vcn_next; size_t idx; - struct runs_tree *run; *clst_data = 0; @@ -1542,7 +1542,6 @@ int attr_is_frame_compressed(struct ntfs_inode *ni, struct ATTRIB *attr, clst_frame = 1u << attr->nres.c_unit; vcn = frame * clst_frame; - run = &ni->file.run; if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) { err = attr_load_runs_vcn(ni, attr->type, attr_name(attr), @@ -1678,7 +1677,7 @@ int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size, if (err) goto out; - err = attr_is_frame_compressed(ni, attr_b, frame, &clst_data); + err = attr_is_frame_compressed(ni, attr_b, frame, &clst_data, run); if (err) goto out; diff --git a/fs/ntfs3/bitmap.c b/fs/ntfs3/bitmap.c index cf4fe21a5039..04107b950717 100644 --- a/fs/ntfs3/bitmap.c +++ b/fs/ntfs3/bitmap.c @@ -710,20 +710,17 @@ int wnd_set_free(struct wnd_bitmap *wnd, size_t bit, size_t bits) { int err = 0; struct super_block *sb = wnd->sb; - size_t bits0 = bits; u32 wbits = 8 * sb->s_blocksize; size_t iw = bit >> (sb->s_blocksize_bits + 3); u32 wbit = bit & (wbits - 1); struct buffer_head *bh; + u32 op; - while (iw < wnd->nwnd && bits) { - u32 tail, op; - + for (; iw < wnd->nwnd && bits; iw++, bit += op, bits -= op, wbit = 0) { if (iw + 1 == wnd->nwnd) wbits = wnd->bits_last; - tail = wbits - wbit; - op = min_t(u32, tail, bits); + op = min_t(u32, wbits - wbit, bits); bh = wnd_map(wnd, iw); if (IS_ERR(bh)) { @@ -736,20 +733,15 @@ int wnd_set_free(struct wnd_bitmap *wnd, size_t bit, size_t bits) ntfs_bitmap_clear_le(bh->b_data, wbit, op); wnd->free_bits[iw] += op; + wnd->total_zeroes += op; set_buffer_uptodate(bh); mark_buffer_dirty(bh); unlock_buffer(bh); put_bh(bh); - wnd->total_zeroes += op; - bits -= op; - wbit = 0; - iw += 1; + wnd_add_free_ext(wnd, bit, op, false); } - - wnd_add_free_ext(wnd, bit, bits0, false); - return err; } @@ -760,20 +752,17 @@ int wnd_set_used(struct wnd_bitmap *wnd, size_t bit, size_t bits) { int err = 0; struct super_block 
*sb = wnd->sb; - size_t bits0 = bits; size_t iw = bit >> (sb->s_blocksize_bits + 3); u32 wbits = 8 * sb->s_blocksize; u32 wbit = bit & (wbits - 1); struct buffer_head *bh; + u32 op; - while (iw < wnd->nwnd && bits) { - u32 tail, op; - + for (; iw < wnd->nwnd && bits; iw++, bit += op, bits -= op, wbit = 0) { if (unlikely(iw + 1 == wnd->nwnd)) wbits = wnd->bits_last; - tail = wbits - wbit; - op = min_t(u32, tail, bits); + op = min_t(u32, wbits - wbit, bits); bh = wnd_map(wnd, iw); if (IS_ERR(bh)) { @@ -785,21 +774,16 @@ int wnd_set_used(struct wnd_bitmap *wnd, size_t bit, size_t bits) ntfs_bitmap_set_le(bh->b_data, wbit, op); wnd->free_bits[iw] -= op; + wnd->total_zeroes -= op; set_buffer_uptodate(bh); mark_buffer_dirty(bh); unlock_buffer(bh); put_bh(bh); - wnd->total_zeroes -= op; - bits -= op; - wbit = 0; - iw += 1; + if (!RB_EMPTY_ROOT(&wnd->start_tree)) + wnd_remove_free_ext(wnd, bit, op); } - - if (!RB_EMPTY_ROOT(&wnd->start_tree)) - wnd_remove_free_ext(wnd, bit, bits0); - return err; } @@ -852,15 +836,13 @@ static bool wnd_is_free_hlp(struct wnd_bitmap *wnd, size_t bit, size_t bits) size_t iw = bit >> (sb->s_blocksize_bits + 3); u32 wbits = 8 * sb->s_blocksize; u32 wbit = bit & (wbits - 1); + u32 op; - while (iw < wnd->nwnd && bits) { - u32 tail, op; - + for (; iw < wnd->nwnd && bits; iw++, bits -= op, wbit = 0) { if (unlikely(iw + 1 == wnd->nwnd)) wbits = wnd->bits_last; - tail = wbits - wbit; - op = min_t(u32, tail, bits); + op = min_t(u32, wbits - wbit, bits); if (wbits != wnd->free_bits[iw]) { bool ret; @@ -875,10 +857,6 @@ static bool wnd_is_free_hlp(struct wnd_bitmap *wnd, size_t bit, size_t bits) if (!ret) return false; } - - bits -= op; - wbit = 0; - iw += 1; } return true; @@ -928,6 +906,7 @@ bool wnd_is_used(struct wnd_bitmap *wnd, size_t bit, size_t bits) size_t iw = bit >> (sb->s_blocksize_bits + 3); u32 wbits = 8 * sb->s_blocksize; u32 wbit = bit & (wbits - 1); + u32 op; size_t end; struct rb_node *n; struct e_node *e; @@ -945,14 +924,11 @@ bool wnd_is_used(struct wnd_bitmap *wnd, size_t bit, size_t bits) return false; use_wnd: - while (iw < wnd->nwnd && bits) { - u32 tail, op; - + for (; iw < wnd->nwnd && bits; iw++, bits -= op, wbit = 0) { if (unlikely(iw + 1 == wnd->nwnd)) wbits = wnd->bits_last; - tail = wbits - wbit; - op = min_t(u32, tail, bits); + op = min_t(u32, wbits - wbit, bits); if (wnd->free_bits[iw]) { bool ret; @@ -966,10 +942,6 @@ use_wnd: if (!ret) goto out; } - - bits -= op; - wbit = 0; - iw += 1; } ret = true; diff --git a/fs/ntfs3/file.c b/fs/ntfs3/file.c index e370eaf9bfe2..3f96a11804c9 100644 --- a/fs/ntfs3/file.c +++ b/fs/ntfs3/file.c @@ -182,13 +182,15 @@ static int ntfs_extend_initialized_size(struct file *file, loff_t pos = valid; int err; + if (valid >= new_valid) + return 0; + if (is_resident(ni)) { ni->i_valid = new_valid; return 0; } WARN_ON(is_compressed(ni)); - WARN_ON(valid >= new_valid); for (;;) { u32 zerofrom, len; @@ -222,7 +224,7 @@ static int ntfs_extend_initialized_size(struct file *file, if (err) goto out; - folio_zero_range(folio, zerofrom, folio_size(folio)); + folio_zero_range(folio, zerofrom, folio_size(folio) - zerofrom); err = ntfs_write_end(file, mapping, pos, len, len, folio, NULL); if (err < 0) @@ -987,6 +989,7 @@ static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from) u64 frame_vbo; pgoff_t index; bool frame_uptodate; + struct folio *folio; if (frame_size < PAGE_SIZE) { /* @@ -1041,8 +1044,9 @@ static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from) if (err) { for (ip = 0; ip < 
pages_per_frame; ip++) { page = pages[ip]; - unlock_page(page); - put_page(page); + folio = page_folio(page); + folio_unlock(folio); + folio_put(folio); } goto out; } @@ -1052,9 +1056,10 @@ static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from) off = offset_in_page(valid); for (; ip < pages_per_frame; ip++, off = 0) { page = pages[ip]; + folio = page_folio(page); zero_user_segment(page, off, PAGE_SIZE); flush_dcache_page(page); - SetPageUptodate(page); + folio_mark_uptodate(folio); } ni_lock(ni); @@ -1063,9 +1068,10 @@ static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from) for (ip = 0; ip < pages_per_frame; ip++) { page = pages[ip]; - SetPageUptodate(page); - unlock_page(page); - put_page(page); + folio = page_folio(page); + folio_mark_uptodate(folio); + folio_unlock(folio); + folio_put(folio); } if (err) @@ -1107,8 +1113,9 @@ static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from) for (ip = 0; ip < pages_per_frame; ip++) { page = pages[ip]; - unlock_page(page); - put_page(page); + folio = page_folio(page); + folio_unlock(folio); + folio_put(folio); } goto out; } @@ -1149,9 +1156,10 @@ static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from) for (ip = 0; ip < pages_per_frame; ip++) { page = pages[ip]; ClearPageDirty(page); - SetPageUptodate(page); - unlock_page(page); - put_page(page); + folio = page_folio(page); + folio_mark_uptodate(folio); + folio_unlock(folio); + folio_put(folio); } if (err) diff --git a/fs/ntfs3/frecord.c b/fs/ntfs3/frecord.c index 41c7ffad2790..8b39d0ce5f28 100644 --- a/fs/ntfs3/frecord.c +++ b/fs/ntfs3/frecord.c @@ -1901,46 +1901,6 @@ enum REPARSE_SIGN ni_parse_reparse(struct ntfs_inode *ni, struct ATTRIB *attr, } /* - * fiemap_fill_next_extent_k - a copy of fiemap_fill_next_extent - * but it uses 'fe_k' instead of fieinfo->fi_extents_start - */ -static int fiemap_fill_next_extent_k(struct fiemap_extent_info *fieinfo, - struct fiemap_extent *fe_k, u64 logical, - u64 phys, u64 len, u32 flags) -{ - struct fiemap_extent extent; - - /* only count the extents */ - if (fieinfo->fi_extents_max == 0) { - fieinfo->fi_extents_mapped++; - return (flags & FIEMAP_EXTENT_LAST) ? 1 : 0; - } - - if (fieinfo->fi_extents_mapped >= fieinfo->fi_extents_max) - return 1; - - if (flags & FIEMAP_EXTENT_DELALLOC) - flags |= FIEMAP_EXTENT_UNKNOWN; - if (flags & FIEMAP_EXTENT_DATA_ENCRYPTED) - flags |= FIEMAP_EXTENT_ENCODED; - if (flags & (FIEMAP_EXTENT_DATA_TAIL | FIEMAP_EXTENT_DATA_INLINE)) - flags |= FIEMAP_EXTENT_NOT_ALIGNED; - - memset(&extent, 0, sizeof(extent)); - extent.fe_logical = logical; - extent.fe_physical = phys; - extent.fe_length = len; - extent.fe_flags = flags; - - memcpy(fe_k + fieinfo->fi_extents_mapped, &extent, sizeof(extent)); - - fieinfo->fi_extents_mapped++; - if (fieinfo->fi_extents_mapped == fieinfo->fi_extents_max) - return 1; - return (flags & FIEMAP_EXTENT_LAST) ? 1 : 0; -} - -/* * ni_fiemap - Helper for file_fiemap(). * * Assumed ni_lock. 
@@ -1950,11 +1910,9 @@ int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo, __u64 vbo, __u64 len) { int err = 0; - struct fiemap_extent *fe_k = NULL; struct ntfs_sb_info *sbi = ni->mi.sbi; u8 cluster_bits = sbi->cluster_bits; - struct runs_tree *run; - struct rw_semaphore *run_lock; + struct runs_tree run; struct ATTRIB *attr; CLST vcn = vbo >> cluster_bits; CLST lcn, clen; @@ -1965,13 +1923,11 @@ int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo, u32 flags; bool ok; + run_init(&run); if (S_ISDIR(ni->vfs_inode.i_mode)) { - run = &ni->dir.alloc_run; attr = ni_find_attr(ni, NULL, NULL, ATTR_ALLOC, I30_NAME, ARRAY_SIZE(I30_NAME), NULL, NULL); - run_lock = &ni->dir.run_lock; } else { - run = &ni->file.run; attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, NULL); if (!attr) { @@ -1986,7 +1942,6 @@ int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo, "fiemap is not supported for compressed file (cp -r)"); goto out; } - run_lock = &ni->file.run_lock; } if (!attr || !attr->non_res) { @@ -1998,51 +1953,32 @@ int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo, goto out; } - /* - * To avoid lock problems replace pointer to user memory by pointer to kernel memory. - */ - fe_k = kmalloc_array(fieinfo->fi_extents_max, - sizeof(struct fiemap_extent), - GFP_NOFS | __GFP_ZERO); - if (!fe_k) { - err = -ENOMEM; - goto out; - } - end = vbo + len; alloc_size = le64_to_cpu(attr->nres.alloc_size); if (end > alloc_size) end = alloc_size; - down_read(run_lock); - while (vbo < end) { if (idx == -1) { - ok = run_lookup_entry(run, vcn, &lcn, &clen, &idx); + ok = run_lookup_entry(&run, vcn, &lcn, &clen, &idx); } else { CLST vcn_next = vcn; - ok = run_get_entry(run, ++idx, &vcn, &lcn, &clen) && + ok = run_get_entry(&run, ++idx, &vcn, &lcn, &clen) && vcn == vcn_next; if (!ok) vcn = vcn_next; } if (!ok) { - up_read(run_lock); - down_write(run_lock); - err = attr_load_runs_vcn(ni, attr->type, attr_name(attr), - attr->name_len, run, vcn); - - up_write(run_lock); - down_read(run_lock); + attr->name_len, &run, vcn); if (err) break; - ok = run_lookup_entry(run, vcn, &lcn, &clen, &idx); + ok = run_lookup_entry(&run, vcn, &lcn, &clen, &idx); if (!ok) { err = -EINVAL; @@ -2067,8 +2003,9 @@ int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo, } else if (is_attr_compressed(attr)) { CLST clst_data; - err = attr_is_frame_compressed( - ni, attr, vcn >> attr->nres.c_unit, &clst_data); + err = attr_is_frame_compressed(ni, attr, + vcn >> attr->nres.c_unit, + &clst_data, &run); if (err) break; if (clst_data < NTFS_LZNT_CLUSTERS) @@ -2097,8 +2034,8 @@ int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo, if (vbo + dlen >= end) flags |= FIEMAP_EXTENT_LAST; - err = fiemap_fill_next_extent_k(fieinfo, fe_k, vbo, lbo, - dlen, flags); + err = fiemap_fill_next_extent(fieinfo, vbo, lbo, dlen, + flags); if (err < 0) break; @@ -2119,8 +2056,7 @@ int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo, if (vbo + bytes >= end) flags |= FIEMAP_EXTENT_LAST; - err = fiemap_fill_next_extent_k(fieinfo, fe_k, vbo, lbo, bytes, - flags); + err = fiemap_fill_next_extent(fieinfo, vbo, lbo, bytes, flags); if (err < 0) break; if (err == 1) { @@ -2131,19 +2067,8 @@ int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo, vbo += bytes; } - up_read(run_lock); - - /* - * Copy to user memory out of lock - */ - if (copy_to_user(fieinfo->fi_extents_start, fe_k, - fieinfo->fi_extents_max * - sizeof(struct 
fiemap_extent))) { - err = -EFAULT; - } - out: - kfree(fe_k); + run_close(&run); return err; } @@ -2672,7 +2597,8 @@ int ni_read_frame(struct ntfs_inode *ni, u64 frame_vbo, struct page **pages, down_write(&ni->file.run_lock); run_truncate_around(run, le64_to_cpu(attr->nres.svcn)); frame = frame_vbo >> (cluster_bits + NTFS_LZNT_CUNIT); - err = attr_is_frame_compressed(ni, attr, frame, &clst_data); + err = attr_is_frame_compressed(ni, attr, frame, &clst_data, + run); up_write(&ni->file.run_lock); if (err) goto out1; diff --git a/fs/ntfs3/fsntfs.c b/fs/ntfs3/fsntfs.c index 0fa636038b4e..03471bc9371c 100644 --- a/fs/ntfs3/fsntfs.c +++ b/fs/ntfs3/fsntfs.c @@ -2699,4 +2699,4 @@ unlock_out: out: __putname(uni); return err; -}
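With fiemap_fill_next_extent_k() and the temporary kernel buffer gone, ni_fiemap() walks a private on-stack runs_tree, so no run_lock is held while fiemap_fill_next_extent() writes to user memory. A heavily condensed sketch of the new flow (fs/ntfs3 internals assumed; the real loop iterates over all runs):

static int ni_fiemap_one_run(struct ntfs_inode *ni, struct ATTRIB *attr,
                             struct fiemap_extent_info *fieinfo, CLST vcn,
                             u64 vbo, u64 lbo, u64 bytes, u32 flags)
{
        struct runs_tree run;
        CLST lcn, clen;
        size_t idx;
        int err = 0;

        run_init(&run);
        if (!run_lookup_entry(&run, vcn, &lcn, &clen, &idx))
                err = attr_load_runs_vcn(ni, attr->type, attr_name(attr),
                                         attr->name_len, &run, vcn);
        if (!err)
                /* No run_lock is held, so faulting on user memory is fine. */
                err = fiemap_fill_next_extent(fieinfo, vbo, lbo, bytes, flags);
        run_close(&run);
        return err < 0 ? err : 0;
}
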
\ No newline at end of file +} diff --git a/fs/ntfs3/ntfs_fs.h b/fs/ntfs3/ntfs_fs.h index 26e1e1379c04..cd8e8374bb5a 100644 --- a/fs/ntfs3/ntfs_fs.h +++ b/fs/ntfs3/ntfs_fs.h @@ -446,7 +446,8 @@ int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr, struct runs_tree *run, u64 frame, u64 frames, u8 frame_bits, u32 *ondisk_size, u64 *vbo_data); int attr_is_frame_compressed(struct ntfs_inode *ni, struct ATTRIB *attr, - CLST frame, CLST *clst_data); + CLST frame, CLST *clst_data, + struct runs_tree *run); int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size, u64 new_valid); int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes); diff --git a/fs/ntfs3/record.c b/fs/ntfs3/record.c index f810f0419d25..61d53d39f3b9 100644 --- a/fs/ntfs3/record.c +++ b/fs/ntfs3/record.c @@ -212,7 +212,7 @@ struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr) return NULL; if (off >= used || off < MFTRECORD_FIXUP_OFFSET_1 || - !IS_ALIGNED(off, 4)) { + !IS_ALIGNED(off, 8)) { return NULL; } @@ -236,8 +236,11 @@ struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr) off += asize; } - /* Can we use the first field (attr->type). */ - /* NOTE: this code also checks attr->size availability. */ + /* + * Can we use the first fields: + * attr->type, + * attr->size + */ if (off + 8 > used) { static_assert(ALIGN(sizeof(enum ATTR_TYPE), 8) == 8); return NULL; @@ -259,10 +262,17 @@ struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr) asize = le32_to_cpu(attr->size); + if (!IS_ALIGNED(asize, 8)) + return NULL; + /* Check overflow and boundary. */ if (off + asize < off || off + asize > used) return NULL; + /* Can we use the field attr->non_res. */ + if (off + 9 > used) + return NULL; + /* Check size of attribute. */ if (!attr->non_res) { /* Check resident fields. */ diff --git a/fs/ntfs3/run.c b/fs/ntfs3/run.c index 58e988cd8049..6e86d66197ef 100644 --- a/fs/ntfs3/run.c +++ b/fs/ntfs3/run.c @@ -1055,8 +1055,8 @@ int run_unpack_ex(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino, { int ret, err; CLST next_vcn, lcn, len; - size_t index; - bool ok; + size_t index, done; + bool ok, zone; struct wnd_bitmap *wnd; ret = run_unpack(run, sbi, ino, svcn, evcn, vcn, run_buf, run_buf_size); @@ -1087,8 +1087,9 @@ int run_unpack_ex(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino, continue; down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS); + zone = max(wnd->zone_bit, lcn) < min(wnd->zone_end, lcn + len); /* Check for free blocks. */ - ok = wnd_is_used(wnd, lcn, len); + ok = !zone && wnd_is_used(wnd, lcn, len); up_read(&wnd->rw_lock); if (ok) continue; @@ -1096,14 +1097,33 @@ int run_unpack_ex(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino, /* Looks like volume is corrupted. */ ntfs_set_state(sbi, NTFS_DIRTY_ERROR); - if (down_write_trylock(&wnd->rw_lock)) { - /* Mark all zero bits as used in range [lcn, lcn+len). */ - size_t done; - err = wnd_set_used_safe(wnd, lcn, len, &done); - up_write(&wnd->rw_lock); - if (err) - return err; + if (!down_write_trylock(&wnd->rw_lock)) + continue; + + if (zone) { + /* + * Range [lcn, lcn + len) intersects with zone. + * To avoid complex with zone just turn it off. + */ + wnd_zone_set(wnd, 0, 0); + } + + /* Mark all zero bits as used in range [lcn, lcn+len). */ + err = wnd_set_used_safe(wnd, lcn, len, &done); + if (zone) { + /* Restore zone. Lock mft run. */ + struct rw_semaphore *lock = + is_mounted(sbi) ? 
&sbi->mft.ni->file.run_lock : + NULL; + if (lock) + down_read(lock); + ntfs_refresh_zone(sbi); + if (lock) + up_read(lock); } + up_write(&wnd->rw_lock); + if (err) + return err; } return ret; diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c index 7a85735d584f..e376f48c4b8b 100644 --- a/fs/proc/kcore.c +++ b/fs/proc/kcore.c @@ -600,6 +600,7 @@ static ssize_t read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter) ret = -EFAULT; goto out; } + ret = 0; /* * We know the bounce buffer is safe to copy from, so * use _copy_to_iter() directly. diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h index 0655aa5b57b2..bf47cca2c375 100644 --- a/include/asm-generic/qspinlock.h +++ b/include/asm-generic/qspinlock.h @@ -136,6 +136,7 @@ static __always_inline bool virt_spin_lock(struct qspinlock *lock) } #endif +#ifndef __no_arch_spinlock_redefine /* * Remapping spinlock architecture specific functions to the corresponding * queued spinlock functions. @@ -146,5 +147,6 @@ static __always_inline bool virt_spin_lock(struct qspinlock *lock) #define arch_spin_lock(l) queued_spin_lock(l) #define arch_spin_trylock(l) queued_spin_trylock(l) #define arch_spin_unlock(l) queued_spin_unlock(l) +#endif #endif /* __ASM_GENERIC_QSPINLOCK_H */ diff --git a/include/asm-generic/spinlock.h b/include/asm-generic/spinlock.h index 90803a826ba0..970590baf61b 100644 --- a/include/asm-generic/spinlock.h +++ b/include/asm-generic/spinlock.h @@ -1,94 +1,9 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* - * 'Generic' ticket-lock implementation. - * - * It relies on atomic_fetch_add() having well defined forward progress - * guarantees under contention. If your architecture cannot provide this, stick - * to a test-and-set lock. - * - * It also relies on atomic_fetch_add() being safe vs smp_store_release() on a - * sub-word of the value. This is generally true for anything LL/SC although - * you'd be hard pressed to find anything useful in architecture specifications - * about this. If your architecture cannot do this you might be better off with - * a test-and-set. - * - * It further assumes atomic_*_release() + atomic_*_acquire() is RCpc and hence - * uses atomic_fetch_add() which is RCsc to create an RCsc hot path, along with - * a full fence after the spin to upgrade the otherwise-RCpc - * atomic_cond_read_acquire(). - * - * The implementation uses smp_cond_load_acquire() to spin, so if the - * architecture has WFE like instructions to sleep instead of poll for word - * modifications be sure to implement that (see ARM64 for example). - * - */ - #ifndef __ASM_GENERIC_SPINLOCK_H #define __ASM_GENERIC_SPINLOCK_H -#include <linux/atomic.h> -#include <asm-generic/spinlock_types.h> - -static __always_inline void arch_spin_lock(arch_spinlock_t *lock) -{ - u32 val = atomic_fetch_add(1<<16, lock); - u16 ticket = val >> 16; - - if (ticket == (u16)val) - return; - - /* - * atomic_cond_read_acquire() is RCpc, but rather than defining a - * custom cond_read_rcsc() here we just emit a full fence. We only - * need the prior reads before subsequent writes ordering from - * smb_mb(), but as atomic_cond_read_acquire() just emits reads and we - * have no outstanding writes due to the atomic_fetch_add() the extra - * orderings are free. 
- */ - atomic_cond_read_acquire(lock, ticket == (u16)VAL); - smp_mb(); -} - -static __always_inline bool arch_spin_trylock(arch_spinlock_t *lock) -{ - u32 old = atomic_read(lock); - - if ((old >> 16) != (old & 0xffff)) - return false; - - return atomic_try_cmpxchg(lock, &old, old + (1<<16)); /* SC, for RCsc */ -} - -static __always_inline void arch_spin_unlock(arch_spinlock_t *lock) -{ - u16 *ptr = (u16 *)lock + IS_ENABLED(CONFIG_CPU_BIG_ENDIAN); - u32 val = atomic_read(lock); - - smp_store_release(ptr, (u16)val + 1); -} - -static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock) -{ - u32 val = lock.counter; - - return ((val >> 16) == (val & 0xffff)); -} - -static __always_inline int arch_spin_is_locked(arch_spinlock_t *lock) -{ - arch_spinlock_t val = READ_ONCE(*lock); - - return !arch_spin_value_unlocked(val); -} - -static __always_inline int arch_spin_is_contended(arch_spinlock_t *lock) -{ - u32 val = atomic_read(lock); - - return (s16)((val >> 16) - (val & 0xffff)) > 1; -} - +#include <asm-generic/ticket_spinlock.h> #include <asm/qrwlock.h> #endif /* __ASM_GENERIC_SPINLOCK_H */ diff --git a/include/asm-generic/spinlock_types.h b/include/asm-generic/spinlock_types.h index 8962bb730945..f534aa5de394 100644 --- a/include/asm-generic/spinlock_types.h +++ b/include/asm-generic/spinlock_types.h @@ -3,15 +3,7 @@ #ifndef __ASM_GENERIC_SPINLOCK_TYPES_H #define __ASM_GENERIC_SPINLOCK_TYPES_H -#include <linux/types.h> -typedef atomic_t arch_spinlock_t; - -/* - * qrwlock_types depends on arch_spinlock_t, so we must typedef that before the - * include. - */ -#include <asm/qrwlock_types.h> - -#define __ARCH_SPIN_LOCK_UNLOCKED ATOMIC_INIT(0) +#include <asm-generic/qspinlock_types.h> +#include <asm-generic/qrwlock_types.h> #endif /* __ASM_GENERIC_SPINLOCK_TYPES_H */ diff --git a/include/asm-generic/ticket_spinlock.h b/include/asm-generic/ticket_spinlock.h new file mode 100644 index 000000000000..325779970d8a --- /dev/null +++ b/include/asm-generic/ticket_spinlock.h @@ -0,0 +1,105 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * 'Generic' ticket-lock implementation. + * + * It relies on atomic_fetch_add() having well defined forward progress + * guarantees under contention. If your architecture cannot provide this, stick + * to a test-and-set lock. + * + * It also relies on atomic_fetch_add() being safe vs smp_store_release() on a + * sub-word of the value. This is generally true for anything LL/SC although + * you'd be hard pressed to find anything useful in architecture specifications + * about this. If your architecture cannot do this you might be better off with + * a test-and-set. + * + * It further assumes atomic_*_release() + atomic_*_acquire() is RCpc and hence + * uses atomic_fetch_add() which is RCsc to create an RCsc hot path, along with + * a full fence after the spin to upgrade the otherwise-RCpc + * atomic_cond_read_acquire(). + * + * The implementation uses smp_cond_load_acquire() to spin, so if the + * architecture has WFE like instructions to sleep instead of poll for word + * modifications be sure to implement that (see ARM64 for example). 
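For reference, the encoding this code keeps using packs the next ticket in the high 16 bits and the current owner in the low 16 bits. An illustrative userspace C11 model of that layout (not the kernel code, and without the RCsc ordering subtleties discussed in the header comment):

#include <stdatomic.h>
#include <stdint.h>

typedef struct { _Atomic uint32_t val; } tkt_lock_t;

static void tkt_lock(tkt_lock_t *l)
{
        uint32_t old = atomic_fetch_add_explicit(&l->val, 1u << 16,
                                                 memory_order_acquire);
        uint16_t ticket = (uint16_t)(old >> 16);

        /* Spin until the owner half equals our ticket. */
        while ((uint16_t)atomic_load_explicit(&l->val,
                                              memory_order_acquire) != ticket)
                ;
}

static void tkt_unlock(tkt_lock_t *l)
{
        uint32_t old = atomic_load_explicit(&l->val, memory_order_relaxed);
        uint32_t new;

        /* Bump only the owner half; the next-ticket half may change under us. */
        do {
                new = (old & 0xffff0000u) | (uint16_t)(old + 1);
        } while (!atomic_compare_exchange_weak_explicit(&l->val, &old, new,
                                                        memory_order_release,
                                                        memory_order_relaxed));
}
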
+ * + */ + +#ifndef __ASM_GENERIC_TICKET_SPINLOCK_H +#define __ASM_GENERIC_TICKET_SPINLOCK_H + +#include <linux/atomic.h> +#include <asm-generic/spinlock_types.h> + +static __always_inline void ticket_spin_lock(arch_spinlock_t *lock) +{ + u32 val = atomic_fetch_add(1<<16, &lock->val); + u16 ticket = val >> 16; + + if (ticket == (u16)val) + return; + + /* + * atomic_cond_read_acquire() is RCpc, but rather than defining a + * custom cond_read_rcsc() here we just emit a full fence. We only + * need the prior reads before subsequent writes ordering from + * smb_mb(), but as atomic_cond_read_acquire() just emits reads and we + * have no outstanding writes due to the atomic_fetch_add() the extra + * orderings are free. + */ + atomic_cond_read_acquire(&lock->val, ticket == (u16)VAL); + smp_mb(); +} + +static __always_inline bool ticket_spin_trylock(arch_spinlock_t *lock) +{ + u32 old = atomic_read(&lock->val); + + if ((old >> 16) != (old & 0xffff)) + return false; + + return atomic_try_cmpxchg(&lock->val, &old, old + (1<<16)); /* SC, for RCsc */ +} + +static __always_inline void ticket_spin_unlock(arch_spinlock_t *lock) +{ + u16 *ptr = (u16 *)lock + IS_ENABLED(CONFIG_CPU_BIG_ENDIAN); + u32 val = atomic_read(&lock->val); + + smp_store_release(ptr, (u16)val + 1); +} + +static __always_inline int ticket_spin_value_unlocked(arch_spinlock_t lock) +{ + u32 val = lock.val.counter; + + return ((val >> 16) == (val & 0xffff)); +} + +static __always_inline int ticket_spin_is_locked(arch_spinlock_t *lock) +{ + arch_spinlock_t val = READ_ONCE(*lock); + + return !ticket_spin_value_unlocked(val); +} + +static __always_inline int ticket_spin_is_contended(arch_spinlock_t *lock) +{ + u32 val = atomic_read(&lock->val); + + return (s16)((val >> 16) - (val & 0xffff)) > 1; +} + +#ifndef __no_arch_spinlock_redefine +/* + * Remapping spinlock architecture specific functions to the corresponding + * ticket spinlock functions. 
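The __no_arch_spinlock_redefine escape hatch (added to qspinlock.h earlier in this patch as well) lets an architecture include both generic implementations and select one itself. A hypothetical asm/spinlock.h, not part of this patch, might look like:

#define __no_arch_spinlock_redefine
#include <asm-generic/qspinlock.h>
#include <asm-generic/ticket_spinlock.h>
#include <linux/jump_label.h>

/* Assumed static key, flipped at boot based on hardware support. */
DECLARE_STATIC_KEY_TRUE(use_qspinlock_key);

static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
        if (static_branch_likely(&use_qspinlock_key))
                queued_spin_lock(lock);
        else
                ticket_spin_lock(lock);
}

static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
        if (static_branch_likely(&use_qspinlock_key))
                queued_spin_unlock(lock);
        else
                ticket_spin_unlock(lock);
}
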
+ */ +#define arch_spin_is_locked(l) ticket_spin_is_locked(l) +#define arch_spin_is_contended(l) ticket_spin_is_contended(l) +#define arch_spin_value_unlocked(l) ticket_spin_value_unlocked(l) +#define arch_spin_lock(l) ticket_spin_lock(l) +#define arch_spin_trylock(l) ticket_spin_trylock(l) +#define arch_spin_unlock(l) ticket_spin_unlock(l) +#endif + +#endif /* __ASM_GENERIC_TICKET_SPINLOCK_H */ diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 7dd24acd9ffe..05f39fbfa485 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -1530,17 +1530,7 @@ static inline int find_acpi_cpu_topology_hetero_id(unsigned int cpu) } #endif -#ifdef CONFIG_ARM64 -void acpi_arm_init(void); -#else -static inline void acpi_arm_init(void) { } -#endif - -#ifdef CONFIG_RISCV -void acpi_riscv_init(void); -#else -static inline void acpi_riscv_init(void) { } -#endif +void acpi_arch_init(void); #ifdef CONFIG_ACPI_PCC void acpi_init_pcc(void); diff --git a/include/linux/acpi_dma.h b/include/linux/acpi_dma.h index 72cedb916a9c..e748b2877602 100644 --- a/include/linux/acpi_dma.h +++ b/include/linux/acpi_dma.h @@ -11,10 +11,11 @@ #ifndef __LINUX_ACPI_DMA_H #define __LINUX_ACPI_DMA_H -#include <linux/list.h> -#include <linux/device.h> #include <linux/err.h> #include <linux/dmaengine.h> +#include <linux/types.h> + +struct device; /** * struct acpi_dma_spec - slave device DMA resources @@ -65,7 +66,6 @@ int devm_acpi_dma_controller_register(struct device *dev, struct dma_chan *(*acpi_dma_xlate) (struct acpi_dma_spec *, struct acpi_dma *), void *data); -void devm_acpi_dma_controller_free(struct device *dev); struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev, size_t index); @@ -94,9 +94,6 @@ static inline int devm_acpi_dma_controller_register(struct device *dev, { return -ENODEV; } -static inline void devm_acpi_dma_controller_free(struct device *dev) -{ -} static inline struct dma_chan *acpi_dma_request_slave_chan_by_index( struct device *dev, size_t index) diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h index dda2f3ea89cb..9946276aff73 100644 --- a/include/linux/amba/bus.h +++ b/include/linux/amba/bus.h @@ -121,6 +121,7 @@ extern const struct bus_type amba_bustype; #ifdef CONFIG_ARM_AMBA int __amba_driver_register(struct amba_driver *, struct module *); void amba_driver_unregister(struct amba_driver *); +bool dev_is_amba(const struct device *dev); #else static inline int __amba_driver_register(struct amba_driver *drv, struct module *owner) @@ -130,6 +131,10 @@ static inline int __amba_driver_register(struct amba_driver *drv, static inline void amba_driver_unregister(struct amba_driver *drv) { } +static inline bool dev_is_amba(const struct device *dev) +{ + return false; +} #endif struct amba_device *amba_device_alloc(const char *, resource_size_t, size_t); diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 301e97d745c1..318d27841130 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -557,17 +557,19 @@ iommu_copy_struct_from_full_user_array(void *kdst, size_t kdst_entry_size, * @domain_alloc: allocate and return an iommu domain if success. Otherwise * NULL is returned. The domain is not fully initialized until * the caller iommu_domain_alloc() returns. - * @domain_alloc_user: Allocate an iommu domain corresponding to the input - * parameters as defined in include/uapi/linux/iommufd.h. 
- * Upon success, if the @user_data is valid and the @parent - * points to a kernel-managed domain, the new domain must be - * IOMMU_DOMAIN_NESTED type; otherwise, the @parent must be - * NULL while the @user_data can be optionally provided, the - * new domain must support __IOMMU_DOMAIN_PAGING. - * Upon failure, ERR_PTR must be returned. + * @domain_alloc_paging_flags: Allocate an iommu domain corresponding to the + * input parameters as defined in + * include/uapi/linux/iommufd.h. The @user_data can be + * optionally provided, the new domain must support + * __IOMMU_DOMAIN_PAGING. Upon failure, ERR_PTR must be + * returned. * @domain_alloc_paging: Allocate an iommu_domain that can be used for - * UNMANAGED, DMA, and DMA_FQ domain types. + * UNMANAGED, DMA, and DMA_FQ domain types. This is the + * same as invoking domain_alloc_paging_flags() with + * @flags=0, @user_data=NULL. A driver should implement + * only one of the two ops. * @domain_alloc_sva: Allocate an iommu_domain for Shared Virtual Addressing. + * @domain_alloc_nested: Allocate an iommu_domain for nested translation. * @probe_device: Add device to iommu driver handling * @release_device: Remove device from iommu driver handling * @probe_finalize: Do final setup work after the device is added to an IOMMU @@ -616,12 +618,15 @@ struct iommu_ops { /* Domain allocation and freeing by the iommu driver */ struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type); - struct iommu_domain *(*domain_alloc_user)( - struct device *dev, u32 flags, struct iommu_domain *parent, + struct iommu_domain *(*domain_alloc_paging_flags)( + struct device *dev, u32 flags, const struct iommu_user_data *user_data); struct iommu_domain *(*domain_alloc_paging)(struct device *dev); struct iommu_domain *(*domain_alloc_sva)(struct device *dev, struct mm_struct *mm); + struct iommu_domain *(*domain_alloc_nested)( + struct device *dev, struct iommu_domain *parent, u32 flags, + const struct iommu_user_data *user_data); struct iommu_device *(*probe_device)(struct device *dev); void (*release_device)(struct device *dev); diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 4cf6aaed5f35..e4bddb927795 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2709,9 +2709,6 @@ #define PCI_DEVICE_ID_INTEL_82815_MC 0x1130 #define PCI_DEVICE_ID_INTEL_82815_CGC 0x1132 #define PCI_DEVICE_ID_INTEL_SST_TNG 0x119a -#define PCI_DEVICE_ID_INTEL_DSA_GNRD 0x11fb -#define PCI_DEVICE_ID_INTEL_DSA_DMR 0x1212 -#define PCI_DEVICE_ID_INTEL_IAA_DMR 0x1216 #define PCI_DEVICE_ID_INTEL_82092AA_0 0x1221 #define PCI_DEVICE_ID_INTEL_82437 0x122d #define PCI_DEVICE_ID_INTEL_82371FB_0 0x122e diff --git a/include/linux/phy.h b/include/linux/phy.h index 77c6d6451638..563c46205685 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -602,6 +602,7 @@ struct macsec_ops; * @supported_eee: supported PHY EEE linkmodes * @advertising_eee: Currently advertised EEE linkmodes * @enable_tx_lpi: When True, MAC should transmit LPI to PHY + * @eee_active: phylib private state, indicating that EEE has been negotiated * @eee_cfg: User configuration of EEE * @lp_advertising: Current link partner advertised linkmodes * @host_interfaces: PHY interface modes supported by host @@ -723,6 +724,7 @@ struct phy_device { /* Energy efficient ethernet modes which should be prohibited */ __ETHTOOL_DECLARE_LINK_MODE_MASK(eee_broken_modes); bool enable_tx_lpi; + bool eee_active; struct eee_config eee_cfg; /* Host supported PHY interface types. Should be ignored if empty. 
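Since domain_alloc_user is split into domain_alloc_paging_flags and domain_alloc_nested, a paging-only driver no longer has to reason about a @parent domain. A rough sketch of the converted op with made-up driver names (the flag constant is from the iommufd uapi; the real driver conversions are elsewhere in the series):

static struct iommu_domain *
mydrv_domain_alloc_paging_flags(struct device *dev, u32 flags,
                                const struct iommu_user_data *user_data)
{
        struct iommu_domain *domain;

        if (user_data)
                return ERR_PTR(-EOPNOTSUPP);
        if (flags & ~IOMMU_HWPT_ALLOC_DIRTY_TRACKING)
                return ERR_PTR(-EOPNOTSUPP);

        domain = mydrv_alloc_paging_domain(dev);        /* assumed helper */
        return domain ? domain : ERR_PTR(-ENOMEM);
}

static const struct iommu_ops mydrv_ops = {
        /* Implement either this op or .domain_alloc_paging, not both. */
        .domain_alloc_paging_flags = mydrv_domain_alloc_paging_flags,
};
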
*/ diff --git a/include/linux/phy/phy-sun4i-usb.h b/include/linux/phy/phy-sun4i-usb.h index 91eb755ee73b..f3e7b13608e4 100644 --- a/include/linux/phy/phy-sun4i-usb.h +++ b/include/linux/phy/phy-sun4i-usb.h @@ -11,7 +11,7 @@ /** * sun4i_usb_phy_set_squelch_detect() - Enable/disable squelch detect * @phy: reference to a sun4i usb phy - * @enabled: wether to enable or disable squelch detect + * @enabled: whether to enable or disable squelch detect */ void sun4i_usb_phy_set_squelch_detect(struct phy *phy, bool enabled); diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index 910d407ebe63..b98106e1a90f 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -236,6 +236,8 @@ struct power_supply_config { char **supplied_to; size_t num_supplicants; + + bool no_wakeup_source; }; /* Description of power supply */ @@ -750,9 +752,9 @@ struct power_supply_battery_info { int temp_alert_max; int temp_min; int temp_max; - struct power_supply_battery_ocv_table *ocv_table[POWER_SUPPLY_OCV_TEMP_MAX]; + const struct power_supply_battery_ocv_table *ocv_table[POWER_SUPPLY_OCV_TEMP_MAX]; int ocv_table_size[POWER_SUPPLY_OCV_TEMP_MAX]; - struct power_supply_resistance_temp_table *resist_table; + const struct power_supply_resistance_temp_table *resist_table; int resist_table_size; const struct power_supply_vbat_ri_table *vbat2ri_discharging; int vbat2ri_discharging_size; @@ -797,15 +799,15 @@ extern bool power_supply_battery_info_has_prop(struct power_supply_battery_info extern int power_supply_battery_info_get_prop(struct power_supply_battery_info *info, enum power_supply_property psp, union power_supply_propval *val); -extern int power_supply_ocv2cap_simple(struct power_supply_battery_ocv_table *table, +extern int power_supply_ocv2cap_simple(const struct power_supply_battery_ocv_table *table, int table_len, int ocv); -extern struct power_supply_battery_ocv_table * +extern const struct power_supply_battery_ocv_table * power_supply_find_ocv2cap_table(struct power_supply_battery_info *info, int temp, int *table_len); extern int power_supply_batinfo_ocv2cap(struct power_supply_battery_info *info, int ocv, int temp); extern int -power_supply_temp2resist_simple(struct power_supply_resistance_temp_table *table, +power_supply_temp2resist_simple(const struct power_supply_resistance_temp_table *table, int table_len, int temp); extern int power_supply_vbat2ri(struct power_supply_battery_info *info, int vbat_uv, bool charging); @@ -863,8 +865,6 @@ static inline int power_supply_set_property(struct power_supply *psy, const union power_supply_propval *val) { return 0; } #endif -extern int power_supply_property_is_writeable(struct power_supply *psy, - enum power_supply_property psp); extern void power_supply_external_power_changed(struct power_supply *psy); extern struct power_supply *__must_check @@ -872,17 +872,9 @@ power_supply_register(struct device *parent, const struct power_supply_desc *desc, const struct power_supply_config *cfg); extern struct power_supply *__must_check -power_supply_register_no_ws(struct device *parent, - const struct power_supply_desc *desc, - const struct power_supply_config *cfg); -extern struct power_supply *__must_check devm_power_supply_register(struct device *parent, const struct power_supply_desc *desc, const struct power_supply_config *cfg); -extern struct power_supply *__must_check -devm_power_supply_register_no_ws(struct device *parent, - const struct power_supply_desc *desc, - const struct power_supply_config *cfg); extern void 
power_supply_unregister(struct power_supply *psy); extern int power_supply_powers(struct power_supply *psy, struct device *dev); @@ -945,19 +937,6 @@ static inline bool power_supply_is_watt_property(enum power_supply_property psp) return false; } -#ifdef CONFIG_POWER_SUPPLY_HWMON -int power_supply_add_hwmon_sysfs(struct power_supply *psy); -void power_supply_remove_hwmon_sysfs(struct power_supply *psy); -#else -static inline int power_supply_add_hwmon_sysfs(struct power_supply *psy) -{ - return 0; -} - -static inline -void power_supply_remove_hwmon_sysfs(struct power_supply *psy) {} -#endif - #ifdef CONFIG_SYSFS ssize_t power_supply_charge_behaviour_show(struct device *dev, unsigned int available_behaviours, diff --git a/include/linux/rcupdate_trace.h b/include/linux/rcupdate_trace.h index eda493200663..e6c44eb428ab 100644 --- a/include/linux/rcupdate_trace.h +++ b/include/linux/rcupdate_trace.h @@ -10,6 +10,7 @@ #include <linux/sched.h> #include <linux/rcupdate.h> +#include <linux/cleanup.h> extern struct lockdep_map rcu_trace_lock_map; @@ -98,4 +99,8 @@ static inline void rcu_read_lock_trace(void) { BUG(); } static inline void rcu_read_unlock_trace(void) { BUG(); } #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */ +DEFINE_LOCK_GUARD_0(rcu_tasks_trace, + rcu_read_lock_trace(), + rcu_read_unlock_trace()) + #endif /* __LINUX_RCUPDATE_TRACE_H */ diff --git a/include/linux/sockptr.h b/include/linux/sockptr.h index 195debe2b1db..3e6c8e9d67ae 100644 --- a/include/linux/sockptr.h +++ b/include/linux/sockptr.h @@ -53,6 +53,8 @@ static inline int copy_from_sockptr_offset(void *dst, sockptr_t src, /* Deprecated. * This is unsafe, unless caller checked user provided optlen. * Prefer copy_safe_from_sockptr() instead. + * + * Returns 0 for success, or number of bytes not copied on error. 
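The new DEFINE_LOCK_GUARD_0(rcu_tasks_trace, ...) makes a Tasks Trace RCU read-side section scopeable with guard(), which is what the tracepoint rework later in this patch relies on. A trivial call-site sketch (assumed function name):

#include <linux/rcupdate_trace.h>

static void visit_sleepable_probes(void)
{
        /* Covers the rest of the scope with rcu_read_lock_trace() /
         * rcu_read_unlock_trace(). */
        guard(rcu_tasks_trace)();

        /* Read-side section: callbacks here may take page faults. */
}
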
*/ static inline int copy_from_sockptr(void *dst, sockptr_t src, size_t size) { diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h index 49d690f3d29a..bd9836690da6 100644 --- a/include/linux/soundwire/sdw.h +++ b/include/linux/soundwire/sdw.h @@ -4,14 +4,21 @@ #ifndef __SOUNDWIRE_H #define __SOUNDWIRE_H +#include <linux/bitfield.h> #include <linux/bug.h> -#include <linux/lockdep_types.h> +#include <linux/completion.h> +#include <linux/device.h> #include <linux/irq.h> #include <linux/irqdomain.h> +#include <linux/lockdep_types.h> #include <linux/mod_devicetable.h> -#include <linux/bitfield.h> +#include <linux/mutex.h> +#include <linux/types.h> #include <sound/sdca.h> +struct dentry; +struct fwnode_handle; + struct sdw_bus; struct sdw_slave; @@ -227,64 +234,36 @@ enum sdw_clk_stop_mode { /** * struct sdw_dp0_prop - DP0 properties + * @words: wordlengths supported * @max_word: Maximum number of bits in a Payload Channel Sample, 1 to 64 * (inclusive) * @min_word: Minimum number of bits in a Payload Channel Sample, 1 to 64 * (inclusive) * @num_words: number of wordlengths supported - * @words: wordlengths supported + * @ch_prep_timeout: Port-specific timeout value, in milliseconds * @BRA_flow_controlled: Slave implementation results in an OK_NotReady * response * @simple_ch_prep_sm: If channel prepare sequence is required - * @ch_prep_timeout: Port-specific timeout value, in milliseconds * @imp_def_interrupts: If set, each bit corresponds to support for * implementation-defined interrupts + * @num_lanes: array size of @lane_list + * @lane_list: indicates which Lanes can be used by DP0 * * The wordlengths are specified by Spec as max, min AND number of * discrete values, implementation can define based on the wordlengths they * support */ struct sdw_dp0_prop { + u32 *words; u32 max_word; u32 min_word; u32 num_words; - u32 *words; + u32 ch_prep_timeout; bool BRA_flow_controlled; bool simple_ch_prep_sm; - u32 ch_prep_timeout; bool imp_def_interrupts; -}; - -/** - * struct sdw_dpn_audio_mode - Audio mode properties for DPn - * @bus_min_freq: Minimum bus frequency, in Hz - * @bus_max_freq: Maximum bus frequency, in Hz - * @bus_num_freq: Number of discrete frequencies supported - * @bus_freq: Discrete bus frequencies, in Hz - * @min_freq: Minimum sampling frequency, in Hz - * @max_freq: Maximum sampling bus frequency, in Hz - * @num_freq: Number of discrete sampling frequency supported - * @freq: Discrete sampling frequencies, in Hz - * @prep_ch_behave: Specifies the dependencies between Channel Prepare - * sequence and bus clock configuration - * If 0, Channel Prepare can happen at any Bus clock rate - * If 1, Channel Prepare sequence shall happen only after Bus clock is - * changed to a frequency supported by this mode or compatible modes - * described by the next field - * @glitchless: Bitmap describing possible glitchless transitions from this - * Audio Mode to other Audio Modes - */ -struct sdw_dpn_audio_mode { - u32 bus_min_freq; - u32 bus_max_freq; - u32 bus_num_freq; - u32 *bus_freq; - u32 max_freq; - u32 min_freq; - u32 num_freq; - u32 *freq; - u32 prep_ch_behave; - u32 glitchless; + int num_lanes; + u32 *lane_list; }; /** @@ -299,24 +278,25 @@ struct sdw_dpn_audio_mode { * @type: Data port type. 
Full, Simplified or Reduced * @max_grouping: Maximum number of samples that can be grouped together for * a full data port - * @simple_ch_prep_sm: If the port supports simplified channel prepare state - * machine * @ch_prep_timeout: Port-specific timeout value, in milliseconds * @imp_def_interrupts: If set, each bit corresponds to support for * implementation-defined interrupts * @max_ch: Maximum channels supported * @min_ch: Minimum channels supported * @num_channels: Number of discrete channels supported - * @channels: Discrete channels supported * @num_ch_combinations: Number of channel combinations supported + * @channels: Discrete channels supported * @ch_combinations: Channel combinations supported + * @lane_list: indicates which Lanes can be used by DPn + * @num_lanes: array size of @lane_list * @modes: SDW mode supported * @max_async_buffer: Number of samples that this port can buffer in * asynchronous modes + * @port_encoding: Payload Channel Sample encoding schemes supported * @block_pack_mode: Type of block port mode supported * @read_only_wordlength: Read Only wordlength field in DPN_BlockCtrl1 register - * @port_encoding: Payload Channel Sample encoding schemes supported - * @audio_modes: Audio modes supported + * @simple_ch_prep_sm: If the port supports simplified channel prepare state + * machine */ struct sdw_dpn_prop { u32 num; @@ -326,25 +306,29 @@ struct sdw_dpn_prop { u32 *words; enum sdw_dpn_type type; u32 max_grouping; - bool simple_ch_prep_sm; u32 ch_prep_timeout; u32 imp_def_interrupts; u32 max_ch; u32 min_ch; u32 num_channels; - u32 *channels; u32 num_ch_combinations; + u32 *channels; u32 *ch_combinations; + u32 *lane_list; + int num_lanes; u32 modes; u32 max_async_buffer; + u32 port_encoding; bool block_pack_mode; bool read_only_wordlength; - u32 port_encoding; - struct sdw_dpn_audio_mode *audio_modes; + bool simple_ch_prep_sm; }; /** * struct sdw_slave_prop - SoundWire Slave properties + * @dp0_prop: Data Port 0 properties + * @src_dpn_prop: Source Data Port N properties + * @sink_dpn_prop: Sink Data Port N properties * @mipi_revision: Spec version of the implementation * @wake_capable: Wake-up events are supported * @test_mode_capable: If test mode is supported @@ -361,23 +345,26 @@ struct sdw_dpn_prop { * SCP_AddrPage2 * @bank_delay_support: Slave implements bank delay/bridge support registers * SCP_BankDelay and SCP_NextFrame + * @lane_control_support: Slave supports lane control * @p15_behave: Slave behavior when the Master attempts a read to the Port15 * alias - * @lane_control_support: Slave supports lane control * @master_count: Number of Masters present on this Slave * @source_ports: Bitmap identifying source ports * @sink_ports: Bitmap identifying sink ports - * @dp0_prop: Data Port 0 properties - * @src_dpn_prop: Source Data Port N properties - * @sink_dpn_prop: Sink Data Port N properties - * @scp_int1_mask: SCP_INT1_MASK desired settings * @quirks: bitmask identifying deltas from the MIPI specification + * @sdca_interrupt_register_list: indicates which sets of SDCA interrupt status + * and masks are supported + * @commit_register_supported: is PCP_Commit register supported + * @scp_int1_mask: SCP_INT1_MASK desired settings * @clock_reg_supported: the Peripheral implements the clock base and scale * registers introduced with the SoundWire 1.2 specification. SDCA devices * do not need to set this boolean property as the registers are required. 
* @use_domain_irq: call actual IRQ handler on slave, as well as callback */ struct sdw_slave_prop { + struct sdw_dp0_prop *dp0_prop; + struct sdw_dpn_prop *src_dpn_prop; + struct sdw_dpn_prop *sink_dpn_prop; u32 mipi_revision; bool wake_capable; bool test_mode_capable; @@ -389,16 +376,15 @@ struct sdw_slave_prop { bool high_PHY_capable; bool paging_support; bool bank_delay_support; - enum sdw_p15_behave p15_behave; bool lane_control_support; + enum sdw_p15_behave p15_behave; u32 master_count; u32 source_ports; u32 sink_ports; - struct sdw_dp0_prop *dp0_prop; - struct sdw_dpn_prop *src_dpn_prop; - struct sdw_dpn_prop *sink_dpn_prop; - u8 scp_int1_mask; u32 quirks; + u32 sdca_interrupt_register_list; + u8 commit_register_supported; + u8 scp_int1_mask; bool clock_reg_supported; bool use_domain_irq; }; @@ -407,13 +393,14 @@ struct sdw_slave_prop { /** * struct sdw_master_prop - Master properties + * @clk_gears: Clock gears supported + * @clk_freq: Clock frequencies supported, in Hz + * @quirks: bitmask identifying optional behavior beyond the scope of the MIPI specification * @revision: MIPI spec version of the implementation * @clk_stop_modes: Bitmap, bit N set when clock-stop-modeN supported * @max_clk_freq: Maximum Bus clock frequency, in Hz * @num_clk_gears: Number of clock gears supported - * @clk_gears: Clock gears supported * @num_clk_freq: Number of clock frequencies supported, in Hz - * @clk_freq: Clock frequencies supported, in Hz * @default_frame_rate: Controller default Frame rate, in Hz * @default_row: Number of rows * @default_col: Number of columns @@ -422,24 +409,23 @@ struct sdw_slave_prop { * command * @mclk_freq: clock reference passed to SoundWire Master, in Hz. * @hw_disabled: if true, the Master is not functional, typically due to pin-mux - * @quirks: bitmask identifying optional behavior beyond the scope of the MIPI specification */ struct sdw_master_prop { + u32 *clk_gears; + u32 *clk_freq; + u64 quirks; u32 revision; u32 clk_stop_modes; u32 max_clk_freq; u32 num_clk_gears; - u32 *clk_gears; u32 num_clk_freq; - u32 *clk_freq; u32 default_frame_rate; u32 default_row; u32 default_col; - bool dynamic_frame; u32 err_threshold; u32 mclk_freq; + bool dynamic_frame; bool hw_disabled; - u64 quirks; }; /* Definitions for Master quirks */ @@ -631,7 +617,6 @@ struct sdw_slave_ops { int (*clk_stop)(struct sdw_slave *slave, enum sdw_clk_stop_mode mode, enum sdw_clk_stop_type type); - }; /** @@ -707,8 +692,7 @@ struct sdw_master_device { container_of(d, struct sdw_master_device, dev) struct sdw_driver { - int (*probe)(struct sdw_slave *sdw, - const struct sdw_device_id *id); + int (*probe)(struct sdw_slave *sdw, const struct sdw_device_id *id); int (*remove)(struct sdw_slave *sdw); void (*shutdown)(struct sdw_slave *sdw); @@ -727,7 +711,7 @@ struct sdw_driver { SDW_SLAVE_ENTRY_EXT((_mfg_id), (_part_id), 0, 0, (_drv_data)) int sdw_handle_slave_status(struct sdw_bus *bus, - enum sdw_slave_status status[]); + enum sdw_slave_status status[]); /* * SDW master structures and APIs @@ -809,29 +793,28 @@ struct sdw_enable_ch { */ struct sdw_master_port_ops { int (*dpn_set_port_params)(struct sdw_bus *bus, - struct sdw_port_params *port_params, - unsigned int bank); + struct sdw_port_params *port_params, + unsigned int bank); int (*dpn_set_port_transport_params)(struct sdw_bus *bus, - struct sdw_transport_params *transport_params, - enum sdw_reg_bank bank); - int (*dpn_port_prep)(struct sdw_bus *bus, - struct sdw_prepare_ch *prepare_ch); + struct sdw_transport_params *transport_params, + enum 
sdw_reg_bank bank); + int (*dpn_port_prep)(struct sdw_bus *bus, struct sdw_prepare_ch *prepare_ch); int (*dpn_port_enable_ch)(struct sdw_bus *bus, - struct sdw_enable_ch *enable_ch, unsigned int bank); + struct sdw_enable_ch *enable_ch, unsigned int bank); }; struct sdw_msg; /** - * struct sdw_defer - SDW deffered message - * @length: message length + * struct sdw_defer - SDW deferred message * @complete: message completion * @msg: SDW message + * @length: message length */ struct sdw_defer { + struct sdw_msg *msg; int length; struct completion complete; - struct sdw_msg *msg; }; /** @@ -852,14 +835,11 @@ struct sdw_defer { */ struct sdw_master_ops { int (*read_prop)(struct sdw_bus *bus); - u64 (*override_adr) - (struct sdw_bus *bus, u64 addr); - enum sdw_command_response (*xfer_msg) - (struct sdw_bus *bus, struct sdw_msg *msg); - enum sdw_command_response (*xfer_msg_defer) - (struct sdw_bus *bus); + u64 (*override_adr)(struct sdw_bus *bus, u64 addr); + enum sdw_command_response (*xfer_msg)(struct sdw_bus *bus, struct sdw_msg *msg); + enum sdw_command_response (*xfer_msg_defer)(struct sdw_bus *bus); int (*set_bus_conf)(struct sdw_bus *bus, - struct sdw_bus_params *params); + struct sdw_bus_params *params); int (*pre_bank_switch)(struct sdw_bus *bus); int (*post_bank_switch)(struct sdw_bus *bus); u32 (*read_ping_status)(struct sdw_bus *bus); @@ -874,68 +854,71 @@ struct sdw_master_ops { * struct sdw_bus - SoundWire bus * @dev: Shortcut to &bus->md->dev to avoid changing the entire code. * @md: Master device - * @controller_id: system-unique controller ID. If set to -1, the bus @id will be used. - * @link_id: Link id number, can be 0 to N, unique for each Controller - * @id: bus system-wide unique id - * @slaves: list of Slaves on this bus - * @assigned: Bitmap for Slave device numbers. - * Bit set implies used number, bit clear implies unused number. + * @bus_lock_key: bus lock key associated to @bus_lock * @bus_lock: bus lock + * @slaves: list of Slaves on this bus + * @msg_lock_key: message lock key associated to @msg_lock * @msg_lock: message lock - * @compute_params: points to Bus resource management implementation - * @ops: Master callback ops - * @port_ops: Master port callback ops - * @params: Current bus parameters - * @prop: Master properties - * @vendor_specific_prop: pointer to non-standard properties * @m_rt_list: List of Master instance of all stream(s) running on Bus. This * is used to compute and program bus bandwidth, clock, frame shape, * transport and port parameters - * @debugfs: Bus debugfs - * @domain: IRQ domain * @defer_msg: Defer message - * @clk_stop_timeout: Clock stop timeout computed - * @bank_switch_timeout: Bank switch timeout computed - * @multi_link: Store bus property that indicates if multi links - * are supported. This flag is populated by drivers after reading - * appropriate firmware (ACPI/DT). + * @params: Current bus parameters + * @stream_refcount: number of streams currently using this bus + * @ops: Master callback ops + * @port_ops: Master port callback ops + * @prop: Master properties + * @vendor_specific_prop: pointer to non-standard properties * @hw_sync_min_links: Number of links used by a stream above which * hardware-based synchronization is required. This value is only * meaningful if multi_link is set. If set to 1, hardware-based * synchronization will be used even if a stream only uses a single * SoundWire segment. - * @stream_refcount: number of streams currently using this bus + * @controller_id: system-unique controller ID. 
If set to -1, the bus @id will be used. + * @link_id: Link id number, can be 0 to N, unique for each Controller + * @id: bus system-wide unique id + * @compute_params: points to Bus resource management implementation + * @assigned: Bitmap for Slave device numbers. + * Bit set implies used number, bit clear implies unused number. + * @clk_stop_timeout: Clock stop timeout computed + * @bank_switch_timeout: Bank switch timeout computed + * @domain: IRQ domain + * @irq_chip: IRQ chip + * @debugfs: Bus debugfs (optional) + * @multi_link: Store bus property that indicates if multi links + * are supported. This flag is populated by drivers after reading + * appropriate firmware (ACPI/DT). */ struct sdw_bus { struct device *dev; struct sdw_master_device *md; - int controller_id; - unsigned int link_id; - int id; - struct list_head slaves; - DECLARE_BITMAP(assigned, SDW_MAX_DEVICES); - struct mutex bus_lock; struct lock_class_key bus_lock_key; - struct mutex msg_lock; + struct mutex bus_lock; + struct list_head slaves; struct lock_class_key msg_lock_key; - int (*compute_params)(struct sdw_bus *bus); + struct mutex msg_lock; + struct list_head m_rt_list; + struct sdw_defer defer_msg; + struct sdw_bus_params params; + int stream_refcount; const struct sdw_master_ops *ops; const struct sdw_master_port_ops *port_ops; - struct sdw_bus_params params; struct sdw_master_prop prop; void *vendor_specific_prop; - struct list_head m_rt_list; + int hw_sync_min_links; + int controller_id; + unsigned int link_id; + int id; + int (*compute_params)(struct sdw_bus *bus); + DECLARE_BITMAP(assigned, SDW_MAX_DEVICES); + unsigned int clk_stop_timeout; + u32 bank_switch_timeout; + struct irq_chip irq_chip; + struct irq_domain *domain; #ifdef CONFIG_DEBUG_FS struct dentry *debugfs; #endif - struct irq_chip irq_chip; - struct irq_domain *domain; - struct sdw_defer defer_msg; - unsigned int clk_stop_timeout; - u32 bank_switch_timeout; bool multi_link; - int hw_sync_min_links; - int stream_refcount; }; int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent, @@ -1013,18 +996,18 @@ struct sdw_stream_params { * @params: Stream parameters * @state: Current state of the stream * @type: Stream type PCM or PDM + * @m_rt_count: Count of Master runtime(s) in this stream * @master_list: List of Master runtime(s) in this stream. 
* master_list can contain only one m_rt per Master instance * for a stream - * @m_rt_count: Count of Master runtime(s) in this stream */ struct sdw_stream_runtime { const char *name; struct sdw_stream_params params; enum sdw_stream_state state; enum sdw_stream_type type; - struct list_head master_list; int m_rt_count; + struct list_head master_list; }; struct sdw_stream_runtime *sdw_alloc_stream(const char *stream_name); @@ -1033,12 +1016,12 @@ void sdw_release_stream(struct sdw_stream_runtime *stream); int sdw_compute_params(struct sdw_bus *bus); int sdw_stream_add_master(struct sdw_bus *bus, - struct sdw_stream_config *stream_config, - const struct sdw_port_config *port_config, - unsigned int num_ports, - struct sdw_stream_runtime *stream); + struct sdw_stream_config *stream_config, + const struct sdw_port_config *port_config, + unsigned int num_ports, + struct sdw_stream_runtime *stream); int sdw_stream_remove_master(struct sdw_bus *bus, - struct sdw_stream_runtime *stream); + struct sdw_stream_runtime *stream); int sdw_startup_stream(void *sdw_substream); int sdw_prepare_stream(struct sdw_stream_runtime *stream); int sdw_enable_stream(struct sdw_stream_runtime *stream); diff --git a/include/linux/soundwire/sdw_amd.h b/include/linux/soundwire/sdw_amd.h index 585b4c58a8a6..799f8578137b 100644 --- a/include/linux/soundwire/sdw_amd.h +++ b/include/linux/soundwire/sdw_amd.h @@ -27,9 +27,11 @@ #define ACP_SDW0 0 #define ACP_SDW1 1 #define AMD_SDW_MAX_MANAGER_COUNT 2 +#define ACP63_PCI_REV_ID 0x63 struct acp_sdw_pdata { u16 instance; + u32 acp_rev; /* mutex to protect acp common register access */ struct mutex *acp_sdw_lock; }; @@ -66,6 +68,7 @@ struct sdw_amd_dai_runtime { * @instance: SoundWire manager instance * @quirks: SoundWire manager quirks * @wake_en_mask: wake enable mask per SoundWire manager + * @acp_rev: acp pci device revision id * @clk_stopped: flag set to true when clock is stopped * @power_mode_mask: flag interprets amd SoundWire manager power mode * @dai_runtime_array: dai runtime array @@ -94,6 +97,7 @@ struct amd_sdw_manager { u32 quirks; u32 wake_en_mask; u32 power_mode_mask; + u32 acp_rev; bool clk_stopped; struct sdw_amd_dai_runtime **dai_runtime_array; @@ -131,6 +135,7 @@ struct sdw_amd_ctx { * struct sdw_amd_res - Soundwire AMD global resource structure, * typically populated by the DSP driver/Legacy driver * + * @acp_rev: acp pci device revision id * @addr: acp pci device resource start address * @reg_range: ACP register range * @link_mask: bit-wise mask listing links selected by the DSP driver/ @@ -143,6 +148,7 @@ struct sdw_amd_ctx { * @acp_lock: mutex protecting acp common registers access */ struct sdw_amd_res { + u32 acp_rev; u32 addr; u32 reg_range; u32 link_mask; diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index 016b29a56c87..2a5df5b62cfc 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -184,6 +184,7 @@ unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status); enum trace_flag_type { TRACE_FLAG_IRQS_OFF = 0x01, + TRACE_FLAG_NEED_RESCHED_LAZY = 0x02, TRACE_FLAG_NEED_RESCHED = 0x04, TRACE_FLAG_HARDIRQ = 0x08, TRACE_FLAG_SOFTIRQ = 0x10, diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index 6073a8c7e38c..76d9055b2cff 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -210,36 +210,6 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) #endif /* CONFIG_HAVE_STATIC_CALL */ /* - * it_func[0] is never NULL because there is at least 
one element in the array - * when the array itself is non NULL. - * - * With @syscall=0, the tracepoint callback array dereference is - * protected by disabling preemption. - * With @syscall=1, the tracepoint callback array dereference is - * protected by Tasks Trace RCU, which allows probes to handle page - * faults. - */ -#define __DO_TRACE(name, args, cond, syscall) \ - do { \ - int __maybe_unused __idx = 0; \ - \ - if (!(cond)) \ - return; \ - \ - if (syscall) \ - rcu_read_lock_trace(); \ - else \ - preempt_disable_notrace(); \ - \ - __DO_TRACE_CALL(name, TP_ARGS(args)); \ - \ - if (syscall) \ - rcu_read_unlock_trace(); \ - else \ - preempt_enable_notrace(); \ - } while (0) - -/* * Declare an exported function that Rust code can call to trigger this * tracepoint. This function does not include the static branch; that is done * in Rust to avoid a function call when the tracepoint is disabled. @@ -262,7 +232,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) * site if it is not watching, as it will need to be active when the * tracepoint is enabled. */ -#define __DECLARE_TRACE_COMMON(name, proto, args, cond, data_proto) \ +#define __DECLARE_TRACE_COMMON(name, proto, args, data_proto) \ extern int __traceiter_##name(data_proto); \ DECLARE_STATIC_CALL(tp_func_##name, __traceiter_##name); \ extern struct tracepoint __tracepoint_##name; \ @@ -297,41 +267,43 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) } #define __DECLARE_TRACE(name, proto, args, cond, data_proto) \ - __DECLARE_TRACE_COMMON(name, PARAMS(proto), PARAMS(args), cond, PARAMS(data_proto)) \ + __DECLARE_TRACE_COMMON(name, PARAMS(proto), PARAMS(args), PARAMS(data_proto)) \ static inline void __rust_do_trace_##name(proto) \ { \ - __DO_TRACE(name, \ - TP_ARGS(args), \ - TP_CONDITION(cond), 0); \ + if (cond) { \ + guard(preempt_notrace)(); \ + __DO_TRACE_CALL(name, TP_ARGS(args)); \ + } \ } \ static inline void trace_##name(proto) \ { \ - if (static_branch_unlikely(&__tracepoint_##name.key)) \ - __DO_TRACE(name, \ - TP_ARGS(args), \ - TP_CONDITION(cond), 0); \ + if (static_branch_unlikely(&__tracepoint_##name.key)) { \ + if (cond) { \ + guard(preempt_notrace)(); \ + __DO_TRACE_CALL(name, TP_ARGS(args)); \ + } \ + } \ if (IS_ENABLED(CONFIG_LOCKDEP) && (cond)) { \ WARN_ONCE(!rcu_is_watching(), \ "RCU not watching for tracepoint"); \ } \ } -#define __DECLARE_TRACE_SYSCALL(name, proto, args, cond, data_proto) \ - __DECLARE_TRACE_COMMON(name, PARAMS(proto), PARAMS(args), cond, PARAMS(data_proto)) \ +#define __DECLARE_TRACE_SYSCALL(name, proto, args, data_proto) \ + __DECLARE_TRACE_COMMON(name, PARAMS(proto), PARAMS(args), PARAMS(data_proto)) \ static inline void __rust_do_trace_##name(proto) \ { \ - __DO_TRACE(name, \ - TP_ARGS(args), \ - TP_CONDITION(cond), 1); \ + guard(rcu_tasks_trace)(); \ + __DO_TRACE_CALL(name, TP_ARGS(args)); \ } \ static inline void trace_##name(proto) \ { \ might_fault(); \ - if (static_branch_unlikely(&__tracepoint_##name.key)) \ - __DO_TRACE(name, \ - TP_ARGS(args), \ - TP_CONDITION(cond), 1); \ - if (IS_ENABLED(CONFIG_LOCKDEP) && (cond)) { \ + if (static_branch_unlikely(&__tracepoint_##name.key)) { \ + guard(rcu_tasks_trace)(); \ + __DO_TRACE_CALL(name, TP_ARGS(args)); \ + } \ + if (IS_ENABLED(CONFIG_LOCKDEP)) { \ WARN_ONCE(!rcu_is_watching(), \ "RCU not watching for tracepoint"); \ } \ @@ -341,6 +313,9 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) * We have no guarantee that gcc and the linker won't up-align the 
tracepoint * structures, so we create an array of pointers that will be used for iteration * on the tracepoints. + * + * it_func[0] is never NULL because there is at least one element in the array + * when the array itself is non NULL. */ #define __DEFINE_TRACE_EXT(_name, _ext, proto, args) \ static const char __tpstrtab_##_name[] \ @@ -412,7 +387,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) #else /* !TRACEPOINTS_ENABLED */ -#define __DECLARE_TRACE(name, proto, args, cond, data_proto) \ +#define __DECLARE_TRACE_COMMON(name, proto, args, data_proto) \ static inline void trace_##name(proto) \ { } \ static inline int \ @@ -436,7 +411,11 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) return false; \ } -#define __DECLARE_TRACE_SYSCALL __DECLARE_TRACE +#define __DECLARE_TRACE(name, proto, args, cond, data_proto) \ + __DECLARE_TRACE_COMMON(name, PARAMS(proto), PARAMS(args), PARAMS(data_proto)) + +#define __DECLARE_TRACE_SYSCALL(name, proto, args, data_proto) \ + __DECLARE_TRACE_COMMON(name, PARAMS(proto), PARAMS(args), PARAMS(data_proto)) #define DEFINE_TRACE_FN(name, reg, unreg, proto, args) #define DEFINE_TRACE_SYSCALL(name, reg, unreg, proto, args) @@ -502,7 +481,6 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) #define DECLARE_TRACE_SYSCALL(name, proto, args) \ __DECLARE_TRACE_SYSCALL(name, PARAMS(proto), PARAMS(args), \ - cpu_online(raw_smp_processor_id()), \ PARAMS(void *__data, proto)) #define TRACE_EVENT_FLAGS(event, flag) diff --git a/include/linux/virtio.h b/include/linux/virtio.h index 338e0f5efb4b..57cc4b07fd17 100644 --- a/include/linux/virtio.h +++ b/include/linux/virtio.h @@ -120,6 +120,7 @@ struct virtio_admin_cmd { struct scatterlist *data_sg; struct scatterlist *result_sg; struct completion completion; + u32 result_sg_size; int ret; }; diff --git a/include/linux/virtio_pci_admin.h b/include/linux/virtio_pci_admin.h index f4a100a0fe2e..dffc92c17ad2 100644 --- a/include/linux/virtio_pci_admin.h +++ b/include/linux/virtio_pci_admin.h @@ -20,4 +20,15 @@ int virtio_pci_admin_legacy_io_notify_info(struct pci_dev *pdev, u64 *bar_offset); #endif +bool virtio_pci_admin_has_dev_parts(struct pci_dev *pdev); +int virtio_pci_admin_mode_set(struct pci_dev *pdev, u8 mode); +int virtio_pci_admin_obj_create(struct pci_dev *pdev, u16 obj_type, u8 operation_type, + u32 *obj_id); +int virtio_pci_admin_obj_destroy(struct pci_dev *pdev, u16 obj_type, u32 id); +int virtio_pci_admin_dev_parts_metadata_get(struct pci_dev *pdev, u16 obj_type, + u32 id, u8 metadata_type, u32 *out); +int virtio_pci_admin_dev_parts_get(struct pci_dev *pdev, u16 obj_type, u32 id, + u8 get_type, struct scatterlist *res_sg, u32 *res_size); +int virtio_pci_admin_dev_parts_set(struct pci_dev *pdev, struct scatterlist *data_sg); + #endif /* _LINUX_VIRTIO_PCI_ADMIN_H */ diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h index 9adc218fb6df..b44069d29cec 100644 --- a/include/uapi/linux/elf.h +++ b/include/uapi/linux/elf.h @@ -451,6 +451,7 @@ typedef struct elf64_shdr { #define NT_MIPS_MSA 0x802 /* MIPS SIMD registers */ #define NT_RISCV_CSR 0x900 /* RISC-V Control and Status Registers */ #define NT_RISCV_VECTOR 0x901 /* RISC-V vector registers */ +#define NT_RISCV_TAGGED_ADDR_CTRL 0x902 /* RISC-V tagged address control (prctl()) */ #define NT_LOONGARCH_CPUCFG 0xa00 /* LoongArch CPU config registers */ #define NT_LOONGARCH_CSR 0xa01 /* LoongArch control and status registers */ #define NT_LOONGARCH_LSX 0xa02 /* LoongArch 
Loongson SIMD Extension registers */ diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h index 557a3d2ac1d4..5c6080680cb2 100644 --- a/include/uapi/linux/prctl.h +++ b/include/uapi/linux/prctl.h @@ -230,7 +230,7 @@ struct prctl_mm_map { # define PR_PAC_APDBKEY (1UL << 3) # define PR_PAC_APGAKEY (1UL << 4) -/* Tagged user address controls for arm64 */ +/* Tagged user address controls for arm64 and RISC-V */ #define PR_SET_TAGGED_ADDR_CTRL 55 #define PR_GET_TAGGED_ADDR_CTRL 56 # define PR_TAGGED_ADDR_ENABLE (1UL << 0) @@ -244,6 +244,9 @@ struct prctl_mm_map { # define PR_MTE_TAG_MASK (0xffffUL << PR_MTE_TAG_SHIFT) /* Unused; kept only for source compatibility */ # define PR_MTE_TCF_SHIFT 1 +/* RISC-V pointer masking tag length */ +# define PR_PMLEN_SHIFT 24 +# define PR_PMLEN_MASK (0x7fUL << PR_PMLEN_SHIFT) /* Control reclaim behavior when allocating memory */ #define PR_SET_IO_FLUSHER 57 diff --git a/include/uapi/linux/virtio_pci.h b/include/uapi/linux/virtio_pci.h index a8208492e822..1beb317df1b9 100644 --- a/include/uapi/linux/virtio_pci.h +++ b/include/uapi/linux/virtio_pci.h @@ -40,6 +40,7 @@ #define _LINUX_VIRTIO_PCI_H #include <linux/types.h> +#include <linux/kernel.h> #ifndef VIRTIO_PCI_NO_LEGACY @@ -240,6 +241,17 @@ struct virtio_pci_cfg_cap { #define VIRTIO_ADMIN_CMD_LEGACY_DEV_CFG_READ 0x5 #define VIRTIO_ADMIN_CMD_LEGACY_NOTIFY_INFO 0x6 +/* Device parts access commands. */ +#define VIRTIO_ADMIN_CMD_CAP_ID_LIST_QUERY 0x7 +#define VIRTIO_ADMIN_CMD_DEVICE_CAP_GET 0x8 +#define VIRTIO_ADMIN_CMD_DRIVER_CAP_SET 0x9 +#define VIRTIO_ADMIN_CMD_RESOURCE_OBJ_CREATE 0xa +#define VIRTIO_ADMIN_CMD_RESOURCE_OBJ_DESTROY 0xd +#define VIRTIO_ADMIN_CMD_DEV_PARTS_METADATA_GET 0xe +#define VIRTIO_ADMIN_CMD_DEV_PARTS_GET 0xf +#define VIRTIO_ADMIN_CMD_DEV_PARTS_SET 0x10 +#define VIRTIO_ADMIN_CMD_DEV_MODE_SET 0x11 + struct virtio_admin_cmd_hdr { __le16 opcode; /* @@ -286,4 +298,123 @@ struct virtio_admin_cmd_notify_info_result { struct virtio_admin_cmd_notify_info_data entries[VIRTIO_ADMIN_CMD_MAX_NOTIFY_INFO]; }; +#define VIRTIO_DEV_PARTS_CAP 0x0000 + +struct virtio_dev_parts_cap { + __u8 get_parts_resource_objects_limit; + __u8 set_parts_resource_objects_limit; +}; + +#define MAX_CAP_ID __KERNEL_DIV_ROUND_UP(VIRTIO_DEV_PARTS_CAP + 1, 64) + +struct virtio_admin_cmd_query_cap_id_result { + __le64 supported_caps[MAX_CAP_ID]; +}; + +struct virtio_admin_cmd_cap_get_data { + __le16 id; + __u8 reserved[6]; +}; + +struct virtio_admin_cmd_cap_set_data { + __le16 id; + __u8 reserved[6]; + __u8 cap_specific_data[]; +}; + +struct virtio_admin_cmd_resource_obj_cmd_hdr { + __le16 type; + __u8 reserved[2]; + __le32 id; /* Indicates unique resource object id per resource object type */ +}; + +struct virtio_admin_cmd_resource_obj_create_data { + struct virtio_admin_cmd_resource_obj_cmd_hdr hdr; + __le64 flags; + __u8 resource_obj_specific_data[]; +}; + +#define VIRTIO_RESOURCE_OBJ_DEV_PARTS 0 + +#define VIRTIO_RESOURCE_OBJ_DEV_PARTS_TYPE_GET 0 +#define VIRTIO_RESOURCE_OBJ_DEV_PARTS_TYPE_SET 1 + +struct virtio_resource_obj_dev_parts { + __u8 type; + __u8 reserved[7]; +}; + +#define VIRTIO_ADMIN_CMD_DEV_PARTS_METADATA_TYPE_SIZE 0 +#define VIRTIO_ADMIN_CMD_DEV_PARTS_METADATA_TYPE_COUNT 1 +#define VIRTIO_ADMIN_CMD_DEV_PARTS_METADATA_TYPE_LIST 2 + +struct virtio_admin_cmd_dev_parts_metadata_data { + struct virtio_admin_cmd_resource_obj_cmd_hdr hdr; + __u8 type; + __u8 reserved[7]; +}; + +#define VIRTIO_DEV_PART_F_OPTIONAL 0 + +struct virtio_dev_part_hdr { + __le16 part_type; + __u8 flags; + __u8 reserved; + 
union { + struct { + __le32 offset; + __le32 reserved; + } pci_common_cfg; + struct { + __le16 index; + __u8 reserved[6]; + } vq_index; + } selector; + __le32 length; +}; + +struct virtio_dev_part { + struct virtio_dev_part_hdr hdr; + __u8 value[]; +}; + +struct virtio_admin_cmd_dev_parts_metadata_result { + union { + struct { + __le32 size; + __le32 reserved; + } parts_size; + struct { + __le32 count; + __le32 reserved; + } hdr_list_count; + struct { + __le32 count; + __le32 reserved; + struct virtio_dev_part_hdr hdrs[]; + } hdr_list; + }; +}; + +#define VIRTIO_ADMIN_CMD_DEV_PARTS_GET_TYPE_SELECTED 0 +#define VIRTIO_ADMIN_CMD_DEV_PARTS_GET_TYPE_ALL 1 + +struct virtio_admin_cmd_dev_parts_get_data { + struct virtio_admin_cmd_resource_obj_cmd_hdr hdr; + __u8 type; + __u8 reserved[7]; + struct virtio_dev_part_hdr hdr_list[]; +}; + +struct virtio_admin_cmd_dev_parts_set_data { + struct virtio_admin_cmd_resource_obj_cmd_hdr hdr; + struct virtio_dev_part parts[]; +}; + +#define VIRTIO_ADMIN_CMD_DEV_MODE_F_STOPPED 0 + +struct virtio_admin_cmd_dev_mode_set_data { + __u8 flags; +}; + #endif diff --git a/kernel/fork.c b/kernel/fork.c index f253e81d0c28..1450b461d196 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -621,6 +621,12 @@ static void dup_mm_exe_file(struct mm_struct *mm, struct mm_struct *oldmm) exe_file = get_mm_exe_file(oldmm); RCU_INIT_POINTER(mm->exe_file, exe_file); + /* + * We depend on the oldmm having properly denied write access to the + * exe_file already. + */ + if (exe_file && deny_write_access(exe_file)) + pr_warn_once("deny_write_access() failed in %s\n", __func__); } #ifdef CONFIG_MMU @@ -1413,11 +1419,20 @@ int set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file) */ old_exe_file = rcu_dereference_raw(mm->exe_file); - if (new_exe_file) + if (new_exe_file) { + /* + * We expect the caller (i.e., sys_execve) to have already denied + * write access, so this is unlikely to fail.
+ */ + if (unlikely(deny_write_access(new_exe_file))) + return -EACCES; get_file(new_exe_file); + } rcu_assign_pointer(mm->exe_file, new_exe_file); - if (old_exe_file) + if (old_exe_file) { + allow_write_access(old_exe_file); fput(old_exe_file); + } return 0; } @@ -1456,6 +1471,9 @@ int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file) return ret; } + ret = deny_write_access(new_exe_file); + if (ret) + return -EACCES; get_file(new_exe_file); /* set the new file */ @@ -1464,8 +1482,10 @@ int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file) rcu_assign_pointer(mm->exe_file, new_exe_file); mmap_write_unlock(mm); - if (old_exe_file) + if (old_exe_file) { + allow_write_access(old_exe_file); fput(old_exe_file); + } return 0; } diff --git a/kernel/module/internal.h b/kernel/module/internal.h index 2ebece8a789f..daef2be83902 100644 --- a/kernel/module/internal.h +++ b/kernel/module/internal.h @@ -80,7 +80,12 @@ struct load_info { unsigned int used_pages; #endif struct { - unsigned int sym, str, mod, vers, info, pcpu; + unsigned int sym; + unsigned int str; + unsigned int mod; + unsigned int vers; + unsigned int info; + unsigned int pcpu; } index; }; diff --git a/kernel/module/main.c b/kernel/module/main.c index d2e1b8976c7b..5399c182b3cb 100644 --- a/kernel/module/main.c +++ b/kernel/module/main.c @@ -195,6 +195,38 @@ static unsigned int find_sec(const struct load_info *info, const char *name) return 0; } +/** + * find_any_unique_sec() - Find a unique section index by name + * @info: Load info for the module to scan + * @name: Name of the section we're looking for + * + * Locates a unique section by name. Ignores SHF_ALLOC. + * + * Return: Section index if found uniquely, zero if absent, negative count + * of total instances if multiple were found. + */ +static int find_any_unique_sec(const struct load_info *info, const char *name) +{ + unsigned int idx; + unsigned int count = 0; + int i; + + for (i = 1; i < info->hdr->e_shnum; i++) { + if (strcmp(info->secstrings + info->sechdrs[i].sh_name, + name) == 0) { + count++; + idx = i; + } + } + if (count == 1) { + return idx; + } else if (count == 0) { + return 0; + } else { + return -count; + } +} + /* Find a module section, or NULL. */ static void *section_addr(const struct load_info *info, const char *name) { @@ -1679,7 +1711,7 @@ bool __weak module_exit_section(const char *name) return strstarts(name, ".exit"); } -static int validate_section_offset(struct load_info *info, Elf_Shdr *shdr) +static int validate_section_offset(const struct load_info *info, Elf_Shdr *shdr) { #if defined(CONFIG_64BIT) unsigned long long secend; @@ -1698,62 +1730,80 @@ static int validate_section_offset(struct load_info *info, Elf_Shdr *shdr) return 0; } -/* - * Check userspace passed ELF module against our expectations, and cache - * useful variables for further processing as we go. - * - * This does basic validity checks against section offsets and sizes, the - * section name string table, and the indices used for it (sh_name). +/** + * elf_validity_ehdr() - Checks an ELF header for module validity + * @info: Load info containing the ELF header to check * - * As a last step, since we're already checking the ELF sections we cache - * useful variables which will be used later for our convenience: + * Checks whether an ELF header could belong to a valid module. 
Checks: * - * o pointers to section headers - * o cache the modinfo symbol section - * o cache the string symbol section - * o cache the module section + * * ELF header is within the data the user provided + * * ELF magic is present + * * It is relocatable (not final linked, not core file, etc.) + * * The header's machine type matches what the architecture expects. + * * Optional arch-specific hook for other properties + * - module_elf_check_arch() is currently only used by PPC to check + * ELF ABI version, but may be used by others in the future. * - * As a last step we set info->mod to the temporary copy of the module in - * info->hdr. The final one will be allocated in move_module(). Any - * modifications we make to our copy of the module will be carried over - * to the final minted module. + * Return: %0 if valid, %-ENOEXEC on failure. */ -static int elf_validity_cache_copy(struct load_info *info, int flags) +static int elf_validity_ehdr(const struct load_info *info) { - unsigned int i; - Elf_Shdr *shdr, *strhdr; - int err; - unsigned int num_mod_secs = 0, mod_idx; - unsigned int num_info_secs = 0, info_idx; - unsigned int num_sym_secs = 0, sym_idx; - if (info->len < sizeof(*(info->hdr))) { pr_err("Invalid ELF header len %lu\n", info->len); - goto no_exec; + return -ENOEXEC; } - if (memcmp(info->hdr->e_ident, ELFMAG, SELFMAG) != 0) { pr_err("Invalid ELF header magic: != %s\n", ELFMAG); - goto no_exec; + return -ENOEXEC; } if (info->hdr->e_type != ET_REL) { pr_err("Invalid ELF header type: %u != %u\n", info->hdr->e_type, ET_REL); - goto no_exec; + return -ENOEXEC; } if (!elf_check_arch(info->hdr)) { pr_err("Invalid architecture in ELF header: %u\n", info->hdr->e_machine); - goto no_exec; + return -ENOEXEC; } if (!module_elf_check_arch(info->hdr)) { pr_err("Invalid module architecture in ELF header: %u\n", info->hdr->e_machine); - goto no_exec; + return -ENOEXEC; } + return 0; +} + +/** + * elf_validity_cache_sechdrs() - Cache section headers if valid + * @info: Load info to compute section headers from + * + * Checks: + * + * * ELF header is valid (see elf_validity_ehdr()) + * * Section headers are the size we expect + * * Section array fits in the user provided data + * * Section index 0 is NULL + * * Section contents are inbounds + * + * Then updates @info with a &load_info->sechdrs pointer if valid. + * + * Return: %0 if valid, negative error code if validation failed. + */ +static int elf_validity_cache_sechdrs(struct load_info *info) +{ + Elf_Shdr *sechdrs; + Elf_Shdr *shdr; + int i; + int err; + + err = elf_validity_ehdr(info); + if (err < 0) + return err; + if (info->hdr->e_shentsize != sizeof(Elf_Shdr)) { pr_err("Invalid ELF section header size\n"); - goto no_exec; + return -ENOEXEC; } /* @@ -1765,10 +1815,66 @@ static int elf_validity_cache_copy(struct load_info *info, int flags) || (info->hdr->e_shnum * sizeof(Elf_Shdr) > info->len - info->hdr->e_shoff)) { pr_err("Invalid ELF section header overflow\n"); - goto no_exec; + return -ENOEXEC; } - info->sechdrs = (void *)info->hdr + info->hdr->e_shoff; + sechdrs = (void *)info->hdr + info->hdr->e_shoff; + + /* + * The code assumes that section 0 has a length of zero and + * an addr of zero, so check for it. 
+ */ + if (sechdrs[0].sh_type != SHT_NULL + || sechdrs[0].sh_size != 0 + || sechdrs[0].sh_addr != 0) { + pr_err("ELF Spec violation: section 0 type(%d)!=SH_NULL or non-zero len or addr\n", + sechdrs[0].sh_type); + return -ENOEXEC; + } + + /* Validate contents are inbounds */ + for (i = 1; i < info->hdr->e_shnum; i++) { + shdr = &sechdrs[i]; + switch (shdr->sh_type) { + case SHT_NULL: + case SHT_NOBITS: + /* No contents, offset/size don't mean anything */ + continue; + default: + err = validate_section_offset(info, shdr); + if (err < 0) { + pr_err("Invalid ELF section in module (section %u type %u)\n", + i, shdr->sh_type); + return err; + } + } + } + + info->sechdrs = sechdrs; + + return 0; +} + +/** + * elf_validity_cache_secstrings() - Caches section names if valid + * @info: Load info to cache section names from. Must have valid sechdrs. + * + * Specifically checks: + * + * * Section name table index is inbounds of section headers + * * Section name table is not empty + * * Section name table is NUL terminated + * * All section name offsets are inbounds of the section + * + * Then updates @info with a &load_info->secstrings pointer if valid. + * + * Return: %0 if valid, negative error code if validation failed. + */ +static int elf_validity_cache_secstrings(struct load_info *info) +{ + Elf_Shdr *strhdr, *shdr; + char *secstrings; + int i; /* * Verify if the section name table index is valid. @@ -1778,165 +1884,234 @@ static int elf_validity_cache_copy(struct load_info *info, int flags) pr_err("Invalid ELF section name index: %d || e_shstrndx (%d) >= e_shnum (%d)\n", info->hdr->e_shstrndx, info->hdr->e_shstrndx, info->hdr->e_shnum); - goto no_exec; + return -ENOEXEC; } strhdr = &info->sechdrs[info->hdr->e_shstrndx]; - err = validate_section_offset(info, strhdr); - if (err < 0) { - pr_err("Invalid ELF section hdr(type %u)\n", strhdr->sh_type); - return err; - } /* * The section name table must be NUL-terminated, as required * by the spec. This makes strcmp and pr_* calls that access * strings in the section safe. */ - info->secstrings = (void *)info->hdr + strhdr->sh_offset; + secstrings = (void *)info->hdr + strhdr->sh_offset; if (strhdr->sh_size == 0) { pr_err("empty section name table\n"); - goto no_exec; + return -ENOEXEC; } - if (info->secstrings[strhdr->sh_size - 1] != '\0') { + if (secstrings[strhdr->sh_size - 1] != '\0') { pr_err("ELF Spec violation: section name table isn't null terminated\n"); - goto no_exec; - } - - /* - * The code assumes that section 0 has a length of zero and - * an addr of zero, so check for it. 
- */ - if (info->sechdrs[0].sh_type != SHT_NULL - || info->sechdrs[0].sh_size != 0 - || info->sechdrs[0].sh_addr != 0) { - pr_err("ELF Spec violation: section 0 type(%d)!=SH_NULL or non-zero len or addr\n", - info->sechdrs[0].sh_type); - goto no_exec; + return -ENOEXEC; } - for (i = 1; i < info->hdr->e_shnum; i++) { + for (i = 0; i < info->hdr->e_shnum; i++) { shdr = &info->sechdrs[i]; - switch (shdr->sh_type) { - case SHT_NULL: - case SHT_NOBITS: + /* SHT_NULL means sh_name has an undefined value */ + if (shdr->sh_type == SHT_NULL) continue; - case SHT_SYMTAB: - if (shdr->sh_link == SHN_UNDEF - || shdr->sh_link >= info->hdr->e_shnum) { - pr_err("Invalid ELF sh_link!=SHN_UNDEF(%d) or (sh_link(%d) >= hdr->e_shnum(%d)\n", - shdr->sh_link, shdr->sh_link, - info->hdr->e_shnum); - goto no_exec; - } - num_sym_secs++; - sym_idx = i; - fallthrough; - default: - err = validate_section_offset(info, shdr); - if (err < 0) { - pr_err("Invalid ELF section in module (section %u type %u)\n", - i, shdr->sh_type); - return err; - } - if (strcmp(info->secstrings + shdr->sh_name, - ".gnu.linkonce.this_module") == 0) { - num_mod_secs++; - mod_idx = i; - } else if (strcmp(info->secstrings + shdr->sh_name, - ".modinfo") == 0) { - num_info_secs++; - info_idx = i; - } - - if (shdr->sh_flags & SHF_ALLOC) { - if (shdr->sh_name >= strhdr->sh_size) { - pr_err("Invalid ELF section name in module (section %u type %u)\n", - i, shdr->sh_type); - return -ENOEXEC; - } - } - break; + if (shdr->sh_name >= strhdr->sh_size) { + pr_err("Invalid ELF section name in module (section %u type %u)\n", + i, shdr->sh_type); + return -ENOEXEC; } } - if (num_info_secs > 1) { + info->secstrings = secstrings; + return 0; +} + +/** + * elf_validity_cache_index_info() - Validate and cache modinfo section + * @info: Load info to populate the modinfo index on. + * Must have &load_info->sechdrs and &load_info->secstrings populated + * + * Checks that if there is a .modinfo section, it is unique. + * Then, it caches its index in &load_info->index.info. + * Finally, it tries to populate the name to improve error messages. + * + * Return: %0 if valid, %-ENOEXEC if multiple modinfo sections were found. + */ +static int elf_validity_cache_index_info(struct load_info *info) +{ + int info_idx; + + info_idx = find_any_unique_sec(info, ".modinfo"); + + if (info_idx == 0) + /* Early return, no .modinfo */ + return 0; + + if (info_idx < 0) { pr_err("Only one .modinfo section must exist.\n"); - goto no_exec; - } else if (num_info_secs == 1) { - /* Try to find a name early so we can log errors with a module name */ - info->index.info = info_idx; - info->name = get_modinfo(info, "name"); + return -ENOEXEC; } - if (num_sym_secs != 1) { - pr_warn("%s: module has no symbols (stripped?)\n", - info->name ?: "(missing .modinfo section or name field)"); - goto no_exec; - } + info->index.info = info_idx; + /* Try to find a name early so we can log errors with a module name */ + info->name = get_modinfo(info, "name"); - /* Sets internal symbols and strings. */ - info->index.sym = sym_idx; - shdr = &info->sechdrs[sym_idx]; - info->index.str = shdr->sh_link; - info->strtab = (char *)info->hdr + info->sechdrs[info->index.str].sh_offset; + return 0; +} - /* - * The ".gnu.linkonce.this_module" ELF section is special. It is - * what modpost uses to refer to __this_module and let's use rely - * on THIS_MODULE to point to &__this_module properly. The kernel's - * modpost declares it on each modules's *.mod.c file. 
If the struct - module of the kernel changes a full kernel rebuild is required. - * - * We have a few expectaions for this special section, the following - * code validates all this for us: - * - * o Only one section must exist - * o We expect the kernel to always have to allocate it: SHF_ALLOC - * o The section size must match the kernel's run time's struct module - * size - */ - if (num_mod_secs != 1) { - pr_err("module %s: Only one .gnu.linkonce.this_module section must exist.\n", +/** + * elf_validity_cache_index_mod() - Validates and caches this_module section + * @info: Load info to cache this_module on. + * Must have &load_info->sechdrs and &load_info->secstrings populated + * + * The ".gnu.linkonce.this_module" ELF section is special. It is what modpost + * uses to refer to __this_module and lets us rely on THIS_MODULE to point + * to &__this_module properly. The kernel's modpost declares it on each + * module's *.mod.c file. If the struct module of the kernel changes, a full + * kernel rebuild is required. + * + * We have a few expectations for this special section; this function + * validates all this for us: + * + * * The section has contents + * * The section is unique + * * We expect the kernel to always have to allocate it: SHF_ALLOC + * * The section size must match the kernel's run time's struct module + * size + * + * If all checks pass, the index will be cached in &load_info->index.mod + * + * Return: %0 on validation success, %-ENOEXEC on failure + */ +static int elf_validity_cache_index_mod(struct load_info *info) +{ + Elf_Shdr *shdr; + int mod_idx; + + mod_idx = find_any_unique_sec(info, ".gnu.linkonce.this_module"); + if (mod_idx <= 0) { + pr_err("module %s: Exactly one .gnu.linkonce.this_module section must exist.\n", info->name ?: "(missing .modinfo section or name field)"); - goto no_exec; + return -ENOEXEC; } shdr = &info->sechdrs[mod_idx]; - /* - * This is already implied on the switch above, however let's be - * pedantic about it. - */ if (shdr->sh_type == SHT_NOBITS) { pr_err("module %s: .gnu.linkonce.this_module section must have a size set\n", info->name ?: "(missing .modinfo section or name field)"); - goto no_exec; + return -ENOEXEC; } if (!(shdr->sh_flags & SHF_ALLOC)) { pr_err("module %s: .gnu.linkonce.this_module must occupy memory during process execution\n", info->name ?: "(missing .modinfo section or name field)"); - goto no_exec; + return -ENOEXEC; } if (shdr->sh_size != sizeof(struct module)) { pr_err("module %s: .gnu.linkonce.this_module section size must match the kernel's built struct module size at run time\n", info->name ?: "(missing .modinfo section or name field)"); - goto no_exec; + return -ENOEXEC; } info->index.mod = mod_idx; - /* This is temporary: point mod into copy of data. */ - info->mod = (void *)info->hdr + shdr->sh_offset; + return 0; +} - /* - * If we didn't load the .modinfo 'name' field earlier, fall back to - * on-disk struct mod 'name' field. - */ - if (!info->name) - info->name = info->mod->name; +/** + * elf_validity_cache_index_sym() - Validate and cache symtab index + * @info: Load info to cache symtab index in. + * Must have &load_info->sechdrs and &load_info->secstrings populated. + * + * Checks that there is exactly one symbol table, then caches its index in + * &load_info->index.sym. + * + * Return: %0 if valid, %-ENOEXEC on failure.
+ */ +static int elf_validity_cache_index_sym(struct load_info *info) +{ + unsigned int sym_idx; + unsigned int num_sym_secs = 0; + int i; + + for (i = 1; i < info->hdr->e_shnum; i++) { + if (info->sechdrs[i].sh_type == SHT_SYMTAB) { + num_sym_secs++; + sym_idx = i; + } + } + + if (num_sym_secs != 1) { + pr_warn("%s: module has no symbols (stripped?)\n", + info->name ?: "(missing .modinfo section or name field)"); + return -ENOEXEC; + } + + info->index.sym = sym_idx; + + return 0; +} + +/** + * elf_validity_cache_index_str() - Validate and cache strtab index + * @info: Load info to cache strtab index in. + * Must have &load_info->sechdrs and &load_info->secstrings populated. + * Must have &load_info->index.sym populated. + * + * Looks at the symbol table's associated string table, makes sure it is + * in-bounds, and caches it. + * + * Return: %0 if valid, %-ENOEXEC on failure. + */ +static int elf_validity_cache_index_str(struct load_info *info) +{ + unsigned int str_idx = info->sechdrs[info->index.sym].sh_link; + + if (str_idx == SHN_UNDEF || str_idx >= info->hdr->e_shnum) { + pr_err("Invalid ELF sh_link!=SHN_UNDEF(%d) or (sh_link(%d) >= hdr->e_shnum(%d)\n", + str_idx, str_idx, info->hdr->e_shnum); + return -ENOEXEC; + } + + info->index.str = str_idx; + return 0; +} + +/** + * elf_validity_cache_index() - Resolve, validate, cache section indices + * @info: Load info to read from and update. + * &load_info->sechdrs and &load_info->secstrings must be populated. + * @flags: Load flags, relevant to suppress version loading, see + * uapi/linux/module.h + * + * Populates &load_info->index, validating as it goes. + * See child functions for per-field validation: + * + * * elf_validity_cache_index_info() + * * elf_validity_cache_index_mod() + * * elf_validity_cache_index_sym() + * * elf_validity_cache_index_str() + * + * If versioning is not suppressed via flags, load the version index from + * a section called "__versions" with no validation. + * + * If CONFIG_SMP is enabled, load the percpu section by name with no + * validation. + * + * Return: 0 on success, negative error code if an index failed validation. + */ +static int elf_validity_cache_index(struct load_info *info, int flags) +{ + int err; + + err = elf_validity_cache_index_info(info); + if (err < 0) + return err; + err = elf_validity_cache_index_mod(info); + if (err < 0) + return err; + err = elf_validity_cache_index_sym(info); + if (err < 0) + return err; + err = elf_validity_cache_index_str(info); + if (err < 0) + return err; if (flags & MODULE_INIT_IGNORE_MODVERSIONS) info->index.vers = 0; /* Pretend no __versions section! */ @@ -1946,9 +2121,109 @@ static int elf_validity_cache_copy(struct load_info *info, int flags) info->index.pcpu = find_pcpusec(info); return 0; +} -no_exec: - return -ENOEXEC; +/** + * elf_validity_cache_strtab() - Validate and cache symbol string table + * @info: Load info to read from and update. + * Must have &load_info->sechdrs and &load_info->secstrings populated. + * Must have &load_info->index populated. + * + * Checks: + * + * * The string table is not empty. + * * The string table starts and ends with NUL (required by ELF spec). + * * Every &Elf_Sym->st_name offset in the symbol table is inbounds of the + * string table. + * + * And caches the pointer as &load_info->strtab in @info. + * + * Return: 0 on success, negative error code if a check failed. 
+ */ +static int elf_validity_cache_strtab(struct load_info *info) +{ + Elf_Shdr *str_shdr = &info->sechdrs[info->index.str]; + Elf_Shdr *sym_shdr = &info->sechdrs[info->index.sym]; + char *strtab = (char *)info->hdr + str_shdr->sh_offset; + Elf_Sym *syms = (void *)info->hdr + sym_shdr->sh_offset; + int i; + + if (str_shdr->sh_size == 0) { + pr_err("empty symbol string table\n"); + return -ENOEXEC; + } + if (strtab[0] != '\0') { + pr_err("symbol string table missing leading NUL\n"); + return -ENOEXEC; + } + if (strtab[str_shdr->sh_size - 1] != '\0') { + pr_err("symbol string table isn't NUL terminated\n"); + return -ENOEXEC; + } + + /* + * Now that we know strtab is correctly structured, check symbol + * starts are inbounds before they're used later. + */ + for (i = 0; i < sym_shdr->sh_size / sizeof(*syms); i++) { + if (syms[i].st_name >= str_shdr->sh_size) { + pr_err("symbol name out of bounds in string table"); + return -ENOEXEC; + } + } + + info->strtab = strtab; + return 0; +} + +/* + * Check userspace passed ELF module against our expectations, and cache + * useful variables for further processing as we go. + * + * This does basic validity checks against section offsets and sizes, the + * section name string table, and the indices used for it (sh_name). + * + * As a last step, since we're already checking the ELF sections we cache + * useful variables which will be used later for our convenience: + * + * o pointers to section headers + * o cache the modinfo symbol section + * o cache the string symbol section + * o cache the module section + * + * As a last step we set info->mod to the temporary copy of the module in + * info->hdr. The final one will be allocated in move_module(). Any + * modifications we make to our copy of the module will be carried over + * to the final minted module. + */ +static int elf_validity_cache_copy(struct load_info *info, int flags) +{ + int err; + + err = elf_validity_cache_sechdrs(info); + if (err < 0) + return err; + err = elf_validity_cache_secstrings(info); + if (err < 0) + return err; + err = elf_validity_cache_index(info, flags); + if (err < 0) + return err; + err = elf_validity_cache_strtab(info); + if (err < 0) + return err; + + /* This is temporary: point mod into copy of data. */ + info->mod = (void *)info->hdr + info->sechdrs[info->index.mod].sh_offset; + + /* + * If we didn't load the .modinfo 'name' field earlier, fall back to + * on-disk struct mod 'name' field. + */ + if (!info->name) + info->name = info->mod->name; + + return 0; } #define COPY_CHUNK_SIZE (16*PAGE_SIZE) diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 3ef047ed9705..be62f0ea1814 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -2552,6 +2552,8 @@ unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status) trace_flags |= TRACE_FLAG_NEED_RESCHED; if (test_preempt_need_resched()) trace_flags |= TRACE_FLAG_PREEMPT_RESCHED; + if (IS_ENABLED(CONFIG_ARCH_HAS_PREEMPT_LAZY) && tif_test_bit(TIF_NEED_RESCHED_LAZY)) + trace_flags |= TRACE_FLAG_NEED_RESCHED_LAZY; return (trace_flags << 16) | (min_t(unsigned int, pc & 0xff, 0xf)) | (min_t(unsigned int, migration_disable_value(), 0xf)) << 4; } diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index e08aee34ef63..da748b7cbc4d 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c @@ -462,17 +462,29 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry) bh_off ? 
'b' : '.'; - switch (entry->flags & (TRACE_FLAG_NEED_RESCHED | + switch (entry->flags & (TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_NEED_RESCHED_LAZY | TRACE_FLAG_PREEMPT_RESCHED)) { + case TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_NEED_RESCHED_LAZY | TRACE_FLAG_PREEMPT_RESCHED: + need_resched = 'B'; + break; case TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_PREEMPT_RESCHED: need_resched = 'N'; break; + case TRACE_FLAG_NEED_RESCHED_LAZY | TRACE_FLAG_PREEMPT_RESCHED: + need_resched = 'L'; + break; + case TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_NEED_RESCHED_LAZY: + need_resched = 'b'; + break; case TRACE_FLAG_NEED_RESCHED: need_resched = 'n'; break; case TRACE_FLAG_PREEMPT_RESCHED: need_resched = 'p'; break; + case TRACE_FLAG_NEED_RESCHED_LAZY: + need_resched = 'l'; + break; default: need_resched = '.'; break; diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 1e37c62e8595..f340017585c5 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -2920,6 +2920,111 @@ config TEST_KMOD If unsure, say N. +config TEST_RUNTIME + bool + +config TEST_RUNTIME_MODULE + bool + +config TEST_KALLSYMS + tristate "module kallsyms find_symbol() test" + depends on m + select TEST_RUNTIME + select TEST_RUNTIME_MODULE + select TEST_KALLSYMS_A + select TEST_KALLSYMS_B + select TEST_KALLSYMS_C + select TEST_KALLSYMS_D + help + This allows us to stress test find_symbol() through the kallsyms + used to place symbols on the kernel ELF kallsyms and modules kallsyms + where we place kernel symbols such as exported symbols. + + We have four test modules: + + A: has KALLSYMS_NUMSYMS exported symbols + B: uses one of A's symbols + C: adds KALLSYMS_SCALE_FACTOR * KALLSYMS_NUMSYMS exported + D: adds 2 * the symbols of C + + We stress test find_symbol() through two means: + + 1) Upon load of B it will trigger simplify_symbols() to look for the + one symbol it uses from the module A with tons of symbols. This is an + indirect way for us to have B call resolve_symbol_wait() upon module + load. This will eventually call find_symbol() which will eventually + try to find the symbols used with find_exported_symbol_in_section(). + find_exported_symbol_in_section() uses bsearch(), so a binary search + is done for each symbol. Binary search will at worst be O(log(n)), so the + larger TEST_KALLSYMS_NUMSYMS the worse the search. + + 2) The selftests should load C first, before B. Upon B's load, towards + the end, right before we call module B's init routine, we get + complete_formation() called on the module. That will first check + for duplicate symbols with the call to verify_exported_symbols(). + That is when we'll force iteration on module C's insane symbol list. + Since it has 10 * KALLSYMS_NUMSYMS it means we can first test + just loading B without C. The amount of time it takes to load C vs. + B can give us an idea of the impact of the growth of the symbol space and + give us a projection. Module B only uses one symbol from A, so to allow + this scaling in module C to be proportional; if it used more symbols + then the first test would be doing more and increasing just the + search space would be slightly different. The last module, module D, + will just increase the search space by twice the number of symbols in + C, so as to allow for full projections. + + tools/testing/selftests/module/find_symbol.sh + + The current defaults will incur a build delay of about 7 minutes + on an x86_64 with only 8 cores. Enable this only if you want to + stress test find_symbol() with thousands of symbols.
At the same + time this is also useful to test building modules with thousands of + symbols, and if BTF is enabled this also stress tests adding BTF + information for each module. Currently enabling many more symbols + will segfault the build system. + + If unsure, say N. + +if TEST_KALLSYMS + +config TEST_KALLSYMS_A + tristate + depends on m + +config TEST_KALLSYMS_B + tristate + depends on m + +config TEST_KALLSYMS_C + tristate + depends on m + +config TEST_KALLSYMS_D + tristate + depends on m + +config TEST_KALLSYMS_NUMSYMS + int "test kallsyms number of symbols" + default 100 + help + The number of symbols to create on TEST_KALLSYMS_A, only one of which + module TEST_KALLSYMS_B will use. This will also be used + for how many symbols TEST_KALLSYMS_C will have, scaled up by + TEST_KALLSYMS_SCALE_FACTOR. Note that setting this to 10,000 will + trigger a segfault today; don't use anything close to it unless + you are aware that this should not be used for automated build tests. + +config TEST_KALLSYMS_SCALE_FACTOR + int "test kallsyms scale factor" + default 8 + help + How many more unused symbols will TEST_KALLSYMS_C have than + TEST_KALLSYMS_A. If 8, then module C will have 8 * the symbols + of module A. Then TEST_KALLSYMS_D will have double the number + of symbols of C, to allow projections. + +endif # TEST_KALLSYMS + config TEST_DEBUG_VIRTUAL tristate "Test CONFIG_DEBUG_VIRTUAL feature" depends on DEBUG_VIRTUAL diff --git a/lib/Makefile b/lib/Makefile index 8000f2270462..a8155c972f02 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -98,6 +98,7 @@ obj-$(CONFIG_TEST_XARRAY) += test_xarray.o obj-$(CONFIG_TEST_MAPLE_TREE) += test_maple_tree.o obj-$(CONFIG_TEST_PARMAN) += test_parman.o obj-$(CONFIG_TEST_KMOD) += test_kmod.o +obj-$(CONFIG_TEST_RUNTIME) += tests/ obj-$(CONFIG_TEST_DEBUG_VIRTUAL) += test_debug_virtual.o obj-$(CONFIG_TEST_MEMCAT_P) += test_memcat_p.o obj-$(CONFIG_TEST_OBJAGG) += test_objagg.o diff --git a/lib/tests/Makefile b/lib/tests/Makefile new file mode 100644 index 000000000000..8e4f42cb9c54 --- /dev/null +++ b/lib/tests/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_TEST_RUNTIME_MODULE) += module/ diff --git a/lib/tests/module/.gitignore b/lib/tests/module/.gitignore new file mode 100644 index 000000000000..8be7891b250f --- /dev/null +++ b/lib/tests/module/.gitignore @@ -0,0 +1,4 @@ +test_kallsyms_a.c +test_kallsyms_b.c +test_kallsyms_c.c +test_kallsyms_d.c diff --git a/lib/tests/module/Makefile b/lib/tests/module/Makefile new file mode 100644 index 000000000000..af5c27b996cb --- /dev/null +++ b/lib/tests/module/Makefile @@ -0,0 +1,15 @@ +obj-$(CONFIG_TEST_KALLSYMS_A) += test_kallsyms_a.o +obj-$(CONFIG_TEST_KALLSYMS_B) += test_kallsyms_b.o +obj-$(CONFIG_TEST_KALLSYMS_C) += test_kallsyms_c.o +obj-$(CONFIG_TEST_KALLSYMS_D) += test_kallsyms_d.o + +$(obj)/%.c: FORCE + @$(kecho) " GEN $@" + $(Q)$(srctree)/lib/tests/module/gen_test_kallsyms.sh $@\ + $(CONFIG_TEST_KALLSYMS_NUMSYMS) \ + $(CONFIG_TEST_KALLSYMS_SCALE_FACTOR) + +clean-files += test_kallsyms_a.c +clean-files += test_kallsyms_b.c +clean-files += test_kallsyms_c.c +clean-files += test_kallsyms_d.c diff --git a/lib/tests/module/gen_test_kallsyms.sh b/lib/tests/module/gen_test_kallsyms.sh new file mode 100755 index 000000000000..3f2c626350ad --- /dev/null +++ b/lib/tests/module/gen_test_kallsyms.sh @@ -0,0 +1,129 @@ +#!/bin/bash + +TARGET=$(basename $1) +DIR=lib/tests/module +TARGET="$DIR/$TARGET" +NUM_SYMS=$2 +SCALE_FACTOR=$3 +TEST_TYPE=$(echo $TARGET | sed -e 's|lib/tests/module/test_kallsyms_||g') +TEST_TYPE=$(echo $TEST_TYPE
| sed -e 's|.c||g') + +gen_template_module_header() +{ + cat <<____END_MODULE +// SPDX-License-Identifier: GPL-2.0-or-later OR copyleft-next-0.3.1 +/* + * Copyright (C) 2023 Luis Chamberlain <mcgrof@kernel.org> + * + * Automatically generated code for testing, do not edit manually. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/printk.h> + +____END_MODULE +} + +gen_num_syms() +{ + PREFIX=$1 + NUM=$2 + for i in $(seq 1 $NUM); do + printf "int auto_test_%s_%010d = 0;\n" $PREFIX $i + printf "EXPORT_SYMBOL_GPL(auto_test_%s_%010d);\n" $PREFIX $i + done + echo +} + +gen_template_module_data_a() +{ + gen_num_syms a $1 + cat <<____END_MODULE +static int auto_runtime_test(void) +{ + return 0; +} + +____END_MODULE +} + +gen_template_module_data_b() +{ + printf "\nextern int auto_test_a_%010d;\n\n" 28 + echo "static int auto_runtime_test(void)" + echo "{" + printf "\nreturn auto_test_a_%010d;\n" 28 + echo "}" +} + +gen_template_module_data_c() +{ + gen_num_syms c $1 + cat <<____END_MODULE +static int auto_runtime_test(void) +{ + return 0; +} + +____END_MODULE +} + +gen_template_module_data_d() +{ + gen_num_syms d $1 + cat <<____END_MODULE +static int auto_runtime_test(void) +{ + return 0; +} + +____END_MODULE +} + +gen_template_module_exit() +{ + cat <<____END_MODULE +static int __init auto_test_module_init(void) +{ + return auto_runtime_test(); +} +module_init(auto_test_module_init); + +static void __exit auto_test_module_exit(void) +{ +} +module_exit(auto_test_module_exit); + +MODULE_AUTHOR("Luis Chamberlain <mcgrof@kernel.org>"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Test module for kallsyms"); +____END_MODULE +} + +case $TEST_TYPE in + a) + gen_template_module_header > $TARGET + gen_template_module_data_a $NUM_SYMS >> $TARGET + gen_template_module_exit >> $TARGET + ;; + b) + gen_template_module_header > $TARGET + gen_template_module_data_b >> $TARGET + gen_template_module_exit >> $TARGET + ;; + c) + gen_template_module_header > $TARGET + gen_template_module_data_c $((NUM_SYMS * SCALE_FACTOR)) >> $TARGET + gen_template_module_exit >> $TARGET + ;; + d) + gen_template_module_header > $TARGET + gen_template_module_data_d $((NUM_SYMS * SCALE_FACTOR * 2)) >> $TARGET + gen_template_module_exit >> $TARGET + ;; + *) + ;; +esac diff --git a/mm/mm_init.c b/mm/mm_init.c index 1c205b0a86ed..24b68b425afb 100644 --- a/mm/mm_init.c +++ b/mm/mm_init.c @@ -722,6 +722,9 @@ static void __meminit init_reserved_page(unsigned long pfn, int nid) break; } __init_single_page(pfn_to_page(pfn), pfn, zid, nid); + + if (pageblock_aligned(pfn)) + set_pageblock_migratetype(pfn_to_page(pfn), MIGRATE_MOVABLE); } #else static inline void pgdat_set_deferred_range(pg_data_t *pgdat) {} @@ -2572,8 +2575,8 @@ static void __init report_meminit(void) stack = "off"; pr_info("mem auto-init: stack:%s, heap alloc:%s, heap free:%s\n", - stack, want_init_on_alloc(GFP_KERNEL) ? "on" : "off", - want_init_on_free() ? "on" : "off"); + stack, str_on_off(want_init_on_alloc(GFP_KERNEL)), + str_on_off(want_init_on_free())); if (want_init_on_free()) pr_info("mem auto-init: clearing system memory may take some time...\n"); } diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index 1f6d083682b8..b31192d473d0 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -1318,7 +1318,8 @@ static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err) struct mgmt_mode *cp; /* Make sure cmd still outstanding. 
*/ - if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev)) + if (err == -ECANCELED || + cmd != pending_find(MGMT_OP_SET_POWERED, hdev)) return; cp = cmd->param; @@ -1351,7 +1352,13 @@ static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err) static int set_powered_sync(struct hci_dev *hdev, void *data) { struct mgmt_pending_cmd *cmd = data; - struct mgmt_mode *cp = cmd->param; + struct mgmt_mode *cp; + + /* Make sure cmd still outstanding. */ + if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev)) + return -ECANCELED; + + cp = cmd->param; BT_DBG("%s", hdev->name); @@ -1511,7 +1518,8 @@ static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data, bt_dev_dbg(hdev, "err %d", err); /* Make sure cmd still outstanding. */ - if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev)) + if (err == -ECANCELED || + cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev)) return; hci_dev_lock(hdev); @@ -1685,7 +1693,8 @@ static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data, bt_dev_dbg(hdev, "err %d", err); /* Make sure cmd still outstanding. */ - if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) + if (err == -ECANCELED || + cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) return; hci_dev_lock(hdev); @@ -1917,7 +1926,7 @@ static void set_ssp_complete(struct hci_dev *hdev, void *data, int err) bool changed; /* Make sure cmd still outstanding. */ - if (cmd != pending_find(MGMT_OP_SET_SSP, hdev)) + if (err == -ECANCELED || cmd != pending_find(MGMT_OP_SET_SSP, hdev)) return; if (err) { @@ -3841,7 +3850,8 @@ static void set_name_complete(struct hci_dev *hdev, void *data, int err) bt_dev_dbg(hdev, "err %d", err); - if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev)) + if (err == -ECANCELED || + cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev)) return; if (status) { @@ -4016,7 +4026,8 @@ static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err) struct sk_buff *skb = cmd->skb; u8 status = mgmt_status(err); - if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) + if (err == -ECANCELED || + cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) return; if (!status) { @@ -5907,13 +5918,16 @@ static void start_discovery_complete(struct hci_dev *hdev, void *data, int err) { struct mgmt_pending_cmd *cmd = data; + bt_dev_dbg(hdev, "err %d", err); + + if (err == -ECANCELED) + return; + if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) && cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) && cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev)) return; - bt_dev_dbg(hdev, "err %d", err); - mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err), cmd->param, 1); mgmt_pending_remove(cmd); @@ -6146,7 +6160,8 @@ static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err) { struct mgmt_pending_cmd *cmd = data; - if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev)) + if (err == -ECANCELED || + cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev)) return; bt_dev_dbg(hdev, "err %d", err); @@ -8137,7 +8152,8 @@ static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data, u8 status = mgmt_status(err); u16 eir_len; - if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev)) + if (err == -ECANCELED || + cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev)) return; if (!status) { diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index 1b8e468d24cf..78f7bca24487 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@ -143,6 +143,7 @@ static void 
sco_sock_timeout(struct work_struct *work) sco_conn_lock(conn); if (!conn->hcon) { sco_conn_unlock(conn); + sco_conn_put(conn); return; } sk = sco_sock_hold(conn); @@ -192,7 +193,6 @@ static struct sco_conn *sco_conn_add(struct hci_conn *hcon) conn->hcon = hcon; sco_conn_unlock(conn); } - sco_conn_put(conn); return conn; } diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index dd142f444659..58df76fe408a 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -2442,7 +2442,9 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) tgt_net = rtnl_get_net_ns_capable(skb->sk, netnsid); if (IS_ERR(tgt_net)) { NL_SET_ERR_MSG(extack, "Invalid target network namespace id"); - return PTR_ERR(tgt_net); + err = PTR_ERR(tgt_net); + netnsid = -1; + goto out; } break; case IFLA_EXT_MASK: @@ -2457,7 +2459,8 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) default: if (cb->strict_check) { NL_SET_ERR_MSG(extack, "Unsupported attribute in link dump request"); - return -EINVAL; + err = -EINVAL; + goto out; } } } @@ -2479,11 +2482,14 @@ walk_entries: break; } - if (kind_ops) - rtnl_link_ops_put(kind_ops, ops_srcu_index); cb->seq = tgt_net->dev_base_seq; nl_dump_check_consistent(cb, nlmsg_hdr(skb)); + +out: + + if (kind_ops) + rtnl_link_ops_put(kind_ops, ops_srcu_index); if (netnsid >= 0) put_net(tgt_net); diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c index 9e64496a5c1c..31a416ee21ad 100644 --- a/net/hsr/hsr_device.c +++ b/net/hsr/hsr_device.c @@ -268,6 +268,8 @@ static struct sk_buff *hsr_init_skb(struct hsr_port *master) skb->dev = master->dev; skb->priority = TC_PRIO_CONTROL; + skb_reset_network_header(skb); + skb_reset_transport_header(skb); if (dev_hard_header(skb, skb->dev, ETH_P_PRP, hsr->sup_multicast_addr, skb->dev->dev_addr, skb->len) <= 0) @@ -275,8 +277,6 @@ static struct sk_buff *hsr_init_skb(struct hsr_port *master) skb_reset_mac_header(skb); skb_reset_mac_len(skb); - skb_reset_network_header(skb); - skb_reset_transport_header(skb); return skb; out: diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 491c2c6b683e..6872b5aff73e 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -1191,7 +1191,7 @@ no_ownership: drop: __inet_csk_reqsk_queue_drop(sk_listener, oreq, true); - reqsk_put(req); + reqsk_put(oreq); } static bool reqsk_queue_hash_req(struct request_sock *req, diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index c58dd78509a2..c5b8ec5c0a8c 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -120,6 +120,11 @@ static void ipmr_expire_process(struct timer_list *t); lockdep_rtnl_is_held() || \ list_empty(&net->ipv4.mr_tables)) +static bool ipmr_can_free_table(struct net *net) +{ + return !check_net(net) || !net->ipv4.mr_rules_ops; +} + static struct mr_table *ipmr_mr_table_iter(struct net *net, struct mr_table *mrt) { @@ -137,7 +142,7 @@ static struct mr_table *ipmr_mr_table_iter(struct net *net, return ret; } -static struct mr_table *ipmr_get_table(struct net *net, u32 id) +static struct mr_table *__ipmr_get_table(struct net *net, u32 id) { struct mr_table *mrt; @@ -148,6 +153,16 @@ static struct mr_table *ipmr_get_table(struct net *net, u32 id) return NULL; } +static struct mr_table *ipmr_get_table(struct net *net, u32 id) +{ + struct mr_table *mrt; + + rcu_read_lock(); + mrt = __ipmr_get_table(net, id); + rcu_read_unlock(); + return mrt; +} + static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4, struct mr_table **mrt) { @@ -189,7 
+204,7 @@ static int ipmr_rule_action(struct fib_rule *rule, struct flowi *flp, arg->table = fib_rule_get_table(rule, arg); - mrt = ipmr_get_table(rule->fr_net, arg->table); + mrt = __ipmr_get_table(rule->fr_net, arg->table); if (!mrt) return -EAGAIN; res->mrt = mrt; @@ -302,6 +317,11 @@ EXPORT_SYMBOL(ipmr_rule_default); #define ipmr_for_each_table(mrt, net) \ for (mrt = net->ipv4.mrt; mrt; mrt = NULL) +static bool ipmr_can_free_table(struct net *net) +{ + return !check_net(net); +} + static struct mr_table *ipmr_mr_table_iter(struct net *net, struct mr_table *mrt) { @@ -315,6 +335,8 @@ static struct mr_table *ipmr_get_table(struct net *net, u32 id) return net->ipv4.mrt; } +#define __ipmr_get_table ipmr_get_table + static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4, struct mr_table **mrt) { @@ -403,7 +425,7 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id) if (id != RT_TABLE_DEFAULT && id >= 1000000000) return ERR_PTR(-EINVAL); - mrt = ipmr_get_table(net, id); + mrt = __ipmr_get_table(net, id); if (mrt) return mrt; @@ -413,6 +435,10 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id) static void ipmr_free_table(struct mr_table *mrt) { + struct net *net = read_pnet(&mrt->net); + + WARN_ON_ONCE(!ipmr_can_free_table(net)); + timer_shutdown_sync(&mrt->ipmr_expire_timer); mroute_clean_tables(mrt, MRT_FLUSH_VIFS | MRT_FLUSH_VIFS_STATIC | MRT_FLUSH_MFC | MRT_FLUSH_MFC_STATIC); @@ -1374,7 +1400,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, sockptr_t optval, goto out_unlock; } - mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT); + mrt = __ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT); if (!mrt) { ret = -ENOENT; goto out_unlock; @@ -2262,11 +2288,13 @@ int ipmr_get_route(struct net *net, struct sk_buff *skb, struct mr_table *mrt; int err; - mrt = ipmr_get_table(net, RT_TABLE_DEFAULT); - if (!mrt) + rcu_read_lock(); + mrt = __ipmr_get_table(net, RT_TABLE_DEFAULT); + if (!mrt) { + rcu_read_unlock(); return -ENOENT; + } - rcu_read_lock(); cache = ipmr_cache_find(mrt, saddr, daddr); if (!cache && skb->dev) { int vif = ipmr_find_vif(mrt, skb->dev); @@ -2550,7 +2578,7 @@ static int ipmr_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, grp = nla_get_in_addr_default(tb[RTA_DST], 0); tableid = nla_get_u32_default(tb[RTA_TABLE], 0); - mrt = ipmr_get_table(net, tableid ? tableid : RT_TABLE_DEFAULT); + mrt = __ipmr_get_table(net, tableid ? 
tableid : RT_TABLE_DEFAULT); if (!mrt) { err = -ENOENT; goto errout_free; @@ -2604,7 +2632,7 @@ static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb) if (filter.table_id) { struct mr_table *mrt; - mrt = ipmr_get_table(sock_net(skb->sk), filter.table_id); + mrt = __ipmr_get_table(sock_net(skb->sk), filter.table_id); if (!mrt) { if (rtnl_msg_family(cb->nlh) != RTNL_FAMILY_IPMR) return skb->len; @@ -2712,7 +2740,7 @@ static int rtm_to_ipmr_mfcc(struct net *net, struct nlmsghdr *nlh, break; } } - mrt = ipmr_get_table(net, tblid); + mrt = __ipmr_get_table(net, tblid); if (!mrt) { ret = -ENOENT; goto out; @@ -2920,13 +2948,15 @@ static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos) struct net *net = seq_file_net(seq); struct mr_table *mrt; - mrt = ipmr_get_table(net, RT_TABLE_DEFAULT); - if (!mrt) + rcu_read_lock(); + mrt = __ipmr_get_table(net, RT_TABLE_DEFAULT); + if (!mrt) { + rcu_read_unlock(); return ERR_PTR(-ENOENT); + } iter->mrt = mrt; - rcu_read_lock(); return mr_vif_seq_start(seq, pos); } diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 96b5b2b0d507..c489a1e6aec9 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -2570,6 +2570,24 @@ static struct inet6_dev *addrconf_add_dev(struct net_device *dev) return idev; } +static void delete_tempaddrs(struct inet6_dev *idev, + struct inet6_ifaddr *ifp) +{ + struct inet6_ifaddr *ift, *tmp; + + write_lock_bh(&idev->lock); + list_for_each_entry_safe(ift, tmp, &idev->tempaddr_list, tmp_list) { + if (ift->ifpub != ifp) + continue; + + in6_ifa_hold(ift); + write_unlock_bh(&idev->lock); + ipv6_del_addr(ift); + write_lock_bh(&idev->lock); + } + write_unlock_bh(&idev->lock); +} + static void manage_tempaddrs(struct inet6_dev *idev, struct inet6_ifaddr *ifp, __u32 valid_lft, __u32 prefered_lft, @@ -3124,11 +3142,12 @@ static int inet6_addr_del(struct net *net, int ifindex, u32 ifa_flags, in6_ifa_hold(ifp); read_unlock_bh(&idev->lock); - if (!(ifp->flags & IFA_F_TEMPORARY) && - (ifa_flags & IFA_F_MANAGETEMPADDR)) - manage_tempaddrs(idev, ifp, 0, 0, false, - jiffies); ipv6_del_addr(ifp); + + if (!(ifp->flags & IFA_F_TEMPORARY) && + (ifp->flags & IFA_F_MANAGETEMPADDR)) + delete_tempaddrs(idev, ifp); + addrconf_verify_rtnl(net); if (ipv6_addr_is_multicast(pfx)) { ipv6_mc_config(net->ipv6.mc_autojoin_sk, @@ -4952,14 +4971,12 @@ static int inet6_addr_modify(struct net *net, struct inet6_ifaddr *ifp, } if (was_managetempaddr || ifp->flags & IFA_F_MANAGETEMPADDR) { - if (was_managetempaddr && - !(ifp->flags & IFA_F_MANAGETEMPADDR)) { - cfg->valid_lft = 0; - cfg->preferred_lft = 0; - } - manage_tempaddrs(ifp->idev, ifp, cfg->valid_lft, - cfg->preferred_lft, !was_managetempaddr, - jiffies); + if (was_managetempaddr && !(ifp->flags & IFA_F_MANAGETEMPADDR)) + delete_tempaddrs(ifp->idev, ifp); + else + manage_tempaddrs(ifp->idev, ifp, cfg->valid_lft, + cfg->preferred_lft, !was_managetempaddr, + jiffies); } addrconf_verify_rtnl(net); diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index d66f58932a79..7f1902ac3586 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -108,6 +108,11 @@ static void ipmr_expire_process(struct timer_list *t); lockdep_rtnl_is_held() || \ list_empty(&net->ipv6.mr6_tables)) +static bool ip6mr_can_free_table(struct net *net) +{ + return !check_net(net) || !net->ipv6.mr6_rules_ops; +} + static struct mr_table *ip6mr_mr_table_iter(struct net *net, struct mr_table *mrt) { @@ -125,7 +130,7 @@ static struct mr_table *ip6mr_mr_table_iter(struct net *net, return ret; } -static struct 
mr_table *ip6mr_get_table(struct net *net, u32 id) +static struct mr_table *__ip6mr_get_table(struct net *net, u32 id) { struct mr_table *mrt; @@ -136,6 +141,16 @@ static struct mr_table *ip6mr_get_table(struct net *net, u32 id) return NULL; } +static struct mr_table *ip6mr_get_table(struct net *net, u32 id) +{ + struct mr_table *mrt; + + rcu_read_lock(); + mrt = __ip6mr_get_table(net, id); + rcu_read_unlock(); + return mrt; +} + static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6, struct mr_table **mrt) { @@ -177,7 +192,7 @@ static int ip6mr_rule_action(struct fib_rule *rule, struct flowi *flp, arg->table = fib_rule_get_table(rule, arg); - mrt = ip6mr_get_table(rule->fr_net, arg->table); + mrt = __ip6mr_get_table(rule->fr_net, arg->table); if (!mrt) return -EAGAIN; res->mrt = mrt; @@ -291,6 +306,11 @@ EXPORT_SYMBOL(ip6mr_rule_default); #define ip6mr_for_each_table(mrt, net) \ for (mrt = net->ipv6.mrt6; mrt; mrt = NULL) +static bool ip6mr_can_free_table(struct net *net) +{ + return !check_net(net); +} + static struct mr_table *ip6mr_mr_table_iter(struct net *net, struct mr_table *mrt) { @@ -304,6 +324,8 @@ static struct mr_table *ip6mr_get_table(struct net *net, u32 id) return net->ipv6.mrt6; } +#define __ip6mr_get_table ip6mr_get_table + static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6, struct mr_table **mrt) { @@ -382,7 +404,7 @@ static struct mr_table *ip6mr_new_table(struct net *net, u32 id) { struct mr_table *mrt; - mrt = ip6mr_get_table(net, id); + mrt = __ip6mr_get_table(net, id); if (mrt) return mrt; @@ -392,6 +414,10 @@ static struct mr_table *ip6mr_new_table(struct net *net, u32 id) static void ip6mr_free_table(struct mr_table *mrt) { + struct net *net = read_pnet(&mrt->net); + + WARN_ON_ONCE(!ip6mr_can_free_table(net)); + timer_shutdown_sync(&mrt->ipmr_expire_timer); mroute_clean_tables(mrt, MRT6_FLUSH_MIFS | MRT6_FLUSH_MIFS_STATIC | MRT6_FLUSH_MFC | MRT6_FLUSH_MFC_STATIC); @@ -411,13 +437,15 @@ static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos) struct net *net = seq_file_net(seq); struct mr_table *mrt; - mrt = ip6mr_get_table(net, RT6_TABLE_DFLT); - if (!mrt) + rcu_read_lock(); + mrt = __ip6mr_get_table(net, RT6_TABLE_DFLT); + if (!mrt) { + rcu_read_unlock(); return ERR_PTR(-ENOENT); + } iter->mrt = mrt; - rcu_read_lock(); return mr_vif_seq_start(seq, pos); } @@ -2278,11 +2306,13 @@ int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm, struct mfc6_cache *cache; struct rt6_info *rt = dst_rt6_info(skb_dst(skb)); - mrt = ip6mr_get_table(net, RT6_TABLE_DFLT); - if (!mrt) + rcu_read_lock(); + mrt = __ip6mr_get_table(net, RT6_TABLE_DFLT); + if (!mrt) { + rcu_read_unlock(); return -ENOENT; + } - rcu_read_lock(); cache = ip6mr_cache_find(mrt, &rt->rt6i_src.addr, &rt->rt6i_dst.addr); if (!cache && skb->dev) { int vif = ip6mr_find_vif(mrt, skb->dev); @@ -2562,7 +2592,7 @@ static int ip6mr_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, grp = nla_get_in6_addr(tb[RTA_DST]); tableid = nla_get_u32_default(tb[RTA_TABLE], 0); - mrt = ip6mr_get_table(net, tableid ?: RT_TABLE_DEFAULT); + mrt = __ip6mr_get_table(net, tableid ?: RT_TABLE_DEFAULT); if (!mrt) { NL_SET_ERR_MSG_MOD(extack, "MR table does not exist"); return -ENOENT; @@ -2609,7 +2639,7 @@ static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb) if (filter.table_id) { struct mr_table *mrt; - mrt = ip6mr_get_table(sock_net(skb->sk), filter.table_id); + mrt = __ip6mr_get_table(sock_net(skb->sk), filter.table_id); if (!mrt) { if 
(rtnl_msg_family(cb->nlh) != RTNL_FAMILY_IP6MR) return skb->len; diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index c00323fa9eb6..7929df08d4e0 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c @@ -1236,7 +1236,9 @@ static int iucv_sock_recvmsg(struct socket *sock, struct msghdr *msg, return -EOPNOTSUPP; /* receive/dequeue next skb: - * the function understands MSG_PEEK and, thus, does not dequeue skb */ + * the function understands MSG_PEEK and, thus, does not dequeue skb + * only refcount is increased. + */ skb = skb_recv_datagram(sk, flags, &err); if (!skb) { if (sk->sk_shutdown & RCV_SHUTDOWN) @@ -1252,9 +1254,8 @@ static int iucv_sock_recvmsg(struct socket *sock, struct msghdr *msg, cskb = skb; if (skb_copy_datagram_msg(cskb, offset, msg, copied)) { - if (!(flags & MSG_PEEK)) - skb_queue_head(&sk->sk_receive_queue, skb); - return -EFAULT; + err = -EFAULT; + goto err_out; } /* SOCK_SEQPACKET: set MSG_TRUNC if recv buf size is too small */ @@ -1271,11 +1272,8 @@ static int iucv_sock_recvmsg(struct socket *sock, struct msghdr *msg, err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS, sizeof(IUCV_SKB_CB(skb)->class), (void *)&IUCV_SKB_CB(skb)->class); - if (err) { - if (!(flags & MSG_PEEK)) - skb_queue_head(&sk->sk_receive_queue, skb); - return err; - } + if (err) + goto err_out; /* Mark read part of skb as used */ if (!(flags & MSG_PEEK)) { @@ -1331,8 +1329,18 @@ done: /* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */ if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC)) copied = rlen; + if (flags & MSG_PEEK) + skb_unref(skb); return copied; + +err_out: + if (!(flags & MSG_PEEK)) + skb_queue_head(&sk->sk_receive_queue, skb); + else + skb_unref(skb); + + return err; } static inline __poll_t iucv_accept_poll(struct sock *parent) diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 3eec23ac5ab1..369a2f2e459c 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -1870,15 +1870,31 @@ static __net_exit void l2tp_pre_exit_net(struct net *net) } } +static int l2tp_idr_item_unexpected(int id, void *p, void *data) +{ + const char *idr_name = data; + + pr_err("l2tp: %s IDR not empty at net %d exit\n", idr_name, id); + WARN_ON_ONCE(1); + return 1; +} + static __net_exit void l2tp_exit_net(struct net *net) { struct l2tp_net *pn = l2tp_pernet(net); - WARN_ON_ONCE(!idr_is_empty(&pn->l2tp_v2_session_idr)); + /* Our per-net IDRs should be empty. Check that is so, to + * help catch cleanup races or refcnt leaks. 
+ */ + idr_for_each(&pn->l2tp_v2_session_idr, l2tp_idr_item_unexpected, + "v2_session"); + idr_for_each(&pn->l2tp_v3_session_idr, l2tp_idr_item_unexpected, + "v3_session"); + idr_for_each(&pn->l2tp_tunnel_idr, l2tp_idr_item_unexpected, + "tunnel"); + idr_destroy(&pn->l2tp_v2_session_idr); - WARN_ON_ONCE(!idr_is_empty(&pn->l2tp_v3_session_idr)); idr_destroy(&pn->l2tp_v3_session_idr); - WARN_ON_ONCE(!idr_is_empty(&pn->l2tp_tunnel_idr)); idr_destroy(&pn->l2tp_tunnel_idr); } diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index 4eb52add7103..0259cde394ba 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c @@ -1098,7 +1098,7 @@ static int llc_ui_setsockopt(struct socket *sock, int level, int optname, lock_sock(sk); if (unlikely(level != SOL_LLC || optlen != sizeof(int))) goto out; - rc = copy_from_sockptr(&opt, optval, sizeof(opt)); + rc = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen); if (rc) goto out; rc = -EINVAL; diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index dd3517b0fdfd..f4e7b5e4bb59 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -2181,9 +2181,14 @@ netlink_ack_tlv_len(struct netlink_sock *nlk, int err, return tlvlen; } +static bool nlmsg_check_in_payload(const struct nlmsghdr *nlh, const void *addr) +{ + return !WARN_ON(addr < nlmsg_data(nlh) || + addr - (const void *) nlh >= nlh->nlmsg_len); +} + static void -netlink_ack_tlv_fill(struct sk_buff *in_skb, struct sk_buff *skb, - const struct nlmsghdr *nlh, int err, +netlink_ack_tlv_fill(struct sk_buff *skb, const struct nlmsghdr *nlh, int err, const struct netlink_ext_ack *extack) { if (extack->_msg) @@ -2195,9 +2200,7 @@ netlink_ack_tlv_fill(struct sk_buff *in_skb, struct sk_buff *skb, if (!err) return; - if (extack->bad_attr && - !WARN_ON((u8 *)extack->bad_attr < in_skb->data || - (u8 *)extack->bad_attr >= in_skb->data + in_skb->len)) + if (extack->bad_attr && nlmsg_check_in_payload(nlh, extack->bad_attr)) WARN_ON(nla_put_u32(skb, NLMSGERR_ATTR_OFFS, (u8 *)extack->bad_attr - (const u8 *)nlh)); if (extack->policy) @@ -2206,9 +2209,7 @@ netlink_ack_tlv_fill(struct sk_buff *in_skb, struct sk_buff *skb, if (extack->miss_type) WARN_ON(nla_put_u32(skb, NLMSGERR_ATTR_MISS_TYPE, extack->miss_type)); - if (extack->miss_nest && - !WARN_ON((u8 *)extack->miss_nest < in_skb->data || - (u8 *)extack->miss_nest > in_skb->data + in_skb->len)) + if (extack->miss_nest && nlmsg_check_in_payload(nlh, extack->miss_nest)) WARN_ON(nla_put_u32(skb, NLMSGERR_ATTR_MISS_NEST, (u8 *)extack->miss_nest - (const u8 *)nlh)); } @@ -2237,7 +2238,7 @@ static int netlink_dump_done(struct netlink_sock *nlk, struct sk_buff *skb, if (extack_len) { nlh->nlmsg_flags |= NLM_F_ACK_TLVS; if (skb_tailroom(skb) >= extack_len) { - netlink_ack_tlv_fill(cb->skb, skb, cb->nlh, + netlink_ack_tlv_fill(skb, cb->nlh, nlk->dump_done_errno, extack); nlmsg_end(skb, nlh); } @@ -2496,7 +2497,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err, } if (tlvlen) - netlink_ack_tlv_fill(in_skb, skb, nlh, err, extack); + netlink_ack_tlv_fill(skb, nlh, err, extack); nlmsg_end(skb, rep); diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c index f4844683e120..9d8bd0b37e41 100644 --- a/net/rxrpc/af_rxrpc.c +++ b/net/rxrpc/af_rxrpc.c @@ -707,9 +707,10 @@ static int rxrpc_setsockopt(struct socket *sock, int level, int optname, ret = -EISCONN; if (rx->sk.sk_state != RXRPC_UNBOUND) goto error; - ret = copy_from_sockptr(&min_sec_level, optval, - sizeof(unsigned int)); - if (ret < 0) + ret = 
copy_safe_from_sockptr(&min_sec_level, + sizeof(min_sec_level), + optval, optlen); + if (ret) goto error; ret = -EINVAL; if (min_sec_level > RXRPC_SECURITY_MAX) diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c index a97638bef6da..a5e87f9ea986 100644 --- a/net/sched/sch_fq.c +++ b/net/sched/sch_fq.c @@ -332,6 +332,12 @@ static bool fq_fastpath_check(const struct Qdisc *sch, struct sk_buff *skb, */ if (q->internal.qlen >= 8) return false; + + /* Ordering invariants fall apart if some delayed flows + * are ready but we haven't serviced them, yet. + */ + if (q->time_next_delayed_flow <= now + q->offload_horizon) + return false; } sk = skb->sk; diff --git a/scripts/Makefile.clang b/scripts/Makefile.clang index 6c23c6af797f..2435efae67f5 100644 --- a/scripts/Makefile.clang +++ b/scripts/Makefile.clang @@ -10,6 +10,7 @@ CLANG_TARGET_FLAGS_mips := mipsel-linux-gnu CLANG_TARGET_FLAGS_powerpc := powerpc64le-linux-gnu CLANG_TARGET_FLAGS_riscv := riscv64-linux-gnu CLANG_TARGET_FLAGS_s390 := s390x-linux-gnu +CLANG_TARGET_FLAGS_sparc := sparc64-linux-gnu CLANG_TARGET_FLAGS_x86 := x86_64-linux-gnu CLANG_TARGET_FLAGS_um := $(CLANG_TARGET_FLAGS_$(SUBARCH)) CLANG_TARGET_FLAGS := $(CLANG_TARGET_FLAGS_$(SRCARCH)) diff --git a/scripts/export_report.pl b/scripts/export_report.pl deleted file mode 100755 index feb3d5542a62..000000000000 --- a/scripts/export_report.pl +++ /dev/null @@ -1,186 +0,0 @@ -#!/usr/bin/env perl -# SPDX-License-Identifier: GPL-2.0-only -# -# (C) Copyright IBM Corporation 2006. -# Author : Ram Pai (linuxram@us.ibm.com) -# -# Usage: export_report.pl -k Module.symvers [-o report_file ] -f *.mod.c -# - -use warnings; -use Getopt::Std; -use strict; - -sub numerically { - my $no1 = (split /\s+/, $a)[1]; - my $no2 = (split /\s+/, $b)[1]; - return $no1 <=> $no2; -} - -sub alphabetically { - my ($module1, $value1) = @{$a}; - my ($module2, $value2) = @{$b}; - return $value1 <=> $value2 || $module2 cmp $module1; -} - -sub print_depends_on { - my ($href) = @_; - print "\n"; - for my $mod (sort keys %$href) { - my $list = $href->{$mod}; - print "\t$mod:\n"; - foreach my $sym (sort numerically @{$list}) { - my ($symbol, $no) = split /\s+/, $sym; - printf("\t\t%-25s\n", $symbol); - } - print "\n"; - } - print "\n"; - print "~"x80 , "\n"; -} - -sub usage { - print "Usage: @_ -h -k Module.symvers [ -o outputfile ] \n", - "\t-f: treat all the non-option argument as .mod.c files. ", - "Recommend using this as the last option\n", - "\t-h: print detailed help\n", - "\t-k: the path to Module.symvers file. 
By default uses ", - "the file from the current directory\n", - "\t-o outputfile: output the report to outputfile\n"; - exit 0; -} - -sub collectcfiles { - my @file; - open my $fh, '< modules.order' or die "cannot open modules.order: $!\n"; - while (<$fh>) { - s/\.ko$/.mod.c/; - push (@file, $_) - } - close($fh); - chomp @file; - return @file; -} - -my (%SYMBOL, %MODULE, %opt, @allcfiles); - -if (not getopts('hk:o:f',\%opt) or defined $opt{'h'}) { - usage($0); -} - -if (defined $opt{'f'}) { - @allcfiles = @ARGV; -} else { - @allcfiles = collectcfiles(); -} - -if (not defined $opt{'k'}) { - $opt{'k'} = "Module.symvers"; -} - -open (my $module_symvers, '<', $opt{'k'}) - or die "Sorry, cannot open $opt{'k'}: $!\n"; - -if (defined $opt{'o'}) { - open (my $out, '>', $opt{'o'}) - or die "Sorry, cannot open $opt{'o'} $!\n"; - - select $out; -} - -# -# collect all the symbols and their attributes from the -# Module.symvers file -# -while ( <$module_symvers> ) { - chomp; - my (undef, $symbol, $module, $gpl, $namespace) = split('\t'); - $SYMBOL { $symbol } = [ $module , "0" , $symbol, $gpl]; -} -close($module_symvers); - -# -# collect the usage count of each symbol. -# -my $modversion_warnings = 0; - -foreach my $thismod (@allcfiles) { - my $module; - - unless (open ($module, '<', $thismod)) { - warn "Sorry, cannot open $thismod: $!\n"; - next; - } - - my $state=0; - while ( <$module> ) { - chomp; - if ($state == 0) { - $state = 1 if ($_ =~ /static const struct modversion_info/); - next; - } - if ($state == 1) { - $state = 2 if ($_ =~ /__attribute__\(\(section\("__versions"\)\)\)/); - next; - } - if ($state == 2) { - if ( $_ !~ /0x[0-9a-f]+,/ ) { - next; - } - my $sym = (split /([,"])/,)[4]; - my ($module, $value, $symbol, $gpl) = @{$SYMBOL{$sym}}; - $SYMBOL{ $sym } = [ $module, $value+1, $symbol, $gpl]; - push(@{$MODULE{$thismod}} , $sym); - } - } - if ($state != 2) { - warn "WARNING:$thismod is not built with CONFIG_MODVERSIONS enabled\n"; - $modversion_warnings++; - } - close($module); -} - -print "\tThis file reports the exported symbols usage patterns by in-tree\n", - "\t\t\t\tmodules\n"; -printf("%s\n\n\n","x"x80); -printf("\t\t\t\tINDEX\n\n\n"); -printf("SECTION 1: Usage counts of all exported symbols\n"); -printf("SECTION 2: List of modules and the exported symbols they use\n"); -printf("%s\n\n\n","x"x80); -printf("SECTION 1:\tThe exported symbols and their usage count\n\n"); -printf("%-25s\t%-25s\t%-5s\t%-25s\n", "Symbol", "Module", "Usage count", - "export type"); - -# -# print the list of unused exported symbols -# -foreach my $list (sort alphabetically values(%SYMBOL)) { - my ($module, $value, $symbol, $gpl) = @{$list}; - printf("%-25s\t%-25s\t%-10s\t", $symbol, $module, $value); - if (defined $gpl) { - printf("%-25s\n",$gpl); - } else { - printf("\n"); - } -} -printf("%s\n\n\n","x"x80); - -printf("SECTION 2:\n\tThis section reports export-symbol-usage of in-kernel -modules. Each module lists the modules, and the symbols from that module that -it uses. 
Each listed symbol reports the number of modules using it\n"); - -print "\nNOTE: Got $modversion_warnings CONFIG_MODVERSIONS warnings\n\n" - if $modversion_warnings; - -print "~"x80 , "\n"; -for my $thismod (sort keys %MODULE) { - my $list = $MODULE{$thismod}; - my %depends; - $thismod =~ s/\.mod\.c/.ko/; - print "\t\t\t$thismod\n"; - foreach my $symbol (@{$list}) { - my ($module, $value, undef, $gpl) = @{$SYMBOL{$symbol}}; - push (@{$depends{"$module"}}, "$symbol $value"); - } - print_depends_on(\%depends); -} diff --git a/scripts/module.lds.S b/scripts/module.lds.S index 711c6e029936..c2f80f9141d4 100644 --- a/scripts/module.lds.S +++ b/scripts/module.lds.S @@ -18,10 +18,10 @@ SECTIONS { *(.export_symbol) } - __ksymtab 0 : { *(SORT(___ksymtab+*)) } - __ksymtab_gpl 0 : { *(SORT(___ksymtab_gpl+*)) } - __kcrctab 0 : { *(SORT(___kcrctab+*)) } - __kcrctab_gpl 0 : { *(SORT(___kcrctab_gpl+*)) } + __ksymtab 0 : ALIGN(8) { *(SORT(___ksymtab+*)) } + __ksymtab_gpl 0 : ALIGN(8) { *(SORT(___ksymtab_gpl+*)) } + __kcrctab 0 : ALIGN(4) { *(SORT(___kcrctab+*)) } + __kcrctab_gpl 0 : ALIGN(4) { *(SORT(___kcrctab_gpl+*)) } .ctors 0 : ALIGN(8) { *(SORT(.ctors.*)) *(.ctors) } .init_array 0 : ALIGN(8) { *(SORT(.init_array.*)) *(.init_array) } @@ -29,6 +29,7 @@ SECTIONS { .altinstructions 0 : ALIGN(8) { KEEP(*(.altinstructions)) } __bug_table 0 : ALIGN(8) { KEEP(*(__bug_table)) } __jump_table 0 : ALIGN(8) { KEEP(*(__jump_table)) } + __ex_table 0 : ALIGN(4) { KEEP(*(__ex_table)) } __patchable_function_entries : { *(__patchable_function_entries) } diff --git a/sound/soc/amd/ps/acp63.h b/sound/soc/amd/ps/acp63.h index 39208305dd6c..e54eabaa4d3e 100644 --- a/sound/soc/amd/ps/acp63.h +++ b/sound/soc/amd/ps/acp63.h @@ -231,6 +231,7 @@ struct sdw_dma_ring_buf_reg { * @sdw_en_stat: flag set to true when any one of the SoundWire manager instance is enabled * @addr: pci ioremap address * @reg_range: ACP reigister range + * @acp_rev: ACP PCI revision id * @sdw0-dma_intr_stat: DMA interrupt status array for SoundWire manager-SW0 instance * @sdw_dma_intr_stat: DMA interrupt status array for SoundWire manager-SW1 instance */ @@ -254,6 +255,7 @@ struct acp63_dev_data { bool sdw_en_stat; u32 addr; u32 reg_range; + u32 acp_rev; u16 sdw0_dma_intr_stat[ACP63_SDW0_DMA_MAX_STREAMS]; u16 sdw1_dma_intr_stat[ACP63_SDW1_DMA_MAX_STREAMS]; }; diff --git a/sound/soc/amd/ps/pci-ps.c b/sound/soc/amd/ps/pci-ps.c index a7583844f5b4..aef73ec6f7ef 100644 --- a/sound/soc/amd/ps/pci-ps.c +++ b/sound/soc/amd/ps/pci-ps.c @@ -267,6 +267,7 @@ static int amd_sdw_probe(struct device *dev) sdw_res.acp_lock = &acp_data->acp_lock; sdw_res.count = acp_data->info.count; sdw_res.mmio_base = acp_data->acp63_base; + sdw_res.acp_rev = acp_data->acp_rev; sdw_res.link_mask = acp_data->info.link_mask; ret = sdw_amd_probe(&sdw_res, &acp_data->sdw); if (ret) @@ -575,6 +576,7 @@ static int snd_acp63_probe(struct pci_dev *pci, } adata->addr = addr; adata->reg_range = ACP63_REG_END - ACP63_REG_START; + adata->acp_rev = pci->revision; pci_set_master(pci); pci_set_drvdata(pci, adata); mutex_init(&adata->acp_lock); diff --git a/sound/soc/sof/amd/acp.c b/sound/soc/sof/amd/acp.c index 95d4762c9d93..f7814dadf3ba 100644 --- a/sound/soc/sof/amd/acp.c +++ b/sound/soc/sof/amd/acp.c @@ -693,6 +693,7 @@ static int amd_sof_sdw_probe(struct snd_sof_dev *sdev) sdw_res.count = acp_data->info.count; sdw_res.link_mask = acp_data->info.link_mask; sdw_res.mmio_base = sdev->bar[ACP_DSP_BAR]; + sdw_res.acp_rev = acp_data->pci_rev; ret = sdw_amd_probe(&sdw_res, &acp_data->sdw); if (ret) diff 
--git a/tools/testing/selftests/drivers/net/hw/lib/py/linkconfig.py b/tools/testing/selftests/drivers/net/hw/lib/py/linkconfig.py index db84000fc75b..79fde603cbbc 100644 --- a/tools/testing/selftests/drivers/net/hw/lib/py/linkconfig.py +++ b/tools/testing/selftests/drivers/net/hw/lib/py/linkconfig.py @@ -218,5 +218,5 @@ class LinkConfig: json_data = process[0] """Check if the field exist in the json data""" if field not in json_data: - raise KsftSkipEx(f"Field {field} does not exist in the output of interface {json_data["ifname"]}") + raise KsftSkipEx(f'Field {field} does not exist in the output of interface {json_data["ifname"]}') return json_data[field] diff --git a/tools/testing/selftests/kvm/riscv/get-reg-list.c b/tools/testing/selftests/kvm/riscv/get-reg-list.c index 8e34f7fa44e9..54ab484d0000 100644 --- a/tools/testing/selftests/kvm/riscv/get-reg-list.c +++ b/tools/testing/selftests/kvm/riscv/get-reg-list.c @@ -41,9 +41,11 @@ bool filter_reg(__u64 reg) case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_I: case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_M: case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_V: + case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SMNPM: case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SMSTATEEN: case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SSAIA: case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SSCOFPMF: + case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SSNPM: case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SSTC: case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SVINVAL: case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SVNAPOT: @@ -414,9 +416,11 @@ static const char *isa_ext_single_id_to_str(__u64 reg_off) KVM_ISA_EXT_ARR(I), KVM_ISA_EXT_ARR(M), KVM_ISA_EXT_ARR(V), + KVM_ISA_EXT_ARR(SMNPM), KVM_ISA_EXT_ARR(SMSTATEEN), KVM_ISA_EXT_ARR(SSAIA), KVM_ISA_EXT_ARR(SSCOFPMF), + KVM_ISA_EXT_ARR(SSNPM), KVM_ISA_EXT_ARR(SSTC), KVM_ISA_EXT_ARR(SVINVAL), KVM_ISA_EXT_ARR(SVNAPOT), @@ -946,8 +950,10 @@ KVM_ISA_EXT_SUBLIST_CONFIG(aia, AIA); KVM_ISA_EXT_SUBLIST_CONFIG(fp_f, FP_F); KVM_ISA_EXT_SUBLIST_CONFIG(fp_d, FP_D); KVM_ISA_EXT_SIMPLE_CONFIG(h, H); +KVM_ISA_EXT_SIMPLE_CONFIG(smnpm, SMNPM); KVM_ISA_EXT_SUBLIST_CONFIG(smstateen, SMSTATEEN); KVM_ISA_EXT_SIMPLE_CONFIG(sscofpmf, SSCOFPMF); +KVM_ISA_EXT_SIMPLE_CONFIG(ssnpm, SSNPM); KVM_ISA_EXT_SIMPLE_CONFIG(sstc, SSTC); KVM_ISA_EXT_SIMPLE_CONFIG(svinval, SVINVAL); KVM_ISA_EXT_SIMPLE_CONFIG(svnapot, SVNAPOT); @@ -1009,8 +1015,10 @@ struct vcpu_reg_list *vcpu_configs[] = { &config_fp_f, &config_fp_d, &config_h, + &config_smnpm, &config_smstateen, &config_sscofpmf, + &config_ssnpm, &config_sstc, &config_svinval, &config_svnapot, diff --git a/tools/testing/selftests/module/Makefile b/tools/testing/selftests/module/Makefile new file mode 100644 index 000000000000..6132d7ddb08b --- /dev/null +++ b/tools/testing/selftests/module/Makefile @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: GPL-2.0-only +# Makefile for module loading selftests + +# No binaries, but make sure arg-less "make" doesn't trigger "run_tests" +all: + +TEST_PROGS := find_symbol.sh + +include ../lib.mk + +# Nothing to clean up. 
+clean: diff --git a/tools/testing/selftests/module/config b/tools/testing/selftests/module/config new file mode 100644 index 000000000000..b0c206b1ad47 --- /dev/null +++ b/tools/testing/selftests/module/config @@ -0,0 +1,3 @@ +CONFIG_TEST_RUNTIME=y +CONFIG_TEST_RUNTIME_MODULE=y +CONFIG_TEST_KALLSYMS=m diff --git a/tools/testing/selftests/module/find_symbol.sh b/tools/testing/selftests/module/find_symbol.sh new file mode 100755 index 000000000000..140364d3c49f --- /dev/null +++ b/tools/testing/selftests/module/find_symbol.sh @@ -0,0 +1,81 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0-or-later OR copyleft-next-0.3.1 +# Copyright (C) 2023 Luis Chamberlain <mcgrof@kernel.org> +# +# This is a stress test script for kallsyms through find_symbol() + +set -e + +# Kselftest framework requirement - SKIP code is 4. +ksft_skip=4 + +test_reqs() +{ + if ! which modprobe 2> /dev/null > /dev/null; then + echo "$0: You need modprobe installed" >&2 + exit $ksft_skip + fi + + if ! which kmod 2> /dev/null > /dev/null; then + echo "$0: You need kmod installed" >&2 + exit $ksft_skip + fi + + if ! which perf 2> /dev/null > /dev/null; then + echo "$0: You need perf installed" >&2 + exit $ksft_skip + fi + + uid=$(id -u) + if [ $uid -ne 0 ]; then + echo $msg must be run as root >&2 + exit $ksft_skip + fi +} + +load_mod() +{ + local STATS="-e duration_time" + STATS="$STATS -e user_time" + STATS="$STATS -e system_time" + STATS="$STATS -e page-faults" + local MOD=$1 + + local ARCH="$(uname -m)" + case "${ARCH}" in + x86_64) + perf stat $STATS $MODPROBE test_kallsyms_b + ;; + *) + time $MODPROBE test_kallsyms_b + exit 1 + ;; + esac +} + +remove_all() +{ + $MODPROBE -r test_kallsyms_b + for i in a b c d; do + $MODPROBE -r test_kallsyms_$i + done +} +test_reqs + +MODPROBE=$(</proc/sys/kernel/modprobe) + +remove_all +load_mod test_kallsyms_b +remove_all + +# Now pollute the namespace +$MODPROBE test_kallsyms_c +load_mod test_kallsyms_b + +# Now pollute the namespace with twice the number of symbols than the last time +remove_all +$MODPROBE test_kallsyms_c +$MODPROBE test_kallsyms_d +load_mod test_kallsyms_b + +exit 0 diff --git a/tools/testing/selftests/mount_setattr/mount_setattr_test.c b/tools/testing/selftests/mount_setattr/mount_setattr_test.c index 68801e1a9ec2..70f65eb320a7 100644 --- a/tools/testing/selftests/mount_setattr/mount_setattr_test.c +++ b/tools/testing/selftests/mount_setattr/mount_setattr_test.c @@ -1026,7 +1026,7 @@ FIXTURE_SETUP(mount_setattr_idmapped) "size=100000,mode=700"), 0); ASSERT_EQ(mount("testing", "/mnt", "tmpfs", MS_NOATIME | MS_NODEV, - "size=100000,mode=700"), 0); + "size=2m,mode=700"), 0); ASSERT_EQ(mkdir("/mnt/A", 0777), 0); diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile index 3d487b03c4a0..cb2fc601de66 100644 --- a/tools/testing/selftests/net/Makefile +++ b/tools/testing/selftests/net/Makefile @@ -78,7 +78,6 @@ TEST_PROGS += test_vxlan_vnifiltering.sh TEST_GEN_FILES += io_uring_zerocopy_tx TEST_PROGS += io_uring_zerocopy_tx.sh TEST_GEN_FILES += bind_bhash -TEST_GEN_PROGS += netlink-dumps TEST_GEN_PROGS += sk_bind_sendto_listen TEST_GEN_PROGS += sk_connect_zero_addr TEST_GEN_PROGS += sk_so_peek_off @@ -101,7 +100,7 @@ TEST_PROGS += ipv6_route_update_soft_lockup.sh TEST_PROGS += busy_poll_test.sh # YNL files, must be before "include ..lib.mk" -YNL_GEN_FILES := busy_poller +YNL_GEN_FILES := busy_poller netlink-dumps TEST_GEN_FILES += $(YNL_GEN_FILES) TEST_FILES := settings diff --git a/tools/testing/selftests/net/netlink-dumps.c 
b/tools/testing/selftests/net/netlink-dumps.c index 84e29b7dffb6..07423f256f96 100644 --- a/tools/testing/selftests/net/netlink-dumps.c +++ b/tools/testing/selftests/net/netlink-dumps.c @@ -12,11 +12,140 @@ #include <unistd.h> #include <linux/genetlink.h> +#include <linux/neighbour.h> +#include <linux/netdevice.h> #include <linux/netlink.h> #include <linux/mqueue.h> +#include <linux/rtnetlink.h> #include "../kselftest_harness.h" +#include <ynl.h> + +struct ext_ack { + int err; + + __u32 attr_offs; + __u32 miss_type; + __u32 miss_nest; + const char *str; +}; + +/* 0: no done, 1: done found, 2: extack found, -1: error */ +static int nl_get_extack(char *buf, size_t n, struct ext_ack *ea) +{ + const struct nlmsghdr *nlh; + const struct nlattr *attr; + ssize_t rem; + + for (rem = n; rem > 0; NLMSG_NEXT(nlh, rem)) { + nlh = (struct nlmsghdr *)&buf[n - rem]; + if (!NLMSG_OK(nlh, rem)) + return -1; + + if (nlh->nlmsg_type != NLMSG_DONE) + continue; + + ea->err = -*(int *)NLMSG_DATA(nlh); + + if (!(nlh->nlmsg_flags & NLM_F_ACK_TLVS)) + return 1; + + ynl_attr_for_each(attr, nlh, sizeof(int)) { + switch (ynl_attr_type(attr)) { + case NLMSGERR_ATTR_OFFS: + ea->attr_offs = ynl_attr_get_u32(attr); + break; + case NLMSGERR_ATTR_MISS_TYPE: + ea->miss_type = ynl_attr_get_u32(attr); + break; + case NLMSGERR_ATTR_MISS_NEST: + ea->miss_nest = ynl_attr_get_u32(attr); + break; + case NLMSGERR_ATTR_MSG: + ea->str = ynl_attr_get_str(attr); + break; + } + } + + return 2; + } + + return 0; +} + +static const struct { + struct nlmsghdr nlhdr; + struct ndmsg ndm; + struct nlattr ahdr; + __u32 val; +} dump_neigh_bad = { + .nlhdr = { + .nlmsg_len = sizeof(dump_neigh_bad), + .nlmsg_type = RTM_GETNEIGH, + .nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK | NLM_F_DUMP, + .nlmsg_seq = 1, + }, + .ndm = { + .ndm_family = 123, + }, + .ahdr = { + .nla_len = 4 + 4, + .nla_type = NDA_FLAGS_EXT, + }, + .val = -1, // should fail MASK validation +}; + +TEST(dump_extack) +{ + int netlink_sock; + char buf[8192]; + int one = 1; + int i, cnt; + ssize_t n; + + netlink_sock = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE); + ASSERT_GE(netlink_sock, 0); + + n = setsockopt(netlink_sock, SOL_NETLINK, NETLINK_CAP_ACK, + &one, sizeof(one)); + ASSERT_EQ(n, 0); + n = setsockopt(netlink_sock, SOL_NETLINK, NETLINK_EXT_ACK, + &one, sizeof(one)); + ASSERT_EQ(n, 0); + n = setsockopt(netlink_sock, SOL_NETLINK, NETLINK_GET_STRICT_CHK, + &one, sizeof(one)); + ASSERT_EQ(n, 0); + + /* Dump so many times we fill up the buffer */ + cnt = 64; + for (i = 0; i < cnt; i++) { + n = send(netlink_sock, &dump_neigh_bad, + sizeof(dump_neigh_bad), 0); + ASSERT_EQ(n, sizeof(dump_neigh_bad)); + } + + /* Read out the ENOBUFS */ + n = recv(netlink_sock, buf, sizeof(buf), MSG_DONTWAIT); + EXPECT_EQ(n, -1); + EXPECT_EQ(errno, ENOBUFS); + + for (i = 0; i < cnt; i++) { + struct ext_ack ea = {}; + + n = recv(netlink_sock, buf, sizeof(buf), MSG_DONTWAIT); + if (n < 0) { + ASSERT_GE(i, 10); + break; + } + ASSERT_GE(n, (ssize_t)sizeof(struct nlmsghdr)); + + EXPECT_EQ(nl_get_extack(buf, n, &ea), 2); + EXPECT_EQ(ea.attr_offs, + sizeof(struct nlmsghdr) + sizeof(struct ndmsg)); + } +} + static const struct { struct nlmsghdr nlhdr; struct genlmsghdr genlhdr; diff --git a/tools/testing/selftests/net/rds/Makefile b/tools/testing/selftests/net/rds/Makefile index 1803c39dbacb..612a7219990e 100644 --- a/tools/testing/selftests/net/rds/Makefile +++ b/tools/testing/selftests/net/rds/Makefile @@ -3,10 +3,9 @@ all: @echo mk_build_dir="$(shell pwd)" > include.sh -TEST_PROGS := run.sh \ - test.py 
+TEST_PROGS := run.sh -TEST_FILES := include.sh +TEST_FILES := include.sh test.py EXTRA_CLEAN := /tmp/rds_logs include.sh diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh index 7f05b5f9b76f..2e8243a65b50 100755 --- a/tools/testing/selftests/net/rtnetlink.sh +++ b/tools/testing/selftests/net/rtnetlink.sh @@ -29,6 +29,7 @@ ALL_TESTS=" kci_test_bridge_parent_id kci_test_address_proto kci_test_enslave_bonding + kci_test_mngtmpaddr " devdummy="test-dummy0" @@ -44,6 +45,7 @@ check_err() if [ $ret -eq 0 ]; then ret=$1 fi + [ -n "$2" ] && echo "$2" } # same but inverted -- used when command must fail for test to pass @@ -1239,6 +1241,99 @@ kci_test_enslave_bonding() ip netns del "$testns" } +# Called to validate the addresses on $IFNAME: +# +# 1. Every `temporary` address must have a matching `mngtmpaddr` +# 2. Every `mngtmpaddr` address must have some un`deprecated` `temporary` +# +# If the mngtmpaddr or tempaddr checking failed, return 0 and stop slowwait +validate_mngtmpaddr() +{ + local dev=$1 + local prefix="" + local addr_list=$(ip -j -n $testns addr show dev ${dev}) + local temp_addrs=$(echo ${addr_list} | \ + jq -r '.[].addr_info[] | select(.temporary == true) | .local') + local mng_prefixes=$(echo ${addr_list} | \ + jq -r '.[].addr_info[] | select(.mngtmpaddr == true) | .local' | \ + cut -d: -f1-4 | tr '\n' ' ') + local undep_prefixes=$(echo ${addr_list} | \ + jq -r '.[].addr_info[] | select(.temporary == true and .deprecated != true) | .local' | \ + cut -d: -f1-4 | tr '\n' ' ') + + # 1. All temporary addresses (temp and dep) must have a matching mngtmpaddr + for address in ${temp_addrs}; do + prefix=$(echo ${address} | cut -d: -f1-4) + if [[ ! " ${mng_prefixes} " =~ " $prefix " ]]; then + check_err 1 "FAIL: Temporary $address with no matching mngtmpaddr!"; + return 0 + fi + done + + # 2. All mngtmpaddr addresses must have a temporary address (not dep) + for prefix in ${mng_prefixes}; do + if [[ ! " ${undep_prefixes} " =~ " $prefix " ]]; then + check_err 1 "FAIL: No undeprecated temporary in $prefix!"; + return 0 + fi + done + + return 1 +} + +kci_test_mngtmpaddr() +{ + local ret=0 + + setup_ns testns + if [ $? -ne 0 ]; then + end_test "SKIP mngtmpaddr tests: cannot add net namespace $testns" + return $ksft_skip + fi + + # 1. Create a dummy Ethernet interface + run_cmd ip -n $testns link add ${devdummy} type dummy + run_cmd ip -n $testns link set ${devdummy} up + run_cmd ip netns exec $testns sysctl -w net.ipv6.conf.${devdummy}.use_tempaddr=1 + run_cmd ip netns exec $testns sysctl -w net.ipv6.conf.${devdummy}.temp_prefered_lft=10 + run_cmd ip netns exec $testns sysctl -w net.ipv6.conf.${devdummy}.temp_valid_lft=25 + run_cmd ip netns exec $testns sysctl -w net.ipv6.conf.${devdummy}.max_desync_factor=1 + + # 2. Create several mngtmpaddr addresses on that interface. + # with temp_*_lft configured to be pretty short (10 and 35 seconds + # for prefer/valid respectively) + for i in $(seq 1 9); do + run_cmd ip -n $testns addr add 2001:db8:7e57:${i}::1/64 mngtmpaddr dev ${devdummy} + done + + # 3. Confirm that a preferred temporary address exists for each mngtmpaddr + # address at all times, polling once per second for 30 seconds. + slowwait 30 validate_mngtmpaddr ${devdummy} + + # 4. Delete each mngtmpaddr address, one at a time (alternating between + # deleting and merely un-mngtmpaddr-ing), and confirm that the other + # mngtmpaddr addresses still have preferred temporaries. 
+ for i in $(seq 1 9); do + (( $i % 4 == 0 )) && mng_flag="mngtmpaddr" || mng_flag="" + if (( $i % 2 == 0 )); then + run_cmd ip -n $testns addr del 2001:db8:7e57:${i}::1/64 $mng_flag dev ${devdummy} + else + run_cmd ip -n $testns addr change 2001:db8:7e57:${i}::1/64 dev ${devdummy} + fi + # the temp addr should be deleted + validate_mngtmpaddr ${devdummy} + done + + if [ $ret -ne 0 ]; then + end_test "FAIL: mngtmpaddr add/remove incorrect" + else + end_test "PASS: mngtmpaddr add/remove correctly" + fi + + ip netns del "$testns" + return $ret +} + kci_test_rtnl() { local current_test diff --git a/tools/testing/selftests/riscv/Makefile b/tools/testing/selftests/riscv/Makefile index 7ce03d832b64..099b8c1f46f8 100644 --- a/tools/testing/selftests/riscv/Makefile +++ b/tools/testing/selftests/riscv/Makefile @@ -5,7 +5,7 @@ ARCH ?= $(shell uname -m 2>/dev/null || echo not) ifneq (,$(filter $(ARCH),riscv)) -RISCV_SUBTARGETS ?= hwprobe vector mm sigreturn +RISCV_SUBTARGETS ?= abi hwprobe mm sigreturn vector else RISCV_SUBTARGETS := endif diff --git a/tools/testing/selftests/riscv/abi/.gitignore b/tools/testing/selftests/riscv/abi/.gitignore new file mode 100644 index 000000000000..b38358f91c4d --- /dev/null +++ b/tools/testing/selftests/riscv/abi/.gitignore @@ -0,0 +1 @@ +pointer_masking diff --git a/tools/testing/selftests/riscv/abi/Makefile b/tools/testing/selftests/riscv/abi/Makefile new file mode 100644 index 000000000000..ed82ff9c664e --- /dev/null +++ b/tools/testing/selftests/riscv/abi/Makefile @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0 + +CFLAGS += -I$(top_srcdir)/tools/include + +TEST_GEN_PROGS := pointer_masking + +include ../../lib.mk + +$(OUTPUT)/pointer_masking: pointer_masking.c + $(CC) -static -o$@ $(CFLAGS) $(LDFLAGS) $^ diff --git a/tools/testing/selftests/riscv/abi/pointer_masking.c b/tools/testing/selftests/riscv/abi/pointer_masking.c new file mode 100644 index 000000000000..dee41b7ee3e3 --- /dev/null +++ b/tools/testing/selftests/riscv/abi/pointer_masking.c @@ -0,0 +1,332 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include <errno.h> +#include <fcntl.h> +#include <setjmp.h> +#include <signal.h> +#include <stdbool.h> +#include <sys/prctl.h> +#include <sys/wait.h> +#include <unistd.h> + +#include "../../kselftest.h" + +#ifndef PR_PMLEN_SHIFT +#define PR_PMLEN_SHIFT 24 +#endif +#ifndef PR_PMLEN_MASK +#define PR_PMLEN_MASK (0x7fUL << PR_PMLEN_SHIFT) +#endif + +static int dev_zero; + +static int pipefd[2]; + +static sigjmp_buf jmpbuf; + +static void sigsegv_handler(int sig) +{ + siglongjmp(jmpbuf, 1); +} + +static int min_pmlen; +static int max_pmlen; + +static inline bool valid_pmlen(int pmlen) +{ + return pmlen == 0 || pmlen == 7 || pmlen == 16; +} + +static void test_pmlen(void) +{ + ksft_print_msg("Testing available PMLEN values\n"); + + for (int request = 0; request <= 16; request++) { + int pmlen, ret; + + ret = prctl(PR_SET_TAGGED_ADDR_CTRL, request << PR_PMLEN_SHIFT, 0, 0, 0); + if (ret) + goto pr_set_error; + + ret = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0); + ksft_test_result(ret >= 0, "PMLEN=%d PR_GET_TAGGED_ADDR_CTRL\n", request); + if (ret < 0) + goto pr_get_error; + + pmlen = (ret & PR_PMLEN_MASK) >> PR_PMLEN_SHIFT; + ksft_test_result(pmlen >= request, "PMLEN=%d constraint\n", request); + ksft_test_result(valid_pmlen(pmlen), "PMLEN=%d validity\n", request); + + if (min_pmlen == 0) + min_pmlen = pmlen; + if (max_pmlen < pmlen) + max_pmlen = pmlen; + + continue; + +pr_set_error: + ksft_test_result_skip("PMLEN=%d PR_GET_TAGGED_ADDR_CTRL\n", request); 
+pr_get_error: + ksft_test_result_skip("PMLEN=%d constraint\n", request); + ksft_test_result_skip("PMLEN=%d validity\n", request); + } + + if (max_pmlen == 0) + ksft_exit_fail_msg("Failed to enable pointer masking\n"); +} + +static int set_tagged_addr_ctrl(int pmlen, bool tagged_addr_abi) +{ + int arg, ret; + + arg = pmlen << PR_PMLEN_SHIFT | tagged_addr_abi; + ret = prctl(PR_SET_TAGGED_ADDR_CTRL, arg, 0, 0, 0); + if (!ret) { + ret = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0); + if (ret == arg) + return 0; + } + + return ret < 0 ? -errno : -ENODATA; +} + +static void test_dereference_pmlen(int pmlen) +{ + static volatile int i; + volatile int *p; + int ret; + + ret = set_tagged_addr_ctrl(pmlen, false); + if (ret) + return ksft_test_result_error("PMLEN=%d setup (%d)\n", pmlen, ret); + + i = pmlen; + + if (pmlen) { + p = (volatile int *)((uintptr_t)&i | 1UL << (__riscv_xlen - pmlen)); + + /* These dereferences should succeed. */ + if (sigsetjmp(jmpbuf, 1)) + return ksft_test_result_fail("PMLEN=%d valid tag\n", pmlen); + if (*p != pmlen) + return ksft_test_result_fail("PMLEN=%d bad value\n", pmlen); + ++*p; + } + + p = (volatile int *)((uintptr_t)&i | 1UL << (__riscv_xlen - pmlen - 1)); + + /* These dereferences should raise SIGSEGV. */ + if (sigsetjmp(jmpbuf, 1)) + return ksft_test_result_pass("PMLEN=%d dereference\n", pmlen); + ++*p; + ksft_test_result_fail("PMLEN=%d invalid tag\n", pmlen); +} + +static void test_dereference(void) +{ + ksft_print_msg("Testing userspace pointer dereference\n"); + + signal(SIGSEGV, sigsegv_handler); + + test_dereference_pmlen(0); + test_dereference_pmlen(min_pmlen); + test_dereference_pmlen(max_pmlen); + + signal(SIGSEGV, SIG_DFL); +} + +static void execve_child_sigsegv_handler(int sig) +{ + exit(42); +} + +static int execve_child(void) +{ + static volatile int i; + volatile int *p = (volatile int *)((uintptr_t)&i | 1UL << (__riscv_xlen - 7)); + + signal(SIGSEGV, execve_child_sigsegv_handler); + + /* This dereference should raise SIGSEGV. */ + return *p; +} + +static void test_fork_exec(void) +{ + int ret, status; + + ksft_print_msg("Testing fork/exec behavior\n"); + + ret = set_tagged_addr_ctrl(min_pmlen, false); + if (ret) + return ksft_test_result_error("setup (%d)\n", ret); + + if (fork()) { + wait(&status); + ksft_test_result(WIFEXITED(status) && WEXITSTATUS(status) == 42, + "dereference after fork\n"); + } else { + static volatile int i = 42; + volatile int *p; + + p = (volatile int *)((uintptr_t)&i | 1UL << (__riscv_xlen - min_pmlen)); + + /* This dereference should succeed. */ + exit(*p); + } + + if (fork()) { + wait(&status); + ksft_test_result(WIFEXITED(status) && WEXITSTATUS(status) == 42, + "dereference after fork+exec\n"); + } else { + /* Will call execve_child(). 
*/ + execve("/proc/self/exe", (char *const []) { "", NULL }, NULL); + } +} + +static void test_tagged_addr_abi_sysctl(void) +{ + char value; + int fd; + + ksft_print_msg("Testing tagged address ABI sysctl\n"); + + fd = open("/proc/sys/abi/tagged_addr_disabled", O_WRONLY); + if (fd < 0) { + ksft_test_result_skip("failed to open sysctl file\n"); + ksft_test_result_skip("failed to open sysctl file\n"); + return; + } + + value = '1'; + pwrite(fd, &value, 1, 0); + ksft_test_result(set_tagged_addr_ctrl(min_pmlen, true) == -EINVAL, + "sysctl disabled\n"); + + value = '0'; + pwrite(fd, &value, 1, 0); + ksft_test_result(set_tagged_addr_ctrl(min_pmlen, true) == 0, + "sysctl enabled\n"); + + set_tagged_addr_ctrl(0, false); + + close(fd); +} + +static void test_tagged_addr_abi_pmlen(int pmlen) +{ + int i, *p, ret; + + i = ~pmlen; + + if (pmlen) { + p = (int *)((uintptr_t)&i | 1UL << (__riscv_xlen - pmlen)); + + ret = set_tagged_addr_ctrl(pmlen, false); + if (ret) + return ksft_test_result_error("PMLEN=%d ABI disabled setup (%d)\n", + pmlen, ret); + + ret = write(pipefd[1], p, sizeof(*p)); + if (ret >= 0 || errno != EFAULT) + return ksft_test_result_fail("PMLEN=%d ABI disabled write\n", pmlen); + + ret = read(dev_zero, p, sizeof(*p)); + if (ret >= 0 || errno != EFAULT) + return ksft_test_result_fail("PMLEN=%d ABI disabled read\n", pmlen); + + if (i != ~pmlen) + return ksft_test_result_fail("PMLEN=%d ABI disabled value\n", pmlen); + + ret = set_tagged_addr_ctrl(pmlen, true); + if (ret) + return ksft_test_result_error("PMLEN=%d ABI enabled setup (%d)\n", + pmlen, ret); + + ret = write(pipefd[1], p, sizeof(*p)); + if (ret != sizeof(*p)) + return ksft_test_result_fail("PMLEN=%d ABI enabled write\n", pmlen); + + ret = read(dev_zero, p, sizeof(*p)); + if (ret != sizeof(*p)) + return ksft_test_result_fail("PMLEN=%d ABI enabled read\n", pmlen); + + if (i) + return ksft_test_result_fail("PMLEN=%d ABI enabled value\n", pmlen); + + i = ~pmlen; + } else { + /* The tagged address ABI cannot be enabled when PMLEN == 0. */ + ret = set_tagged_addr_ctrl(pmlen, true); + if (ret != -EINVAL) + return ksft_test_result_error("PMLEN=%d ABI setup (%d)\n", + pmlen, ret); + } + + p = (int *)((uintptr_t)&i | 1UL << (__riscv_xlen - pmlen - 1)); + + ret = write(pipefd[1], p, sizeof(*p)); + if (ret >= 0 || errno != EFAULT) + return ksft_test_result_fail("PMLEN=%d invalid tag write (%d)\n", pmlen, errno); + + ret = read(dev_zero, p, sizeof(*p)); + if (ret >= 0 || errno != EFAULT) + return ksft_test_result_fail("PMLEN=%d invalid tag read\n", pmlen); + + if (i != ~pmlen) + return ksft_test_result_fail("PMLEN=%d invalid tag value\n", pmlen); + + ksft_test_result_pass("PMLEN=%d tagged address ABI\n", pmlen); +} + +static void test_tagged_addr_abi(void) +{ + ksft_print_msg("Testing tagged address ABI\n"); + + test_tagged_addr_abi_pmlen(0); + test_tagged_addr_abi_pmlen(min_pmlen); + test_tagged_addr_abi_pmlen(max_pmlen); +} + +static struct test_info { + unsigned int nr_tests; + void (*test_fn)(void); +} tests[] = { + { .nr_tests = 17 * 3, test_pmlen }, + { .nr_tests = 3, test_dereference }, + { .nr_tests = 2, test_fork_exec }, + { .nr_tests = 2, test_tagged_addr_abi_sysctl }, + { .nr_tests = 3, test_tagged_addr_abi }, +}; + +int main(int argc, char **argv) +{ + unsigned int plan = 0; + int ret; + + /* Check if this is the child process after execve(). 
*/ + if (!argv[0][0]) + return execve_child(); + + dev_zero = open("/dev/zero", O_RDWR); + if (dev_zero < 0) + return 1; + + /* Write to a pipe so the kernel must dereference the buffer pointer. */ + ret = pipe(pipefd); + if (ret) + return 1; + + ksft_print_header(); + + for (int i = 0; i < ARRAY_SIZE(tests); i++) + plan += tests[i].nr_tests; + + ksft_set_plan(plan); + + for (int i = 0; i < ARRAY_SIZE(tests); i++) + tests[i].test_fn(); + + ksft_finished(); +} diff --git a/virt/kvm/vfio.c b/virt/kvm/vfio.c index 72aa1fdeb699..196a102e34fb 100644 --- a/virt/kvm/vfio.c +++ b/virt/kvm/vfio.c @@ -347,7 +347,7 @@ static void kvm_vfio_release(struct kvm_device *dev) static int kvm_vfio_create(struct kvm_device *dev, u32 type); -static struct kvm_device_ops kvm_vfio_ops = { +static const struct kvm_device_ops kvm_vfio_ops = { .name = "kvm-vfio", .create = kvm_vfio_create, .release = kvm_vfio_release, |